From 7b0ad8f69e39cb698223f917c8ca73b17500940b Mon Sep 17 00:00:00 2001 From: Olof Date: Sun, 18 Aug 2024 20:54:59 +0000 Subject: [PATCH 01/19] added support for multiple backends on remote sites and round robin loadbalancing between them --- .vscode/launch.json | 2 +- odd-box-example-config.toml | 15 +- src/api/controllers/sites.rs | 2 +- src/configuration/mod.rs | 112 +++++- src/configuration/v1.rs | 236 ------------- src/configuration/v2.rs | 658 +++++++++++++++++++++++++++++++++++ src/http_proxy/service.rs | 37 +- src/http_proxy/utils.rs | 27 +- src/http_proxy/websockets.rs | 32 +- src/main.rs | 60 ++-- src/proc_host.rs | 2 +- src/proxy.rs | 111 +++--- src/tcp_proxy/tcp.rs | 126 ++++--- src/tui.rs | 6 - src/types/proxy_state.rs | 2 +- 15 files changed, 979 insertions(+), 449 deletions(-) create mode 100644 src/configuration/v2.rs diff --git a/.vscode/launch.json b/.vscode/launch.json index 61bfac3..5300e8d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -19,7 +19,7 @@ "kind": "bin" } }, - "args": ["C:/Users/oblink/Documents/visit/odd-box.toml","-p","80","-t","443"], + "args": ["odd-box-example-config.toml"], "cwd": "${workspaceFolder}" } ] diff --git a/odd-box-example-config.toml b/odd-box-example-config.toml index 5d03853..b15ffa7 100644 --- a/odd-box-example-config.toml +++ b/odd-box-example-config.toml @@ -1,8 +1,8 @@ -version = "V1" # this is the configuration format version, dont change it +version = "V2" # this is the configuration format version, dont change it root_dir = "~" # you can use $root_dir in env_var values and dir paths etc. log_level = "info" # trace,info,debug,info,warn,error alpn = false # optional - allows alpn negotiation for http/1.0 and h2 on tls connections -admin_api_port = 6789 # optional - leave out to disable the admin api +admin_api_port = 1234 # optional - leave out to disable the admin api port_range_start = 4200 # port range for automatic port assignment (the env var PORT will be set if you did not specify one manually for a process) default_log_format = "standard" # standard | dotnet ip = "127.0.0.1" # ip for proxy to listen to , can be ipv4/6 @@ -18,15 +18,18 @@ env_vars = [ [[remote_target]] # remote targets are those that odd-box is not responsible for running host_name = "lobsters.localtest.me" # incoming name for binding to (frontend) -target_hostname = "lobste.rs" # domain name or ip to proxy the request to (backend) capture_subdomains = false # optional, false by default: allows capturing wildcard requests such as test.lobsters.local forward_subdomains = false # optional, false by default: if the request is for subdomain.configureddomain.local with target example.com, # this option would cause the proxied request go to subdomain.example.com instead of example.com. disable_tcp_tunnel_mode = false # optional, false by default -https = true # optional, false by default: must be true if the target uses tls -port = 443 # optional - 80 by default if https is false, 443 by default if https is true + # optional, false by default: must be true if the target uses tls +# optional - 80 by default if https is false, 443 by default if https is true # h2_hint = "H2"/"H2C" - optional: used to signal use of prior knowledge http2 or http2 over clear text. - +backends = [ + # list of backends. 
uses round-robin load balancing + { address = "lobste.rs", port = 443, https = true } , + { address = "lobsters.dev", port = 443, https = true } +] [[hosted_process]] # hosted processes are ones that odd-box will keep running disable_tcp_tunnel_mode = false # optional, false by default host_name = "some_host.local" # incoming name for binding to (frontend) diff --git a/src/api/controllers/sites.rs b/src/api/controllers/sites.rs index d74a7ee..3d0da91 100644 --- a/src/api/controllers/sites.rs +++ b/src/api/controllers/sites.rs @@ -1,4 +1,4 @@ -use crate::configuration::v1::{InProcessSiteConfig, RemoteSiteConfig}; +use crate::configuration::v2::{InProcessSiteConfig, RemoteSiteConfig}; use super::*; use axum::extract::{Query, State}; diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs index 28ccda8..b00f7c5 100644 --- a/src/configuration/mod.rs +++ b/src/configuration/mod.rs @@ -1,20 +1,23 @@ use serde::{Deserialize, Serialize}; use utoipa::ToSchema; +use v1::H2Hint; pub (crate) mod legacy; pub (crate) mod v1; +pub (crate) mod v2; #[derive(Debug,Clone)] pub (crate) enum Config { #[allow(dead_code)]Legacy(legacy::Config), - V1(v1::OddBoxConfig) + V1(v1::OddBoxConfig), + V2(v2::OddBoxConfig) } #[derive(Debug,Clone)] -pub struct ConfigWrapper(pub v1::OddBoxConfig); +pub struct ConfigWrapper(pub v2::OddBoxConfig); impl std::ops::Deref for ConfigWrapper { - type Target = v1::OddBoxConfig; + type Target = v2::OddBoxConfig; fn deref(&self) -> &Self::Target { &self.0 } @@ -85,29 +88,45 @@ impl<'de> Deserialize<'de> for LogLevel { #[derive(Debug,Clone,Serialize,Deserialize,Default,ToSchema)] pub enum OddBoxConfigVersion { #[default] Unmarked, - V1 + V1, + V2 } impl Config { pub fn parse(content:&str) -> Result { + + let v2_result = toml::from_str::(content); + if let Ok(v2_config) = v2_result { + return Ok(Config::V2(v2_config)) + }; + let v1_result = toml::from_str::(content); if let Ok(v1_config) = v1_result { return Ok(Config::V1(v1_config)) }; + let legacy_result = toml::from_str::(&content); if let Ok(legacy_config) = legacy_result { return Ok(Config::Legacy(legacy_config)) }; - Err(format!("invalid v1 configuration file. {v1_result:?} ...\n\n ------- could not parse as legacy either due to error: {legacy_result:?}")) + + Err(format!("invalid configuration file. 
{v1_result:?} ...\n\n{legacy_result:?}")) } - pub fn try_upgrade(&self) -> Result { + pub fn try_upgrade_to_latest_version(&self) -> Result { match self { - Config::Legacy(old_cfg) => { - let updated : v1::OddBoxConfig = old_cfg.to_owned().try_into()?; - Ok(Config::V1(updated)) + Config::Legacy(legacy_config) => { + let v1 : v1::OddBoxConfig = legacy_config.to_owned().try_into()?; + let v2 : v2::OddBoxConfig = v1.to_owned().try_into()?; + Ok(v2) + }, + Config::V1(v1_config) => { + let v2 : v2::OddBoxConfig = v1_config.to_owned().try_into()?; + Ok(v2) + }, + Config::V2(v2) => { + Ok(v2.clone()) }, - Config::V1(_) => Err(format!("already in v1 format")), } } } @@ -153,6 +172,79 @@ impl TryFrom for v1::OddBoxConfig { root_dir: old_config.root_dir, tls_port: old_config.tls_port + }; + Ok(new_config) + } +} + + + + + +// V1 ---> V2 +impl TryFrom for v2::OddBoxConfig{ + + type Error = String; + + fn try_from(old_config: v1::OddBoxConfig) -> Result { + let new_config = v2::OddBoxConfig { + path: None, + version: OddBoxConfigVersion::V2, + admin_api_port: None, + alpn: Some(false), // allowing alpn would be a breaking change for h2c when using old configuration format + auto_start: old_config.auto_start, + default_log_format: old_config.default_log_format, + env_vars: old_config.env_vars, + ip: old_config.ip, + log_level: old_config.log_level, + http_port: old_config.http_port, + port_range_start: old_config.port_range_start, + hosted_process: Some(old_config.hosted_process.unwrap_or_default().into_iter().map(|x|{ + v2::InProcessSiteConfig { + forward_subdomains: x.forward_subdomains, + disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, + args: x.args, + auto_start: x.auto_start, + bin: x.bin, + capture_subdomains: x.capture_subdomains, + env_vars: x.env_vars, + host_name: x.host_name, + port: x.port, + log_format: x.log_format, + dir: x.dir, + https: x.https, + h2_hint: match x.h2_hint { + Some(H2Hint::H2) => Some(crate::configuration::v2::H2Hint::H2), + Some(H2Hint::H2C) => Some(crate::configuration::v2::H2Hint::H2C), + None => None, + }, + disabled: x.disabled + + } + }).collect()), + remote_target: Some(old_config.remote_target.unwrap_or_default().iter().map(|x|{ + v2::RemoteSiteConfig { + disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, + capture_subdomains: x.capture_subdomains, + forward_subdomains: x.forward_subdomains, + backends: vec![ + v2::Backend { + address: x.target_hostname.clone(), + port: x.port.expect("remote site must have a port.."), + https: x.https + } + ], + host_name: x.host_name.clone(), + h2_hint: match x.h2_hint { + Some(H2Hint::H2) => Some(crate::configuration::v2::H2Hint::H2), + Some(H2Hint::H2C) => Some(crate::configuration::v2::H2Hint::H2C), + None => None, + } + } + }).collect()), + root_dir: old_config.root_dir, + tls_port: old_config.tls_port + }; Ok(new_config) } diff --git a/src/configuration/v1.rs b/src/configuration/v1.rs index 47b1fa8..876c050 100644 --- a/src/configuration/v1.rs +++ b/src/configuration/v1.rs @@ -118,242 +118,6 @@ fn true_option() -> Option { Some(true) } -impl OddBoxConfig { - - - // Validates and populates variables in the configuration - pub fn init(&mut self,cfg_path:&str) -> anyhow::Result<()> { - - self.path = Some(std::path::Path::new(&cfg_path).canonicalize()?.to_str().unwrap_or_default().into()); - - - let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; - let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to 
parse home directory.")))?; - - tracing::info!("Resolved home directory: {}",&resolved_home_dir_str); - - let cfg_dir = Self::get_parent(cfg_path)?; - - if let Some(rd) = self.root_dir.as_mut() { - - if rd.contains("$root_dir") { - anyhow::bail!("it is clearly not a good idea to use $root_dir in the configuration of root dir...") - } - - let rd_with_vars_replaced = rd - .replace("$cfg_dir", &cfg_dir) - .replace("~", resolved_home_dir_str); - - let canonicalized_with_vars = - match std::fs::canonicalize(rd_with_vars_replaced.clone()) { - Ok(resolved_path) => { - resolved_path.display().to_string() - // we dont want to use ext path def on windows - .replace("\\\\?\\", "") - } - Err(e) => { - anyhow::bail!(format!("root_dir item in configuration ({rd}) resolved to this: '{rd_with_vars_replaced}' - error: {}", e)); - } - }; - - *rd = canonicalized_with_vars; - - tracing::debug!("$root_dir resolved to: {rd}") - } - - let cloned_root_dir = self.root_dir.clone(); - - - - - if let Some(procs) = self.hosted_process.as_deref_mut() { - for x in &mut procs.iter_mut() { - - if x.dir.len() < 5 { anyhow::bail!(format!("Invalid path configuration for {:?}",x))} - - Self::massage_proc(cfg_path, &cloned_root_dir, x)?; - - - // basic sanity check.. - if x.dir.contains("$root_dir") { - anyhow::bail!("Invalid configuration: {x:?}. Missing root_dir in configuration file but referenced for this item..") - } - - // if no log format is specified for the process but there is a global format, override it - if x.log_format.is_none() { - x.log_format = Some(self.default_log_format.clone()) - } - } - } - - - - Ok(()) - } - - pub fn is_valid(&self) -> anyhow::Result<()> { - - let mut all_host_names: Vec<&str> = vec![ - self.remote_target.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default(), - self.hosted_process.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default() - - ].concat(); - - all_host_names.sort(); - - let all_count = all_host_names.len(); - - all_host_names.dedup(); - - let unique_count = all_host_names.len(); - - if all_count != unique_count { - anyhow::bail!(format!("duplicated host names detected in config.")) - } - - Ok(()) - - } - - - fn get_parent(p:&str) -> anyhow::Result { - if let Some(directory_path_str) = - std::path::Path::new(&p) - .parent() - .map(|p| p.to_str().unwrap_or_default()) - { - if directory_path_str.eq("") { - tracing::debug!("$cfg_dir resolved to '.'"); - Ok(".".into()) - } else { - tracing::debug!("$cfg_dir resolved to {directory_path_str}"); - Ok(directory_path_str.into()) - } - - } else { - bail!(format!("Failed to resolve $cfg_dir")); - } - } - - fn massage_proc(cfg_path:&str,root_dir:&Option, proc:&mut InProcessSiteConfig) -> anyhow::Result<()> { - - let cfg_dir = Self::get_parent(&cfg_path)?; - - let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; - let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; - - let with_vars = |x:&str| -> String { - x.replace("$root_dir", & if let Some(rd) = &root_dir { rd.to_string() } else { "$root_dir".to_string() }) - .replace("$cfg_dir", &cfg_dir) - .replace("~", resolved_home_dir_str) - }; - - for a in &mut proc.args { - *a = with_vars(a) - } - - proc.dir = with_vars(&proc.dir); - proc.bin = with_vars(&proc.bin); - - Ok(()) - - } - - pub (crate) async fn add_or_replace_hosted_process(&mut 
self,hostname:&str,mut item:InProcessSiteConfig,state:GlobalState) -> anyhow::Result<()> { - - Self::massage_proc( - &self.path.clone().unwrap_or_default(), - &self.root_dir, - &mut item - )?; - - if let Some(hosted_site_configs) = &mut self.hosted_process { - - - - for x in hosted_site_configs.iter_mut() { - if hostname == x.host_name { - - let (tx,mut rx) = tokio::sync::mpsc::channel(1); - - state.2.send(crate::http_proxy::ProcMessage::Delete(hostname.into(),tx))?; - - if rx.recv().await == Some(0) { - // when we get this message, we know that the process has been stopped - // and that the loop has been exited as well. - tracing::debug!("Received a confirmation that the process was deleted"); - } else { - tracing::debug!("Failed to receive a confirmation that the process was deleted. This is a bug in odd-box."); - }; - - - break; - } - }; - - tracing::debug!("Pushing a new process to the configuration thru the admin api"); - hosted_site_configs.retain(|x| x.host_name != item.host_name); - hosted_site_configs.retain(|x| x.host_name != hostname); - hosted_site_configs.push(item.clone()); - - - // todo: auto port wont be respected here anymore as it is only used during init in main - // might need to make v2 config have port be required? - tokio::task::spawn(crate::proc_host::host( - item.clone(), - state.2.subscribe(), - state.clone(), - )); - tracing::trace!("Spawned a new thread for site: {:?}",hostname); - - let mut guard = state.0.write().await; - guard.site_states_map.retain(|k,_v| k != hostname); - guard.site_states_map.insert(hostname.to_owned(), crate::types::app_state::ProcState::Stopped); - } - - - - if let Some(p) = &self.path { - self.write_to_disk(&p) - } else { - bail!(ConfigurationUpdateError::Bug("No path found to the current configuration".into())) - } - - - - } - - - pub (crate) async fn add_or_replace_remote_site(&mut self,hostname:&str,item:RemoteSiteConfig,state:GlobalState) -> anyhow::Result<()> { - - - if let Some(sites) = self.remote_target.as_mut() { - // out with the old, in with the new - sites.retain(|x| x.host_name != hostname); - sites.retain(|x| x.host_name != item.host_name); - sites.push(item.clone()); - - // same as above but for the TUI state - let mut guard = state.0.write().await; - guard.site_states_map.retain(|k,_v| *k != item.host_name); - guard.site_states_map.retain(|k,_v| k != hostname); - guard.site_states_map.insert(hostname.to_owned(), crate::types::app_state::ProcState::Remote); - } - - - if let Some(p) = &self.path { - self.write_to_disk(&p) - } else { - bail!(ConfigurationUpdateError::Bug("No path found to the current configuration".into())) - } - - - } - - - -} - #[derive(Debug)] enum ConfigurationUpdateError { Bug(String) diff --git a/src/configuration/v2.rs b/src/configuration/v2.rs new file mode 100644 index 0000000..bc3d550 --- /dev/null +++ b/src/configuration/v2.rs @@ -0,0 +1,658 @@ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::path::Path; + +use anyhow::bail; +use serde::Serialize; +use serde::Deserialize; +use utoipa::ToSchema; +use crate::global_state::GlobalState; + +use super::EnvVar; +use super::LogFormat; +use super::LogLevel; + + +impl InProcessSiteConfig { + pub fn set_port(&mut self, port : u16) { + self.port = Some(port) + } +} + + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub (crate) struct InProcessSiteConfig{ + /// This is mostly useful in case the target uses SNI sniffing/routing + pub disable_tcp_tunnel_mode : Option, + /// H2C or H2 - used to signal use of prior knowledge http2 or http2 
over clear text. + pub h2_hint : Option, + pub host_name : String, + pub dir : String, + pub bin : String, + pub args : Vec, + pub env_vars : Vec, + pub log_format: Option, + /// Set this to false if you do not want this site to start automatically when odd-box starts + pub auto_start: Option, + pub port: Option, + pub https : Option, + /// If you wish to use wildcard routing for any subdomain under the 'host_name' + pub capture_subdomains : Option, + /// If you wish to use the subdomain from the request in forwarded requests: + /// test.example.com -> internal.site + /// vs + /// test.example.com -> test.internal.site + pub forward_subdomains : Option, + /// Set to true to prevent odd-box from starting this site automatically when it starts or using the 'start' command. + /// It can still be manually started by ctrl-clicking in the TUI. + pub disabled: Option + // ^ perhaps we should remove disabled from v2 and instead only use auto_start... ? +} + +#[derive(Debug, Eq,PartialEq,Hash, Clone, Serialize, Deserialize, ToSchema)] +pub (crate) enum H2Hint { + H2, + H2C +} + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema,Eq,PartialEq,Hash,)] +pub struct Backend { + pub address : String, + pub port: u16, + pub https : Option +} + + +#[derive(Debug, Eq,PartialEq,Hash, Clone, Serialize, Deserialize, ToSchema)] +pub (crate) struct RemoteSiteConfig{ + /// H2C or H2 - used to signal use of prior knowledge http2 or http2 over clear text. + pub h2_hint : Option, + pub host_name : String, + pub backends : Vec, + /// If you wish to use wildcard routing for any subdomain under the 'host_name' + pub capture_subdomains : Option, + /// This is mostly useful in case the target uses SNI sniffing/routing + pub disable_tcp_tunnel_mode : Option, + /// If you wish to use the subdomain from the request in forwarded requests: + /// test.example.com -> internal.site + /// vs + /// test.example.com -> test.internal.site + pub forward_subdomains : Option +} + +pub enum BackendFilter { + Http, + Https, + Any +} +fn filter_backend(backend: &Backend, filter: &BackendFilter) -> bool { + match filter { + BackendFilter::Http => backend.https.unwrap_or_default() == false, + BackendFilter::Https => backend.https.unwrap_or_default() == true, + BackendFilter::Any => true + } +} +impl RemoteSiteConfig { + + pub async fn next_backend(&self,state:&GlobalState, backend_filter: BackendFilter) -> Backend { + + + let stats = state.3.clone(); + + let ro_guard = stats.read().await; + let seen_before = ro_guard.contains_key(&self.host_name); + + let count = if seen_before { + let existing_stats_for_this_target = ro_guard.get(&self.host_name).expect("we never remove items from the map so this should always be here").clone(); + let mut guard = existing_stats_for_this_target.write().await; + guard.request_count += 1; + guard.request_count + } else { + drop(ro_guard); + let mut guard = stats.write().await; + let my_new_target_info = std::sync::Arc::new(tokio::sync::RwLock::new(crate::TargetRequestCount { request_count: 1 })); + guard.insert(self.host_name.clone(), my_new_target_info); + 1 + }; + + let filtered_backends = self.backends.iter().filter(|x|filter_backend(x,&backend_filter)) + .collect::>(); + + let selected_backend = *filtered_backends.get((count % (filtered_backends.len() as u128)) as usize ) + .expect("we should always have at least one backend but found none. 
this is a bug in oddbox ."); + + selected_backend.clone() + + + } +} + +#[derive(Debug, Clone, Serialize, Deserialize,ToSchema)] +pub struct OddBoxConfig { + #[schema(value_type = String)] + pub (crate) version : super::OddBoxConfigVersion, + pub (crate) root_dir : Option, + #[serde(default = "default_log_level")] + pub (crate) log_level : Option, + /// Defaults to true. Lets you enable/disable h2/http11 tls alpn algs during initial connection phase. + #[serde(default = "true_option")] + pub (crate) alpn : Option, + pub (crate) port_range_start : u16, + #[serde(default = "default_log_format")] + pub (crate) default_log_format : LogFormat, + #[schema(value_type = String)] + pub (crate) ip : Option, + #[serde(default = "default_http_port_8080")] + pub (crate) http_port : Option, + #[serde(default = "default_https_port_4343")] + pub (crate) tls_port : Option, + #[serde(default = "true_option")] + pub (crate) auto_start : Option, + pub (crate) env_vars : Vec, + pub (crate) remote_target : Option>, + pub (crate) hosted_process : Option>, + pub (crate) admin_api_port : Option, + pub (crate) path : Option + +} +fn default_log_level() -> Option { + Some(LogLevel::Info) +} +fn default_log_format() -> LogFormat { + LogFormat::standard +} +fn default_https_port_4343() -> Option { + Some(4343) +} +fn default_http_port_8080() -> Option { + Some(8080) +} + +fn true_option() -> Option { + Some(true) +} + +impl OddBoxConfig { + + + // Validates and populates variables in the configuration + pub fn init(&mut self,cfg_path:&str) -> anyhow::Result<()> { + + self.path = Some(std::path::Path::new(&cfg_path).canonicalize()?.to_str().unwrap_or_default().into()); + + + let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; + let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; + + tracing::info!("Resolved home directory: {}",&resolved_home_dir_str); + + let cfg_dir = Self::get_parent(cfg_path)?; + + if let Some(rd) = self.root_dir.as_mut() { + + if rd.contains("$root_dir") { + anyhow::bail!("it is clearly not a good idea to use $root_dir in the configuration of root dir...") + } + + let rd_with_vars_replaced = rd + .replace("$cfg_dir", &cfg_dir) + .replace("~", resolved_home_dir_str); + + let canonicalized_with_vars = + match std::fs::canonicalize(rd_with_vars_replaced.clone()) { + Ok(resolved_path) => { + resolved_path.display().to_string() + // we dont want to use ext path def on windows + .replace("\\\\?\\", "") + } + Err(e) => { + anyhow::bail!(format!("root_dir item in configuration ({rd}) resolved to this: '{rd_with_vars_replaced}' - error: {}", e)); + } + }; + + *rd = canonicalized_with_vars; + + tracing::debug!("$root_dir resolved to: {rd}") + } + + let cloned_root_dir = self.root_dir.clone(); + + + + + if let Some(procs) = self.hosted_process.as_deref_mut() { + for x in &mut procs.iter_mut() { + + if x.dir.len() < 5 { anyhow::bail!(format!("Invalid path configuration for {:?}",x))} + + Self::massage_proc(cfg_path, &cloned_root_dir, x)?; + + + // basic sanity check.. + if x.dir.contains("$root_dir") { + anyhow::bail!("Invalid configuration: {x:?}. 
Missing root_dir in configuration file but referenced for this item..") + } + + // if no log format is specified for the process but there is a global format, override it + if x.log_format.is_none() { + x.log_format = Some(self.default_log_format.clone()) + } + } + } + + + + Ok(()) + } + + pub fn is_valid(&self) -> anyhow::Result<()> { + + let mut all_host_names: Vec<&str> = vec![ + self.remote_target.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default(), + self.hosted_process.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default() + + ].concat(); + + all_host_names.sort(); + + let all_count = all_host_names.len(); + + all_host_names.dedup(); + + let unique_count = all_host_names.len(); + + if all_count != unique_count { + anyhow::bail!(format!("duplicated host names detected in config.")) + } + + Ok(()) + + } + + + fn get_parent(p:&str) -> anyhow::Result { + if let Some(directory_path_str) = + std::path::Path::new(&p) + .parent() + .map(|p| p.to_str().unwrap_or_default()) + { + if directory_path_str.eq("") { + tracing::debug!("$cfg_dir resolved to '.'"); + Ok(".".into()) + } else { + tracing::debug!("$cfg_dir resolved to {directory_path_str}"); + Ok(directory_path_str.into()) + } + + } else { + bail!(format!("Failed to resolve $cfg_dir")); + } + } + + fn massage_proc(cfg_path:&str,root_dir:&Option, proc:&mut InProcessSiteConfig) -> anyhow::Result<()> { + + let cfg_dir = Self::get_parent(&cfg_path)?; + + let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; + let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; + + let with_vars = |x:&str| -> String { + x.replace("$root_dir", & if let Some(rd) = &root_dir { rd.to_string() } else { "$root_dir".to_string() }) + .replace("$cfg_dir", &cfg_dir) + .replace("~", resolved_home_dir_str) + }; + + for a in &mut proc.args { + *a = with_vars(a) + } + + proc.dir = with_vars(&proc.dir); + proc.bin = with_vars(&proc.bin); + + Ok(()) + + } + + pub (crate) async fn add_or_replace_hosted_process(&mut self,hostname:&str,mut item:InProcessSiteConfig,state:GlobalState) -> anyhow::Result<()> { + + Self::massage_proc( + &self.path.clone().unwrap_or_default(), + &self.root_dir, + &mut item + )?; + + if let Some(hosted_site_configs) = &mut self.hosted_process { + + + + for x in hosted_site_configs.iter_mut() { + if hostname == x.host_name { + + let (tx,mut rx) = tokio::sync::mpsc::channel(1); + + state.2.send(crate::http_proxy::ProcMessage::Delete(hostname.into(),tx))?; + + if rx.recv().await == Some(0) { + // when we get this message, we know that the process has been stopped + // and that the loop has been exited as well. + tracing::debug!("Received a confirmation that the process was deleted"); + } else { + tracing::debug!("Failed to receive a confirmation that the process was deleted. 
This is a bug in odd-box."); + }; + + + break; + } + }; + + tracing::debug!("Pushing a new process to the configuration thru the admin api"); + hosted_site_configs.retain(|x| x.host_name != item.host_name); + hosted_site_configs.retain(|x| x.host_name != hostname); + hosted_site_configs.push(item.clone()); + + + tokio::task::spawn(crate::proc_host::host( + item.clone(), + state.2.subscribe(), + state.clone(), + )); + tracing::trace!("Spawned a new thread for site: {:?}",hostname); + + let mut guard = state.0.write().await; + guard.site_states_map.retain(|k,_v| k != hostname); + guard.site_states_map.insert(hostname.to_owned(), crate::types::app_state::ProcState::Stopped); + } + + + + if let Some(p) = &self.path { + self.write_to_disk(&p) + } else { + bail!(ConfigurationUpdateError::Bug("No path found to the current configuration".into())) + } + + + + } + + + pub (crate) async fn add_or_replace_remote_site(&mut self,hostname:&str,item:RemoteSiteConfig,state:GlobalState) -> anyhow::Result<()> { + + + if let Some(sites) = self.remote_target.as_mut() { + // out with the old, in with the new + sites.retain(|x| x.host_name != hostname); + sites.retain(|x| x.host_name != item.host_name); + sites.push(item.clone()); + + // same as above but for the TUI state + let mut guard = state.0.write().await; + guard.site_states_map.retain(|k,_v| *k != item.host_name); + guard.site_states_map.retain(|k,_v| k != hostname); + guard.site_states_map.insert(hostname.to_owned(), crate::types::app_state::ProcState::Remote); + } + + + if let Some(p) = &self.path { + self.write_to_disk(&p) + } else { + bail!(ConfigurationUpdateError::Bug("No path found to the current configuration".into())) + } + + + } + + + +} + +#[derive(Debug)] +enum ConfigurationUpdateError { + Bug(String) +} + + +impl std::fmt::Display for ConfigurationUpdateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + // ConfigurationUpdateError::NotFound => { + // f.write_str("No such hosted process found.") + // }, + // ConfigurationUpdateError::FailedToSave(e) => { + // f.write_fmt(format_args!("Failed to save due to error: {}",e)) + // }, + ConfigurationUpdateError::Bug(e) => { + f.write_fmt(format_args!("Failed to save due to a bug in odd-box: {}",e)) + } + } + } +} + +impl OddBoxConfig { + + pub fn save(&self) -> anyhow::Result<()> { + self.write_to_disk(self.path.clone().expect("must have been loaded from somewhere..").as_str())?; + Ok(()) + } + + // note: this seems silly but its needed because neither toml-rs nor toml_edit supports any decent + // formatting customization and ends up with spread out arrays of tables rather + // than inlining like we usually do for odd-box configs. 
+ pub fn write_to_disk(&self,current_path:&str) -> anyhow::Result<()> { + let mut formatted_toml = Vec::new(); + + formatted_toml.push(format!("version = \"{:?}\"", self.version)); + + if let Some(alpn) = self.alpn { + formatted_toml.push(format!("alpn = {}", alpn)); + } else { + formatted_toml.push(format!("alpn = {}", "false")); + } + + if let Some(port) = self.http_port { + formatted_toml.push(format!("http_port = {}", port)); + } + if let Some(port) = self.admin_api_port { + formatted_toml.push(format!("admin_api_port = {}", port)); + } + if let Some(ip) = &self.ip { + formatted_toml.push(format!("ip = \"{:?}\"", ip)); + } else { + formatted_toml.push(format!("ip = \"127.0.0.1\"")); + } + if let Some(tls_port) = self.tls_port { + formatted_toml.push(format!("tls_port = {}", tls_port)); + } + if let Some(auto_start) = self.auto_start { + formatted_toml.push(format!("auto_start = {}", auto_start)); + } else { + formatted_toml.push(format!("auto_start = false")); + } + + if let Some(root_dir) = &self.root_dir { + formatted_toml.push(format!("root_dir = {:?}", root_dir)); + } else { + formatted_toml.push(format!("root_dir = \"~\"")); + } + if let Some(log_level) = &self.log_level { + formatted_toml.push(format!("log_level = \"{:?}\"", log_level)); + } + formatted_toml.push(format!("port_range_start = {}", self.port_range_start)); + + + formatted_toml.push(format!("default_log_format = \"{:?}\"", self.default_log_format )); + + + formatted_toml.push("env_vars = [".to_string()); + for env_var in &self.env_vars { + formatted_toml.push(format!( + "\t{{ key = {:?}, value = {:?} }},", + env_var.key, env_var.value + )); + } + formatted_toml.push("]".to_string()); + + // TODO ---- backend config here + + // if let Some(remote_sites) = &self.remote_target { + // for site in remote_sites { + // formatted_toml.push("\n[[remote_target]]".to_string()); + // formatted_toml.push(format!("host_name = {:?}", site.host_name)); + // formatted_toml.push(format!("target_hostname = {:?}", site.target_hostname)); + // if let Some(hint) = &site.h2_hint { + // formatted_toml.push(format!("h2_hint = \"{:?}\"", hint)); + // } + + + // if let Some(capture_subdomains) = site.capture_subdomains { + // formatted_toml.push(format!("capture_subdomains = {}", capture_subdomains)); + // } + + // if let Some(b) = site.https { + // formatted_toml.push(format!("https = {}", b)); + // } + // if let Some(http) = site.port { + // formatted_toml.push(format!("port = {}", http)); + // } + + // if let Some(disable_tcp_tunnel_mode) = site.disable_tcp_tunnel_mode { + // formatted_toml.push(format!("disable_tcp_tunnel_mode = {}", disable_tcp_tunnel_mode)); + // } + // } + // } + + if let Some(processes) = &self.hosted_process { + for process in processes { + formatted_toml.push("\n[[hosted_process]]".to_string()); + formatted_toml.push(format!("host_name = {:?}", process.host_name)); + formatted_toml.push(format!("dir = {:?}", process.dir)); + formatted_toml.push(format!("bin = {:?}", process.bin)); + if let Some(hint) = &process.h2_hint { + formatted_toml.push(format!("h2_hint = \"{:?}\"", hint)); + } + + let args = process.args.iter().map(|arg| format!("{:?}", arg)).collect::>().join(", "); + formatted_toml.push(format!("args = [{}]", args)); + + + + + + if let Some(auto_start) = process.auto_start { + formatted_toml.push(format!("auto_start = {}", auto_start)); + } + + + if let Some(b) = process.https { + formatted_toml.push(format!("https = {}", b)); + } + if let Some(http) = process.port { + formatted_toml.push(format!("port = 
{}", http)); + } + + if let Some(capture_subdomains) = process.capture_subdomains { + formatted_toml.push(format!("capture_subdomains = {}", capture_subdomains)); + } else { + formatted_toml.push(format!("capture_subdomains = {}", "false")); + } + + formatted_toml.push("env_vars = [".to_string()); + for env_var in &process.env_vars { + formatted_toml.push(format!( + "\t{{ key = {:?}, value = {:?} }},", + env_var.key, env_var.value + )); + } + formatted_toml.push("]".to_string()); + + } + } + + let original_path = Path::new(current_path); + let backup_path = original_path.with_extension("toml.backup"); + std::fs::rename(original_path, &backup_path)?; + + if let Err(e) = std::fs::write(current_path, formatted_toml.join("\n")) { + bail!("Failed to write config to disk: {e}") + } else { + Ok(()) + } + + } +} + + + +pub fn example_v2() -> OddBoxConfig { + OddBoxConfig { + path: None, + admin_api_port: None, + version: super::OddBoxConfigVersion::V2, + alpn: Some(false), + auto_start: Some(true), + default_log_format: LogFormat::standard, + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + log_level: Some(LogLevel::Info), + http_port: Some(80), + port_range_start: 4200, + hosted_process: Some(vec![ + InProcessSiteConfig { + forward_subdomains: None, + disable_tcp_tunnel_mode: Some(false), + args: vec!["--test".to_string()], + auto_start: Some(true), + bin: "my_bin".into(), + capture_subdomains: None, + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + host_name: "some_host.local".into(), + port: Some(443) , + log_format: Some(LogFormat::standard), + dir: "/tmp".into(), + https: Some(true), + h2_hint: None, + disabled :None + + } + ]), + remote_target: Some(vec![ + RemoteSiteConfig { + forward_subdomains: None, + h2_hint: None, + host_name: "lobsters.local".into(), + backends: vec![ + Backend { + address: "lobste.rs".into(), + port: 443, + https: Some(true) + } + ], + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(false) + }, + RemoteSiteConfig { + forward_subdomains: Some(true), + h2_hint: None, + host_name: "google.local".into(), + backends: vec![ + Backend { + address: "google.com".into(), + port: 443, + https: Some(true) + } + ], + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(true) + } + ]), + root_dir: Some("/tmp".into()), + tls_port: Some(443) + + } +} \ No newline at end of file diff --git a/src/http_proxy/service.rs b/src/http_proxy/service.rs index cd46161..49b6fed 100644 --- a/src/http_proxy/service.rs +++ b/src/http_proxy/service.rs @@ -120,7 +120,7 @@ impl<'a> Service> for ReverseProxyService { } // handle normal proxy path - let f = handle( + let f = handle_http( self.remote_addr.expect("there must always be a client"), req, self.tx.clone(), @@ -138,7 +138,7 @@ impl<'a> Service> for ReverseProxyService { } #[allow(dead_code)] -async fn handle( +async fn handle_http( client_ip: std::net::SocketAddr, req: Request, tx: Arc>, @@ -290,26 +290,25 @@ fn get_subdomain(requested_hostname: &str, backend_hostname: &str) -> Option, client_tls_config:rustls::ClientConfig ) -> Result { - // if a target is marked with http, we wont try to use http - let enforce_https = remote_target_config.https.is_some_and(|x|x); - - let scheme = if enforce_https { "https" } else { "http" }; - let mut original_path_and_query 
= req.uri().path_and_query() .and_then(|x| Some(x.as_str())).unwrap_or_default(); if original_path_and_query == "/" { original_path_and_query = ""} - - let default_port = if enforce_https { 443 } else { 80 }; - + + let next_backend_target = remote_target_config.next_backend(&state, crate::configuration::v2::BackendFilter::Any).await; + + // if a target is marked with http, we wont try to use http + let enforce_https = next_backend_target.https.unwrap_or_default(); + + let scheme = if enforce_https { "https" } else { "http" }; let resolved_host_name = { @@ -319,31 +318,29 @@ async fn perform_remote_forwarding( if forward_subdomains && subdomain.is_some() { let subdomain = subdomain.unwrap(); tracing::debug!("remote forward terminating proxy rewrote subdomain: {subdomain}!"); - format!("{subdomain}.{}", &remote_target_config.target_hostname) + format!("{subdomain}.{}", &next_backend_target.address) } else { - remote_target_config.target_hostname.clone() + next_backend_target.address.clone() } }; - - let target_url = format!("{scheme}://{}:{}{}", resolved_host_name, - remote_target_config.port.unwrap_or(default_port), + next_backend_target.port, original_path_and_query ); - tracing::info!("Incoming request to '{}' for remote proxy target {target_url}",remote_target_config.host_name); + tracing::info!("Incoming request to '{}' for remote proxy target {target_url}",next_backend_target.address); let result = proxy( &req_host_name, - is_https, + next_backend_target.https.unwrap_or_default(), state.clone(), req, &target_url, crate::http_proxy::Target::Remote(remote_target_config.clone()), client_ip, - client_tls_config + client_tls_config, ).await; diff --git a/src/http_proxy/utils.rs b/src/http_proxy/utils.rs index 5c4d669..277a117 100644 --- a/src/http_proxy/utils.rs +++ b/src/http_proxy/utils.rs @@ -17,7 +17,7 @@ use tungstenite::http; use lazy_static::lazy_static; use crate::{ - configuration::v1::H2Hint, global_state::GlobalState, http_proxy::EpicResponse, tcp_proxy::ReverseTcpProxyTarget, types::{proxy_state::{ ConnectionKey, ProxyActiveConnection, ProxyActiveConnectionType }}, CustomError + configuration::v2::H2Hint, global_state::GlobalState, http_proxy::EpicResponse, tcp_proxy::ReverseTcpProxyTarget, types::proxy_state::{ ConnectionKey, ProxyActiveConnection, ProxyActiveConnectionType }, CustomError }; lazy_static! 
{ static ref TE_HEADER: HeaderName = HeaderName::from_static("te"); @@ -58,8 +58,8 @@ pub enum ProxyError { #[derive(Debug)] pub enum Target { - Remote(crate::configuration::v1::RemoteSiteConfig), - Proc(crate::configuration::v1::InProcessSiteConfig), + Remote(crate::configuration::v2::RemoteSiteConfig), + Proc(crate::configuration::v2::InProcessSiteConfig), } pub async fn proxy( @@ -72,11 +72,12 @@ pub async fn proxy( client_ip: SocketAddr, client_tls_config: ClientConfig ) -> Result { + let incoming_http_version = req.version(); tracing::info!( - "Incoming {incoming_http_version:?} request to proxy from {client_ip:?} with target url: {target_url}" + "Incoming {incoming_http_version:?} request to terminating proxy from {client_ip:?} with target url: {target_url}" ); @@ -85,24 +86,22 @@ pub async fn proxy( let mut connector = { https_builder.https_or_http().enable_all_versions().build() }; - let mut enforce_https = match &target { - Target::Remote(x) => x.https.unwrap_or_default(), - Target::Proc(x) => x.https.unwrap_or_default(), - }; + let mut enforce_https = is_https; let request_upgrade_type = get_upgrade_type(req.headers()); let request_upgraded = req.extensions_mut().remove::(); - let target_h2_hint = match &target { - Target::Remote(x) => x.h2_hint.clone(), - Target::Proc(x) => x.h2_hint.clone(), - }; - let mut enforce_http2 = false; let mut target_url = target_url.to_string(); - if let Some(hint) = target_h2_hint { + + let h2_hint = match &target { + Target::Remote(r) => r.h2_hint.clone(), + Target::Proc(p) => p.h2_hint.clone(), + }; + + if let Some(hint) = h2_hint { match hint { H2Hint::H2 => { diff --git a/src/http_proxy/websockets.rs b/src/http_proxy/websockets.rs index a0ccba1..fd2386e 100644 --- a/src/http_proxy/websockets.rs +++ b/src/http_proxy/websockets.rs @@ -54,17 +54,29 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: return Err(CustomError(format!("No target is configured to handle requests to {req_host_name}"))) } }; - - let enforce_https = match &target { - crate::http_proxy::Target::Remote(x) => x.https.unwrap_or_default(), - crate::http_proxy::Target::Proc(x) => x.https.unwrap_or_default(), - }; - - let default_port = if enforce_https { 443 } else { 80 }; - let (target_host,port) = match &target { - crate::http_proxy::Target::Remote(x) => (x.target_hostname.clone(),x.port.unwrap_or(default_port)), - crate::http_proxy::Target::Proc(x) => (x.host_name.clone(),x.port.unwrap_or(default_port)), + let (target_host,port,enforce_https) = match &target { + crate::http_proxy::Target::Remote(x) => { + let next_backend = x.next_backend(&service.state, crate::configuration::v2::BackendFilter::Any).await; + ( + next_backend.address.clone(), + next_backend.port, + next_backend.https.unwrap_or_default() + ) + }, + crate::http_proxy::Target::Proc(x) => { + + let backend_is_https = x.https.unwrap_or_default(); + let port = match x.port { + Some(p) => p, + None => if backend_is_https == true {443} else {80} + }; + ( + x.host_name.clone(), + port, + backend_is_https + ) + } }; let svc_scheme = if service.is_https_only {"wss"} else { "ws" }; diff --git a/src/main.rs b/src/main.rs index a1ca7a0..ed573f5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,6 +6,7 @@ mod proxy; use http_proxy::ProcMessage; use rustls::pki_types::{CertificateDer, PrivateKeyDer}; use self_update::cargo_crate_version; +use tokio::sync::RwLock; use tracing_subscriber::layer::SubscriberExt; use std::fmt::Debug; use std::{borrow::BorrowMut, sync::Mutex}; @@ -25,15 +26,39 @@ mod logging; use 
types::app_state::AppState; pub mod global_state { + use std::collections::HashMap; + use crate::http_proxy::ConfigWrapper; pub (crate) type GlobalState = + ( std::sync::Arc>, - std::sync::Arc >, + std::sync::Arc>, tokio::sync::broadcast::Sender, + + // we store target request counts here outside of the global app-state proxy stats + // as we do not want to lock them at the same time as the proxy stats. + // this is purely done to avoid performance issues. + std::sync::Arc< + tokio::sync::RwLock< + HashMap + > + > + > + > ); } + +#[derive(Debug, Clone)] +pub struct TargetRequestCount { + pub request_count : u128 +} + + #[derive(Debug)] struct DynamicCertResolver { cache: Mutex>>, @@ -303,24 +328,11 @@ async fn main() -> anyhow::Result<()> { let mut file = std::fs::File::open(&cfg_path)?; let mut contents = String::new(); file.read_to_string(&mut contents)?; - + + let mut config: ConfigWrapper = ConfigWrapper(match configuration::Config::parse(&contents) { - Ok(configuration::Config::V1(configuration)) => { - configuration - }, - Ok(old_config) => { - eprintln!("Warning: you are using a legacy style configuration file, it will be automatically updated"); - match old_config.try_upgrade() { - Ok(configuration::Config::V1(configuration)) => { - - configuration.write_to_disk(&cfg_path)?; - configuration - }, - Ok(_) => anyhow::bail!(format!("Unable to update the configuration file to new schema")), - Err(e) => anyhow::bail!(e), - } - }, + Ok(configuration) => configuration.try_upgrade_to_latest_version().expect("configuration upgrade failed. this is a bug in odd-box"), Err(e) => anyhow::bail!(e), }); @@ -434,18 +446,22 @@ async fn main() -> anyhow::Result<()> { } - - - - let shared_state : crate::global_state::GlobalState = + // CURRENT APPSTATE (std::sync::Arc::new( tokio::sync::RwLock::new( inner_state ) ), + + // CONFIGURATION shared_config.clone(), - tx.clone() + + // BROADCASTER + tx.clone(), + + // TARGET REQUEST COUNTS FOR LB FEATURE + Arc::new(RwLock::new(HashMap::new())) ); diff --git a/src/proc_host.rs b/src/proc_host.rs index 63e6a7d..93decce 100644 --- a/src/proc_host.rs +++ b/src/proc_host.rs @@ -11,7 +11,7 @@ use std::time::Duration; use std::os::windows::process::CommandExt; pub (crate) async fn host( - proc: crate::configuration::v1::InProcessSiteConfig, + proc: crate::configuration::v2::InProcessSiteConfig, mut rcv:tokio::sync::broadcast::Receiver, state: GlobalState ) { diff --git a/src/proxy.rs b/src/proxy.rs index 3be1df8..ab7d0df 100644 --- a/src/proxy.rs +++ b/src/proxy.rs @@ -234,7 +234,7 @@ async fn handle_new_tcp_stream( tcp_stream:TcpStream, source_addr:SocketAddr, targets: Vec, - expect_tls: bool, + incoming_connection_is_on_tls_port: bool, tx: std::sync::Arc>, state: GlobalState, ) { @@ -260,68 +260,66 @@ async fn handle_new_tcp_stream( typ: DataType::ClearText, http_version:_, target_host: Some(target) - }) if !expect_tls => { + }) if incoming_connection_is_on_tls_port == false => { if let Some(target) = tcp_proxy::ReverseTcpProxy::try_get_target_from_vec(targets, &target) { - if target.target_http_port.is_some() { - - - if target.is_hosted { - - let proc_state = { - let guard = state.0.read().await; - match guard.site_states_map.get(&target.host_name) { - Some(v) => Some(v.clone()), - _ => None - } - }; - match proc_state { - None => { - tracing::warn!("error 0001 has occurred") - }, - Some(app_state::ProcState::Stopped) - | Some(app_state::ProcState::Starting) => { - _ = tx.send(ProcMessage::Start(target.host_name.clone())); - let thn = target.host_name.clone(); - 
let mut has_started = false; - // done here to allow non-browser clients to reach the target socket without receiving unexpected loading screen html blobs - // as long as we are able to start the backing process within 10 seconds - for _ in 0..2 { - tokio::time::sleep(Duration::from_secs(5)).await; - tracing::debug!("handling an incoming request to a stopped target, waiting for up to 10 seconds for {thn} to spin up - after this we will release the request to the terminating proxy and show a 'please wait' page instaead."); - { - let guard = state.0.read().await; - match guard.site_states_map.get(&target.host_name) { - Some(&ProcState::Running) => { - has_started = true; - break - }, - _ => { } + + if target.backends.iter().any(|x|x.https.unwrap_or_default()==false) { + + if target.is_hosted { + + let proc_state = { + let guard = state.0.read().await; + match guard.site_states_map.get(&target.host_name) { + Some(v) => Some(v.clone()), + _ => None + } + }; + match proc_state { + None => { + tracing::warn!("error 0001 has occurred") + }, + Some(app_state::ProcState::Stopped) + | Some(app_state::ProcState::Starting) => { + _ = tx.send(ProcMessage::Start(target.host_name.clone())); + let thn = target.host_name.clone(); + let mut has_started = false; + // done here to allow non-browser clients to reach the target socket without receiving unexpected loading screen html blobs + // as long as we are able to start the backing process within 10 seconds + for _ in 0..2 { + tokio::time::sleep(Duration::from_secs(5)).await; + tracing::debug!("handling an incoming request to a stopped target, waiting for up to 10 seconds for {thn} to spin up - after this we will release the request to the terminating proxy and show a 'please wait' page instaead."); + { + let guard = state.0.read().await; + match guard.site_states_map.get(&target.host_name) { + Some(&ProcState::Running) => { + has_started = true; + break + }, + _ => { } + } } } + if has_started { + tracing::trace!("Using unencrypted tcp tunnel for remote target: {target:?}"); + tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, false,state.clone(),source_addr).await; + return; + } else { + tracing::trace!("{thn} is still not running... handing this request over to the terminating proxy.") + } } - if has_started { + , _ => { tracing::trace!("Using unencrypted tcp tunnel for remote target: {target:?}"); tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, false,state.clone(),source_addr).await; return; - } else { - tracing::trace!("{thn} is still not running... handing this request over to the terminating proxy.") } } - ,_=> { - tracing::trace!("Using unencrypted tcp tunnel for remote target: {target:?}"); - tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, false,state.clone(),source_addr).await; - return; - } - } - } else { - tracing::trace!("Using unencrypted tcp tunnel for remote target: {target:?}"); - tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, false,state.clone(),source_addr).await; - return; - } - } else { - tracing::debug!("peeked some clear text tcp data and found that the target exists but is not configured for clear text. 
we will use terminating mode for this..") + } else { + tracing::trace!("Using unencrypted tcp tunnel for remote target: {target:?}"); + tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, false,state.clone(),source_addr).await; + return; + } } } }, @@ -332,17 +330,18 @@ async fn handle_new_tcp_stream( typ: DataType::TLS, http_version:_, target_host: Some(target) - }) if expect_tls => { + }) if incoming_connection_is_on_tls_port => { if let Some(target) = tcp_proxy::ReverseTcpProxy::try_get_target_from_vec(targets, &target) { - - if target.target_tls_port.is_some() { - _ = tx.send(ProcMessage::Start(target.host_name.clone())); + + if target.backends.iter().any(|x|x.https.unwrap_or_default()) { + // at least one backend has https enabled so we will use the tls tunnel mode to there tracing::info!("USING TCP PROXY FOR TLS TUNNEL TO TARGET {target:?}"); tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, true,state.clone(),source_addr).await; return; } else { tracing::debug!("peeked some tls tcp data and found that the target exists but is not configured for https/tls. we will use terminating mode for this..") } + } }, e => { diff --git a/src/tcp_proxy/tcp.rs b/src/tcp_proxy/tcp.rs index 8ad7671..bcc6d7b 100644 --- a/src/tcp_proxy/tcp.rs +++ b/src/tcp_proxy/tcp.rs @@ -6,6 +6,7 @@ use std::{ net::SocketAddr, sync::Arc, }; +use crate::configuration::v2::BackendFilter; use crate::global_state::GlobalState; use crate::tcp_proxy::tls::client_hello::TlsClientHello; use crate::tcp_proxy::tls::client_hello::TlsClientHelloError; @@ -18,9 +19,8 @@ use tracing::*; /// Achieves TLS passthru by peeking at the ClientHello SNI ext data. #[derive(Debug,Eq,PartialEq,Hash,Clone)] pub struct ReverseTcpProxyTarget { - pub target_hostname: String, - pub target_http_port: Option, - pub target_tls_port: Option, + pub remote_target_config: Option, + pub backends: Vec, pub host_name: String, pub is_hosted : bool, pub capture_subdomains: bool, @@ -41,41 +41,43 @@ impl ReverseTcpProxyTargets { if let Some(x) = &cfg.hosted_process { for y in x.iter().filter(|xx|xx.disable_tcp_tunnel_mode.unwrap_or_default() == false) { - let mut target_http_port = None; - let mut target_tls_port = None; - if y.https.unwrap_or_default() { - target_tls_port = y.port; + let port = y.port.unwrap_or_default(); + if port > 0 { + tcp_targets.push(ReverseTcpProxyTarget { + remote_target_config: None, // we dont need this for hosted processes + capture_subdomains: y.capture_subdomains.unwrap_or_default(), + forward_wildcard: y.forward_subdomains.unwrap_or_default(), + backends: vec![crate::configuration::v2::Backend { + address: y.host_name.to_owned(), + https: y.https, + port: port + }], + host_name: y.host_name.to_owned(), + is_hosted: true, + sub_domain: None + }) } else { - target_http_port = y.port; + tracing::warn!("hosted process: {} has no port configured. skipping this target for tcp tunnel mode. 
this is most likely a bug in odd-box.",y.host_name); } - tcp_targets.push(ReverseTcpProxyTarget { - capture_subdomains: y.capture_subdomains.unwrap_or_default(), - forward_wildcard: y.forward_subdomains.unwrap_or_default(), - target_http_port, - target_tls_port, - target_hostname: y.host_name.to_owned(), - host_name: y.host_name.to_owned(), - is_hosted: true, - sub_domain: None - }) + + } } if let Some(x) = &cfg.remote_target { for y in x.iter().filter(|xx|xx.disable_tcp_tunnel_mode.unwrap_or_default() == false) { - let mut target_http_port = None; - let mut target_tls_port = None; - if y.https.unwrap_or_default() { - target_tls_port = y.port; - } else { - target_http_port = y.port; - } + + // we support comma separated hostnames for the same target temporarily for remotes. + // in this mode we require all backends to have the same scheme and port configuration.. + // this is temporary and will be removed once we have a v2 configuration format that + // supports multiple backend configurations for the same hostname. + + tcp_targets.push(ReverseTcpProxyTarget { + remote_target_config: Some(y.clone()), capture_subdomains: y.capture_subdomains.unwrap_or_default(), forward_wildcard: y.forward_subdomains.unwrap_or_default(), - target_hostname: y.target_hostname.to_owned(), - target_http_port, - target_tls_port, + backends: y.backends.clone(), host_name: y.host_name.to_owned(), is_hosted: false, sub_domain: None @@ -93,21 +95,23 @@ impl ReverseTcpProxyTarget { sub_domain: None, capture_subdomains: x.capture_subdomains.unwrap_or_default(), forward_wildcard: x.forward_subdomains.unwrap_or_default(), - target_hostname: x.target_hostname.clone(), - target_http_port: if x.https.unwrap_or_default() {None} else { x.port }, - target_tls_port:if x.https.unwrap_or_default() {x.port} else { None }, + backends: x.backends.clone(), host_name: x.host_name.clone(), - is_hosted: false + is_hosted: false, + remote_target_config : Some(x.clone()) }, crate::http_proxy::Target::Proc(x) => ReverseTcpProxyTarget { sub_domain: None, capture_subdomains: x.capture_subdomains.unwrap_or_default(), forward_wildcard: x.forward_subdomains.unwrap_or_default(), - target_hostname: x.host_name.clone(), - target_http_port: if x.https.unwrap_or_default() {None} else { x.port }, - target_tls_port:if x.https.unwrap_or_default() {x.port} else { None }, + backends: vec![crate::configuration::v2::Backend { + address: x.host_name.to_owned(), + https: x.https, + port: x.port.expect("remote target must have a port configured") + }], host_name: x.host_name.clone(), - is_hosted: true + is_hosted: true, + remote_target_config : None }, } } @@ -180,24 +184,22 @@ impl ReverseTcpProxy { Some(ReverseTcpProxyTarget { capture_subdomains: x.capture_subdomains, forward_wildcard: x.forward_wildcard, - target_hostname: x.target_hostname.clone(), - target_http_port: x.target_http_port, - target_tls_port: x.target_tls_port, + backends: x.backends.clone(), host_name: x.host_name.clone(), is_hosted: x.is_hosted, - sub_domain: None + sub_domain: None, + remote_target_config: x.remote_target_config.clone() }) } else { match Self::get_subdomain(parsed_name, &x.host_name) { Some(subdomain) => Some(ReverseTcpProxyTarget { capture_subdomains: x.capture_subdomains, forward_wildcard: x.forward_wildcard, - target_hostname: x.target_hostname.clone(), - target_http_port: x.target_http_port, - target_tls_port: x.target_tls_port, + backends: x.backends.clone(), host_name: x.host_name.clone(), is_hosted: x.is_hosted, sub_domain: Some(subdomain), + remote_target_config: 
x.remote_target_config.clone() }), None => None, } @@ -384,33 +386,27 @@ impl ReverseTcpProxy { client_address: SocketAddr ) { - let resolved_target = { + // only remotes have more than one backend. hosted processes always have a single backend. + let primary_backend = if let Some(remconf) = &target.remote_target_config { + remconf.next_backend(&state, if incoming_traffic_is_tls { BackendFilter::Https } else { BackendFilter::Http }).await + } else { + target.backends.first().expect("target must have at least one backend").to_owned() + }; + + + let resolved_target_address = { let subdomain = target.sub_domain.as_ref(); if target.forward_wildcard && subdomain.is_some() { tracing::debug!("tcp tunnel rewrote for subdomain: {:?}", subdomain); - format!("{:?}.{}", subdomain, target.target_hostname) + format!("{}.{}:{}", subdomain.unwrap(), primary_backend.address, primary_backend.port) } else { - target.target_hostname.clone() + format!("{}:{}", primary_backend.address, primary_backend.port) } }; - let target_addr = { - if incoming_traffic_is_tls { - if let Some(tls_port) = target.target_tls_port { - format!("{}:{}", resolved_target, tls_port) - } else if let Some(http_port) = target.target_http_port { - format!("{}:{}", resolved_target, http_port) - } else { - unreachable!() - } - } else if let Some(http_port) = target.target_http_port { - format!("{}:{}", resolved_target, http_port) - } else { - unreachable!() - } - }; - - match TcpStream::connect(target_addr.clone()).await { + tracing::trace!("tcp tunneling to target: {resolved_target_address} (tls: {incoming_traffic_is_tls})"); + + match TcpStream::connect(resolved_target_address.clone()).await { Ok(mut rem_stream) => { @@ -419,7 +415,7 @@ impl ReverseTcpProxy { let item = ProxyActiveConnection { target, - target_addr: format!("{target_addr} ({})",target_addr_socket.ip()), + target_addr: format!("{resolved_target_address} ({})",target_addr_socket.ip()), source_addr: source_addr.clone(), creation_time: Local::now(), description: None, @@ -460,7 +456,7 @@ impl ReverseTcpProxy { tracing::warn!("failed to read socket peer address.."); } }, - Err(e) => warn!("failed to connect to target {target:?} (using addr: {target_addr}) --> {e:?}"), + Err(e) => warn!("failed to connect to target {target:?} (using addr: {resolved_target_address}) --> {e:?}"), } } diff --git a/src/tui.rs b/src/tui.rs index a3f7e5a..ae3db2d 100644 --- a/src/tui.rs +++ b/src/tui.rs @@ -401,8 +401,6 @@ pub (crate) async fn run( } -// TODO - move all the state types to a separate module - #[derive(Debug,Default)] pub struct TrafficTabState { pub test : String, @@ -589,8 +587,6 @@ fn draw_logs( let level = x.lvl; - // todo - theme - let s = match level { Level::ERROR => Style::default().fg(Color::Red), Level::TRACE => Style::default().fg(Color::Gray), @@ -603,8 +599,6 @@ fn draw_logs( let lvl_str = format!("{:>1$} ",x.lvl.as_str(),5); let thread_str = if let Some(n) = &x.thread {format!("{n} ")} else { format!("") }; - // todo: theme - let number = ratatui::text::Span::styled(nr_str.clone(),Style::default().fg(Color::DarkGray)); let level = ratatui::text::Span::styled(lvl_str.clone(),s); let thread_name = ratatui::text::Span::styled(thread_str.clone(),Style::default().fg(Color::DarkGray)); diff --git a/src/types/proxy_state.rs b/src/types/proxy_state.rs index 9073a1c..6baa457 100644 --- a/src/types/proxy_state.rs +++ b/src/types/proxy_state.rs @@ -5,7 +5,7 @@ use crate::tcp_proxy::ReverseTcpProxyTarget; #[derive(Debug)] pub (crate) struct ProxyStats { pub (crate) 
received_tcp_connections : usize,
-    pub (crate) active_connections : HashMap,
+    pub (crate) active_connections : HashMap
 }
 

From e7fc773923d9b5e3002c3a8f600e295b45f90397 Mon Sep 17 00:00:00 2001
From: Olof
Date: Mon, 19 Aug 2024 12:49:03 +0000
Subject: [PATCH 02/19] support wildcard for start/stop endpoints

---
 src/api/controllers/sites.rs | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/src/api/controllers/sites.rs b/src/api/controllers/sites.rs
index 3d0da91..dd9012d 100644
--- a/src/api/controllers/sites.rs
+++ b/src/api/controllers/sites.rs
@@ -311,16 +311,20 @@ pub (crate) async fn stop_handler(
     Query(query): Query
 ) -> axum::response::Result {
 
+    let signal = if query.hostname == "*" {
+        crate::http_proxy::ProcMessage::StopAll
+    } else {
+        crate::http_proxy::ProcMessage::Stop(query.hostname)
+    };
+
     // todo - check if site exists and if its already stopped?
-    global_state.2.send(crate::http_proxy::ProcMessage::Stop(query.hostname)).map_err(|e|
+    global_state.2.send(signal).map_err(|e|
         SitesError::UnknownError(format!("{e:?}"))
     )?;
     Ok(())
 }
 
-
-
 #[derive(Deserialize,IntoParams)]
 #[into_params(
@@ -348,8 +352,14 @@ pub (crate) async fn start_handler(
     Query(query): Query
 ) -> axum::response::Result {
 
+    let signal = if query.hostname == "*" {
+        crate::http_proxy::ProcMessage::StartAll
+    } else {
+        crate::http_proxy::ProcMessage::Start(query.hostname)
+    };
+
     // todo - check if site exists and if its already started?
-    global_state.2.send(crate::http_proxy::ProcMessage::Start(query.hostname)).map_err(|e|
+    global_state.2.send(signal).map_err(|e|
         SitesError::UnknownError(format!("{e:?}"))
     )?;
     Ok(())

From 0e9a8dc75438a53229f69d8f4ce1ff9ea57605c0 Mon Sep 17 00:00:00 2001
From: Olof
Date: Fri, 30 Aug 2024 18:20:39 +0000
Subject: [PATCH 03/19] checkpoint

---
 CaddyTest                           |    9 +
 Cargo.lock                          |  528 ++++++--
 Cargo.toml                          |   37 +-
 README.md                           |   13 +-
 odd-box-example-config-minimal.toml |    7 +-
 odd-box-example-config.toml         |  111 +-
 odd-box.toml                        |   50 +
 src/api/controllers/mod.rs          |    4 +-
 src/api/controllers/settings.rs     |   63 +-
 src/api/controllers/sites.rs        |   59 +-
 src/api/mod.rs                      |   10 +-
 src/configuration/legacy.rs         |   83 +-
 src/configuration/mod.rs            |  553 ++++++--
 src/configuration/v1.rs             |  396 ++----
 src/configuration/v2.rs             |  895 +++++++------
 src/http_proxy/mod.rs               |   23 +-
 src/http_proxy/service.rs           |  230 +++-
 src/http_proxy/utils.rs             |  328 ++---
 src/http_proxy/websockets.rs        |   48 +-
 src/logging.rs                      |   15 +-
 src/main.rs                         |  338 ++---
 src/proc_host.rs                    |  253 ++--
 src/proxy.rs                        |  173 ++-
 src/tcp_proxy/http1.rs              |   86 +-
 src/tcp_proxy/http2.rs              |    4 +-
 src/tcp_proxy/mod.rs                |    2 +-
 src/tcp_proxy/tcp.rs                |  296 ++---
 src/tcp_proxy/tls/client_hello.rs   |    6 +-
 src/tcp_proxy/tls/extension.rs      |    6 +-
 src/tcp_proxy/tls/mod.rs            |    4 +-
 src/tests/configuration.rs          |  173 +++
 src/tests/main.rs                   |    4 +
 src/tests/mod.rs                    |    2 +
 src/tui/connections_widget.rs       |  134 ++
 src/tui/logs_widget.rs              |  156 +++
 src/{tui.rs => tui/mod.rs}          | 1844 ++++++++++++---------------
 src/tui/scroll_state_wrapper.rs     |  105 ++
 src/tui/stats_widget.rs             |   30 +
 src/tui/threads_widget.rs           |  129 ++
 src/types/app_state.rs              |  241 +---
 src/types/mod.rs                    |    9 +-
 src/types/proxy_state.rs            |   29 +-
 src/types/tui_state.rs              |  118 ++
 43 files changed, 4492 insertions(+), 3112 deletions(-)
 create mode 100644 CaddyTest
 mode change 100644 => 100755 Cargo.toml
 create mode 100644 odd-box.toml
 create mode 100644 src/tests/configuration.rs
 create mode 100644 src/tests/main.rs
 create mode 100644 src/tests/mod.rs
 create mode 100644 src/tui/connections_widget.rs
 create mode 100644
src/tui/logs_widget.rs rename src/{tui.rs => tui/mod.rs} (52%) create mode 100644 src/tui/scroll_state_wrapper.rs create mode 100644 src/tui/stats_widget.rs create mode 100644 src/tui/threads_widget.rs create mode 100644 src/types/tui_state.rs diff --git a/CaddyTest b/CaddyTest new file mode 100644 index 0000000..7e563a2 --- /dev/null +++ b/CaddyTest @@ -0,0 +1,9 @@ +{ + log { + level DEBUG + output stdout + } +} +:9999 { + respond "pong" +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 18a42f6..14d4247 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,7 +216,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.6.0", - "rustix 0.38.32", + "rustix 0.38.34", "slab", "tracing", "windows-sys 0.52.0", @@ -255,7 +255,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.32", + "rustix 0.38.34", "windows-sys 0.48.0", ] @@ -282,7 +282,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.32", + "rustix 0.38.34", "signal-hook-registry", "slab", "windows-sys 0.48.0", @@ -296,9 +296,9 @@ checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", @@ -317,6 +317,33 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "aws-lc-rs" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] + [[package]] name = "axum" version = "0.7.5" @@ -329,9 +356,9 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-util", "itoa", "matchit", @@ -347,7 +374,7 @@ dependencies = [ "sha1", "sync_wrapper 1.0.1", "tokio", - "tokio-tungstenite", + "tokio-tungstenite 0.21.0", "tower", "tower-layer", "tower-service", @@ -364,7 +391,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -387,7 +414,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -425,6 +452,29 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +[[package]] +name = "bindgen" +version = "0.69.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +dependencies = [ + "bitflags 2.5.0", + "cexpr", + "clang-sys", + "itertools", + "lazy_static", + "lazycell", + "log 0.4.21", + "prettyplease", + 
"proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.55", + "which", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -476,9 +526,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cassowary" @@ -500,6 +550,19 @@ name = "cc" version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +dependencies = [ + "jobserver", + "libc", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] [[package]] name = "cfg-if" @@ -507,6 +570,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.35" @@ -521,6 +590,17 @@ dependencies = [ "windows-targets 0.52.4", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "4.5.3" @@ -561,6 +641,15 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +[[package]] +name = "cmake" +version = "0.1.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +dependencies = [ + "cc", +] + [[package]] name = "colorchoice" version = "1.0.0" @@ -651,13 +740,29 @@ dependencies = [ "bitflags 2.5.0", "crossterm_winapi", "libc", - "mio", + "mio 0.8.11", "parking_lot", "signal-hook", "signal-hook-mio", "winapi", ] +[[package]] +name = "crossterm" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" +dependencies = [ + "bitflags 2.5.0", + "crossterm_winapi", + "mio 1.0.2", + "parking_lot", + "rustix 0.38.34", + "signal-hook", + "signal-hook-mio", + "winapi", +] + [[package]] name = "crossterm_winapi" version = "0.9.1" @@ -677,6 +782,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctrlc" +version = "3.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" +dependencies = [ + "nix 0.28.0", + "windows-sys 0.52.0", +] + [[package]] name = "dark-light" version = "1.0.0" @@ -694,6 +809,20 @@ dependencies = [ "zvariant", ] +[[package]] +name = "dashmap" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +dependencies = [ + "cfg-if", + 
"crossbeam-utils", + "hashbrown 0.14.3", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" version = "2.5.0" @@ -811,6 +940,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "either" version = "1.10.0" @@ -1007,6 +1142,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures-channel" version = "0.3.30" @@ -1124,6 +1265,12 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + [[package]] name = "h2" version = "0.3.25" @@ -1272,6 +1419,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "hpack" version = "0.3.0" @@ -1316,9 +1472,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -1333,7 +1489,7 @@ dependencies = [ "bytes", "futures-core", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -1381,16 +1537,16 @@ dependencies = [ [[package]] name = "hyper" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", "h2 0.4.3", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -1433,17 +1589,34 @@ checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-util", - "log 0.4.21", "rustls 0.22.2", - "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls 0.25.0", "tower-service", ] +[[package]] +name = "hyper-rustls" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "log 0.4.21", + "rustls 0.23.12", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -1452,7 +1625,7 @@ checksum = 
"70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-util", "native-tls", "tokio", @@ -1474,31 +1647,31 @@ dependencies = [ [[package]] name = "hyper-tungstenite" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a343d17fe7885302ed7252767dc7bb83609a874b6ff581142241ec4b73957ad" +checksum = "69ce21dae6ce6e5f336a444d846e592faf42c5c28f70a5c8ff67893cbcb304d3" dependencies = [ "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-util", "pin-project-lite", "tokio", - "tokio-tungstenite", - "tungstenite", + "tokio-tungstenite 0.23.1", + "tungstenite 0.23.0", ] [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.2.0", + "http-body 1.0.1", + "hyper 1.4.1", "pin-project-lite", "socket2 0.5.6", "tokio", @@ -1632,6 +1805,15 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.69" @@ -1647,12 +1829,28 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +[[package]] +name = "libloading" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +dependencies = [ + "cfg-if", + "windows-targets 0.52.4", +] + [[package]] name = "libredox" version = "0.0.1" @@ -1757,9 +1955,9 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoffset" @@ -1795,6 +1993,12 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.7.2" @@ -1816,6 +2020,25 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" 
+dependencies = [ + "hermit-abi", + "libc", + "log 0.4.21", + "wasi", + "windows-sys 0.52.0", +] + +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "native-tls" version = "0.2.11" @@ -1846,6 +2069,28 @@ dependencies = [ "memoffset 0.7.1", ] +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.5.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1871,16 +2116,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "num_enum" version = "0.7.3" @@ -1938,27 +2173,30 @@ dependencies = [ "bytes", "chrono", "clap", - "crossterm", + "crossterm 0.28.1", + "ctrlc", "dark-light", + "dashmap", "dirs 5.0.1", "futures-util", "h2 0.4.3", "hpack", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-hickory", - "hyper-rustls 0.26.0", + "hyper-rustls 0.27.2", "hyper-tls", "hyper-trust-dns", "hyper-tungstenite", "hyper-util", "lazy_static", + "memchr", "ratatui", "rcgen", "regex", "reqwest", - "rustls 0.22.2", + "rustls 0.23.12", "rustls-pemfile 2.1.1", "self_update", "serde", @@ -1967,16 +2205,16 @@ dependencies = [ "socket2 0.5.6", "time", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls 0.26.0", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.23.1", "tokio-util", "toml", "toml_edit 0.22.9", "tower-http", "tracing", "tracing-subscriber", - "tungstenite", + "tungstenite 0.24.0", "unicase", "url", "utoipa", @@ -2197,7 +2435,7 @@ dependencies = [ "concurrent-queue", "hermit-abi", "pin-project-lite", - "rustix 0.38.32", + "rustix 0.38.34", "tracing", "windows-sys 0.52.0", ] @@ -2220,6 +2458,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" +dependencies = [ + "proc-macro2", + "syn 2.0.55", +] + [[package]] name = "proc-macro-crate" version = "1.3.1" @@ -2320,7 +2568,7 @@ dependencies = [ "bitflags 2.5.0", "cassowary", "compact_str", - "crossterm", + "crossterm 0.27.0", "indoc", "itertools", "lru", @@ -2422,9 +2670,9 @@ dependencies = [ "futures-util", "h2 0.4.3", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.2.0", + "hyper 1.4.1", "hyper-rustls 0.26.0", "hyper-tls", "hyper-util", @@ -2536,6 +2784,12 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc-hash" +version = "1.1.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustix" version = "0.37.27" @@ -2552,9 +2806,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -2589,6 +2843,21 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls" +version = "0.23.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +dependencies = [ + "aws-lc-rs", + "log 0.4.21", + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -2623,16 +2892,17 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.5.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ + "aws-lc-rs", "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", @@ -2724,7 +2994,7 @@ version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e4997484b55df069a4773d822715695b2cc27b23829eca2a4b41690e948bdeb" dependencies = [ - "hyper 1.2.0", + "hyper 1.4.1", "indicatif", "log 0.4.21", "quick-xml", @@ -2860,6 +3130,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook" version = "0.3.17" @@ -2872,12 +3148,13 @@ dependencies = [ [[package]] name = "signal-hook-mio" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" +checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio", + "mio 0.8.11", + "mio 1.0.2", "signal-hook", ] @@ -3050,7 +3327,7 @@ checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand 2.0.2", - "rustix 0.38.32", + "rustix 0.38.34", "windows-sys 0.52.0", ] @@ -3132,28 +3409,27 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", "libc", - "mio", - "num_cpus", + "mio 1.0.2", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.6", "tokio-macros", - "windows-sys 0.48.0", + 
"windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", @@ -3192,6 +3468,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls 0.23.12", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.15" @@ -3211,25 +3498,36 @@ checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" dependencies = [ "futures-util", "log 0.4.21", - "rustls 0.22.2", + "tokio", + "tungstenite 0.21.0", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" +dependencies = [ + "futures-util", + "log 0.4.21", + "rustls 0.23.12", "rustls-pki-types", "tokio", - "tokio-rustls 0.25.0", - "tungstenite", + "tokio-rustls 0.26.0", + "tungstenite 0.23.0", ] [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -3303,7 +3601,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "http-range-header", "httpdate", @@ -3467,14 +3765,50 @@ dependencies = [ "httparse", "log 0.4.21", "rand", - "rustls 0.22.2", - "rustls-pki-types", "sha1", "thiserror", "url", "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log 0.4.21", + "rand", + "rustls 0.23.12", + "rustls-pki-types", + "sha1", + "thiserror", + "utf-8", +] + +[[package]] +name = "tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log 0.4.21", + "rand", + "sha1", + "thiserror", + "utf-8", +] + [[package]] name = "typenum" version = "1.17.0" @@ -3848,6 +4182,18 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.34", +] + [[package]] name = "winapi" version = "0.3.9" @@ -4110,7 +4456,7 @@ dependencies = [ "futures-sink", "futures-util", "hex", - "nix", + "nix 0.26.4", "once_cell", "ordered-stream", "rand", @@ -4177,6 +4523,20 @@ name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.55", +] [[package]] name = "zip" diff --git a/Cargo.toml b/Cargo.toml old mode 100644 new mode 100755 index f189849..45088c3 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,13 +9,13 @@ license-file = "LICENSE" [dependencies] dirs = "5.0.1" -futures-util = "0.3.28" -hyper = { version = "1.1.0" , features=["http2","client","server"] } -hyper-util = { version = "0.1.1", features = ["full"] } +futures-util = "0.3.30" +hyper = { version = "1.4.1" , features=["http2","client","server"] } +hyper-util = { version = "0.1.7", features = ["full"] } regex = "1.9.5" serde = { version = "1.0.88", features = ["derive"] } -tokio = { version = "1.32.0", features = ["full"] } -tokio-tungstenite = { version="0.21.0" , features = ["__rustls-tls"] } +tokio = { version = "1.39.3", features = ["full"] } +tokio-tungstenite = { version="0.23.1" , features = ["__rustls-tls"] } toml = "0.8.4" tracing = "0.1.37" tracing-subscriber = { version="0.3.18", features=[ "env-filter","std","fmt","time"] } @@ -32,37 +32,37 @@ lazy_static = "1.4.0" unicase = "2.7.0" hyper-tls = "0.6.0" clap = { version="4.4.7", features=["derive"]} -rustls = "0.22.2" -tokio-rustls = "0.25.0" +rustls = "0.23.12" +tokio-rustls = "0.26.0" rustls-pemfile = "2.0.0" rcgen = "0.13.1" socket2 = "0.5.5" -hyper-tungstenite = "0.13.0" +hyper-tungstenite = "0.14.0" ratatui = "0.26.2" #ratatui = { git = "https://github.com/ratatui-org/ratatui" } -crossterm = { version = "0.27.0" } +crossterm = { version = "0.28.1" } chrono = "0.4.31" time = {version="0.3.30",features=["macros","formatting","parsing"]} reqwest = { version = "0.12.4", features = ["json"] } serde_json = "1.0.111" self_update = "0.40.0" -bytes = "1.5.0" +bytes = "1.7.1" http-body-util = "0.1.0" #active-win-pos-rs = "0.8.3" h2 = "0.4.2" -hyper-rustls = { version = "0.26.0", features = ["http2"] } +hyper-rustls = { version = "0.27.2", features = ["http2"] } hyper-hickory = "0.7.0" -http-body = "1.0.0" -tokio-stream = "0.1.14" - +http-body = "1.0.1" +tokio-stream = "0.1.15" +ctrlc = "3.2" hpack = { version = "0.3.0" } webpki = { version = "0.22.4" } anyhow = "1.0.79" uuid = { version = "1.7.0", features = ["v4"] } -tungstenite = "0.21.0" -tokio-util = "0.7.10" +tungstenite = "0.24.0" +tokio-util = "0.7.11" toml_edit = "0.22.6" serde_yaml = "0.9.32" ahash = "0.8.7" @@ -77,8 +77,10 @@ utoipa-rapidoc = { version = "4.0.0", features = ["axum"] } utoipa-redoc = { version = "4.0.0", features = ["axum"] } utoipauto = "0.1.10" tower-http = { version = "0.5.2" , features = ["fs","cors","trace"]} -async-trait = "0.1.79" +async-trait = "0.1.81" axum-extra = { version = "0.9.3", features = ["typed-header"] } +memchr = "2.7.4" +dashmap = "6.0.1" # =============================================================== [target.'cfg(windows)'.dependencies] @@ -86,7 +88,6 @@ windows = { version = "0.52.0", features = ["Win32","Win32_Foundation","Win32_Sy - [profile.release] opt-level = 'z' # Optimize for size #lto = true # Enable link-time optimization diff --git a/README.md b/README.md index 550864f..367e70b 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ As configuration is done thru basic files (toml format) which are easy to share, ### Performance 
-While the goal of this project **is not** to provide a state-of-the-art level performing proxy server for production environments, but rather a tool for simplifying local development scenarios, we do try to keep performance in mind.. Some rudimentary testing on this authors development machine shows that TCP tunnel mode supports 100k+ requests per second while the intercepting proxy mode handles 20k+ requests per second in most cases. More specific measurements of different scenarios will be added here at some point.
+While the goal of this project **is not** to provide a state-of-the-art proxy server for production environments, but rather a tool for simplifying local development scenarios, we do try to keep performance in mind. Some rudimentary testing on this author's development machine shows that TCP tunnel mode supports at least 200k requests per second while the intercepting proxy mode handles at least 100k requests per second. More specific measurements of different scenarios will be added here at some point.
 
 ### Terminal User Interface
@@ -51,16 +51,17 @@ See the odd-box-example-config.toml file in this repository for details around h
 
 ### Configuration Variables
 
-| Variable | Description |
-|------------|----------------------------------|
-| $root_dir | Resolves to whatever you set it to in the global configuration section. |
-| $cfg_dir | Resolves to the directory which the configuration toml file was read from. |
+| Variable | Description |
+|-------------|----------------------------------|
+| $root_dir | Resolves to whatever you set it to in the global configuration section. |
+| $cfg_dir | Resolves to the directory which the configuration toml file was read from. |
+| $port | Resolves to whatever port has been specified in the configuration. Only used for hosted processes. |
 
 ### Workflow tips
 
 If you are hosting a local project that you are currently working on, and want to do a rebuild without having to manually start and stop your site - you may want to consider having a pre-build step that does it for you:
 
-You can enable or disable all sites or specific ones using the http://localhost:port/START and http://localhost:port/STOP endpoints, optionally using query parameter "?proc=my_site" to stop or start a specific site. Sites start automatically again on the next request). The same can be acomplished thru the admin-api if you enable it.
+You can enable or disable all sites or specific ones using the http://localhost:port/START and http://localhost:port/STOP endpoints, optionally using query parameter "?proc=my_site" to stop or start a specific site. Sites start automatically again on the next request. The same can be accomplished thru the admin-api if you enable it.
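+
+For example, assuming odd-box is listening on the default http_port of 8080 and hosts a site named "my_site" (adjust both to match your configuration), a pre-build step could call:
+
+- http://localhost:8080/STOP?proc=my_site - stops a single site before rebuilding
+- http://localhost:8080/STOP - stops all sites
+- http://localhost:8080/START - starts everything again (sites also restart automatically on the next request)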
### DNS diff --git a/odd-box-example-config-minimal.toml b/odd-box-example-config-minimal.toml index cb15965..ffba15b 100644 --- a/odd-box-example-config-minimal.toml +++ b/odd-box-example-config-minimal.toml @@ -1,4 +1,4 @@ -version = "V1" +version = "V2" root_dir = "~" log_level = "info" port_range_start = 4200 @@ -6,7 +6,10 @@ env_vars = [] [[remote_target]] host_name = "lobsters.localtest.me" -target_hostname = "lobste.rs" +backends = [ + { address = "lobste.rs", port = 443, https = true } , + { address = "lobsters.dev", port = 443, https = true } +] [[hosted_process]] host_name = "py.localtest.me" diff --git a/odd-box-example-config.toml b/odd-box-example-config.toml index b15ffa7..bb6c933 100644 --- a/odd-box-example-config.toml +++ b/odd-box-example-config.toml @@ -1,21 +1,22 @@ -version = "V2" # this is the configuration format version, dont change it -root_dir = "~" # you can use $root_dir in env_var values and dir paths etc. -log_level = "info" # trace,info,debug,info,warn,error -alpn = false # optional - allows alpn negotiation for http/1.0 and h2 on tls connections -admin_api_port = 1234 # optional - leave out to disable the admin api -port_range_start = 4200 # port range for automatic port assignment (the env var PORT will be set if you did not specify one manually for a process) -default_log_format = "standard" # standard | dotnet -ip = "127.0.0.1" # ip for proxy to listen to , can be ipv4/6 -http_port = 8080 # optional, 8080 by default -tls_port = 4343 # optional, 4343 by default +version = "V2" # this is the configuration format version, dont change it +alpn = false # optional - allows alpn negotiation for http/1.0 and h2 on tls connections +http_port = 8080 # optional, 8080 by default +admin_api_port = 1234 # optional - leave out to disable the admin api +ip = "127.0.0.1" # ip for proxy to listen to , can be ipv4/6 +tls_port = 4343# optional, 4343 by default auto_start = false # optional, defaults to true - used as default value for configured sites. auto_start false means a site will not start automatically with odd-box, # but it will still be automatically started on incoming requests to that site. -env_vars = [ - # these are global environment variables - they will be set for all hosted processes - { key = "GRPC_TRACE" , value = "http,http1,http_keepalive,http2_stream_state" }, - { key = "GRPC_VERBOSITY" , value = "DEBUG" }, +root_dir = "~" # you can use $root_dir in env_var values and dir paths etc. 
+log_level = "warn" # trace,info,debug,info,warn,error +port_range_start = 4200 # port range for automatic port assignment (the env var PORT will be set if you did not specify one manually for a process) +default_log_format = "standard" +env_vars = [ + # these are global environment variables - they will be set for all hosted processes + { key = "GRPC_TRACE", value = "http,http1,http_keepalive,http2_stream_state" }, + { key = "GRPC_VERBOSITY", value = "DEBUG" }, ] + [[remote_target]] # remote targets are those that odd-box is not responsible for running host_name = "lobsters.localtest.me" # incoming name for binding to (frontend) capture_subdomains = false # optional, false by default: allows capturing wildcard requests such as test.lobsters.local @@ -24,43 +25,57 @@ forward_subdomains = false # optional, false by default: if the request is for s disable_tcp_tunnel_mode = false # optional, false by default # optional, false by default: must be true if the target uses tls # optional - 80 by default if https is false, 443 by default if https is true -# h2_hint = "H2"/"H2C" - optional: used to signal use of prior knowledge http2 or http2 over clear text. -backends = [ - # list of backends. uses round-robin load balancing - { address = "lobste.rs", port = 443, https = true } , - { address = "lobsters.dev", port = 443, https = true } +#hints = ["H2","H2C","H2CPK"] # - optional: used to decide which protocol to use for the target +backends = [ + { https = true, address="lobste.rs", port=443 }, + { https = true, address="lobsters.dev", port=443 } +] + +[[hosted_process]] # hosted processes are those that odd-box is responsible for running +host_name = "python.localtest.me" # incoming name for binding to (frontend) +dir = "$cfg_dir" # path where we should be located when running the binary file +bin = "/usr/bin/python3" # name or path of the binary that should be executed +args = [ + # any arguments you wish to pass to the binary + "-m", + "http.server", + "$port" ] -[[hosted_process]] # hosted processes are ones that odd-box will keep running -disable_tcp_tunnel_mode = false # optional, false by default -host_name = "some_host.local" # incoming name for binding to (frontend) -dir = "$root_dir" # path where we should be located when running the binary file -bin = "my_bin" # name or path of the binary that should be executed -args = ["--test"] # any arguments you wish to pass to the binary -log_format = "standard" # standard | dotnet auto_start = false # optional, uses global auto_start by default. set to false to prevent the process from starting when launching odd-box -port = 443 # optional, defaults to 443 for https configurations and 80 otherwise -https = true # must be set to https if the target expects tls connections -capture_subdomains = false # optional, false by default: allows capturing wildcard requests such as test.lobsters.local -forward_subdomains = false # optional, false by default: if the request is for subdomain.configureddomain.local with target example.com, - # this option would cause the proxied request go to subdomain.example.com instead of example.com. 
+https = false # must be set to https if the target expects tls connections env_vars = [ - # environment variables specific to this process - { key = "logserver", value = "http://www.example.com" }, - { key = "host", value = "odd-box" } + # environment variables specific to this process + # { key = "logserver", value = "http://www.example.com" }, + # { key = "host", value = "odd-box" }, ] -disabled = true # optional, false by default: set to true to disable the process from running when odd-box is started and when using the start-all commands. you can still manually start it in the tui. -# h2_hint = "H2"/"H2C" - optional: used to signal use of prior knowledge http2 or http2 over clear text. -[[hosted_process]] -host_name = "py.localtest.me" -dir = "$cfg_dir" -bin = "/usr/bin/python3" -args = ["-m", "http.server", "8012"] -auto_start = false -https = false -port = 8012 -capture_subdomains = false -env_vars = [ - { key = "PORT", value = "8012" }, -] \ No newline at end of file +# [[hosted_process]] +# host_name = "some_host.local" +# dir = "$root_dir" +# bin = "my_bin" +# args = [ # any arguments you wish to pass to the binary +# "--test" +# ] +# auto_start = false # optional, uses global auto_start by default. set to false to prevent the process from starting when launching odd-box +# https = true # must be set to https if the target expects tls connections +# port = 443 # optional, defaults to 443 for https configurations and 80 otherwise +# env_vars = [ +# # environment variables specific to this process +# { key = "logserver", value = "http://www.example.com" }, +# { key = "host", value = "odd-box" }, +# ] + +# [[hosted_process]] +# host_name = "caddy.localtest.me" +# disable_tcp_tunnel_mode = true +# port = 9999 +# bin = "/nix/store/aq5r61lmr9six0lyi6xikxwvnyp16dfy-user-environment/bin/caddy" +# args = [ +# "run", +# "--config", +# "./CaddyTest", +# "--adapter", +# "caddyfile" +# ] + diff --git a/odd-box.toml b/odd-box.toml new file mode 100644 index 0000000..76113e5 --- /dev/null +++ b/odd-box.toml @@ -0,0 +1,50 @@ +version = "V2" +alpn = false +http_port = 8080 +admin_api_port = 1234 +ip = "127.0.0.1" +tls_port = 4343 +auto_start = false +root_dir = "~" +log_level = "trace" +port_range_start = 4200 +default_log_format = "standard" +env_vars = [ + { key = "GRPC_TRACE", value = "http,http1,http_keepalive,http2_stream_state" }, + { key = "GRPC_VERBOSITY", value = "DEBUG" }, +] + +[[remote_target]] +host_name = "caddy-lb-terminated.localtest.me" +disable_tcp_tunnel_mode = true +backends = [ + { https = false, address="127.0.0.1", port=9999 }, + { https = false, address="127.0.0.1", port=9999 } + +] + +[[hosted_process]] +host_name = "caddy-proc-terminated.localtest.me" +disable_tcp_tunnel_mode = true +auto_start = true +port = 9999 +dir = "$cfg_dir" +bin = "/nix/store/aq5r61lmr9six0lyi6xikxwvnyp16dfy-user-environment/bin/caddy" +args = [ + "run", + "--config", + "./CaddyTest", + "--adapter", + "caddyfile" +] + + + +[[remote_target]] +host_name = "lobsters.localtest.me" +disable_tcp_tunnel_mode = true +backends = [ + { https = true, address="lobste.rs", port=443 }, + # { https = true, address="lobsters.dev", port=443 }, + { https = false, address="nnerastoundinglushmorning.neverssl.com", port=80 } +] diff --git a/src/api/controllers/mod.rs b/src/api/controllers/mod.rs index 0e765b2..17c5df8 100644 --- a/src/api/controllers/mod.rs +++ b/src/api/controllers/mod.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use axum::{http::StatusCode, response::{IntoResponse, Response}, Json, Router}; use 
serde::{Deserialize, Serialize}; use crate::global_state::GlobalState; @@ -5,7 +7,7 @@ use crate::global_state::GlobalState; pub mod sites; pub mod settings; -pub (crate) async fn routes(state:GlobalState) -> Router { +pub async fn routes(state:Arc) -> Router { let sites = Router::new() .route("/sites", axum::routing::post(sites::update_handler)).with_state(state.clone()) diff --git a/src/api/controllers/settings.rs b/src/api/controllers/settings.rs index e7e9c24..9c5594a 100644 --- a/src/api/controllers/settings.rs +++ b/src/api/controllers/settings.rs @@ -1,7 +1,7 @@ use super::*; use utoipa::ToSchema; - +use crate::configuration::OddBoxConfiguration; #[derive(Debug,Serialize,Deserialize,Clone,ToSchema)] @@ -67,34 +67,34 @@ pub struct KvP { #[derive(Debug, Clone, Serialize, Deserialize,ToSchema)] pub struct OddBoxConfigGlobalPart { #[schema(value_type = String)] - pub (crate) root_dir : String, - pub (crate) log_level : BasicLogLevel, - pub (crate) alpn : bool, - pub (crate) port_range_start : u16, - pub (crate) default_log_format : BasicLogFormat, - pub (crate) ip : String, - pub (crate) http_port : u16, - pub (crate) tls_port : u16, - pub (crate) auto_start : bool, - pub (crate) env_vars : Vec, - pub (crate) admin_api_port : u16, - pub (crate) path : String + pub root_dir : String, + pub log_level : BasicLogLevel, + pub alpn : bool, + pub port_range_start : u16, + pub default_log_format : BasicLogFormat, + pub ip : String, + pub http_port : u16, + pub tls_port : u16, + pub auto_start : bool, + pub env_vars : Vec, + pub admin_api_port : u16, + pub path : String } #[derive(Debug, Clone, Serialize, Deserialize,ToSchema)] pub struct SaveGlobalConfig{ #[schema(value_type = String)] - pub (crate) root_dir : String, - pub (crate) log_level : BasicLogLevel, - pub (crate) alpn : bool, - pub (crate) port_range_start : u16, - pub (crate) default_log_format : BasicLogFormat, - pub (crate) ip : String, - pub (crate) http_port : u16, - pub (crate) tls_port : u16, - pub (crate) auto_start : bool, - pub (crate) env_vars : Vec, - pub (crate) admin_api_port : u16 + pub root_dir : String, + pub log_level : BasicLogLevel, + pub alpn : bool, + pub port_range_start : u16, + pub default_log_format : BasicLogFormat, + pub ip : String, + pub http_port : u16, + pub tls_port : u16, + pub auto_start : bool, + pub env_vars : Vec, + pub admin_api_port : u16 } /// Get global settings @@ -108,10 +108,10 @@ pub struct SaveGlobalConfig{ (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn get_settings_handler( - axum::extract::State(global_state): axum::extract::State, +pub async fn get_settings_handler( + axum::extract::State(global_state): axum::extract::State>, ) -> axum::response::Result { - let guard = global_state.1.read().await; + let guard = global_state.config.read().await; let cfg = OddBoxConfigGlobalPart { admin_api_port : guard.admin_api_port.unwrap_or(6789), @@ -151,14 +151,13 @@ pub (crate) async fn get_settings_handler( (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn set_settings_handler( - axum::extract::State(global_state): axum::extract::State, +pub async fn set_settings_handler( + axum::extract::State(global_state): axum::extract::State>, Json(new_settings): Json ) -> axum::response::Result { - let mut guard = global_state.1.write().await; + let mut guard = global_state.config.write().await; - guard.admin_api_port = Some(new_settings.admin_api_port); guard.http_port = Some(new_settings.http_port); 
guard.tls_port = Some(new_settings.tls_port); @@ -183,7 +182,7 @@ pub (crate) async fn set_settings_handler( guard.root_dir = Some(new_settings.root_dir.clone()); - guard.save().map_err(|e|(StatusCode::BAD_REQUEST,format!("{}",e.to_string())))?; + guard.write_to_disk().map_err(|e|(StatusCode::BAD_REQUEST,format!("{}",e.to_string())))?; tracing::debug!("Global settings updated thru api"); diff --git a/src/api/controllers/sites.rs b/src/api/controllers/sites.rs index dd9012d..74099a8 100644 --- a/src/api/controllers/sites.rs +++ b/src/api/controllers/sites.rs @@ -1,11 +1,13 @@ -use crate::configuration::v2::{InProcessSiteConfig, RemoteSiteConfig}; +use std::sync::Arc; +use crate::configuration::v2::{InProcessSiteConfig, RemoteSiteConfig}; +use crate::configuration::OddBoxConfiguration; use super::*; use axum::extract::{Query, State}; use utoipa::{IntoParams, ToSchema}; #[derive(Serialize,ToSchema)] -pub (crate) enum SitesError { +pub enum SitesError { UnknownError(String) } @@ -22,18 +24,18 @@ impl IntoResponse for SitesError { #[derive(ToSchema,Serialize)] -pub (crate) enum ConfigurationItem { +pub enum ConfigurationItem { HostedProcess(InProcessSiteConfig), RemoteSite(RemoteSiteConfig) } #[derive(ToSchema,Serialize)] -pub (crate) struct ListResponse { +pub struct ListResponse { pub items : Vec } #[derive(ToSchema,Serialize)] -pub (crate) struct StatusResponse { +pub struct StatusResponse { pub items : Vec } @@ -76,9 +78,9 @@ pub struct StatusItem { (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn list_handler(state: axum::extract::State) -> axum::response::Result { +pub async fn list_handler(state: axum::extract::State>) -> axum::response::Result { - let cfg_guard = state.0.1.read().await; + let cfg_guard = state.config.read().await; let procs = cfg_guard.hosted_process.clone().unwrap_or_default(); let rems = cfg_guard.remote_target.clone().unwrap_or_default(); @@ -101,15 +103,14 @@ pub (crate) async fn list_handler(state: axum::extract::State) -> a (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn status_handler(state: axum::extract::State) -> axum::response::Result { - - let cfg_guard = state.0.0.read().await; +pub async fn status_handler(state: axum::extract::State>) -> axum::response::Result { Ok(Json(StatusResponse { - items: cfg_guard.site_states_map.clone().into_iter().map(|(site,state)|{ + items: state.app_state.site_status_map.iter().map(|guard|{ + let (site,state) = guard.pair(); StatusItem { - hostname: site, - state: state.into() + hostname: site.clone(), + state: state.clone().into() } }).collect() })) @@ -157,9 +158,9 @@ pub struct UpdateQuery { (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn update_handler(State(state): axum::extract::State,Query(query): Query, body: Json) -> axum::response::Result { +pub async fn update_handler(State(state): axum::extract::State>,Query(query): Query, body: Json) -> axum::response::Result { - let mut conf_guard = state.1.write().await; + let mut conf_guard = state.config.write().await; match &body.new_configuration { ConfigItem::RemoteSite(new_cfg) => { @@ -215,14 +216,14 @@ pub struct DeleteQueryParams { (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn delete_handler( - axum::extract::State(global_state): axum::extract::State, - Query(query): Query +pub async fn delete_handler( + axum::extract::State(global_state): 
axum::extract::State>, + Query(query): Query, ) -> axum::response::Result { - let mut conf_guard = global_state.1.write().await; + let mut conf_guard = global_state.config.write().await; let mut deleted = false; @@ -250,15 +251,16 @@ pub (crate) async fn delete_handler( if deleted { - global_state.0.write().await.site_states_map.remove( &query.hostname); - conf_guard.write_to_disk(&conf_guard.path.clone().unwrap_or_default()) + global_state.app_state.site_status_map.remove( &query.hostname); + + conf_guard.write_to_disk() .map_err(|e| SitesError::UnknownError(format!("{e:?}")) )?; drop(conf_guard); tracing::info!("Config file updated due to change to site: {}", query.hostname); let (tx,mut rx) = tokio::sync::mpsc::channel(1); - global_state.2.send(crate::http_proxy::ProcMessage::Delete( query.hostname.to_owned(),tx)).map_err(|e| + global_state.broadcaster.send(crate::http_proxy::ProcMessage::Delete( query.hostname.to_owned(),tx)).map_err(|e| SitesError::UnknownError(format!("{e:?}")) )?; @@ -306,8 +308,8 @@ pub struct StopQueryParams { (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn stop_handler( - axum::extract::State(global_state): axum::extract::State, +pub async fn stop_handler( + axum::extract::State(global_state): axum::extract::State>, Query(query): Query ) -> axum::response::Result { @@ -318,7 +320,7 @@ pub (crate) async fn stop_handler( }; // todo - check if site exists and if its already stopped? - global_state.2.send(signal).map_err(|e| + global_state.broadcaster.send(signal).map_err(|e| SitesError::UnknownError(format!("{e:?}")) )?; Ok(()) @@ -347,8 +349,8 @@ pub struct StartQueryParams { (status = 500, description = "When something goes wrong", body = String), ) )] -pub (crate) async fn start_handler( - axum::extract::State(global_state): axum::extract::State, +pub async fn start_handler( + axum::extract::State(global_state): axum::extract::State>, Query(query): Query ) -> axum::response::Result { @@ -358,8 +360,7 @@ pub (crate) async fn start_handler( crate::http_proxy::ProcMessage::Start(query.hostname) }; - // todo - check if site exists and if its already started? 
- global_state.2.send(signal).map_err(|e| + global_state.broadcaster.send(signal).map_err(|e| SitesError::UnknownError(format!("{e:?}")) )?; Ok(()) diff --git a/src/api/mod.rs b/src/api/mod.rs index 1aaf7bb..a284ed8 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::{net::SocketAddr, sync::Arc}; use axum::{body::Body, extract::{ws::{Message, WebSocket, WebSocketUpgrade}, State}, response::{Html, IntoResponse, Response}, Router}; use futures_util::{SinkExt, StreamExt}; @@ -16,7 +16,7 @@ pub struct WebSocketGlobalState { pub broadcast_channel: tokio::sync::broadcast::Sender, - pub global_state: crate::global_state::GlobalState + pub global_state: Arc } @@ -72,7 +72,7 @@ async fn set_cors(request: axum::extract::Request, next: axum::middleware::Next, } -pub (crate) async fn run(globally_shared_state: crate::global_state::GlobalState,port:Option,tracing_broadcaster:tokio::sync::broadcast::Sender::) { +pub async fn run(globally_shared_state: Arc,port:Option,tracing_broadcaster:tokio::sync::broadcast::Sender::) { if let Some(p) = port { @@ -185,7 +185,7 @@ async fn ws_log_messages_handler( } } else { - let possibly_admin_port = state.global_state.1.read().await.admin_api_port; + let possibly_admin_port = state.global_state.config.read().await.admin_api_port; if let Some(p) = possibly_admin_port { let expected_origin = format!("http://localhost:{p}"); @@ -224,7 +224,7 @@ async fn ws_log_messages_handler( String::from("Unknown client") }; - tracing::info!("`{user_agent}` at {addr} connected."); + tracing::trace!("`{user_agent}` at {addr} connected."); let response = ws.on_upgrade(move |socket| handle_socket(socket, addr,state.0)); return response; diff --git a/src/configuration/legacy.rs b/src/configuration/legacy.rs index 4ad7d99..0a33413 100644 --- a/src/configuration/legacy.rs +++ b/src/configuration/legacy.rs @@ -1,3 +1,7 @@ +use std::net::IpAddr; +use std::net::Ipv4Addr; + +use anyhow::bail; use serde::Serialize; use serde::Deserialize; @@ -7,7 +11,7 @@ use super::LogLevel; #[derive(Debug, Clone, Serialize, Deserialize)] -pub (crate) struct SiteConfig{ +pub struct SiteConfig{ pub host_name : String, pub path : String, pub bin : String, @@ -19,14 +23,14 @@ pub (crate) struct SiteConfig{ /// Set this to true in case your backend service uses https pub https : Option, pub capture_subdomains : Option, - #[serde(skip)] pub (crate) port : u16, + #[serde(skip)] pub port : u16, // BACKPORTING FOR V1 CONFIGS pub h2_hint : Option, pub disable_tcp_tunnel_mode : Option } #[derive(Debug, Clone, Serialize, Deserialize)] -pub (crate) struct Config { +pub struct OddBoxLegacyConfig { pub processes : Vec, pub env_vars : Vec, pub root_dir : Option, @@ -37,6 +41,75 @@ pub (crate) struct Config { pub tls_port : Option, pub auto_start : Option, // BACKPORTING FOR V1 CONFIGS - pub (crate) ip : Option, - pub (crate) remote_sites : Option>, + pub ip : Option, + pub remote_sites : Option>, +} + +impl crate::configuration::OddBoxConfiguration for OddBoxLegacyConfig { + + + #[allow(unused)] + fn example() -> OddBoxLegacyConfig { + OddBoxLegacyConfig { + auto_start: Some(true), + default_log_format: Some(LogFormat::standard), + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + log_level: Some(LogLevel::Info), + port: Some(80), + port_range_start: 4200, + processes: vec![ + SiteConfig { + disable_tcp_tunnel_mode: 
Some(false), + args: vec!["--test".to_string()], + auto_start: Some(true), + bin: "my_bin".into(), + capture_subdomains: None, + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + host_name: "some_host.local".into(), + port: 443 , + log_format: Some(LogFormat::standard), + path: "/tmp".into(), + https: Some(true), + h2_hint: None + + } + ], + remote_sites: Some(vec![ + super::v1::RemoteSiteConfig { + h2_hint: None, + host_name: "lobsters.localtest.me".into(), + target_hostname: "lobsters.rs".into(), + port: Some(443), + https: Some(true), + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(true), + forward_subdomains: None + }, + super::v1::RemoteSiteConfig { + h2_hint: None, + host_name: "google.localtest.me".into(), + target_hostname: "google.com".into(), + port: Some(443), + https: Some(true), + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(true), + forward_subdomains: None + } + ]), + root_dir: Some("/tmp".into()), + tls_port: Some(443) + + } + } + + } + + diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs index b00f7c5..2d40645 100644 --- a/src/configuration/mod.rs +++ b/src/configuration/mod.rs @@ -1,23 +1,37 @@ +use std::sync::Arc; + +use anyhow::bail; use serde::{Deserialize, Serialize}; use utoipa::ToSchema; use v1::H2Hint; +use v2::FullyResolvedInProcessSiteConfig; + +pub mod legacy; +pub mod v1; +pub mod v2; -pub (crate) mod legacy; -pub (crate) mod v1; -pub (crate) mod v2; +pub trait OddBoxConfiguration { + fn example() -> T; + fn to_string(&self) -> anyhow::Result { + bail!("to_string is not implemented for this configuration version") + } + fn write_to_disk(&self) -> anyhow::Result<()> { + bail!("write_to_disk is not implemented for this configuration version") + } +} #[derive(Debug,Clone)] -pub (crate) enum Config { - #[allow(dead_code)]Legacy(legacy::Config), - V1(v1::OddBoxConfig), - V2(v2::OddBoxConfig) +pub enum OddBoxConfig { + #[allow(dead_code)]Legacy(legacy::OddBoxLegacyConfig), + V1(v1::OddBoxV1Config), + V2(v2::OddBoxV2Config) } #[derive(Debug,Clone)] -pub struct ConfigWrapper(pub v2::OddBoxConfig); +pub struct ConfigWrapper(pub v2::OddBoxV2Config); impl std::ops::Deref for ConfigWrapper { - type Target = v2::OddBoxConfig; + type Target = v2::OddBoxV2Config; fn deref(&self) -> &Self::Target { &self.0 } @@ -28,21 +42,27 @@ impl std::ops::DerefMut for ConfigWrapper { } } +impl ConfigWrapper { + pub fn wrapv2(config:v2::OddBoxV2Config) -> Self { + ConfigWrapper(config) + } +} + -#[derive(Debug, Clone, Serialize, Deserialize,ToSchema)] -pub (crate) struct EnvVar { +#[derive(Debug, Clone, Serialize, Deserialize,ToSchema,PartialEq, Eq, Hash)] +pub struct EnvVar { pub key: String, pub value: String, } -#[derive(Serialize,Deserialize,Debug,Clone,ToSchema)] +#[derive(Serialize,Deserialize,Debug,Clone,ToSchema,PartialEq, Eq, Hash)] #[allow(non_camel_case_types)] pub enum LogFormat { standard, dotnet } -#[derive(Debug,Serialize,Clone,ToSchema)] +#[derive(Debug,Serialize,Clone,ToSchema, PartialEq, Eq, Hash)] pub enum LogLevel { Trace, Debug, @@ -85,167 +105,434 @@ impl<'de> Deserialize<'de> for LogLevel { } -#[derive(Debug,Clone,Serialize,Deserialize,Default,ToSchema)] +#[derive(Debug,Clone,Serialize,Deserialize,Default,ToSchema,PartialEq, Eq, Hash)] pub enum OddBoxConfigVersion { #[default] Unmarked, V1, V2 } -impl Config { - pub fn parse(content:&str) -> Result { + + +impl OddBoxConfig { + + pub fn parse(content:&str) -> Result 
{ - let v2_result = toml::from_str::(content); + let v2_result = toml::from_str::(content); if let Ok(v2_config) = v2_result { - return Ok(Config::V2(v2_config)) + return Ok(OddBoxConfig::V2(v2_config)) }; - let v1_result = toml::from_str::(content); + let v1_result = toml::from_str::(content); if let Ok(v1_config) = v1_result { - return Ok(Config::V1(v1_config)) + return Ok(OddBoxConfig::V1(v1_config)) }; - let legacy_result = toml::from_str::(&content); + let legacy_result = toml::from_str::(&content); if let Ok(legacy_config) = legacy_result { - return Ok(Config::Legacy(legacy_config)) + return Ok(OddBoxConfig::Legacy(legacy_config)) }; - - Err(format!("invalid configuration file. {v1_result:?} ...\n\n{legacy_result:?}")) + + if content.contains("version = \"V2\"") { + Err(format!("invalid v2 configuration file.\n{}", v2_result.unwrap_err().to_string())) + } else if content.contains("version = \"V1\"") { + Err(format!("invalid v1 configuration file.\n{}", v1_result.unwrap_err().to_string())) + } else { + Err(format!("invalid (legacy) configuration file.\n{}", legacy_result.unwrap_err().to_string())) + } } - pub fn try_upgrade_to_latest_version(&self) -> Result { + pub fn try_upgrade_to_latest_version(&self) -> Result { match self { - Config::Legacy(legacy_config) => { - let v1 : v1::OddBoxConfig = legacy_config.to_owned().try_into()?; - let v2 : v2::OddBoxConfig = v1.to_owned().try_into()?; + OddBoxConfig::Legacy(legacy_config) => { + let v1 : v1::OddBoxV1Config = legacy_config.to_owned().try_into()?; + let v2 : v2::OddBoxV2Config = v1.to_owned().try_into()?; Ok(v2) }, - Config::V1(v1_config) => { - let v2 : v2::OddBoxConfig = v1_config.to_owned().try_into()?; + OddBoxConfig::V1(v1_config) => { + let v2 : v2::OddBoxV2Config = v1_config.to_owned().try_into()?; Ok(v2) }, - Config::V2(v2) => { + OddBoxConfig::V2(v2) => { Ok(v2.clone()) }, } } } -// LEGACY ---> V1 -impl TryFrom for v1::OddBoxConfig { + + +impl ConfigWrapper { - type Error = String; - - fn try_from(old_config: legacy::Config) -> Result { - let new_config = v1::OddBoxConfig { - path: None, - version: OddBoxConfigVersion::V1, - admin_api_port: None, - alpn: Some(false), // allowing alpn would be a breaking change for h2c when using old configuration format - auto_start: old_config.auto_start, - default_log_format: old_config.default_log_format.unwrap_or(LogFormat::standard), - env_vars: old_config.env_vars, - ip: old_config.ip, - log_level: old_config.log_level, - http_port: old_config.port, - port_range_start: old_config.port_range_start, - hosted_process: Some(old_config.processes.into_iter().map(|x|{ - v1::InProcessSiteConfig { - forward_subdomains: None, - disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, - args: x.args, - auto_start: x.auto_start, - bin: x.bin, - capture_subdomains: None, - env_vars: x.env_vars, - host_name: x.host_name, - port: if x.https.unwrap_or_default() { Some(x.port) } else { None } , - log_format: x.log_format, - dir: x.path, - https: x.https, - h2_hint: x.h2_hint, - disabled: None - - } - }).collect::>()), - remote_target: old_config.remote_sites, - root_dir: old_config.root_dir, - tls_port: old_config.tls_port + + pub fn init(&mut self,cfg_path:&str) -> anyhow::Result<()> { + self.path = Some(std::path::Path::new(&cfg_path).canonicalize()?.to_str().unwrap_or_default().into()); + Ok(()) + } + + pub fn is_valid(&self) -> anyhow::Result<()> { + for x in self.env_vars.iter() { + if x.key.to_lowercase().trim() == "port" { + anyhow::bail!(format!("Invalid configuration. 
You cannot use 'port' as a global environment variable")); + } }; - Ok(new_config) + + // ALL HOST NAMES ARE UNIQUE + let mut all_host_names: Vec<&str> = vec![ + self.remote_target.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default(), + self.hosted_process.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default() + + ].concat(); + all_host_names.sort(); + let all_count = all_host_names.len(); + all_host_names.dedup(); + let unique_count = all_host_names.len(); + if all_count != unique_count { + anyhow::bail!(format!("You have more than one hosted process configured with the same host_name... not allowed.")) + } + + // ALL HOSTED SERVICES USE DIFFERENT PORTS + let mut all_ports = self.hosted_process.clone().unwrap_or_default().iter().filter_map(|x|x.port).collect::>(); + all_ports.sort(); + let all_count = all_ports.len(); + all_ports.dedup(); + let unique_count = all_ports.len(); + if all_count != unique_count { + anyhow::bail!(format!("You have more than one hosted process configured with the same port... not allowed.")) + } + + // NO HOSTED PROCESS USES AN ENV VAR FOR PORT THAT DIFFERS FROM THE PORT SPECIFIED IN ITS CONFIG + // TODO: this is just horrible + for x in self.hosted_process.clone().unwrap_or_default() { + if let Some(port) = x.port { + if let Some(env_vars) = x.env_vars { + for env_var in env_vars { + if env_var.key.to_lowercase().trim() == "port" { + if let Ok(parsed_port) = env_var.value.parse::() { + if parsed_port != port { + anyhow::bail!(format!("Environment variable PORT for {} does not match the port specified in the configuration.\nIt is recommended you do not specify the PORT environment variable explicitly but instead rely on the port setting -\nit will automatically inject the port variable to the process-local context.",x.host_name)) + } + } + } + } + } + } + } + + Ok(()) + } -} + pub fn get_parent(p:&str) -> anyhow::Result { + if let Some(directory_path_str) = + std::path::Path::new(&p) + .parent() + .map(|p| p.to_str().unwrap_or_default()) + { + if directory_path_str.eq("") { + tracing::debug!("$cfg_dir resolved to '.'"); + Ok(".".into()) + } else { + tracing::debug!("$cfg_dir resolved to {directory_path_str}"); + Ok(directory_path_str.into()) + } + + } else { + bail!(format!("Failed to resolve $cfg_dir")); + } + } + + + // ---> port-mapping... + pub async fn add_or_replace_hosted_process(&mut self,hostname:&str,mut item:crate::InProcessSiteConfig,state:Arc) -> anyhow::Result<()> { + + if let Some(hosted_site_configs) = &mut self.hosted_process { + + for x in hosted_site_configs.iter_mut() { + if hostname == x.host_name { + + + + let (tx,mut rx) = tokio::sync::mpsc::channel(1); + + state.broadcaster.send(crate::http_proxy::ProcMessage::Delete(hostname.into(),tx))?; + + if rx.recv().await == Some(0) { + // when we get this message, we know that the process has been stopped + // and that the loop has been exited as well. + tracing::debug!("Received a confirmation that the process was deleted"); + } else { + tracing::debug!("Failed to receive a confirmation that the process was deleted. 
This is a bug in odd-box."); + }; + + + break; + } + }; + + tracing::debug!("Pushing a new process to the configuration thru the admin api"); + hosted_site_configs.retain(|x| x.host_name != item.host_name); + hosted_site_configs.retain(|x| x.host_name != hostname); + hosted_site_configs.push(item.clone()); + + + let resolved_proc = self.resolve_process_configuration(&item)?; + + tokio::task::spawn(crate::proc_host::host( + resolved_proc, + state.broadcaster.subscribe(), + state.clone(), + )); + tracing::trace!("Spawned a new thread for site: {:?}",hostname); + + let guard = &state.app_state.site_status_map; + guard.retain(|k,_v| k != hostname); + guard.insert(hostname.to_owned(), crate::types::app_state::ProcState::Stopped); + } + + + + self.write_to_disk() + + + + + } + + pub fn busy_ports(&self) -> Vec<(String,u16)> { + self.hosted_process.iter().flatten().flat_map(|x| { + + let mut items = Vec::new(); + + // manually set ports needs to be marked as busy even if the process is not running + if let Some(p) = x.port { + items.push((x.host_name.clone(),p)) + } + + + // active ports means that there is a loop active for this process using that port + if let Some(p) = x.active_port { + items.push((x.host_name.clone(),p)) + } + + if items.len() > 0 { + Some(items) + } else { + None + } + + }).flatten().collect::>() + } + + pub async fn find_and_set_unused_port(selfy : &mut Self, proc:&mut crate::InProcessSiteConfig) -> anyhow::Result { + + if let Some(procs) = &selfy.hosted_process { + + let used_ports = procs.iter().filter_map(|x|x.port).collect::>(); + + if let Some(manually_chosen_port) = proc.port { + if used_ports.contains(&manually_chosen_port) { + // this port is already in use + bail!("The port configured for this site is already in use..") + } else { + return Ok(manually_chosen_port) + } + } + + }; + + if let Some(manually_chosen_port) = proc.port { + // clearly this port is not in use yet + Ok(manually_chosen_port) + } else { + // if nothing is running and user has not selected any specific one lets just use the first port from the start range + Ok(selfy.port_range_start) + } + } + + pub async fn add_or_replace_remote_site(&mut self,hostname:&str,item:crate::RemoteSiteConfig,state:Arc) -> anyhow::Result<()> { + + if let Some(sites) = self.remote_target.as_mut() { + // out with the old, in with the new + sites.retain(|x| x.host_name != hostname); + sites.retain(|x| x.host_name != item.host_name); + sites.push(item.clone()); + + // same as above but for the TUI state + let map_guard = &state.app_state.site_status_map; + map_guard.retain(|k,_v| *k != item.host_name); + map_guard.retain(|k,_v| k != hostname); + map_guard.insert(hostname.to_owned(), crate::types::app_state::ProcState::Remote); + } + + + self.write_to_disk() + + } -// V1 ---> V2 -impl TryFrom for v2::OddBoxConfig{ + // TODO - this does not work correctly. it doesnt use the PORT from procs config but always auto. 
+ pub fn set_active_port(&mut self, resolved_proc:&mut FullyResolvedInProcessSiteConfig) -> anyhow::Result { + - type Error = String; - - fn try_from(old_config: v1::OddBoxConfig) -> Result { - let new_config = v2::OddBoxConfig { - path: None, - version: OddBoxConfigVersion::V2, - admin_api_port: None, - alpn: Some(false), // allowing alpn would be a breaking change for h2c when using old configuration format - auto_start: old_config.auto_start, - default_log_format: old_config.default_log_format, - env_vars: old_config.env_vars, - ip: old_config.ip, - log_level: old_config.log_level, - http_port: old_config.http_port, - port_range_start: old_config.port_range_start, - hosted_process: Some(old_config.hosted_process.unwrap_or_default().into_iter().map(|x|{ - v2::InProcessSiteConfig { - forward_subdomains: x.forward_subdomains, - disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, - args: x.args, - auto_start: x.auto_start, - bin: x.bin, - capture_subdomains: x.capture_subdomains, - env_vars: x.env_vars, - host_name: x.host_name, - port: x.port, - log_format: x.log_format, - dir: x.dir, - https: x.https, - h2_hint: match x.h2_hint { - Some(H2Hint::H2) => Some(crate::configuration::v2::H2Hint::H2), - Some(H2Hint::H2C) => Some(crate::configuration::v2::H2Hint::H2C), - None => None, - }, - disabled: x.disabled - + let mut selected_port = None; + + // ports in use or configured for use by other sites + let unavailable_ports = self.busy_ports().into_iter().filter(|x|{ + x.0 != resolved_proc.host_name + }).collect::>(); + + // decide which port to use (ie. which port to add as the environment variable PORT) + if let Some(prefered_port) = resolved_proc.port { + if let Some(taken_by) = unavailable_ports.iter().find(|x|x.1 == prefered_port) { + tracing::warn!("[{}] The configured port '{}' is unavailable (configured for another site: '{}').. ",&resolved_proc.host_name,prefered_port,taken_by.1); + } else { + tracing::info!("[{}] Starting on port '{}' as configured for the process!",&resolved_proc.host_name,prefered_port); + selected_port = Some(prefered_port); + } + } else if let Some(EnvVar { key: _, value }) = resolved_proc.env_vars.iter().flatten().find(|x|x.key.to_lowercase()=="port") { + if let Some(taken_by) = unavailable_ports.iter().find(|x|x.1.to_string() == *value) { + tracing::warn!("[{}] The configured port (via env var in cfg) '{}' is unavailable (configured for another site: '{}').. 
",&resolved_proc.host_name,value,taken_by.1); + } else { + if let Ok(spbev) = value.parse::() { + tracing::info!("[{}] Starting on port '{}' as selected via a configured environment variable for port!",&resolved_proc.host_name,value); + selected_port = Some(spbev) + } else { + tracing::info!("[{}] The env var for port was configured to '{}' which is not a valid u16, ignoring.",&resolved_proc.host_name,value); } - }).collect()), - remote_target: Some(old_config.remote_target.unwrap_or_default().iter().map(|x|{ - v2::RemoteSiteConfig { - disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, - capture_subdomains: x.capture_subdomains, - forward_subdomains: x.forward_subdomains, - backends: vec![ - v2::Backend { - address: x.target_hostname.clone(), - port: x.port.expect("remote site must have a port.."), - https: x.https - } - ], - host_name: x.host_name.clone(), - h2_hint: match x.h2_hint { - Some(H2Hint::H2) => Some(crate::configuration::v2::H2Hint::H2), - Some(H2Hint::H2C) => Some(crate::configuration::v2::H2Hint::H2C), - None => None, - } + } + } + + // if no port manually specified, find the first available port + if selected_port.is_none() { + let min_auto_port = self.port_range_start; + let unavailable = unavailable_ports.iter().map(|x|x.1).collect::>(); + // find first port that is not in use starting from min_auto_port, looking at the unavailable_ports list: + let mut inner_selected_port = min_auto_port; + loop { + if unavailable.contains(&inner_selected_port) { + inner_selected_port += 1; + } else { + break } - }).collect()), - root_dir: old_config.root_dir, - tls_port: old_config.tls_port + } + + tracing::info!("[{}] Using the first available port found (starting from the configured start port: {min_auto_port}) ---> '{}'",&resolved_proc.host_name,inner_selected_port); + selected_port = Some(inner_selected_port); + } + // make sure nobody else is using this port before returning it to caller. + // mark this process as using this port + if let Some(sp) = selected_port { + if let Some(hosted_processes) = &mut self.hosted_process { + if let Some(mm) = hosted_processes.iter_mut().find(|x| x.host_name == resolved_proc.host_name) { + // save the selected port in the globally shared state + mm.active_port = Some(sp); + } else { + tracing::error!("[{}] Could not find an active site in the hosted process list.. This is a bug in odd-box!",&resolved_proc.host_name); + } + } else { + tracing::error!("[{}] The site proc list is empty! Most likely this is a bug in odd-box.",&resolved_proc.host_name); + } + } + + if let Some(p) = selected_port { + Ok(p) + } else { + bail!("Failed to find a port for the process..") + } + } + + + // this MUST be called by proc_host prior to starting a process in order to resolve all variables. + // it is done this way in order to avoid changing the global state of the configuration in to the resolved state + // since that would then be saved to disk and we would lose the original configuration with dynamic variables + // making configuration files less portable. 
+ pub fn resolve_process_configuration(&self,proc:&crate::InProcessSiteConfig) -> anyhow::Result { + + let mut resolved_proc = crate::FullyResolvedInProcessSiteConfig { + excluded_from_start_all: proc.exclude_from_start_all.unwrap_or(false), + proc_id: proc.get_id().clone(), + active_port: proc.active_port, + disable_tcp_tunnel_mode: proc.disable_tcp_tunnel_mode, + hints: proc.hints.clone(), + host_name: proc.host_name.clone(), + dir: proc.dir.clone(), + bin: proc.bin.clone(), + args: proc.args.clone(), + env_vars: proc.env_vars.clone(), + log_format: proc.log_format.clone(), + auto_start: proc.auto_start, + port: proc.port, + https: proc.https, + capture_subdomains: proc.capture_subdomains, + forward_subdomains: proc.forward_subdomains }; - Ok(new_config) + + let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; + let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; + + // tracing::info!("Resolved home directory: {}",&resolved_home_dir_str); + + let cfg_dir = Self::get_parent(&self.path.clone().expect("all configurations need a path on disk. if you see this, there is a bug in odd-box."))?; + + + let root_dir = if let Some(rd) = &self.root_dir { + + if rd.contains("$root_dir") { + anyhow::bail!("it is clearly not a good idea to use $root_dir in the configuration of root dir...") + } + + let rd_with_vars_replaced = rd + .replace("$cfg_dir", &cfg_dir) + .replace("~", resolved_home_dir_str); + + let canonicalized_with_vars = + match std::fs::canonicalize(rd_with_vars_replaced.clone()) { + Ok(resolved_path) => { + resolved_path.display().to_string() + // we dont want to use ext path def on windows + .replace("\\\\?\\", "") + } + Err(e) => { + anyhow::bail!(format!("root_dir item in configuration ({rd}) resolved to this: '{rd_with_vars_replaced}' - error: {}", e)); + } + }; + + // tracing::debug!("$root_dir resolved to: {rd}"); + canonicalized_with_vars + } else { + "$root_dir".to_string() + }; + + let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; + let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; + + let with_vars = |x:&str| -> String { + x.replace("$root_dir", &root_dir) + .replace("$cfg_dir", &cfg_dir) + .replace("~", resolved_home_dir_str) + }; + + if let Some(args) = &mut resolved_proc.args { + for argument in args { + *argument = with_vars(argument) + } + } + + if let Some(dir) = &mut resolved_proc.dir { + *dir = with_vars(&dir); + } + + resolved_proc.bin = with_vars(&resolved_proc.bin); + + + Ok(resolved_proc) + } + + } \ No newline at end of file diff --git a/src/configuration/v1.rs b/src/configuration/v1.rs index 876c050..8c3920a 100644 --- a/src/configuration/v1.rs +++ b/src/configuration/v1.rs @@ -1,27 +1,17 @@ use std::net::IpAddr; use std::net::Ipv4Addr; -use std::path::Path; - use anyhow::bail; use serde::Serialize; use serde::Deserialize; use utoipa::ToSchema; -use crate::global_state::GlobalState; use super::EnvVar; use super::LogFormat; use super::LogLevel; -impl InProcessSiteConfig { - pub fn set_port(&mut self, port : u16) { - self.port = Some(port) - } -} - - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub (crate) struct InProcessSiteConfig{ +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Default)] +pub struct InProcessSiteConfig{ /// This is mostly 
useful in case the target uses SNI sniffing/routing pub disable_tcp_tunnel_mode : Option, /// H2C or H2 - used to signal use of prior knowledge http2 or http2 over clear text. @@ -49,13 +39,13 @@ pub (crate) struct InProcessSiteConfig{ } #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub (crate) enum H2Hint { +pub enum H2Hint { H2, H2C } #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub (crate) struct RemoteSiteConfig{ +pub struct RemoteSiteConfig{ /// H2C or H2 - used to signal use of prior knowledge http2 or http2 over clear text. pub h2_hint : Option, pub host_name : String, @@ -74,33 +64,105 @@ pub (crate) struct RemoteSiteConfig{ } #[derive(Debug, Clone, Serialize, Deserialize,ToSchema)] -pub struct OddBoxConfig { +pub struct OddBoxV1Config { #[schema(value_type = String)] - pub (crate) version : super::OddBoxConfigVersion, - pub (crate) root_dir : Option, + pub version : super::OddBoxConfigVersion, + pub root_dir : Option, #[serde(default = "default_log_level")] - pub (crate) log_level : Option, + pub log_level : Option, /// Defaults to true. Lets you enable/disable h2/http11 tls alpn algs during initial connection phase. #[serde(default = "true_option")] - pub (crate) alpn : Option, - pub (crate) port_range_start : u16, + pub alpn : Option, + pub port_range_start : u16, #[serde(default = "default_log_format")] - pub (crate) default_log_format : LogFormat, + pub default_log_format : LogFormat, #[schema(value_type = String)] - pub (crate) ip : Option, + pub ip : Option, #[serde(default = "default_http_port_8080")] - pub (crate) http_port : Option, + pub http_port : Option, #[serde(default = "default_https_port_4343")] - pub (crate) tls_port : Option, + pub tls_port : Option, #[serde(default = "true_option")] - pub (crate) auto_start : Option, - pub (crate) env_vars : Vec, - pub (crate) remote_target : Option>, - pub (crate) hosted_process : Option>, - pub (crate) admin_api_port : Option, - pub (crate) path : Option + pub auto_start : Option, + pub env_vars : Vec, + pub remote_target : Option>, + pub hosted_process : Option>, + pub admin_api_port : Option, + pub path : Option + +} +impl crate::configuration::OddBoxConfiguration for OddBoxV1Config { + + + #[allow(unused)] + fn example() -> OddBoxV1Config { + OddBoxV1Config { + path: None, + admin_api_port: None, + version: super::OddBoxConfigVersion::V1, + alpn: Some(false), + auto_start: Some(true), + default_log_format: LogFormat::standard, + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + log_level: Some(LogLevel::Info), + http_port: Some(80), + port_range_start: 4200, + hosted_process: Some(vec![ + InProcessSiteConfig { + forward_subdomains: None, + disable_tcp_tunnel_mode: Some(false), + args: vec!["--test".to_string()], + auto_start: Some(true), + bin: "my_bin".into(), + capture_subdomains: None, + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + host_name: "some_host.local".into(), + port: Some(443) , + log_format: Some(LogFormat::standard), + dir: "/tmp".into(), + https: Some(true), + h2_hint: None, + disabled :None + + } + ]), + remote_target: Some(vec![ + RemoteSiteConfig { + forward_subdomains: None, + h2_hint: None, + host_name: "lobsters.local".into(), + target_hostname: "lobste.rs".into(), + port: None, + https: Some(true), + 
capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(false) + }, + RemoteSiteConfig { + forward_subdomains: Some(true), + h2_hint: None, + host_name: "google.local".into(), + target_hostname: "google.com".into(), + port: Some(443), + https: Some(true), + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(true) + } + ]), + root_dir: Some("/tmp".into()), + tls_port: Some(443) + + } + } } + fn default_log_level() -> Option { Some(LogLevel::Info) } @@ -118,244 +180,50 @@ fn true_option() -> Option { Some(true) } -#[derive(Debug)] -enum ConfigurationUpdateError { - Bug(String) -} - - -impl std::fmt::Display for ConfigurationUpdateError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - // ConfigurationUpdateError::NotFound => { - // f.write_str("No such hosted process found.") - // }, - // ConfigurationUpdateError::FailedToSave(e) => { - // f.write_fmt(format_args!("Failed to save due to error: {}",e)) - // }, - ConfigurationUpdateError::Bug(e) => { - f.write_fmt(format_args!("Failed to save due to a bug in odd-box: {}",e)) - } - } - } -} - -impl OddBoxConfig { - pub fn save(&self) -> anyhow::Result<()> { - self.write_to_disk(self.path.clone().expect("must have been loaded from somewhere..").as_str())?; - Ok(()) - } +// LEGACY ---> V1 +impl TryFrom for crate::configuration::v1::OddBoxV1Config { - // note: this seems silly but its needed because neither toml-rs nor toml_edit supports any decent - // formatting customization and ends up with spread out arrays of tables rather - // than inlining like we usually do for odd-box configs. - pub fn write_to_disk(&self,current_path:&str) -> anyhow::Result<()> { - let mut formatted_toml = Vec::new(); - - formatted_toml.push(format!("version = \"{:?}\"", self.version)); - - if let Some(alpn) = self.alpn { - formatted_toml.push(format!("alpn = {}", alpn)); - } else { - formatted_toml.push(format!("alpn = {}", "false")); - } - - if let Some(port) = self.http_port { - formatted_toml.push(format!("http_port = {}", port)); - } - if let Some(port) = self.admin_api_port { - formatted_toml.push(format!("admin_api_port = {}", port)); - } - if let Some(ip) = &self.ip { - formatted_toml.push(format!("ip = \"{:?}\"", ip)); - } else { - formatted_toml.push(format!("ip = \"127.0.0.1\"")); - } - if let Some(tls_port) = self.tls_port { - formatted_toml.push(format!("tls_port = {}", tls_port)); - } - if let Some(auto_start) = self.auto_start { - formatted_toml.push(format!("auto_start = {}", auto_start)); - } else { - formatted_toml.push(format!("auto_start = false")); - } - - if let Some(root_dir) = &self.root_dir { - formatted_toml.push(format!("root_dir = {:?}", root_dir)); - } else { - formatted_toml.push(format!("root_dir = \"~\"")); - } - if let Some(log_level) = &self.log_level { - formatted_toml.push(format!("log_level = \"{:?}\"", log_level)); - } - formatted_toml.push(format!("port_range_start = {}", self.port_range_start)); - - - formatted_toml.push(format!("default_log_format = \"{:?}\"", self.default_log_format )); - - - formatted_toml.push("env_vars = [".to_string()); - for env_var in &self.env_vars { - formatted_toml.push(format!( - "\t{{ key = {:?}, value = {:?} }},", - env_var.key, env_var.value - )); - } - formatted_toml.push("]".to_string()); - - - if let Some(remote_sites) = &self.remote_target { - for site in remote_sites { - formatted_toml.push("\n[[remote_target]]".to_string()); - formatted_toml.push(format!("host_name = {:?}", site.host_name)); - 
formatted_toml.push(format!("target_hostname = {:?}", site.target_hostname)); - if let Some(hint) = &site.h2_hint { - formatted_toml.push(format!("h2_hint = \"{:?}\"", hint)); - } - - - if let Some(capture_subdomains) = site.capture_subdomains { - formatted_toml.push(format!("capture_subdomains = {}", capture_subdomains)); - } - - if let Some(b) = site.https { - formatted_toml.push(format!("https = {}", b)); - } - if let Some(http) = site.port { - formatted_toml.push(format!("port = {}", http)); - } - - if let Some(disable_tcp_tunnel_mode) = site.disable_tcp_tunnel_mode { - formatted_toml.push(format!("disable_tcp_tunnel_mode = {}", disable_tcp_tunnel_mode)); - } - } - } - - if let Some(processes) = &self.hosted_process { - for process in processes { - formatted_toml.push("\n[[hosted_process]]".to_string()); - formatted_toml.push(format!("host_name = {:?}", process.host_name)); - formatted_toml.push(format!("dir = {:?}", process.dir)); - formatted_toml.push(format!("bin = {:?}", process.bin)); - if let Some(hint) = &process.h2_hint { - formatted_toml.push(format!("h2_hint = \"{:?}\"", hint)); - } - - let args = process.args.iter().map(|arg| format!("{:?}", arg)).collect::>().join(", "); - formatted_toml.push(format!("args = [{}]", args)); - - - - - - if let Some(auto_start) = process.auto_start { - formatted_toml.push(format!("auto_start = {}", auto_start)); - } - - - if let Some(b) = process.https { - formatted_toml.push(format!("https = {}", b)); - } - if let Some(http) = process.port { - formatted_toml.push(format!("port = {}", http)); + type Error = String; + + fn try_from(old_config: crate::configuration::legacy::OddBoxLegacyConfig) -> Result { + let new_config = crate::configuration::v1::OddBoxV1Config { + path: None, + version: crate::configuration::OddBoxConfigVersion::V1, + admin_api_port: None, + alpn: Some(false), // allowing alpn would be a breaking change for h2c when using old configuration format + auto_start: old_config.auto_start, + default_log_format: old_config.default_log_format.unwrap_or(LogFormat::standard), + env_vars: old_config.env_vars, + ip: old_config.ip, + log_level: old_config.log_level, + http_port: old_config.port, + port_range_start: old_config.port_range_start, + hosted_process: Some(old_config.processes.into_iter().map(|x|{ + crate::configuration::v1::InProcessSiteConfig { + forward_subdomains: None, + disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, + args: x.args, + auto_start: x.auto_start, + bin: x.bin, + capture_subdomains: None, + env_vars: x.env_vars, + host_name: x.host_name, + port: if x.https.unwrap_or_default() { Some(x.port) } else { None } , + log_format: x.log_format, + dir: x.path, + https: x.https, + h2_hint: x.h2_hint, + disabled: None + } - - if let Some(capture_subdomains) = process.capture_subdomains { - formatted_toml.push(format!("capture_subdomains = {}", capture_subdomains)); - } else { - formatted_toml.push(format!("capture_subdomains = {}", "false")); - } - - formatted_toml.push("env_vars = [".to_string()); - for env_var in &process.env_vars { - formatted_toml.push(format!( - "\t{{ key = {:?}, value = {:?} }},", - env_var.key, env_var.value - )); - } - formatted_toml.push("]".to_string()); - - } - } - - let original_path = Path::new(current_path); - let backup_path = original_path.with_extension("toml.backup"); - std::fs::rename(original_path, &backup_path)?; - - if let Err(e) = std::fs::write(current_path, formatted_toml.join("\n")) { - bail!("Failed to write config to disk: {e}") - } else { - Ok(()) - } + }).collect::>()), + 
remote_target: old_config.remote_sites, + root_dir: old_config.root_dir, + tls_port: old_config.tls_port + }; + Ok(new_config) } } - - -pub fn example_v1() -> OddBoxConfig { - OddBoxConfig { - path: None, - admin_api_port: None, - version: super::OddBoxConfigVersion::V1, - alpn: Some(false), - auto_start: Some(true), - default_log_format: LogFormat::standard, - env_vars: vec![ - EnvVar { key: "some_key".into(), value:"some_val".into() }, - EnvVar { key: "another_key".into(), value:"another_val".into() }, - ], - ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), - log_level: Some(LogLevel::Info), - http_port: Some(80), - port_range_start: 4200, - hosted_process: Some(vec![ - InProcessSiteConfig { - forward_subdomains: None, - disable_tcp_tunnel_mode: Some(false), - args: vec!["--test".to_string()], - auto_start: Some(true), - bin: "my_bin".into(), - capture_subdomains: None, - env_vars: vec![ - EnvVar { key: "some_key".into(), value:"some_val".into() }, - EnvVar { key: "another_key".into(), value:"another_val".into() }, - ], - host_name: "some_host.local".into(), - port: Some(443) , - log_format: Some(LogFormat::standard), - dir: "/tmp".into(), - https: Some(true), - h2_hint: None, - disabled :None - - } - ]), - remote_target: Some(vec![ - RemoteSiteConfig { - forward_subdomains: None, - h2_hint: None, - host_name: "lobsters.local".into(), - target_hostname: "lobste.rs".into(), - port: None, - https: Some(true), - capture_subdomains: Some(false), - disable_tcp_tunnel_mode: Some(false) - }, - RemoteSiteConfig { - forward_subdomains: Some(true), - h2_hint: None, - host_name: "google.local".into(), - target_hostname: "google.com".into(), - port: Some(443), - https: Some(true), - capture_subdomains: Some(false), - disable_tcp_tunnel_mode: Some(true) - } - ]), - root_dir: Some("/tmp".into()), - tls_port: Some(443) - - } -} \ No newline at end of file diff --git a/src/configuration/v2.rs b/src/configuration/v2.rs index bc3d550..fb5fdf9 100644 --- a/src/configuration/v2.rs +++ b/src/configuration/v2.rs @@ -3,37 +3,46 @@ use std::net::Ipv4Addr; use std::path::Path; use anyhow::bail; +use dashmap::DashMap; use serde::Serialize; use serde::Deserialize; use utoipa::ToSchema; use crate::global_state::GlobalState; +use crate::types::app_state::ProcState; +use crate::ProcId; +use super::ConfigWrapper; use super::EnvVar; use super::LogFormat; use super::LogLevel; +use super::OddBoxConfiguration; -impl InProcessSiteConfig { - pub fn set_port(&mut self, port : u16) { - self.port = Some(port) - } -} +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Hash)] +pub struct InProcessSiteConfig { + + #[serde(skip, default = "crate::ProcId::new")] + proc_id : ProcId, + /// This is set automatically each time we start a process so that we know which ports are in use + /// and can avoid conflicts when starting new processes. settings this in toml conf file will have no effect. + #[serde(skip)] // <-- dont want to even read or write this to the config file, nor exposed in the api docs + pub active_port : Option, -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub (crate) struct InProcessSiteConfig{ /// This is mostly useful in case the target uses SNI sniffing/routing pub disable_tcp_tunnel_mode : Option, /// H2C or H2 - used to signal use of prior knowledge http2 or http2 over clear text. 
- pub h2_hint : Option, + pub hints : Option>, pub host_name : String, - pub dir : String, + pub dir : Option, pub bin : String, - pub args : Vec, - pub env_vars : Vec, + pub args : Option>, + pub env_vars : Option>, pub log_format: Option, - /// Set this to false if you do not want this site to start automatically when odd-box starts + /// Set this to false if you do not want this site to start automatically when odd-box starts. + /// This also means that the site is excluded from the start_all command. pub auto_start: Option, + /// If this is set to None, the next available port will be used. Starting from the global port_range_start pub port: Option, pub https : Option, /// If you wish to use wildcard routing for any subdomain under the 'host_name' @@ -43,30 +52,98 @@ pub (crate) struct InProcessSiteConfig{ /// vs /// test.example.com -> test.internal.site pub forward_subdomains : Option, - /// Set to true to prevent odd-box from starting this site automatically when it starts or using the 'start' command. - /// It can still be manually started by ctrl-clicking in the TUI. - pub disabled: Option - // ^ perhaps we should remove disabled from v2 and instead only use auto_start... ? + /// If you wish to exclude this site from the start_all command. + /// This setting was previously called "disable" but has been renamed for clarity + pub exclude_from_start_all: Option +} + + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Hash)] +pub struct FullyResolvedInProcessSiteConfig { + pub excluded_from_start_all: bool, + pub proc_id : ProcId, + pub active_port : Option, + pub disable_tcp_tunnel_mode : Option, + pub hints : Option>, + pub host_name : String, + pub dir : Option, + pub bin : String, + pub args : Option>, + pub env_vars : Option>, + pub log_format: Option, + pub auto_start: Option, + pub port: Option, + pub https : Option, + pub capture_subdomains : Option, + pub forward_subdomains : Option, +} + +impl InProcessSiteConfig { + pub fn get_id(&self) -> &ProcId { + &self.proc_id + } +} + +impl PartialEq for InProcessSiteConfig { + + fn eq(&self, other: &Self) -> bool { + compare_option_bool(self.disable_tcp_tunnel_mode,other.disable_tcp_tunnel_mode) && + self.hints == other.hints && + self.host_name == other.host_name && + self.dir == other.dir && + self.bin == other.bin && + self.args == other.args && + self.env_vars == other.env_vars && + compare_option_log_format(&self.log_format,& other.log_format) && + compare_option_bool(self.auto_start, other.auto_start) && + self.port == other.port && + self.https == other.https && + compare_option_bool(self.capture_subdomains, other.capture_subdomains) && + compare_option_bool(self.forward_subdomains, other.forward_subdomains) + } +} + +impl Eq for InProcessSiteConfig {} +fn compare_option_bool(a: Option, b: Option) -> bool { + let result = match (a, b) { + (None, Some(false)) | (Some(false), None) => true, + _ => a == b, + }; + println!("Comparing Option: {:?} vs {:?} -- result: {result}", a, b); + result +} + +fn compare_option_log_format(a: &Option, b: &Option) -> bool { + let result = match (a, b) { + (None, Some(LogFormat::standard)) | (Some(LogFormat::standard), None) => true, + _ => a == b, + }; + println!("Comparing Option: {:?} vs {:?} -- result: {result}", a, b); + result } #[derive(Debug, Eq,PartialEq,Hash, Clone, Serialize, Deserialize, ToSchema)] -pub (crate) enum H2Hint { +pub enum Hint { + /// Server supports http2 over tls H2, - H2C + /// Server supports http2 via clear text by using an upgrade header + H2C, + /// Server 
supports http2 via clear text by using prior knowledge + H2CPK } #[derive(Debug, Clone, Serialize, Deserialize, ToSchema,Eq,PartialEq,Hash,)] pub struct Backend { pub address : String, + /// This can be zero in case the backend is a hosted process, in which case we will need to resolve the current active_port pub port: u16, - pub https : Option + pub https : Option, + /// H2C,H2,H2CPK - used to signal use of prior knowledge http2 or http2 over clear text. + pub hints : Option>, } - -#[derive(Debug, Eq,PartialEq,Hash, Clone, Serialize, Deserialize, ToSchema)] -pub (crate) struct RemoteSiteConfig{ - /// H2C or H2 - used to signal use of prior knowledge http2 or http2 over clear text. - pub h2_hint : Option, +#[derive(Debug, Hash, Clone, Serialize, Deserialize, ToSchema)] +pub struct RemoteSiteConfig{ pub host_name : String, pub backends : Vec, /// If you wish to use wildcard routing for any subdomain under the 'host_name' @@ -80,6 +157,18 @@ pub (crate) struct RemoteSiteConfig{ pub forward_subdomains : Option } +impl PartialEq for RemoteSiteConfig { + fn eq(&self, other: &Self) -> bool { + self.host_name == other.host_name && + self.backends == other.backends && + compare_option_bool(self.capture_subdomains, other.capture_subdomains) && + compare_option_bool(self.disable_tcp_tunnel_mode, other.disable_tcp_tunnel_mode) && + compare_option_bool(self.forward_subdomains, other.forward_subdomains) + } +} + +impl Eq for RemoteSiteConfig {} + pub enum BackendFilter { Http, Https, @@ -92,353 +181,95 @@ fn filter_backend(backend: &Backend, filter: &BackendFilter) -> bool { BackendFilter::Any => true } } + impl RemoteSiteConfig { - pub async fn next_backend(&self,state:&GlobalState, backend_filter: BackendFilter) -> Backend { - - let stats = state.3.clone(); + pub async fn next_backend(&self,state:&GlobalState, backend_filter: BackendFilter) -> Option { - let ro_guard = stats.read().await; - let seen_before = ro_guard.contains_key(&self.host_name); - - let count = if seen_before { - let existing_stats_for_this_target = ro_guard.get(&self.host_name).expect("we never remove items from the map so this should always be here").clone(); - let mut guard = existing_stats_for_this_target.write().await; - guard.request_count += 1; - guard.request_count - } else { - drop(ro_guard); - let mut guard = stats.write().await; - let my_new_target_info = std::sync::Arc::new(tokio::sync::RwLock::new(crate::TargetRequestCount { request_count: 1 })); - guard.insert(self.host_name.clone(), my_new_target_info); - 1 - }; - let filtered_backends = self.backends.iter().filter(|x|filter_backend(x,&backend_filter)) .collect::>(); - let selected_backend = *filtered_backends.get((count % (filtered_backends.len() as u128)) as usize ) + if filtered_backends.len() == 1 { return Some(filtered_backends[0].clone()) }; + if filtered_backends.len() == 0 { return None }; + + + let count = match state.app_state.statistics.remote_targets_stats.get_mut(&self.host_name) { + Some(mut guard) => { + let (_k,v) = guard.pair_mut(); + 1 + v.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + }, + None => { + state.app_state.statistics.remote_targets_stats.insert(self.host_name.clone(), std::sync::atomic::AtomicUsize::new(1)); + 1 + } + } as usize; + + let selected_backend = *filtered_backends.get((count % (filtered_backends.len() as usize)) as usize ) .expect("we should always have at least one backend but found none. 
this is a bug in oddbox ."); - selected_backend.clone() + + + Some(selected_backend.clone()) } } -#[derive(Debug, Clone, Serialize, Deserialize,ToSchema)] -pub struct OddBoxConfig { +#[derive(Debug, Clone, Serialize, Deserialize,ToSchema, PartialEq, Eq, Hash)] +pub struct OddBoxV2Config { #[schema(value_type = String)] - pub (crate) version : super::OddBoxConfigVersion, - pub (crate) root_dir : Option, + pub version : super::OddBoxConfigVersion, + pub root_dir : Option, #[serde(default = "default_log_level")] - pub (crate) log_level : Option, + pub log_level : Option, /// Defaults to true. Lets you enable/disable h2/http11 tls alpn algs during initial connection phase. #[serde(default = "true_option")] - pub (crate) alpn : Option, - pub (crate) port_range_start : u16, + pub alpn : Option, + pub port_range_start : u16, #[serde(default = "default_log_format")] - pub (crate) default_log_format : LogFormat, + pub default_log_format : LogFormat, #[schema(value_type = String)] - pub (crate) ip : Option, + pub ip : Option, #[serde(default = "default_http_port_8080")] - pub (crate) http_port : Option, + pub http_port : Option, #[serde(default = "default_https_port_4343")] - pub (crate) tls_port : Option, + pub tls_port : Option, #[serde(default = "true_option")] - pub (crate) auto_start : Option, - pub (crate) env_vars : Vec, - pub (crate) remote_target : Option>, - pub (crate) hosted_process : Option>, - pub (crate) admin_api_port : Option, - pub (crate) path : Option - -} -fn default_log_level() -> Option { - Some(LogLevel::Info) -} -fn default_log_format() -> LogFormat { - LogFormat::standard -} -fn default_https_port_4343() -> Option { - Some(4343) -} -fn default_http_port_8080() -> Option { - Some(8080) -} + pub auto_start : Option, + pub env_vars : Vec, + pub remote_target : Option>, + pub hosted_process : Option>, + pub admin_api_port : Option, + pub path : Option -fn true_option() -> Option { - Some(true) } -impl OddBoxConfig { - - - // Validates and populates variables in the configuration - pub fn init(&mut self,cfg_path:&str) -> anyhow::Result<()> { - - self.path = Some(std::path::Path::new(&cfg_path).canonicalize()?.to_str().unwrap_or_default().into()); - - - let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; - let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; - - tracing::info!("Resolved home directory: {}",&resolved_home_dir_str); - - let cfg_dir = Self::get_parent(cfg_path)?; - - if let Some(rd) = self.root_dir.as_mut() { - - if rd.contains("$root_dir") { - anyhow::bail!("it is clearly not a good idea to use $root_dir in the configuration of root dir...") - } - - let rd_with_vars_replaced = rd - .replace("$cfg_dir", &cfg_dir) - .replace("~", resolved_home_dir_str); - - let canonicalized_with_vars = - match std::fs::canonicalize(rd_with_vars_replaced.clone()) { - Ok(resolved_path) => { - resolved_path.display().to_string() - // we dont want to use ext path def on windows - .replace("\\\\?\\", "") - } - Err(e) => { - anyhow::bail!(format!("root_dir item in configuration ({rd}) resolved to this: '{rd_with_vars_replaced}' - error: {}", e)); - } - }; - - *rd = canonicalized_with_vars; - - tracing::debug!("$root_dir resolved to: {rd}") - } - - let cloned_root_dir = self.root_dir.clone(); - - - - - if let Some(procs) = self.hosted_process.as_deref_mut() { - for x in &mut procs.iter_mut() { - - if x.dir.len() < 5 { 
anyhow::bail!(format!("Invalid path configuration for {:?}",x))} - - Self::massage_proc(cfg_path, &cloned_root_dir, x)?; - - - // basic sanity check.. - if x.dir.contains("$root_dir") { - anyhow::bail!("Invalid configuration: {x:?}. Missing root_dir in configuration file but referenced for this item..") - } - - // if no log format is specified for the process but there is a global format, override it - if x.log_format.is_none() { - x.log_format = Some(self.default_log_format.clone()) - } - } - } - - - - Ok(()) - } - - pub fn is_valid(&self) -> anyhow::Result<()> { - - let mut all_host_names: Vec<&str> = vec![ - self.remote_target.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default(), - self.hosted_process.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default() - - ].concat(); - - all_host_names.sort(); - - let all_count = all_host_names.len(); - - all_host_names.dedup(); - - let unique_count = all_host_names.len(); - - if all_count != unique_count { - anyhow::bail!(format!("duplicated host names detected in config.")) - } - - Ok(()) - - } - - - fn get_parent(p:&str) -> anyhow::Result { - if let Some(directory_path_str) = - std::path::Path::new(&p) - .parent() - .map(|p| p.to_str().unwrap_or_default()) - { - if directory_path_str.eq("") { - tracing::debug!("$cfg_dir resolved to '.'"); - Ok(".".into()) - } else { - tracing::debug!("$cfg_dir resolved to {directory_path_str}"); - Ok(directory_path_str.into()) - } - - } else { - bail!(format!("Failed to resolve $cfg_dir")); - } - } - - fn massage_proc(cfg_path:&str,root_dir:&Option, proc:&mut InProcessSiteConfig) -> anyhow::Result<()> { - - let cfg_dir = Self::get_parent(&cfg_path)?; - - let resolved_home_dir_path = dirs::home_dir().ok_or(anyhow::anyhow!(String::from("Failed to resolve home directory.")))?; - let resolved_home_dir_str = resolved_home_dir_path.to_str().ok_or(anyhow::anyhow!(String::from("Failed to parse home directory.")))?; - - let with_vars = |x:&str| -> String { - x.replace("$root_dir", & if let Some(rd) = &root_dir { rd.to_string() } else { "$root_dir".to_string() }) - .replace("$cfg_dir", &cfg_dir) - .replace("~", resolved_home_dir_str) - }; - - for a in &mut proc.args { - *a = with_vars(a) - } - - proc.dir = with_vars(&proc.dir); - proc.bin = with_vars(&proc.bin); - - Ok(()) - - } - - pub (crate) async fn add_or_replace_hosted_process(&mut self,hostname:&str,mut item:InProcessSiteConfig,state:GlobalState) -> anyhow::Result<()> { - - Self::massage_proc( - &self.path.clone().unwrap_or_default(), - &self.root_dir, - &mut item - )?; - - if let Some(hosted_site_configs) = &mut self.hosted_process { - - - - for x in hosted_site_configs.iter_mut() { - if hostname == x.host_name { - - let (tx,mut rx) = tokio::sync::mpsc::channel(1); - - state.2.send(crate::http_proxy::ProcMessage::Delete(hostname.into(),tx))?; - - if rx.recv().await == Some(0) { - // when we get this message, we know that the process has been stopped - // and that the loop has been exited as well. - tracing::debug!("Received a confirmation that the process was deleted"); - } else { - tracing::debug!("Failed to receive a confirmation that the process was deleted. 
This is a bug in odd-box."); - }; +impl crate::configuration::OddBoxConfiguration for OddBoxV2Config { - break; - } - }; - tracing::debug!("Pushing a new process to the configuration thru the admin api"); - hosted_site_configs.retain(|x| x.host_name != item.host_name); - hosted_site_configs.retain(|x| x.host_name != hostname); - hosted_site_configs.push(item.clone()); - - - tokio::task::spawn(crate::proc_host::host( - item.clone(), - state.2.subscribe(), - state.clone(), - )); - tracing::trace!("Spawned a new thread for site: {:?}",hostname); - - let mut guard = state.0.write().await; - guard.site_states_map.retain(|k,_v| k != hostname); - guard.site_states_map.insert(hostname.to_owned(), crate::types::app_state::ProcState::Stopped); - } - - + fn write_to_disk(&self) -> anyhow::Result<()> { - if let Some(p) = &self.path { - self.write_to_disk(&p) - } else { + let current_path = if let Some(p) = &self.path {p} else { bail!(ConfigurationUpdateError::Bug("No path found to the current configuration".into())) - } - - - - } + }; + let formatted_toml = self.to_string()?; - pub (crate) async fn add_or_replace_remote_site(&mut self,hostname:&str,item:RemoteSiteConfig,state:GlobalState) -> anyhow::Result<()> { - - - if let Some(sites) = self.remote_target.as_mut() { - // out with the old, in with the new - sites.retain(|x| x.host_name != hostname); - sites.retain(|x| x.host_name != item.host_name); - sites.push(item.clone()); - - // same as above but for the TUI state - let mut guard = state.0.write().await; - guard.site_states_map.retain(|k,_v| *k != item.host_name); - guard.site_states_map.retain(|k,_v| k != hostname); - guard.site_states_map.insert(hostname.to_owned(), crate::types::app_state::ProcState::Remote); - } - + let original_path = Path::new(¤t_path); + let backup_path = original_path.with_extension("toml.backup"); + std::fs::rename(original_path, &backup_path)?; - if let Some(p) = &self.path { - self.write_to_disk(&p) + if let Err(e) = std::fs::write(current_path, formatted_toml) { + bail!("Failed to write config to disk: {e}") } else { - bail!(ConfigurationUpdateError::Bug("No path found to the current configuration".into())) + Ok(()) } - - } - - - -} - -#[derive(Debug)] -enum ConfigurationUpdateError { - Bug(String) -} - - -impl std::fmt::Display for ConfigurationUpdateError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - // ConfigurationUpdateError::NotFound => { - // f.write_str("No such hosted process found.") - // }, - // ConfigurationUpdateError::FailedToSave(e) => { - // f.write_fmt(format_args!("Failed to save due to error: {}",e)) - // }, - ConfigurationUpdateError::Bug(e) => { - f.write_fmt(format_args!("Failed to save due to a bug in odd-box: {}",e)) - } - } - } -} - -impl OddBoxConfig { - - pub fn save(&self) -> anyhow::Result<()> { - self.write_to_disk(self.path.clone().expect("must have been loaded from somewhere..").as_str())?; - Ok(()) } - // note: this seems silly but its needed because neither toml-rs nor toml_edit supports any decent - // formatting customization and ends up with spread out arrays of tables rather - // than inlining like we usually do for odd-box configs. 
- pub fn write_to_disk(&self,current_path:&str) -> anyhow::Result<()> { + fn to_string(&self) -> anyhow::Result { let mut formatted_toml = Vec::new(); formatted_toml.push(format!("version = \"{:?}\"", self.version)); @@ -483,61 +314,78 @@ impl OddBoxConfig { formatted_toml.push(format!("default_log_format = \"{:?}\"", self.default_log_format )); - formatted_toml.push("env_vars = [".to_string()); - for env_var in &self.env_vars { - formatted_toml.push(format!( - "\t{{ key = {:?}, value = {:?} }},", - env_var.key, env_var.value - )); + if &self.env_vars.len() > &0 { + formatted_toml.push("env_vars = [".to_string()); + for env_var in &self.env_vars { + formatted_toml.push(format!( + "\t{{ key = {:?}, value = {:?} }},", + env_var.key, env_var.value + )); + } + formatted_toml.push("]".to_string()); } - formatted_toml.push("]".to_string()); - - // TODO ---- backend config here - - // if let Some(remote_sites) = &self.remote_target { - // for site in remote_sites { - // formatted_toml.push("\n[[remote_target]]".to_string()); - // formatted_toml.push(format!("host_name = {:?}", site.host_name)); - // formatted_toml.push(format!("target_hostname = {:?}", site.target_hostname)); - // if let Some(hint) = &site.h2_hint { - // formatted_toml.push(format!("h2_hint = \"{:?}\"", hint)); - // } - + + if let Some(remote_sites) = &self.remote_target { + for site in remote_sites { + formatted_toml.push("\n[[remote_target]]".to_string()); + formatted_toml.push(format!("host_name = {:?}", site.host_name)); + + if let Some(true) = site.forward_subdomains { + formatted_toml.push(format!("forward_subdomains = true")); + } - // if let Some(capture_subdomains) = site.capture_subdomains { - // formatted_toml.push(format!("capture_subdomains = {}", capture_subdomains)); - // } + if let Some(true) = site.capture_subdomains { + formatted_toml.push(format!("capture_subdomains = true")); + } - // if let Some(b) = site.https { - // formatted_toml.push(format!("https = {}", b)); - // } - // if let Some(http) = site.port { - // formatted_toml.push(format!("port = {}", http)); - // } - - // if let Some(disable_tcp_tunnel_mode) = site.disable_tcp_tunnel_mode { - // formatted_toml.push(format!("disable_tcp_tunnel_mode = {}", disable_tcp_tunnel_mode)); - // } - // } - // } + if let Some(true) = site.disable_tcp_tunnel_mode { + formatted_toml.push(format!("disable_tcp_tunnel_mode = {}", true)); + } + + formatted_toml.push("backends = [".to_string()); + + let backend_strings = site.backends.iter().map(|b| { + let https = if let Some(true) = b.https { format!("https = true, ") } else { format!("") }; + + let hints = if let Some(hints) = &b.hints { + format!(", hints = [{}]",hints.iter().map(|h|format!("{h:?}")).collect::>().join(", ")) + } else { + String::new() + }; + + format!("\t{{ {}address=\"{}\", port={}{hints}}}",https,b.address, b.port)} + + ).collect::>(); + + formatted_toml.push(backend_strings.join(",\n")); + + formatted_toml.push("]".to_string()); + + } + } if let Some(processes) = &self.hosted_process { for process in processes { formatted_toml.push("\n[[hosted_process]]".to_string()); formatted_toml.push(format!("host_name = {:?}", process.host_name)); - formatted_toml.push(format!("dir = {:?}", process.dir)); + if let Some(d) = &process.dir { + formatted_toml.push(format!("dir = {:?}", d)); + } + formatted_toml.push(format!("bin = {:?}", process.bin)); - if let Some(hint) = &process.h2_hint { - formatted_toml.push(format!("h2_hint = \"{:?}\"", hint)); + + if let Some(hint) = &process.hints { + 
formatted_toml.push("h2_hints = [".to_string()); + let joint = hint.iter().map(|h| format!("{:?}", h)).collect::>().join(", "); + formatted_toml.push("]".to_string()); } - let args = process.args.iter().map(|arg| format!("{:?}", arg)).collect::>().join(", "); - formatted_toml.push(format!("args = [{}]", args)); + let args = process.args.clone().unwrap_or_default().iter() + .map(|arg| format!("\n {:?}", arg)).collect::>().join(", "); + + formatted_toml.push(format!("args = [{}\n]", args)); - - - if let Some(auto_start) = process.auto_start { formatted_toml.push(format!("auto_start = {}", auto_start)); } @@ -546,113 +394,232 @@ impl OddBoxConfig { if let Some(b) = process.https { formatted_toml.push(format!("https = {}", b)); } - if let Some(http) = process.port { - formatted_toml.push(format!("port = {}", http)); + if let Some(port) = process.port { + formatted_toml.push(format!("port = {}", port)); } - if let Some(capture_subdomains) = process.capture_subdomains { - formatted_toml.push(format!("capture_subdomains = {}", capture_subdomains)); - } else { - formatted_toml.push(format!("capture_subdomains = {}", "false")); + if let Some(true) = process.capture_subdomains { + formatted_toml.push(format!("capture_subdomains = {}", "true")); } - formatted_toml.push("env_vars = [".to_string()); - for env_var in &process.env_vars { - formatted_toml.push(format!( - "\t{{ key = {:?}, value = {:?} }},", - env_var.key, env_var.value - )); + if let Some(evars) = &process.env_vars { + formatted_toml.push("env_vars = [".to_string()); + for env_var in evars { + formatted_toml.push(format!( + "\t{{ key = {:?}, value = {:?} }},", + env_var.key, env_var.value + )); + } + formatted_toml.push("]".to_string()); } - formatted_toml.push("]".to_string()); + } } + Ok(formatted_toml.join("\n")) + } + fn example() -> OddBoxV2Config { + OddBoxV2Config { + path: None, + admin_api_port: None, + version: super::OddBoxConfigVersion::V2, + alpn: Some(false), + auto_start: Some(true), + default_log_format: LogFormat::standard, + env_vars: vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ], + ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + log_level: Some(LogLevel::Info), + http_port: Some(80), + port_range_start: 4200, + hosted_process: Some(vec![ + InProcessSiteConfig { + proc_id: crate::ProcId::new(), + active_port: None, + forward_subdomains: None, + disable_tcp_tunnel_mode: Some(false), + args: Some(vec!["--test".to_string()]), + auto_start: Some(true), + bin: "my_bin".into(), + capture_subdomains: None, + env_vars: Some(vec![ + EnvVar { key: "some_key".into(), value:"some_val".into() }, + EnvVar { key: "another_key".into(), value:"another_val".into() }, + ]), + host_name: "some_host.local".into(), + port: Some(443) , + log_format: Some(LogFormat::standard), + dir: None, + https: Some(true), + hints: None, + exclude_from_start_all: None + + } + ]), + remote_target: Some(vec![ + RemoteSiteConfig { + forward_subdomains: None, + host_name: "lobsters.local".into(), + backends: vec![ + Backend { + hints: None, + address: "lobste.rs".into(), + port: 443, + https: Some(true) + } + ], + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: Some(false) + }, + RemoteSiteConfig { + forward_subdomains: Some(true), + host_name: "google.local".into(), + backends: vec![ + Backend { + hints: None, + address: "google.com".into(), + port: 443, + https: Some(true) + } + ], + capture_subdomains: Some(false), + disable_tcp_tunnel_mode: 
Some(true) + } + ]), + root_dir: Some("/tmp".into()), + tls_port: Some(443) - let original_path = Path::new(current_path); - let backup_path = original_path.with_extension("toml.backup"); - std::fs::rename(original_path, &backup_path)?; - - if let Err(e) = std::fs::write(current_path, formatted_toml.join("\n")) { - bail!("Failed to write config to disk: {e}") - } else { - Ok(()) } - } + + } + +fn default_log_level() -> Option { + Some(LogLevel::Info) +} +fn default_log_format() -> LogFormat { + LogFormat::standard +} +fn default_https_port_4343() -> Option { + Some(4343) +} +fn default_http_port_8080() -> Option { + Some(8080) } +fn true_option() -> Option { + Some(true) +} -pub fn example_v2() -> OddBoxConfig { - OddBoxConfig { - path: None, - admin_api_port: None, - version: super::OddBoxConfigVersion::V2, - alpn: Some(false), - auto_start: Some(true), - default_log_format: LogFormat::standard, - env_vars: vec![ - EnvVar { key: "some_key".into(), value:"some_val".into() }, - EnvVar { key: "another_key".into(), value:"another_val".into() }, - ], - ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), - log_level: Some(LogLevel::Info), - http_port: Some(80), - port_range_start: 4200, - hosted_process: Some(vec![ - InProcessSiteConfig { - forward_subdomains: None, - disable_tcp_tunnel_mode: Some(false), - args: vec!["--test".to_string()], - auto_start: Some(true), - bin: "my_bin".into(), - capture_subdomains: None, - env_vars: vec![ - EnvVar { key: "some_key".into(), value:"some_val".into() }, - EnvVar { key: "another_key".into(), value:"another_val".into() }, - ], - host_name: "some_host.local".into(), - port: Some(443) , - log_format: Some(LogFormat::standard), - dir: "/tmp".into(), - https: Some(true), - h2_hint: None, - disabled :None - +#[derive(Debug)] +enum ConfigurationUpdateError { + Bug(String) +} + + +impl std::fmt::Display for ConfigurationUpdateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + // ConfigurationUpdateError::NotFound => { + // f.write_str("No such hosted process found.") + // }, + // ConfigurationUpdateError::FailedToSave(e) => { + // f.write_fmt(format_args!("Failed to save due to error: {}",e)) + // }, + ConfigurationUpdateError::Bug(e) => { + f.write_fmt(format_args!("Failed to save due to a bug in odd-box: {}",e)) } - ]), - remote_target: Some(vec![ - RemoteSiteConfig { - forward_subdomains: None, - h2_hint: None, - host_name: "lobsters.local".into(), - backends: vec![ - Backend { - address: "lobste.rs".into(), - port: 443, - https: Some(true) - } - ], - capture_subdomains: Some(false), - disable_tcp_tunnel_mode: Some(false) - }, - RemoteSiteConfig { - forward_subdomains: Some(true), - h2_hint: None, - host_name: "google.local".into(), - backends: vec![ - Backend { - address: "google.com".into(), - port: 443, - https: Some(true) + } + } +} + + + + + +// V1 ---> V2 +impl TryFrom for super::v2::OddBoxV2Config{ + + type Error = String; + + fn try_from(old_config: super::v1::OddBoxV1Config) -> Result { + let new_config = super::v2::OddBoxV2Config { + path: None, + version: super::OddBoxConfigVersion::V2, + admin_api_port: None, + alpn: Some(false), // allowing alpn would be a breaking change for h2c when using old configuration format + auto_start: old_config.auto_start, + default_log_format: old_config.default_log_format, + env_vars: old_config.env_vars, + ip: old_config.ip, + log_level: old_config.log_level, + http_port: old_config.http_port, + port_range_start: old_config.port_range_start, + hosted_process: 
Some(old_config.hosted_process.unwrap_or_default().into_iter().map(|x|{ + super::v2::InProcessSiteConfig { + exclude_from_start_all: None, + proc_id: crate::ProcId::new(), + active_port: None, + forward_subdomains: x.forward_subdomains, + disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, + args: if x.args.len() > 0 { Some(x.args) } else { None }, + auto_start: { + if x.disabled != x.auto_start { + tracing::warn!("Your configuration contains both auto_start and disabled for the same process. The auto_start setting will be used. Please remove the disabled setting as it is no longer used.") + } + if let Some(d) = x.disabled { + Some(!d) + } else if let Some(a) = x.auto_start { + Some(a) + } else { + None + } + }, + bin: x.bin, + capture_subdomains: x.capture_subdomains, + env_vars: if x.env_vars.len() > 0 { Some(x.env_vars) } else { None }, + host_name: x.host_name, + port: x.port, + log_format: x.log_format, + dir: if x.dir.is_empty() { None } else { Some(x.dir) }, + https: x.https, + hints: match x.h2_hint { + Some(super::H2Hint::H2) => Some(vec![crate::configuration::v2::Hint::H2]), + Some(super::H2Hint::H2C) => Some(vec![crate::configuration::v2::Hint::H2C]), + None => None, } - ], - capture_subdomains: Some(false), - disable_tcp_tunnel_mode: Some(true) - } - ]), - root_dir: Some("/tmp".into()), - tls_port: Some(443) + + } + }).collect()), + remote_target: Some(old_config.remote_target.unwrap_or_default().iter().map(|x|{ + super::v2::RemoteSiteConfig { + disable_tcp_tunnel_mode: x.disable_tcp_tunnel_mode, + capture_subdomains: x.capture_subdomains, + forward_subdomains: x.forward_subdomains, + backends: vec![ + super::v2::Backend { + hints: match x.h2_hint { + Some(super::H2Hint::H2) => Some(vec![crate::configuration::v2::Hint::H2]), + Some(super::H2Hint::H2C) => Some(vec![crate::configuration::v2::Hint::H2C]), + None => None, + }, + address: x.target_hostname.clone(), + port: if let Some(p) = x.port {p} else { + if x.https.unwrap_or_default() { 443 } else { 80 } + }, + https: x.https + } + ], + host_name: x.host_name.clone(), + } + }).collect()), + root_dir: old_config.root_dir, + tls_port: old_config.tls_port + }; + Ok(new_config) } } \ No newline at end of file diff --git a/src/http_proxy/mod.rs b/src/http_proxy/mod.rs index dd3bccc..c975a8c 100644 --- a/src/http_proxy/mod.rs +++ b/src/http_proxy/mod.rs @@ -1,11 +1,15 @@ mod websockets; mod service; mod utils; -use rustls::ClientConfig; -pub (crate) use service::*; +use std::sync::Arc; + +use hyper::body::Incoming; +use hyper_rustls::HttpsConnector; +use hyper_util::client::legacy::{connect::HttpConnector, Client}; +pub use service::*; use tokio::sync::mpsc::Sender; -pub (crate) use utils::*; -pub (crate) use crate::configuration::ConfigWrapper; +pub use utils::*; +pub use crate::configuration::ConfigWrapper; use crate::global_state::GlobalState; #[derive(Clone,Debug)] @@ -19,9 +23,10 @@ pub enum ProcMessage { #[derive(Debug, Clone)] pub struct ReverseProxyService { - pub(crate) state: GlobalState, - pub(crate) remote_addr : Option, - pub(crate) tx: std::sync::Arc>, - pub(crate) is_https_only:bool, - pub(crate) client_tls_config: ClientConfig + pub state: Arc, + pub remote_addr : Option, + pub tx: std::sync::Arc>, + pub is_https_only:bool, + pub client: Client, Incoming>, + pub h2_client: Client, Incoming> } diff --git a/src/http_proxy/service.rs b/src/http_proxy/service.rs index 49b6fed..82a700c 100644 --- a/src/http_proxy/service.rs +++ b/src/http_proxy/service.rs @@ -1,42 +1,53 @@ +use std::borrow::Cow; use std::sync::Arc; use 
std::time::Duration; use bytes::Bytes; -use http_body::Frame; -use http_body_util::{Either, Full, StreamBody}; +use http_body::{Body, Frame}; +use http_body_util::combinators::BoxBody; +use http_body_util::{BodyExt, Either, Full, StreamBody}; use hyper::service::Service; use hyper::{body::Incoming as IncomingBody, Request, Response}; +use hyper_rustls::HttpsConnector; +use hyper_util::client::legacy::connect::{Connection, HttpConnector}; +use hyper_util::client::legacy::Client; use hyper_util::rt::TokioExecutor; +use tokio::io::AsyncWriteExt; use tokio::net::TcpStream; use tokio_stream::wrappers::ReceiverStream; +use tower_http::services::fs::ServeFileSystemResponseBody; use std::future::Future; use std::pin::Pin; use crate::global_state::GlobalState; use crate::types::app_state::ProcState; use crate::CustomError; -use hyper::{Method, StatusCode}; - +use hyper::{upgrade, Method, StatusCode}; +use lazy_static::lazy_static; use super::{ProcMessage, ReverseProxyService, WrappedNormalResponse}; use super::proxy; + pub enum SomeIo { Https(hyper_util::rt::TokioIo>), Http(hyper_util::rt::TokioIo) } -pub (crate) async fn serve(service:ReverseProxyService,io:SomeIo) { - +lazy_static! { + static ref SERVER_ONE: hyper_util::server::conn::auto::Builder = + hyper_util::server::conn::auto::Builder::new(TokioExecutor::new()); +} +pub async fn serve(service:ReverseProxyService,io:SomeIo) { + let result = match io { SomeIo::Https(tls_stream) => { - hyper_util::server::conn::auto::Builder::new(TokioExecutor::new()) - .serve_connection_with_upgrades(tls_stream, service).await + SERVER_ONE.serve_connection_with_upgrades(tls_stream, service).await }, SomeIo::Http(tcp_stream) => { - - hyper_util::server::conn::auto::Builder::new(TokioExecutor::new()) + SERVER_ONE .serve_connection_with_upgrades(tcp_stream, service).await - } + }, + }; match result { Ok(_) => {}, @@ -65,7 +76,7 @@ pub type EpicBody = FullOrStreamBody >; -pub(crate) type EpicResponse = hyper::Response; +pub type EpicResponse = hyper::Response; pub fn create_response_channel(buf_size:usize) -> ( @@ -101,9 +112,12 @@ impl<'a> Service> for ReverseProxyService { type Response = EpicResponse; type Error = CustomError; type Future = Pin> + Send>>; + + fn call(&self, req: hyper::Request) -> Self::Future { tracing::trace!("INCOMING REQ: {:?}",req); + tracing::trace!("VERSION: {:?}",req.version()); // handle websocket upgrades separately if hyper_tungstenite::is_upgrade_request(&req) { @@ -120,17 +134,25 @@ impl<'a> Service> for ReverseProxyService { } // handle normal proxy path - let f = handle_http( + let f = handle_http_request( self.remote_addr.expect("there must always be a client"), req, self.tx.clone(), self.state.clone(), self.is_https_only, - self.client_tls_config.clone() + self.client.clone(), + self.h2_client.clone() ); return Box::pin(async move { - f.await + match f.await { + Ok(x) => { + Ok(x) + }, + Err(e) => { + Err(CustomError(format!("yeah that was not cool {e:?}"))) + }, + } }) @@ -138,15 +160,28 @@ impl<'a> Service> for ReverseProxyService { } #[allow(dead_code)] -async fn handle_http( +async fn handle_http_request( client_ip: std::net::SocketAddr, req: Request, tx: Arc>, - state: GlobalState, + state: Arc, is_https:bool, - client_tls_config:rustls::ClientConfig + client: Client, hyper::body::Incoming>, + h2_client: Client, hyper::body::Incoming>, + ) -> Result { + + + + let cfg_clone = { state.config.read().await.0.clone()} ; + + + // let mut response = EpicResponse::new(create_epic_string_full_body(&"hey nerd")); + // 
*response.status_mut() = StatusCode::OK; + + // return Ok(response); + let req_host_name = if let Some(hh) = req.headers().get("host") { let hostname_and_port = hh.to_str().map_err(|e|CustomError(format!("{e:?}")))?.to_string(); @@ -155,6 +190,7 @@ async fn handle_http( req.uri().authority().ok_or(CustomError(format!("No hostname and no Authority found")))?.host().to_string() }; + tracing::trace!("Handling request from {client_ip:?} on hostname {req_host_name:?}"); let req_path = req.uri().path(); @@ -169,31 +205,37 @@ async fn handle_http( }) .unwrap_or_else(std::collections::HashMap::new); - + // TODO - try remove this for perf if let Some(r) = intercept_local_commands(&req_host_name,¶ms,req_path,tx.clone()).await { return Ok(r) } - let guarded = state.1.read().await; - let processes = guarded.hosted_process.clone().unwrap_or_default(); - if let Some(target_cfg) = processes.iter().find(|p| { + + let found_target = if let Some(processes) = &cfg_clone.hosted_process { + processes.iter().find(|p| { req_host_name == p.host_name || p.capture_subdomains.unwrap_or_default() && req_host_name.ends_with(&format!(".{}",p.host_name)) - }) { + }) + } else { + None + }; + + + + + if let Some(target_cfg) = found_target { let current_target_status : Option = { - let guard = state.0.read().await; - let info = guard.site_states_map.iter().find(|x|x.0==&target_cfg.host_name); + let info = state.app_state.site_status_map.get(&target_cfg.host_name); match info { - Some((_,target_state)) => Some(target_state.clone()), + Some(data) => Some(data.value().clone()), None => None, } }; match current_target_status { - Some(ProcState::Running) => {}, - None => {}, + Some(ProcState::Running) | Some(ProcState::Faulty) | Some(ProcState::Starting) => {}, _ => { // auto start site in case its been disabled by other requests _ = tx.send(super::ProcMessage::Start(target_cfg.host_name.to_owned())).map_err(|e|format!("{e:?}")); @@ -205,6 +247,7 @@ async fn handle_http( if cts == crate::ProcState::Stopped || cts == crate::ProcState::Starting { match req.method() { &Method::GET => { + // todo - opt in/out via cfg ? return Ok(EpicResponse::new(create_epic_string_full_body(&please_wait_response()))) } _ => { @@ -214,6 +257,12 @@ async fn handle_http( } } } + + let port = if let Some(active_port) = target_cfg.active_port { + active_port + } else { + return Err(CustomError(format!("No active port found for {req_host_name}"))) + }; let enforce_https = target_cfg.https.is_some_and(|x|x); let scheme = if enforce_https { "https" } else { "http" }; @@ -222,52 +271,87 @@ async fn handle_http( .and_then(|x| Some(x.as_str())).unwrap_or_default(); if original_path_and_query == "/" { original_path_and_query = ""} - let default_port = if enforce_https { 443 } else { 80 }; - let resolved_host_name = { + let parsed_host_name = { let forward_subdomains = target_cfg.forward_subdomains.unwrap_or_default(); if forward_subdomains { if let Some(subdomain) = get_subdomain(&req_host_name, &target_cfg.host_name) { - tracing::debug!("in-proc forward terminating proxy rewrote subdomain: {subdomain}!"); - format!("{subdomain}.{}", &target_cfg.host_name) + Cow::Owned(format!("{subdomain}.{}", &target_cfg.host_name)) } else { - target_cfg.host_name.clone() + Cow::Borrowed(&target_cfg.host_name) } } else { - target_cfg.host_name.clone() + Cow::Borrowed(&target_cfg.host_name) } }; - - tracing::info!("USING THIS RESOLVED TARGET: {resolved_host_name}"); + // THE RESOLVED HOSTNAME SHOULD BE ADDED AS A HOST HEADER HERE. 
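+        // For example (illustrative values): with forward_subdomains = true, a request for
+        // "api.some_host.local" (captured via capture_subdomains) where the configured host_name
+        // is "some_host.local" yields get_subdomain(..) == Some("api"), so parsed_host_name
+        // becomes "api.some_host.local", while a request for the bare "some_host.local" passes
+        // through unchanged.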
+ + // using ip to avoid dns lookup for local targets + // todo - should this be opt in/out ? let target_url = format!("{scheme}://{}:{}{}", - resolved_host_name, - target_cfg.port.unwrap_or(default_port), + parsed_host_name, + port, + original_path_and_query + ); + + + // we add the host flag manually in proxy method, this is only to avoid dns lookup for local targets. + // todo: opt in/out via cfg + let skip_dns_for_local_target_url = format!("{scheme}://{}:{}{}", + "127.0.0.1", + port, original_path_and_query ); - let target = crate::http_proxy::Target::Proc(target_cfg.clone()); + let target_cfg = (*target_cfg).clone(); + let hints = target_cfg.hints.clone(); + let target = crate::http_proxy::Target::Proc(target_cfg); + + // return Ok(EpicResponse::new(create_epic_string_full_body(&"hey nerd!!!"))); + let result = - proxy(&req_host_name,is_https,state.clone(),req,&target_url,target,client_ip,client_tls_config).await; + proxy( + &parsed_host_name, + is_https, + state.clone(), + req, + &target_url, + target, + client_ip, + client, + h2_client, + &target_url, + enforce_https, + crate::configuration::v2::Backend { + hints: hints, + address: parsed_host_name.to_string(), + port: port, + https: Some(enforce_https) + } + ).await; - map_result(&req_host_name,result).await + map_result(&target_url,result).await } else { - let config = &state.1.read().await.0; - if let Some(remote_target_cfg) = config.remote_target.clone().unwrap_or_default().iter().find(|p|{ + if let Some(remote_target_cfg) = cfg_clone.remote_target.clone().unwrap_or_default().iter().find(|p|{ //tracing::info!("comparing incoming req: {} vs {} ",req_host_name,p.host_name); req_host_name == p.host_name || p.capture_subdomains.unwrap_or_default() && req_host_name.ends_with(&format!(".{}",p.host_name)) }) { - return perform_remote_forwarding(req_host_name,is_https,state.clone(),client_ip,remote_target_cfg,req,client_tls_config).await + return perform_remote_forwarding(req_host_name,is_https,state.clone(),client_ip,remote_target_cfg,req,client.clone(),h2_client.clone()).await } tracing::warn!("Received request that does not match any known target: {:?}", req_host_name); let body_str = format!("Sorry, I don't know how to proxy this request.. {:?}", req); + + + // TODO --- even this seems to cause tcp_closing issues at extreme load .. 
+ let mut response = EpicResponse::new(create_epic_string_full_body(&body_str)); *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; @@ -291,11 +375,12 @@ fn get_subdomain(requested_hostname: &str, backend_hostname: &str) -> Option, client_ip:std::net::SocketAddr, remote_target_config:&crate::configuration::v2::RemoteSiteConfig, req:hyper::Request, - client_tls_config:rustls::ClientConfig + client: Client, hyper::body::Incoming>, + h2_client: Client, hyper::body::Incoming>, ) -> Result { @@ -303,7 +388,11 @@ async fn perform_remote_forwarding( .and_then(|x| Some(x.as_str())).unwrap_or_default(); if original_path_and_query == "/" { original_path_and_query = ""} - let next_backend_target = remote_target_config.next_backend(&state, crate::configuration::v2::BackendFilter::Any).await; + let next_backend_target = if let Some(b) = remote_target_config.next_backend(&state, crate::configuration::v2::BackendFilter::Any).await { + b + } else { + return Err(CustomError("No backend found".to_string())) + }; // if a target is marked with http, we wont try to use http let enforce_https = next_backend_target.https.unwrap_or_default(); @@ -312,13 +401,13 @@ async fn perform_remote_forwarding( let resolved_host_name = { - let forward_subdomains = remote_target_config.forward_subdomains.unwrap_or_default(); - let subdomain = get_subdomain(&req_host_name, &remote_target_config.host_name); - - if forward_subdomains && subdomain.is_some() { - let subdomain = subdomain.unwrap(); - tracing::debug!("remote forward terminating proxy rewrote subdomain: {subdomain}!"); - format!("{subdomain}.{}", &next_backend_target.address) + if remote_target_config.forward_subdomains.unwrap_or_default() { + if let Some(subdomain) = get_subdomain(&req_host_name, &remote_target_config.host_name) { + //tracing::debug!("remote forward terminating proxy rewrote subdomain: {subdomain}!"); + format!("{subdomain}.{}", &next_backend_target.address) + } else { + next_backend_target.address.clone() + } } else { next_backend_target.address.clone() } @@ -330,7 +419,7 @@ async fn perform_remote_forwarding( original_path_and_query ); - tracing::info!("Incoming request to '{}' for remote proxy target {target_url}",next_backend_target.address); + //tracing::info!("Incoming request to '{}' for remote proxy target {target_url}",next_backend_target.address); let result = proxy( &req_host_name, @@ -340,8 +429,11 @@ async fn perform_remote_forwarding( &target_url, crate::http_proxy::Target::Remote(remote_target_config.clone()), client_ip, - client_tls_config, - + client, + h2_client, + &target_url, + next_backend_target.https.unwrap_or_default(), + next_backend_target ).await; map_result(&target_url,result).await @@ -351,49 +443,49 @@ async fn perform_remote_forwarding( async fn map_result(target_url:&str,result:Result) -> Result { match result { - Ok(super::ProxyCallResult::EpicResponse(epic_response)) => { + Ok(super::ProxyCallResult::EpicResponse(epic_response)) => { return Ok(epic_response) - } - Ok(crate::http_proxy::ProxyCallResult::NormalResponse(response)) => { + } + Ok(crate::http_proxy::ProxyCallResult::NormalResponse(response)) => { return create_simple_response_from_incoming(response).await; } Err(crate::http_proxy::ProxyError::LegacyError(error)) => { - tracing::info!("HyperLegacyError - Failed to call {}: {error:?}", &target_url); + tracing::debug!("HyperLegacyError - Failed to call {}: {error:?}", &target_url); Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) 
.body(create_epic_string_full_body(&format!("HyperLegacyError - {error:?}"))) .expect("body building always works")) }, Err(crate::http_proxy::ProxyError::HyperError(error)) => { - tracing::info!("HyperError - Failed to call {}: {error:?}", &target_url); + tracing::debug!("HyperError - Failed to call {}: {error:?}", &target_url); Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(create_epic_string_full_body(&format!("HyperError - {error:?}"))) .expect("body building always works")) }, Err(crate::http_proxy::ProxyError::OddBoxError(error)) => { - tracing::info!("OddBoxError - Failed to call {}: {error:?}", &target_url); + tracing::debug!("OddBoxError - Failed to call {}: {error:?}", &target_url); Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(create_epic_string_full_body(&format!("ODD-BOX-ERROR: {error:?}"))) .expect("body building always works")) }, Err(crate::http_proxy::ProxyError::ForwardHeaderError) => { - tracing::info!("ForwardHeaderError - Failed to call {}", &target_url); + tracing::debug!("ForwardHeaderError - Failed to call {}", &target_url); Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(create_epic_string_full_body("ForwardHeaderError")) .expect("body building always works")) }, Err(crate::http_proxy::ProxyError::InvalidUri(error)) => { - tracing::info!("InvalidUri - Failed to call {}: {error:?}", &target_url); + tracing::debug!("InvalidUri - Failed to call {}: {error:?}", &target_url); Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(create_epic_string_full_body(&format!("InvalidUri: {error:?}"))) .expect("body building always works")) }, Err(crate::http_proxy::ProxyError::UpgradeError(error)) => { - tracing::info!("UpgradeError - Failed to call {}: {error:?}", &target_url); + tracing::debug!("UpgradeError - Failed to call {}: {error:?}", &target_url); Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(create_epic_string_full_body(&format!("UpgradeError: {error:?}"))) @@ -403,6 +495,10 @@ async fn map_result(target_url:&str,result:Result, @@ -468,6 +564,8 @@ async fn intercept_local_commands( None } +// TODO - package these mjs/jsons with the binary if we want to keep it as is +// otherwise get rid of the deps fn please_wait_response() -> String { r#" diff --git a/src/http_proxy/utils.rs b/src/http_proxy/utils.rs index 277a117..fe19e70 100644 --- a/src/http_proxy/utils.rs +++ b/src/http_proxy/utils.rs @@ -3,21 +3,17 @@ use futures_util::FutureExt; use http_body::Frame; use http_body_util::BodyExt; use hyper::{ - body::Incoming, - header::{HeaderName, HeaderValue, InvalidHeaderValue, ToStrError, HOST}, - upgrade::OnUpgrade, - HeaderMap, Request, Response, StatusCode, Version, + body::Incoming, client::conn::http1::Builder, header::{HeaderName, HeaderValue, InvalidHeaderValue, ToStrError, HOST}, upgrade::OnUpgrade, HeaderMap, Request, Response, StatusCode, Version }; -use hyper_util::rt::{TokioExecutor, TokioIo}; - -use rustls::ClientConfig; -use std::{net::SocketAddr, task::Poll, time::Duration}; +use hyper_rustls::HttpsConnector; +use hyper_util::{client::legacy::{connect::HttpConnector, Client}, rt::{TokioExecutor, TokioIo}}; +use std::{borrow::Cow, net::SocketAddr, sync::Arc, task::Poll, time::Duration}; use tungstenite::http; use lazy_static::lazy_static; use crate::{ - configuration::v2::H2Hint, global_state::GlobalState, http_proxy::EpicResponse, tcp_proxy::ReverseTcpProxyTarget, types::proxy_state::{ ConnectionKey, ProxyActiveConnection, 
ProxyActiveConnectionType }, CustomError + configuration::v2::Hint, global_state::GlobalState, http_proxy::EpicResponse, tcp_proxy::ReverseTcpProxyTarget, types::proxy_state::{ ConnectionKey, ProxyActiveConnection, ProxyActiveConnectionType }, CustomError }; lazy_static! { static ref TE_HEADER: HeaderName = HeaderName::from_static("te"); @@ -62,147 +58,158 @@ pub enum Target { Proc(crate::configuration::v2::InProcessSiteConfig), } +// We don't care about the original call scheme, version, etc. +// The target_url is the full URL to the target, including the scheme, it is expected that +// our caller has already determined if the target is http or https depending on whatever backend was selected. +// The job of this method is simply to create a new request with the target url and the original request's headers. +// while also selecting http version and handling upgraded connections. +// TODO: simplify the signature, we dont need it to be this complicated.. pub async fn proxy( - _req_host_name: &str, - is_https:bool, - state: GlobalState, + req_host_name: &str, + original_connection_is_https:bool, + state: Arc, mut req: hyper::Request, target_url: &str, target: Target, client_ip: SocketAddr, - client_tls_config: ClientConfig + client: Client, hyper::body::Incoming>, + h2_only_client: Client, hyper::body::Incoming>, + _fallback_url: &str, + use_https_to_backend_target: bool, + backend: crate::configuration::v2::Backend ) -> Result { let incoming_http_version = req.version(); - - tracing::info!( - "Incoming {incoming_http_version:?} request to terminating proxy from {client_ip:?} with target url: {target_url}" - ); - - - let https_builder = - hyper_rustls::HttpsConnectorBuilder::default().with_tls_config(client_tls_config); - - let mut connector = { https_builder.https_or_http().enable_all_versions().build() }; - - let mut enforce_https = is_https; - let request_upgrade_type = get_upgrade_type(req.headers()); - let request_upgraded = req.extensions_mut().remove::(); - let mut enforce_http2 = false; - let mut target_url = target_url.to_string(); - - - let h2_hint = match &target { - Target::Remote(r) => r.h2_hint.clone(), - Target::Proc(p) => p.h2_hint.clone(), - }; - if let Some(hint) = h2_hint { - match hint { - H2Hint::H2 => { - tracing::debug!("H2 HINT DETECTED"); - enforce_http2 = true; - } - H2Hint::H2C => { - tracing::debug!("H2C HINT DETECTED"); - target_url = target_url.replace("https://", "http://").to_string(); - if enforce_https { - tracing::warn!("Suspicious configuration for target: {target_url}. the domain is marked both with https and h2c.. will connect using h2c..") - } - enforce_https = false; - enforce_http2 = true; + tracing::trace!( + "Incoming {incoming_http_version:?} request to terminating proxy from {client_ip:?} with target url: {target_url}!" 
+ ); + + + let mut backend_supports_prior_knowledge_http2_over_tls = false; + let mut backend_supports_http2_over_clear_text_via_h2c_upgrade_header = false; + let mut backend_supports_http2_h2c_using_prior_knowledge = false; + let mut use_prior_knowledge_http2 = false; + let mut use_h2c_upgrade_header = false; + + for x in &backend.hints.iter().flatten().collect::>() { + match x { + Hint::H2 => { + backend_supports_prior_knowledge_http2_over_tls = true; + }, + Hint::H2C => { + backend_supports_http2_over_clear_text_via_h2c_upgrade_header = true; + }, + Hint::H2CPK => { + backend_supports_http2_h2c_using_prior_knowledge = true; } } - - } else { - if !is_https && req.version() == Version::HTTP_2 { - *req.version_mut() = Version::HTTP_2; - target_url = target_url.replace("https://", "http://").to_string(); - if enforce_https { - tracing::warn!("Suspicious request: h2c request incoming to proxy but target is https.. this is bound to fail..") - } else { - tracing::debug!("Incoming prior knowledge h2c request to {target_url}") - } - enforce_https = false; - enforce_http2 = true; + } + let mut target_url = target_url.to_string(); + // Handle upgrade headers + if let Some(typ) = &request_upgrade_type { + if typ.to_uppercase()=="H2C" { + // if backend_supports_http2_over_clear_text_via_h2c_upgrade_header { + // tracing::trace!("Client used h2c header and backend supports h2c upgrades, this should be fine!") + // } else { + // tracing::trace!("Client used {typ:?} header. The backend has no hint that it supports h2c but we will attempt to upgrade anyway."); + // } + use_h2c_upgrade_header = true; } else { - - // in most other cases it seems safe to just start from http/1.1 - *req.version_mut() = Version::HTTP_11; + //tracing::trace!("Client requested upgrade to {typ:?}. We don't know if the backend supports it, but we will try anyway."); + // note: wont be websocket here as that is handled in another route } } - if enforce_http2 && req.version() != Version::HTTP_2 { - return Err(ProxyError::OddBoxError(format!("connection to {target_url} is only allowed over http2 due to h2/h2c hint on target site."))); - } + + let mut proxied_request = + create_proxied_request(&target_url, req, request_upgrade_type.as_ref(), &req_host_name)?; - if enforce_http2 { - tracing::trace!("enforcing http2!"); - *req.version_mut() = Version::HTTP_2; - } - if enforce_https { - tracing::trace!("enforcing https!"); - connector.enforce_https(); + + if proxied_request.version() == Version::HTTP_2 { + // if client connected to us with http2, we will attempt to do so with the backend as well.. + // todo: not sure this is what we want to do but this is how the old code worked and i dont want to change it right now. + use_prior_knowledge_http2 = true; + } else if backend_supports_prior_knowledge_http2_over_tls && use_https_to_backend_target { + use_prior_knowledge_http2 = true; + } else if backend_supports_http2_over_clear_text_via_h2c_upgrade_header && !use_https_to_backend_target { + use_prior_knowledge_http2 = true; + } else if backend_supports_http2_over_clear_text_via_h2c_upgrade_header && !use_https_to_backend_target { + if use_h2c_upgrade_header { + use_prior_knowledge_http2 = false; + } else { + tracing::warn!("Backend supports h2c but client did not request it. 
Falling back to http1.1."); + } } - let mut proxied_request = - create_proxied_request(&target_url, req, request_upgrade_type.as_ref())?; - + // --------------------------------------------------------------------------------------------- + // H2 THRU ALPN -- SUPPORTS HTTP2 OVER TLS + // H2 PRIOR KNOWLEDGE -- SUPPORTS HTTP2 OVER TLS + // H2C PRIOR KNOWLEDGE -- SUPPORTS HTTP2 OVER CLEAR TEXT + // H2C UPGRADE HEADER -- SUPPORTS HTTP2 OVER CLEAR TEXT VIA UPGRADE HEADER + // if backend does not support http2, we will just use http1.1 and act like nothing happened. + // --------------------------------------------------------------------------------------------- + - let target_scheme = if enforce_https || target_url.to_lowercase().starts_with("https") { - "https" + let client = if use_prior_knowledge_http2 { + *proxied_request.version_mut() = Version::HTTP_2; + &h2_only_client // this requires the backend to support h2 prior knowledge or h2 selection by alpn } else { - "http(s?)" // stupidest thing I've ever seen.. - // we dont know if it will be upgraded at this point, and we dont upgrade the con info after this step.. + &client // this will use the default http1 client, which will upgrade to h2 if the backend supports it thru upgrade header or alpn }; + + let req_is_https = proxied_request.uri().scheme().is_some_and(|x|*x==http::uri::Scheme::HTTPS); + let target_scheme_info_str = if use_https_to_backend_target != req_is_https { + tracing::warn!("Target URL scheme does not match use_https_to_backend_target setting. This is a bug in odd-box, please report it. Will fallback to using the target URL scheme ({}).",target_url); + if req_is_https { + "https" + } else { + "http" + } + } else if use_https_to_backend_target { + "https" + } else { + "http" + }; + let con: ProxyActiveConnection = create_connection( &proxied_request, incoming_http_version, target, &client_ip, - target_scheme, + target_scheme_info_str, proxied_request.version(), &target_url, - is_https + original_connection_is_https ); - tracing::trace!("Sending request:\n{:?}", proxied_request); + tracing::warn!("Sending request:\n{:?}", proxied_request); - if enforce_https { - _ = proxied_request - .headers_mut() - .remove("upgrade-insecure-requests"); - _ = proxied_request.headers_mut().remove("host"); - } - - let executor = TokioExecutor::new(); + + // todo - prevent making a connection if client already has too many tcp connections open let mut response = { - hyper_util::client::legacy::Builder::new(executor) - .http2_only(enforce_http2) - .build(connector) + client .request(proxied_request) .await .map_err(ProxyError::LegacyError)? }; - - - tracing::trace!( - "GOT THIS RESPONSE FROM REQ TO '{target_url}' : {:?}", - response + tracing::warn!( + "GOT THIS RESPONSE FROM REQ TO '{target_url}' : {:?}",response ); - + + // if the backend agreed to upgrade to some other protocol, we will create a bidirectional tunnel for the client and backend to communicate directly. 
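+    // A minimal sketch of such a tunnel (illustrative only - the code below goes through hyper's
+    // upgrade machinery, this just shows the idea of piping the two upgraded streams together):
+    //
+    // async fn tunnel<A, B>(mut client: A, mut backend: B) -> std::io::Result<(u64, u64)>
+    // where
+    //     A: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
+    //     B: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
+    // {
+    //     // copy bytes in both directions until either side shuts down
+    //     tokio::io::copy_bidirectional(&mut client, &mut backend).await
+    // }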
if response.status() == StatusCode::SWITCHING_PROTOCOLS { let response_upgrade_type = get_upgrade_type(response.headers()); - tracing::info!("RESPONSE IS TO UPGRADE TO : {response_upgrade_type:?}!!!"); + tracing::warn!("RESPONSE IS TO UPGRADE TO : {response_upgrade_type:?}."); if request_upgrade_type == response_upgrade_type { if let Some(request_upgraded) = request_upgraded { @@ -244,7 +251,7 @@ pub async fn proxy( let response = super::create_simple_response_from_incoming( - WrappedNormalResponse::new(response,state.clone(),con).await + WrappedNormalResponse::new(response,state.clone(),con) ) .await.map_err(|e|ProxyError::OddBoxError(format!("{e:?}")))?; @@ -261,14 +268,15 @@ pub async fn proxy( ))) } } else { - + // Got a normal response from the backend, we will just forward it to the client! let proxied_response = create_proxied_response(response); - Ok(ProxyCallResult::NormalResponse(WrappedNormalResponse::new(proxied_response,state.clone(),con).await)) + Ok(ProxyCallResult::NormalResponse(WrappedNormalResponse::new(proxied_response,state.clone(),con))) } } + pub struct WrappedNormalResponseBody { b : Incoming, on_drop : Option>, @@ -279,6 +287,7 @@ impl Drop for WrappedNormalResponseBody { //tracing::trace!("dropping active connection due to body drop"); on_drop(); } + } } pub struct WrappedNormalResponse { @@ -289,9 +298,11 @@ impl WrappedNormalResponse { pub fn into_parts(self) -> (http::response::Parts,WrappedNormalResponseBody) { (self.a,self.b) } - pub async fn new(res:Response,state: GlobalState,con: ProxyActiveConnection) -> Self { - //tracing::trace!("Adding connection for this WrappedNormalResponse."); - let con_key = add_connection(state.clone(), con).await; + + + pub fn new(res:Response,state: Arc,con: ProxyActiveConnection) -> Self { + tracing::trace!("Adding connection for this WrappedNormalResponse."); + let con_key = add_connection(state.clone(), con); let drop_state = state.clone(); let on_drop: Box = Box::new(move || { @@ -300,8 +311,9 @@ impl WrappedNormalResponse { tokio::spawn(async move { //tracing::trace!("Dropping connection for this WrappedNormalResponse (with 1s delay for visibility in ui)."); tokio::time::sleep(Duration::from_secs(1)).await; - del_connection(state, &con_key).await; + del_connection(state, &con_key); }); + }); let (a,b) = res.into_parts(); @@ -314,18 +326,23 @@ impl WrappedNormalResponse { impl hyper::body::Body for WrappedNormalResponseBody { type Data = bytes::Bytes; type Error = hyper::Error; + fn poll_frame( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, - ) -> std::task::Poll, Self::Error>>> { - match self.b.frame().poll_unpin(cx) { - Poll::Ready(Some(data)) => Poll::Ready(Some(data)), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending + ) -> Poll, Self::Error>>> { + match self.b.frame().poll_unpin(cx) { + Poll::Ready(Some(Ok(data))) => Poll::Ready(Some(Ok(data))), + Poll::Ready(Some(Err(e))) => { + // Handle error properly here + tracing::error!("Error while polling frame: {:?}", e); + Poll::Ready(Some(Err(e))) + } + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, } } } - fn get_upgrade_type(headers: &HeaderMap) -> Option { // note: this is not really legal for http/1, but in reallity it is used when doing h2c upgrade from http/1 -> http/2.. 
// (http1 normally would only allow in connect but we dont care here) @@ -349,37 +366,41 @@ fn create_proxied_request( target_url: &str, mut request: Request, upgrade_type: Option<&String>, + req_host_name: &str ) -> Result, ProxyError> { - // replace the target uri - *request.uri_mut() = target_url - .parse() - .expect(&format!("the target url is not valid: {:?}", target_url)); - - let uri = request.uri().clone(); - - // replace the host header if it exists - let headers = request.headers_mut(); - if let Some(x) = headers.get_mut(HOST) { - if let Some(new_host) = uri.host() { - tracing::trace!("Replaced original host header: {:?} with {}", x, new_host); - *x = HeaderValue::from_str(new_host).map_err(map_to_err)?; - } - }; + + // replace the uri + let target_uri = target_url.parse::() + .map_err(|e| ProxyError::InvalidUri(e))?; + *request.uri_mut() = target_uri; + + + // we want to pass the original host header to the backend (the one that the client requested) + // and not the one we are connecting to as that might as well just be an internal name or IP. + if let Ok(v) = HeaderValue::from_str(req_host_name) { + _ = request.headers_mut().insert("host",v); + } else { + tracing::warn!("Failed to insert host header for '{req_host_name}'. Falling back to direct hostname call rather than 127.0.0.1."); + _ = request.uri_mut().host().replace(req_host_name); + } + // we will decide to use https or not to the backend ourselves, no need to forward this. + _ = request + .headers_mut() + .remove("upgrade-insecure-requests"); + + // add the upgrade headers back if we are upgrading, so that the backend also knows what to do. if let Some(value) = upgrade_type { - tracing::trace!("Repopulate upgrade headers! :: {value}"); - - request - .headers_mut() - .insert(&*UPGRADE_HEADER, value.parse().map_err(map_to_err)?); - request - .headers_mut() - .insert(&*CONNECTION_HEADER, HeaderValue::from_str(value).map_err(map_to_err)?); + tracing::trace!("Re-populate upgrade headers! 
:: {value}"); + let value_header = HeaderValue::from_str(value).map_err(map_to_err)?; + let headers = request.headers_mut(); + headers.insert(&*UPGRADE_HEADER, value_header.clone()); + headers.insert(&*CONNECTION_HEADER, value_header); } - Ok(request) } + impl From for ProxyError { fn from(err: hyper_util::client::legacy::Error) -> ProxyError { ProxyError::LegacyError(err) @@ -513,21 +534,17 @@ pub async fn h2_stream_test( -async fn add_connection(state:GlobalState,connection:ProxyActiveConnection) -> ConnectionKey { - let id = uuid::Uuid::new_v4(); - let global_state = state.0.read().await; - let mut guard = global_state.statistics.write().expect("should always be able to add connections to state."); - let key = ( - connection.source_addr.clone(), - id - ); - _ = guard.active_connections.insert(key, connection); - key +fn add_connection(state:Arc,connection:ProxyActiveConnection) -> ConnectionKey { + + let id: u64 = crate::generate_unique_id(); + let app_state = state.app_state.clone(); + _ = app_state.statistics.active_connections.insert(id, connection); + id } -async fn del_connection(state:GlobalState,key:&ConnectionKey) { - let global_state = state.0.read().await; - let mut guard = global_state.statistics.write().expect("should always be able to delete connections from state."); +fn del_connection(state:Arc,key:&ConnectionKey) { + let app_state = state.app_state.clone(); + let guard = app_state.statistics.clone(); _ = guard.active_connections.remove(key); } @@ -541,19 +558,20 @@ fn create_connection( target_addr: &str, incoming_known_tls_only: bool ) -> ProxyActiveConnection { - + let uri = req.uri(); let typ_info = ProxyActiveConnectionType::TerminatingHttp { - incoming_scheme: req.uri().scheme_str().unwrap_or(if incoming_known_tls_only { "HTTPS" } else {"HTTP"} ).to_owned(), + incoming_scheme: uri.scheme_str().unwrap_or(if incoming_known_tls_only { "HTTPS" } else {"HTTP"} ).to_owned(), incoming_http_version: format!("{:?}",incoming_http_version), outgoing_http_version: format!("{:?}",target_http_version), outgoing_scheme: target_scheme.to_owned() }; ProxyActiveConnection { + target_name: uri.to_string(), source_addr: client_addr.clone(), target_addr: target_addr.to_owned(), - target: ReverseTcpProxyTarget::from_target(target), + //target: ReverseTcpProxyTarget::from_target(target), creation_time: Local::now(), description: None, connection_type: typ_info diff --git a/src/http_proxy/websockets.rs b/src/http_proxy/websockets.rs index fd2386e..6b960cb 100644 --- a/src/http_proxy/websockets.rs +++ b/src/http_proxy/websockets.rs @@ -33,7 +33,7 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: tracing::trace!("Handling websocket request: {req_host_name:?} --> {req_path}"); - let read_guard = service.state.1.read().await; + let read_guard = service.state.config.read().await; let processes = read_guard.hosted_process.clone().unwrap_or_default(); let remote_targets = read_guard.0.clone().remote_target.unwrap_or_default(); @@ -56,8 +56,10 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: }; let (target_host,port,enforce_https) = match &target { + crate::http_proxy::Target::Remote(x) => { - let next_backend = x.next_backend(&service.state, crate::configuration::v2::BackendFilter::Any).await; + let next_backend = x.next_backend(&service.state, crate::configuration::v2::BackendFilter::Any).await + .ok_or(CustomError(format!("no backend found")))?; ( next_backend.address.clone(), next_backend.port, @@ -65,27 +67,28 @@ pub async fn 
handle_ws(req:Request,service:ReverseProxyService,ws: ) }, crate::http_proxy::Target::Proc(x) => { - let backend_is_https = x.https.unwrap_or_default(); - let port = match x.port { - Some(p) => p, - None => if backend_is_https == true {443} else {80} - }; ( x.host_name.clone(), - port, + x.active_port.unwrap_or_default(), backend_is_https ) } }; + if 0 == port { + tracing::warn!("No port found for target {target_host}"); + return Err(CustomError(format!("no active port found for target {target_host} - possibly process is not running"))); + }; + + let svc_scheme = if service.is_https_only {"wss"} else { "ws" }; let proto = if enforce_https { "wss" } else { req.uri().scheme_str().unwrap_or(svc_scheme) }; let ws_url = format!("{proto}://{target_host}:{}{}",port,req_path); - tracing::info!("initiating websocket tunnel to {}",ws_url); + tracing::debug!("initiating websocket tunnel to {}",ws_url); let client_tls_config = ClientConfig::builder_with_protocol_versions(rustls::ALL_VERSIONS) .with_native_roots() @@ -99,7 +102,7 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: Some(tokio_tungstenite::Connector::Rustls(Arc::new(client_tls_config))) ).await { Ok(x) => { - tracing::info!("Successfully connected to target websocket"); + tracing::debug!("Successfully connected to target websocket"); x }, Err(e) => { @@ -111,8 +114,6 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: let target_version = upstream_client.1.version(); - //let hax : &tokio_tungstenite::MaybeTlsStream = upstream_client.0.get_ref(); - let target_is_tls = match upstream_client.0.get_ref() { tokio_tungstenite::MaybeTlsStream::Rustls(_) => true, _ => false @@ -178,22 +179,14 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: } -async fn add_connection(state:GlobalState,connection:ProxyActiveConnection) -> ConnectionKey { - let id = uuid::Uuid::new_v4(); - let global_state = state.0.read().await; - let mut guard = global_state.statistics.write().expect("should always be able to add statistics"); - let key = ( - connection.source_addr.clone(), - id - ); - _ = guard.active_connections.insert(key, connection); +async fn add_connection(global_state:Arc,connection:ProxyActiveConnection) -> ConnectionKey { + let key = crate::generate_unique_id(); + _ = global_state.app_state.statistics.active_connections.insert(key, connection); key } -async fn del_connection(state:GlobalState,key:&ConnectionKey) { - let global_state = state.0.read().await; - let mut guard = global_state.statistics.write().expect("should always be able to add statistics"); - _ = guard.active_connections.remove(key); +async fn del_connection(global_state:Arc,key:&ConnectionKey) { + _ = global_state.app_state.statistics.active_connections.remove(key); } fn create_connection( @@ -215,9 +208,12 @@ fn create_connection( }; ProxyActiveConnection { + target_name: match target { + Target::Proc(p) => p.host_name.clone(), + Target::Remote(r) => r.host_name.clone() + }, source_addr: client_addr.clone(), target_addr: target_addr.to_owned(), - target: ReverseTcpProxyTarget::from_target(target), creation_time: Local::now(), description: Some(format!("websocket connection")), connection_type: typ_info diff --git a/src/logging.rs b/src/logging.rs index 1020d14..0c78685 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -7,11 +7,11 @@ use tracing_subscriber::layer::Context; use tracing_subscriber::Layer; #[derive(Clone)] -pub (crate) struct LogMsg { - pub (crate) msg: String, - pub (crate) lvl: tracing::Level, - pub (crate) src: 
String, - pub (crate) thread: Option +pub struct LogMsg { + pub msg: String, + pub lvl: tracing::Level, + pub src: String, + pub thread: Option } @@ -101,8 +101,8 @@ impl tracing_subscriber::Layer for NonTuiLoggerLayer { pub struct SharedLogBuffer { - pub (crate) logs: VecDeque, - pub (crate) limit : Option + pub logs: VecDeque, + pub limit : Option } impl SharedLogBuffer { @@ -138,6 +138,7 @@ pub struct TuiLoggerLayer { impl Layer for TuiLoggerLayer { fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<'_, S>) { + let metadata = event.metadata(); diff --git a/src/main.rs b/src/main.rs index ed573f5..52c493c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,18 +3,28 @@ mod types; mod tcp_proxy; mod http_proxy; mod proxy; +use configuration::v2::FullyResolvedInProcessSiteConfig; +use dashmap::DashMap; +use global_state::GlobalState; +use configuration::v2::InProcessSiteConfig; +use configuration::v2::RemoteSiteConfig; +use configuration::OddBoxConfiguration; use http_proxy::ProcMessage; use rustls::pki_types::{CertificateDer, PrivateKeyDer}; use self_update::cargo_crate_version; use tokio::sync::RwLock; use tracing_subscriber::layer::SubscriberExt; use std::fmt::Debug; -use std::{borrow::BorrowMut, sync::Mutex}; +use std::os::linux::raw::stat; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; +use std::sync::Mutex; use std::time::Duration; use types::custom_error::*; use std::collections::HashMap; use std::io::Read; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use tracing_subscriber::filter::LevelFilter; use tracing_subscriber::EnvFilter; mod proc_host; @@ -23,44 +33,44 @@ use crate::types::app_state::ProcState; mod tui; mod api; mod logging; +mod tests; use types::app_state::AppState; +use lazy_static::lazy_static; -pub mod global_state { - use std::collections::HashMap; - - use crate::http_proxy::ConfigWrapper; - - pub (crate) type GlobalState = - - ( std::sync::Arc>, - std::sync::Arc>, - tokio::sync::broadcast::Sender, - - // we store target request counts here outside of the global app-state proxy stats - // as we do not want to lock them at the same time as the proxy stats. - // this is purely done to avoid performance issues. - std::sync::Arc< - tokio::sync::RwLock< - HashMap - > - > - > - > - ); +#[derive(Eq,PartialEq,Debug,Clone,Hash, Serialize, Deserialize)] +pub struct ProcId { id: String } +impl ProcId { + pub fn new() -> Self { + Self { id: uuid::Uuid::new_v4().to_string() } + } } +lazy_static! { + static ref THREAD_MAP: Arc>>> = Arc::new(Mutex::new(HashMap::new())); +} + +static REQUEST_ID_COUNTER: AtomicU64 = AtomicU64::new(1); +pub fn generate_unique_id() -> u64 { + REQUEST_ID_COUNTER.fetch_add(1, Ordering::Relaxed) +} -#[derive(Debug, Clone)] -pub struct TargetRequestCount { - pub request_count : u128 +pub mod global_state { + use std::{collections::HashMap, sync::atomic::AtomicU64}; + #[derive(Debug)] + pub struct GlobalState { + pub app_state: std::sync::Arc, + pub config: std::sync::Arc>, + pub broadcaster: tokio::sync::broadcast::Sender, + pub target_request_counts: dashmap::DashMap, + pub request_count: std::sync::atomic::AtomicUsize + } + } #[derive(Debug)] struct DynamicCertResolver { + // todo: dashmap? 
cache: Mutex>>, } @@ -105,7 +115,7 @@ impl ResolvesServerCert for DynamicCertResolver { return None } if let Ok(private_key) = my_rsa_private_keys(&key_path) { - if let Ok(rsa_signing_key) = rustls::crypto::ring::sign::any_supported_type(&private_key) { + if let Ok(rsa_signing_key) = rustls::crypto::aws_lc_rs::sign::any_supported_type(&private_key) { let result = std::sync::Arc::new(rustls::sign::CertifiedKey::new( cert_chain, rsa_signing_key @@ -139,7 +149,7 @@ fn generate_cert_if_not_exist(hostname: &str, cert_path: &str,key_path: &str) -> let key_exists = std::fs::metadata(key_path).is_ok(); if crt_exists && key_exists { - tracing::info!("Using existing certificate for {}",hostname); + tracing::debug!("Using existing certificate for {}",hostname); return Ok(()) } @@ -147,14 +157,14 @@ fn generate_cert_if_not_exist(hostname: &str, cert_path: &str,key_path: &str) -> return Err(String::from("Missing key or crt for this hostname. Remove both if you want to generate a new set, or add the missing one.")) } - tracing::info!("Generating new certificate for site '{}'",hostname); + tracing::debug!("Generating new certificate for site '{}'",hostname); match rcgen::generate_simple_self_signed( vec![hostname.to_owned()] ) { Ok(cert) => { - tracing::info!("Generating new self-signed certificate for host '{}'!",hostname); + tracing::trace!("Generating new self-signed certificate for host '{}'!",hostname); let _ = std::fs::write(&cert_path, cert.cert.pem()); let _ = std::fs::write(&key_path, &cert.key_pair.serialize_pem()); Ok(()) @@ -221,10 +231,10 @@ struct Args { } -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use serde_json::Result as JsonResult; -use crate::configuration::{ConfigWrapper, EnvVar, LogLevel}; +use crate::configuration::{ConfigWrapper, LogLevel}; #[derive(Deserialize, Debug, Clone)] struct Release { #[allow(dead_code)] html_url: Option, @@ -287,24 +297,113 @@ pub fn initialize_panic_handler() { } - +fn thread_cleaner() { + let mut map = THREAD_MAP.lock().unwrap(); + map.retain(|_k,v| v.upgrade().is_some()); +} #[tokio::main(flavor="multi_thread")] async fn main() -> anyhow::Result<()> { - - initialize_panic_handler(); + // spawn thread cleaner and loop ever 1 second + tokio::spawn(async move { + loop { + thread_cleaner(); + tokio::time::sleep(Duration::from_secs(1)).await; + } + }); let args = Args::parse(); + + let (filter, reload_handle) = tracing_subscriber::reload::Layer::new( + EnvFilter::from_default_env() + .add_directive("h2=info".parse().expect("this directive will always work")) + .add_directive("tokio_util=info".parse().expect("this directive will always work")) + .add_directive("hyper=info".parse().expect("this directive will always work"))); + + initialize_panic_handler(); + + + + let (tx,_) = tokio::sync::broadcast::channel::(33); + + + let inner_state = AppState::new(); + let temp_cfg = ConfigWrapper::wrapv2(configuration::v2::OddBoxV2Config { + version: configuration::OddBoxConfigVersion::V2, + root_dir: None, + log_level: None, + alpn: None, + port_range_start: 4000, + default_log_format: configuration::LogFormat::standard, + ip: None, + http_port: None, + tls_port: None, + auto_start: None, + env_vars: vec![], + remote_target: None, + hosted_process: None, + admin_api_port: None, + path: None + }); { + + }; + + let shared_config = std::sync::Arc::new( + tokio::sync::RwLock::new(temp_cfg) + ); + + let inner_state_arc = std::sync::Arc::new(inner_state); + + let global_state = Arc::new(crate::global_state::GlobalState { + app_state: 
inner_state_arc.clone(), + config: shared_config.clone(), + broadcaster:tx.clone(), + target_request_counts: DashMap::new(), + request_count: std::sync::atomic::AtomicUsize::new(0) + }); + + + + let tracing_broadcaster = tokio::sync::broadcast::Sender::::new(10); + + let mut tui_thread = None; + + // tui is explicit opt out via arg only + match args.tui { + Some(false) => {}, + _ => { + tui::init(); + tui_thread = Some(tokio::task::spawn(tui::run( + global_state.clone(), + tx.clone(), + tracing_broadcaster.clone(), + filter + ))) + }, + } + + + let running = Arc::new(AtomicBool::new(true)); + let r = running.clone(); + let cstate = global_state.clone(); + ctrlc::set_handler(move || { + cstate.app_state.exit.store(false, std::sync::atomic::Ordering::SeqCst); + r.store(false, std::sync::atomic::Ordering::SeqCst); + }).expect("Error setting Ctrl-C handler"); + + + + if args.update { _ = update().await; return Ok(()); } if args.generate_example_cfg { - let cfg = crate::configuration::v1::example_v1(); + let cfg = crate::configuration::v2::OddBoxV2Config::example(); let serialized = toml::to_string_pretty(&cfg).unwrap(); std::fs::write("odd-box-example-config.toml", serialized).unwrap(); return Ok(()) @@ -331,7 +430,7 @@ async fn main() -> anyhow::Result<()> { let mut config: ConfigWrapper = - ConfigWrapper(match configuration::Config::parse(&contents) { + ConfigWrapper(match configuration::OddBoxConfig::parse(&contents) { Ok(configuration) => configuration.try_upgrade_to_latest_version().expect("configuration upgrade failed. this is a bug in odd-box"), Err(e) => anyhow::bail!(e), }); @@ -378,11 +477,15 @@ async fn main() -> anyhow::Result<()> { None => LevelFilter::INFO }; - let tracing_broadcaster = tokio::sync::broadcast::Sender::::new(10); + reload_handle.reload(EnvFilter::from_default_env() + .add_directive("h2=info".parse().expect("this directive will always work")) + .add_directive("tokio_util=info".parse().expect("this directive will always work")) + .add_directive("hyper=info".parse().expect("this directive will always work")).add_directive(log_level.into())).expect("Failed to reload filter"); + + - let use_tui = args.tui.unwrap_or_default(); - if !use_tui { + if tui_thread.is_none() { let fmt_layer = tracing_subscriber::fmt::layer() .compact() .with_thread_names(true) @@ -428,94 +531,35 @@ async fn main() -> anyhow::Result<()> { } } + // replace initial empty config with resolved one + let mut cfg_write_guard = shared_config.write().await; + *cfg_write_guard = config; - let sites_len = config.hosted_process.as_ref().and_then(|x|Some(x.len())).unwrap_or_default() as u16; - - let (tx,_) = tokio::sync::broadcast::channel::(sites_len.max(1).into()); - - - - - let shared_config = std::sync::Arc::new(tokio::sync::RwLock::new(config)); - let mut inner_state = AppState::new(); - - for x in shared_config.read().await.remote_target.as_ref().unwrap_or(&vec![]) { - inner_state.site_states_map.insert(x.host_name.to_owned(), ProcState::Remote); + for x in cfg_write_guard.remote_target.as_ref().unwrap_or(&vec![]) { + inner_state_arc.site_status_map.insert(x.host_name.to_owned(), ProcState::Remote); } - let shared_state : crate::global_state::GlobalState = - // CURRENT APPSTATE - (std::sync::Arc::new( - tokio::sync::RwLock::new( - inner_state - ) - ), - - // CONFIGURATION - shared_config.clone(), - - // BROADCASTER - tx.clone(), - - // TARGET REQUEST COUNTS FOR LB FEATURE - Arc::new(RwLock::new(HashMap::new())) - ); - - let mut shared_write_guard = shared_config.write().await; - - let 
global_auto_start_default_value = shared_write_guard.auto_start.clone(); - let port_range_start = shared_write_guard.port_range_start.clone(); - - let mut_procs = shared_write_guard.hosted_process.borrow_mut(); - if let Some(processes) = mut_procs { - for (i, x) in processes.iter_mut().enumerate() { - - let auto_port = port_range_start + i as u16; - // if a custom PORT variable is set, we use it - if let Some(cp) = x.env_vars.iter().find(|x| x.key.to_lowercase() == "port") { - let custom_port = cp.value.parse::()?; - - if custom_port > sites_len { - tracing::warn!("Using custom port for {} as specified in configuration! ({})", x.host_name, custom_port); - x.set_port(custom_port); - } else if custom_port < 1 { - anyhow::bail!("Invalid port configured for {}: {}.", x.host_name, cp.value); - } else { - anyhow::bail!("Invalid port configured for {}: {}. Please use a port number above {}", - x.host_name, cp.value, sites_len + port_range_start); - } - } else if let Some(p) = x.port { - x.env_vars.push(EnvVar { key: "PORT".to_string(), value: p.to_string() }); - } - else { - x.set_port(auto_port); - x.env_vars.push(EnvVar { key: "PORT".to_string(), value: auto_port.to_string() }); - } - - - tracing::trace!("Initializing {} on port {:?}", x.host_name, x.port); - + for x in cfg_write_guard.hosted_process.iter().flatten() { + let resolved_proc = cfg_write_guard.resolve_process_configuration(&x)?; + tokio::task::spawn(proc_host::host( + resolved_proc, + tx.subscribe(), + global_state.clone(), + )); + } - if x.auto_start.is_none() { - x.auto_start = global_auto_start_default_value; - } - tokio::task::spawn(proc_host::host( - x.clone(), - tx.subscribe(), - shared_state.clone() - )); - } - } + drop(cfg_write_guard); - drop(shared_write_guard); let shared_read_guard = shared_config.read().await; + + let srv_ip = if let Some(ip) = shared_read_guard.ip { ip.to_string() } else { "127.0.0.1".to_string() }; let arced_tx = std::sync::Arc::new(tx.clone()); let shutdown_signal = Arc::new(tokio::sync::Notify::new()); @@ -526,12 +570,12 @@ async fn main() -> anyhow::Result<()> { format!("{srv_ip}:{srv_port}").parse().expect("bind address for http must be valid.."), format!("{srv_ip}:{srv_tls_port}").parse().expect("bind address for https must be valid.."), arced_tx.clone(), - shared_state.clone(), + global_state.clone(), shutdown_signal )); let api_port = shared_read_guard.admin_api_port.clone(); - let api_state = shared_state.clone(); + let api_state = global_state.clone(); let api_broadcaster = tracing_broadcaster.clone(); @@ -541,37 +585,28 @@ async fn main() -> anyhow::Result<()> { api::run(api_state,api_port, api_broadcaster).await }); - + let use_tui = tui_thread.is_some(); - if use_tui { - tui::init(); - tui::run(EnvFilter::from_default_env() - .add_directive(log_level.into()) - .add_directive("h2=info".parse().expect("this directive will always work")) - .add_directive("tokio_util=info".parse().expect("this directive will always work")) - .add_directive("hyper=info".parse().expect("this directive will always work")),shared_state.clone(),tx.clone(),tracing_broadcaster.clone()).await; + if let Some(tui) = tui_thread{ + _ = tui.await; } else { - let mut stdin = std::io::stdin(); - let mut buf : [u8;1] = [0;1]; - loop { - _ = stdin.read_exact(&mut buf); - if buf[0] == 3 || buf[0] == 113 { - tracing::info!("bye: {:?}",buf); + loop { + if running.load(std::sync::atomic::Ordering::SeqCst) != true { + tracing::info!("leaving main loop"); break; - } else { - tracing::info!("press 'q' or ctrl-c to quit, not 
{:?}",buf); } - tokio::time::sleep(Duration::from_millis(100)).await; - - } + tokio::time::sleep(Duration::from_millis(500)).await; + } } { tracing::warn!("Changing application state to EXIT"); - let mut state = shared_state.0.write().await; - state.exit = true; + global_state.app_state.exit.store(true, std::sync::atomic::Ordering::SeqCst); } + + + if use_tui { println!("Waiting for processes to stop.."); } else { @@ -583,15 +618,15 @@ async fn main() -> anyhow::Result<()> { } { - let state = shared_state.0.read().await; - for (name,status) in state.site_states_map.iter().filter(|x|x.1!=&ProcState::Remote) { - if use_tui { - println!("{name} ==> {status:?}") - } else { - tracing::info!("{name} ==> {status:?}") - } + for guard in global_state.app_state.site_status_map.iter().filter(|x|x.value()!=&ProcState::Remote) { + let (name,status) = guard.pair(); + if use_tui { + println!("{name} ==> {status:?}") + } else { + tracing::info!("{name} ==> {status:?}") } - } + } +} if use_tui { println!("Performing cleanup, please wait.."); @@ -612,6 +647,5 @@ async fn main() -> anyhow::Result<()> { _ = proxy_thread.await.ok(); Ok(()) - - } + diff --git a/src/proc_host.rs b/src/proc_host.rs index 93decce..a945932 100644 --- a/src/proc_host.rs +++ b/src/proc_host.rs @@ -1,33 +1,55 @@ -use crate::configuration::LogFormat; -use crate::global_state::GlobalState; +use crate::configuration::{EnvVar, LogFormat}; +use crate::global_state::{self, GlobalState}; use crate::http_proxy::ProcMessage; use crate::types::app_state::ProcState; use std::collections::HashMap; use std::io::Write; use std::process::{Command, Stdio}; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; use std::time::Duration; -#[cfg(target_os="windows")] -use std::os::windows::process::CommandExt; -pub (crate) async fn host( - proc: crate::configuration::v2::InProcessSiteConfig, +pub async fn host( + mut resolved_proc: crate::configuration::v2::FullyResolvedInProcessSiteConfig, mut rcv:tokio::sync::broadcast::Receiver, - state: GlobalState + state: Arc ) { - // if auto_start is not set in the config, we assume that user wants to start site automatically like before - let mut enabled = proc.auto_start.unwrap_or(true); - if proc.disabled == Some(true) { - enabled = false; + let my_arc = std::sync::Arc::new(AtomicBool::new(true)); + { + let thread_map_guard = crate::THREAD_MAP.clone(); + let mut thread_map = thread_map_guard.lock().unwrap(); + thread_map.insert(resolved_proc.proc_id.clone(), std::sync::Arc::::downgrade(&my_arc)); } - let disabled = proc.disabled.is_some_and(|x|x==true); + + // if auto_start is not set in the config, we assume that user wants to start site automatically like before + let mut enabled = { + // if auto_start is at all set for the specific process, use that value, otherwise use the global value + // and otherwise fallback to assume that the site should be started automatically. + match resolved_proc.auto_start { + Some(v) => v, + None => { + let guard = state.config.read().await; + guard.auto_start.unwrap_or(true) + } + } + }; + + + // previously this was configured separately in config via the "disabled" prop, but we now use auto_start + // as a combined prop for this. If auto_start is set to false, we will not start the site automatically also + // when using the "start_all_sites" command. 
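+    // For example (illustrative): a process with auto_start = false is simply not launched when
+    // odd-box boots, and a process with exclude_from_start_all = true is additionally skipped when
+    // a ProcMessage::StartAll (or Start("all")) command is broadcast - it then has to be started
+    // explicitly, e.g. via ProcMessage::Start with its own host name.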
+ let excluded_from_auto_start = resolved_proc.excluded_from_start_all; + + + let mut initialized = false; - let domsplit = proc.host_name.split(".").collect::>(); + let domsplit = resolved_proc.host_name.split(".").collect::>(); - let mut acceptable_names = vec![proc.host_name.clone()]; + let mut acceptable_names = vec![resolved_proc.host_name.clone()]; if domsplit.len() > 0 { acceptable_names.push(domsplit[0].to_owned()); @@ -35,40 +57,44 @@ pub (crate) async fn host( let re = regex::Regex::new(r"^\d* *\[.*?\] .*? - ").expect("host regex always works"); + + let mut selected_port: Option = None; + loop { + + + let exit = state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true; + + if exit { + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + tracing::debug!("exiting host for {}",&resolved_proc.host_name); + break + } + if initialized == false { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Stopped); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); initialized = true; } else { - let exit = { - let guard = state.0.read().await; - guard.exit - }; - if exit { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Stopped); - tracing::debug!("exiting host for {}",&proc.host_name); - break - } + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + } let is_enabled_before = enabled == true; while let Ok(msg) = rcv.try_recv() { match msg { - ProcMessage::StartAll if disabled => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&proc.host_name), - ProcMessage::Start(s) if disabled && s == "all" => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&proc.host_name), + ProcMessage::StartAll if excluded_from_auto_start => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&resolved_proc.host_name), + ProcMessage::Start(s) if excluded_from_auto_start && s == "all" => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&resolved_proc.host_name), ProcMessage::Delete(s,sender) => { if acceptable_names.contains(&s) { - tracing::warn!("[{}] Dropping due to having been deleted by proxy.", proc.host_name); - let mut guard = state.0.write().await; - guard.site_states_map.remove(&proc.host_name); + tracing::warn!("[{}] Dropping due to having been deleted by proxy.", resolved_proc.host_name); + state.app_state.site_status_map.remove(&resolved_proc.host_name); match sender.send(0).await { Ok(_) => {}, - Err(e) => {tracing::warn!("Failed to send confirmation to proxy service that we stopped! {e:?}")}, + Err(e) => {tracing::warn!("Failed to send confirmation to proxy service that we stopped! 
{e:?}") + }, } return } @@ -92,57 +118,99 @@ pub (crate) async fn host( if !enabled { if enabled != is_enabled_before { - tracing::info!("[{}] Disabled via command from proxy service",&proc.host_name); + tracing::info!("[{}] Disabled via command from proxy service",&resolved_proc.host_name); { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Stopped); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); } } - tokio::time::sleep(Duration::from_millis(500)).await; continue; } if enabled != is_enabled_before { - tracing::info!("[{}] Enabled via command from proxy service",&proc.host_name); + tracing::info!("[{}] Enabled via command from proxy service",&resolved_proc.host_name); } - { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Starting); - } + + + if selected_port == None { + + let mut guard = state.config.write().await; + + if let Ok(p) = guard.set_active_port(&mut resolved_proc) { + selected_port = Some(p); + } + + if selected_port.is_none() { + let ms = 3000; + tracing::warn!("[{}] No usable port found. Waiting for {}ms before retrying..",&resolved_proc.host_name,ms); + tokio::time::sleep(Duration::from_millis(ms)).await; + continue; + } + - tracing::info!("[{}] Executing command '{}' in directory '{}'",proc.host_name,proc.bin,proc.dir); + } + else { + tracing::info!("[{}] Using the previously selected port '{}'",&resolved_proc.host_name,selected_port.unwrap()); + } - let mut bin_path = std::path::PathBuf::from(&proc.dir); - bin_path.push(&proc.bin); + let current_work_dir = std::env::current_dir().expect("could not get current directory").to_str().expect("could not convert current directory to string").to_string(); + let workdir = &resolved_proc.dir.clone().unwrap_or(current_work_dir); + + tracing::warn!("[{}] Executing command '{}' in directory '{}'",resolved_proc.host_name,resolved_proc.bin,workdir); + + let mut bin_path = std::path::PathBuf::from(&workdir); + bin_path.push(&resolved_proc.bin); + let mut process_specific_environment_variables = HashMap::new(); - for kvp in &proc.env_vars.clone(){ - tracing::debug!("[{}] ADDING ENV VAR '{}': {}", &proc.host_name,&kvp.key,&kvp.value); - process_specific_environment_variables.insert(kvp.key.clone(), kvp.value.clone()); - } - { - let state_guard = state.1.read().await; + let state_guard = state.config.read().await; for kvp in &state_guard.env_vars.clone() { - tracing::debug!("[{}] ADDING GLOBAL ENV VAR '{}': {}", &proc.host_name,&kvp.key,&kvp.value); + tracing::debug!("[{}] ADDING GLOBAL ENV VAR '{}': {}", &resolved_proc.host_name,&kvp.key,&kvp.value); process_specific_environment_variables.insert(kvp.key.clone(), kvp.value.clone()); } } + // more specific env vars should override globals + for kvp in &resolved_proc.env_vars.clone().unwrap_or_default() { + tracing::debug!("[{}] ADDING ENV VAR '{}': {}", &resolved_proc.host_name,&kvp.key,&kvp.value); + process_specific_environment_variables.insert(kvp.key.clone(), kvp.value.clone()); + } + + let port = selected_port + .expect("it should not be possible to start a process without a port first having been chosen - this is a bug in odd-box").to_string(); + + process_specific_environment_variables.insert("PORT".into(), port.clone()); + + + let mut pre_resolved_args = resolved_proc.args.clone().unwrap_or_default(); + + for p in &mut pre_resolved_args { + *p = p.replace("$port",&port); + } + + + 
state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Starting); + + + + const _CREATE_NO_WINDOW: u32 = 0x08000000; #[cfg(target_os = "windows")] const DETACHED_PROCESS: u32 = 0x00000008; + + #[cfg(target_os="windows")] + use std::os::windows::process::CommandExt; #[cfg(target_os = "windows")] let cmd = Command::new(bin_path) - .args(proc.args.clone()) + .args(pre_resolved_args) .envs(&process_specific_environment_variables) - .current_dir(&proc.dir) + .current_dir(&workdir) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .stdin(Stdio::null()) @@ -151,21 +219,20 @@ pub (crate) async fn host( #[cfg(not(target_os = "windows"))] let cmd = Command::new(bin_path) - .args(proc.args.clone()) + .args(pre_resolved_args) .envs(&process_specific_environment_variables) - .current_dir(&proc.dir) + .current_dir(&workdir) .stdout(Stdio::piped()) .stderr(Stdio::piped()) - .stdin(Stdio::null()).spawn(); + .stdin(Stdio::null()) + .spawn(); match cmd { Ok(mut child) => { - { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Running); - } + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Running); + //let stdin = child.stdin.take().expect("Failed to capture stdin"); @@ -175,9 +242,9 @@ pub (crate) async fn host( let stdout_reader = std::io::BufReader::new(stdout); let stderr_reader = std::io::BufReader::new(stderr); - let procname = proc.host_name.clone(); + let procname = resolved_proc.host_name.clone(); let reclone = re.clone(); - let logformat = proc.log_format.clone(); + let logformat = resolved_proc.log_format.clone(); _ = std::thread::Builder::new().name(format!("{procname}")).spawn(move || { let mut current_log_level = 0; @@ -185,7 +252,7 @@ pub (crate) async fn host( for line in std::io::BufRead::lines(stdout_reader) { if let Ok(line) = line{ - // should move custom logging elsewhere if theres ever more than one + // todo: should move custom logging elsewhere if theres ever more than one if let Some(LogFormat::dotnet) = &logformat { if line.len() > 0 { let mut trimmed = reclone.replace(&line, "").to_string(); @@ -219,7 +286,7 @@ pub (crate) async fn host( } }); - let procname = proc.host_name.clone(); + let procname = resolved_proc.host_name.clone(); _ = std::thread::Builder::new().name(format!("{procname}")).spawn(move || { for line in std::io::BufRead::lines(stderr_reader) { if let Ok(line) = line{ @@ -231,16 +298,10 @@ pub (crate) async fn host( }); while let Ok(None) = child.try_wait() { - let exit = { - let guard = state.0.read().await; - guard.exit - } ; + let exit = state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true; if exit { - tracing::info!("[{}] Stopping due to app exit", proc.host_name); - { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Stopping); - } + tracing::info!("[{}] Stopping due to app exit", resolved_proc.host_name); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopping); _ = child.kill(); break } @@ -250,9 +311,8 @@ pub (crate) async fn host( match msg { ProcMessage::Delete(s,sender) => { if acceptable_names.contains(&s) { - tracing::warn!("[{}] Dropping due to having been deleted by proxy.", proc.host_name); - let mut guard = state.0.write().await; - guard.site_states_map.remove(&proc.host_name); + tracing::warn!("[{}] Dropping due to having been deleted by proxy.", resolved_proc.host_name); + 
state.app_state.site_status_map.remove(&resolved_proc.host_name); if let Some(mut stdin) = child.stdin.take() { _ = stdin.write_all(b"q"); } @@ -260,7 +320,9 @@ pub (crate) async fn host( // inform sender that we actually stopped the process and that we are exiting our loop match sender.send(0).await { Ok(_) => {}, - Err(e) => {tracing::warn!("Failed to send confirmation to proxy service that we stopped! {e:?}")}, + Err(e) => { + tracing::warn!("Failed to send confirmation to proxy service that we stopped! {e:?}") + }, } return } @@ -277,36 +339,41 @@ pub (crate) async fn host( } } if !enabled { - tracing::warn!("[{}] Stopping due to having been disabled by proxy.", proc.host_name); + tracing::warn!("[{}] Stopping due to having been disabled by proxy.", resolved_proc.host_name); // note: we just send q here because some apps like iisexpress requires it - { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Stopping); - } + + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopping); + if let Some(mut stdin) = child.stdin.take() { _ = stdin.write_all(b"q"); } _ = child.kill(); - break; } tokio::time::sleep(Duration::from_millis(100)).await; } - let mut guard = state.0.write().await; - guard.site_states_map.insert(procname, ProcState::Stopped); - tracing::warn!("[{}] Stopped.",proc.host_name) + state.app_state.site_status_map.insert(procname, ProcState::Stopped); + }, Err(e) => { - tracing::info!("[{}] Failed to start! {e:?}",proc.host_name); - { - let mut guard = state.0.write().await; - guard.site_states_map.insert(proc.host_name.clone(), ProcState::Faulty); - } + tracing::info!("[{}] Failed to start! {e:?}",resolved_proc.host_name); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Faulty); }, } - - tokio::time::sleep(Duration::from_millis(200)).await; + let mut time_to_sleep_ms = 500; + if enabled { + if !state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) { + tracing::warn!("[{}] Stopped unexpectedly. Will automatically restart the process in 5 seconds.",resolved_proc.host_name); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Faulty); + time_to_sleep_ms = 5000; // wait 5 seconds before restarting but NOT in here as we have a lock + } else { + tracing::info!("[{}] Stopped due to exit signal. 
Will not restart.",resolved_proc.host_name); + break + } + } + + tokio::time::sleep(Duration::from_millis(time_to_sleep_ms)).await; } } diff --git a/src/proxy.rs b/src/proxy.rs index ab7d0df..fc42f8f 100644 --- a/src/proxy.rs +++ b/src/proxy.rs @@ -2,12 +2,21 @@ use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use futures_util::task::UnsafeFutureObj; +use hyper::body::Incoming; +use hyper::Method; +use hyper::Uri; use hyper_rustls::ConfigBuilderExt; +use hyper_util::client::legacy::connect::Connection; +use lazy_static::lazy_static; +use reqwest::Request; use socket2::Socket; use tokio::net::TcpSocket; use tokio::net::TcpStream; use tokio::sync::Notify; use tokio_rustls::TlsAcceptor; +use tungstenite::util::NonBlockingResult; +use url::Url; use crate::configuration::ConfigWrapper; use crate::global_state::GlobalState; @@ -28,7 +37,7 @@ pub async fn listen( bind_addr: SocketAddr, bind_addr_tls: SocketAddr, tx: std::sync::Arc>, - state: GlobalState, + state: Arc, shutdown_signal: Arc ) { @@ -40,20 +49,50 @@ pub async fn listen( }); let client_tls_config = rustls::ClientConfig::builder_with_protocol_versions(rustls::ALL_VERSIONS) + // todo - add support for accepting self-signed certificates etc + // .dangerous() + // .with_custom_certificate_verifier(verifier) + .with_native_roots() - .expect("should always be able to build a tls client") + .unwrap() .with_no_client_auth(); + + + let https_builder = + hyper_rustls::HttpsConnectorBuilder::default().with_tls_config(client_tls_config); + + let connector: hyper_rustls::HttpsConnector = + https_builder.https_or_http().enable_all_versions().build(); + + let executor = hyper_util::rt::TokioExecutor::new(); + + + let client : hyper_util::client::legacy::Client, hyper::body::Incoming> = + hyper_util::client::legacy::Builder::new(executor.clone()) + .http2_only(false) + .build(connector.clone()); + + let h2_client : hyper_util::client::legacy::Client, hyper::body::Incoming> = + hyper_util::client::legacy::Builder::new(executor) + .http2_only(true) + .build(connector); + + + // let c2 = reqwest::Client::builder().build().unwrap(); + // let what = c2.execute(Request::new(Method::DELETE, Url::parse("what").unwrap())); + + let terminating_proxy_service = ReverseProxyService { state:state.clone(), remote_addr: None, tx:tx.clone(), is_https_only:false, - client_tls_config: client_tls_config + client, + h2_client }; - - + tokio::join!( @@ -81,11 +120,15 @@ pub async fn listen( } +// a lazy static atomic usize to keep count of active tcp connections: +lazy_static! 
{ + static ref ACTIVE_TCP_CONNECTIONS: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0); +} async fn listen_http( bind_addr: SocketAddr, tx: std::sync::Arc>, - state: GlobalState, + state: Arc, terminating_service_template: ReverseProxyService, targets: Arc, shutdown_signal: Arc @@ -94,37 +137,50 @@ async fn listen_http( let socket = TcpSocket::new_v4().expect("new v4 socket should always work"); socket.set_reuseaddr(true).expect("set reuseaddr fail?"); socket.bind(bind_addr).expect(&format!("must be able to bind http serveraddr {bind_addr:?}")); - let listener = socket.listen(128).expect("must be able to bind http listener."); + + let listener = socket.listen(3000).expect("must be able to bind http listener."); + + // // TODO - let shutdown_signal = shutdown_signal.clone(); + // _ = shutdown_signal.notified() => { + // // eprintln!("stream aborted due to app shutdown."); break + // // } loop { - //tracing::trace!("waiting for new http connection.."); - - tokio::select!{ - Ok((tcp_stream, source_addr)) = listener.accept() => { - let shutdown_signal = shutdown_signal.clone(); - tracing::trace!("tcp listener accepted a new http connection"); + if state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) { + tracing::debug!("exiting http server loop due to receiving shutdown signal."); + break; + } + + if ACTIVE_TCP_CONNECTIONS.load(std::sync::atomic::Ordering::SeqCst) >= 666 { + tokio::time::sleep(Duration::from_millis(10)).await; + continue; + } + + match listener.accept().await { + Ok((tcp_stream,source_addr)) => { + + let c = ACTIVE_TCP_CONNECTIONS.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + tracing::trace!("accepted connection! current active: {}", 1+c); let mut service: ReverseProxyService = terminating_service_template.clone(); service.remote_addr = Some(source_addr); - let targets = targets.clone().reverse_tcp_proxy_targets().await; + let arc_clone_targets = targets.clone(); let tx = tx.clone(); let state = state.clone(); - tokio::spawn( async move { - tokio::select!{ - _ = handle_new_tcp_stream(None,service, tcp_stream, source_addr, targets, false,tx.clone(),state.clone()) => { - tracing::trace!("http tcp stream handled") - } - _ = shutdown_signal.notified() => { - eprintln!("stream aborted due to app shutdown."); - } - }; + tokio::spawn(async move { + handle_new_tcp_stream(None,service, tcp_stream, source_addr, arc_clone_targets, false,tx.clone(),state.clone()) + .await; + ACTIVE_TCP_CONNECTIONS.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); }); - }, - _ = shutdown_signal.notified() => { - tracing::debug!("exiting http server loop due to receiving shutdown signal."); - break; + + + } + Err(e) => { + tracing::warn!("error accepting tcp connection: {:?}", e); + //break; } } + } } @@ -153,7 +209,7 @@ async fn accept_tcp_stream_via_tls_terminating_proxy_service( async fn listen_https( bind_addr: SocketAddr, tx: std::sync::Arc>, - state: GlobalState, + state: Arc, terminating_service_template: ReverseProxyService, targets: Arc, shutdown_signal: Arc @@ -184,7 +240,7 @@ async fn listen_https( cache: std::sync::Mutex::new(HashMap::new()) })); - if let Some(true) = state.1.read().await.alpn { + if let Some(true) = state.config.read().await.alpn { rustls_config.alpn_protocols.push("h2".into()); rustls_config.alpn_protocols.push("http/1.1".into()); } @@ -193,7 +249,7 @@ async fn listen_https( loop { //tracing::trace!("waiting for new https connection.."); - let targets = targets.clone(); + let targets_arc = targets.clone(); tokio::select!{ 
Ok((tcp_stream, source_addr)) = tcp_listener.accept() => { @@ -201,13 +257,13 @@ async fn listen_https( let mut service: ReverseProxyService = terminating_service_template.clone(); service.remote_addr = Some(source_addr); let shutdown_signal = shutdown_signal.clone(); - let targets = targets.clone().reverse_tcp_proxy_targets().await; + let arc_targets_clone = targets_arc.clone(); let tx = tx.clone(); let arced_tls_config = arced_tls_config.clone(); let state = state.clone(); tokio::task::spawn(async move { tokio::select!{ - _ = handle_new_tcp_stream(Some(arced_tls_config),service, tcp_stream, source_addr, targets, true,tx.clone(),state.clone()) => { + _ = handle_new_tcp_stream(Some(arced_tls_config),service, tcp_stream, source_addr, arc_targets_clone, true,tx.clone(),state.clone()) => { tracing::trace!("https tcp stream handled"); } @@ -224,6 +280,7 @@ async fn listen_https( } } } + tracing::warn!("listen_https went bye bye.") } // this will peek in to the incoming tcp stream and either create a direct tcp tunnel (passthru mode) @@ -233,24 +290,19 @@ async fn handle_new_tcp_stream( service:ReverseProxyService, tcp_stream:TcpStream, source_addr:SocketAddr, - targets: Vec, + targets: Arc, incoming_connection_is_on_tls_port: bool, tx: std::sync::Arc>, - state: GlobalState, + state: Arc, ) { - //tracing::warn!("handle_new_tcp_stream!"); - - { - let s = state.0.read().await; - let mut guard = s.statistics.write().expect("must always be able to write stats"); - guard.received_tcp_connections += 1; - } + let n = state.request_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + //tracing::warn!("handle_new_tcp_stream ({})!",n+1); //tracing::info!("handle_new_tcp_stream called with expect tls: {expect_tls}"); let targets = targets.clone(); - + _ = tcp_stream.set_linger(None); match tcp_proxy::ReverseTcpProxy::peek_tcp_stream(&tcp_stream, source_addr).await { @@ -262,15 +314,14 @@ async fn handle_new_tcp_stream( target_host: Some(target) }) if incoming_connection_is_on_tls_port == false => { - if let Some(target) = tcp_proxy::ReverseTcpProxy::try_get_target_from_vec(targets, &target) { + if let Some(target) = targets.try_find(move |p|tcp_proxy::ReverseTcpProxy::req_target_filter_map(p,&target )).await { if target.backends.iter().any(|x|x.https.unwrap_or_default()==false) { if target.is_hosted { let proc_state = { - let guard = state.0.read().await; - match guard.site_states_map.get(&target.host_name) { + match state.app_state.site_status_map.get(&target.host_name) { Some(v) => Some(v.clone()), _ => None } @@ -290,12 +341,15 @@ async fn handle_new_tcp_stream( tokio::time::sleep(Duration::from_secs(5)).await; tracing::debug!("handling an incoming request to a stopped target, waiting for up to 10 seconds for {thn} to spin up - after this we will release the request to the terminating proxy and show a 'please wait' page instaead."); { - let guard = state.0.read().await; - match guard.site_states_map.get(&target.host_name) { - Some(&ProcState::Running) => { - has_started = true; - break - }, + match state.app_state.site_status_map.get(&target.host_name) { + Some(my_ref) => { + match my_ref.value() { + app_state::ProcState::Running => { + has_started = true; + break + }, + _ => { } + } }, _ => { } } } @@ -329,13 +383,13 @@ async fn handle_new_tcp_stream( Ok(PeekResult { typ: DataType::TLS, http_version:_, - target_host: Some(target) + target_host: Some(target_host_name) }) if incoming_connection_is_on_tls_port => { - if let Some(target) = 
tcp_proxy::ReverseTcpProxy::try_get_target_from_vec(targets, &target) { + if let Some(target) = targets.try_find(move |p|tcp_proxy::ReverseTcpProxy::req_target_filter_map(&p, &target_host_name)).await { if target.backends.iter().any(|x|x.https.unwrap_or_default()) { // at least one backend has https enabled so we will use the tls tunnel mode to there - tracing::info!("USING TCP PROXY FOR TLS TUNNEL TO TARGET {target:?}"); + tracing::trace!("USING TCP PROXY FOR TLS TUNNEL TO TARGET {target:?}"); tcp_proxy::ReverseTcpProxy::tunnel(tcp_stream, target, true,state.clone(),source_addr).await; return; } else { @@ -358,12 +412,9 @@ async fn handle_new_tcp_stream( accept_tcp_stream_via_tls_terminating_proxy_service(tcp_stream, source_addr, tls_acceptor, service).await } else { tracing::trace!("handing off clear text tcp stream to terminating proxy!"); - let io = hyper_util::rt::TokioIo::new(tcp_stream); - http_proxy::serve(service, SomeIo::Http(io)).await; - } - - - - + let io = hyper_util::rt::TokioIo::new(tcp_stream); + + http_proxy::serve(service, SomeIo::Http(io)).await + }; } diff --git a/src/tcp_proxy/http1.rs b/src/tcp_proxy/http1.rs index 400fb85..5c9e4ff 100644 --- a/src/tcp_proxy/http1.rs +++ b/src/tcp_proxy/http1.rs @@ -1,44 +1,64 @@ -pub (crate) fn is_valid_http_request(bytes: &[u8]) -> bool { - // Convert bytes to string for easy manipulation; HTTP is ASCII based. - let request_str = match std::str::from_utf8(bytes) { - Ok(s) => s, - Err(_) => return false, // Not valid UTF-8, unlikely to be a valid HTTP request. - }; +use anyhow::bail; +use hyper::http::Version; - // HTTP methods to check against. - let methods = [ - "GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH", "CONNECT", +// note: this method is performance critical, be careful when changing it +// todo: add as many tests as possible to ensure this method is correct and performant +pub fn is_valid_http_request(bytes: &[u8]) -> anyhow::Result { + + const METHODS: &[&[u8]] = &[ + b"GET ", b"POST ", b"PUT ", b"DELETE ", b"HEAD ", b"OPTIONS ", b"PATCH ", b"CONNECT ", b"TRACE ", ]; - // Check if the request starts with a known HTTP method followed by a space. - let valid_start = methods - .iter() - .any(|&method| request_str.starts_with(&format!("{method} /"))); + let mut method_found = false; + for &method in METHODS { + if bytes.starts_with(method) { + method_found = true; + break; + } + } + if !method_found { + bail!("this is not a http request. no method found"); + } - // Check if the request contains a valid HTTP version. - let valid_version = - request_str.contains("HTTP/1.1\r\n") || request_str.contains("HTTP/1.0\r\n"); + let version = if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/1.1\r\n") { + Version::HTTP_11 + } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/1.0\r\n") { + Version::HTTP_10 + } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/2.0\r\n") { + Version::HTTP_2 + } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/3.0\r\n") { + Version::HTTP_3 + } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/0.9\r\n") { + Version::HTTP_09 + } else { + if let Some(start) = bytes.windows(6).position(|window| window.starts_with(b"HTTP/")) { + let end = start + 8; // "HTTP/x.x" + if end <= bytes.len() { + let version_str = String::from_utf8_lossy(&bytes[start..end]); + bail!("unsupported http method: {}", version_str); + } + } + bail!("this is not a http request. 
no method found"); + }; - // Minimum validation for headers: at least one CRLF should be present after the initial request line. - let has_headers = request_str - .splitn(2, "\r\n") - .nth(1) - .map_or(false, |s| s.contains("\r\n")); + let has_headers = memchr::memmem::find(bytes, b"\r\n\r\n").is_some(); + let is_valid = has_headers || matches!(version, Version::HTTP_09); // <-- no headers required in HTTP/0.9 - // The request is considered valid if it starts with a known method, contains a valid HTTP version, and has headers. - valid_start && valid_version && has_headers + if is_valid { + Ok(version) + } else { + bail!("invalid http request"); + } + } -pub (crate) fn try_decode_http_host (http_request: &str) -> Option { - // Split the request into lines - let lines: Vec<&str> = http_request.split("\r\n").collect(); - // Iterate through each line to find the Host header - for line in lines { - if line.to_lowercase().starts_with("host:") { - // Extract the value part of the Host header - let parts: Vec<&str> = line.splitn(2, ": ").collect(); - if parts.len() == 2 { - return Some(parts[1].to_string()); +// note: this method is performance critical, be careful when changing it +// todo: add as many tests as possible to ensure this method is correct and performant +pub fn try_decode_http_host(http_request: &str) -> Option { + for line in http_request.split("\r\n") { + if line.len() > 5 && line[..5].eq_ignore_ascii_case("Host:") { + if let Some((_, host)) = line.split_once(": ") { + return Some(host.to_string()); } } } diff --git a/src/tcp_proxy/http2.rs b/src/tcp_proxy/http2.rs index e65e25f..f5e7b13 100644 --- a/src/tcp_proxy/http2.rs +++ b/src/tcp_proxy/http2.rs @@ -2,7 +2,7 @@ use hpack::Decoder; #[allow(dead_code)] // https://datatracker.ietf.org/doc/html/rfc9113 -pub (crate) fn find_http2_authority(bytes: &[u8]) -> Option { +pub fn find_http2_authority(bytes: &[u8]) -> Option { let mut current = 0; while current + 9 <= bytes.len() { let length = @@ -59,7 +59,7 @@ fn decompress_hpack(fragment: &[u8]) -> Result, String> { } // https://datatracker.ietf.org/doc/html/rfc9113 -pub (crate) fn is_valid_http2_request(bytes: &[u8]) -> bool { +pub fn is_valid_http2_request(bytes: &[u8]) -> bool { // HTTP/2 client connection preface let http2_preface = b"PRI * HTTP/2.0"; // ...\r\n\r\nSM\r\n\r\n // Check if the bytes start with the HTTP/2 preface diff --git a/src/tcp_proxy/mod.rs b/src/tcp_proxy/mod.rs index 48acec6..ba1b76a 100644 --- a/src/tcp_proxy/mod.rs +++ b/src/tcp_proxy/mod.rs @@ -2,4 +2,4 @@ mod tls; mod http1; mod http2; mod tcp; -pub (crate) use tcp::*; \ No newline at end of file +pub use tcp::*; \ No newline at end of file diff --git a/src/tcp_proxy/tcp.rs b/src/tcp_proxy/tcp.rs index bcc6d7b..78d74d3 100644 --- a/src/tcp_proxy/tcp.rs +++ b/src/tcp_proxy/tcp.rs @@ -30,42 +30,53 @@ pub struct ReverseTcpProxyTarget { } pub struct ReverseTcpProxyTargets { - pub global_state : GlobalState + pub global_state : Arc } impl ReverseTcpProxyTargets { - pub async fn reverse_tcp_proxy_targets(&self) -> Vec { - let cfg = self.global_state.1.read().await; - let mut tcp_targets = vec![]; - if let Some(x) = &cfg.hosted_process { - for y in x.iter().filter(|xx|xx.disable_tcp_tunnel_mode.unwrap_or_default() == false) { - - let port = y.port.unwrap_or_default(); - if port > 0 { - tcp_targets.push(ReverseTcpProxyTarget { - remote_target_config: None, // we dont need this for hosted processes - capture_subdomains: y.capture_subdomains.unwrap_or_default(), - forward_wildcard: 
y.forward_subdomains.unwrap_or_default(), - backends: vec![crate::configuration::v2::Backend { - address: y.host_name.to_owned(), - https: y.https, - port: port - }], - host_name: y.host_name.to_owned(), - is_hosted: true, - sub_domain: None - }) - } else { - tracing::warn!("hosted process: {} has no port configured. skipping this target for tcp tunnel mode. this is most likely a bug in odd-box.",y.host_name); - } - + pub async fn try_find(&self,filter_fun: F) -> Option + where F: Fn(&ReverseTcpProxyTarget) -> Option, + { + + let cfg = self.global_state.config.read().await; + + + for y in cfg.hosted_process.iter().flatten().filter(|xx| + xx.disable_tcp_tunnel_mode.unwrap_or_default() == false + ) { + + let port = y.active_port.unwrap_or_default(); + if port > 0 { + let t = ReverseTcpProxyTarget { + remote_target_config: None, // we dont need this for hosted processes + capture_subdomains: y.capture_subdomains.unwrap_or_default(), + forward_wildcard: y.forward_subdomains.unwrap_or_default(), + backends: vec![crate::configuration::v2::Backend { + hints: y.hints.clone(), + address: y.host_name.to_owned(), + https: y.https, + port: y.active_port.unwrap_or_default() + }], + host_name: y.host_name.to_owned(), + is_hosted: true, + sub_domain: None + }; + let filtered = filter_fun(&t); + if filtered.is_some() { + return filtered + } } + + } + if let Some(x) = &cfg.remote_target { - for y in x.iter().filter(|xx|xx.disable_tcp_tunnel_mode.unwrap_or_default() == false) { + for y in x.iter().filter(|xx| + xx.disable_tcp_tunnel_mode.unwrap_or_default() == false + ) { // we support comma separated hostnames for the same target temporarily for remotes. // in this mode we require all backends to have the same scheme and port configuration.. @@ -73,7 +84,7 @@ impl ReverseTcpProxyTargets { // supports multiple backend configurations for the same hostname. 
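For context, the filter passed into `try_find` (see `req_target_filter_map` further down) boils down to an exact host-name match or, failing that, subdomain capture against the configured name. A simplified sketch under that assumption, with illustrative names:

```rust
// Simplified, illustrative sketch of the host matching done by the filter:
// exact host match, or capture of the leading subdomain.
fn matches_site(incoming: &str, configured: &str) -> Option<Option<String>> {
    let incoming = incoming.trim().to_lowercase();
    let configured = configured.trim().to_lowercase();
    if incoming == configured {
        Some(None) // exact match, no subdomain involved
    } else if let Some(prefix) = incoming.strip_suffix(format!(".{configured}").as_str()) {
        Some(Some(prefix.to_string())) // "api.example.com" vs "example.com" -> "api"
    } else {
        None
    }
}

#[test]
fn host_matching() {
    assert_eq!(matches_site("Example.com", "example.com"), Some(None));
    assert_eq!(matches_site("api.example.com", "example.com"), Some(Some("api".into())));
    assert_eq!(matches_site("other.org", "example.com"), None);
}
```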
- tcp_targets.push(ReverseTcpProxyTarget { + let t = ReverseTcpProxyTarget { remote_target_config: Some(y.clone()), capture_subdomains: y.capture_subdomains.unwrap_or_default(), forward_wildcard: y.forward_subdomains.unwrap_or_default(), @@ -81,10 +92,16 @@ impl ReverseTcpProxyTargets { host_name: y.host_name.to_owned(), is_hosted: false, sub_domain: None - }) + }; + let filtered = filter_fun(&t); + if filtered.is_some() { + return filtered + } } } - tcp_targets + + None + } } @@ -105,9 +122,10 @@ impl ReverseTcpProxyTarget { capture_subdomains: x.capture_subdomains.unwrap_or_default(), forward_wildcard: x.forward_subdomains.unwrap_or_default(), backends: vec![crate::configuration::v2::Backend { + hints: x.hints.clone(), address: x.host_name.to_owned(), https: x.https, - port: x.port.expect("remote target must have a port configured") + port: x.active_port.unwrap_or_default() }], host_name: x.host_name.clone(), is_hosted: true, @@ -125,9 +143,9 @@ pub enum DataType { #[derive(Debug)] pub struct PeekResult { - pub (crate) typ : DataType, - #[allow(dead_code)]pub (crate) http_version : Option, - pub (crate) target_host : Option + pub typ : DataType, + #[allow(dead_code)]pub http_version : Option, + pub target_host : Option } #[derive(Debug)] @@ -164,8 +182,8 @@ impl ReverseTcpProxy { } None } - pub fn try_get_target_from_vec( - targets: Vec, + pub fn req_target_filter_map( + target: &ReverseTcpProxyTarget, req_host_name: &str, ) -> Option { @@ -175,42 +193,35 @@ impl ReverseTcpProxy { req_host_name }; - targets.iter().find_map(|x| { - - - if x.host_name.to_lowercase().trim() == parsed_name.to_lowercase().trim() { - // we dont want to impl clone on this so we just create it manually for now - // altough we could return refs but I don't have time for lifetimes atm - Some(ReverseTcpProxyTarget { - capture_subdomains: x.capture_subdomains, - forward_wildcard: x.forward_wildcard, - backends: x.backends.clone(), - host_name: x.host_name.clone(), - is_hosted: x.is_hosted, - sub_domain: None, - remote_target_config: x.remote_target_config.clone() - }) - } else { - match Self::get_subdomain(parsed_name, &x.host_name) { - Some(subdomain) => Some(ReverseTcpProxyTarget { - capture_subdomains: x.capture_subdomains, - forward_wildcard: x.forward_wildcard, - backends: x.backends.clone(), - host_name: x.host_name.clone(), - is_hosted: x.is_hosted, - sub_domain: Some(subdomain), - remote_target_config: x.remote_target_config.clone() - }), - None => None, - } - } + if target.host_name.to_lowercase().trim() == parsed_name.to_lowercase().trim() { + Some(ReverseTcpProxyTarget { + capture_subdomains: target.capture_subdomains, + forward_wildcard: target.forward_wildcard, + backends: target.backends.clone(), + host_name: target.host_name.clone(), + is_hosted: target.is_hosted, + sub_domain: None, + remote_target_config: target.remote_target_config.clone() }) - + } else { + match Self::get_subdomain(parsed_name, &target.host_name) { + Some(subdomain) => Some(ReverseTcpProxyTarget { + capture_subdomains: target.capture_subdomains, + forward_wildcard: target.forward_wildcard, + backends: target.backends.clone(), + host_name: target.host_name.clone(), + is_hosted: target.is_hosted, + sub_domain: Some(subdomain), + remote_target_config: target.remote_target_config.clone() + }), + None => None, + } + } } #[instrument(skip_all)] - pub (crate) async fn peek_tcp_stream( + pub async fn peek_tcp_stream( tcp_stream: &TcpStream, client_address: SocketAddr, ) -> Result { @@ -299,53 +310,41 @@ impl ReverseTcpProxy { } // if we 
dont already know this traffic is NOT http1: - if we_know_its_not_h1 == false && super::http1::is_valid_http_request(&buf) { - - we_know_its_not_h2 = true; - we_know_this_is_not_tls_handshake = true; - - if let Ok(str_data) = std::str::from_utf8(&buf) { - - if let Some(valid_host_name) = super::http1::try_decode_http_host(str_data) { - trace!("Found valid http1 host header while peeking in to tcp stream: {valid_host_name}"); - return Ok(PeekResult { - typ: DataType::ClearText, - // todo : use version from the peeked tcp bytes - http_version: Some(Version::HTTP_11), - target_host: Some(valid_host_name) - }) + if we_know_its_not_h1 == false { + match super::http1::is_valid_http_request(&buf) { + Ok(http_version) => { + we_know_its_not_h2 = true; + we_know_this_is_not_tls_handshake = true; + if let Ok(str_data) = std::str::from_utf8(&buf) { + + if let Some(valid_host_name) = super::http1::try_decode_http_host(str_data) { + trace!("Found valid http1 host header while peeking in to tcp stream: {valid_host_name}"); + return Ok(PeekResult { + typ: DataType::ClearText, + http_version: Some(http_version), + target_host: Some(valid_host_name) + }) + } else { + tracing::trace!("received an invalid http1 request. missing host header.."); + we_know_its_not_h1 = true; + } } else { - trace!("well, its not a valid http request (yet).."); + tracing::trace!("received an invalid http1 request. not valid utf8.."); + we_know_its_not_h1 = true; } - } else { - trace!("seems to be a valid http request, yet not valid utf8... strange!") + }, + Err(e) => { + tracing::trace!("received an invalid http1 request: {e:?}"); + we_know_its_not_h1 = true; + }, } } - // if we dont already know the traffic is NOT http1: + + // if we dont already know the traffic is NOT http2: else if we_know_its_not_h2 == false && super::http2::is_valid_http2_request(&buf) { return Err(PeekError::Unknown("oddbox does not currently support h2c for tcp tunnel mode".into())); - // note: the issue here is that clients do not send their header/settings frame until it receives a response from the server - // containing the server settings, which we don't yet know at this point. - - // we_know_its_not_h1 = true; - // if let Some(valid_host_name) = super::http2::find_http2_authority(&buf) { - // trace!("Found valid http2 authority while peeking in to tcp stream: {valid_host_name}"); - // return Ok(PeekResult { - // typ: DataType::ClearText, - // // todo : use version from the peeked tcp bytes - // http_version: Some(Version::HTTP_2), - // target_host: Some(valid_host_name) - // }); - // } else { - // trace!("it is a valid http2 request but no authority is yet to be found"); - // we_know_this_is_not_tls_handshake = true; - // // wait for more bytes to arrive as we need the authority info from a header frame to be able to proceed - - // tokio::time::sleep(std::time::Duration::from_millis(1000)).await; - // continue - // } } if we_know_this_is_not_tls_handshake { @@ -382,15 +381,28 @@ impl ReverseTcpProxy { mut client_tcp_stream:TcpStream, target:ReverseTcpProxyTarget, incoming_traffic_is_tls:bool, - state: GlobalState, + state: Arc, client_address: SocketAddr ) { // only remotes have more than one backend. hosted processes always have a single backend. 
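The backend picked here comes from `next_backend`, which round-robins over the backends matching the desired scheme. A minimal sketch of that idea, assuming an atomic per-site counter (the types and names below are illustrative, not the actual implementation):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative sketch: round-robin over the backends that match the wanted
// scheme (https vs plain http), driven by a per-site atomic counter.
struct Backend { address: String, port: u16, https: bool }

fn next_backend<'a>(backends: &'a [Backend], counter: &AtomicUsize, want_https: bool) -> Option<&'a Backend> {
    let eligible: Vec<&Backend> = backends.iter().filter(|b| b.https == want_https).collect();
    if eligible.is_empty() {
        return None;
    }
    let n = counter.fetch_add(1, Ordering::Relaxed);
    Some(eligible[n % eligible.len()])
}

#[test]
fn alternates_between_eligible_backends() {
    let backends = vec![
        Backend { address: "a.example".into(), port: 443, https: true },
        Backend { address: "b.example".into(), port: 443, https: true },
        Backend { address: "c.example".into(), port: 80, https: false },
    ];
    let counter = AtomicUsize::new(0);
    let first = next_backend(&backends, &counter, true).unwrap();
    let second = next_backend(&backends, &counter, true).unwrap();
    assert_ne!(first.address, second.address);
}
```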
- let primary_backend = if let Some(remconf) = &target.remote_target_config { - remconf.next_backend(&state, if incoming_traffic_is_tls { BackendFilter::Https } else { BackendFilter::Http }).await - } else { - target.backends.first().expect("target must have at least one backend").to_owned() + let primary_backend = { + let b = if let Some(remconf) = &target.remote_target_config { + remconf.next_backend(&state, if incoming_traffic_is_tls { BackendFilter::Https } else { BackendFilter::Http }).await + } else { + target.backends.first().cloned() + }; + if let Some(b) = b { + b + } else { + tracing::warn!("no backend found for target {target:?}"); + return; + } + }; + + if 0 == primary_backend.port { + tracing::warn!("no active target port found for target {target:?}, wont be able to establish a tcp connection for site {}",target.host_name); + return }; @@ -398,9 +410,9 @@ impl ReverseTcpProxy { let subdomain = target.sub_domain.as_ref(); if target.forward_wildcard && subdomain.is_some() { tracing::debug!("tcp tunnel rewrote for subdomain: {:?}", subdomain); - format!("{}.{}:{}", subdomain.unwrap(), primary_backend.address, primary_backend.port) + format!("{}.{}:{}", subdomain.unwrap(), primary_backend.address, primary_backend.port ) } else { - format!("{}:{}", primary_backend.address, primary_backend.port) + format!("{}:{}", primary_backend.address, primary_backend.port ) } }; @@ -414,7 +426,7 @@ impl ReverseTcpProxy { let source_addr = client_address.clone(); let item = ProxyActiveConnection { - target, + target_name: target.host_name.clone(), target_addr: format!("{resolved_target_address} ({})",target_addr_socket.ip()), source_addr: source_addr.clone(), creation_time: Local::now(), @@ -425,31 +437,24 @@ impl ReverseTcpProxy { ProxyActiveConnectionType::TcpTunnelUnencryptedHttp }, }; - - let item_key = (source_addr,uuid::Uuid::new_v4()); - { // ADD THIS CONNECTION TO STATE - let s = state.0.read().await; - let mut guard = s.statistics.write().expect("should always be able to add connections to state"); - _=guard.active_connections.insert(item_key, item); - } + let item_key = crate::generate_unique_id(); + + // ADD TO STATE BEFORE STARTING THE STREAM + state.app_state.statistics.active_connections.insert(item_key, item); - match tokio::io::copy_bidirectional(&mut client_tcp_stream, &mut rem_stream).await - { - Ok(_a) => { - // could add this to target stats at some point - //debug!("stream completed ok! -- {} <--> {}", a.0, a.1) - } - Err(e) => { - trace!("Stream failed with err: {e:?}") - } - } - - { // DROP THIS CONNECTION FROM STATE - let s = state.0.read().await; - let mut guard = s.statistics.write().expect("should always be able to drop connections from state"); - _ = guard.active_connections.remove(&item_key); + match tokio::io::copy_bidirectional(&mut client_tcp_stream, &mut rem_stream).await { + Ok(_a) => { + // could add this to target stats at some point + //debug!("stream completed ok! 
-- {} <--> {}", a.0, a.1) + } + Err(e) => { + trace!("Stream failed with err: {e:?}") + } } + + // DROP FROM ACTIVE STATE ONCE DONE + state.app_state.statistics.active_connections.remove(&item_key); } else { @@ -461,7 +466,7 @@ impl ReverseTcpProxy { } #[instrument(skip_all)] - pub async fn listen(&self,shutdown_signal:std::sync::Arc,state: GlobalState,) -> Result<(), std::io::Error> { + pub async fn listen(&self,shutdown_signal:std::sync::Arc,state: Arc,) -> Result<(), std::io::Error> { tracing::info!("Starting TCP proxy on {:?}",self.socket_addr); let listener = TcpListener::bind(self.socket_addr).await?; @@ -473,7 +478,8 @@ impl ReverseTcpProxy { let peek_result = Self::peek_tcp_stream(&tcp_stream, client_address).await; - let cloned_list = self.targets.clone().reverse_tcp_proxy_targets().await; + let targets_arc = self.targets.clone(); + tokio::spawn(async move { match peek_result { Ok(PeekResult { @@ -485,7 +491,13 @@ impl ReverseTcpProxy { DataType::TLS => true, _ => false, }; - if let Some(t) = Self::try_get_target_from_vec(cloned_list, &target_host) { + + fn filter_fun(p: &ReverseTcpProxyTarget, target_host: &str) -> Option { + ReverseTcpProxy::req_target_filter_map(p, target_host) + } + + let target_host_str = target_host.as_str(); + if let Some(t) = targets_arc.try_find(|p| filter_fun(p, target_host_str)).await { _ = Self::tunnel( tcp_stream, t, @@ -494,14 +506,14 @@ impl ReverseTcpProxy { client_address ).await; } else { - tracing::warn!("no such target is configured: {target_host:?}") + tracing::debug!("no such target is configured: {target_host:?}") } }, Ok(_) => { - tracing::info!("could not find a host name so we dont know where to proxy this traffic. giving up on this stream!") + tracing::debug!("could not find a host name so we dont know where to proxy this traffic. 
giving up on this stream!") } Err(e) => { - tracing::info!("giving up on this stream due to error: {e:?}") + tracing::debug!("giving up on this stream due to error: {e:?}") }, } }); diff --git a/src/tcp_proxy/tls/client_hello.rs b/src/tcp_proxy/tls/client_hello.rs index 3ad8e07..bf0464a 100644 --- a/src/tcp_proxy/tls/client_hello.rs +++ b/src/tcp_proxy/tls/client_hello.rs @@ -3,14 +3,14 @@ use super::extension::SniParserError; use super::extension::TlsExtensionType; #[derive(Debug)] -pub (crate) enum TlsClientHelloError { +pub enum TlsClientHelloError { NotTLSHandshake, NotClientHello, MessageIncomplete(#[allow(dead_code)]usize), } #[derive(Debug)] -pub (crate) struct TlsClientHello { +pub struct TlsClientHello { protocol_version: (u8, u8), random: Vec, session_id: Vec, @@ -114,7 +114,7 @@ impl TlsClientHello { } } - pub (crate) fn read_sni_hostname(&self) -> Result { + pub fn read_sni_hostname(&self) -> Result { for extension in &self.extensions { if let TlsExtensionType::ServerName = extension.typ { // SNI extension type diff --git a/src/tcp_proxy/tls/extension.rs b/src/tcp_proxy/tls/extension.rs index 34a4a0f..a2aeec3 100644 --- a/src/tcp_proxy/tls/extension.rs +++ b/src/tcp_proxy/tls/extension.rs @@ -1,6 +1,6 @@ #[derive(Debug, PartialEq, Eq,Clone)] -pub (crate) enum TlsExtensionType { +pub enum TlsExtensionType { ServerName, // 0x0000 MaxFragmentLength, // 0x0001 StatusRequest, // 0x0005 @@ -18,7 +18,7 @@ pub (crate) enum TlsExtensionType { } #[derive(Debug)] -pub (crate) struct TlsExtension { +pub struct TlsExtension { pub typ : TlsExtensionType, pub data : Vec } @@ -75,7 +75,7 @@ impl Into for TlsExtensionType { #[derive(Debug)] -pub (crate) enum SniParserError { +pub enum SniParserError { NoSniFound, InvalidExtensionFormat, Utf8Error(#[allow(dead_code)]std::str::Utf8Error), diff --git a/src/tcp_proxy/tls/mod.rs b/src/tcp_proxy/tls/mod.rs index ff76344..3541703 100644 --- a/src/tcp_proxy/tls/mod.rs +++ b/src/tcp_proxy/tls/mod.rs @@ -1,2 +1,2 @@ -pub (crate) mod client_hello; -pub (crate) mod extension; +pub mod client_hello; +pub mod extension; diff --git a/src/tests/configuration.rs b/src/tests/configuration.rs new file mode 100644 index 0000000..f6e73b5 --- /dev/null +++ b/src/tests/configuration.rs @@ -0,0 +1,173 @@ +use std::borrow::BorrowMut; + +#[allow(unused)] +use crate::configuration::OddBoxConfiguration; +use crate::http_proxy::ConfigWrapper; + +#[test] pub fn legacy_upgrade() { + let legacy_config = crate::configuration::legacy::OddBoxLegacyConfig::example(); + let cfg = crate::configuration::OddBoxConfig::Legacy(legacy_config); + cfg.try_upgrade_to_latest_version().unwrap(); +} + +#[test] pub fn v1_upgrade() { + let v1_config = crate::configuration::v1::OddBoxV1Config::example(); + let cfg = crate::configuration::OddBoxConfig::V1(v1_config); + cfg.try_upgrade_to_latest_version().unwrap(); + +} + +#[test] pub fn v1_to_v2_default_to_port_80_for_backends_with_unspecified_scheme() { + + let mut v1_config = crate::configuration::v1::OddBoxV1Config::example(); + + let test_site = crate::configuration::v1::RemoteSiteConfig { + port: None, + capture_subdomains: None, + disable_tcp_tunnel_mode: None, + h2_hint: None, + host_name: "test".into(), + forward_subdomains: None, + https: None, + target_hostname: "test-domain.com".into(), + }; + + if let Some(ref mut v) = v1_config.remote_target { + *v = vec![test_site]; + } + + let v2 : crate::configuration::v2::OddBoxV2Config = v1_config.to_owned().try_into().unwrap(); + + let remote_sites = v2.remote_target.expect("should have remote 
sites"); + + assert_eq!(remote_sites.len(),1); + + let test_site = remote_sites.get(0).expect("should have test site"); + + assert_eq!(test_site.backends.len(),1); + + let backend = test_site.backends.get(0).expect("should have backend"); + + assert_eq!(backend.port,80); + assert_eq!(backend.address,"test-domain.com"); + assert_eq!(backend.https,None); + + +} + + +#[test] pub fn v1_to_v2_default_to_port_80_for_backends_with_http() { + + let mut v1_config = crate::configuration::v1::OddBoxV1Config::example(); + + let test_site = crate::configuration::v1::RemoteSiteConfig { + port: None, + capture_subdomains: None, + disable_tcp_tunnel_mode: None, + h2_hint: None, + host_name: "test".into(), + forward_subdomains: None, + https: Some(false), + target_hostname: "test-domain.com".into(), + }; + + if let Some(ref mut v) = v1_config.remote_target { + *v = vec![test_site]; + } + + let v2 : crate::configuration::v2::OddBoxV2Config = v1_config.to_owned().try_into().unwrap(); + + let remote_sites = v2.remote_target.expect("should have remote sites"); + + assert_eq!(remote_sites.len(),1); + + let test_site = remote_sites.get(0).expect("should have test site"); + + assert_eq!(test_site.backends.len(),1); + + let backend = test_site.backends.get(0).expect("should have backend"); + + assert_eq!(backend.port,80); + assert_eq!(backend.address,"test-domain.com"); + assert_eq!(backend.https,Some(false)); + + +} + + +#[test] pub fn v1_to_v2_default_to_port_443_for_backends_with_https() { + + let mut v1_config = crate::configuration::v1::OddBoxV1Config::example(); + + let test_site = crate::configuration::v1::RemoteSiteConfig { + port: None, + capture_subdomains: None, + disable_tcp_tunnel_mode: None, + h2_hint: None, + host_name: "test".into(), + forward_subdomains: None, + https: Some(true), + target_hostname: "test-domain.com".into(), + }; + + if let Some(ref mut v) = v1_config.remote_target { + *v = vec![test_site]; + } + + let v2 : crate::configuration::v2::OddBoxV2Config = v1_config.to_owned().try_into().unwrap(); + + let remote_sites = v2.remote_target.expect("should have remote sites"); + + assert_eq!(remote_sites.len(),1); + + let test_site = remote_sites.get(0).expect("should have test site"); + + assert_eq!(test_site.backends.len(),1); + + let backend = test_site.backends.get(0).expect("should have backend"); + + assert_eq!(backend.port,443); + assert_eq!(backend.address,"test-domain.com"); + assert_eq!(backend.https,Some(true)); + + +} + +#[test] pub fn v2_reserialize_is_lossless() -> Result<(),String> { + let v2_config_example = crate::configuration::v2::OddBoxV2Config::example(); + let serialized = v2_config_example.to_string().expect("should be able to serialize v2 configurations"); + let deserialized = crate::configuration::OddBoxConfig::parse(&serialized).expect("should be able to deserialize v2 configurations"); + match deserialized { + crate::configuration::OddBoxConfig::Legacy(_) => Err("expected v2 config".to_string()), + crate::configuration::OddBoxConfig::V1(_) => Err("expected v2 config".to_string()), + crate::configuration::OddBoxConfig::V2(v2_after_se_de) => { + // make sure that the deserialized version is the same as the original so we + // can be sure that the serialization and deserialization process is lossless + if v2_config_example.remote_target.eq(&v2_after_se_de.remote_target) == false { + + panic!("deserialized version of v2 config is not the same as the original: {:?}\n\n{:?}",v2_config_example,v2_after_se_de); + }; + Ok(()) + }, + } + +} + + +// #[test] pub fn 
proc_should_use_first_available_port_from_start_range() { +// // let mut v1_config = crate::configuration::v1::OddBoxV1Config::example(); +// // let test_site = crate::configuration::v1::InProcessSiteConfig { +// // port: None, +// // https: Some(true), +// // ..Default::default() +// // }; +// // if let Some(ref mut v) = v1_config.hosted_process { +// // *v = vec![test_site]; +// // } +// // let mut wrapped = ConfigWrapper(v1_config.to_owned().try_into().unwrap()); +// // wrapped.path = Some("/tmp".into()); +// // if let Some(procs) = wrapped.hosted_process.as_ref() { +// // let resolved = wrapped.resolve_process_configuration(&procs[0]).expect("should be able to resolve process configuration"); +// // let busy = wrapped.busy_ports(); +// // } +// } diff --git a/src/tests/main.rs b/src/tests/main.rs new file mode 100644 index 0000000..8a75316 --- /dev/null +++ b/src/tests/main.rs @@ -0,0 +1,4 @@ +#[test] +fn test_src_tests_main_rs_1() -> anyhow::Result<()> { + Ok(()) +} \ No newline at end of file diff --git a/src/tests/mod.rs b/src/tests/mod.rs new file mode 100644 index 0000000..d565000 --- /dev/null +++ b/src/tests/mod.rs @@ -0,0 +1,2 @@ +mod configuration; +mod main; \ No newline at end of file diff --git a/src/tui/connections_widget.rs b/src/tui/connections_widget.rs new file mode 100644 index 0000000..d8630f3 --- /dev/null +++ b/src/tui/connections_widget.rs @@ -0,0 +1,134 @@ +use std::sync::Arc; + +use ratatui::layout::{Flex, Rect}; +use ratatui::style::{Color, Modifier, Style, Stylize}; +use ratatui::widgets::{Cell, Row, Scrollbar, ScrollbarOrientation, Table}; +use crate::global_state::GlobalState; +use crate::types::proxy_state::*; +use crate::types::tui_state::TuiState; +use ratatui::layout::Constraint; +use super::Theme; + + +pub fn draw( + f: &mut ratatui::Frame, + global_state: Arc, + tui_state: &mut TuiState, + area: Rect, + theme: &Theme +) { + let headers = [ "Site", "Source", "Target", "Description"]; + + let rows : Vec> = global_state.app_state.statistics.active_connections.iter().map(|guard| { + let (_,active_connection) = guard.pair(); + let typ = match &active_connection.connection_type { + ProxyActiveConnectionType::TcpTunnelUnencryptedHttp => "UNENCRYPTED TCP TUNNEL".to_string(), + ProxyActiveConnectionType::TcpTunnelTls => + "TLS ENCRYPTED TCP TUNNEL".to_string(), + ProxyActiveConnectionType::TerminatingHttp { incoming_scheme, incoming_http_version, outgoing_scheme, outgoing_http_version }=> + format!("{incoming_scheme}@{incoming_http_version:?} <-TERMINATING_HTTP-> {outgoing_scheme}@{outgoing_http_version:?}"), + ProxyActiveConnectionType::TerminatingWs { incoming_scheme, incoming_http_version, outgoing_scheme, outgoing_http_version } => + format!("{incoming_scheme}@{incoming_http_version:?} <-TERMINATING_WS-> {outgoing_scheme}@{outgoing_http_version:?}"), + }; + let description = format!("{}",typ); + // pub struct ProxyActiveConnection { + // pub target_name : String, + // pub creation_time : chrono::DateTime, + // pub description : Option, + // pub connection_type : ProxyActiveConnectionType, + // pub source_addr: SocketAddr, + // pub target_addr: String + // } + + vec![ + active_connection.target_name.clone(), + active_connection.source_addr.to_string(), + active_connection.target_addr.clone(), + description + ] + }).collect(); + + + + let header_height = 1; + let visible_rows = area.height as usize - header_height; + let start = tui_state.connections_tab_state.scroll_state.vertical_scroll.unwrap_or_default(); + let end = std::cmp::min(start + visible_rows, 
rows.len()); + + let is_dark_theme = matches!(&theme,Theme::Dark(_)); + + let display_rows = &rows[start..end]; + + let odd_row_bg = if is_dark_theme { Color::from_hsl(15.0, 10.0, 10.0) } else { + Color::Rgb(250,250,250) + }; + let row_bg = if is_dark_theme { Color::from_hsl(10.0, 10.0, 5.0) } else { + Color::Rgb(235,235,255) + }; + + + + let table_rows : Vec<_> = display_rows.iter().enumerate().map(|(i,row)| { + + let is_odd = i % 2 == 0; + + + Row::new(row.iter().map(|x|Cell::new(x.to_string()))).height(1 as u16) + .style( + Style::new() + .bg( + if is_odd { + odd_row_bg + } else { + row_bg + } + ).fg(if is_dark_theme { Color::White } else { Color::Black }) + ) + }).collect(); + + + tui_state.connections_tab_state.scroll_state.visible_rows = display_rows.iter().len() as usize; + tui_state.connections_tab_state.scroll_state.total_rows = rows.len(); + + let widths = [ + Constraint::Fill(1), + Constraint::Fill(1), + Constraint::Fill(2), + Constraint::Fill(4), + ]; + + + let headers = Row::new(headers + .iter() + .map(|&h| Cell::from(h).fg(if is_dark_theme {Color::LightGreen} else {Color::Blue}).underlined().add_modifier(Modifier::BOLD)) + ).height(1); + + + let table = Table::new(table_rows, widths.clone()) + .header(headers) + .highlight_style(Style::default().add_modifier(Modifier::BOLD)) + .widths(&widths) + .flex(Flex::Legacy) + .column_spacing(1); + + f.render_widget(table, area); + + + let scrollbar = Scrollbar::default() + .style(Style::default()) + .orientation(ScrollbarOrientation::VerticalRight) + .begin_symbol(Some("↑")) + .end_symbol(Some("↓")).thumb_style(Style::new().fg(Color::LightBlue)) + .orientation(ScrollbarOrientation::VerticalRight); + + let height_of_traf_area = area.height.saturating_sub(2); + tui_state.connections_tab_state.scroll_state.area_height = height_of_traf_area as usize; + + tui_state.connections_tab_state.scroll_state.vertical_scroll_state = tui_state.connections_tab_state.scroll_state.vertical_scroll_state.content_length(rows.len().saturating_sub(height_of_traf_area as usize)); + + let scrollbar_area = Rect::new(area.right() - 1, area.top(), 1, area.height); + + f.render_stateful_widget(scrollbar,scrollbar_area, &mut tui_state.connections_tab_state.scroll_state.vertical_scroll_state); + +} + diff --git a/src/tui/logs_widget.rs b/src/tui/logs_widget.rs new file mode 100644 index 0000000..c27e66d --- /dev/null +++ b/src/tui/logs_widget.rs @@ -0,0 +1,156 @@ +use std::borrow::BorrowMut; +use std::sync::{Arc, Mutex}; +use ratatui::layout::Rect; +use ratatui::style::{Color, Style}; +use ratatui::text::Line; +use ratatui::widgets::{Paragraph, Scrollbar, ScrollbarOrientation}; +use tokio::sync::RwLockWriteGuard; +use tracing::Level; +use crate::global_state::GlobalState; +use crate::logging::SharedLogBuffer; +use crate::types::app_state::*; +use crate::types::tui_state::TuiState; +use super::{wrap_string, Theme}; + + +pub fn draw( + f: &mut ratatui::Frame, + mut global_state: Arc, + tui_state: &mut TuiState, + log_buffer: &Arc>, + area: Rect, + _theme: &Theme +) { + + { + let mut buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); + if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_none() && buffer.limit.is_none() { + let l = buffer.limit.borrow_mut(); + *l = Some(500); + } else if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_some() && buffer.limit.is_some() { + let l = buffer.limit.borrow_mut(); + *l = None; + } + } + + + let buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); + 
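Both the connections table and the log view slice their rows to the visible area based on the scroll offset. A small sketch of that windowing, with the offset clamped so the slice stays in bounds (the function and test names are illustrative only):

```rust
// Illustrative sketch of the scroll windowing used by the table/log widgets:
// clamp the scroll offset so the rendered slice never runs past the row count.
fn visible_window(total_rows: usize, area_height: usize, scroll: Option<usize>) -> (usize, usize) {
    let max_start = total_rows.saturating_sub(area_height);
    let start = scroll.unwrap_or(max_start).min(max_start);
    let end = (start + area_height).min(total_rows);
    (start, end)
}

#[test]
fn window_is_clamped() {
    assert_eq!(visible_window(100, 10, None), (90, 100));
    assert_eq!(visible_window(5, 10, Some(3)), (0, 5));
    assert_eq!(visible_window(100, 10, Some(42)), (42, 52));
}
```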
+ let max_msg_width = area.width; + + let item_count = buffer.logs.len().to_string().len().max(6); + + // we do this recalculation on each render in case of window-resize and such + // we should move so that this is done ONCE per log message and not for each log message ever on each render. + let items: Vec = buffer.logs.iter().enumerate().flat_map(|(i,x)|{ + + let level = x.lvl; + + let s = match level { + Level::ERROR => Style::default().fg(Color::Red), + Level::TRACE => Style::default().fg(Color::Gray), + Level::DEBUG => Style::default().fg(Color::Magenta), + Level::WARN => Style::default().fg(Color::Yellow), + Level::INFO => Style::default().fg(Color::Blue) + }; + + let nr_str = format!("{:1$} | ",i+1, item_count); + let lvl_str = format!("{:>1$} ",x.lvl.as_str(),5); + let thread_str = if let Some(n) = &x.thread {format!("{n} ")} else { format!("") }; + + let number = ratatui::text::Span::styled(nr_str.clone(),Style::default().fg(Color::DarkGray)); + let level = ratatui::text::Span::styled(lvl_str.clone(),s); + let thread_name = ratatui::text::Span::styled(thread_str.clone(),Style::default().fg(Color::DarkGray)); + + // if x.msg is wider than the available width, we need to split the message in multiple lines.. + let max_width = (max_msg_width as usize).saturating_sub(8).saturating_sub(nr_str.len() + lvl_str.len() + thread_str.len()); + + let l = if x.msg.len() > max_width as usize { + + wrap_string(x.msg.as_str(), max_width as usize) + .into_iter().enumerate() + .map(|(i,m)| + Line::from( + vec![ + number.clone(), + if i == 0 { level.clone() } else { + ratatui::text::Span::styled(" ".repeat(level.clone().content.len()).to_string() ,Style::default()) + }, + thread_name.clone(), + ratatui::text::Span::styled(m,Style::default()) + ] + ) + ).collect::>() + + + } else { + let message = ratatui::text::Span::styled(format!("{} {}",x.src.clone(),x.msg),Style::default()); + vec![Line::from(vec![number,level,thread_name,message])] + + }; + + l + + + }).collect(); + + let wrapped_line_count = items.len(); + + tui_state.log_tab_stage.scroll_state.total_rows = wrapped_line_count; + + let height_of_logs_area = area.height.saturating_sub(0); // header and footer + tui_state.log_tab_stage.scroll_state.area_height = height_of_logs_area as usize; + tui_state.log_tab_stage.scroll_state.area_width = area.width as usize; + + let scroll_pos = { tui_state.log_tab_stage.scroll_state.vertical_scroll }; + + let scrollbar_hovered = tui_state.log_tab_stage.scroll_state.scroll_bar_hovered; + let mut scrollbar_state = tui_state.log_tab_stage.scroll_state.vertical_scroll_state.borrow_mut(); + + let max_scroll_pos = items.len().saturating_sub(height_of_logs_area as usize); + + //let clamped_scroll_pos = scroll_pos.unwrap_or(max_scroll_pos).min(max_scroll_pos) as u16; + + let visible_rows = area.height as usize; // Adjust as needed based on your UI + + let start = scroll_pos.unwrap_or(max_scroll_pos); + let end = std::cmp::min(start + visible_rows, items.len()); + + + if start > items.len() || end > items.len() || start >= end { + return + } + + let display_rows = &items[start..end]; + + + let clamped_items : Vec = display_rows.iter().map(|x| { + x.clone() + }).collect(); + + let paragraph = Paragraph::new(clamped_items); + + let mut scrollbar = Scrollbar::default() + .style( Style::default()) + .orientation(ScrollbarOrientation::VerticalRight) + .begin_symbol(Some("↑")) + .end_symbol(Some("↓")).thumb_style(Style::new().fg(Color::LightBlue)); + + if scrollbar_hovered { + scrollbar = 
scrollbar.thumb_style(Style::default().fg(Color::Yellow).bg(Color::Red)); + } + + *scrollbar_state = scrollbar_state.content_length(items.len().saturating_sub(height_of_logs_area as usize)); + + if scroll_pos.is_none() { + *scrollbar_state = scrollbar_state.position(items.len().saturating_sub(height_of_logs_area as usize)); + } + + + f.render_widget(paragraph, area); + f.render_stateful_widget(scrollbar,area, &mut scrollbar_state); + + + +} + diff --git a/src/tui.rs b/src/tui/mod.rs similarity index 52% rename from src/tui.rs rename to src/tui/mod.rs index ae3db2d..979275a 100644 --- a/src/tui.rs +++ b/src/tui/mod.rs @@ -1,1029 +1,817 @@ -use ratatui::layout::{Alignment, Flex, Margin, Offset, Rect}; -use ratatui::style::{Color, Modifier, Style, Stylize}; -use ratatui::text::Line; -use ratatui::widgets::{BorderType, Cell, List, ListItem, Row, Scrollbar, ScrollbarOrientation, ScrollbarState, Table}; -use tokio::sync::RwLockWriteGuard; -use tokio::task; -use tracing::Level; -use tracing_subscriber::EnvFilter; -use tracing_subscriber::layer::SubscriberExt; -use std::borrow::BorrowMut; -use std::io::Stdout; -use crate::global_state::GlobalState; -use crate::logging::SharedLogBuffer; -use crate::logging::LogMsg; -use crate::types::app_state::*; -use std::sync::{Arc, Mutex}; -use std::time::Duration; -use crate::http_proxy::ProcMessage; -use crate::types::proxy_state::*; - -use serde::ser::SerializeStruct; - - -use ratatui::{ - layout::{Constraint, Direction, Layout}, - widgets::{Block, Borders, Paragraph} -}; - -use crossterm::{ - event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode}, - execute, - terminal::{ - disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, -}; - -use ratatui::{ - backend::CrosstermBackend, - Terminal, -}; - -impl serde::Serialize for LogMsg { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer { - let mut s = serializer.serialize_struct("LogMsg", 4)?; - s.serialize_field("msg", &self.msg)?; - s.serialize_field("lvl", &self.lvl.as_str())?; - s.serialize_field("src", &self.src)?; - s.serialize_field("thread", &self.thread.as_ref().unwrap_or(&"".to_string()))?; - s.end() - - } -} - - -pub (crate) fn init() { - _ = enable_raw_mode().expect("must be able to enable raw mode");(); - execute!(std::io::stdout(), EnterAlternateScreen, EnableMouseCapture).expect("must always be able to enter alt screen"); -} - -pub (crate) async fn run( - filter:EnvFilter, - shared_state:GlobalState, - tx: tokio::sync::broadcast::Sender, - trace_msg_broadcaster: tokio::sync::broadcast::Sender -) { - - let log_buffer = Arc::new(Mutex::new(SharedLogBuffer::new())); - let layer = crate::logging::TuiLoggerLayer { log_buffer: log_buffer.clone(), broadcaster: trace_msg_broadcaster }; - - let subscriber = tracing_subscriber::registry() - .with(filter).with(layer); - - tracing::subscriber::set_global_default(subscriber).expect("Failed to set collector"); - - - let backend = CrosstermBackend::new(std::io::stdout()); - - let terminal = Terminal::new(backend).expect("must be possible to create terminal"); - - let terminal = Arc::new(tokio::sync::Mutex::new(terminal)); - - - let mut theme = match dark_light::detect() { - dark_light::Mode::Dark => Theme::Dark(dark_theme()), - dark_light::Mode::Light => Theme::Light(light_theme()), - dark_light::Mode::Default => Theme::Dark(dark_theme()), - }; - - let mut count = 0; - - let disabled_items : Vec = shared_state.1.read().await.hosted_process.clone().unwrap_or_default().iter_mut().filter_map( |x| - 
if x.disabled.unwrap_or_default() { - Some(x.host_name.clone()) - } else { - None - } - ).collect(); - - - - // TUI event loop - let tui_handle = { - let terminal = Arc::clone(&terminal); - let app_state = shared_state.clone(); - let tx = tx.clone(); - task::spawn(async move { - - let tx = tx.clone(); - - let mut last_key_time = tokio::time::Instant::now(); - let debounce_duration = Duration::from_millis(100); - - //let mut last_toggle : Option = None; - - loop { - - { - let app = app_state.0.read().await; - - if app.exit { - if app.site_states_map.iter().find(|x| - x.1 == &ProcState::Stopping - || x.1 == &ProcState::Running - || x.1 == &ProcState::Starting - - ).is_none() { - break; // nothing is running,stopping or starting.. we can exit now - } - } - } - - if count > 100 { - theme = match dark_light::detect() { - dark_light::Mode::Dark => Theme::Dark(dark_theme()), - dark_light::Mode::Light => Theme::Light(light_theme()), - dark_light::Mode::Default => Theme::Dark(dark_theme()), - }; - count = 0; - - } - - // KEEP LOCK SHORT TO AVOID DEADLOCK - { - let mut state = app_state.0.write().await; - - let mut terminal = terminal.lock().await; - - terminal.draw(|f| draw_ui::>(f, &mut state, &log_buffer,&theme))?; - - } - - // } - - - // Handle input - if event::poll(std::time::Duration::from_millis(20))? { - let now = tokio::time::Instant::now(); - let time_since_last_keypress = now.duration_since(last_key_time); - - // let time_since_last_toggle = if let Some(t) = last_toggle { - // Some(now.duration_since(t)) - // } else { - // None - // }; - let (current_page,sites_open) = { - let guard = app_state.0.read().await; - (guard.current_page.clone(),guard.show_apps_window) - }; - let evt = event::read()?; - match evt { - Event::Mouse(mouse) => { - if sites_open { - match mouse.kind { - event::MouseEventKind::Moved => { - let mut app = app_state.0.write().await; - app.sites_handle_mouse_hover(mouse.column,mouse.row) - } - event::MouseEventKind::Down(event::MouseButton::Left) => { - let mut app = app_state.0.write().await; - app.sites_handle_mouse_click(mouse.column,mouse.row,tx.clone()) - } - _ => {} - } - } - match current_page { - Page::Logs => { - match mouse.kind { - event::MouseEventKind::Drag(event::MouseButton::Left) => { - let mut app = app_state.0.write().await; - app.logs_handle_mouse_scroll_drag(mouse.column,mouse.row) - } - event::MouseEventKind::Moved => { - let mut app = app_state.0.write().await; - app.logs_handle_mouse_move(mouse.column,mouse.row) - } - event::MouseEventKind::ScrollDown => { - let mut app = app_state.0.write().await; - app.logs_tab_scroll_down(Some(10)); - }, - event::MouseEventKind::ScrollUp => { - let mut app = app_state.0.write().await; - app.logs_tab_scroll_up(Some(10)); - }, - _ => {} - } - }, - Page::Statistics => {}, - Page::Connections => { - match mouse.kind { - event::MouseEventKind::ScrollDown => { - let mut app = app_state.0.write().await; - app.traf_tab_scroll_down(Some(10)); - }, - event::MouseEventKind::ScrollUp => { - let mut app = app_state.0.write().await; - app.traf_tab_scroll_up(Some(10)); - }, - _ => {} - } - }, - } - - } - Event::Key(key) => { - - if time_since_last_keypress >= debounce_duration { - last_key_time = now; - - match current_page { - Page::Logs => { - match key.code { - KeyCode::Enter => { - let mut app = app_state.0.write().await; - app.vertical_scroll = None; - let mut buf = log_buffer.lock().expect("must always be able to lock log buffer"); - match buf.limit { - Some(x) => { - while buf.logs.len() > x { - buf.logs.pop_front(); - 
} - }, - None => {}, - } - - } - KeyCode::Up => { - let mut app = app_state.0.write().await; - app.logs_tab_scroll_up(Some(1)); - } - KeyCode::Down => { - let mut app = app_state.0.write().await; - app.logs_tab_scroll_down(Some(1)); - } - KeyCode::PageUp => { - let mut app = app_state.0.write().await; - let scroll_count = app.logs_area_height.saturating_div(2); - if scroll_count > 0 { - app.logs_tab_scroll_up(Some(scroll_count)); - } - } - KeyCode::PageDown => { - let mut app = app_state.0.write().await; - let scroll_count = app.logs_area_height.saturating_div(2); - if scroll_count > 0 { - app.logs_tab_scroll_down(Some(scroll_count)); - } - }, - KeyCode::Char('c') => { - let mut app = app_state.0.write().await; - let mut buf = log_buffer.lock().expect("must always be able to lock log buffer"); - app.total_line_count = 0; - app.vertical_scroll = None; - app.scroll_state = app.scroll_state.position(0); - buf.logs.clear(); - - } - _ => {} - } - } - Page::Statistics => {}, - Page::Connections => { - match key.code { - KeyCode::PageUp => { - let mut app = app_state.0.write().await; - app.traf_tab_scroll_up(Some(10)); - } - KeyCode::PageDown => { - let mut app = app_state.0.write().await; - app.traf_tab_scroll_down(Some(10)); - - }, - KeyCode::Up => { - let mut app = app_state.0.write().await; - let scroll_count = app.logs_area_height.saturating_div(2); - if scroll_count > 0 { - app.traf_tab_scroll_up(None); - } - } - KeyCode::Down => { - let mut app = app_state.0.write().await; - app.traf_tab_scroll_down(None); - } - _ => {} - } - }, - } - - match key.code { - KeyCode::Char('1') => { - let mut app = app_state.0.write().await; - app.current_page = Page::Logs; - } - KeyCode::Char('2') => { - let mut app = app_state.0.write().await; - app.current_page = Page::Connections; - } - KeyCode::Char('3') => { - let mut app = app_state.0.write().await; - app.current_page = Page::Statistics; - } - KeyCode::BackTab | KeyCode::Left => { - let mut app = app_state.0.write().await; - match app.current_page { - Page::Logs => app.current_page = Page::Statistics, - Page::Statistics => app.current_page = Page::Connections, - Page::Connections => app.current_page = Page::Logs, - } - } - KeyCode::Tab | KeyCode::Right => { - let mut app = app_state.0.write().await; - match app.current_page { - Page::Logs => app.current_page = Page::Connections, - Page::Statistics => app.current_page = Page::Logs, - Page::Connections => app.current_page = Page::Statistics, - } - } - KeyCode::Char('z') => { - { - let mut app = app_state.0.write().await; - for (_,state) in app.site_states_map.iter_mut() { - if let ProcState::Running = state { - *state = ProcState::Stopping; - } - } - } - tx.clone().send(ProcMessage::StopAll).expect("must always be able to send internal messages"); - } - KeyCode::Char('s') => { - { - let mut app = app_state.0.write().await; - for (k,state) in app.site_states_map.iter_mut() { - if disabled_items.contains(k) { continue } - if let ProcState::Stopped = state { - *state = ProcState::Starting; - } - } - } - tx.clone().send(ProcMessage::StartAll).expect("must always be able to send internal messages"); - } - KeyCode::Char('a') => { - //if let Some(t) = time_since_last_toggle{ - //{ - let mut app = app_state.0.write().await; - app.show_apps_window = !app.show_apps_window; - //last_toggle = Some(now) - //} - //} - } - KeyCode::Esc | KeyCode::Char('q')=> { - { - let mut app = app_state.0.write().await; - app.exit = true; - } - - } - _ => { - - } - } - - - } - - }, - _=> {} - } - - } - } - Result::<(), 
std::io::Error>::Ok(()) - }) - - }; - - _ = tui_handle.await.ok(); - - _ = disable_raw_mode().ok(); - let mut stdout = std::io::stdout(); - execute!(stdout, LeaveAlternateScreen, DisableMouseCapture).expect("should always be possible to leave tui"); - -} - -#[derive(Debug,Default)] -pub struct TrafficTabState { - pub test : String, - pub vertical_scroll_state: ScrollbarState, - pub horizontal_scroll_state: ScrollbarState, - pub vertical_scroll: usize, - pub horizontal_scroll: usize, - pub total_rows : usize, - pub visible_rows : usize, - pub area_height : usize -} - -#[derive(Clone,Debug,Eq,PartialEq)] -pub enum Page { - Logs, - Statistics, - Connections -} - -fn draw_traffic( - f: &mut ratatui::Frame, - app_state: &mut RwLockWriteGuard<'_, AppState>, - area: Rect, - theme: &Theme -) { - let headers = [ "Site", "Source", "Target", "Description"]; - - let rows : Vec> = app_state.statistics.read().expect("must be able to read stats").active_connections.iter().map(|((src,_id),target)| { - let typ = match &target.connection_type { - ProxyActiveConnectionType::TcpTunnelUnencryptedHttp => "UNENCRYPTED TCP TUNNEL".to_string(), - ProxyActiveConnectionType::TcpTunnelTls => - "TLS ENCRYPTED TCP TUNNEL".to_string(), - ProxyActiveConnectionType::TerminatingHttp { incoming_scheme, incoming_http_version, outgoing_scheme, outgoing_http_version }=> - format!("{incoming_scheme}@{incoming_http_version:?} <-TERMINATING_HTTP-> {outgoing_scheme}@{outgoing_http_version:?}"), - ProxyActiveConnectionType::TerminatingWs { incoming_scheme, incoming_http_version, outgoing_scheme, outgoing_http_version } => - format!("{incoming_scheme}@{incoming_http_version:?} <-TERMINATING_WS-> {outgoing_scheme}@{outgoing_http_version:?}"), - }; - let description = format!("{}",typ); - vec![ - target.target.host_name.clone(), - src.to_string(), - target.target_addr.clone(), - description - ] - }).collect(); - - - let state = &mut app_state.traffic_tab_state; - - let header_height = 1; - let visible_rows = area.height as usize - header_height; - - let start = state.vertical_scroll; - let end = std::cmp::min(start + visible_rows, rows.len()); - - let is_dark_theme = matches!(&theme,Theme::Dark(_)); - - let display_rows = &rows[start..end]; - - let odd_row_bg = if is_dark_theme { Color::from_hsl(15.0, 10.0, 10.0) } else { - Color::Rgb(250,250,250) - }; - let row_bg = if is_dark_theme { Color::from_hsl(10.0, 10.0, 5.0) } else { - Color::Rgb(235,235,255) - }; - - - - let table_rows : Vec<_> = display_rows.iter().enumerate().map(|(i,row)| { - - let is_odd = i % 2 == 0; - - - Row::new(row.iter().map(|x|Cell::new(x.to_string()))).height(1 as u16) - .style( - Style::new() - .bg( - if is_odd { - odd_row_bg - } else { - row_bg - } - ).fg(if is_dark_theme { Color::White } else { Color::Black }) - ) - }).collect(); - - - state.visible_rows = display_rows.iter().len() as usize; - state.total_rows = rows.len(); - - let widths = [ - Constraint::Fill(1), - Constraint::Fill(1), - Constraint::Fill(2), - Constraint::Fill(4), - ]; - - - let headers = Row::new(headers - .iter() - .map(|&h| Cell::from(h).fg(if is_dark_theme {Color::LightGreen} else {Color::Blue}).underlined().add_modifier(Modifier::BOLD)) - ).height(1); - - - let table = Table::new(table_rows, widths.clone()) - .header(headers) - .highlight_style(Style::default().add_modifier(Modifier::BOLD)) - .widths(&widths) - .flex(Flex::Legacy) - .column_spacing(1); - - f.render_widget(table, area); - - - let scrollbar = Scrollbar::default() - .style(Style::default()) - 
.orientation(ScrollbarOrientation::VerticalRight) - .begin_symbol(Some("↑")) - .end_symbol(Some("↓")).thumb_style(Style::new().fg(Color::LightBlue)) - .orientation(ScrollbarOrientation::VerticalRight); - - let height_of_traf_area = area.height.saturating_sub(2); - state.area_height = height_of_traf_area as usize; - - state.vertical_scroll_state = state.vertical_scroll_state.content_length(rows.len().saturating_sub(height_of_traf_area as usize)); - - let scrollbar_area = Rect::new(area.right() - 1, area.top(), 1, area.height); - - f.render_stateful_widget(scrollbar,scrollbar_area, &mut state.vertical_scroll_state); - -} - - - -fn draw_stats( - f: &mut ratatui::Frame, - app_state: &mut RwLockWriteGuard<'_, AppState>, - area: Rect, - _theme: &Theme -) { - - let total_received_tcp_connections = { - let guard = app_state.statistics.read().expect("must always be able to read statistics"); - guard.received_tcp_connections - }; - - let p = Paragraph::new(format!("Total received TCP connections: {total_received_tcp_connections}")); - let p2 = Paragraph::new(format!("..More to come on this page at some point! :D")).fg(Color::DarkGray); - - f.render_widget(p, area.offset(Offset{x:4,y:2})); - f.render_widget(p2, area.offset(Offset{x:4,y:4})); -} - -fn draw_logs( - f: &mut ratatui::Frame, - app_state: &mut RwLockWriteGuard<'_, AppState>, - log_buffer: &Arc>, - area: Rect, - _theme: &Theme -) { - - { - let mut buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); - - if app_state.vertical_scroll.is_none() && buffer.limit.is_none() { - let l = buffer.limit.borrow_mut(); - *l = Some(500); - } else if app_state.vertical_scroll.is_some() && buffer.limit.is_some() { - let l = buffer.limit.borrow_mut(); - *l = None; - } - } - - - let buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); - - let max_msg_width = area.width; - - let item_count = buffer.logs.len().to_string().len().max(6); - - // we do this recalculation on each render in case of window-resize and such - // we should move so that this is done ONCE per log message and not for each log message ever on each render. - let items: Vec = buffer.logs.iter().enumerate().flat_map(|(i,x)|{ - - let level = x.lvl; - - let s = match level { - Level::ERROR => Style::default().fg(Color::Red), - Level::TRACE => Style::default().fg(Color::Gray), - Level::DEBUG => Style::default().fg(Color::Magenta), - Level::WARN => Style::default().fg(Color::Yellow), - Level::INFO => Style::default().fg(Color::Blue) - }; - - let nr_str = format!("{:1$} | ",i+1, item_count); - let lvl_str = format!("{:>1$} ",x.lvl.as_str(),5); - let thread_str = if let Some(n) = &x.thread {format!("{n} ")} else { format!("") }; - - let number = ratatui::text::Span::styled(nr_str.clone(),Style::default().fg(Color::DarkGray)); - let level = ratatui::text::Span::styled(lvl_str.clone(),s); - let thread_name = ratatui::text::Span::styled(thread_str.clone(),Style::default().fg(Color::DarkGray)); - - // if x.msg is wider than the available width, we need to split the message in multiple lines.. 
- let max_width = (max_msg_width as usize).saturating_sub(8).saturating_sub(nr_str.len() + lvl_str.len() + thread_str.len()); - - let l = if x.msg.len() > max_width as usize { - - wrap_string(x.msg.as_str(), max_width as usize) - .into_iter().enumerate() - .map(|(i,m)| - Line::from( - vec![ - number.clone(), - if i == 0 { level.clone() } else { - ratatui::text::Span::styled(" ".repeat(level.clone().content.len()).to_string() ,Style::default()) - }, - thread_name.clone(), - ratatui::text::Span::styled(m,Style::default()) - ] - ) - ).collect::>() - - - } else { - let message = ratatui::text::Span::styled(format!("{} {}",x.src.clone(),x.msg),Style::default()); - vec![Line::from(vec![number,level,thread_name,message])] - - }; - - l - - - }).collect(); - - let wrapped_line_count = items.len(); - app_state.total_line_count = wrapped_line_count; - - let height_of_logs_area = area.height.saturating_sub(0); // header and footer - app_state.logs_area_height = height_of_logs_area as usize; - app_state.logs_area_width = area.width as usize; - - let scroll_pos = { app_state.vertical_scroll }; - - let scrollbar_hovered = app_state.logs_scroll_bar_hovered; - let mut scrollbar_state = app_state.scroll_state.borrow_mut(); - - let max_scroll_pos = items.len().saturating_sub(height_of_logs_area as usize); - - //let clamped_scroll_pos = scroll_pos.unwrap_or(max_scroll_pos).min(max_scroll_pos) as u16; - - let visible_rows = area.height as usize; // Adjust as needed based on your UI - - let start = scroll_pos.unwrap_or(max_scroll_pos); - let end = std::cmp::min(start + visible_rows, items.len()); - - - if start > items.len() || end > items.len() || start >= end { - return - } - - let display_rows = &items[start..end]; - - - let clamped_items : Vec = display_rows.iter().map(|x| { - x.clone() - }).collect(); - - let paragraph = Paragraph::new(clamped_items); - - let mut scrollbar = Scrollbar::default() - .style( Style::default()) - .orientation(ScrollbarOrientation::VerticalRight) - .begin_symbol(Some("↑")) - .end_symbol(Some("↓")).thumb_style(Style::new().fg(Color::LightBlue)); - - if scrollbar_hovered { - scrollbar = scrollbar.thumb_style(Style::default().fg(Color::Yellow).bg(Color::Red)); - } - - *scrollbar_state = scrollbar_state.content_length(items.len().saturating_sub(height_of_logs_area as usize)); - - if scroll_pos.is_none() { - *scrollbar_state = scrollbar_state.position(items.len().saturating_sub(height_of_logs_area as usize)); - } - - - f.render_widget(paragraph, area); - f.render_stateful_widget(scrollbar,area, &mut scrollbar_state); - - - -} - - - -/// Returns a `Style` configured for a dark theme. -fn dark_theme() -> Style { - Style::default() - .fg(Color::White) // Text color - //.bg(Color::Black) // Background color - .add_modifier(Modifier::BOLD) // Text modifier -} - -/// Returns a `Style` configured for a light theme. 
-fn light_theme() -> Style { - Style::default() - .fg(Color::Black) // Text color - // .bg(Color::White) // Background color - .add_modifier(Modifier::ITALIC) // Text modifier -} - - -#[derive(Clone)] -pub enum Theme { - Light(Style), - Dark(Style) -} - -fn draw_ui(f: &mut ratatui::Frame, app_state: &mut RwLockWriteGuard<'_, AppState>,log_buffer: &Arc>, theme: &Theme) { - - let is_dark_theme = matches!(&theme,Theme::Dark(_)); - let theme_style = match theme { - Theme::Light(s) => s, - Theme::Dark(s) => s - }; - - - let size = f.size(); - if size.height < 10 || size.width < 10 { - return - } - - let help_bar_height = 3 as u16; - - - - let constraints = if app_state.show_apps_window { - vec![ - Constraint::Percentage(70), // MAIN SECTION - Constraint::Min(0), // SITES SECTION - Constraint::Length(help_bar_height), // QUICK BAR - ] - } else { - vec![ - Constraint::Min(1), // MAIN SECTION - Constraint::Max(0), - Constraint::Length(help_bar_height), // QUICK BAR - ] - }; - - let vertical = Layout::vertical(constraints); - let [top_area, mid_area, bot_area] = vertical.areas(size); - - //et x = format!("Logs {:?}",app_state.vertical_scroll); - - let main_area = Layout::default() - .direction(Direction::Vertical) - .constraints([ - Constraint::Min(0) - ]) - .split(top_area.clone()); - - // let totrows = app_state.traffic_tab_state.total_rows; - // let traheight = size.height; - - let tabs = ratatui::widgets::Tabs::new( - vec![ - "[1] Logs", - "[2] Connections", - "[3] Stats", - ]).highlight_style( - if is_dark_theme { - Style::new().fg(Color::Cyan) - } else { - Style::new().fg(Color::LightRed) - } - ) - .select(match app_state.current_page { - Page::Logs => 0, - Page::Connections => 1, - Page::Statistics => 2, - }) - - .divider(ratatui::text::Span::raw("|")); - - let frame_margin = &Margin { horizontal: 1, vertical: 1 }; - - match &app_state.current_page { - Page::Logs => draw_logs(f,app_state,log_buffer,main_area[0].inner(frame_margin),&theme), - Page::Statistics => draw_stats(f,app_state,main_area[0].inner(frame_margin),&theme), - Page::Connections => draw_traffic(f,app_state,main_area[0].inner(frame_margin),&theme), - } - - let frame = - Block::new() - .border_style( - if matches!(&theme,Theme::Dark(_)) { - Style::new().fg(Color::DarkGray) - } else { - Style::new().fg(Color::DarkGray) - } - - ) - .border_type(BorderType::Rounded) - .borders(Borders::ALL); - - f.render_widget(frame, main_area[0]); - - - // render the tab bar on top of the tab content - f.render_widget(tabs, main_area[0].inner(&Margin { horizontal: 2, vertical: 0 })); - - - if app_state.show_apps_window { - - let sites_area_height = mid_area.height.saturating_sub(2); - if sites_area_height == 0 { - return - } - let sites_count = app_state.site_states_map.len() as u16; - let columns_needed = ((sites_count as f32 / sites_area_height as f32).ceil()).max(1.0) as usize; - - let site_columns = Layout::default() - .direction(Direction::Horizontal) - .flex(ratatui::layout::Flex::Legacy) - .constraints(vec![Constraint::Percentage(100 / columns_needed as u16); columns_needed]) - .split(mid_area); - - let mut site_rects = vec![]; - - for (col_idx, col) in site_columns.iter().enumerate() { - - let mut procly : Vec<(&String, &ProcState)> = app_state.site_states_map.iter().collect(); - procly.sort_by_key(|k| k.0); - - let start_idx = col_idx * sites_area_height as usize; - let end_idx = ((col_idx + 1) * sites_area_height as usize).min(app_state.site_states_map.len()); - let items: Vec = 
procly[start_idx..end_idx].iter().enumerate().map(|(index,(id, state))| { - - let item_rect = ratatui::layout::Rect { - x: col.x, - y: col.y + index as u16 + 1, // Assuming each ListItem is one line high - width: col.width, - height: 1, // Assuming each ListItem is one line high - }; - - site_rects.push((item_rect,id.to_string())); - - let mut s = match state { - &ProcState::Running => Style::default().fg( - if is_dark_theme { - Color::LightGreen - } else { - Color::Green - } - ), - &ProcState::Faulty => Style::default().fg( - if is_dark_theme { - Color::Red - } else { - Color::Red - } - ), - &ProcState::Starting => Style::default().fg( - if is_dark_theme { - Color::Green - } else { - Color::Green - } - ), - &ProcState::Stopped => Style::default().fg( - if is_dark_theme{ - Color::DarkGray - } else { - Color::DarkGray - } - ), - &ProcState::Stopping => Style::default().fg( - if is_dark_theme { - Color::Black - } else { - Color::Yellow - } - ), - &ProcState::Remote => Style::default().fg( - if is_dark_theme { - Color::Blue - } else { - Color::Blue - } - ), - }; - - let mut id_style = theme_style.clone(); - if let Some(hovered) = &app_state.currently_hovered_site { - if hovered == *id { - id_style = id_style.add_modifier(Modifier::BOLD); - s = if is_dark_theme { s.bg(Color::Gray) } else { s.bg(Color::Gray) }; - } - } - - - let message = ratatui::text::Span::styled(format!(" {id} "),id_style); - - let status = ratatui::text::Span::styled(format!("{:?}",state),s); - - ListItem::new(Line::from(vec![ - message, - status - ])) - - }).collect(); - - let sites_list = List::new(items) - .block( - Block::new() - .border_style(Style::default().fg(Color::DarkGray)) - .border_type(BorderType::Rounded) - .borders(Borders::ALL) - .title(" Sites ").title_alignment(Alignment::Left) - .title_style( - if is_dark_theme { - Style::default().fg(Color::Cyan) - } else { - Style::default().fg(Color::Blue) - } - ) - ) - .highlight_style(Style::default().add_modifier(Modifier::BOLD)) - .highlight_symbol(">> "); - - f.render_widget(sites_list, *col); - } - app_state.site_rects = site_rects; - } - - - - let help_bar_chunk = Layout::default() - .direction(Direction::Vertical) - .constraints([ - Constraint::Min(0), - Constraint::Length(3) - ]) - .split(bot_area.clone()); - - - - let mut help_bar_text = vec![ - ratatui::text::Span::raw("q: Quit | "), - ratatui::text::Span::raw("a: Toggle Sites | "), - ]; - - - help_bar_text.push(ratatui::text::Span::raw("s: Start all | ")); - help_bar_text.push(ratatui::text::Span::raw("z: Stop all | ")); - - help_bar_text.push(ratatui::text::Span::raw("↑/↓: Scroll | ")); - help_bar_text.push(ratatui::text::Span::raw("PgUp/PgDn Scroll ")); - - - if Page::Logs == app_state.current_page { - help_bar_text.push(ratatui::text::Span::raw("c: Clear | ")); - help_bar_text.push(ratatui::text::Span::raw("tab: toggle page ")); - if app_state.vertical_scroll.is_some() { - help_bar_text.push(ratatui::text::Span::raw("| enter: Tail log ")); - } - } else { - help_bar_text.push(ratatui::text::Span::raw("| tab: toggle page")); - } - - - // // DEBUG - // help_bar_text.push(ratatui::text::Span::raw(format!("| DBG: {}", - // app_state.dbg - // ))); - let current_version = self_update::cargo_crate_version!(); - let help_bar = Paragraph::new(Line::from(help_bar_text)) - .style(Style::default().fg(Color::DarkGray)) - .alignment(Alignment::Center) - .block(Block::default().borders(Borders::ALL) - .border_type(BorderType::Rounded) - .border_style(Style::default().fg(Color::DarkGray)) - .title(format!(" ODD-BOX 
v{current_version}")).title_style( - if is_dark_theme { - Style::default().fg(Color::LightYellow) - } else { - Style::default().fg(Color::Black) - } - )); - - f.render_widget(help_bar, help_bar_chunk[1]); - - -} - -fn wrap_string(input: &str, max_length: usize) -> Vec { - - let words = input.split_whitespace(); - let mut wrapped_lines = Vec::new(); - let mut current_line = String::new(); - - for word in words { - // Check if adding the next word exceeds the max_length - if current_line.len() + word.len() + 1 > max_length { - // Add the current line to the vector and start a new line - wrapped_lines.push(current_line); - current_line = String::new(); - } - - // If the line is not empty, add a space before the next word - if !current_line.is_empty() { - current_line.push(' '); - } - - // Add the word to the current line - current_line.push_str(word); - } - - // Add the last line if it's not empty - if !current_line.is_empty() { - wrapped_lines.push(current_line); - } - - wrapped_lines +use axum_extra::handler::Or; +use crossterm::event::{KeyEvent, KeyModifiers}; +use ratatui::layout::{Alignment, Margin}; +use ratatui::style::{Color, Modifier, Style }; +use ratatui::text::Line; +use ratatui::widgets::{BorderType, List, ListItem }; +use tokio::sync::RwLockWriteGuard; +use tokio::task; +use tracing_subscriber::EnvFilter; +use tracing_subscriber::layer::SubscriberExt; +use std::io::Stdout; +use crate::global_state::GlobalState; +use crate::logging::SharedLogBuffer; +use crate::logging::LogMsg; +use crate::types::app_state::{self, *}; +use crate::types::tui_state::{Page, TuiState}; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::Duration; +use crate::http_proxy::ProcMessage; + +use serde::ser::SerializeStruct; + +mod connections_widget; +mod logs_widget; +mod stats_widget; +mod threads_widget; +pub mod scroll_state_wrapper; + +use ratatui::{ + layout::{Constraint, Direction, Layout}, + widgets::{Block, Borders, Paragraph} +}; + +use crossterm::{ + event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode}, + execute, + terminal::{ + disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, +}; + +use ratatui::{ + backend::CrosstermBackend, + Terminal, +}; + +impl serde::Serialize for LogMsg { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer { + let mut s = serializer.serialize_struct("LogMsg", 4)?; + s.serialize_field("msg", &self.msg)?; + s.serialize_field("lvl", &self.lvl.as_str())?; + s.serialize_field("src", &self.src)?; + s.serialize_field("thread", &self.thread.as_ref().unwrap_or(&"".to_string()))?; + s.end() + + } +} + + +pub fn init() { + _ = enable_raw_mode().expect("must be able to enable raw mode");(); + execute!(std::io::stdout(), EnterAlternateScreen, EnableMouseCapture).expect("must always be able to enter alt screen"); +} + +pub async fn run( + global_state: Arc, + tx: tokio::sync::broadcast::Sender, + trace_msg_broadcaster: tokio::sync::broadcast::Sender, + reloadable_filter : tracing_subscriber::reload::Layer>, +) { + + + let log_buffer = Arc::new(Mutex::new(SharedLogBuffer::new())); + let layer = crate::logging::TuiLoggerLayer { log_buffer: log_buffer.clone(), broadcaster: trace_msg_broadcaster }; + + let subscriber = tracing_subscriber::registry() + .with(layer).with(reloadable_filter); + + tracing::subscriber::set_global_default(subscriber).expect("Failed to set collector"); + + + let backend = CrosstermBackend::new(std::io::stdout()); + + let terminal = Terminal::new(backend).expect("must be possible to 
create terminal"); + + let terminal = Arc::new(tokio::sync::Mutex::new(terminal)); + + + let mut manually_selected_theme : Option = None; + + let dark_style = dark_theme(); + let light_style = light_theme(); + + let mut theme = match dark_light::detect() { + dark_light::Mode::Dark => Theme::Dark(dark_style), + dark_light::Mode::Light => Theme::Light(light_style), + dark_light::Mode::Default => Theme::Dark(dark_style), + }; + + let mut count = 0; + + let disabled_items : Vec = global_state.config.read().await.hosted_process.clone().unwrap_or_default().iter_mut().filter_map( |x| + if x.auto_start.unwrap_or_default() { + Some(x.host_name.clone()) + } else { + None + } + ).collect(); + + + // TUI event loop + let tui_handle = { + let terminal = Arc::clone(&terminal); + let state = global_state.clone(); + let tx = tx.clone(); + task::spawn(async move { + + let tx = tx.clone(); + + let mut last_key_time = tokio::time::Instant::now(); + let debounce_duration = Duration::from_millis(100); + + let mut tui_state = crate::types::tui_state::TuiState::new(); + + loop { + + { + + if global_state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true { + if global_state.app_state.site_status_map.iter().find(|x| + x.value() == &ProcState::Stopping + || x.value() == &ProcState::Running + || x.value() == &ProcState::Starting + + ).is_none() { + break; // nothing is running,stopping or starting.. we can exit now + } + } + } + + if count > 100 { + theme = match dark_light::detect() { + dark_light::Mode::Dark => Theme::Dark(dark_style), + dark_light::Mode::Light => Theme::Light(light_style), + dark_light::Mode::Default => Theme::Dark(dark_style), + }; + count = 0; + + } + + // KEEP LOCK SHORT TO AVOID DEADLOCK + { + let mut terminal = terminal.lock().await; + + terminal.draw(|f| + draw_ui::>( + f, + global_state.clone(), + &mut tui_state, + &log_buffer,&theme + ) + )?; + } + + // } + + + // Handle input + if event::poll(std::time::Duration::from_millis(20))? 
{ + let now = tokio::time::Instant::now(); + let time_since_last_keypress = now.duration_since(last_key_time); + let (current_page,sites_open) = { + (tui_state.current_page.clone(),tui_state.show_apps_window) + }; + let evt = event::read()?; + match evt { + Event::Key(KeyEvent { + code: crossterm::event::KeyCode::Char('c'), + modifiers: KeyModifiers::CONTROL, + kind: _, + state:_ + }) => { + state.app_state.exit.store(true, std::sync::atomic::Ordering::SeqCst); + break; + }, + Event::Mouse(mouse) => { + if sites_open { + match mouse.kind { + event::MouseEventKind::Moved => { + tui_state.sites_handle_mouse_hover(mouse.column,mouse.row); + } + event::MouseEventKind::Down(event::MouseButton::Left) => { + tui_state.sites_handle_mouse_click(mouse.column,mouse.row,tx.clone(), &state.app_state.site_status_map) + } + _ => {} + } + } + match current_page { + Page::Statistics => { + + } + Page::Logs => { + match mouse.kind { + event::MouseEventKind::Drag(event::MouseButton::Left) => { + tui_state.log_tab_stage.scroll_state.handle_mouse_drag(mouse.column,mouse.row); + } + event::MouseEventKind::Moved => { + tui_state.log_tab_stage.scroll_state.handle_mouse_move(mouse.column,mouse.row); + } + + event::MouseEventKind::ScrollDown => { + tui_state.log_tab_stage.scroll_state.scroll_down(Some(10)); + }, + event::MouseEventKind::ScrollUp => { + tui_state.log_tab_stage.scroll_state.scroll_up(Some(10)); + }, + _ => {} + } + }, + Page::Threads => { + match mouse.kind { + + event::MouseEventKind::Drag(event::MouseButton::Left) => { + tui_state.threads_tab_state.scroll_state.handle_mouse_drag(mouse.column,mouse.row); + } + event::MouseEventKind::Moved => { + tui_state.threads_tab_state.scroll_state.handle_mouse_move(mouse.column,mouse.row); + } + event::MouseEventKind::ScrollDown => { + tui_state.threads_tab_state.scroll_state.scroll_down(Some(10)); + }, + event::MouseEventKind::ScrollUp => { + tui_state.threads_tab_state.scroll_state.scroll_up(Some(10)); + }, + _ => {} + } + }, + Page::Connections => { + match mouse.kind { + event::MouseEventKind::ScrollDown => { + tui_state.connections_tab_state.scroll_state.scroll_down(Some(10)); + }, + event::MouseEventKind::ScrollUp => { + tui_state.connections_tab_state.scroll_state.scroll_up(Some(10)); + }, + _ => {} + } + }, + } + + } + Event::Key(key) => { + + if time_since_last_keypress >= debounce_duration { + last_key_time = now; + match key.code { + KeyCode::Esc | KeyCode::Char('q') => { + { + tracing::warn!("User requested exit"); + state.app_state.exit.store(true, std::sync::atomic::Ordering::SeqCst); + break; + } + + }, + KeyCode::Char('t') => { + match manually_selected_theme { + None => { + // when switching from auto to manual theme, we will switch to the opposite of the current theme + match theme { + Theme::Light(_) => { + manually_selected_theme = Some(dark_light::Mode::Dark); + theme = Theme::Dark(dark_style); + }, + Theme::Dark(_) => { + manually_selected_theme = Some(dark_light::Mode::Light); + theme = Theme::Light(light_style); + }, + } + } + Some(dark_light::Mode::Dark) => { + manually_selected_theme = Some(dark_light::Mode::Light); + theme = Theme::Light(light_style); + }, + Some(dark_light::Mode::Light) => { + manually_selected_theme = Some(dark_light::Mode::Dark); + theme = Theme::Dark(dark_style); + }, + _ => {}, + }; + }, + _ => {} + } + match current_page { + Page::Threads => { + match key.code { + KeyCode::PageUp => { + tui_state.threads_tab_state.scroll_state.scroll_up(Some(10)); + } + KeyCode::PageDown => { + 
tui_state.threads_tab_state.scroll_state.scroll_down(Some(10)) + + }, + KeyCode::Up => { + tui_state.threads_tab_state.scroll_state.scroll_up(None) + } + KeyCode::Down => { + tui_state.threads_tab_state.scroll_state.scroll_down(None) + } + _ => {} + } + }, + Page::Logs => { + match key.code { + KeyCode::Enter => { + tui_state.log_tab_stage.scroll_state.vertical_scroll = None; + let mut buf = log_buffer.lock().expect("must always be able to lock log buffer"); + match buf.limit { + Some(x) => { + while buf.logs.len() > x { + buf.logs.pop_front(); + } + }, + None => {}, + } + + } + KeyCode::Up => { + tui_state.log_tab_stage.scroll_state.scroll_up(Some(1)); + } + KeyCode::Down => { + tui_state.log_tab_stage.scroll_state.scroll_down(Some(1)); + } + KeyCode::PageUp => { + let scroll_count = tui_state.log_tab_stage.scroll_state.area_height.saturating_div(2); + if scroll_count > 0 { + tui_state.log_tab_stage.scroll_state.scroll_up(Some(scroll_count)); + } + } + KeyCode::PageDown => { + let scroll_count = tui_state.log_tab_stage.scroll_state.area_height.saturating_div(2); + if scroll_count > 0 { + tui_state.log_tab_stage.scroll_state.scroll_down(Some(scroll_count)); + } + }, + KeyCode::Char('c') => { + let mut buf = log_buffer.lock().expect("must always be able to lock log buffer"); + tui_state.log_tab_stage.scroll_state.total_rows = 0; + tui_state.log_tab_stage.scroll_state.vertical_scroll = None; + tui_state.log_tab_stage.scroll_state.vertical_scroll_state = tui_state.log_tab_stage.scroll_state.vertical_scroll_state.position(0); + buf.logs.clear(); + + } + _ => {} + } + } + Page::Statistics => {}, + Page::Connections => { + match key.code { + KeyCode::PageUp => { + tui_state.connections_tab_state.scroll_state.scroll_up(Some(10)); + } + KeyCode::PageDown => { + tui_state.connections_tab_state.scroll_state.scroll_down(Some(10)); + + }, + KeyCode::Up => { + let scroll_count = tui_state.connections_tab_state.scroll_state.area_height.saturating_div(2); + if scroll_count > 0 { + tui_state.connections_tab_state.scroll_state.scroll_up(None); + } + } + KeyCode::Down => { + tui_state.connections_tab_state.scroll_state.scroll_down(None); + } + _ => {} + } + }, + } + + match key.code { + + KeyCode::Char('1') => { + tui_state.current_page = Page::Logs; + } + KeyCode::Char('2') => { + tui_state.current_page = Page::Connections; + } + KeyCode::Char('3') => { + tui_state.current_page = Page::Statistics; + } + KeyCode::Char('4') => { + tui_state.current_page = Page::Threads; + } + KeyCode::BackTab | KeyCode::Left => { + match tui_state.current_page { + Page::Logs => tui_state.current_page = Page::Threads, + Page::Threads => tui_state.current_page = Page::Statistics, + Page::Statistics => tui_state.current_page = Page::Connections, + Page::Connections => tui_state.current_page = Page::Logs + } + } + KeyCode::Tab | KeyCode::Right => { + match tui_state.current_page { + Page::Logs => tui_state.current_page = Page::Connections, + Page::Connections => tui_state.current_page = Page::Statistics, + Page::Statistics => tui_state.current_page = Page::Threads, + Page::Threads => tui_state.current_page = Page::Logs + } + } + KeyCode::Char('z') => { + { + for mut guard in global_state.app_state.site_status_map.iter_mut() { + let (_k,state) = guard.pair_mut(); + if let ProcState::Running = state { + *state = ProcState::Stopping; + } + } + } + tx.clone().send(ProcMessage::StopAll).expect("must always be able to send internal messages"); + } + KeyCode::Char('s') => { + { + for mut guard in 
global_state.app_state.site_status_map.iter_mut() { + let (_k,state) = guard.pair_mut(); + if let ProcState::Running = state { + *state = ProcState::Starting; + } + } + } + tx.clone().send(ProcMessage::StartAll).expect("must always be able to send internal messages"); + } + KeyCode::Char('a') => { + tui_state.show_apps_window = !tui_state.show_apps_window; + } + + _ => { + + } + } + + + + } + + }, + _=> {} + } + + } + } + Result::<(), std::io::Error>::Ok(()) + }) + + }; + + _ = tui_handle.await.ok(); + + _ = disable_raw_mode().ok(); + let mut stdout = std::io::stdout(); + execute!(stdout, LeaveAlternateScreen, DisableMouseCapture).expect("should always be possible to leave tui"); + +} + + + +/// Returns a `Style` configured for a dark theme. +fn dark_theme() -> Style { + Style::default() + .fg(Color::White) // Text color + //.bg(Color::Black) // Background color + .add_modifier(Modifier::BOLD) // Text modifier +} + +/// Returns a `Style` configured for a light theme. +fn light_theme() -> Style { + Style::default() + .fg(Color::Black) // Text color + // .bg(Color::White) // Background color + .add_modifier(Modifier::ITALIC) // Text modifier +} + +#[derive(Clone)] +pub enum Theme { + Light(Style), + Dark(Style) +} + +fn draw_ui( + f: &mut ratatui::Frame, + global_state: Arc, + tui_state: &mut TuiState, + log_buffer: &Arc>, + theme: &Theme +) { + + + let is_dark_theme = matches!(&theme,Theme::Dark(_)); + let theme_style = match theme { + Theme::Light(s) => s, + Theme::Dark(s) => s + }; + + + let size = f.size(); + if size.height < 10 || size.width < 10 { + return + } + + let help_bar_height = 3 as u16; + + let constraints = if tui_state.show_apps_window { + vec![ + Constraint::Percentage(70), // MAIN SECTION + Constraint::Min(0), // SITES SECTION + Constraint::Length(help_bar_height), // QUICK BAR + ] + } else { + vec![ + Constraint::Min(1), // MAIN SECTION + Constraint::Max(0), + Constraint::Length(help_bar_height), // QUICK BAR + ] + }; + + let vertical = Layout::vertical(constraints); + let [top_area, mid_area, bot_area] = vertical.areas(size); + + //et x = format!("Logs {:?}",app_state.vertical_scroll); + + let main_area = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Min(0) + ]) + .split(top_area.clone()); + + // let totrows = app_state.traffic_tab_state.total_rows; + // let traheight = size.height; + + let tabs = ratatui::widgets::Tabs::new( + vec![ + "[1] Logs", + "[2] Connections", + "[3] Stats", + "[4] Threads" + ]).highlight_style( + if is_dark_theme { + Style::new().fg(Color::Cyan) + } else { + Style::new().fg(Color::LightRed) + } + ) + .select(match tui_state.current_page { + Page::Logs => 0, + Page::Connections => 1, + Page::Statistics => 2, + Page::Threads => 3 + }) + + .divider(ratatui::text::Span::raw("|")); + + let frame_margin = &Margin { horizontal: 1, vertical: 1 }; + + match tui_state.current_page { + Page::Logs => logs_widget::draw(f,global_state.clone(),tui_state,log_buffer,main_area[0].inner(frame_margin),&theme), + Page::Statistics => stats_widget::draw(f,global_state.clone(),tui_state,main_area[0].inner(frame_margin),&theme), + Page::Connections => connections_widget::draw(f,global_state.clone(),tui_state,main_area[0].inner(frame_margin),&theme), + Page::Threads => threads_widget::draw(f,global_state.clone(),tui_state,main_area[0].inner(frame_margin),&theme) + } + + let frame = + Block::new() + .border_style( + if matches!(&theme,Theme::Dark(_)) { + Style::new().fg(Color::DarkGray) + } else { + Style::new().fg(Color::DarkGray) + } 
+ + ) + .border_type(BorderType::Rounded) + .borders(Borders::ALL); + + f.render_widget(frame, main_area[0]); + + + // render the tab bar on top of the tab content + f.render_widget(tabs, main_area[0].inner(&Margin { horizontal: 2, vertical: 0 })); + + if tui_state.show_apps_window { + + let sites_area_height = mid_area.height.saturating_sub(2); + if sites_area_height == 0 { + return + } + let sites_count = global_state.app_state.site_status_map.iter().count(); + let columns_needed = ((sites_count as f32 / sites_area_height as f32).ceil()).max(1.0) as usize; + + let site_columns = Layout::default() + .direction(Direction::Horizontal) + .flex(ratatui::layout::Flex::Legacy) + .constraints(vec![Constraint::Percentage(100 / columns_needed as u16); columns_needed]) + .split(mid_area); + + let mut site_rects = vec![]; + + for (col_idx, col) in site_columns.iter().enumerate() { + + let mut procly : Vec<(String, ProcState)> = global_state.app_state.site_status_map.iter() + .map(|x|{ + let (a,b) = x.pair(); + (a.to_string(),b.to_owned()) + }).collect(); + + // todo -- no clone + procly.sort_by_key(|k| k.0.clone()); + + let start_idx = col_idx * sites_area_height as usize; + let end_idx = ((col_idx + 1) * sites_area_height as usize).min(sites_count); + let items: Vec = procly[start_idx..end_idx].iter().enumerate().map(|(index,(id, state))| { + + let item_rect = ratatui::layout::Rect { + x: col.x, + y: col.y + index as u16 + 1, // Assuming each ListItem is one line high + width: col.width, + height: 1, // Assuming each ListItem is one line high + }; + + site_rects.push((item_rect,id.to_string())); + + let mut s = match state { + &ProcState::Running => Style::default().fg( + if is_dark_theme { + Color::LightGreen + } else { + Color::Green + } + ), + &ProcState::Faulty => Style::default().fg( + if is_dark_theme { + Color::Rgb(200, 150, 150) + } else { + Color::Rgb(200, 150, 150) + } + ), + &ProcState::Starting => Style::default().fg( + if is_dark_theme { + Color::Green + } else { + Color::Green + } + ), + &ProcState::Stopped => Style::default().fg( + if is_dark_theme{ + Color::DarkGray + } else { + Color::DarkGray + } + ), + &ProcState::Stopping => Style::default().fg( + if is_dark_theme { + Color::Black + } else { + Color::Yellow + } + ), + &ProcState::Remote => Style::default().fg( + if is_dark_theme { + Color::Blue + } else { + Color::Blue + } + ), + }; + + let mut id_style = theme_style.clone(); + if let Some(hovered) = &tui_state.currently_hovered_site { + if hovered == id { + id_style = id_style.add_modifier(Modifier::BOLD); + s = if is_dark_theme { s.bg(Color::Gray) } else { s.bg(Color::Gray) }; + } + } + + + let message = ratatui::text::Span::styled(format!(" {id} "),id_style); + + let status = match state { + &ProcState::Running => ratatui::text::Span::styled(format!("{:?}",state),s), + &ProcState::Faulty => ratatui::text::Span::styled(format!("{:?} (retrying in 5s)",state),s), + &ProcState::Starting => ratatui::text::Span::styled(format!("{:?}",state),s), + &ProcState::Stopped => ratatui::text::Span::styled(format!("{:?}",state),s), + &ProcState::Stopping => ratatui::text::Span::styled(format!("{:?}..",state),s), + &ProcState::Remote => ratatui::text::Span::styled(format!("{:?}",state),s) + }; + + ListItem::new(Line::from(vec![ + message, + status + ])) + + }).collect(); + + let sites_list = List::new(items) + .block( + Block::new() + .border_style(Style::default().fg(Color::DarkGray)) + .border_type(BorderType::Rounded) + .borders(Borders::ALL) + .title(" Sites 
").title_alignment(Alignment::Left) + .title_style( + if is_dark_theme { + Style::default().fg(Color::Cyan) + } else { + Style::default().fg(Color::Blue) + } + ) + ) + .highlight_style(Style::default().add_modifier(Modifier::BOLD)) + .highlight_symbol(">> "); + + f.render_widget(sites_list, *col); + } + + tui_state.site_rects = site_rects; + } + + + + let help_bar_chunk = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Min(0), + Constraint::Length(3) + ]) + .split(bot_area.clone()); + + + + let mut help_bar_text = vec![ + ratatui::text::Span::raw("q: Quit | "), + ratatui::text::Span::raw("a: Toggle Sites | "), + ]; + + + help_bar_text.push(ratatui::text::Span::raw("s: Start all | ")); + help_bar_text.push(ratatui::text::Span::raw("z: Stop all | ")); + + help_bar_text.push(ratatui::text::Span::raw("↑/↓: Scroll | ")); + help_bar_text.push(ratatui::text::Span::raw("PgUp/PgDn Scroll ")); + + + if Page::Logs == tui_state.current_page { + help_bar_text.push(ratatui::text::Span::raw("c: Clear | ")); + help_bar_text.push(ratatui::text::Span::raw("tab: toggle page ")); + if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_some() { + help_bar_text.push(ratatui::text::Span::raw("| enter: Tail log ")); + } + } else { + help_bar_text.push(ratatui::text::Span::raw("| tab: toggle page")); + } + + + // // DEBUG + // help_bar_text.push(ratatui::text::Span::raw(format!("| DBG: {}", + // app_state.dbg + // ))); + let current_version = self_update::cargo_crate_version!(); + let help_bar = Paragraph::new(Line::from(help_bar_text)) + .style(Style::default().fg(Color::DarkGray)) + .alignment(Alignment::Center) + .block(Block::default().borders(Borders::ALL) + .border_type(BorderType::Rounded) + .border_style(Style::default().fg(Color::DarkGray)) + .title(format!(" ODD-BOX v{current_version}")).title_style( + if is_dark_theme { + Style::default().fg(Color::LightYellow) + } else { + Style::default().fg(Color::Black) + } + )); + + f.render_widget(help_bar, help_bar_chunk[1]); + + +} + +fn wrap_string(input: &str, max_length: usize) -> Vec { + + let words = input.split_whitespace(); + let mut wrapped_lines = Vec::new(); + let mut current_line = String::new(); + + for word in words { + // Check if adding the next word exceeds the max_length + if current_line.len() + word.len() + 1 > max_length { + // Add the current line to the vector and start a new line + wrapped_lines.push(current_line); + current_line = String::new(); + } + + // If the line is not empty, add a space before the next word + if !current_line.is_empty() { + current_line.push(' '); + } + + // Add the word to the current line + current_line.push_str(word); + } + + // Add the last line if it's not empty + if !current_line.is_empty() { + wrapped_lines.push(current_line); + } + + wrapped_lines } \ No newline at end of file diff --git a/src/tui/scroll_state_wrapper.rs b/src/tui/scroll_state_wrapper.rs new file mode 100644 index 0000000..28b0390 --- /dev/null +++ b/src/tui/scroll_state_wrapper.rs @@ -0,0 +1,105 @@ +use ratatui::widgets::ScrollbarState; + + +#[derive(Debug,Default)] +pub struct ScrollStateWrapper { + pub vertical_scroll_state: ScrollbarState, + pub horizontal_scroll_state: ScrollbarState, + pub vertical_scroll: Option, + pub horizontal_scroll: Option, + pub total_rows : usize, + pub visible_rows : usize, + pub area_height : usize, + pub area_width : usize, + pub scroll_bar_hovered : bool, + pub last_mouse_down_y_pos : usize +} + +impl ScrollStateWrapper { + + pub fn scroll_up(&mut self, count:Option) 
{ + match self.vertical_scroll { + Some(current) if current > 0 => { + let new_val = current.saturating_sub(count.unwrap_or(1)).max(0); + self.vertical_scroll = Some(new_val); + self.vertical_scroll_state = self.vertical_scroll_state.position(new_val); + } + None => { + let max = self.total_rows.saturating_sub(self.area_height); + let new_val = max.saturating_sub(count.unwrap_or(1)); + self.vertical_scroll = Some(new_val); + self.vertical_scroll_state = self.vertical_scroll_state.position(new_val); + } + _ => {} + } + } + + pub fn calculate_thumb_size(&self) -> f32 { + if self.total_rows <= self.area_height { + // this is just if we dont need a scrollbar - in which case its just going to be hidden anyway + self.area_height as f32 + } else { + let thumb_size = (self.area_height as f64 / self.total_rows as f64) * self.area_height as f64; + thumb_size.ceil() as f32 + } + } + pub fn handle_mouse_move(&mut self, column: u16, row: u16) { + let thumb_size = self.calculate_thumb_size().max(1.0); + let max_scroll = self.total_rows.saturating_sub(self.area_height); + let vscroll = self.vertical_scroll.unwrap_or(max_scroll); + let thumb_position = if self.total_rows > self.area_height { + (vscroll as f32 / (self.total_rows as f32 - self.area_height as f32)) * (self.area_height as f32 - thumb_size) + } else { + 1.0 + }.max(1.0); + let horizontal_match = column as usize >= self.area_width - 1 && column as usize <= self.area_width + 1; + let vertical_match = (row as isize >= thumb_position as isize - 2) && row as usize <= (thumb_position + thumb_size + 1.0) as usize; + //self.dbg = format!("dragging pos: {row}/{column} - vscroll: {} - tpos: {thumb_position} | V: {vertical_match}, H: {horizontal_match}",vscroll); + self.scroll_bar_hovered = horizontal_match && vertical_match; + } + + + pub fn handle_mouse_drag(&mut self, _column: u16, row: u16) { + + if self.scroll_bar_hovered { + + let max_scroll = self.total_rows.saturating_sub(self.area_height); + + let click_position = (row as usize).min(self.area_height).max(0); + let percentage = click_position as f32 / self.area_height as f32; + let scroll_to = (percentage * self.total_rows as f32).round() as usize; + + let new_val = scroll_to.min(max_scroll); + + if new_val == max_scroll { + self.vertical_scroll = None; + self.vertical_scroll_state = self.vertical_scroll_state.position(new_val); + } else { + self.vertical_scroll = Some(new_val); + self.vertical_scroll_state = self.vertical_scroll_state.position(new_val); + } + + } else { + + self.last_mouse_down_y_pos = row as usize; + } + + + } + + pub fn scroll_down(&mut self, count: Option) { + + let max = self.total_rows.saturating_sub(self.area_height); + let current = self.vertical_scroll.unwrap_or(max); + + if current < max { + let new_val = current.saturating_add(count.unwrap_or(1)).min(max); + self.vertical_scroll = Some(new_val); + self.vertical_scroll_state = self.vertical_scroll_state.position(new_val); + } + else { + self.vertical_scroll = None; + } + } + +} diff --git a/src/tui/stats_widget.rs b/src/tui/stats_widget.rs new file mode 100644 index 0000000..4561007 --- /dev/null +++ b/src/tui/stats_widget.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use ratatui::layout::{Offset, Rect}; +use ratatui::style::{Color, Stylize}; +use ratatui::widgets::Paragraph; +use tokio::sync::RwLockWriteGuard; +use crate::global_state::GlobalState; +use crate::types::app_state::*; +use crate::types::tui_state::TuiState; +use super::Theme; + + + + +pub fn draw( + f: &mut ratatui::Frame, + global_state: Arc, + tui_state: 
&mut TuiState, + area: Rect, + _theme: &Theme +) { + + let total_received_tcp_connections = global_state.request_count.load(std::sync::atomic::Ordering::Relaxed); + + let p = Paragraph::new(format!("Total received TCP connections: {total_received_tcp_connections}")); + let p2 = Paragraph::new(format!("..More to come on this page at some point! :D")).fg(Color::DarkGray); + + f.render_widget(p, area.offset(Offset{x:4,y:2})); + f.render_widget(p2, area.offset(Offset{x:4,y:4})); +} diff --git a/src/tui/threads_widget.rs b/src/tui/threads_widget.rs new file mode 100644 index 0000000..9373543 --- /dev/null +++ b/src/tui/threads_widget.rs @@ -0,0 +1,129 @@ +use std::sync::Arc; + +use ratatui::layout::{ Constraint, Flex, Rect}; +use ratatui::style::{Color, Modifier, Style, Stylize}; +use ratatui::widgets::{ Cell, Row, Scrollbar, ScrollbarOrientation, Table}; +use tokio::sync::RwLockWriteGuard; +use crate::global_state::GlobalState; +use crate::types::app_state::*; +use crate::types::tui_state::TuiState; +use super::Theme; + + +pub fn draw( + f: &mut ratatui::Frame, + mut global_state: Arc, + tui_state: &mut TuiState, + area: Rect, + theme: &Theme +) { + + + let headers = [ "Site", "Source", "Target", "Description"]; + + let rows : Vec> = crate::THREAD_MAP.lock().unwrap().iter().map(|(thread_id, _thread_info)| { + let typ = "some thread"; + let description = format!("{}",typ); + vec![ + format!("{:?}",thread_id), + "something else".to_string(), + "more stuff".to_string(), + description + ] + }).collect(); + + let wrapped_line_count = rows.len(); + + tui_state.log_tab_stage.scroll_state.total_rows = wrapped_line_count; + + let height_of_logs_area = area.height.saturating_sub(0); // header and footer + tui_state.log_tab_stage.scroll_state.area_height = height_of_logs_area as usize; + tui_state.log_tab_stage.scroll_state.area_width = area.width as usize; + + let header_height = 1; + let visible_rows = area.height as usize - header_height; + + let start = tui_state.log_tab_stage.scroll_state.vertical_scroll.unwrap_or_default(); + let end = std::cmp::min(start + visible_rows, rows.len()); + + let is_dark_theme = matches!(&theme,Theme::Dark(_)); + + let display_rows = &rows[start..end]; + + let odd_row_bg = if is_dark_theme { Color::from_hsl(15.0, 10.0, 10.0) } else { + Color::Rgb(250,250,250) + }; + let row_bg = if is_dark_theme { Color::from_hsl(10.0, 10.0, 5.0) } else { + Color::Rgb(235,235,255) + }; + + + + let table_rows : Vec<_> = display_rows.iter().enumerate().map(|(i,row)| { + + let is_odd = i % 2 == 0; + + + Row::new(row.iter().map(|x|Cell::new(x.to_string()))).height(1 as u16) + .style( + Style::new() + .bg( + if is_odd { + odd_row_bg + } else { + row_bg + } + ).fg(if is_dark_theme { Color::White } else { Color::Black }) + ) + }).collect(); + + + tui_state.log_tab_stage.scroll_state.visible_rows = display_rows.iter().len() as usize; + tui_state.log_tab_stage.scroll_state.total_rows = rows.len(); + + let widths = [ + Constraint::Fill(1), + Constraint::Fill(1), + Constraint::Fill(2), + Constraint::Fill(4), + ]; + + + let headers = Row::new(headers + .iter() + .map(|&h| Cell::from(h).fg(if is_dark_theme {Color::LightGreen} else {Color::Blue}).underlined().add_modifier(Modifier::BOLD)) + ).height(1); + + + let table = Table::new(table_rows, widths.clone()) + .header(headers) + .highlight_style(Style::default().add_modifier(Modifier::BOLD)) + .widths(&widths) + .flex(Flex::Legacy) + .column_spacing(1); + + f.render_widget(table, area); + + + let mut scrollbar = Scrollbar::default() + 
.style(Style::default()) + .orientation(ScrollbarOrientation::VerticalRight) + .begin_symbol(Some("↑")) + .end_symbol(Some("↓")).thumb_style(Style::new().fg(Color::LightBlue)) + .orientation(ScrollbarOrientation::VerticalRight); + + let height_of_traf_area = area.height.saturating_sub(2); + tui_state.log_tab_stage.scroll_state.area_height = height_of_traf_area as usize; + + tui_state.log_tab_stage.scroll_state.vertical_scroll_state = tui_state.log_tab_stage.scroll_state.vertical_scroll_state.content_length(rows.len().saturating_sub(height_of_traf_area as usize)); + + if tui_state.log_tab_stage.scroll_state.scroll_bar_hovered { + scrollbar = scrollbar.thumb_style(Style::default().fg(Color::Yellow).bg(Color::Red)); + } + + let scrollbar_area = Rect::new(area.right() - 1, area.top(), 1, area.height); + + f.render_stateful_widget(scrollbar,scrollbar_area, &mut tui_state.log_tab_stage.scroll_state.vertical_scroll_state); + + +} diff --git a/src/types/app_state.rs b/src/types/app_state.rs index 844375f..4477176 100644 --- a/src/types/app_state.rs +++ b/src/types/app_state.rs @@ -1,9 +1,13 @@ use std::collections::HashMap; -use ratatui::widgets::ScrollbarState; +use std::default; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicUsize; use ratatui::prelude::Rect; use utoipa::ToSchema; -use crate::tui::Page; -use crate::tui::TrafficTabState; +use crate::types::tui_state::LogPageState; +use crate::types::tui_state::Page; +use crate::types::tui_state::ThreadsTabState; +use crate::types::tui_state::ConnectionsTabState; use std::sync::Arc; use crate::types::proxy_state::*; use ratatui::widgets::ListState; @@ -20,23 +24,10 @@ pub enum ProcState { } #[derive(Debug)] -pub (crate) struct AppState { - pub (crate) logs_scroll_bar_hovered : bool, - pub (crate) last_mouse_down_y_pos : usize, - pub (crate) dbg : String, - pub (crate) total_line_count: usize, - pub (crate) exit: bool, - pub (crate) site_states_map: HashMap, - pub (crate) vertical_scroll: Option, - pub (crate) scroll_state : ScrollbarState, - pub (crate) show_apps_window : bool, - pub (crate) logs_area_height:usize, - pub (crate) logs_area_width:usize, - pub (crate) site_rects: Vec<(Rect,String)>, - pub (crate) currently_hovered_site: Option, - pub (crate) current_page : Page, - pub (crate) traffic_tab_state: TrafficTabState, - pub (crate) statistics : Arc> +pub struct AppState { + pub exit: AtomicBool, + pub site_status_map: Arc>, + pub statistics : Arc, } impl AppState { @@ -45,28 +36,16 @@ impl AppState { let mut list_state = ListState::default(); list_state.select(Some(0)); let result = AppState { - logs_scroll_bar_hovered:false, - last_mouse_down_y_pos: 1, - dbg: String::new(), - statistics : Arc::new(std::sync::RwLock::new(ProxyStats { - received_tcp_connections: 0, - active_connections: HashMap::new() - })), - traffic_tab_state: TrafficTabState { - ..Default::default() - }, - current_page: Page::Logs, - currently_hovered_site: None, - site_rects: vec![], - total_line_count:0, - logs_area_height: 5, - logs_area_width: 5, - scroll_state: ScrollbarState::new(0), - vertical_scroll: None, - exit: false, + site_status_map: Arc::new(dashmap::DashMap::new()), + statistics : Arc::new(ProxyStats { + active_connections: dashmap::DashMap::new(), + hosted_process_stats: dashmap::DashMap::new(), + remote_targets_stats: dashmap::DashMap::new(), + total_request_count: AtomicUsize::new(0), + + }), + exit: AtomicBool::new(false), //view_mode: ViewMode::Console, - site_states_map: HashMap::::new(), - show_apps_window : true }; @@ -74,183 
+53,5 @@ impl AppState { } - - pub fn sites_handle_mouse_click(&mut self, _column: u16, _row: u16,tx: tokio::sync::broadcast::Sender) { - - let selected_site = if let Some(x) = &self.currently_hovered_site {x} else { return }; - - let new_state : Option = { - - let (_,state) = if let Some(info) = self.site_states_map.iter_mut().find(|x|x.0==selected_site) {info} else {return}; - - match state { - ProcState::Faulty => { - *state = ProcState::Stopped; - Some(false) - }, - ProcState::Stopped => { - *state = ProcState::Starting; - Some(true) - } - ProcState::Running => { - *state = ProcState::Stopping; - Some(false) - } - _ => None - } - }; - - if let Some(s) = new_state { - if s { - tx.send(ProcMessage::Start(selected_site.to_owned())).expect("should always be able to send internal messages"); - } else { - tx.send(ProcMessage::Stop(selected_site.to_owned())).expect("should always be able to send internal messages"); - } - } - - } - - pub fn sites_handle_mouse_hover(&mut self, column: u16, row: u16) { - let mut highlight : Option = None; - for (rect,site) in &self.site_rects { - if rect.left() <= column && rect.right() >= column && row == rect.top() { - highlight = Some(site.to_string()); - break; - } - } - - self.currently_hovered_site = highlight; - } - - - pub fn calculate_thumb_size(&self) -> f32 { - if self.total_line_count <= self.logs_area_height { - // this is just if we dont need a scrollbar - in which case its just going to be hidden anyway - self.logs_area_height as f32 - } else { - let thumb_size = (self.logs_area_height as f64 / self.total_line_count as f64) * self.logs_area_height as f64; - thumb_size.ceil() as f32 - } - } - - pub fn logs_handle_mouse_move(&mut self, column: u16, row: u16) { - let thumb_size = self.calculate_thumb_size().max(1.0); - let max_scroll = self.total_line_count.saturating_sub(self.logs_area_height); - let vscroll = self.vertical_scroll.unwrap_or(max_scroll); - let thumb_position = if self.total_line_count > self.logs_area_height { - (vscroll as f32 / (self.total_line_count as f32 - self.logs_area_height as f32)) * (self.logs_area_height as f32 - thumb_size) - } else { - 1.0 - }.max(1.0); - let horizontal_match = column as usize >= self.logs_area_width - 1 && column as usize <= self.logs_area_width + 1; - let vertical_match = (row as isize >= thumb_position as isize - 2) && row as usize <= (thumb_position + thumb_size + 1.0) as usize; - //self.dbg = format!("dragging pos: {row}/{column} - vscroll: {} - tpos: {thumb_position} | V: {vertical_match}, H: {horizontal_match}",vscroll); - self.logs_scroll_bar_hovered = horizontal_match && vertical_match; - } - - pub fn logs_handle_mouse_scroll_drag(&mut self, _column: u16, row: u16) { - - if self.logs_scroll_bar_hovered { - - let max_scroll = self.total_line_count.saturating_sub(self.logs_area_height); - - let vscroll = self.vertical_scroll.unwrap_or(max_scroll); - - self.dbg = format!("WE ARE MOVING TO {} (from: {vscroll}, min: 1, max:{max_scroll}) - last_pos:{}",row,self.last_mouse_down_y_pos); - - let click_position = (row as usize).min(self.logs_area_height).max(0); - let percentage = click_position as f32 / self.logs_area_height as f32; - let scroll_to = (percentage * self.total_line_count as f32).round() as usize; - - let new_val = scroll_to.min(max_scroll); - if new_val == max_scroll { - self.vertical_scroll = None; - self.scroll_state = self.scroll_state.position(new_val); - } else { - self.vertical_scroll = Some(new_val); - self.scroll_state = self.scroll_state.position(new_val); - } - } else { - - 
self.last_mouse_down_y_pos = row as usize; - } - - - } - - - pub fn logs_tab_scroll_up(&mut self, count:Option) { - match self.vertical_scroll { - Some(current) if current > 0 => { - let new_val = current.saturating_sub(count.unwrap_or(1)).max(0); - self.vertical_scroll = Some(new_val); - self.scroll_state = self.scroll_state.position(new_val); - } - None => { - let max = self.total_line_count.saturating_sub(self.logs_area_height); - let new_val = max.saturating_sub(count.unwrap_or(1)); - self.vertical_scroll = Some(new_val); - self.scroll_state = self.scroll_state.position(new_val); - } - _ => {} - } - } - - // we usually only call this if app.logs_area_height.saturating_div(2) is greater than 0 - pub fn logs_tab_scroll_down(&mut self, count:Option) { - if self.vertical_scroll.is_some() { - let current = self.vertical_scroll.unwrap_or_default(); - let max = self.total_line_count.saturating_sub(self.logs_area_height).saturating_sub(1); - if current < max { - let new_val = current.saturating_add(count.unwrap_or(1)).min(max); - self.vertical_scroll = Some(new_val); - self.scroll_state = self.scroll_state.position(new_val); - - } - else { - self.vertical_scroll = None; - } - } - } - - - pub fn traf_tab_scroll_up(&mut self, count:Option) { - - self.traffic_tab_state.vertical_scroll = - self.traffic_tab_state.vertical_scroll.saturating_sub(count.unwrap_or(1)); - - self.traffic_tab_state.vertical_scroll_state = - self.traffic_tab_state.vertical_scroll_state.position(self.traffic_tab_state.vertical_scroll); - - } - - - pub fn traf_tab_scroll_down(&mut self, count: Option) { - - let current = self.traffic_tab_state.vertical_scroll; - let max = self.traffic_tab_state.total_rows.saturating_sub(self.traffic_tab_state.area_height); - if current < max { - let new_val = current.saturating_add(count.unwrap_or(1)).min(max); - self.traffic_tab_state.vertical_scroll = new_val; - self.traffic_tab_state.vertical_scroll_state = self.traffic_tab_state.vertical_scroll_state.position(new_val); - } - else { - self.traffic_tab_state.vertical_scroll = max; - } - } - - - - // fn toggle_view(&mut self) { - - - // self.view_mode = match self.view_mode { - // ViewMode::Console => { - // ViewMode::TUI - // }, - // ViewMode::TUI => { - // ViewMode::Console - // }, - // }; - // } } + diff --git a/src/types/mod.rs b/src/types/mod.rs index f86de39..bb68664 100644 --- a/src/types/mod.rs +++ b/src/types/mod.rs @@ -1,4 +1,5 @@ -pub (crate) mod custom_error; -pub (crate) mod statistics; -pub (crate) mod app_state; -pub (crate) mod proxy_state; \ No newline at end of file +pub mod custom_error; +pub mod statistics; +pub mod app_state; +pub mod proxy_state; +pub mod tui_state; \ No newline at end of file diff --git a/src/types/proxy_state.rs b/src/types/proxy_state.rs index 6baa457..404ed69 100644 --- a/src/types/proxy_state.rs +++ b/src/types/proxy_state.rs @@ -1,16 +1,17 @@ -use std::collections::HashMap; use std::net::SocketAddr; -use crate::tcp_proxy::ReverseTcpProxyTarget; +use std::sync::atomic::AtomicUsize; #[derive(Debug)] -pub (crate) struct ProxyStats { - pub (crate) received_tcp_connections : usize, - pub (crate) active_connections : HashMap +pub struct ProxyStats { + pub active_connections : dashmap::DashMap, + pub hosted_process_stats : dashmap::DashMap, + pub remote_targets_stats : dashmap::DashMap, + pub total_request_count : AtomicUsize } #[derive(Debug,Clone)] -pub (crate) enum ProxyActiveConnectionType { +pub enum ProxyActiveConnectionType { TcpTunnelUnencryptedHttp, TcpTunnelTls, TerminatingHttp { @@ -27,15 
+28,15 @@ pub (crate) enum ProxyActiveConnectionType { } } -pub type ConnectionKey = (SocketAddr,uuid::Uuid); +pub type ConnectionKey = u64; #[derive(Debug,Clone)] #[allow(dead_code)] -pub (crate) struct ProxyActiveConnection { - pub (crate) target : ReverseTcpProxyTarget, - pub (crate) creation_time : chrono::DateTime, - pub (crate) description : Option, - pub (crate) connection_type : ProxyActiveConnectionType, - pub (crate) source_addr: SocketAddr, - pub (crate) target_addr: String +pub struct ProxyActiveConnection { + pub target_name : String, + pub creation_time : chrono::DateTime, + pub description : Option, + pub connection_type : ProxyActiveConnectionType, + pub source_addr: SocketAddr, + pub target_addr: String } diff --git a/src/types/tui_state.rs b/src/types/tui_state.rs new file mode 100644 index 0000000..403a869 --- /dev/null +++ b/src/types/tui_state.rs @@ -0,0 +1,118 @@ +use std::{default, sync::Arc}; + +use ratatui::layout::Rect; + +use crate::{http_proxy::ProcMessage, tui::scroll_state_wrapper::ScrollStateWrapper}; + +use super::app_state::ProcState; + + + +#[derive(Debug)] +pub struct TuiState { + pub site_rects: Vec<(Rect,String)>, + pub show_apps_window : bool, + pub currently_hovered_site: Option, + pub current_page : Page, + pub connections_tab_state: ConnectionsTabState, + pub threads_tab_state: ThreadsTabState, + pub log_tab_stage : LogPageState, +} +impl TuiState { + pub fn new() -> TuiState { + TuiState { + current_page: Page::Logs, + currently_hovered_site: None, + site_rects: Vec::new(), + show_apps_window : true, + connections_tab_state: default::Default::default(), + threads_tab_state: default::Default::default(), + log_tab_stage: default::Default::default(), + + } + } +} + + + +impl TuiState { + + + pub fn sites_handle_mouse_click(&mut self, _column: u16, _row: u16,tx: tokio::sync::broadcast::Sender, site_states_map: &Arc>) { + + let selected_site = if let Some(s) = &self.currently_hovered_site { s } else { return }; + + let new_state : Option = { + + let mut info = if let Some(v) = site_states_map.get_mut(selected_site) {v} else {return}; + let (_,state) = info.pair_mut(); + match state { + ProcState::Faulty => { + *state = ProcState::Stopped; + Some(false) + }, + ProcState::Stopped => { + *state = ProcState::Starting; + Some(true) + } + ProcState::Running => { + *state = ProcState::Stopping; + Some(false) + } + _ => None + } + }; + + if let Some(s) = new_state { + if s { + tx.send(ProcMessage::Start(selected_site.to_owned())).expect("should always be able to send internal messages"); + } else { + tx.send(ProcMessage::Stop(selected_site.to_owned())).expect("should always be able to send internal messages"); + } + } + + } + + pub fn sites_handle_mouse_hover(&mut self, column: u16, row: u16) { + let mut highlight : Option = None; + for (rect,site) in self.site_rects.iter() { + if rect.left() <= column && rect.right() >= column && row == rect.top() { + highlight = Some(site.to_string()); + break; + } + } + + self.currently_hovered_site = highlight; + } + +} + + + + +#[derive(Debug,Default)] +pub struct LogPageState { + pub scroll_state : ScrollStateWrapper +} + +#[derive(Debug,Default)] +pub struct ThreadsTabState { + pub test : String, + pub scroll_state : ScrollStateWrapper +} + +#[derive(Debug,Default)] +pub struct ConnectionsTabState { + pub test : String, + pub scroll_state : ScrollStateWrapper +} + + +#[derive(Clone,Debug,Eq,PartialEq)] +pub enum Page { + Logs, + Statistics, + Connections, + Threads +} + From 0eb6215d7a11c4332686bfba57afa933105b4c8b Mon 
Sep 17 00:00:00 2001 From: Olof Date: Fri, 30 Aug 2024 18:29:45 +0000 Subject: [PATCH 04/19] ugh --- src/main.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 52c493c..32d3cb4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,10 +12,8 @@ use configuration::OddBoxConfiguration; use http_proxy::ProcMessage; use rustls::pki_types::{CertificateDer, PrivateKeyDer}; use self_update::cargo_crate_version; -use tokio::sync::RwLock; use tracing_subscriber::layer::SubscriberExt; use std::fmt::Debug; -use std::os::linux::raw::stat; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; From 41818908acb8b2c078814cc583367d2ce064916b Mon Sep 17 00:00:00 2001 From: Olof Blomqvist Date: Fri, 30 Aug 2024 23:40:55 +0200 Subject: [PATCH 05/19] add nasm for windows builds --- .github/workflows/build_artifacts_without_release.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/build_artifacts_without_release.yml b/.github/workflows/build_artifacts_without_release.yml index 2f36ca4..a057812 100644 --- a/.github/workflows/build_artifacts_without_release.yml +++ b/.github/workflows/build_artifacts_without_release.yml @@ -55,6 +55,16 @@ jobs: uses: dtolnay/rust-toolchain@stable with: toolchain: nightly + + - name: Install NASM (Windows only) + if: startsWith(matrix.os, 'windows') + run: | + choco install nasm -y + + - name: Configure NASM for CMake (Windows only) + if: startsWith(matrix.os, 'windows') + run: | + $env:CMAKE_ASM_NASM_COMPILER = "nasm" - name: Build run: | From 45bb9bd32477c11fa37748ae58f9ce09e49ca492 Mon Sep 17 00:00:00 2001 From: Olof Blomqvist Date: Fri, 30 Aug 2024 23:48:32 +0200 Subject: [PATCH 06/19] Update build_artifacts_without_release.yml --- .github/workflows/build_artifacts_without_release.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_artifacts_without_release.yml b/.github/workflows/build_artifacts_without_release.yml index a057812..9eb0d0d 100644 --- a/.github/workflows/build_artifacts_without_release.yml +++ b/.github/workflows/build_artifacts_without_release.yml @@ -60,7 +60,14 @@ jobs: if: startsWith(matrix.os, 'windows') run: | choco install nasm -y - + # Add NASM to PATH + $nasmPath = "C:\ProgramData\chocolatey\lib\nasm\tools\nasm" + if (Test-Path $nasmPath) { + $env:Path += ";$nasmPath" + } else { + Write-Error "NASM path not found!" + } + - name: Configure NASM for CMake (Windows only) if: startsWith(matrix.os, 'windows') run: | From c6b9db0ff1137e1fc9c0d06b85c80fc6953aba5d Mon Sep 17 00:00:00 2001 From: Olof Blomqvist Date: Fri, 30 Aug 2024 23:52:31 +0200 Subject: [PATCH 07/19] Update build_artifacts_without_release.yml --- .github/workflows/build_artifacts_without_release.yml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_artifacts_without_release.yml b/.github/workflows/build_artifacts_without_release.yml index 9eb0d0d..486d2bf 100644 --- a/.github/workflows/build_artifacts_without_release.yml +++ b/.github/workflows/build_artifacts_without_release.yml @@ -60,13 +60,8 @@ jobs: if: startsWith(matrix.os, 'windows') run: | choco install nasm -y - # Add NASM to PATH - $nasmPath = "C:\ProgramData\chocolatey\lib\nasm\tools\nasm" - if (Test-Path $nasmPath) { - $env:Path += ";$nasmPath" - } else { - Write-Error "NASM path not found!" 
- } + # Add Chocolatey bin directory to PATH + $env:Path += ";C:\ProgramData\chocolatey\bin" - name: Configure NASM for CMake (Windows only) if: startsWith(matrix.os, 'windows') From 2b039fb247a9d8b88ea341ac8fc97a855937a698 Mon Sep 17 00:00:00 2001 From: Olof Blomqvist Date: Sat, 31 Aug 2024 00:05:56 +0200 Subject: [PATCH 08/19] Update build_artifacts_without_release.yml --- .../build_artifacts_without_release.yml | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_artifacts_without_release.yml b/.github/workflows/build_artifacts_without_release.yml index 486d2bf..f2504bf 100644 --- a/.github/workflows/build_artifacts_without_release.yml +++ b/.github/workflows/build_artifacts_without_release.yml @@ -56,13 +56,27 @@ jobs: with: toolchain: nightly - - name: Install NASM (Windows only) + # Step to install NASM on Windows + - name: Install NASM on Windows + if: startsWith(matrix.os, 'windows') + uses: jberezanski/chocolatey-setup@v1 + with: + package-list: 'nasm' + + # Step to check if NASM is installed and in PATH + - name: Verify NASM Installation (Windows only) if: startsWith(matrix.os, 'windows') run: | - choco install nasm -y - # Add Chocolatey bin directory to PATH - $env:Path += ";C:\ProgramData\chocolatey\bin" - + echo "Checking NASM version..." + if (!(Get-Command nasm -ErrorAction SilentlyContinue)) { + Write-Host "NASM not found in PATH. Installation might have failed." + exit 1 + } else { + nasm -v + } + shell: pwsh + + # Step to configure NASM for CMake (if required) - name: Configure NASM for CMake (Windows only) if: startsWith(matrix.os, 'windows') run: | From 68e50306537791179c79928ef5f9b6afd9055415 Mon Sep 17 00:00:00 2001 From: Olof Blomqvist Date: Sat, 31 Aug 2024 00:09:59 +0200 Subject: [PATCH 09/19] Update build_artifacts_without_release.yml --- .../build_artifacts_without_release.yml | 35 +++---------------- 1 file changed, 4 insertions(+), 31 deletions(-) diff --git a/.github/workflows/build_artifacts_without_release.yml b/.github/workflows/build_artifacts_without_release.yml index f2504bf..1338cd6 100644 --- a/.github/workflows/build_artifacts_without_release.yml +++ b/.github/workflows/build_artifacts_without_release.yml @@ -55,43 +55,16 @@ jobs: uses: dtolnay/rust-toolchain@stable with: toolchain: nightly - - # Step to install NASM on Windows - - name: Install NASM on Windows + + - name: Install NASM if: startsWith(matrix.os, 'windows') - uses: jberezanski/chocolatey-setup@v1 - with: - package-list: 'nasm' - - # Step to check if NASM is installed and in PATH - - name: Verify NASM Installation (Windows only) - if: startsWith(matrix.os, 'windows') - run: | - echo "Checking NASM version..." - if (!(Get-Command nasm -ErrorAction SilentlyContinue)) { - Write-Host "NASM not found in PATH. Installation might have failed." 
- exit 1 - } else { - nasm -v - } - shell: pwsh - - # Step to configure NASM for CMake (if required) + uses: ilammy/setup-nasm@v1 + - name: Configure NASM for CMake (Windows only) if: startsWith(matrix.os, 'windows') run: | $env:CMAKE_ASM_NASM_COMPILER = "nasm" - - - name: Build - run: | - if ("${{ matrix.no_default_features }}" -eq "true") { - cargo build --release --verbose --no-default-features - } else { - cargo build --release --verbose - } - shell: pwsh - - name: Rename Artifact run: mv ${{ matrix.artifact_path }} ${{ matrix.artifact_name }} From 16ae58854f845f6b52214e56193f5dbd2ed07d06 Mon Sep 17 00:00:00 2001 From: Olof Blomqvist Date: Sat, 31 Aug 2024 00:14:13 +0200 Subject: [PATCH 10/19] Update build_artifacts_without_release.yml --- .github/workflows/build_artifacts_without_release.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/build_artifacts_without_release.yml b/.github/workflows/build_artifacts_without_release.yml index 1338cd6..e721c41 100644 --- a/.github/workflows/build_artifacts_without_release.yml +++ b/.github/workflows/build_artifacts_without_release.yml @@ -65,6 +65,15 @@ jobs: run: | $env:CMAKE_ASM_NASM_COMPILER = "nasm" + - name: Build + run: | + if ("${{ matrix.no_default_features }}" -eq "true") { + cargo build --release --verbose --no-default-features + } else { + cargo build --release --verbose + } + shell: pwsh + - name: Rename Artifact run: mv ${{ matrix.artifact_path }} ${{ matrix.artifact_name }} From 88b00976ab2071ecffca65e94c5ae894c69be035 Mon Sep 17 00:00:00 2001 From: Olof Date: Sun, 1 Sep 2024 21:12:01 +0000 Subject: [PATCH 11/19] switch to semaphore and fix https listener --- Cargo.lock | 600 +++++++++++--------------------- Cargo.toml | 14 +- odd-box.toml | 6 +- src/configuration/mod.rs | 40 ++- src/http_proxy/service.rs | 93 ++--- src/http_proxy/utils.rs | 10 +- src/http_proxy/websockets.rs | 5 +- src/logging.rs | 1 + src/main.rs | 147 ++++---- src/proc_host.rs | 540 ++++++++++++++-------------- src/proxy.rs | 96 ++--- src/tcp_proxy/tcp.rs | 110 +++--- src/tui/logs_widget.rs | 141 ++++---- src/tui/mod.rs | 104 +++--- src/tui/scroll_state_wrapper.rs | 2 +- src/tui/stats_widget.rs | 4 +- src/tui/threads_widget.rs | 28 +- 17 files changed, 891 insertions(+), 1050 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14d4247..5c09aaa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -355,10 +355,10 @@ dependencies = [ "base64 0.21.7", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-util", "itoa", "matchit", @@ -390,8 +390,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -413,8 +413,8 @@ dependencies = [ "bytes", "futures-util", "headers", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -461,7 +461,7 @@ dependencies = [ "bitflags 2.5.0", "cexpr", "clang-sys", - "itertools", + "itertools 0.12.1", "lazy_static", "lazycell", "log 0.4.21", @@ -538,9 +538,9 @@ checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "castaway" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" dependencies = [ "rustversion", ] 
@@ -587,7 +587,7 @@ dependencies = [ "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.4", + "windows-targets 0.52.6", ] [[package]] @@ -658,13 +658,14 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "compact_str" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +checksum = "6050c3a16ddab2e412160b31f2c871015704239bca62f72f6e5f0be631d3f644" dependencies = [ "castaway", "cfg-if", "itoa", + "rustversion", "ryu", "static_assertions", ] @@ -731,22 +732,6 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" -[[package]] -name = "crossterm" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" -dependencies = [ - "bitflags 2.5.0", - "crossterm_winapi", - "libc", - "mio 0.8.11", - "parking_lot", - "signal-hook", - "signal-hook-mio", - "winapi", -] - [[package]] name = "crossterm" version = "0.28.1" @@ -755,7 +740,7 @@ checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ "bitflags 2.5.0", "crossterm_winapi", - "mio 1.0.2", + "mio", "parking_lot", "rustix 0.38.34", "signal-hook", @@ -967,18 +952,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum-as-inner" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "enum-as-inner" version = "0.6.0" @@ -1271,25 +1244,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "h2" -version = "0.3.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "h2" version = "0.4.3" @@ -1301,7 +1255,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 1.1.0", + "http", "indexmap", "slab", "tokio", @@ -1337,7 +1291,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http 1.1.0", + "http", "httpdate", "mime", "sha1", @@ -1349,7 +1303,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" dependencies = [ - "http 1.1.0", + "http", ] [[package]] @@ -1385,7 +1339,7 @@ dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner 0.6.0", + "enum-as-inner", "futures-channel", "futures-io", "futures-util", @@ -1437,17 +1391,6 @@ dependencies = [ "log 0.3.9", ] -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - [[package]] name = "http" version = "1.1.0" @@ -1459,17 +1402,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" 
-version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1477,7 +1409,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http", ] [[package]] @@ -1488,8 +1420,8 @@ checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", "futures-core", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "pin-project-lite", ] @@ -1511,30 +1443,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "hyper" -version = "0.14.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.25", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.5.6", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.4.1" @@ -1544,9 +1452,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.3", - "http 1.1.0", - "http-body 1.0.1", + "h2", + "http", + "http-body", "httparse", "httpdate", "itoa", @@ -1567,20 +1475,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-rustls" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" -dependencies = [ - "http 0.2.12", - "hyper 0.14.28", - "rustls 0.20.9", - "tokio", - "tokio-rustls 0.23.4", - "webpki-roots 0.22.6", -] - [[package]] name = "hyper-rustls" version = "0.26.0" @@ -1588,8 +1482,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.4.1", + "http", + "hyper", "hyper-util", "rustls 0.22.2", "rustls-pki-types", @@ -1605,8 +1499,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.4.1", + "http", + "hyper", "hyper-util", "log 0.4.21", "rustls 0.23.12", @@ -1625,7 +1519,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-util", "native-tls", "tokio", @@ -1633,18 +1527,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-trust-dns" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0deaf08b5c5409c0c74011f696a82bdadae4c6d70b7a71edf8378b29bdd840bd" -dependencies = [ - "hyper 0.14.28", - "hyper-rustls 0.23.2", - "tokio", - "trust-dns-resolver", -] - [[package]] name = "hyper-tungstenite" version = "0.14.0" @@ -1652,7 +1534,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69ce21dae6ce6e5f336a444d846e592faf42c5c28f70a5c8ff67893cbcb304d3" dependencies = [ "http-body-util", - "hyper 1.4.1", + "hyper", 
"hyper-util", "pin-project-lite", "tokio", @@ -1669,9 +1551,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", - "http-body 1.0.1", - "hyper 1.4.1", + "http", + "http-body", + "hyper", "pin-project-lite", "socket2 0.5.6", "tokio", @@ -1691,7 +1573,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -1703,17 +1585,6 @@ dependencies = [ "cc", ] -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.4.0" @@ -1759,10 +1630,14 @@ dependencies = [ ] [[package]] -name = "indoc" -version = "2.0.5" +name = "instability" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" +checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" +dependencies = [ + "quote", + "syn 2.0.55", +] [[package]] name = "instant" @@ -1799,6 +1674,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.10" @@ -1848,7 +1732,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.52.6", ] [[package]] @@ -1941,12 +1825,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matchit" version = "0.7.3" @@ -2008,18 +1886,6 @@ dependencies = [ "adler", ] -[[package]] -name = "mio" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" -dependencies = [ - "libc", - "log 0.4.21", - "wasi", - "windows-sys 0.48.0", -] - [[package]] name = "mio" version = "1.0.2" @@ -2173,21 +2039,20 @@ dependencies = [ "bytes", "chrono", "clap", - "crossterm 0.28.1", + "crossterm", "ctrlc", "dark-light", "dashmap", "dirs 5.0.1", "futures-util", - "h2 0.4.3", + "h2", "hpack", - "http-body 1.0.1", + "http-body", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-hickory", "hyper-rustls 0.27.2", "hyper-tls", - "hyper-trust-dns", "hyper-tungstenite", "hyper-util", "lazy_static", @@ -2197,7 +2062,7 @@ dependencies = [ "regex", "reqwest", "rustls 0.23.12", - "rustls-pemfile 2.1.1", + "rustls-pemfile", "self_update", "serde", "serde_json", @@ -2561,21 +2426,22 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.26.2" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" +checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d" dependencies = [ "bitflags 2.5.0", "cassowary", "compact_str", - "crossterm 0.27.0", - "indoc", - "itertools", + "crossterm", + "instability", + "itertools 0.13.0", "lru", 
"paste", - "stability", "strum", + "strum_macros", "unicode-segmentation", + "unicode-truncate", "unicode-width", ] @@ -2586,7 +2452,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" dependencies = [ "pem", - "ring 0.17.8", + "ring", "rustls-pki-types", "time", "yasna", @@ -2668,11 +2534,11 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.4.3", - "http 1.1.0", - "http-body 1.0.1", + "h2", + "http", + "http-body", "http-body-util", - "hyper 1.4.1", + "hyper", "hyper-rustls 0.26.0", "hyper-tls", "hyper-util", @@ -2685,7 +2551,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.22.2", - "rustls-pemfile 2.1.1", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -2700,25 +2566,10 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.3", + "webpki-roots", "winreg 0.52.0", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.8" @@ -2729,8 +2580,8 @@ dependencies = [ "cfg-if", "getrandom", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.52.0", ] @@ -2817,18 +2668,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustls" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" -dependencies = [ - "log 0.4.21", - "ring 0.16.20", - "sct", - "webpki", -] - [[package]] name = "rustls" version = "0.22.2" @@ -2836,7 +2675,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log 0.4.21", - "ring 0.17.8", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -2860,26 +2699,17 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.1", + "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.1.1" @@ -2903,9 +2733,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "aws-lc-rs", - "ring 0.17.8", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -2944,16 +2774,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "security-framework" version = "2.9.2" @@ -2990,11 +2810,11 @@ dependencies = [ [[package]] name = "self_update" -version = "0.40.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4997484b55df069a4773d822715695b2cc27b23829eca2a4b41690e948bdeb" +checksum = "469a3970061380c19852269f393e74c0fe607a4e23d85267382cf25486aa8de5" dependencies = [ - "hyper 1.4.1", + "hyper", "indicatif", "log 0.4.21", "quick-xml", @@ -3153,8 +2973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio 0.8.11", - "mio 1.0.2", + "mio", "signal-hook", ] @@ -3202,28 +3021,12 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -[[package]] -name = "stability" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" -dependencies = [ - "quote", - "syn 2.0.55", -] - [[package]] name = "static_assertions" version = "1.1.0" @@ -3247,11 +3050,11 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "rustversion", @@ -3416,7 +3219,7 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.2", + "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -3446,17 +3249,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.9", - "tokio", - "webpki", -] - [[package]] name = "tokio-rustls" version = "0.25.0" @@ -3600,8 +3392,8 @@ dependencies = [ "bitflags 2.5.0", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "http-range-header", "httpdate", @@ -3691,61 +3483,6 @@ dependencies = [ "tracing-log", ] -[[package]] -name = "trust-dns-proto" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" -dependencies = [ - "async-trait", - "bytes", - "cfg-if", - "data-encoding", - "enum-as-inner 0.5.1", - "futures-channel", - "futures-io", - "futures-util", - "h2 0.3.25", - "http 0.2.12", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand", - "ring 0.16.20", - "rustls 0.20.9", - "rustls-pemfile 1.0.4", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tokio-rustls 0.23.4", - "tracing", - "url", - "webpki", - "webpki-roots 0.22.6", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.22.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" -dependencies = [ - "cfg-if", - "futures-util", - "lazy_static", - "lru-cache", - "parking_lot", - "rustls 0.20.9", - "smallvec", - "thiserror", - "tokio", - "tokio-rustls 0.23.4", - "tracing", - "trust-dns-proto", - "webpki-roots 0.22.6", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -3761,7 +3498,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.1.0", + "http", "httparse", "log 0.4.21", "rand", @@ -3780,7 +3517,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.1.0", + "http", "httparse", "log 0.4.21", "rand", @@ -3800,7 +3537,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.1.0", + "http", "httparse", "log 0.4.21", "rand", @@ -3862,11 +3599,22 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +[[package]] +name = "unicode-truncate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" +dependencies = [ + "itertools 0.13.0", + "unicode-segmentation", + "unicode-width", +] + [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unsafe-libyaml" @@ -3874,12 +3622,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -4160,17 +3902,8 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - -[[package]] -name = "webpki-roots" -version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", + "ring", + "untrusted", ] [[package]] @@ -4227,12 +3960,12 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.52.0" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" dependencies = [ - "windows-core", - "windows-targets 0.52.4", + "windows-core 0.58.0", + "windows-targets 0.52.6", ] [[package]] @@ -4241,7 +3974,61 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.55", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.55", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -4259,7 +4046,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", ] [[package]] @@ -4279,17 +4066,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4300,9 +4088,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4312,9 +4100,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4324,9 +4112,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4336,9 +4130,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4348,9 +4142,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4360,9 +4154,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4372,9 +4166,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" diff --git a/Cargo.toml b/Cargo.toml index 45088c3..658d496 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,14 +20,6 @@ toml = "0.8.4" tracing = "0.1.37" tracing-subscriber = { version="0.3.18", features=[ "env-filter","std","fmt","time"] } url = "2.4.1" - -hyper-trust-dns = { version = "0.5.0", features = [ - "rustls-http2", - "dnssec-ring", - "dns-over-https-rustls", - "rustls-webpki" -] } - lazy_static = "1.4.0" unicase = "2.7.0" hyper-tls = "0.6.0" @@ -39,7 +31,7 @@ rcgen = "0.13.1" socket2 = "0.5.5" hyper-tungstenite = "0.14.0" -ratatui = "0.26.2" +ratatui = "0.28.1" #ratatui = { git = "https://github.com/ratatui-org/ratatui" } crossterm = { version = "0.28.1" } chrono = "0.4.31" @@ -47,7 +39,7 @@ chrono = "0.4.31" time = {version="0.3.30",features=["macros","formatting","parsing"]} reqwest = { version = "0.12.4", features = ["json"] } serde_json = "1.0.111" -self_update = "0.40.0" +self_update = "0.41.0" bytes = "1.7.1" http-body-util = "0.1.0" #active-win-pos-rs = "0.8.3" @@ -84,7 +76,7 @@ dashmap = "6.0.1" # =============================================================== [target.'cfg(windows)'.dependencies] -windows = { version = "0.52.0", features = ["Win32","Win32_Foundation","Win32_System","Win32_System_Console"] } +windows = { version = "0.58.0", features = ["Win32","Win32_Foundation","Win32_System","Win32_System_Console"] } diff --git 
a/odd-box.toml b/odd-box.toml index 76113e5..d9887c1 100644 --- a/odd-box.toml +++ b/odd-box.toml @@ -6,7 +6,7 @@ ip = "127.0.0.1" tls_port = 4343 auto_start = false root_dir = "~" -log_level = "trace" +log_level = "warn" port_range_start = 4200 default_log_format = "standard" env_vars = [ @@ -16,7 +16,7 @@ env_vars = [ [[remote_target]] host_name = "caddy-lb-terminated.localtest.me" -disable_tcp_tunnel_mode = true +disable_tcp_tunnel_mode = false backends = [ { https = false, address="127.0.0.1", port=9999 }, { https = false, address="127.0.0.1", port=9999 } @@ -25,7 +25,7 @@ backends = [ [[hosted_process]] host_name = "caddy-proc-terminated.localtest.me" -disable_tcp_tunnel_mode = true +disable_tcp_tunnel_mode = false auto_start = true port = 9999 dir = "$cfg_dir" diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs index 2d40645..ff234cb 100644 --- a/src/configuration/mod.rs +++ b/src/configuration/mod.rs @@ -28,7 +28,11 @@ pub enum OddBoxConfig { } #[derive(Debug,Clone)] -pub struct ConfigWrapper(pub v2::OddBoxV2Config); +pub struct ConfigWrapper( + pub v2::OddBoxV2Config, + Option // <-- resolved path cache - todo, use and rename +); + impl std::ops::Deref for ConfigWrapper { type Target = v2::OddBoxV2Config; @@ -44,7 +48,7 @@ impl std::ops::DerefMut for ConfigWrapper { impl ConfigWrapper { pub fn wrapv2(config:v2::OddBoxV2Config) -> Self { - ConfigWrapper(config) + ConfigWrapper(config,None) } } @@ -163,8 +167,9 @@ impl OddBoxConfig { impl ConfigWrapper { - - + pub fn new(cfg:v2::OddBoxV2Config) -> Self { + ConfigWrapper(cfg,None) + } pub fn init(&mut self,cfg_path:&str) -> anyhow::Result<()> { self.path = Some(std::path::Path::new(&cfg_path).canonicalize()?.to_str().unwrap_or_default().into()); Ok(()) @@ -225,19 +230,28 @@ impl ConfigWrapper { } - pub fn get_parent(p:&str) -> anyhow::Result { + pub fn get_parent(&mut self) -> anyhow::Result { + // todo - use cache and clear on path change + // if let Some(pre_resolved) = &self.1 { + // return Ok(pre_resolved.to_string()) + // } + let p = self.path.clone().ok_or(anyhow::anyhow!(String::from("Failed to resolve path.")))?; if let Some(directory_path_str) = std::path::Path::new(&p) .parent() .map(|p| p.to_str().unwrap_or_default()) { if directory_path_str.eq("") { - tracing::debug!("$cfg_dir resolved to '.'"); - Ok(".".into()) + tracing::trace!("$cfg_dir resolved to '.'"); + let xx = ".".to_string(); + //self.1 = Some(xx.clone()); + Ok(xx) } else { - tracing::debug!("$cfg_dir resolved to {directory_path_str}"); - Ok(directory_path_str.into()) - } + tracing::trace!("$cfg_dir resolved to {directory_path_str}"); + let xx = directory_path_str.to_string(); + //self.1 = Some(xx.clone()); + Ok(xx) + } } else { bail!(format!("Failed to resolve $cfg_dir")); @@ -246,7 +260,7 @@ impl ConfigWrapper { // ---> port-mapping... - pub async fn add_or_replace_hosted_process(&mut self,hostname:&str,mut item:crate::InProcessSiteConfig,state:Arc) -> anyhow::Result<()> { + pub async fn add_or_replace_hosted_process(&mut self,hostname:&str,item:crate::InProcessSiteConfig,state:Arc) -> anyhow::Result<()> { if let Some(hosted_site_configs) = &mut self.hosted_process { @@ -451,7 +465,7 @@ impl ConfigWrapper { // it is done this way in order to avoid changing the global state of the configuration in to the resolved state // since that would then be saved to disk and we would lose the original configuration with dynamic variables // making configuration files less portable. 
- pub fn resolve_process_configuration(&self,proc:&crate::InProcessSiteConfig) -> anyhow::Result { + pub fn resolve_process_configuration(&mut self,proc:&crate::InProcessSiteConfig) -> anyhow::Result { let mut resolved_proc = crate::FullyResolvedInProcessSiteConfig { excluded_from_start_all: proc.exclude_from_start_all.unwrap_or(false), @@ -477,7 +491,7 @@ impl ConfigWrapper { // tracing::info!("Resolved home directory: {}",&resolved_home_dir_str); - let cfg_dir = Self::get_parent(&self.path.clone().expect("all configurations need a path on disk. if you see this, there is a bug in odd-box."))?; + let cfg_dir = self.get_parent()?; let root_dir = if let Some(rd) = &self.root_dir { diff --git a/src/http_proxy/service.rs b/src/http_proxy/service.rs index 82a700c..ea7f264 100644 --- a/src/http_proxy/service.rs +++ b/src/http_proxy/service.rs @@ -150,7 +150,7 @@ impl<'a> Service> for ReverseProxyService { Ok(x) }, Err(e) => { - Err(CustomError(format!("yeah that was not cool {e:?}"))) + Err(CustomError(format!("{e:?}"))) }, } }) @@ -173,15 +173,6 @@ async fn handle_http_request( - - let cfg_clone = { state.config.read().await.0.clone()} ; - - - // let mut response = EpicResponse::new(create_epic_string_full_body(&"hey nerd")); - // *response.status_mut() = StatusCode::OK; - - // return Ok(response); - let req_host_name = if let Some(hh) = req.headers().get("host") { let hostname_and_port = hh.to_str().map_err(|e|CustomError(format!("{e:?}")))?.to_string(); @@ -211,23 +202,30 @@ async fn handle_http_request( } - - let found_target = if let Some(processes) = &cfg_clone.hosted_process { - processes.iter().find(|p| { - req_host_name == p.host_name - || p.capture_subdomains.unwrap_or_default() && req_host_name.ends_with(&format!(".{}",p.host_name)) - }) - } else { - None - }; + let mut found_hosted_target = { + let cfg_guard = state.config.read().await; + if let Some(processes) = &cfg_guard.hosted_process { + if let Some(pp) = processes.iter().find(|p| { + req_host_name == p.host_name + || p.capture_subdomains.unwrap_or_default() + && req_host_name.ends_with(&format!(".{}",p.host_name)) + }) { + Some(pp.clone()) + } else { + None + } + } else { + None + } + }; - if let Some(target_cfg) = found_target { + if let Some(target_proc_cfg) = found_hosted_target { let current_target_status : Option = { - let info = state.app_state.site_status_map.get(&target_cfg.host_name); + let info = state.app_state.site_status_map.get(&target_proc_cfg.host_name); match info { Some(data) => Some(data.value().clone()), None => None, @@ -238,7 +236,7 @@ async fn handle_http_request( Some(ProcState::Running) | Some(ProcState::Faulty) | Some(ProcState::Starting) => {}, _ => { // auto start site in case its been disabled by other requests - _ = tx.send(super::ProcMessage::Start(target_cfg.host_name.to_owned())).map_err(|e|format!("{e:?}")); + _ = tx.send(super::ProcMessage::Start(target_proc_cfg.host_name.to_owned())).map_err(|e|format!("{e:?}")); } } @@ -258,13 +256,36 @@ async fn handle_http_request( } } - let port = if let Some(active_port) = target_cfg.active_port { + + // re-read config from global state in case it was not started before + if target_proc_cfg.active_port.is_none() { + found_hosted_target = { + let cfg_guard = state.config.read().await; + if let Some(processes) = &cfg_guard.hosted_process { + if let Some(pp) = processes.iter().find(|p| { + req_host_name == p.host_name + || p.capture_subdomains.unwrap_or_default() + && req_host_name.ends_with(&format!(".{}",p.host_name)) + }) { + Some(pp.clone()) + } else { 
+ None + } + } else { + None + } + }; + } + + + + let port = if let Some(active_port) = target_proc_cfg.active_port { active_port } else { return Err(CustomError(format!("No active port found for {req_host_name}"))) }; - let enforce_https = target_cfg.https.is_some_and(|x|x); + let enforce_https = target_proc_cfg.https.is_some_and(|x|x); let scheme = if enforce_https { "https" } else { "http" }; let mut original_path_and_query = req.uri().path_and_query() @@ -273,28 +294,23 @@ async fn handle_http_request( let parsed_host_name = { - let forward_subdomains = target_cfg.forward_subdomains.unwrap_or_default(); + let forward_subdomains = target_proc_cfg.forward_subdomains.unwrap_or_default(); if forward_subdomains { - if let Some(subdomain) = get_subdomain(&req_host_name, &target_cfg.host_name) { - Cow::Owned(format!("{subdomain}.{}", &target_cfg.host_name)) + if let Some(subdomain) = get_subdomain(&req_host_name, &target_proc_cfg.host_name) { + Cow::Owned(format!("{subdomain}.{}", &target_proc_cfg.host_name)) } else { - Cow::Borrowed(&target_cfg.host_name) + Cow::Borrowed(&target_proc_cfg.host_name) } } else { - Cow::Borrowed(&target_cfg.host_name) + Cow::Borrowed(&target_proc_cfg.host_name) } }; - // THE RESOLVED HOSTNAME SHOULD BE ADDED AS A HOST HEADER HERE. - - // using ip to avoid dns lookup for local targets - // todo - should this be opt in/out ? let target_url = format!("{scheme}://{}:{}{}", parsed_host_name, port, original_path_and_query ); - // we add the host flag manually in proxy method, this is only to avoid dns lookup for local targets. // todo: opt in/out via cfg @@ -304,20 +320,17 @@ async fn handle_http_request( original_path_and_query ); - let target_cfg = (*target_cfg).clone(); + let target_cfg = target_proc_cfg.clone(); let hints = target_cfg.hints.clone(); let target = crate::http_proxy::Target::Proc(target_cfg); - // return Ok(EpicResponse::new(create_epic_string_full_body(&"hey nerd!!!"))); - - let result = proxy( &parsed_host_name, is_https, state.clone(), req, - &target_url, + &skip_dns_for_local_target_url, target, client_ip, client, @@ -337,7 +350,7 @@ async fn handle_http_request( else { - if let Some(remote_target_cfg) = cfg_clone.remote_target.clone().unwrap_or_default().iter().find(|p|{ + if let Some(remote_target_cfg) = &state.config.read().await.remote_target.clone().unwrap_or_default().iter().find(|p|{ //tracing::info!("comparing incoming req: {} vs {} ",req_host_name,p.host_name); req_host_name == p.host_name || p.capture_subdomains.unwrap_or_default() && req_host_name.ends_with(&format!(".{}",p.host_name)) diff --git a/src/http_proxy/utils.rs b/src/http_proxy/utils.rs index fe19e70..679ed50 100644 --- a/src/http_proxy/utils.rs +++ b/src/http_proxy/utils.rs @@ -110,7 +110,7 @@ pub async fn proxy( } } } - let mut target_url = target_url.to_string(); + // Handle upgrade headers if let Some(typ) = &request_upgrade_type { if typ.to_uppercase()=="H2C" { @@ -191,7 +191,7 @@ pub async fn proxy( ); - tracing::warn!("Sending request:\n{:?}", proxied_request); + tracing::trace!("Sending request:\n{:?}", proxied_request); // todo - prevent making a connection if client already has too many tcp connections open @@ -202,14 +202,14 @@ pub async fn proxy( .map_err(ProxyError::LegacyError)? }; - tracing::warn!( + tracing::trace!( "GOT THIS RESPONSE FROM REQ TO '{target_url}' : {:?}",response ); // if the backend agreed to upgrade to some other protocol, we will create a bidirectional tunnel for the client and backend to communicate directly. 
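The hosted-target lookup above accepts a request when the incoming host name equals the configured `host_name`, or when `capture_subdomains` is enabled and the request host is a subdomain of it. A minimal standalone sketch of just that matching rule (the `host_matches` helper and the example host names are illustrative, not part of odd-box):

```rust
/// Route a request to a configured site when the host header matches exactly,
/// or, with `capture_subdomains`, when it is any subdomain of the configured name.
fn host_matches(req_host: &str, cfg_host: &str, capture_subdomains: bool) -> bool {
    req_host == cfg_host
        || (capture_subdomains && req_host.ends_with(&format!(".{cfg_host}")))
}

fn main() {
    assert!(host_matches("site.localtest.me", "site.localtest.me", false));
    assert!(host_matches("api.site.localtest.me", "site.localtest.me", true));
    assert!(!host_matches("api.site.localtest.me", "site.localtest.me", false));
    assert!(!host_matches("othersite.localtest.me", "site.localtest.me", true));
}
```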
if response.status() == StatusCode::SWITCHING_PROTOCOLS { let response_upgrade_type = get_upgrade_type(response.headers()); - tracing::warn!("RESPONSE IS TO UPGRADE TO : {response_upgrade_type:?}."); + tracing::trace!("RESPONSE IS TO UPGRADE TO : {response_upgrade_type:?}."); if request_upgrade_type == response_upgrade_type { if let Some(request_upgraded) = request_upgraded { @@ -225,7 +225,7 @@ pub async fn proxy( let upgraded = match request_upgraded.await { Err(e) => { - tracing::warn!("failed to upgrade req: {e:?}"); + tracing::trace!("failed to upgrade req: {e:?}"); return; } diff --git a/src/http_proxy/websockets.rs b/src/http_proxy/websockets.rs index 6b960cb..aa89a98 100644 --- a/src/http_proxy/websockets.rs +++ b/src/http_proxy/websockets.rs @@ -2,10 +2,9 @@ use std::{net::SocketAddr, sync::Arc}; use chrono::Local; use hyper_tungstenite::HyperWebsocket; use hyper::{body::Incoming as IncomingBody, Request}; -use rustls::ClientConfig; +use tokio_rustls::rustls::ClientConfig; use crate::{global_state::GlobalState, CustomError}; use futures_util::{SinkExt,StreamExt}; -use crate::tcp_proxy::ReverseTcpProxyTarget; use super::{ReverseProxyService, Target}; use crate::types::proxy_state::{ ConnectionKey, @@ -90,7 +89,7 @@ pub async fn handle_ws(req:Request,service:ReverseProxyService,ws: tracing::debug!("initiating websocket tunnel to {}",ws_url); - let client_tls_config = ClientConfig::builder_with_protocol_versions(rustls::ALL_VERSIONS) + let client_tls_config = ClientConfig::builder_with_protocol_versions(tokio_rustls::rustls::ALL_VERSIONS) .with_native_roots() .expect("should always be able to build a tls client") .with_no_client_auth(); diff --git a/src/logging.rs b/src/logging.rs index 0c78685..c290e31 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -2,6 +2,7 @@ use std::collections::VecDeque; use std::{collections::HashMap, sync::Arc}; use std::sync::Mutex; +use ratatui::layout::Size; use tracing::Subscriber; use tracing_subscriber::layer::Context; use tracing_subscriber::Layer; diff --git a/src/main.rs b/src/main.rs index 32d3cb4..6e3c3ea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,7 @@ mod types; mod tcp_proxy; mod http_proxy; mod proxy; +use anyhow::bail; use configuration::v2::FullyResolvedInProcessSiteConfig; use dashmap::DashMap; use global_state::GlobalState; @@ -10,7 +11,8 @@ use configuration::v2::InProcessSiteConfig; use configuration::v2::RemoteSiteConfig; use configuration::OddBoxConfiguration; use http_proxy::ProcMessage; -use rustls::pki_types::{CertificateDer, PrivateKeyDer}; +use ratatui::text::ToLine; +use tokio_rustls::rustls::pki_types::{CertificateDer, PrivateKeyDer}; use self_update::cargo_crate_version; use tracing_subscriber::layer::SubscriberExt; use std::fmt::Debug; @@ -44,7 +46,7 @@ impl ProcId { } lazy_static! { - static ref THREAD_MAP: Arc>>> = Arc::new(Mutex::new(HashMap::new())); + static ref PROC_THREAD_MAP: Arc>>> = Arc::new(Mutex::new(HashMap::new())); } static REQUEST_ID_COUNTER: AtomicU64 = AtomicU64::new(1); @@ -69,13 +71,13 @@ pub mod global_state { #[derive(Debug)] struct DynamicCertResolver { // todo: dashmap? 
- cache: Mutex>>, + cache: Mutex>>, } -use rustls::server::{ClientHello, ResolvesServerCert}; +use tokio_rustls::rustls::server::{ClientHello, ResolvesServerCert}; impl ResolvesServerCert for DynamicCertResolver { - fn resolve(&self, client_hello: ClientHello) -> Option> { + fn resolve(&self, client_hello: ClientHello) -> Option> { let server_name = client_hello.server_name()?; @@ -113,8 +115,8 @@ impl ResolvesServerCert for DynamicCertResolver { return None } if let Ok(private_key) = my_rsa_private_keys(&key_path) { - if let Ok(rsa_signing_key) = rustls::crypto::aws_lc_rs::sign::any_supported_type(&private_key) { - let result = std::sync::Arc::new(rustls::sign::CertifiedKey::new( + if let Ok(rsa_signing_key) = tokio_rustls::rustls::crypto::aws_lc_rs::sign::any_supported_type(&private_key) { + let result = std::sync::Arc::new(tokio_rustls::rustls::sign::CertifiedKey::new( cert_chain, rsa_signing_key )); @@ -187,7 +189,7 @@ fn my_rsa_private_keys(path: &str) -> Result { let file = File::open(&path).map_err(|e|format!("{e:?}"))?; let mut reader = BufReader::new(file); let mut keys = rustls_pemfile::pkcs8_private_keys(&mut reader) - .collect::,_>>().map_err(|e|format!("{e:?}"))?; + .collect::,_>>().map_err(|e|format!("{e:?}"))?; match keys.len() { 0 => Err(format!("No PKCS8-encoded private key found in {path}").into()), @@ -296,7 +298,7 @@ pub fn initialize_panic_handler() { fn thread_cleaner() { - let mut map = THREAD_MAP.lock().unwrap(); + let mut map = PROC_THREAD_MAP.lock().unwrap(); map.retain(|_k,v| v.upgrade().is_some()); } @@ -304,15 +306,48 @@ fn thread_cleaner() { #[tokio::main(flavor="multi_thread")] async fn main() -> anyhow::Result<()> { - // spawn thread cleaner and loop ever 1 second - tokio::spawn(async move { - loop { - thread_cleaner(); - tokio::time::sleep(Duration::from_secs(1)).await; + let args = Args::parse(); + let tui_flag = args.tui.unwrap_or(true); + + if args.update { + _ = update().await; + return Ok(()); + } + + initialize_panic_handler(); + + let result = inner(&args).await; + + if tui_flag { + use crossterm::{ + event::DisableMouseCapture, + execute, + terminal::{disable_raw_mode, LeaveAlternateScreen}, + }; + _ = disable_raw_mode(); + let mut stdout = std::io::stdout(); + _ = execute!(stdout, LeaveAlternateScreen, DisableMouseCapture); + } + + match result { + Ok(_) => { + std::process::exit(0); + }, + Err(e) => { + println!("odd-box exited with error: {:?}",e); + std::process::exit(1); } - }); + } - let args = Args::parse(); + + +} + +async fn inner( + args:&Args +) -> anyhow::Result<()> { + + let (filter, reload_handle) = tracing_subscriber::reload::Layer::new( EnvFilter::from_default_env() @@ -320,10 +355,6 @@ async fn main() -> anyhow::Result<()> { .add_directive("tokio_util=info".parse().expect("this directive will always work")) .add_directive("hyper=info".parse().expect("this directive will always work"))); - initialize_panic_handler(); - - - let (tx,_) = tokio::sync::broadcast::channel::(33); @@ -394,12 +425,6 @@ async fn main() -> anyhow::Result<()> { - - if args.update { - _ = update().await; - return Ok(()); - } - if args.generate_example_cfg { let cfg = crate::configuration::v2::OddBoxV2Config::example(); let serialized = toml::to_string_pretty(&cfg).unwrap(); @@ -409,8 +434,8 @@ async fn main() -> anyhow::Result<()> { // By default we use odd-box.toml, and otherwise we try to read from Config.toml let cfg_path = - if let Some(cfg) = args.configuration { - cfg + if let Some(cfg) = &args.configuration { + cfg.to_string() } else { if 
std::fs::metadata("odd-box.toml").is_ok() { "odd-box.toml".to_owned() @@ -428,7 +453,7 @@ async fn main() -> anyhow::Result<()> { let mut config: ConfigWrapper = - ConfigWrapper(match configuration::OddBoxConfig::parse(&contents) { + ConfigWrapper::new(match configuration::OddBoxConfig::parse(&contents) { Ok(configuration) => configuration.try_upgrade_to_latest_version().expect("configuration upgrade failed. this is a bug in odd-box"), Err(e) => anyhow::bail!(e), }); @@ -436,12 +461,12 @@ async fn main() -> anyhow::Result<()> { config.is_valid()?; // some times we are invoked with specific sites to run - if let Some(sites) = args.enable_site { + if let Some(sites) = &args.enable_site { if config.hosted_process.as_ref().map_or(true, Vec::is_empty) { anyhow::bail!("You have not configured any sites yet..".to_string()); } - for site in &sites { + for site in sites { let site_config = config.hosted_process.as_ref() .and_then(|processes| processes.iter().find(|x| &x.host_name == site)); @@ -529,29 +554,33 @@ async fn main() -> anyhow::Result<()> { } } - // replace initial empty config with resolved one - let mut cfg_write_guard = shared_config.write().await; - *cfg_write_guard = config; - - for x in cfg_write_guard.remote_target.as_ref().unwrap_or(&vec![]) { + for x in config.remote_target.as_ref().unwrap_or(&vec![]) { inner_state_arc.site_status_map.insert(x.host_name.to_owned(), ProcState::Remote); } - - for x in cfg_write_guard.hosted_process.iter().flatten() { - let resolved_proc = cfg_write_guard.resolve_process_configuration(&x)?; - tokio::task::spawn(proc_host::host( - resolved_proc, - tx.subscribe(), - global_state.clone(), - )); + // todo: clean this up + for x in config.hosted_process.clone().iter().flatten() { + match config.resolve_process_configuration(&x) { + Ok(x) => { + tokio::task::spawn(proc_host::host( + x, + tx.subscribe(), + global_state.clone(), + )); + } + Err(e) => bail!("Failed to resolve process configuration for:\n=====================================================\n{:?}.\n=====================================================\n\nThe error was: {:?}",x,e) + } } + // replace initial empty config with resolved one + let mut cfg_write_guard = shared_config.write().await; + *cfg_write_guard = config; + drop(cfg_write_guard); let shared_read_guard = shared_config.read().await; @@ -582,6 +611,14 @@ async fn main() -> anyhow::Result<()> { tokio::spawn(async move { api::run(api_state,api_port, api_broadcaster).await }); + + // spawn thread cleaner and loop ever 1 second + tokio::spawn(async move { + loop { + thread_cleaner(); + tokio::time::sleep(Duration::from_secs(1)).await; + } + }); let use_tui = tui_thread.is_some(); @@ -593,7 +630,7 @@ async fn main() -> anyhow::Result<()> { tracing::info!("leaving main loop"); break; } - tokio::time::sleep(Duration::from_millis(500)).await; + tokio::time::sleep(Duration::from_millis(2000)).await; } } @@ -624,26 +661,10 @@ async fn main() -> anyhow::Result<()> { tracing::info!("{name} ==> {status:?}") } } -} + } - if use_tui { - println!("Performing cleanup, please wait.."); - - use crossterm::{ - event::DisableMouseCapture, - execute, - terminal::{disable_raw_mode, LeaveAlternateScreen}, - }; - _ = disable_raw_mode(); - let mut stdout = std::io::stdout(); - _ = execute!(stdout, LeaveAlternateScreen, DisableMouseCapture); - } else { - tracing::info!("Performing cleanup, please wait.."); - } - _ = proxy_thread.abort(); _ = proxy_thread.await.ok(); Ok(()) -} - +} \ No newline at end of file diff --git a/src/proc_host.rs 
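In `main.rs` the thread registry is renamed to `PROC_THREAD_MAP` and holds `Weak` handles that the periodic `thread_cleaner` prunes once the owning process loop has dropped its `Arc`. A self-contained sketch of that retain-on-upgrade pattern (the `ProcRegistry` type and site names are made up for illustration; odd-box keeps the map in a `lazy_static` behind a `Mutex`):

```rust
use std::collections::HashMap;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Weak};

/// Simplified stand-in for the process registry: each hosted process owns an
/// Arc flag, and the registry only keeps a Weak reference to it.
struct ProcRegistry {
    map: HashMap<String, Weak<AtomicBool>>,
}

impl ProcRegistry {
    fn register(&mut self, name: &str, alive: &Arc<AtomicBool>) {
        self.map.insert(name.to_string(), Arc::downgrade(alive));
    }

    /// Same idea as `thread_cleaner`: keep only entries whose Arc is still alive.
    fn clean(&mut self) {
        self.map.retain(|_name, weak| weak.upgrade().is_some());
    }
}

fn main() {
    let mut registry = ProcRegistry { map: HashMap::new() };

    let proc_a = Arc::new(AtomicBool::new(true));
    registry.register("site-a", &proc_a);
    {
        let proc_b = Arc::new(AtomicBool::new(true));
        registry.register("site-b", &proc_b);
        // proc_b's Arc is dropped at the end of this scope.
    }

    registry.clean();
    assert!(registry.map.contains_key("site-a"));
    assert!(!registry.map.contains_key("site-b"));
}
```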
b/src/proc_host.rs index a945932..4749076 100644 --- a/src/proc_host.rs +++ b/src/proc_host.rs @@ -18,7 +18,7 @@ pub async fn host( let my_arc = std::sync::Arc::new(AtomicBool::new(true)); { - let thread_map_guard = crate::THREAD_MAP.clone(); + let thread_map_guard = crate::PROC_THREAD_MAP.clone(); let mut thread_map = thread_map_guard.lock().unwrap(); thread_map.insert(resolved_proc.proc_id.clone(), std::sync::Arc::::downgrade(&my_arc)); } @@ -37,15 +37,8 @@ pub async fn host( } }; - - // previously this was configured separately in config via the "disabled" prop, but we now use auto_start - // as a combined prop for this. If auto_start is set to false, we will not start the site automatically also - // when using the "start_all_sites" command. let excluded_from_auto_start = resolved_proc.excluded_from_start_all; - - - let mut initialized = false; let domsplit = resolved_proc.host_name.split(".").collect::>(); @@ -61,319 +54,322 @@ pub async fn host( let mut selected_port: Option = None; loop { + tokio::time::sleep(Duration::from_millis(200)).await; + let mut time_to_sleep_ms_after_loop = 500; + // scope for clarity + { + let exit = state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true; + + if exit { + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + tracing::debug!("exiting host for {}",&resolved_proc.host_name); + break + } - - - let exit = state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true; - - if exit { - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); - tracing::debug!("exiting host for {}",&resolved_proc.host_name); - break - } - - if initialized == false { - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); - initialized = true; - } else { - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + if initialized == false { + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + initialized = true; + } else { + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + + } - } - - let is_enabled_before = enabled == true; - - while let Ok(msg) = rcv.try_recv() { - match msg { - ProcMessage::StartAll if excluded_from_auto_start => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&resolved_proc.host_name), - ProcMessage::Start(s) if excluded_from_auto_start && s == "all" => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&resolved_proc.host_name), - - ProcMessage::Delete(s,sender) => { - if acceptable_names.contains(&s) { - tracing::warn!("[{}] Dropping due to having been deleted by proxy.", resolved_proc.host_name); - state.app_state.site_status_map.remove(&resolved_proc.host_name); - match sender.send(0).await { - Ok(_) => {}, - Err(e) => {tracing::warn!("Failed to send confirmation to proxy service that we stopped! 
{e:?}") - }, + let is_enabled_before = enabled == true; + + while let Ok(msg) = rcv.try_recv() { + match msg { + ProcMessage::StartAll if excluded_from_auto_start => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&resolved_proc.host_name), + ProcMessage::Start(s) if excluded_from_auto_start && s == "all" => tracing::debug!("Refusing to start {} as thru the start all command as it is disabled",&resolved_proc.host_name), + + ProcMessage::Delete(s,sender) => { + if acceptable_names.contains(&s) { + tracing::warn!("[{}] Dropping due to having been deleted by proxy.", resolved_proc.host_name); + state.app_state.site_status_map.remove(&resolved_proc.host_name); + match sender.send(0).await { + Ok(_) => {}, + Err(e) => {tracing::warn!("Failed to send confirmation to proxy service that we stopped! {e:?}") + }, + } + return + } + }, + ProcMessage::StartAll => enabled = true, + ProcMessage::StopAll => enabled = false, + ProcMessage::Start(s) => { + let is_for_me = s == "all" || acceptable_names.contains(&s); + if is_for_me { + enabled = true; + } + }, + ProcMessage::Stop(s) => { + let is_for_me = s == "all" || acceptable_names.contains(&s); + if is_for_me { + enabled = false; } - return - } - }, - ProcMessage::StartAll => enabled = true, - ProcMessage::StopAll => enabled = false, - ProcMessage::Start(s) => { - let is_for_me = s == "all" || acceptable_names.contains(&s); - if is_for_me { - enabled = true; - } - }, - ProcMessage::Stop(s) => { - let is_for_me = s == "all" || acceptable_names.contains(&s); - if is_for_me { - enabled = false; } } } - } - - if !enabled { - if enabled != is_enabled_before { - tracing::info!("[{}] Disabled via command from proxy service",&resolved_proc.host_name); - { - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + + if !enabled { + if enabled != is_enabled_before { + tracing::info!("[{}] Disabled via command from proxy service",&resolved_proc.host_name); + { + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopped); + } } + continue; } - continue; - } - - if enabled != is_enabled_before { - tracing::info!("[{}] Enabled via command from proxy service",&resolved_proc.host_name); - } + + if enabled != is_enabled_before { + tracing::info!("[{}] Enabled via command from proxy service",&resolved_proc.host_name); + } - - - - if selected_port == None { - let mut guard = state.config.write().await; + - if let Ok(p) = guard.set_active_port(&mut resolved_proc) { - selected_port = Some(p); + if selected_port == None { + + let mut guard = state.config.write().await; + + if let Ok(p) = guard.set_active_port(&mut resolved_proc) { + selected_port = Some(p); + } + + if selected_port.is_none() { + let ms = 3000; + tracing::warn!("[{}] No usable port found. Waiting for {}ms before retrying..",&resolved_proc.host_name,ms); + tokio::time::sleep(Duration::from_millis(ms)).await; + continue; + } + + } - - if selected_port.is_none() { - let ms = 3000; - tracing::warn!("[{}] No usable port found. 
Waiting for {}ms before retrying..",&resolved_proc.host_name,ms); - tokio::time::sleep(Duration::from_millis(ms)).await; - continue; + else { + tracing::info!("[{}] Using the previously selected port '{}'",&resolved_proc.host_name,selected_port.unwrap()); } - - } - else { - tracing::info!("[{}] Using the previously selected port '{}'",&resolved_proc.host_name,selected_port.unwrap()); - } + let current_work_dir = std::env::current_dir().expect("could not get current directory").to_str().expect("could not convert current directory to string").to_string(); + + let workdir = &resolved_proc.dir.clone().unwrap_or(current_work_dir); - let current_work_dir = std::env::current_dir().expect("could not get current directory").to_str().expect("could not convert current directory to string").to_string(); - - let workdir = &resolved_proc.dir.clone().unwrap_or(current_work_dir); + tracing::warn!("[{}] Executing command '{}' in directory '{}'",resolved_proc.host_name,resolved_proc.bin,workdir); - tracing::warn!("[{}] Executing command '{}' in directory '{}'",resolved_proc.host_name,resolved_proc.bin,workdir); + let mut bin_path = std::path::PathBuf::from(&workdir); + bin_path.push(&resolved_proc.bin); - let mut bin_path = std::path::PathBuf::from(&workdir); - bin_path.push(&resolved_proc.bin); + let mut process_specific_environment_variables = HashMap::new(); + + { + let state_guard = state.config.read().await; + for kvp in &state_guard.env_vars.clone() { + tracing::debug!("[{}] ADDING GLOBAL ENV VAR '{}': {}", &resolved_proc.host_name,&kvp.key,&kvp.value); + process_specific_environment_variables.insert(kvp.key.clone(), kvp.value.clone()); + } + } - let mut process_specific_environment_variables = HashMap::new(); - - { - let state_guard = state.config.read().await; - for kvp in &state_guard.env_vars.clone() { - tracing::debug!("[{}] ADDING GLOBAL ENV VAR '{}': {}", &resolved_proc.host_name,&kvp.key,&kvp.value); + // more specific env vars should override globals + for kvp in &resolved_proc.env_vars.clone().unwrap_or_default() { + tracing::debug!("[{}] ADDING ENV VAR '{}': {}", &resolved_proc.host_name,&kvp.key,&kvp.value); process_specific_environment_variables.insert(kvp.key.clone(), kvp.value.clone()); } - } - // more specific env vars should override globals - for kvp in &resolved_proc.env_vars.clone().unwrap_or_default() { - tracing::debug!("[{}] ADDING ENV VAR '{}': {}", &resolved_proc.host_name,&kvp.key,&kvp.value); - process_specific_environment_variables.insert(kvp.key.clone(), kvp.value.clone()); - } + let port = selected_port + .expect("it should not be possible to start a process without a port first having been chosen - this is a bug in odd-box").to_string(); - let port = selected_port - .expect("it should not be possible to start a process without a port first having been chosen - this is a bug in odd-box").to_string(); + process_specific_environment_variables.insert("PORT".into(), port.clone()); - process_specific_environment_variables.insert("PORT".into(), port.clone()); + let mut pre_resolved_args = resolved_proc.args.clone().unwrap_or_default(); - let mut pre_resolved_args = resolved_proc.args.clone().unwrap_or_default(); - - for p in &mut pre_resolved_args { - *p = p.replace("$port",&port); - } + for p in &mut pre_resolved_args { + *p = p.replace("$port",&port); + } - - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Starting); - + + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Starting); + - const 
_CREATE_NO_WINDOW: u32 = 0x08000000; - - #[cfg(target_os = "windows")] - const DETACHED_PROCESS: u32 = 0x00000008; + const _CREATE_NO_WINDOW: u32 = 0x08000000; - #[cfg(target_os="windows")] - use std::os::windows::process::CommandExt; - - #[cfg(target_os = "windows")] - let cmd = Command::new(bin_path) - .args(pre_resolved_args) - .envs(&process_specific_environment_variables) - .current_dir(&workdir) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .stdin(Stdio::null()) - // dont want windows to let child take over our keyboard input and such - .creation_flags(DETACHED_PROCESS).spawn(); - - #[cfg(not(target_os = "windows"))] - let cmd = Command::new(bin_path) - .args(pre_resolved_args) - .envs(&process_specific_environment_variables) - .current_dir(&workdir) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .stdin(Stdio::null()) - .spawn(); - - match cmd { - Ok(mut child) => { - - - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Running); + #[cfg(target_os = "windows")] + const DETACHED_PROCESS: u32 = 0x00000008; + #[cfg(target_os="windows")] + use std::os::windows::process::CommandExt; + + #[cfg(target_os = "windows")] + let cmd = Command::new(bin_path) + .args(pre_resolved_args) + .envs(&process_specific_environment_variables) + .current_dir(&workdir) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()) + // dont want windows to let child take over our keyboard input and such + .creation_flags(DETACHED_PROCESS).spawn(); + + #[cfg(not(target_os = "windows"))] + let cmd = Command::new(bin_path) + .args(pre_resolved_args) + .envs(&process_specific_environment_variables) + .current_dir(&workdir) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()) + .spawn(); + + match cmd { + Ok(mut child) => { - //let stdin = child.stdin.take().expect("Failed to capture stdin"); + + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Running); + - let stdout = child.stdout.take().expect("Failed to capture stdout"); - let stderr = child.stderr.take().expect("Failed to capture stderr"); + //let stdin = child.stdin.take().expect("Failed to capture stdin"); + let stdout = child.stdout.take().expect("Failed to capture stdout"); + let stderr = child.stderr.take().expect("Failed to capture stderr"); - let stdout_reader = std::io::BufReader::new(stdout); - let stderr_reader = std::io::BufReader::new(stderr); - let procname = resolved_proc.host_name.clone(); - let reclone = re.clone(); - let logformat = resolved_proc.log_format.clone(); - _ = std::thread::Builder::new().name(format!("{procname}")).spawn(move || { - - let mut current_log_level = 0; - - for line in std::io::BufRead::lines(stdout_reader) { - if let Ok(line) = line{ - // todo: should move custom logging elsewhere if theres ever more than one - if let Some(LogFormat::dotnet) = &logformat { - if line.len() > 0 { - let mut trimmed = reclone.replace(&line, "").to_string(); - if trimmed.contains(" WARN ") || trimmed.contains("warn:") { - current_log_level = 3; - trimmed.replace("warn:", "").trim().to_string(); - } else if trimmed.contains("ERROR") || trimmed.contains("error:"){ - current_log_level = 4; - trimmed.replace("error:", "").trim().to_string(); - } else if trimmed.contains("DEBUG")|| trimmed.contains("debug:"){ - current_log_level = 1; - trimmed.replace("debug:", "").trim().to_string(); - } else if trimmed.contains("INFO")|| trimmed.contains("info:"){ - current_log_level = 2; - trimmed = trimmed.replace("info:", 
"").trim().to_string() + let stdout_reader = std::io::BufReader::new(stdout); + let stderr_reader = std::io::BufReader::new(stderr); + let procname = resolved_proc.host_name.clone(); + let reclone = re.clone(); + let logformat = resolved_proc.log_format.clone(); + _ = std::thread::Builder::new().name(format!("{procname}")).spawn(move || { + + let mut current_log_level = 0; + + for line in std::io::BufRead::lines(stdout_reader) { + if let Ok(line) = line{ + + // todo: should move custom logging elsewhere if theres ever more than one + if let Some(LogFormat::dotnet) = &logformat { + if line.len() > 0 { + let mut trimmed = reclone.replace(&line, "").to_string(); + if trimmed.contains(" WARN ") || trimmed.contains("warn:") { + current_log_level = 3; + trimmed.replace("warn:", "").trim().to_string(); + } else if trimmed.contains("ERROR") || trimmed.contains("error:"){ + current_log_level = 4; + trimmed.replace("error:", "").trim().to_string(); + } else if trimmed.contains("DEBUG")|| trimmed.contains("debug:"){ + current_log_level = 1; + trimmed.replace("debug:", "").trim().to_string(); + } else if trimmed.contains("INFO")|| trimmed.contains("info:"){ + current_log_level = 2; + trimmed = trimmed.replace("info:", "").trim().to_string() + } + match ¤t_log_level { + 1 => tracing::debug!("{}",trimmed), + 2 => tracing::info!("{}",trimmed), + 3 => tracing::warn!("{}",trimmed), + 4 => tracing::error!("{}",trimmed), + _ => tracing::trace!("{}",trimmed) // hide anything does has no explicit level unless running in trace mode + } + } else { + current_log_level = 0; } - match ¤t_log_level { - 1 => tracing::debug!("{}",trimmed), - 2 => tracing::info!("{}",trimmed), - 3 => tracing::warn!("{}",trimmed), - 4 => tracing::error!("{}",trimmed), - _ => tracing::trace!("{}",trimmed) // hide anything does has no explicit level unless running in trace mode - } } else { - current_log_level = 0; + tracing::info!("{}",line) } - } else { - tracing::info!("{}",line) - } - } - } - }); - - let procname = resolved_proc.host_name.clone(); - _ = std::thread::Builder::new().name(format!("{procname}")).spawn(move || { - for line in std::io::BufRead::lines(stderr_reader) { - if let Ok(line) = line{ - if line.len() > 0 { - tracing::error!("{}",line.trim()); - } - } - } - }); - - while let Ok(None) = child.try_wait() { - let exit = state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true; - if exit { - tracing::info!("[{}] Stopping due to app exit", resolved_proc.host_name); - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopping); - _ = child.kill(); - break - } - - - while let Ok(msg) = rcv.try_recv() { - match msg { - ProcMessage::Delete(s,sender) => { - if acceptable_names.contains(&s) { - tracing::warn!("[{}] Dropping due to having been deleted by proxy.", resolved_proc.host_name); - state.app_state.site_status_map.remove(&resolved_proc.host_name); - if let Some(mut stdin) = child.stdin.take() { - _ = stdin.write_all(b"q"); - } - _ = child.kill(); - // inform sender that we actually stopped the process and that we are exiting our loop - match sender.send(0).await { - Ok(_) => {}, - Err(e) => { - tracing::warn!("Failed to send confirmation to proxy service that we stopped! 
{e:?}") - }, - } - return - } - }, - ProcMessage::StartAll => enabled = true, - ProcMessage::StopAll => enabled = false, - ProcMessage::Stop(s) => { - let is_for_me = s == "all" || acceptable_names.contains(&s); - if is_for_me { - enabled = false; + } + } + }); + + let procname = resolved_proc.host_name.clone(); + _ = std::thread::Builder::new().name(format!("{procname}")).spawn(move || { + for line in std::io::BufRead::lines(stderr_reader) { + if let Ok(line) = line{ + if line.len() > 0 { + tracing::error!("{}",line.trim()); } - }, - _ => {} + } } - } - if !enabled { - tracing::warn!("[{}] Stopping due to having been disabled by proxy.", resolved_proc.host_name); - // note: we just send q here because some apps like iisexpress requires it + }); + + while let Ok(None) = child.try_wait() { - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopping); + let exit = state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true; + if exit { + tracing::info!("[{}] Stopping due to app exit", resolved_proc.host_name); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopping); + _ = child.kill(); + break + } - if let Some(mut stdin) = child.stdin.take() { - _ = stdin.write_all(b"q"); + + while let Ok(msg) = rcv.try_recv() { + match msg { + ProcMessage::Delete(s,sender) => { + if acceptable_names.contains(&s) { + tracing::warn!("[{}] Dropping due to having been deleted by proxy.", resolved_proc.host_name); + state.app_state.site_status_map.remove(&resolved_proc.host_name); + if let Some(mut stdin) = child.stdin.take() { + _ = stdin.write_all(b"q"); + } + _ = child.kill(); + // inform sender that we actually stopped the process and that we are exiting our loop + match sender.send(0).await { + Ok(_) => {}, + Err(e) => { + tracing::warn!("Failed to send confirmation to proxy service that we stopped! {e:?}") + }, + } + return + } + }, + ProcMessage::StartAll => enabled = true, + ProcMessage::StopAll => enabled = false, + ProcMessage::Stop(s) => { + let is_for_me = s == "all" || acceptable_names.contains(&s); + if is_for_me { + enabled = false; + } + }, + _ => {} + } + } + if !enabled { + tracing::warn!("[{}] Stopping due to having been disabled by proxy.", resolved_proc.host_name); + // note: we just send q here because some apps like iisexpress requires it + + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Stopping); + + if let Some(mut stdin) = child.stdin.take() { + _ = stdin.write_all(b"q"); + } + _ = child.kill(); + break; } - _ = child.kill(); - break; - } + + tokio::time::sleep(Duration::from_millis(100)).await; + } + state.app_state.site_status_map.insert(procname, ProcState::Stopped); - tokio::time::sleep(Duration::from_millis(100)).await; + }, + Err(e) => { + tracing::info!("[{}] Failed to start! {e:?}",resolved_proc.host_name); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Faulty); + }, + } + + if enabled { + if !state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) { + tracing::warn!("[{}] Stopped unexpectedly.. Will automatically restart the process in 5 seconds unless stopped.",resolved_proc.host_name); + state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Faulty); + time_to_sleep_ms_after_loop = 5000; // wait 5 seconds before restarting but NOT in here as we have a lock + } else { + tracing::info!("[{}] Stopped due to exit signal. 
Will not restart.",resolved_proc.host_name); + break } - state.app_state.site_status_map.insert(procname, ProcState::Stopped); - - }, - Err(e) => { - tracing::info!("[{}] Failed to start! {e:?}",resolved_proc.host_name); - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Faulty); - }, - } - let mut time_to_sleep_ms = 500; - if enabled { - if !state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) { - tracing::warn!("[{}] Stopped unexpectedly. Will automatically restart the process in 5 seconds.",resolved_proc.host_name); - state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Faulty); - time_to_sleep_ms = 5000; // wait 5 seconds before restarting but NOT in here as we have a lock - } else { - tracing::info!("[{}] Stopped due to exit signal. Will not restart.",resolved_proc.host_name); - break } + } - - tokio::time::sleep(Duration::from_millis(time_to_sleep_ms)).await; + tokio::time::sleep(Duration::from_millis(time_to_sleep_ms_after_loop)).await; } } diff --git a/src/proxy.rs b/src/proxy.rs index fc42f8f..b2bffd9 100644 --- a/src/proxy.rs +++ b/src/proxy.rs @@ -48,7 +48,7 @@ pub async fn listen( global_state: state.clone() }); - let client_tls_config = rustls::ClientConfig::builder_with_protocol_versions(rustls::ALL_VERSIONS) + let client_tls_config = tokio_rustls::rustls::ClientConfig::builder_with_protocol_versions(tokio_rustls::rustls::ALL_VERSIONS) // todo - add support for accepting self-signed certificates etc // .dangerous() // .with_custom_certificate_verifier(verifier) @@ -120,9 +120,8 @@ pub async fn listen( } -// a lazy static atomic usize to keep count of active tcp connections: lazy_static! { - static ref ACTIVE_TCP_CONNECTIONS: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0); + static ref ACTIVE_TCP_CONNECTIONS_SEMAPHORE : tokio::sync::Semaphore = tokio::sync::Semaphore::new(666); } async fn listen_http( @@ -131,46 +130,46 @@ async fn listen_http( state: Arc, terminating_service_template: ReverseProxyService, targets: Arc, - shutdown_signal: Arc + _shutdown_signal: Arc ) { let socket = TcpSocket::new_v4().expect("new v4 socket should always work"); socket.set_reuseaddr(true).expect("set reuseaddr fail?"); socket.bind(bind_addr).expect(&format!("must be able to bind http serveraddr {bind_addr:?}")); - let listener = socket.listen(3000).expect("must be able to bind http listener."); - - // // TODO - let shutdown_signal = shutdown_signal.clone(); - // _ = shutdown_signal.notified() => { - // // eprintln!("stream aborted due to app shutdown."); break - // // } + let listener = socket.listen(128).expect("must be able to bind http listener."); loop { + // should use semaphore here to limit the number of active connections + if state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) { tracing::debug!("exiting http server loop due to receiving shutdown signal."); break; } - if ACTIVE_TCP_CONNECTIONS.load(std::sync::atomic::Ordering::SeqCst) >= 666 { - tokio::time::sleep(Duration::from_millis(10)).await; - continue; - } + let permit = if let Ok(p) = ACTIVE_TCP_CONNECTIONS_SEMAPHORE.acquire().await { + p + } else { + tracing::warn!("Error acquiring semaphore permit.. This is a bug in odd-box :<"); + break + }; + + match listener.accept().await { Ok((tcp_stream,source_addr)) => { - let c = ACTIVE_TCP_CONNECTIONS.fetch_add(1, std::sync::atomic::Ordering::SeqCst); - tracing::trace!("accepted connection! current active: {}", 1+c); + tracing::trace!("accepted connection! 
current active: {}/666", ACTIVE_TCP_CONNECTIONS_SEMAPHORE.available_permits() ); let mut service: ReverseProxyService = terminating_service_template.clone(); service.remote_addr = Some(source_addr); let arc_clone_targets = targets.clone(); let tx = tx.clone(); let state = state.clone(); tokio::spawn(async move { + let _moved_permit = permit; handle_new_tcp_stream(None,service, tcp_stream, source_addr, arc_clone_targets, false,tx.clone(),state.clone()) .await; - ACTIVE_TCP_CONNECTIONS.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); }); @@ -212,7 +211,7 @@ async fn listen_https( state: Arc, terminating_service_template: ReverseProxyService, targets: Arc, - shutdown_signal: Arc + _shutdown_signal: Arc ) { use socket2::{Domain,Type}; @@ -230,11 +229,11 @@ async fn listen_https( socket.listen(128).expect("we must be able to listen to https addr socket.."); let listener: std::net::TcpListener = socket.into(); listener.set_nonblocking(true).expect("must be able to set_nonblocking on https listener"); - let tcp_listener = tokio::net::TcpListener::from_std(listener).expect("we must be able to listen to https port.."); + let tokio_listener = tokio::net::TcpListener::from_std(listener).expect("we must be able to listen to https port.."); let mut rustls_config = - rustls::ServerConfig::builder() + tokio_rustls::rustls::ServerConfig::builder() .with_no_client_auth() .with_cert_resolver(Arc::new(crate::DynamicCertResolver { cache: std::sync::Mutex::new(HashMap::new()) @@ -248,37 +247,46 @@ async fn listen_https( let arced_tls_config = std::sync::Arc::new(rustls_config); loop { - //tracing::trace!("waiting for new https connection.."); - let targets_arc = targets.clone(); - tokio::select!{ - Ok((tcp_stream, source_addr)) = tcp_listener.accept() => { - tracing::trace!("tcp listener accepted a new https connection"); + // should use semaphore here to limit the number of active connections + + if state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) { + tracing::debug!("exiting http server loop due to receiving shutdown signal."); + break; + } + + let permit = if let Ok(p) = ACTIVE_TCP_CONNECTIONS_SEMAPHORE.acquire().await { + p + } else { + tracing::warn!("Error acquiring semaphore permit.. This is a bug in odd-box :<"); + break + }; + + + match tokio_listener.accept().await { + Ok((tcp_stream,source_addr)) => { + + tracing::trace!("accepted connection! 
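Both proxy listeners above now gate `accept()` on a `tokio::sync::Semaphore` instead of the old atomic counter, moving the acquired permit into the spawned task so it is released whenever that connection's task finishes. A minimal sketch of the same pattern as a standalone program (the bind address, the 204 response, and the permit count are placeholders; odd-box keeps the semaphore in a `lazy_static` shared by the HTTP and HTTPS listeners):

```rust
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpListener;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let limiter = Arc::new(Semaphore::new(666));
    let listener = TcpListener::bind("127.0.0.1:0").await?;

    loop {
        // Wait for a free slot *before* accepting, so the number of in-flight
        // connections can never exceed the permit count.
        let permit = limiter.clone().acquire_owned().await.expect("semaphore never closed");

        let (mut stream, peer) = listener.accept().await?;
        tokio::spawn(async move {
            // Moving the permit into the task ties its lifetime to this connection:
            // when the task ends, the permit is dropped and the slot frees up.
            let _permit = permit;
            let _ = stream.write_all(b"HTTP/1.1 204 No Content\r\n\r\n").await;
            let _ = stream.shutdown().await;
            println!("served {peer}");
        });
    }
}
```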
current active: {}/666", ACTIVE_TCP_CONNECTIONS_SEMAPHORE.available_permits() ); let mut service: ReverseProxyService = terminating_service_template.clone(); service.remote_addr = Some(source_addr); - let shutdown_signal = shutdown_signal.clone(); - let arc_targets_clone = targets_arc.clone(); + let arc_clone_targets = targets.clone(); let tx = tx.clone(); - let arced_tls_config = arced_tls_config.clone(); + let arced_tls_config = Some(arced_tls_config.clone()); let state = state.clone(); - tokio::task::spawn(async move { - tokio::select!{ - _ = handle_new_tcp_stream(Some(arced_tls_config),service, tcp_stream, source_addr, arc_targets_clone, true,tx.clone(),state.clone()) => { - tracing::trace!("https tcp stream handled"); - - } - _ = shutdown_signal.notified() => { - eprintln!("https tcp stream aborted due to app shutdown."); - } - }; + tokio::spawn(async move { + let _moved_permit = permit; + handle_new_tcp_stream(arced_tls_config,service, tcp_stream, source_addr, arc_clone_targets, true,tx.clone(),state.clone()) + .await; }); - - }, - _ = shutdown_signal.notified() => { - tracing::debug!("exiting https server loop due to receiving shutdown signal."); - break; + + + } + Err(e) => { + tracing::warn!("error accepting tcp connection: {:?}", e); + //break; } } + } tracing::warn!("listen_https went bye bye.") } @@ -286,7 +294,7 @@ async fn listen_https( // this will peek in to the incoming tcp stream and either create a direct tcp tunnel (passthru mode) // or hand it off to the terminating http/https hyper services async fn handle_new_tcp_stream( - rustls_config: Option>, + rustls_config: Option>, service:ReverseProxyService, tcp_stream:TcpStream, source_addr:SocketAddr, diff --git a/src/tcp_proxy/tcp.rs b/src/tcp_proxy/tcp.rs index 78d74d3..f16eefe 100644 --- a/src/tcp_proxy/tcp.rs +++ b/src/tcp_proxy/tcp.rs @@ -465,68 +465,68 @@ impl ReverseTcpProxy { } } - #[instrument(skip_all)] - pub async fn listen(&self,shutdown_signal:std::sync::Arc,state: Arc,) -> Result<(), std::io::Error> { + // #[instrument(skip_all)] + // pub async fn listen_tcp_only(&self,shutdown_signal:std::sync::Arc,state: Arc,) -> Result<(), std::io::Error> { - tracing::info!("Starting TCP proxy on {:?}",self.socket_addr); - let listener = TcpListener::bind(self.socket_addr).await?; + // tracing::info!("Starting TCP proxy on {:?}",self.socket_addr); + // let listener = TcpListener::bind(self.socket_addr).await?; - loop { - let local_state_clone = state.clone(); - tokio::select! { - Ok((tcp_stream, client_address)) = listener.accept() => { + // loop { + // let local_state_clone = state.clone(); + // tokio::select! 
{ + // Ok((tcp_stream, client_address)) = listener.accept() => { - let peek_result = Self::peek_tcp_stream(&tcp_stream, client_address).await; + // let peek_result = Self::peek_tcp_stream(&tcp_stream, client_address).await; - let targets_arc = self.targets.clone(); + // let targets_arc = self.targets.clone(); - tokio::spawn(async move { - match peek_result { - Ok(PeekResult { - typ, - http_version : _, - target_host : Some(target_host) - }) => { - let is_tls = match typ { - DataType::TLS => true, - _ => false, - }; + // tokio::spawn(async move { + // match peek_result { + // Ok(PeekResult { + // typ, + // http_version : _, + // target_host : Some(target_host) + // }) => { + // let is_tls = match typ { + // DataType::TLS => true, + // _ => false, + // }; - fn filter_fun(p: &ReverseTcpProxyTarget, target_host: &str) -> Option { - ReverseTcpProxy::req_target_filter_map(p, target_host) - } + // fn filter_fun(p: &ReverseTcpProxyTarget, target_host: &str) -> Option { + // ReverseTcpProxy::req_target_filter_map(p, target_host) + // } - let target_host_str = target_host.as_str(); - if let Some(t) = targets_arc.try_find(|p| filter_fun(p, target_host_str)).await { - _ = Self::tunnel( - tcp_stream, - t, - is_tls, - local_state_clone, - client_address - ).await; - } else { - tracing::debug!("no such target is configured: {target_host:?}") - } - }, - Ok(_) => { - tracing::debug!("could not find a host name so we dont know where to proxy this traffic. giving up on this stream!") - } - Err(e) => { - tracing::debug!("giving up on this stream due to error: {e:?}") - }, - } - }); - - }, - _ = shutdown_signal.notified() => { - break; - }, - } - } - - Ok(()) - } + // let target_host_str = target_host.as_str(); + // if let Some(t) = targets_arc.try_find(|p| filter_fun(p, target_host_str)).await { + // _ = Self::tunnel( + // tcp_stream, + // t, + // is_tls, + // local_state_clone, + // client_address + // ).await; + // } else { + // tracing::debug!("no such target is configured: {target_host:?}") + // } + // }, + // Ok(_) => { + // tracing::debug!("could not find a host name so we dont know where to proxy this traffic. 
giving up on this stream!") + // } + // Err(e) => { + // tracing::debug!("giving up on this stream due to error: {e:?}") + // }, + // } + // }); + + // }, + // _ = shutdown_signal.notified() => { + // break; + // }, + // } + // } + + // Ok(()) + // } } diff --git a/src/tui/logs_widget.rs b/src/tui/logs_widget.rs index c27e66d..8d08f55 100644 --- a/src/tui/logs_widget.rs +++ b/src/tui/logs_widget.rs @@ -1,98 +1,102 @@ -use std::borrow::BorrowMut; +use std::borrow::{BorrowMut, Cow}; use std::sync::{Arc, Mutex}; use ratatui::layout::Rect; use ratatui::style::{Color, Style}; use ratatui::text::Line; use ratatui::widgets::{Paragraph, Scrollbar, ScrollbarOrientation}; -use tokio::sync::RwLockWriteGuard; use tracing::Level; use crate::global_state::GlobalState; use crate::logging::SharedLogBuffer; -use crate::types::app_state::*; use crate::types::tui_state::TuiState; use super::{wrap_string, Theme}; pub fn draw( f: &mut ratatui::Frame, - mut global_state: Arc, + _global_state: Arc, tui_state: &mut TuiState, log_buffer: &Arc>, area: Rect, _theme: &Theme ) { - { - let mut buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); - if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_none() && buffer.limit.is_none() { - let l = buffer.limit.borrow_mut(); - *l = Some(500); - } else if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_some() && buffer.limit.is_some() { - let l = buffer.limit.borrow_mut(); - *l = None; - } - } + let mut buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); + + if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_none() && buffer.limit.is_none() { + let l = buffer.limit.borrow_mut(); + *l = Some(500); + } else if tui_state.log_tab_stage.scroll_state.vertical_scroll.is_some() && buffer.limit.is_some() { + let l = buffer.limit.borrow_mut(); + *l = None; + } - let buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); let max_msg_width = area.width; - let item_count = buffer.logs.len().to_string().len().max(6); + // if we have 0-9 messages, the len will be 1, if we have 10-99 messages, the len will be 2, etc. + let item_count_len = buffer.logs.len().to_string().len().max(6); + + let items: Vec = + buffer.logs.iter_mut().enumerate().flat_map(|(i, x)| { - // we do this recalculation on each render in case of window-resize and such - // we should move so that this is done ONCE per log message and not for each log message ever on each render. - let items: Vec = buffer.logs.iter().enumerate().flat_map(|(i,x)|{ - - let level = x.lvl; - - let s = match level { - Level::ERROR => Style::default().fg(Color::Red), - Level::TRACE => Style::default().fg(Color::Gray), - Level::DEBUG => Style::default().fg(Color::Magenta), - Level::WARN => Style::default().fg(Color::Yellow), - Level::INFO => Style::default().fg(Color::Blue) - }; - - let nr_str = format!("{:1$} | ",i+1, item_count); - let lvl_str = format!("{:>1$} ",x.lvl.as_str(),5); - let thread_str = if let Some(n) = &x.thread {format!("{n} ")} else { format!("") }; - - let number = ratatui::text::Span::styled(nr_str.clone(),Style::default().fg(Color::DarkGray)); - let level = ratatui::text::Span::styled(lvl_str.clone(),s); - let thread_name = ratatui::text::Span::styled(thread_str.clone(),Style::default().fg(Color::DarkGray)); - - // if x.msg is wider than the available width, we need to split the message in multiple lines.. 
- let max_width = (max_msg_width as usize).saturating_sub(8).saturating_sub(nr_str.len() + lvl_str.len() + thread_str.len()); - - let l = if x.msg.len() > max_width as usize { - - wrap_string(x.msg.as_str(), max_width as usize) - .into_iter().enumerate() - .map(|(i,m)| - Line::from( - vec![ - number.clone(), - if i == 0 { level.clone() } else { - ratatui::text::Span::styled(" ".repeat(level.clone().content.len()).to_string() ,Style::default()) - }, - thread_name.clone(), - ratatui::text::Span::styled(m,Style::default()) - ] - ) - ).collect::>() - - } else { - let message = ratatui::text::Span::styled(format!("{} {}",x.src.clone(),x.msg),Style::default()); - vec![Line::from(vec![number,level,thread_name,message])] - - }; + let level = x.lvl; - l + let s = match level { + Level::ERROR => Style::default().fg(Color::Red), + Level::TRACE => Style::default().fg(Color::Gray), + Level::DEBUG => Style::default().fg(Color::Magenta), + Level::WARN => Style::default().fg(Color::Yellow), + Level::INFO => Style::default().fg(Color::Blue), + }; - }).collect(); + let nr_str = format!("{:1$} | ", i + 1, item_count_len); + let lvl_str = format!("{:>1$} ", x.lvl.as_str(), 5); + let thread_str = if let Some(n) = &x.thread { + format!("{n} ") + } else { + ("").into() + }; + + let max_width = (max_msg_width as usize) + .saturating_sub(8) + .saturating_sub(nr_str.len() + lvl_str.len() + thread_str.len()); + + if x.msg.len() > max_width { + wrap_string(x.msg.as_str(), max_width) + .into_iter().enumerate().map(|(i, m)| { + let level_span = if i == 0 { + ratatui::text::Span::styled(lvl_str.clone(), s) + } else { + ratatui::text::Span::styled( + Cow::from(" ".repeat(lvl_str.len())), + Style::default(), + ) + }; + + Line::from(vec![ + ratatui::text::Span::styled(nr_str.to_string(), Style::default().fg(Color::DarkGray)), + level_span, + ratatui::text::Span::styled(thread_str.to_string(), Style::default().fg(Color::DarkGray)), + ratatui::text::Span::styled(m.clone(), Style::default()), + ]) + }).collect::>() + } else { + let message = ratatui::text::Span::styled( + format!("{} {}", &x.src, &x.msg), + Style::default(), + ); + + vec![Line::from(vec![ + ratatui::text::Span::styled(nr_str, Style::default().fg(Color::DarkGray)), + ratatui::text::Span::styled(lvl_str, s), + ratatui::text::Span::styled(thread_str, Style::default().fg(Color::DarkGray)), + message, + ])] + } + }).collect(); let wrapped_line_count = items.len(); @@ -105,13 +109,11 @@ pub fn draw( let scroll_pos = { tui_state.log_tab_stage.scroll_state.vertical_scroll }; let scrollbar_hovered = tui_state.log_tab_stage.scroll_state.scroll_bar_hovered; - let mut scrollbar_state = tui_state.log_tab_stage.scroll_state.vertical_scroll_state.borrow_mut(); + let max_scroll_pos = items.len().saturating_sub(height_of_logs_area as usize); - //let clamped_scroll_pos = scroll_pos.unwrap_or(max_scroll_pos).min(max_scroll_pos) as u16; - - let visible_rows = area.height as usize; // Adjust as needed based on your UI + let visible_rows = area.height as usize; let start = scroll_pos.unwrap_or(max_scroll_pos); let end = std::cmp::min(start + visible_rows, items.len()); @@ -140,6 +142,7 @@ pub fn draw( scrollbar = scrollbar.thumb_style(Style::default().fg(Color::Yellow).bg(Color::Red)); } + let mut scrollbar_state = tui_state.log_tab_stage.scroll_state.vertical_scroll_state.borrow_mut(); *scrollbar_state = scrollbar_state.content_length(items.len().saturating_sub(height_of_logs_area as usize)); if scroll_pos.is_none() { diff --git a/src/tui/mod.rs b/src/tui/mod.rs index 
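In the logs widget above, a message wider than the remaining column budget is wrapped into several rows, and every continuation row gets blank padding where the level column was so the columns stay aligned. A standalone sketch of that idea (this `wrap` splits on character count only; the real `wrap_string` helper is not shown in this patch):

```rust
/// Break `msg` into chunks of at most `width` characters.
fn wrap(msg: &str, width: usize) -> Vec<String> {
    let width = width.max(1); // avoid chunks(0), which would panic
    msg.chars()
        .collect::<Vec<_>>()
        .chunks(width)
        .map(|c| c.iter().collect::<String>())
        .collect()
}

fn main() {
    let level_col = "INFO  ";
    let lines = wrap("a very long log message that does not fit on one row", 20);
    for (i, line) in lines.iter().enumerate() {
        // Continuation rows get blank padding where the level column was,
        // mirroring how the widget keeps its columns aligned.
        let prefix = if i == 0 {
            level_col.to_string()
        } else {
            " ".repeat(level_col.len())
        };
        println!("{prefix}{line}");
    }
}
```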
979275a..bc167cd 100644 --- a/src/tui/mod.rs +++ b/src/tui/mod.rs @@ -1,10 +1,8 @@ -use axum_extra::handler::Or; use crossterm::event::{KeyEvent, KeyModifiers}; use ratatui::layout::{Alignment, Margin}; use ratatui::style::{Color, Modifier, Style }; use ratatui::text::Line; use ratatui::widgets::{BorderType, List, ListItem }; -use tokio::sync::RwLockWriteGuard; use tokio::task; use tracing_subscriber::EnvFilter; use tracing_subscriber::layer::SubscriberExt; @@ -12,9 +10,9 @@ use std::io::Stdout; use crate::global_state::GlobalState; use crate::logging::SharedLogBuffer; use crate::logging::LogMsg; -use crate::types::app_state::{self, *}; +use crate::types::app_state::*; use crate::types::tui_state::{Page, TuiState}; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex}; use std::time::Duration; use crate::http_proxy::ProcMessage; @@ -119,25 +117,38 @@ pub async fn run( let tx = tx.clone(); let mut last_key_time = tokio::time::Instant::now(); - let debounce_duration = Duration::from_millis(100); + let debounce_duration = Duration::from_millis(300); let mut tui_state = crate::types::tui_state::TuiState::new(); loop { - + + // KEEP LOCK SHORT TO AVOID DEADLOCK { + let mut terminal = terminal.lock().await; - if global_state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true { - if global_state.app_state.site_status_map.iter().find(|x| - x.value() == &ProcState::Stopping - || x.value() == &ProcState::Running - || x.value() == &ProcState::Starting - - ).is_none() { - break; // nothing is running,stopping or starting.. we can exit now - } + terminal.draw(|f| + draw_ui::>( + f, + global_state.clone(), + &mut tui_state, + &log_buffer,&theme + ) + )?; + } + + + if global_state.app_state.exit.load(std::sync::atomic::Ordering::SeqCst) == true { + if global_state.app_state.site_status_map.iter().find(|x| + x.value() == &ProcState::Stopping + || x.value() == &ProcState::Running + || x.value() == &ProcState::Starting + + ).is_none() { + break; // nothing is running,stopping or starting.. we can exit now } } + if count > 100 { theme = match dark_light::detect() { @@ -148,32 +159,16 @@ pub async fn run( count = 0; } + + if let Ok(true) = event::poll(std::time::Duration::from_millis(100)) { + - // KEEP LOCK SHORT TO AVOID DEADLOCK - { - let mut terminal = terminal.lock().await; - - terminal.draw(|f| - draw_ui::>( - f, - global_state.clone(), - &mut tui_state, - &log_buffer,&theme - ) - )?; - } - - // } - - - // Handle input - if event::poll(std::time::Duration::from_millis(20))? 
{ - let now = tokio::time::Instant::now(); - let time_since_last_keypress = now.duration_since(last_key_time); let (current_page,sites_open) = { (tui_state.current_page.clone(),tui_state.show_apps_window) }; + let evt = event::read()?; + match evt { Event::Key(KeyEvent { code: crossterm::event::KeyCode::Char('c'), @@ -185,6 +180,7 @@ pub async fn run( break; }, Event::Mouse(mouse) => { + if sites_open { match mouse.kind { event::MouseEventKind::Moved => { @@ -252,8 +248,6 @@ pub async fn run( } Event::Key(key) => { - if time_since_last_keypress >= debounce_duration { - last_key_time = now; match key.code { KeyCode::Esc | KeyCode::Char('q') => { { @@ -412,8 +406,10 @@ pub async fn run( { for mut guard in global_state.app_state.site_status_map.iter_mut() { let (_k,state) = guard.pair_mut(); - if let ProcState::Running = state { - *state = ProcState::Stopping; + match state { + ProcState::Faulty => *state = ProcState::Stopping, + ProcState::Running => *state = ProcState::Stopping, + _ => {} } } } @@ -423,6 +419,9 @@ pub async fn run( { for mut guard in global_state.app_state.site_status_map.iter_mut() { let (_k,state) = guard.pair_mut(); + if disabled_items.contains(_k) { + continue; + } if let ProcState::Running = state { *state = ProcState::Starting; } @@ -439,15 +438,20 @@ pub async fn run( } } - - - } + + // } + + }, _=> {} } - + } + + + + } Result::<(), std::io::Error>::Ok(()) }) @@ -476,7 +480,7 @@ fn dark_theme() -> Style { fn light_theme() -> Style { Style::default() .fg(Color::Black) // Text color - // .bg(Color::White) // Background color + //.bg(Color::White) // Background color .add_modifier(Modifier::ITALIC) // Text modifier } @@ -502,7 +506,7 @@ fn draw_ui( }; - let size = f.size(); + let size = f.area(); if size.height < 10 || size.width < 10 { return } @@ -560,7 +564,7 @@ fn draw_ui( .divider(ratatui::text::Span::raw("|")); - let frame_margin = &Margin { horizontal: 1, vertical: 1 }; + let frame_margin = Margin { horizontal: 1, vertical: 1 }; match tui_state.current_page { Page::Logs => logs_widget::draw(f,global_state.clone(),tui_state,log_buffer,main_area[0].inner(frame_margin),&theme), @@ -586,7 +590,7 @@ fn draw_ui( // render the tab bar on top of the tab content - f.render_widget(tabs, main_area[0].inner(&Margin { horizontal: 2, vertical: 0 })); + f.render_widget(tabs, main_area[0].inner(Margin { horizontal: 2, vertical: 0 })); if tui_state.show_apps_window { @@ -660,9 +664,9 @@ fn draw_ui( ), &ProcState::Stopping => Style::default().fg( if is_dark_theme { - Color::Black + Color::Cyan } else { - Color::Yellow + Color::Black } ), &ProcState::Remote => Style::default().fg( diff --git a/src/tui/scroll_state_wrapper.rs b/src/tui/scroll_state_wrapper.rs index 28b0390..b048d17 100644 --- a/src/tui/scroll_state_wrapper.rs +++ b/src/tui/scroll_state_wrapper.rs @@ -52,7 +52,7 @@ impl ScrollStateWrapper { } else { 1.0 }.max(1.0); - let horizontal_match = column as usize >= self.area_width - 1 && column as usize <= self.area_width + 1; + let horizontal_match = column as usize >= self.area_width.saturating_sub(1) && column as usize <= self.area_width.saturating_add(1); let vertical_match = (row as isize >= thumb_position as isize - 2) && row as usize <= (thumb_position + thumb_size + 1.0) as usize; //self.dbg = format!("dragging pos: {row}/{column} - vscroll: {} - tpos: {thumb_position} | V: {vertical_match}, H: {horizontal_match}",vscroll); self.scroll_bar_hovered = horizontal_match && vertical_match; diff --git a/src/tui/stats_widget.rs b/src/tui/stats_widget.rs index 
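The `scroll_state_wrapper` hunk above replaces `self.area_width - 1` with `saturating_sub(1)` (and the addition with `saturating_add(1)`): if the pane width ever reaches zero, plain unsigned subtraction panics in debug builds and wraps around in release, whereas the saturating forms simply clamp at the bounds. A tiny illustration:

```rust
fn main() {
    let area_width: usize = 0; // e.g. a terminal pane collapsed to zero columns

    // Clamped arithmetic stays inside the unsigned range.
    let lower = area_width.saturating_sub(1);
    let upper = area_width.saturating_add(1);
    assert_eq!((lower, upper), (0, 1));

    // The previous expression would abort a debug build at this point:
    // let _ = area_width - 1; // panic: attempt to subtract with overflow
}
```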
4561007..894320a 100644 --- a/src/tui/stats_widget.rs +++ b/src/tui/stats_widget.rs @@ -3,9 +3,7 @@ use std::sync::Arc; use ratatui::layout::{Offset, Rect}; use ratatui::style::{Color, Stylize}; use ratatui::widgets::Paragraph; -use tokio::sync::RwLockWriteGuard; use crate::global_state::GlobalState; -use crate::types::app_state::*; use crate::types::tui_state::TuiState; use super::Theme; @@ -15,7 +13,7 @@ use super::Theme; pub fn draw( f: &mut ratatui::Frame, global_state: Arc, - tui_state: &mut TuiState, + _tui_state: &mut TuiState, area: Rect, _theme: &Theme ) { diff --git a/src/tui/threads_widget.rs b/src/tui/threads_widget.rs index 9373543..1ebc993 100644 --- a/src/tui/threads_widget.rs +++ b/src/tui/threads_widget.rs @@ -3,16 +3,14 @@ use std::sync::Arc; use ratatui::layout::{ Constraint, Flex, Rect}; use ratatui::style::{Color, Modifier, Style, Stylize}; use ratatui::widgets::{ Cell, Row, Scrollbar, ScrollbarOrientation, Table}; -use tokio::sync::RwLockWriteGuard; use crate::global_state::GlobalState; -use crate::types::app_state::*; use crate::types::tui_state::TuiState; use super::Theme; pub fn draw( f: &mut ratatui::Frame, - mut global_state: Arc, + _global_state: Arc, tui_state: &mut TuiState, area: Rect, theme: &Theme @@ -21,7 +19,7 @@ pub fn draw( let headers = [ "Site", "Source", "Target", "Description"]; - let rows : Vec> = crate::THREAD_MAP.lock().unwrap().iter().map(|(thread_id, _thread_info)| { + let rows : Vec> = crate::PROC_THREAD_MAP.lock().unwrap().iter().map(|(thread_id, _thread_info)| { let typ = "some thread"; let description = format!("{}",typ); vec![ @@ -34,16 +32,16 @@ pub fn draw( let wrapped_line_count = rows.len(); - tui_state.log_tab_stage.scroll_state.total_rows = wrapped_line_count; + tui_state.threads_tab_state.scroll_state.total_rows = wrapped_line_count; - let height_of_logs_area = area.height.saturating_sub(0); // header and footer - tui_state.log_tab_stage.scroll_state.area_height = height_of_logs_area as usize; - tui_state.log_tab_stage.scroll_state.area_width = area.width as usize; + let height_of_threads_area = area.height.saturating_sub(0); // header and footer + tui_state.threads_tab_state.scroll_state.area_height = height_of_threads_area as usize; + tui_state.threads_tab_state.scroll_state.area_width = area.width as usize; let header_height = 1; let visible_rows = area.height as usize - header_height; - let start = tui_state.log_tab_stage.scroll_state.vertical_scroll.unwrap_or_default(); + let start = tui_state.threads_tab_state.scroll_state.vertical_scroll.unwrap_or_default(); let end = std::cmp::min(start + visible_rows, rows.len()); let is_dark_theme = matches!(&theme,Theme::Dark(_)); @@ -78,8 +76,8 @@ pub fn draw( }).collect(); - tui_state.log_tab_stage.scroll_state.visible_rows = display_rows.iter().len() as usize; - tui_state.log_tab_stage.scroll_state.total_rows = rows.len(); + tui_state.threads_tab_state.scroll_state.visible_rows = display_rows.iter().len() as usize; + tui_state.threads_tab_state.scroll_state.total_rows = rows.len(); let widths = [ Constraint::Fill(1), @@ -113,17 +111,17 @@ pub fn draw( .orientation(ScrollbarOrientation::VerticalRight); let height_of_traf_area = area.height.saturating_sub(2); - tui_state.log_tab_stage.scroll_state.area_height = height_of_traf_area as usize; + tui_state.threads_tab_state.scroll_state.area_height = height_of_traf_area as usize; - tui_state.log_tab_stage.scroll_state.vertical_scroll_state = 
tui_state.log_tab_stage.scroll_state.vertical_scroll_state.content_length(rows.len().saturating_sub(height_of_traf_area as usize)); + tui_state.threads_tab_state.scroll_state.vertical_scroll_state = tui_state.threads_tab_state.scroll_state.vertical_scroll_state.content_length(rows.len().saturating_sub(height_of_traf_area as usize)); - if tui_state.log_tab_stage.scroll_state.scroll_bar_hovered { + if tui_state.threads_tab_state.scroll_state.scroll_bar_hovered { scrollbar = scrollbar.thumb_style(Style::default().fg(Color::Yellow).bg(Color::Red)); } let scrollbar_area = Rect::new(area.right() - 1, area.top(), 1, area.height); - f.render_stateful_widget(scrollbar,scrollbar_area, &mut tui_state.log_tab_stage.scroll_state.vertical_scroll_state); + f.render_stateful_widget(scrollbar,scrollbar_area, &mut tui_state.threads_tab_state.scroll_state.vertical_scroll_state); } From 4342af3caace377a8dbe95a94d36ccc0fe702978 Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 15:18:37 +0000 Subject: [PATCH 12/19] add useful info to threads tab --- README.md | 3 ++- odd-box-example-config-minimal.toml | 2 +- src/http_proxy/service.rs | 10 +++----- src/main.rs | 12 ++++++--- src/proc_host.rs | 40 ++++++++++++++++++++++++----- src/tui/threads_widget.rs | 15 +++++------ 6 files changed, 56 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 33ff65c..f0229f4 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,8 @@ As configuration is done thru basic files (toml format) which are easy to share, - TCP tunnelling for HTTP/2 over HTTP/1 (h2c upgrade) - H2C via terminating proxy - Automatic self-signed certs for all hosted processes - +- Basic round-robin loadbalancing for remote targets + ### Performance While the goal of this project **is not** to provide a state-of-the-art level performing proxy server for production environments, but rather a tool for simplifying local development scenarios, we do try to keep performance in mind.. Some rudimentary testing on this authors development machine shows that TCP tunnel mode supports 200k+ requests per second while the terminating proxy mode handles 100k+ requests per second. More specific measurements of different scenarios will be added here at some point. This was tested by using odd-box in front of a highly performant Caddy server using oha for benchmarking. There is much room for improvement here, especially for terminating mode.. It will be prioritized in to at some point :) diff --git a/odd-box-example-config-minimal.toml b/odd-box-example-config-minimal.toml index ffba15b..f9d735a 100644 --- a/odd-box-example-config-minimal.toml +++ b/odd-box-example-config-minimal.toml @@ -15,6 +15,6 @@ backends = [ host_name = "py.localtest.me" dir = "$cfg_dir" bin = "/usr/bin/python3" -args = ["-m", "http.server", "8012"] +args = ["-m", "http.server", "$port"] port = 8012 env_vars = [] \ No newline at end of file diff --git a/src/http_proxy/service.rs b/src/http_proxy/service.rs index ea7f264..2a95e5d 100644 --- a/src/http_proxy/service.rs +++ b/src/http_proxy/service.rs @@ -362,10 +362,6 @@ async fn handle_http_request( tracing::warn!("Received request that does not match any known target: {:?}", req_host_name); let body_str = format!("Sorry, I don't know how to proxy this request.. {:?}", req); - - // TODO --- even this seems to cause tcp_closing issues at extreme load .. 
- - let mut response = EpicResponse::new(create_epic_string_full_body(&body_str)); *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; Ok(response) @@ -534,13 +530,13 @@ async fn intercept_local_commands( let html = r#"
-All sites stopped by your command
+Stop signal received.
-The proxy will also resume if you visit any of the sites
+The proxy will also resume if you visit any of the stopped sites
"#; return Some(EpicResponse::new(create_epic_string_full_body(html))) @@ -563,7 +559,7 @@ async fn intercept_local_commands( let html = r#"
-All sites resumed
+Start signal received.
diff --git a/src/main.rs b/src/main.rs index 6e3c3ea..1fc4291 100644 --- a/src/main.rs +++ b/src/main.rs @@ -45,8 +45,15 @@ impl ProcId { } } +#[derive(Debug)] +pub struct ProcInfo { + pub liveness_ptr : Weak, + pub config : FullyResolvedInProcessSiteConfig, + pub pid : Option +} + lazy_static! { - static ref PROC_THREAD_MAP: Arc>>> = Arc::new(Mutex::new(HashMap::new())); + static ref PROC_THREAD_MAP: Arc> = Arc::new(DashMap::new()); } static REQUEST_ID_COUNTER: AtomicU64 = AtomicU64::new(1); @@ -298,8 +305,7 @@ pub fn initialize_panic_handler() { fn thread_cleaner() { - let mut map = PROC_THREAD_MAP.lock().unwrap(); - map.retain(|_k,v| v.upgrade().is_some()); + PROC_THREAD_MAP.retain(|_k,v| v.liveness_ptr.upgrade().is_some()); } diff --git a/src/proc_host.rs b/src/proc_host.rs index 4749076..dcc9d77 100644 --- a/src/proc_host.rs +++ b/src/proc_host.rs @@ -17,11 +17,13 @@ pub async fn host( ) { let my_arc = std::sync::Arc::new(AtomicBool::new(true)); - { - let thread_map_guard = crate::PROC_THREAD_MAP.clone(); - let mut thread_map = thread_map_guard.lock().unwrap(); - thread_map.insert(resolved_proc.proc_id.clone(), std::sync::Arc::::downgrade(&my_arc)); - } + + crate::PROC_THREAD_MAP.insert(resolved_proc.proc_id.clone(), crate::ProcInfo { + config: resolved_proc.clone(), + pid: None, + liveness_ptr: std::sync::Arc::::downgrade(&my_arc) + }); + // if auto_start is not set in the config, we assume that user wants to start site automatically like before @@ -54,6 +56,19 @@ pub async fn host( let mut selected_port: Option = None; loop { + + { + let entry = crate::PROC_THREAD_MAP.get_mut(&resolved_proc.proc_id); + match entry { + Some(mut item) => { + item.pid = None; + }, + None => { + tracing::warn!("Something has gone very wrong! A thread is missing from the global thread map.. this is a bug in odd-box.") + } + } + } + tokio::time::sleep(Duration::from_millis(200)).await; let mut time_to_sleep_ms_after_loop = 500; // scope for clarity @@ -134,6 +149,7 @@ pub async fn host( if let Ok(p) = guard.set_active_port(&mut resolved_proc) { selected_port = Some(p); + resolved_proc.active_port = selected_port; } if selected_port.is_none() { @@ -226,7 +242,19 @@ pub async fn host( state.app_state.site_status_map.insert(resolved_proc.host_name.clone(), ProcState::Running); - + { + let entry = crate::PROC_THREAD_MAP.get_mut(&resolved_proc.proc_id); + match entry { + Some(mut item) => { + item.pid = Some(child.id().to_string()); + // this is the only thing that is supposed to change during the lifetime of a proc loop + item.config.active_port = resolved_proc.active_port; + }, + None => { + tracing::warn!("Something has gone very wrong! A thread is missing from the global thread map.. 
this is a bug in odd-box.") + } + } + } //let stdin = child.stdin.take().expect("Failed to capture stdin"); diff --git a/src/tui/threads_widget.rs b/src/tui/threads_widget.rs index 1ebc993..29b15ec 100644 --- a/src/tui/threads_widget.rs +++ b/src/tui/threads_widget.rs @@ -17,16 +17,15 @@ pub fn draw( ) { - let headers = [ "Site", "Source", "Target", "Description"]; + let headers = [ "TaskId", "Pid", "HostName", "Port"]; - let rows : Vec> = crate::PROC_THREAD_MAP.lock().unwrap().iter().map(|(thread_id, _thread_info)| { - let typ = "some thread"; - let description = format!("{}",typ); + let rows : Vec> = crate::PROC_THREAD_MAP.iter().map(|guard| { + let (thread_id, thread_info) = guard.pair(); vec![ - format!("{:?}",thread_id), - "something else".to_string(), - "more stuff".to_string(), - description + format!("{}",thread_id.id), + format!("{:?}",thread_info.pid), + format!("{}",thread_info.config.host_name), + format!("{:?}",thread_info.config.active_port) ] }).collect(); From af9ab1d1be9f995261294c1b5d6c9a2a4087f7e6 Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 15:53:08 +0000 Subject: [PATCH 13/19] allow pause of logging and add absolute max buffer size --- src/configuration/v2.rs | 3 ++- src/logging.rs | 17 +++++++++++++---- src/tui/mod.rs | 26 ++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/src/configuration/v2.rs b/src/configuration/v2.rs index fb5fdf9..5cbb334 100644 --- a/src/configuration/v2.rs +++ b/src/configuration/v2.rs @@ -375,8 +375,9 @@ impl crate::configuration::OddBoxConfiguration for OddBoxV2Confi formatted_toml.push(format!("bin = {:?}", process.bin)); if let Some(hint) = &process.hints { - formatted_toml.push("h2_hints = [".to_string()); + formatted_toml.push("hints = [".to_string()); let joint = hint.iter().map(|h| format!("{:?}", h)).collect::>().join(", "); + formatted_toml.push(joint); formatted_toml.push("]".to_string()); } diff --git a/src/logging.rs b/src/logging.rs index c290e31..09c3798 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -103,7 +103,8 @@ impl tracing_subscriber::Layer for NonTuiLoggerLayer { pub struct SharedLogBuffer { pub logs: VecDeque, - pub limit : Option + pub limit : Option, + pub pause : bool } impl SharedLogBuffer { @@ -111,12 +112,15 @@ impl SharedLogBuffer { pub fn new() -> Self { SharedLogBuffer { logs: VecDeque::new(), - limit: Some(500) + limit: Some(500), + pause: false } } fn push(&mut self, message: LogMsg) { - + if self.pause { + return + } self.logs.push_back(message); match self.limit { Some(x) => { @@ -124,7 +128,12 @@ impl SharedLogBuffer { self.logs.pop_front(); } }, - None => {}, + None => { + // hard max even if user is scrolled up in the tui + while self.logs.len() > 1000 { + self.logs.pop_front(); + } + }, } } diff --git a/src/tui/mod.rs b/src/tui/mod.rs index bc167cd..ff5fd6e 100644 --- a/src/tui/mod.rs +++ b/src/tui/mod.rs @@ -4,6 +4,7 @@ use ratatui::style::{Color, Modifier, Style }; use ratatui::text::Line; use ratatui::widgets::{BorderType, List, ListItem }; use tokio::task; +use tracing::Level; use tracing_subscriber::EnvFilter; use tracing_subscriber::layer::SubscriberExt; use std::io::Stdout; @@ -170,6 +171,27 @@ pub async fn run( let evt = event::read()?; match evt { + Event::Key(KeyEvent { + code: crossterm::event::KeyCode::Char(' '), + modifiers: KeyModifiers::NONE, + kind: _, + state:_ + }) if tui_state.current_page==Page::Logs => { + + let mut buf = log_buffer.lock().expect("must always be able to lock log buffer"); + buf.pause = !buf.pause; + let paused 
= buf.pause; + buf.logs.push_back(LogMsg { + msg: if paused { + format!("LOGGING PAUSED! PRESS SPACE TO RESUME.") + } else { + format!("LOGGING RESUMED! PRESS SPACE TO PAUSE.") + }, + lvl: Level::WARN, + src: String::from("odd-box tracing"), + thread: None, + }); + } Event::Key(KeyEvent { code: crossterm::event::KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, @@ -309,6 +331,7 @@ pub async fn run( KeyCode::Enter => { tui_state.log_tab_stage.scroll_state.vertical_scroll = None; let mut buf = log_buffer.lock().expect("must always be able to lock log buffer"); + // immediate effect instead of waiting for next log item match buf.limit { Some(x) => { while buf.logs.len() > x { @@ -764,6 +787,9 @@ fn draw_ui( help_bar_text.push(ratatui::text::Span::raw("| tab: toggle page")); } + if tui_state.current_page == Page::Logs { + help_bar_text.push(ratatui::text::Span::raw("| space: un/pause logging")); + } // // DEBUG // help_bar_text.push(ratatui::text::Span::raw(format!("| DBG: {}", From a5e7005836962d8934ef39b3f3aa0fde9134bb6a Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 16:05:33 +0000 Subject: [PATCH 14/19] clippy --- src/configuration/legacy.rs | 2 +- src/configuration/mod.rs | 7 +++--- src/configuration/v1.rs | 1 - src/configuration/v2.rs | 4 ---- src/http_proxy/mod.rs | 1 - src/http_proxy/service.rs | 44 +++++++------------------------------ src/http_proxy/utils.rs | 17 +++++++------- src/logging.rs | 2 +- src/main.rs | 3 +-- src/proc_host.rs | 6 ++--- src/proxy.rs | 22 +++++++++---------- src/tcp_proxy/http1.rs | 10 ++++----- src/tcp_proxy/tcp.rs | 33 +--------------------------- src/tests/configuration.rs | 3 --- src/tui/mod.rs | 4 ++-- src/types/app_state.rs | 8 ------- 16 files changed, 45 insertions(+), 122 deletions(-) diff --git a/src/configuration/legacy.rs b/src/configuration/legacy.rs index 0a33413..eba9328 100644 --- a/src/configuration/legacy.rs +++ b/src/configuration/legacy.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use std::net::Ipv4Addr; -use anyhow::bail; + use serde::Serialize; use serde::Deserialize; diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs index ff234cb..a1b0769 100644 --- a/src/configuration/mod.rs +++ b/src/configuration/mod.rs @@ -29,8 +29,7 @@ pub enum OddBoxConfig { #[derive(Debug,Clone)] pub struct ConfigWrapper( - pub v2::OddBoxV2Config, - Option // <-- resolved path cache - todo, use and rename + pub v2::OddBoxV2Config ); @@ -48,7 +47,7 @@ impl std::ops::DerefMut for ConfigWrapper { impl ConfigWrapper { pub fn wrapv2(config:v2::OddBoxV2Config) -> Self { - ConfigWrapper(config,None) + ConfigWrapper(config) } } @@ -168,7 +167,7 @@ impl OddBoxConfig { impl ConfigWrapper { pub fn new(cfg:v2::OddBoxV2Config) -> Self { - ConfigWrapper(cfg,None) + ConfigWrapper(cfg) } pub fn init(&mut self,cfg_path:&str) -> anyhow::Result<()> { self.path = Some(std::path::Path::new(&cfg_path).canonicalize()?.to_str().unwrap_or_default().into()); diff --git a/src/configuration/v1.rs b/src/configuration/v1.rs index 8c3920a..3800326 100644 --- a/src/configuration/v1.rs +++ b/src/configuration/v1.rs @@ -1,6 +1,5 @@ use std::net::IpAddr; use std::net::Ipv4Addr; -use anyhow::bail; use serde::Serialize; use serde::Deserialize; use utoipa::ToSchema; diff --git a/src/configuration/v2.rs b/src/configuration/v2.rs index 5cbb334..84f412f 100644 --- a/src/configuration/v2.rs +++ b/src/configuration/v2.rs @@ -3,19 +3,15 @@ use std::net::Ipv4Addr; use std::path::Path; use anyhow::bail; -use dashmap::DashMap; use serde::Serialize; use serde::Deserialize; use 
utoipa::ToSchema; use crate::global_state::GlobalState; -use crate::types::app_state::ProcState; use crate::ProcId; -use super::ConfigWrapper; use super::EnvVar; use super::LogFormat; use super::LogLevel; -use super::OddBoxConfiguration; #[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Hash)] diff --git a/src/http_proxy/mod.rs b/src/http_proxy/mod.rs index c975a8c..b3bd361 100644 --- a/src/http_proxy/mod.rs +++ b/src/http_proxy/mod.rs @@ -9,7 +9,6 @@ use hyper_util::client::legacy::{connect::HttpConnector, Client}; pub use service::*; use tokio::sync::mpsc::Sender; pub use utils::*; -pub use crate::configuration::ConfigWrapper; use crate::global_state::GlobalState; #[derive(Clone,Debug)] diff --git a/src/http_proxy/service.rs b/src/http_proxy/service.rs index 2a95e5d..6113073 100644 --- a/src/http_proxy/service.rs +++ b/src/http_proxy/service.rs @@ -2,25 +2,23 @@ use std::borrow::Cow; use std::sync::Arc; use std::time::Duration; use bytes::Bytes; -use http_body::{Body, Frame}; -use http_body_util::combinators::BoxBody; -use http_body_util::{BodyExt, Either, Full, StreamBody}; +use http_body::Frame; + +use http_body_util::{Either, Full, StreamBody}; use hyper::service::Service; use hyper::{body::Incoming as IncomingBody, Request, Response}; use hyper_rustls::HttpsConnector; -use hyper_util::client::legacy::connect::{Connection, HttpConnector}; +use hyper_util::client::legacy::connect::HttpConnector; use hyper_util::client::legacy::Client; use hyper_util::rt::TokioExecutor; -use tokio::io::AsyncWriteExt; use tokio::net::TcpStream; use tokio_stream::wrappers::ReceiverStream; -use tower_http::services::fs::ServeFileSystemResponseBody; use std::future::Future; use std::pin::Pin; use crate::global_state::GlobalState; use crate::types::app_state::ProcState; use crate::CustomError; -use hyper::{upgrade, Method, StatusCode}; +use hyper::{Method, StatusCode}; use lazy_static::lazy_static; use super::{ProcMessage, ReverseProxyService, WrappedNormalResponse}; use super::proxy; @@ -196,15 +194,11 @@ async fn handle_http_request( }) .unwrap_or_else(std::collections::HashMap::new); - // TODO - try remove this for perf if let Some(r) = intercept_local_commands(&req_host_name,¶ms,req_path,tx.clone()).await { return Ok(r) } - - - - let mut found_hosted_target = { + let found_hosted_target = { let cfg_guard = state.config.read().await; if let Some(processes) = &cfg_guard.hosted_process { if let Some(pp) = processes.iter().find(|p| { @@ -242,7 +236,7 @@ async fn handle_http_request( if let Some(cts) = current_target_status { - if cts == crate::ProcState::Stopped || cts == crate::ProcState::Starting { + if cts == crate::ProcState::Stopped || cts == crate::ProcState::Starting || cts == crate::ProcState::Faulty { match req.method() { &Method::GET => { // todo - opt in/out via cfg ? 
@@ -256,33 +250,11 @@ async fn handle_http_request( } } - - // re-read config from global state in case it was not started before - if target_proc_cfg.active_port.is_none() { - found_hosted_target = { - let cfg_guard = state.config.read().await; - if let Some(processes) = &cfg_guard.hosted_process { - if let Some(pp) = processes.iter().find(|p| { - req_host_name == p.host_name - || p.capture_subdomains.unwrap_or_default() - && req_host_name.ends_with(&format!(".{}",p.host_name)) - }) { - Some(pp.clone()) - } else { - None - } - } else { - None - } - }; - } - - let port = if let Some(active_port) = target_proc_cfg.active_port { active_port } else { - return Err(CustomError(format!("No active port found for {req_host_name}"))) + return Err(CustomError(format!("No active port found for {req_host_name}."))) }; let enforce_https = target_proc_cfg.https.is_some_and(|x|x); diff --git a/src/http_proxy/utils.rs b/src/http_proxy/utils.rs index 679ed50..f2a092d 100644 --- a/src/http_proxy/utils.rs +++ b/src/http_proxy/utils.rs @@ -3,17 +3,17 @@ use futures_util::FutureExt; use http_body::Frame; use http_body_util::BodyExt; use hyper::{ - body::Incoming, client::conn::http1::Builder, header::{HeaderName, HeaderValue, InvalidHeaderValue, ToStrError, HOST}, upgrade::OnUpgrade, HeaderMap, Request, Response, StatusCode, Version + body::Incoming, header::{HeaderName, HeaderValue, InvalidHeaderValue, ToStrError}, upgrade::OnUpgrade, HeaderMap, Request, Response, StatusCode, Version }; use hyper_rustls::HttpsConnector; -use hyper_util::{client::legacy::{connect::HttpConnector, Client}, rt::{TokioExecutor, TokioIo}}; -use std::{borrow::Cow, net::SocketAddr, sync::Arc, task::Poll, time::Duration}; +use hyper_util::{client::legacy::{connect::HttpConnector, Client}, rt::TokioIo}; +use std::{net::SocketAddr, sync::Arc, task::Poll, time::Duration}; use tungstenite::http; use lazy_static::lazy_static; use crate::{ - configuration::v2::Hint, global_state::GlobalState, http_proxy::EpicResponse, tcp_proxy::ReverseTcpProxyTarget, types::proxy_state::{ ConnectionKey, ProxyActiveConnection, ProxyActiveConnectionType }, CustomError + configuration::v2::Hint, global_state::GlobalState, http_proxy::EpicResponse, types::proxy_state::{ ConnectionKey, ProxyActiveConnection, ProxyActiveConnectionType }, CustomError }; lazy_static! { static ref TE_HEADER: HeaderName = HeaderName::from_static("te"); @@ -93,7 +93,7 @@ pub async fn proxy( let mut backend_supports_prior_knowledge_http2_over_tls = false; let mut backend_supports_http2_over_clear_text_via_h2c_upgrade_header = false; - let mut backend_supports_http2_h2c_using_prior_knowledge = false; + let mut _backend_supports_http2_h2c_using_prior_knowledge = false; let mut use_prior_knowledge_http2 = false; let mut use_h2c_upgrade_header = false; @@ -106,7 +106,7 @@ pub async fn proxy( backend_supports_http2_over_clear_text_via_h2c_upgrade_header = true; }, Hint::H2CPK => { - backend_supports_http2_h2c_using_prior_knowledge = true; + _backend_supports_http2_h2c_using_prior_knowledge = true; } } } @@ -147,12 +147,13 @@ pub async fn proxy( } } + // FFR: // --------------------------------------------------------------------------------------------- // H2 THRU ALPN -- SUPPORTS HTTP2 OVER TLS // H2 PRIOR KNOWLEDGE -- SUPPORTS HTTP2 OVER TLS // H2C PRIOR KNOWLEDGE -- SUPPORTS HTTP2 OVER CLEAR TEXT // H2C UPGRADE HEADER -- SUPPORTS HTTP2 OVER CLEAR TEXT VIA UPGRADE HEADER - // if backend does not support http2, we will just use http1.1 and act like nothing happened. 
+ // if backend does not support http2, we should just use http1.1 and act like nothing happened. // --------------------------------------------------------------------------------------------- @@ -551,7 +552,7 @@ fn del_connection(state:Arc,key:&ConnectionKey) { fn create_connection( req:&Request, incoming_http_version: Version, - target:Target, + _target:Target, client_addr:&SocketAddr, target_scheme: &str, target_http_version: hyper::http::Version, diff --git a/src/logging.rs b/src/logging.rs index 09c3798..c0dce59 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -2,7 +2,7 @@ use std::collections::VecDeque; use std::{collections::HashMap, sync::Arc}; use std::sync::Mutex; -use ratatui::layout::Size; + use tracing::Subscriber; use tracing_subscriber::layer::Context; use tracing_subscriber::Layer; diff --git a/src/main.rs b/src/main.rs index 1fc4291..f24c60e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,7 +11,6 @@ use configuration::v2::InProcessSiteConfig; use configuration::v2::RemoteSiteConfig; use configuration::OddBoxConfiguration; use http_proxy::ProcMessage; -use ratatui::text::ToLine; use tokio_rustls::rustls::pki_types::{CertificateDer, PrivateKeyDer}; use self_update::cargo_crate_version; use tracing_subscriber::layer::SubscriberExt; @@ -62,7 +61,7 @@ pub fn generate_unique_id() -> u64 { } pub mod global_state { - use std::{collections::HashMap, sync::atomic::AtomicU64}; + use std::{sync::atomic::AtomicU64}; #[derive(Debug)] pub struct GlobalState { pub app_state: std::sync::Arc, diff --git a/src/proc_host.rs b/src/proc_host.rs index dcc9d77..f50f8ea 100644 --- a/src/proc_host.rs +++ b/src/proc_host.rs @@ -1,5 +1,5 @@ -use crate::configuration::{EnvVar, LogFormat}; -use crate::global_state::{self, GlobalState}; +use crate::configuration::LogFormat; +use crate::global_state::GlobalState; use crate::http_proxy::ProcMessage; use crate::types::app_state::ProcState; use std::collections::HashMap; @@ -217,7 +217,7 @@ pub async fn host( use std::os::windows::process::CommandExt; #[cfg(target_os = "windows")] - let cmd = Command::new(bin_path) + let cmdx = Command::new(bin_path) .args(pre_resolved_args) .envs(&process_specific_environment_variables) .current_dir(&workdir) diff --git a/src/proxy.rs b/src/proxy.rs index b2bffd9..aede828 100644 --- a/src/proxy.rs +++ b/src/proxy.rs @@ -2,21 +2,21 @@ use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use futures_util::task::UnsafeFutureObj; -use hyper::body::Incoming; -use hyper::Method; -use hyper::Uri; + + + + use hyper_rustls::ConfigBuilderExt; -use hyper_util::client::legacy::connect::Connection; + use lazy_static::lazy_static; -use reqwest::Request; + use socket2::Socket; use tokio::net::TcpSocket; use tokio::net::TcpStream; use tokio::sync::Notify; use tokio_rustls::TlsAcceptor; -use tungstenite::util::NonBlockingResult; -use url::Url; + + use crate::configuration::ConfigWrapper; use crate::global_state::GlobalState; @@ -27,10 +27,10 @@ use crate::tcp_proxy; use crate::http_proxy; use crate::tcp_proxy::DataType; use crate::tcp_proxy::PeekResult; -use crate::tcp_proxy::ReverseTcpProxyTarget; + use crate::tcp_proxy::ReverseTcpProxyTargets; use crate::types::app_state; -use crate::types::app_state::ProcState; + pub async fn listen( _cfg: std::sync::Arc>, @@ -305,7 +305,7 @@ async fn handle_new_tcp_stream( ) { - let n = state.request_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let _n = state.request_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed); 
//tracing::warn!("handle_new_tcp_stream ({})!",n+1); //tracing::info!("handle_new_tcp_stream called with expect tls: {expect_tls}"); diff --git a/src/tcp_proxy/http1.rs b/src/tcp_proxy/http1.rs index 5c9e4ff..140c751 100644 --- a/src/tcp_proxy/http1.rs +++ b/src/tcp_proxy/http1.rs @@ -20,15 +20,15 @@ pub fn is_valid_http_request(bytes: &[u8]) -> anyhow::Result { bail!("this is not a http request. no method found"); } - let version = if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/1.1\r\n") { + let version = if let Some(_pos) = memchr::memmem::find(bytes, b" HTTP/1.1\r\n") { Version::HTTP_11 - } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/1.0\r\n") { + } else if let Some(_pos) = memchr::memmem::find(bytes, b" HTTP/1.0\r\n") { Version::HTTP_10 - } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/2.0\r\n") { + } else if let Some(_pos) = memchr::memmem::find(bytes, b" HTTP/2.0\r\n") { Version::HTTP_2 - } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/3.0\r\n") { + } else if let Some(_pos) = memchr::memmem::find(bytes, b" HTTP/3.0\r\n") { Version::HTTP_3 - } else if let Some(pos) = memchr::memmem::find(bytes, b" HTTP/0.9\r\n") { + } else if let Some(_pos) = memchr::memmem::find(bytes, b" HTTP/0.9\r\n") { Version::HTTP_09 } else { if let Some(start) = bytes.windows(6).position(|window| window.starts_with(b"HTTP/")) { diff --git a/src/tcp_proxy/tcp.rs b/src/tcp_proxy/tcp.rs index f16eefe..df38fd2 100644 --- a/src/tcp_proxy/tcp.rs +++ b/src/tcp_proxy/tcp.rs @@ -1,6 +1,5 @@ use chrono::Local; use hyper::Version; -use tokio::sync::Notify; use std::net::IpAddr; use std::{ net::SocketAddr, @@ -11,7 +10,7 @@ use crate::global_state::GlobalState; use crate::tcp_proxy::tls::client_hello::TlsClientHello; use crate::tcp_proxy::tls::client_hello::TlsClientHelloError; use crate::types::proxy_state::{ProxyActiveConnection, ProxyActiveConnectionType}; -use tokio::net::{TcpListener, TcpStream}; +use tokio::net::TcpStream; use tracing::*; @@ -105,36 +104,6 @@ impl ReverseTcpProxyTargets { } } -impl ReverseTcpProxyTarget { - pub fn from_target(target:crate::http_proxy::Target) -> Self { - match &target { - crate::http_proxy::Target::Remote(x) => ReverseTcpProxyTarget { - sub_domain: None, - capture_subdomains: x.capture_subdomains.unwrap_or_default(), - forward_wildcard: x.forward_subdomains.unwrap_or_default(), - backends: x.backends.clone(), - host_name: x.host_name.clone(), - is_hosted: false, - remote_target_config : Some(x.clone()) - }, - crate::http_proxy::Target::Proc(x) => ReverseTcpProxyTarget { - sub_domain: None, - capture_subdomains: x.capture_subdomains.unwrap_or_default(), - forward_wildcard: x.forward_subdomains.unwrap_or_default(), - backends: vec![crate::configuration::v2::Backend { - hints: x.hints.clone(), - address: x.host_name.to_owned(), - https: x.https, - port: x.active_port.unwrap_or_default() - }], - host_name: x.host_name.clone(), - is_hosted: true, - remote_target_config : None - }, - } - } -} - #[derive(Debug)] pub enum DataType { TLS, diff --git a/src/tests/configuration.rs b/src/tests/configuration.rs index f6e73b5..0363746 100644 --- a/src/tests/configuration.rs +++ b/src/tests/configuration.rs @@ -1,8 +1,5 @@ -use std::borrow::BorrowMut; - #[allow(unused)] use crate::configuration::OddBoxConfiguration; -use crate::http_proxy::ConfigWrapper; #[test] pub fn legacy_upgrade() { let legacy_config = crate::configuration::legacy::OddBoxLegacyConfig::example(); diff --git a/src/tui/mod.rs b/src/tui/mod.rs index ff5fd6e..474d2dd 100644 --- 
a/src/tui/mod.rs +++ b/src/tui/mod.rs @@ -117,8 +117,8 @@ pub async fn run( let tx = tx.clone(); - let mut last_key_time = tokio::time::Instant::now(); - let debounce_duration = Duration::from_millis(300); + let _last_key_time = tokio::time::Instant::now(); + let _debounce_duration = Duration::from_millis(300); let mut tui_state = crate::types::tui_state::TuiState::new(); diff --git a/src/types/app_state.rs b/src/types/app_state.rs index 4477176..367d727 100644 --- a/src/types/app_state.rs +++ b/src/types/app_state.rs @@ -1,17 +1,9 @@ -use std::collections::HashMap; -use std::default; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicUsize; -use ratatui::prelude::Rect; use utoipa::ToSchema; -use crate::types::tui_state::LogPageState; -use crate::types::tui_state::Page; -use crate::types::tui_state::ThreadsTabState; -use crate::types::tui_state::ConnectionsTabState; use std::sync::Arc; use crate::types::proxy_state::*; use ratatui::widgets::ListState; -use crate::ProcMessage; #[derive(Debug,PartialEq,Clone,serde::Serialize,ToSchema)] pub enum ProcState { From 4edfe820a9c4ee3370347eb97be5f97edb043948 Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 16:10:01 +0000 Subject: [PATCH 15/19] remove incorrect comment --- src/configuration/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs index a1b0769..ede3363 100644 --- a/src/configuration/mod.rs +++ b/src/configuration/mod.rs @@ -387,7 +387,6 @@ impl ConfigWrapper { } - // TODO - this does not work correctly. it doesnt use the PORT from procs config but always auto. pub fn set_active_port(&mut self, resolved_proc:&mut FullyResolvedInProcessSiteConfig) -> anyhow::Result { @@ -464,6 +463,7 @@ impl ConfigWrapper { // it is done this way in order to avoid changing the global state of the configuration in to the resolved state // since that would then be saved to disk and we would lose the original configuration with dynamic variables // making configuration files less portable. + // todo - cache resolved configurations by hash? pub fn resolve_process_configuration(&mut self,proc:&crate::InProcessSiteConfig) -> anyhow::Result { let mut resolved_proc = crate::FullyResolvedInProcessSiteConfig { From 8b9be8941ff9e5ab4c70b757524ab56ff814471a Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 16:22:41 +0000 Subject: [PATCH 16/19] clean up config validation and mention which sites conflict name or port --- src/configuration/mod.rs | 102 +++++++++++++++++++++++---------------- 1 file changed, 61 insertions(+), 41 deletions(-) diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs index ede3363..d6e3c81 100644 --- a/src/configuration/mod.rs +++ b/src/configuration/mod.rs @@ -176,46 +176,38 @@ impl ConfigWrapper { pub fn is_valid(&self) -> anyhow::Result<()> { - for x in self.env_vars.iter() { - if x.key.to_lowercase().trim() == "port" { - anyhow::bail!(format!("Invalid configuration. 
You cannot use 'port' as a global environment variable")); - } - }; - - // ALL HOST NAMES ARE UNIQUE - let mut all_host_names: Vec<&str> = vec![ - self.remote_target.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default(), - self.hosted_process.as_ref().and_then(|p|Some(p.iter().map(|x|x.host_name.as_str()).collect::>())).unwrap_or_default() - - ].concat(); - all_host_names.sort(); - let all_count = all_host_names.len(); - all_host_names.dedup(); - let unique_count = all_host_names.len(); - if all_count != unique_count { - anyhow::bail!(format!("You have more than one hosted process configured with the same host_name... not allowed.")) + if self.env_vars.iter().any(|x| x.key.eq_ignore_ascii_case("port")) { + anyhow::bail!("Invalid configuration. You cannot use 'port' as a global environment variable"); } - - // ALL HOSTED SERVICES USE DIFFERENT PORTS - let mut all_ports = self.hosted_process.clone().unwrap_or_default().iter().filter_map(|x|x.port).collect::>(); - all_ports.sort(); - let all_count = all_ports.len(); - all_ports.dedup(); - let unique_count = all_ports.len(); - if all_count != unique_count { - anyhow::bail!(format!("You have more than one hosted process configured with the same port... not allowed.")) - } - - // NO HOSTED PROCESS USES AN ENV VAR FOR PORT THAT DIFFERS FROM THE PORT SPECIFIED IN ITS CONFIG - // TODO: this is just horrible - for x in self.hosted_process.clone().unwrap_or_default() { - if let Some(port) = x.port { - if let Some(env_vars) = x.env_vars { + + let mut host_names = std::collections::HashMap::new(); + let mut ports = std::collections::HashMap::new(); + + for process in self.hosted_process.clone().unwrap_or_default() { + + host_names + .entry(process.host_name.clone()) + .and_modify(|count| *count += 1) + .or_insert(1); + + if let Some(port) = process.port { + ports + .entry(port) + .or_insert_with(Vec::new) + .push(process.host_name.clone()); + } + + if let Some(port) = process.port { + if let Some(env_vars) = process.env_vars { for env_var in env_vars { - if env_var.key.to_lowercase().trim() == "port" { - if let Ok(parsed_port) = env_var.value.parse::() { - if parsed_port != port { - anyhow::bail!(format!("Environment variable PORT for {} does not match the port specified in the configuration.\nIt is recommended you do not specify the PORT environment variable explicitly but instead rely on the port setting -\nit will automatically inject the port variable to the process-local context.",x.host_name)) + if env_var.key.eq_ignore_ascii_case("port") { + if let Ok(env_port) = env_var.value.parse::() { + if env_port != port { + anyhow::bail!(format!( + "Environment variable PORT for '{}' does not match the port specified in the configuration.\n\ + It is recommended to rely on the port setting - it will automatically inject the port variable to the process-local context.", + process.host_name + )); } } } @@ -223,11 +215,39 @@ impl ConfigWrapper { } } } - + + let duplicate_host_names: Vec = host_names + .into_iter() + .filter_map(|(name, count)| if count > 1 { Some(name) } else { None }) + .collect(); + + if !duplicate_host_names.is_empty() { + anyhow::bail!(format!( + "Duplicate host names found: {}", + duplicate_host_names.join(", ") + )); + } + + let duplicate_ports: Vec<(u16, Vec)> = ports + .into_iter() + .filter(|(_, sites)| sites.len() > 1) + .collect(); + + if !duplicate_ports.is_empty() { + let conflict_details: Vec = duplicate_ports + .into_iter() + .map(|(port, sites)| format!("Port {}: [{}]", port, 
sites.join(", "))) + .collect(); + + anyhow::bail!(format!( + "Duplicate ports found with conflicting sites: {}", + conflict_details.join("; ") + )); + } + Ok(()) - } - + pub fn get_parent(&mut self) -> anyhow::Result { // todo - use cache and clear on path change From 92f9781545cc69f0357c88714978d852a42e339a Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 16:23:57 +0000 Subject: [PATCH 17/19] oops --- src/proc_host.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proc_host.rs b/src/proc_host.rs index f50f8ea..8575802 100644 --- a/src/proc_host.rs +++ b/src/proc_host.rs @@ -217,7 +217,7 @@ pub async fn host( use std::os::windows::process::CommandExt; #[cfg(target_os = "windows")] - let cmdx = Command::new(bin_path) + let cmd = Command::new(bin_path) .args(pre_resolved_args) .envs(&process_specific_environment_variables) .current_dir(&workdir) From b638ba8925f6baf678703f4f9817e9efb1af05b0 Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 20:33:54 +0000 Subject: [PATCH 18/19] fix possible crash when terminal size too small to render widgets. support NOH2 hint to allow for H2->H1 via termination. up version to 0.1.0. drop unused deps --- CaddyTest => CaddyTest1 | 0 CaddyTest2 | 9 +++ Cargo.lock | 133 +--------------------------------- Cargo.toml | 19 +---- odd-box.toml | 33 ++++++++- src/configuration/v2.rs | 3 +- src/http_proxy/utils.rs | 7 +- src/main.rs | 4 +- src/tui/connections_widget.rs | 52 ++++++++++--- src/tui/logs_widget.rs | 7 +- src/tui/mod.rs | 13 ++-- src/tui/stats_widget.rs | 8 +- src/tui/threads_widget.rs | 6 ++ 13 files changed, 125 insertions(+), 169 deletions(-) rename CaddyTest => CaddyTest1 (100%) create mode 100644 CaddyTest2 diff --git a/CaddyTest b/CaddyTest1 similarity index 100% rename from CaddyTest rename to CaddyTest1 diff --git a/CaddyTest2 b/CaddyTest2 new file mode 100644 index 0000000..95e4494 --- /dev/null +++ b/CaddyTest2 @@ -0,0 +1,9 @@ +{ + log { + level DEBUG + output stdout + } +} +:8888 { + respond "pong" +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 5c09aaa..a004faa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom", "once_cell", "version_check", "zerocopy", @@ -629,7 +628,7 @@ version = "4.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90239a040c80f5e14809ca132ddc4176ab33d5e17e49691793296e3fcb34d72f" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.55", @@ -952,18 +951,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum-as-inner" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "syn 2.0.55", -] - [[package]] name = "enumflags2" version = "0.7.9" @@ -1306,12 +1293,6 @@ dependencies = [ "http", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -1330,49 +1311,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hickory-proto" 
-version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "hickory-resolver" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" -dependencies = [ - "cfg-if", - "futures-util", - "hickory-proto", - "lru-cache", - "once_cell", - "parking_lot", - "rand", - "smallvec", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "home" version = "0.5.9" @@ -1464,17 +1402,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-hickory" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aaf7e4c6b490fc4863a491325b81a01cfa234ee182d11c712b0abfbf504fc" -dependencies = [ - "hickory-resolver", - "hyper-util", - "tower-service", -] - [[package]] name = "hyper-rustls" version = "0.26.0" @@ -1585,16 +1512,6 @@ dependencies = [ "cc", ] -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.5.0" @@ -1746,12 +1663,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -1798,15 +1709,6 @@ dependencies = [ "hashbrown 0.14.3", ] -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "malloc_buf" version = "0.0.6" @@ -2029,9 +1931,8 @@ dependencies = [ [[package]] name = "odd-box" -version = "0.0.14" +version = "0.1.0" dependencies = [ - "ahash 0.8.11", "anyhow", "async-trait", "axum", @@ -2045,14 +1946,11 @@ dependencies = [ "dashmap", "dirs 5.0.1", "futures-util", - "h2", "hpack", "http-body", "http-body-util", "hyper", - "hyper-hickory", "hyper-rustls 0.27.2", - "hyper-tls", "hyper-tungstenite", "hyper-util", "lazy_static", @@ -2066,21 +1964,17 @@ dependencies = [ "self_update", "serde", "serde_json", - "serde_yaml", "socket2 0.5.6", "time", "tokio", "tokio-rustls 0.26.0", "tokio-stream", "tokio-tungstenite 0.23.1", - "tokio-util", "toml", - "toml_edit 0.22.9", "tower-http", "tracing", "tracing-subscriber", "tungstenite 0.24.0", - "unicase", "url", "utoipa", "utoipa-rapidoc", @@ -2906,19 +2800,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_yaml" -version = "0.9.34+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" -dependencies = [ - "indexmap", - "itoa", - "ryu", - "serde", - "unsafe-libyaml", -] - [[package]] name = "sha1" version = "0.10.6" @@ -3054,7 +2935,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "rustversion", @@ -3616,12 +3497,6 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" -[[package]] -name = "unsafe-libyaml" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" - [[package]] name = "untrusted" version = "0.9.0" @@ -3635,7 +3510,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna", "percent-encoding", ] diff --git a/Cargo.toml b/Cargo.toml index 658d496..97e7c6c 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "odd-box" description = "dead simple reverse proxy server" -version = "0.0.14" +version = "0.1.0" edition = "2021" authors = ["Olof Blomqvist "] repository = "https://github.com/OlofBlomqvist/odd-box" @@ -21,8 +21,6 @@ tracing = "0.1.37" tracing-subscriber = { version="0.3.18", features=[ "env-filter","std","fmt","time"] } url = "2.4.1" lazy_static = "1.4.0" -unicase = "2.7.0" -hyper-tls = "0.6.0" clap = { version="4.4.7", features=["derive"]} rustls = "0.23.12" tokio-rustls = "0.26.0" @@ -30,22 +28,16 @@ rustls-pemfile = "2.0.0" rcgen = "0.13.1" socket2 = "0.5.5" hyper-tungstenite = "0.14.0" - ratatui = "0.28.1" -#ratatui = { git = "https://github.com/ratatui-org/ratatui" } crossterm = { version = "0.28.1" } chrono = "0.4.31" - time = {version="0.3.30",features=["macros","formatting","parsing"]} reqwest = { version = "0.12.4", features = ["json"] } serde_json = "1.0.111" self_update = "0.41.0" bytes = "1.7.1" http-body-util = "0.1.0" -#active-win-pos-rs = "0.8.3" -h2 = "0.4.2" hyper-rustls = { version = "0.27.2", features = ["http2"] } -hyper-hickory = "0.7.0" http-body = "1.0.1" tokio-stream = "0.1.15" ctrlc = "3.2" @@ -54,12 +46,7 @@ webpki = { version = "0.22.4" } anyhow = "1.0.79" uuid = { version = "1.7.0", features = ["v4"] } tungstenite = "0.24.0" -tokio-util = "0.7.11" -toml_edit = "0.22.6" -serde_yaml = "0.9.32" -ahash = "0.8.7" dark-light = "1.0.0" -#console-subscriber = "0.2.0" # === MANAGEMENT API ============================================ axum = { version="0.7.5",features=["ws"] } @@ -84,6 +71,6 @@ windows = { version = "0.58.0", features = ["Win32","Win32_Foundation","Win32_Sy opt-level = 'z' # Optimize for size #lto = true # Enable link-time optimization #codegen-units = 1 # Reduce number of codegen units to increase optimizations -# panic = 'abort' # Abort on panic +#panic = 'abort' # Abort on panic #strip = true # Strip symbols from binary* -#debug = true +#debug = false diff --git a/odd-box.toml b/odd-box.toml index d9887c1..d6e9401 100644 --- a/odd-box.toml +++ b/odd-box.toml @@ -16,16 +16,43 @@ env_vars = [ [[remote_target]] host_name = "caddy-lb-terminated.localtest.me" +disable_tcp_tunnel_mode = true +backends = [ + { https = false, address="127.0.0.1", port=9999, hints = ["NOH2"] }, + { https = false, address="127.0.0.1", port=8888, hints = ["NOH2"] } + +] + +[[hosted_process]] +host_name = "caddy-proc-terminated.localtest.me" +disable_tcp_tunnel_mode = true +hints = ["NOH2"] +auto_start = true +port = 8888 +dir = "$cfg_dir" +bin = "/nix/store/aq5r61lmr9six0lyi6xikxwvnyp16dfy-user-environment/bin/caddy" +args = 
[ + "run", + "--config", + "./CaddyTest2", + "--adapter", + "caddyfile" +] + +[[remote_target]] +host_name = "caddy-lb-tcp.localtest.me" disable_tcp_tunnel_mode = false +hints = ["NOH2"] backends = [ { https = false, address="127.0.0.1", port=9999 }, - { https = false, address="127.0.0.1", port=9999 } + { https = false, address="127.0.0.1", port=8888 } ] [[hosted_process]] -host_name = "caddy-proc-terminated.localtest.me" +host_name = "caddy-proc-tcp.localtest.me" disable_tcp_tunnel_mode = false +hints = ["NOH2"] auto_start = true port = 9999 dir = "$cfg_dir" @@ -33,7 +60,7 @@ bin = "/nix/store/aq5r61lmr9six0lyi6xikxwvnyp16dfy-user-environment/bin/caddy" args = [ "run", "--config", - "./CaddyTest", + "./CaddyTest1", "--adapter", "caddyfile" ] diff --git a/src/configuration/v2.rs b/src/configuration/v2.rs index 84f412f..acc2b35 100644 --- a/src/configuration/v2.rs +++ b/src/configuration/v2.rs @@ -125,7 +125,8 @@ pub enum Hint { /// Server supports http2 via clear text by using an upgrade header H2C, /// Server supports http2 via clear text by using prior knowledge - H2CPK + H2CPK, + NOH2 } #[derive(Debug, Clone, Serialize, Deserialize, ToSchema,Eq,PartialEq,Hash,)] diff --git a/src/http_proxy/utils.rs b/src/http_proxy/utils.rs index f2a092d..010a721 100644 --- a/src/http_proxy/utils.rs +++ b/src/http_proxy/utils.rs @@ -96,6 +96,7 @@ pub async fn proxy( let mut _backend_supports_http2_h2c_using_prior_knowledge = false; let mut use_prior_knowledge_http2 = false; let mut use_h2c_upgrade_header = false; + let mut backend_might_support_h2 = true; for x in &backend.hints.iter().flatten().collect::>() { match x { @@ -107,6 +108,9 @@ pub async fn proxy( }, Hint::H2CPK => { _backend_supports_http2_h2c_using_prior_knowledge = true; + }, + Hint::NOH2 => { + backend_might_support_h2 = false } } } @@ -157,10 +161,11 @@ pub async fn proxy( // --------------------------------------------------------------------------------------------- - let client = if use_prior_knowledge_http2 { + let client = if backend_might_support_h2 && use_prior_knowledge_http2 { *proxied_request.version_mut() = Version::HTTP_2; &h2_only_client // this requires the backend to support h2 prior knowledge or h2 selection by alpn } else { + *proxied_request.version_mut() = Version::HTTP_11; &client // this will use the default http1 client, which will upgrade to h2 if the backend supports it thru upgrade header or alpn }; diff --git a/src/main.rs b/src/main.rs index f24c60e..22d5829 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,5 @@ +#![warn(unused_extern_crates)] + mod configuration; mod types; mod tcp_proxy; @@ -61,7 +63,7 @@ pub fn generate_unique_id() -> u64 { } pub mod global_state { - use std::{sync::atomic::AtomicU64}; + use std::sync::atomic::AtomicU64; #[derive(Debug)] pub struct GlobalState { pub app_state: std::sync::Arc, diff --git a/src/tui/connections_widget.rs b/src/tui/connections_widget.rs index d8630f3..19a7de3 100644 --- a/src/tui/connections_widget.rs +++ b/src/tui/connections_widget.rs @@ -1,3 +1,4 @@ +use std::borrow::BorrowMut; use std::sync::Arc; use ratatui::layout::{Flex, Rect}; @@ -17,9 +18,15 @@ pub fn draw( area: Rect, theme: &Theme ) { + + let size = area.as_size(); + if size.height < 10 || size.width < 10 { + return + } + let headers = [ "Site", "Source", "Target", "Description"]; - let rows : Vec> = global_state.app_state.statistics.active_connections.iter().map(|guard| { + let items : Vec> = global_state.app_state.statistics.active_connections.iter().map(|guard| { let (_,active_connection) = 
guard.pair(); let typ = match &active_connection.connection_type { ProxyActiveConnectionType::TcpTunnelUnencryptedHttp => "UNENCRYPTED TCP TUNNEL".to_string(), @@ -48,16 +55,30 @@ pub fn draw( ] }).collect(); + let height_of_connections_area = area.height.saturating_sub(0); // header and footer + let scroll_pos = { tui_state.connections_tab_state.scroll_state.vertical_scroll }; + let scrollbar_hovered = tui_state.connections_tab_state.scroll_state.scroll_bar_hovered; + let max_scroll_pos = items.len().saturating_sub(height_of_connections_area as usize); + let visible_rows = area.height as usize; + + + // let header_height = 1; + //let visible_rows = area.height as usize - header_height; + let start = scroll_pos.unwrap_or(max_scroll_pos); + let end = std::cmp::min(start + visible_rows, items.len() - 1); + - let header_height = 1; - let visible_rows = area.height as usize - header_height; - let start = tui_state.connections_tab_state.scroll_state.vertical_scroll.unwrap_or_default(); - let end = std::cmp::min(start + visible_rows, rows.len()); + + if start > items.len() || end > items.len() || start >= end { + return + } let is_dark_theme = matches!(&theme,Theme::Dark(_)); + - let display_rows = &rows[start..end]; + + let display_rows = &items[start..end]; let odd_row_bg = if is_dark_theme { Color::from_hsl(15.0, 10.0, 10.0) } else { Color::Rgb(250,250,250) @@ -88,7 +109,7 @@ pub fn draw( tui_state.connections_tab_state.scroll_state.visible_rows = display_rows.iter().len() as usize; - tui_state.connections_tab_state.scroll_state.total_rows = rows.len(); + tui_state.connections_tab_state.scroll_state.total_rows = items.len(); let widths = [ Constraint::Fill(1), @@ -114,7 +135,7 @@ pub fn draw( f.render_widget(table, area); - let scrollbar = Scrollbar::default() + let mut scrollbar = Scrollbar::default() .style(Style::default()) .orientation(ScrollbarOrientation::VerticalRight) .begin_symbol(Some("↑")) @@ -124,11 +145,20 @@ pub fn draw( let height_of_traf_area = area.height.saturating_sub(2); tui_state.connections_tab_state.scroll_state.area_height = height_of_traf_area as usize; - tui_state.connections_tab_state.scroll_state.vertical_scroll_state = tui_state.connections_tab_state.scroll_state.vertical_scroll_state.content_length(rows.len().saturating_sub(height_of_traf_area as usize)); + tui_state.connections_tab_state.scroll_state.vertical_scroll_state = tui_state.connections_tab_state.scroll_state.vertical_scroll_state.content_length(items.len().saturating_sub(height_of_traf_area as usize)); - let scrollbar_area = Rect::new(area.right() - 1, area.top(), 1, area.height); - f.render_stateful_widget(scrollbar,scrollbar_area, &mut tui_state.connections_tab_state.scroll_state.vertical_scroll_state); + if scrollbar_hovered { + scrollbar = scrollbar.thumb_style(Style::default().fg(Color::Yellow).bg(Color::Red)); + } + + let scrollbar_state = tui_state.connections_tab_state.scroll_state.vertical_scroll_state.borrow_mut(); + *scrollbar_state = scrollbar_state.content_length(items.len().saturating_sub(height_of_connections_area as usize)); + + if scroll_pos.is_none() { + *scrollbar_state = scrollbar_state.position(items.len().saturating_sub(height_of_connections_area as usize)); + } + f.render_stateful_widget(scrollbar,area, &mut tui_state.connections_tab_state.scroll_state.vertical_scroll_state); } diff --git a/src/tui/logs_widget.rs b/src/tui/logs_widget.rs index 8d08f55..cd4116c 100644 --- a/src/tui/logs_widget.rs +++ b/src/tui/logs_widget.rs @@ -19,6 +19,11 @@ pub fn draw( area: Rect, _theme: 
&Theme ) { + + let size = area.as_size(); + if size.height < 10 || size.width < 10 { + return + } let mut buffer = log_buffer.lock().expect("locking shared buffer mutex should always work"); @@ -116,7 +121,7 @@ pub fn draw( let visible_rows = area.height as usize; let start = scroll_pos.unwrap_or(max_scroll_pos); - let end = std::cmp::min(start + visible_rows, items.len()); + let end = std::cmp::min(start + visible_rows, items.len() - 1); if start > items.len() || end > items.len() || start >= end { diff --git a/src/tui/mod.rs b/src/tui/mod.rs index 474d2dd..fe84e89 100644 --- a/src/tui/mod.rs +++ b/src/tui/mod.rs @@ -117,9 +117,6 @@ pub async fn run( let tx = tx.clone(); - let _last_key_time = tokio::time::Instant::now(); - let _debounce_duration = Duration::from_millis(300); - let mut tui_state = crate::types::tui_state::TuiState::new(); loop { @@ -127,7 +124,6 @@ pub async fn run( // KEEP LOCK SHORT TO AVOID DEADLOCK { let mut terminal = terminal.lock().await; - terminal.draw(|f| draw_ui::>( f, @@ -136,6 +132,7 @@ pub async fn run( &log_buffer,&theme ) )?; + } @@ -238,7 +235,6 @@ pub async fn run( }, Page::Threads => { match mouse.kind { - event::MouseEventKind::Drag(event::MouseButton::Left) => { tui_state.threads_tab_state.scroll_state.handle_mouse_drag(mouse.column,mouse.row); } @@ -256,6 +252,12 @@ pub async fn run( }, Page::Connections => { match mouse.kind { + event::MouseEventKind::Drag(event::MouseButton::Left) => { + tui_state.connections_tab_state.scroll_state.handle_mouse_drag(mouse.column,mouse.row); + } + event::MouseEventKind::Moved => { + tui_state.connections_tab_state.scroll_state.handle_mouse_move(mouse.column,mouse.row); + } event::MouseEventKind::ScrollDown => { tui_state.connections_tab_state.scroll_state.scroll_down(Some(10)); }, @@ -530,6 +532,7 @@ fn draw_ui( let size = f.area(); + if size.height < 10 || size.width < 10 { return } diff --git a/src/tui/stats_widget.rs b/src/tui/stats_widget.rs index 894320a..d4cf233 100644 --- a/src/tui/stats_widget.rs +++ b/src/tui/stats_widget.rs @@ -18,11 +18,17 @@ pub fn draw( _theme: &Theme ) { + let size = area.as_size(); + if size.height < 20 || size.width < 50 { + return + } + + let total_received_tcp_connections = global_state.request_count.load(std::sync::atomic::Ordering::Relaxed); let p = Paragraph::new(format!("Total received TCP connections: {total_received_tcp_connections}")); let p2 = Paragraph::new(format!("..More to come on this page at some point! 
:D")).fg(Color::DarkGray); - + f.render_widget(p, area.offset(Offset{x:4,y:2})); f.render_widget(p2, area.offset(Offset{x:4,y:4})); } diff --git a/src/tui/threads_widget.rs b/src/tui/threads_widget.rs index 29b15ec..344d763 100644 --- a/src/tui/threads_widget.rs +++ b/src/tui/threads_widget.rs @@ -16,6 +16,12 @@ pub fn draw( theme: &Theme ) { + let size = area.as_size(); + if size.height < 10 || size.width < 10 { + return + } + + let headers = [ "TaskId", "Pid", "HostName", "Port"]; From 2495fc5c49e92ec1868f6a2de1b6e265c788e925 Mon Sep 17 00:00:00 2001 From: Olof Date: Mon, 2 Sep 2024 20:35:52 +0000 Subject: [PATCH 19/19] clippy --- src/tui/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/tui/mod.rs b/src/tui/mod.rs index fe84e89..33961e1 100644 --- a/src/tui/mod.rs +++ b/src/tui/mod.rs @@ -14,7 +14,6 @@ use crate::logging::LogMsg; use crate::types::app_state::*; use crate::types::tui_state::{Page, TuiState}; use std::sync::{Arc, Mutex}; -use std::time::Duration; use crate::http_proxy::ProcMessage; use serde::ser::SerializeStruct; @@ -532,7 +531,7 @@ fn draw_ui( let size = f.area(); - + if size.height < 10 || size.width < 10 { return }