From c9144f158aaed0a21e8be6886bbf66c6c3fd1e1e Mon Sep 17 00:00:00 2001
From: Cynthia
Date: Thu, 9 Jul 2020 18:23:16 -0600
Subject: [PATCH] fix output, fix previously ignored clippy warnings

* as of the error reworking commit, tasks sometimes didn't erase lines
  when they should have, specifically at the beginning of tasks being
  executed, and when all tasks completed. this is now fixed, as the
  task_indicator ignores logs that happened before it started, and
  properly counts how many lines it added that it needs to erase.

* we've reworked all of the codebase, so almost all clippy warnings that
  were previously ignored are no longer ignored, and code has been
  rewritten so the lint step passes.

* There are some "clippy::too_many_arguments" warnings still being
  ignored. I intend to fix these, but they require more of a deft hand,
  and so I don't want to rush it. reworking may also not be needed once
  I try it. (This is currently the most ignored clippy warning according
  to: https://github.com/rust-lang/rust-clippy/issues/5418, and the
  reasoning there is one I agree with.)

* There is one `#[allow(unused)]` for pipeline descriptions. It is
  indeed unused right now, but I want to keep it in the schema because I
  do want to render the pipeline description in list one day. I just
  haven't found quite the right way to do it yet, but I added it because
  I know I want to do it (and I don't want to remove it).

* this has also resulted in a giant split-up of the docker executor, so
  it is no longer all in one gigantic file. This really _really_ needed
  to be done, so I'm glad I finally did it.
---
 src/commands/clean.rs                        |    6 +-
 src/commands/exec.rs                         |    3 +-
 src/commands/list.rs                         |  143 +-
 src/commands/mod.rs                          |    8 +-
 src/commands/run.rs                          |    3 +-
 src/config/mod.rs                            |    9 +-
 src/config/types.rs                          |   15 +-
 src/dirs.rs                                  |  125 +-
 src/executors/docker.rs                      | 1589 +----------------
 src/executors/docker_engine/container.rs     |  305 ++++
 src/executors/docker_engine/container_api.rs |  294 +++
 src/executors/docker_engine/execution_api.rs |  182 ++
 src/executors/docker_engine/image_api.rs     |   39 +
 src/executors/docker_engine/mod.rs           |  223 +++
 src/executors/docker_engine/network_api.rs   |  182 ++
 .../docker_engine/permissions_helper.rs      |  245 +++
 src/executors/docker_engine/version_api.rs   |   16 +
 src/executors/host.rs                        |  235 +--
 src/executors/mod.rs                         |   27 +-
 src/executors/shared.rs                      |  132 ++
 src/fetch/fs.rs                              |    6 +-
 src/fetch/mod.rs                             |   37 +-
 src/fetch/remote.rs                          |    6 +-
 src/log.rs                                   |    4 +-
 src/main.rs                                  |   32 +-
 src/sigint.rs                                |    2 +-
 src/tasks/execution/mod.rs                   |   49 +-
 src/tasks/execution/preparation.rs           |  702 ++++----
 src/tasks/mod.rs                             |  170 +-
 src/terminal/mod.rs                          |   18 +-
 src/terminal/task_indicator.rs               |   15 +-
 src/yaml_err.rs                              |    2 +-
 32 files changed, 2550 insertions(+), 2274 deletions(-)
 create mode 100644 src/executors/docker_engine/container.rs
 create mode 100644 src/executors/docker_engine/container_api.rs
 create mode 100644 src/executors/docker_engine/execution_api.rs
 create mode 100644 src/executors/docker_engine/image_api.rs
 create mode 100644 src/executors/docker_engine/mod.rs
 create mode 100644 src/executors/docker_engine/network_api.rs
 create mode 100644 src/executors/docker_engine/permissions_helper.rs
 create mode 100644 src/executors/docker_engine/version_api.rs
 create mode 100644 src/executors/shared.rs

diff --git a/src/commands/clean.rs b/src/commands/clean.rs
index 0c215d7..628ca5a 100644
--- a/src/commands/clean.rs
+++ b/src/commands/clean.rs
@@ -3,7 +3,7 @@
 //! circumstances where we like powered off while running. However, it should
 //! always be safe to run.
-use crate::executors::{docker::DockerExecutor, host::HostExecutor};
+use crate::executors::{docker, host};
 use color_eyre::Result;
 use tracing::info;
@@ -19,8 +19,8 @@ pub async fn handle_clean_command() -> Result<()> {
 	info!("Cleaning resources ...");
 
-	HostExecutor::clean().await;
-	DockerExecutor::clean().await?;
+	host::Executor::clean().await;
+	docker::Executor::clean().await?;
 
 	info!("Cleaned.");
 	Ok(())
diff --git a/src/commands/exec.rs b/src/commands/exec.rs
index 5d5336b..2e05d07 100644
--- a/src/commands/exec.rs
+++ b/src/commands/exec.rs
@@ -107,7 +107,6 @@ fn report_potential_internal_task_names(
 /// - Error creating an executor/choosing an executor for tasks.
 /// - Error writing the helper scripts.
 /// - Error running the task.
-#[allow(clippy::cognitive_complexity)]
 pub async fn handle_exec_command(
 	config: &TopLevelConf,
 	fetcher: &FetcherRepository,
@@ -204,7 +203,7 @@ pub async fn handle_exec_command(
 		Ok(exit_code) => {
 			if exit_code == 0 {
 				// Don't cause an error for cleaning if the task succeeded, the user can always clean manually.
-				let _ = crate::executors::docker::DockerExecutor::clean().await;
+				let _ = crate::executors::docker::Executor::clean().await;
 				Ok(())
 			} else {
 				Err(eyre!(
diff --git a/src/commands/list.rs b/src/commands/list.rs
index 1c48197..cb3a311 100644
--- a/src/commands/list.rs
+++ b/src/commands/list.rs
@@ -9,6 +9,7 @@
 use crate::{
 	config::types::{OneofOption, TaskConf, TaskType, TopLevelConf},
 	fetch::FetcherRepository,
+	strsim::calculate_did_you_mean_possibilities,
 	tasks::TaskGraph,
 	terminal::TERM,
 };
@@ -95,6 +96,52 @@ fn turn_oneof_into_listable(options: Option<&Vec<OneofOption>>) -> Vec<(String,
 	results
 }
 
+/// Check if an argument is a selectable top level argument aka:
+///
+/// 1. Is a task.
+/// 2. Is a `Oneof` type.
+/// 3. Is not marked as internal.
+fn is_selectable_top_level_arg<'a, 'b>(
+	arg: &'b str,
+	tasks: &'a HashMap<String, TaskConf>,
+) -> Option<&'a TaskConf> {
+	if !tasks.contains_key(arg) {
+		error!(
+			"Argument #1 ({}) is not a task that exists. Listing all possible tasks.",
+			arg,
+		);
+		return None;
+	}
+
+	let selected_task = &tasks[arg];
+	if selected_task.get_type() != &TaskType::Oneof {
+		error!(
+			"Argument #1 ({}) is not a task that can be listed. Listing all possible tasks.",
+			arg,
+		);
+		return None;
+	}
+
+	if selected_task.is_internal() {
+		error!(
+			"Argument #1 ({}) is an internal task, and cannot be listed. Listing all the possible tasks.",
+			arg,
+		);
+		return None;
+	}
+
+	Some(selected_task)
+}
+
+/// Check if a task has options that can be selected.
+fn task_has_options(task: &TaskConf) -> bool {
+	if let Some(options) = task.get_options() {
+		!options.is_empty()
+	} else {
+		false
+	}
+}
+
 /// Handle a raw list configuration.
 ///
 /// `config`: The configuration object.
@@ -134,41 +181,15 @@ fn handle_raw_list(config: &TopLevelConf, tasks: &HashMap<String, TaskConf>) {
 	);
 }
 
-/// Handle the actual `list command`.
-///
-/// `config` - the top level configuration object.
-/// `fetcher` - the thing that goes and fetches for us.
-/// `args` - the arguments for this list command.
-///
-/// # Errors
-///
-/// - When constructing the task graph.
-#[allow(clippy::cognitive_complexity, clippy::too_many_lines)]
-pub async fn handle_list_command(
-	config: &TopLevelConf,
-	fetcher: &FetcherRepository,
-	args: &[String],
-) -> Result<()> {
-	let span = tracing::info_span!("list");
-	let _guard = span.enter();
-
-	// The list command is the main command you get when running the binary.
-	// It is not really ideal for CI environments, and we really only ever
-	// expect humans to run it. This is why we specifically colour it, and
-	// try to always output _something_.
-	let tasks = TaskGraph::new(config, fetcher)
-		.await?
-		.consume_and_get_tasks();
-
+fn handle_listing_arg<'a, 'b>(
+	tasks: &'a HashMap<String, TaskConf>,
+	args: &'b [String],
+) -> Option<&'a TaskConf> {
 	let mut last_selected_task: Option<&TaskConf> = None;
+
 	for (arg_idx, arg) in args.iter().enumerate() {
 		if let Some(prior_task) = last_selected_task {
-			let empty_vec = Vec::with_capacity(0);
-			if prior_task
-				.get_options()
-				.unwrap_or_else(|| &empty_vec)
-				.is_empty()
-			{
+			if !task_has_options(prior_task) {
 				error!(
 					"Argument #{} ({}) could not be found since the previous argument: #{} ({}) has no options. Performing top level list.",
 					arg_idx + 1,
 					arg,
 					arg_idx,
 					prior_task.get_name(),
 				);
-				handle_raw_list(config, &tasks);
 
-				return Ok(());
+				return None;
 			}
 
 			let options = prior_task.get_options().unwrap();
@@ -186,9 +206,8 @@
 
 			// Don't check for internal task here since a oneof could be built off
 			// of internal options, and we want those to be selectable.
-
 			if potential_current_option.is_none() {
-				let did_you_mean_options = crate::strsim::calculate_did_you_mean_possibilities(
+				let did_you_mean_options = calculate_did_you_mean_possibilities(
 					arg,
 					&options
 						.iter()
@@ -239,31 +258,39 @@
 			last_selected_task = Some(selected_task);
 		} else {
-			if !tasks.contains_key(arg) {
-				error!(
-					"Argument #1 ({}) is not a task that exists. Listing all possible tasks.",
-					arg
-				);
-				handle_raw_list(config, &tasks);
-				return Ok(());
-			}
+			let arg = is_selectable_top_level_arg(arg, tasks)?;
+			last_selected_task = Some(arg);
+		}
+	}
 
-			let selected_task = &tasks[arg];
-			if selected_task.get_type() != &TaskType::Oneof {
-				error!("Argument #1 ({}) is not a task that can be listed. Listing all possible tasks.", arg);
-				handle_raw_list(config, &tasks);
-				return Ok(());
-			}
+	last_selected_task
+}
 
-			if selected_task.is_internal() {
-				error!("Argument #1 ({}) is an internal task, and cannot be listed. Listing all the possible tasks.", arg);
-				handle_raw_list(config, &tasks);
-				return Ok(());
-			}
+/// Handle the actual `list command`.
+///
+/// `config` - the top level configuration object.
+/// `fetcher` - the thing that goes and fetches for us.
+/// `args` - the arguments for this list command.
+///
+/// # Errors
+///
+/// - When constructing the task graph.
+pub async fn handle_list_command(
+	config: &TopLevelConf,
+	fetcher: &FetcherRepository,
+	args: &[String],
+) -> Result<()> {
+	let span = tracing::info_span!("list");
+	let _guard = span.enter();
 
-			last_selected_task = Some(selected_task);
-		}
-	}
+	// The list command is the main command you get when running the binary.
+	// It is not really ideal for CI environments, and we really only ever
+	// expect humans to run it. This is why we specifically colour it, and
+	// try to always output _something_.
+	let tasks = TaskGraph::new(config, fetcher)
+		.await?
+		.consume_and_get_tasks();
+	let last_selected_task = handle_listing_arg(&tasks, args);
 
 	if last_selected_task.is_none() {
 		handle_raw_list(config, &tasks);
diff --git a/src/commands/mod.rs b/src/commands/mod.rs
index 78b6ad0..45843de 100644
--- a/src/commands/mod.rs
+++ b/src/commands/mod.rs
@@ -1,6 +1,6 @@
 //! The overarching module for all particular commands implemented by Dev-Loop.
-pub mod clean;
-pub mod exec;
-pub mod list;
-pub mod run;
+pub(crate) mod clean;
+pub(crate) mod exec;
+pub(crate) mod list;
+pub(crate) mod run;
diff --git a/src/commands/run.rs b/src/commands/run.rs
index 5c3e4d3..de37a73 100644
--- a/src/commands/run.rs
+++ b/src/commands/run.rs
@@ -30,7 +30,6 @@ use std::path::PathBuf;
 /// - Error creating an executor/choosing an executor for tasks.
 /// - Error writing the helper scripts.
 /// - Error running the task.
-#[allow(clippy::cognitive_complexity)]
 pub async fn handle_run_command(
 	config: &TopLevelConf,
 	fetcher: &FetcherRepository,
@@ -112,7 +111,7 @@ pub async fn handle_run_command(
 		Ok(exit_code) => {
 			if exit_code == 0 {
 				// Don't cause an error for cleaning if the task succeeded, the user can always clean manually.
-				let _ = crate::executors::docker::DockerExecutor::clean().await;
+				let _ = crate::executors::docker::Executor::clean().await;
 				Ok(())
 			} else {
 				Err(eyre!(
diff --git a/src/config/mod.rs b/src/config/mod.rs
index cfb9998..559f5e1 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -4,7 +4,7 @@
 //!
 //! Those validations happen at different stages within the program.
 
-use crate::yaml_err::contextualize_yaml_err;
+use crate::yaml_err::contextualize;
 use color_eyre::{eyre::WrapErr, Result, Section};
 use std::{
 	fs::{canonicalize, File},
@@ -13,13 +12,12 @@
 };
 use tracing::{error, trace};
 
-pub mod types;
+pub(crate) mod types;
 
 /// Get the root of the project repository.
 ///
 /// This discovers the project directory automatically by looking at
 /// `std::env::current_dir()`, and walking the path up.
-#[allow(clippy::cognitive_complexity)]
 #[must_use]
 pub fn get_project_root() -> Option<PathBuf> {
 	// Get the current directory (this is where we start looking...)
@@ -79,7 +78,7 @@ fn find_and_open_project_config() -> Option<(File, PathBuf)> {
 /// # Errors
 ///
 /// - When there is error doing a file read on a found configuration file.
-pub fn get_top_level_config() -> Result<Option<TopLevelConf>> {
+pub fn get_top_level() -> Result<Option<TopLevelConf>> {
 	let config_fh_opt = find_and_open_project_config();
 	if config_fh_opt.is_none() {
 		error!("Could not find project configuration [.dl/config.yml] looking in current directory, and parent directories.");
@@ -92,7 +91,7 @@
 	config_fh.read_to_string(&mut contents)?;
 
 	Ok(Some(
-		contextualize_yaml_err(
+		contextualize(
 			serde_yaml::from_str::<TopLevelConf>(&contents),
 			".dl/config.yml",
 			&contents,
diff --git a/src/config/types.rs b/src/config/types.rs
index 5c6d878..bd91308 100644
--- a/src/config/types.rs
+++ b/src/config/types.rs
@@ -45,6 +45,7 @@ pub struct ExecutorConf {
 
 impl ProvideConf {
 	/// Create a new implementation of `ProvideConf`
+	#[cfg(test)]
 	#[must_use]
 	pub fn new(name: String, version: Option<String>) -> Self {
 		Self { name, version }
@@ -274,6 +275,7 @@ pub struct NeedsRequirement {
 
 impl NeedsRequirement {
 	/// Create a new `NeedsRequirements`.
+	#[cfg(test)]
 	#[must_use]
 	pub fn new(name: String, version_matcher: Option<String>) -> Self {
 		Self {
@@ -332,6 +334,7 @@ impl PipelineStep {
 	}
 
 	/// Get the description of this `PipelineStep`
+	#[allow(unused)]
 	#[must_use]
 	pub fn get_description(&self) -> Option<&str> {
 		if let Some(desc) = &self.description {
@@ -564,12 +567,6 @@ pub struct TaskConfFile {
 }
 
 impl TaskConfFile {
-	/// Get the list of tasks from this configuration file.
-	#[must_use]
-	pub fn get_tasks(&self) -> &[TaskConf] {
-		&self.tasks
-	}
-
 	/// Set the task location for all the configuration items
 	/// in this file.
 	pub fn set_task_location(&mut self, loc: &str) {
@@ -593,12 +590,6 @@ pub struct ExecutorConfFile {
 }
 
 impl ExecutorConfFile {
-	/// Get the list of executors that this file added.
-	#[must_use]
-	pub fn get_executors(&self) -> &[ExecutorConf] {
-		&self.executors
-	}
-
 	/// Consume the representation of this `ExecutorConfFile`, and receive the executors.
 	#[must_use]
 	pub fn consume_and_get_executors(self) -> Vec<ExecutorConf> {
diff --git a/src/dirs.rs b/src/dirs.rs
index 75a14f5..bc4886f 100644
--- a/src/dirs.rs
+++ b/src/dirs.rs
@@ -1,6 +1,9 @@
+use color_eyre::{eyre::WrapErr, Result, Section};
 use std::{
+	convert::TryFrom,
 	env,
 	ffi::{CStr, OsString},
+	fs::set_permissions,
 	mem,
 	path::PathBuf,
 	ptr,
@@ -14,6 +17,53 @@ cfg_if::cfg_if! {
 	}
 }
 
+/// Rewrite a path under the host's temporary directory so it points at `/tmp`.
+pub fn rewrite_tmp_dir(host_tmp_dir: &str, path: &PathBuf) -> String {
+	let replacement_str = if host_tmp_dir.ends_with('/') {
+		"/tmp/"
+	} else {
+		"/tmp"
+	};
+
+	path.to_string_lossy()
+		.replace(&host_tmp_dir, replacement_str)
+}
+
+#[cfg(target_family = "unix")]
+pub fn mark_as_world_editable(path: &PathBuf) -> Result<()> {
+	use std::fs::Permissions;
+	use std::os::unix::fs::PermissionsExt;
+
+	let executable_permissions = Permissions::from_mode(0o666);
+	set_permissions(path, executable_permissions)
+		.map(|_| ())
+		.wrap_err(format!("Failed to mark file as editable which is needed: [{:?}]", path))
+		.suggestion("If the error isn't immediately clear, please file an issue as it's probably a bug in dev-loop with your system.")
+}
+
+#[cfg(not(target_family = "unix"))]
+pub fn mark_as_world_editable(path: &PathBuf) -> Result<()> {
+	Ok(())
+}
+
+#[cfg(target_family = "unix")]
+pub fn mark_file_as_executable(path: &PathBuf) -> Result<()> {
+	use std::fs::Permissions;
+	use std::os::unix::fs::PermissionsExt;
+
+	let executable_permissions = Permissions::from_mode(0o777);
+
+	set_permissions(path, executable_permissions)
+		.map(|_| ())
+		.wrap_err(format!("Failed to mark file as executable which is needed: [{:?}]", path))
+		.suggestion("If the error isn't immediately clear, please file an issue as it's probably a bug in dev-loop with your system.")
+}
+
+#[cfg(not(target_family = "unix"))]
+pub fn mark_file_as_executable(path: &PathBuf) -> Result<()> {
+	Ok(())
+}
+
 /// Get the temporary directory for this host.
 #[must_use]
 pub fn get_tmp_dir() -> PathBuf {
@@ -35,50 +85,49 @@ pub fn get_tmp_dir() -> PathBuf {
 	}
 }
 
-/// Calculate the home directory of a user.
-#[allow(clippy::items_after_statements)]
-#[must_use]
-pub fn home_dir() -> Option<PathBuf> {
-	return env::var_os("HOME")
-		.and_then(|h| if h.is_empty() { None } else { Some(h) })
-		.or_else(|| unsafe { fallback() })
-		.map(PathBuf::from);
+#[cfg(any(target_os = "android", target_os = "ios", target_os = "emscripten"))]
+unsafe fn home_dir_fallback() -> Option<OsString> {
+	None
+}
 
-	#[cfg(any(target_os = "android", target_os = "ios", target_os = "emscripten"))]
-	unsafe fn fallback() -> Option<OsString> {
-		None
-	}
-	#[cfg(not(any(target_os = "android", target_os = "ios", target_os = "emscripten")))]
-	#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
-	unsafe fn fallback() -> Option<OsString> {
-		let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) {
-			n if n < 0 => 512 as usize,
-			n => n as usize,
-		};
-		let mut buf = Vec::with_capacity(amt);
-		let mut passwd: libc::passwd = mem::zeroed();
-		let mut result = ptr::null_mut();
-		match libc::getpwuid_r(
-			libc::getuid(),
-			&mut passwd,
-			buf.as_mut_ptr(),
-			buf.capacity(),
-			&mut result,
-		) {
-			0 if !result.is_null() => {
-				let ptr = passwd.pw_dir as *const _;
-				let bytes = CStr::from_ptr(ptr).to_bytes();
-				if bytes.is_empty() {
-					None
-				} else {
-					Some(OsStringExt::from_vec(bytes.to_vec()))
-				}
+#[cfg(not(any(target_os = "android", target_os = "ios", target_os = "emscripten")))]
+unsafe fn home_dir_fallback() -> Option<OsString> {
+	let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) {
+		n if n < 0 => 512 as usize,
+		n => usize::try_from(n).unwrap_or(512),
+	};
+	let mut buf = Vec::with_capacity(amt);
+	let mut passwd: libc::passwd = mem::zeroed();
+	let mut result = ptr::null_mut();
+	match libc::getpwuid_r(
+		libc::getuid(),
+		&mut passwd,
+		buf.as_mut_ptr(),
+		buf.capacity(),
+		&mut result,
+	) {
+		0 if !result.is_null() => {
+			let ptr = passwd.pw_dir as *const _;
+			let bytes = CStr::from_ptr(ptr).to_bytes();
+			if bytes.is_empty() {
+				None
+			} else {
+				Some(OsStringExt::from_vec(bytes.to_vec()))
 			}
-			_ => None,
 		}
+		_ => None,
 	}
 }
 
+/// Calculate the home directory of a user.
+#[must_use]
+pub fn home_dir() -> Option<PathBuf> {
+	env::var_os("HOME")
+		.and_then(|h| if h.is_empty() { None } else { Some(h) })
+		.or_else(|| unsafe { home_dir_fallback() })
+		.map(PathBuf::from)
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/src/executors/docker.rs b/src/executors/docker.rs
index 8161fed..2824000 100644
--- a/src/executors/docker.rs
+++ b/src/executors/docker.rs
@@ -1,53 +1,16 @@
-//! Contains the code that represents the Docker Executor, or the executor
-//! that runs your code inside of a Docker Container. Dev-Loop manages the
-//! state of this container, it's network, and the like in it's entirety.
-//!
-//! The docker executor has many arguments to it, that control it's behaviour,
-//! but they're hopefully all sensible.
-//!
-//! The options are configured through `params` (a map of strings passed to the
-//! executor itself). The following attributes you can set are:
-//!
-//! `user`: the user to launch commands as in the container, defaults to root.
-//!
-//! `name_prefix`: the prefix of the container to use. this is required, and
-//!                used to help derive the container name which follows a
-//!                format like: `dl-${name_prefix}${data}`. As such your name
-//!                prefix should end with: `-`.
-//!
-//! `image`: the docker image to use for this container. This is required. This
-//!          should be a full pullable image. For example: `ubuntu:18.04`, or
-//!          `gcr.io/....:latest`
-//!
-//! it should be noted the root project directory, and $TMPDIR -//! will always be mounted. -//! -//! `hostname`: the hostname to use for the docker container. If you don't -//! provide one, it will be derived automatically for you. This is -//! almost always preferred since dev-loop will ensure there are no -//! possible conflicts. -//! -//! `export_env`: a comma seperated list of environment variables to allow -//! to be passed into the container. -//! -//! `tcp_ports_to_expose`: a comma seperated list of ports to export to the -//! host machine. you won't need to set these if you're -//! using two tasks in a pipeline, as each pipeline -//! gets it's own docker network that allows services -//! to natively communicate. -//! -//! `udp_ports_to_expose`: the same as `tcp_ports_to_export` just for udp instead -//! -//! If you ever find yourself with a docker container/network that's running -//! when it's not supposed to be you can use the `clean` command. The clean -//! command will automatically remove all resources associated with `dev-loop`. - use crate::{ config::types::{NeedsRequirement, ProvideConf}, - dirs::get_tmp_dir, - executors::{CompatibilityStatus, Executor}, - future_helper::timeout_with_log_msg, + dirs::{get_tmp_dir, rewrite_tmp_dir}, + executors::{ + docker_engine::{ + delete_container, delete_network, docker_version_check, ensure_docker_container, + ensure_network_attached, ensure_network_exists, execute_command_in_container_async, + get_command_exit_code, has_command_finished, list_devloop_containers, + list_devloop_networks, DockerContainerInfo, SOCKET_PATH, + }, + shared::{create_entrypoint, create_executor_shared_dir, create_log_proxy_files}, + CompatibilityStatus, Executor as ExecutorTrait, + }, tasks::execution::preparation::ExecutableTask, }; @@ -59,12 +22,11 @@ use crossbeam_channel::Sender; use isahc::{ config::VersionNegotiation, prelude::*, Error as HttpError, HttpClient, HttpClientBuilder, }; -use once_cell::sync::Lazy; use semver::{Version, VersionReq}; -use serde_json::Value as JsonValue; use std::{ collections::HashMap, - io::{prelude::*, BufReader, Error as IoError}, + fs::File, + io::{prelude::*, BufReader}, path::PathBuf, sync::{ atomic::{AtomicBool, Ordering}, @@ -72,83 +34,27 @@ use std::{ }, time::Duration, }; -use tracing::{debug, error, info, warn}; - -/// This is the api version we use for talking to the docker socket. -/// -/// The docker socket allows us to choose a versioned api like this, which is -/// why we use it as opposed to using a terminal command (not to mention we -/// don't have to worry about escaping correctly). -/// -/// `v1.40` is chosen because as of the time of writing this -/// `v1.40` is the version for Docker Engine 19.03, which at -/// the time of writing this (July 3rd, 2020) is the lowest supported -/// version according to docker: -/// -/// -/// -/// We can bump this in the future when we know it won't run into anyone. -const DOCKER_API_VERSION: &str = "/v1.40"; -const DOCKER_STATUS_CODES_ERR_NOTE: &str = "To find out what the status code means you can check the Docker documentation: https://docs.docker.com/engine/api/v1.40/."; -const _PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION: &str = "The permissions helper is still experimental. Please report this so it can be fixed before stabilization."; - -cfg_if::cfg_if! { - if #[cfg(unix)] { - const SOCKET_PATH: &str = "/var/run/docker.sock"; - } else if #[cfg(win)] { - // TODO(xxx): named pipes? url? 
-		const SOCKET_PATH: &str = "UNIMPLEMENTED";
-	}
-}
-
-// A global lock for the unix socket since it can't have multiple things communicating
-// at the same time.
-//
-// You can techincally have multiple writers on windows but only up to a particular buff
-// size, and it's just much easier to have just a global lock, and take the extra bit. Really
-// the only time this is truly slow is when we're downloading a docker image.
-static DOCK_SOCK_LOCK: Lazy<async_std::sync::Mutex<()>> =
-	Lazy::new(|| async_std::sync::Mutex::new(()));
-static DOCK_USER_LOCK: Lazy<async_std::sync::Mutex<()>> =
-	Lazy::new(|| async_std::sync::Mutex::new(()));
+use tracing::{debug, error, info};
 
-/// Represents the actual `DockerExecutor`, responsible for maintaining
+/// Represents the actual `Executor` for docker, responsible for maintaining
 /// the lifecycle of a single docker container.
 #[derive(Debug)]
-pub struct DockerExecutor {
+pub struct Executor {
 	/// The HTTPClient used to talking to the docker socket.
 	client: HttpClient,
-	/// The name of the underlying container.
-	container_name: String,
-	/// The environment variables to export.
-	environment_to_export: Vec<String>,
-	/// The list of extra directories to mount.
-	extra_mounts: Vec<String>,
-	/// The image to use for this docker executor.
-	image: String,
-	/// The hostname for this container.
-	hostname: String,
 	/// The root of the project on the host filesystem represented as a string.
 	project_root: String,
 	/// The list of things this provides.
 	provides: HashMap<String, Option<Version>>,
 	/// A random string used for various unique identifiers.
 	random_str: String,
-	/// The group id to run as.
-	run_as_group_id: u32,
-	/// The user id to run as.
-	run_as_user_id: u32,
-	/// The list of ports to export (for tcp).
-	tcp_ports_to_expose: Vec<u32>,
-	/// The temporary directory of the host represented as a string.
+	/// Represents the docker container api.
+	container: DockerContainerInfo,
+	/// The temporary directory.
 	tmp_dir: String,
-	/// The list of udp ports to export.
-	udp_ports_to_expose: Vec<u32>,
-	/// The user to launch the container as.
-	user: String,
 }
 
-impl DockerExecutor {
+impl Executor {
 	/// Create a new Docker Executor.
 	///
 	/// `project_root`: the root of the project directory.
@@ -163,26 +69,12 @@
 	///
 	/// The docker executor must have all of it's required arguments passed in
 	/// otherwise it will error on construction.
-	#[allow(unused_assignments, clippy::too_many_lines)]
 	pub fn new(
 		project_root: &PathBuf,
 		executor_args: &HashMap<String, String>,
 		provided_conf: &[ProvideConf],
 		override_sock_path: Option<String>,
 	) -> Result<Self> {
-		// First get the project_root directory, and tmp_dir as strings.
-		//
-		// We need them as strings more than paths.
-
-		let tmp_dir = get_tmp_dir();
-		let tmp_dir_as_string = tmp_dir.to_str();
-		if tmp_dir_as_string.is_none() {
-			return Err(eyre!(
-				"Failed to turn the temporary directory: [{:?}] into a utf8-string.",
-				tmp_dir,
-			)).suggestion("Please change the environment variable TMPDIR to a path that is a UTF-8 only file path.")
-			.note("You can also unset it if you have a `/tmp` directory.");
-		}
 		let pr_as_string = project_root.to_str();
 		if pr_as_string.is_none() {
 			return Err(eyre!(
@@ -198,98 +90,6 @@
 		// Next Generate the random name for the container to use that won't clash.
 		let random_str = format!("{}", uuid::Uuid::new_v4());
 
-		// Finally parse out all the executor arguments, including the required ones.
-		let mut container_name = "dl-".to_owned();
-		if let Some(user_specified_prefix) = executor_args.get("name_prefix") {
-			container_name += user_specified_prefix;
-		} else {
-			return Err(eyre!(
-				"Docker Executor requires a `name_prefix` field to know how to name containers!"
-			)).suggestion("Add a `name_prefix` field to `params` that specifys the name prefix for containers")
-			.note("You can find the full list of fields here: https://dev-loop.kungfury.io/docs/schemas/executor-conf");
-		}
-		container_name += &random_str;
-
-		let mut image = String::new();
-		if let Some(image_identifier) = executor_args.get("image") {
-			image = image_identifier.to_owned();
-		} else {
-			return Err(eyre!(
-				"Docker Executor requires an `image` to know which docker image to use."
-			)).suggestion("Add an `image` field to `params` that specifys the docker image to use.")
-			.note("You can find the full list of fields here: https://dev-loop.kungfury.io/docs/schemas/executor-conf");
-		}
-
-		let mut group_id = 0;
-		let mut user_id = 0;
-		if let Some(permission_helper_active) = executor_args.get("experimental_permission_helper")
-		{
-			if &permission_helper_active.to_ascii_lowercase() == "true" {
-				user_id = users::get_effective_uid();
-				group_id = users::get_effective_gid();
-			}
-		}
-
-		let mut env_vars = Vec::new();
-		if let Some(envs_to_export) = executor_args.get("export_env") {
-			env_vars = envs_to_export
-				.split(',')
-				.map(|the_str| {
-					std::env::var(the_str)
-						.map(|val| format!("{}={}", the_str, val))
-						.unwrap_or_else(|_| the_str.to_owned())
-				})
-				.collect::<Vec<String>>();
-		}
-
-		let mut tcp_ports_to_expose = Vec::new();
-		if let Some(ports_to_expose) = executor_args.get("tcp_ports_to_expose") {
-			tcp_ports_to_expose = ports_to_expose
-				.split(',')
-				.filter_map(|item| {
-					let item_pr = item.parse::<u32>();
-					if item_pr.is_err() {
-						warn!(
-							"Not exposing tcp port: [{}] as it is not a valid positive number.",
-							item
-						);
-					}
-
-					item_pr.ok()
-				})
-				.collect::<Vec<u32>>();
-		}
-
-		let mut udp_ports_to_expose = Vec::new();
-		if let Some(ports_to_expose) = executor_args.get("udp_ports_to_expose") {
-			udp_ports_to_expose = ports_to_expose
-				.split(',')
-				.filter_map(|item| {
-					let item_pr = item.parse::<u32>();
-					if item_pr.is_err() {
-						warn!(
-							"Not exposing udp port: [{}] as it is not a valid positive number.",
-							item
-						);
-					}
-
-					item_pr.ok()
-				})
-				.collect::<Vec<u32>>();
-		}
-
-		let user = executor_args
-			.get("user")
-			.map_or_else(|| "root".to_owned(), String::from);
-
-		let hostname = if let Some(hostname_ref) = executor_args.get("hostname") {
-			hostname_ref.to_owned()
-		} else {
-			let mut string = executor_args.get("name_prefix").unwrap().to_owned();
-			string.pop();
-			string
-		};
-
 		let mut provides = HashMap::new();
 		for provided in provided_conf {
 			let version_opt = if provided.get_version().is_empty() {
@@ -303,88 +103,6 @@
 			provides.insert(provided.get_name().to_owned(), version_opt);
 		}
 
-		// For mounts remember they:
-		//
-		// 1. Need to be relative to home (signified by starting with: `~` as a
-		//    source location), or be relative to the project root.
-		// 2. Each mount is in the form: `${src}:${dest}`.
-		// 3. Docker will error if a mount doesn't exist, so we need to skip it if
-		//    it's not on the FS.
-		let mut extra_mounts = Vec::new();
-		if let Some(mount_str_ref) = executor_args.get("extra_mounts") {
-			extra_mounts = mount_str_ref
-				.split(',')
-				.filter_map(|item| {
-					let mounts = item.split(':').collect::<Vec<&str>>();
-					if mounts.len() != 2 {
-						warn!(
-							"{:?}",
-							Err::<(), Report>(eyre!(
-								"Mount String for Docker Executor: [{}] is invalid, missing path for container. Will not mount.",
-								item,
-							))
-							.note("Mounts should be in the format: `host_path:path_in_container`")
-							.unwrap_err()
-						);
-						return None;
-					}
-
-					let src = mounts[0];
-					let dest = mounts[1];
-
-					let src =
-						if src.starts_with('~') {
-							let potential_home_dir = crate::dirs::home_dir();
-							if potential_home_dir.is_none() {
-								warn!(
-									"{:?}",
-									Err::<(), Report>(eyre!(
-										"Mount String: [{}] for Docker Executor's source path is relative to the home directory, but the home directory couldn't be found. Will not mount.",
-										item,
-									))
-									.suggestion("You can manually specify the home directory with the `HOME` environment variable.")
-									.unwrap_err()
-								);
-								return None;
-							}
-							let home_dir = potential_home_dir.unwrap();
-							let home_dir = home_dir.to_str();
-							if home_dir.is_none() {
-								warn!(
-									"{:?}",
-									Err::<(), Report>(eyre!(
-										"Home directory is not set to a UTF-8 only string."
-									)).note("If you're not sure how to solve this error, please open an issue.").unwrap_err(),
-								);
-								return None;
-							}
-							let home_dir = home_dir.unwrap();
-
-							src.replace("~", home_dir)
-						} else if src.starts_with('/') {
-							src.to_owned()
-						} else {
-							pr_as_string.to_owned() + "/" + src
-						};
-
-					let src_as_pb = std::path::PathBuf::from(&src);
-					if !src_as_pb.exists() {
-						warn!(
-							"{:?}",
-							Err::<(), Report>(eyre!(
-								"Mount String: [{}] specified a source directory: [{}] that does not exist. Will not mount.",
-								item,
-								src,
-							)).unwrap_err(),
-						);
-						return None;
-					}
-
-					Some(format!("{}:{}", src, dest))
-				})
-				.collect::<Vec<String>>();
-		}
-
 		let client = if cfg!(target_os = "windows") {
 			// TODO(xxx): set windows named pipe/url
 			HttpClientBuilder::new()
@@ -397,242 +115,23 @@
 				.build()
 		}?;
 
+		let container = DockerContainerInfo::new(executor_args, pr_as_string, &random_str)?;
+
 		Ok(Self {
 			client,
-			container_name,
-			environment_to_export: env_vars,
-			extra_mounts,
-			image,
-			hostname,
 			project_root: pr_as_string.to_owned(),
 			provides,
 			random_str,
-			run_as_group_id: group_id,
-			run_as_user_id: user_id,
-			tcp_ports_to_expose,
-			tmp_dir: tmp_dir_as_string.unwrap().to_owned(),
-			udp_ports_to_expose,
-			user,
+			container,
+			tmp_dir: get_tmp_dir().to_string_lossy().to_string(),
		})
	}
 
-	/// Call the docker engine api using the GET http method.
-	///
-	/// `client`: the http client to use.
-	/// `path`: the path to call (along with Query Args).
-	/// `long_call_msg`: the message to print when docker is taking awhile to respond.
-	/// `timeout`: The optional timeout. Defaults to 30 seconds.
-	/// `is_json`: whether or not to parse the response as json.
-	async fn docker_api_get(
-		client: &HttpClient,
-		path: &str,
-		long_call_msg: String,
-		timeout: Option<Duration>,
-		is_json: bool,
-	) -> Result<JsonValue> {
-		let _guard = DOCK_SOCK_LOCK.lock().await;
-
-		let log_timeout = Duration::from_secs(3);
-		let timeout_frd = timeout.unwrap_or_else(|| Duration::from_secs(30));
-		let url = format!("http://localhost{}{}", DOCKER_API_VERSION, path);
-		debug!("URL for get will be: {}", url);
-		let req = Request::get(url)
-			.header("Accept", "application/json; charset=UTF-8")
-			.header("Content-Type", "application/json; charset=UTF-8")
-			.body(())
-			.wrap_err("Internal-Error: Failed to construct http request.")
-			.suggestion("Please report this as an issue so it can be fixed.")?;
-
-		let mut resp = timeout_with_log_msg(
-			long_call_msg,
-			log_timeout,
-			timeout_frd,
-			client.send_async(req),
-		)
-		.await
-		.context(format!("Requesting URL: {}", path))?
-		.context(format!("Requesting URL: {}", path))?;
-
-		let status = resp.status().as_u16();
-		if status < 200 || status > 299 {
-			return Err(eyre!(
-				"Docker responded to path: [{}] with a status code: [{}] which is not in the 200-300 range.",
-				path,
-				status,
-			))
-			.note(DOCKER_STATUS_CODES_ERR_NOTE);
-		}
-
-		if is_json {
-			Ok(resp
-				.json()
-				.wrap_err("Failure to response Docker response as JSON")
-				.context(format!("Requesting URL: {}", path))?)
-		} else {
-			// Ensure the response body is read in it's entirerty. Otherwise
-			// the body could still be writing, but we think we're done with the
-			// request, and all of a sudden we're writing to a socket while
-			// a response body is all being written and it's all bad.
-			let _ = resp.text();
-			Ok(serde_json::Value::default())
-		}
-	}
-
-	/// Call the docker engine api using the POST http method.
-	///
-	/// `client`: the http client to use.
-	/// `path`: the path to call (along with Query Args).
-	/// `long_call_msg`: the message to print when docker is taking awhile to respond.
-	/// `body`: The body to send to the remote endpoint.
-	/// `timeout`: the optional timeout. Defaults to 30 seconds.
-	/// `is_json`: whether to attempt to read the response body as json.
-	async fn docker_api_post(
-		client: &HttpClient,
-		path: &str,
-		long_call_message: String,
-		body: Option<JsonValue>,
-		timeout: Option<Duration>,
-		is_json: bool,
-	) -> Result<JsonValue> {
-		let _guard = DOCK_SOCK_LOCK.lock().await;
-
-		let long_timeout = Duration::from_secs(3);
-		let timeout_frd = timeout.unwrap_or_else(|| Duration::from_secs(30));
-		let url = format!("http://localhost{}{}", DOCKER_API_VERSION, path);
-		debug!("URL for get will be: {}", url);
-		let req_part = Request::post(url)
-			.header("Accept", "application/json; charset=UTF-8")
-			.header("Content-Type", "application/json; charset=UTF-8")
-			.header("Expect", "");
-		let req = if let Some(body_data) = body {
-			req_part
-				.body(
-					serde_json::to_vec(&body_data)
-						.wrap_err("Failure converting HTTP Request Body to JSON")
-						.suggestion("This is an internal error, please report this issue.")?,
-				)
-				.wrap_err("Failed to write body to request")
-				.suggestion("This is an internal error, please report this issue.")?
-		} else {
-			req_part
-				.body(Vec::new())
-				.wrap_err("Failed to write body to request")
-				.suggestion("This is an internal error, please report this issue.")?
-		};
-		let mut resp = timeout_with_log_msg(
-			long_call_message,
-			long_timeout,
-			timeout_frd,
-			client.send_async(req),
-		)
-		.await
-		.context(format!("Requesting URL: {}", path))?
-		.context(format!("Requesting URL: {}", path))?;
-
-		let status = resp.status().as_u16();
-		if status < 200 || status > 299 {
-			return Err(eyre!(
-				"Docker responded to path: [{}] with a status code: [{}] which is not in the 200-300 range.",
-				path,
-				status,
-			))
-			.note(DOCKER_STATUS_CODES_ERR_NOTE);
-		}
-
-		if is_json {
-			Ok(resp
-				.json()
-				.wrap_err("Failure to response Docker response as JSON")
-				.context(format!("HTTP Request Path: {}", path))?)
-		} else {
-			// Ensure the response body is read in it's entirerty. Otherwise
-			// the body could still be writing, but we think we're done with the
-			// request, and all of a sudden we're writing to a socket while
-			// a response body is all being written and it's all bad.
-			let _ = resp.text();
-			Ok(serde_json::Value::default())
-		}
-	}
-
-	/// Call the docker engine api using the POST http method.
-	///
-	/// `client`: the http client to use.
-	/// `path`: the path to call (along with Query Args).
-	/// `long_call_msg`: the message to print when docker is taking awhile to respond.
-	/// `body`: The body to send to the remote endpoint.
-	/// `timeout`: the timeout for this requests, defaults to 30 seconds.
-	/// `is_json`: whether to actually try to read the response body as json.
-	async fn docker_api_delete(
-		client: &HttpClient,
-		path: &str,
-		long_call_msg: String,
-		body: Option<JsonValue>,
-		timeout: Option<Duration>,
-		is_json: bool,
-	) -> Result<JsonValue> {
-		let _guard = DOCK_SOCK_LOCK.lock().await;
-
-		let long_dur = Duration::from_secs(3);
-		let timeout_frd = timeout.unwrap_or_else(|| Duration::from_secs(30));
-		let url = format!("http://localhost{}{}", DOCKER_API_VERSION, path);
-		debug!("URL for get will be: {}", url);
-
-		let req_part = Request::delete(url)
-			.header("Accept", "application/json; charset=UTF-8")
-			.header("Content-Type", "application/json; charset=UTF-8")
-			.header("Expect", "");
-		let req = if let Some(body_data) = body {
-			req_part
-				.body(
-					serde_json::to_vec(&body_data)
-						.wrap_err("Failure converting HTTP Request Body to JSON")
-						.suggestion("This is an internal error, please report this issue.")?,
-				)
-				.wrap_err("Failed to write body to request")
-				.suggestion("This is an internal error, please report this issue.")?
-		} else {
-			req_part
-				.body(Vec::new())
-				.wrap_err("Failed to write body to request")
-				.suggestion("This is an internal error, please report this issue.")?
-		};
-		let mut resp =
-			timeout_with_log_msg(long_call_msg, long_dur, timeout_frd, client.send_async(req))
-				.await
-				.context(format!("Requesting URL: {}", path))?
-				.context(format!("Requesting URL: {}", path))?;
-
-		let status = resp.status().as_u16();
-		if status < 200 || status > 299 {
-			return Err(eyre!(
-				"Docker responded to path: [{}] with a status code: [{}] which is not in the 200-300 range.",
-				path,
-				status,
-			))
-			.note(DOCKER_STATUS_CODES_ERR_NOTE);
-		}
-
-		if is_json {
-			Ok(resp
-				.json()
-				.wrap_err("Failure to response Docker response as JSON")
-				.context(format!("HTTP Request Path: {}", path))?)
-		} else {
-			// Ensure the response body is read in it's entirerty. Otherwise
-			// the body could still be writing, but we think we're done with the
-			// request, and all of a sudden we're writing to a socket while
-			// a response body is all being written and it's all bad.
-			let _ = resp.text();
-			Ok(serde_json::Value::default())
-		}
-	}
-
 	/// Attempt to clean up all resources left behind by the docker executor.
 	///
 	/// # Errors
 	///
 	/// - when there is an issue talking to the docker api for containers.
- #[allow(clippy::cognitive_complexity, clippy::too_many_lines)] pub async fn clean() -> Result<()> { // Cleanup all things left behind by the docker executor. if Self::is_compatible().await != CompatibilityStatus::Compatible { @@ -652,137 +151,25 @@ impl DockerExecutor { } .wrap_err("Failed to construct HTTP-Client to talk to Docker")?; - // First cleanup containers. - let containers_json_res = Self::docker_api_get( - &client, - "/containers/json?all=true", - "Taking awhile to query containers from docker. Will wait up to 30 seconds.".to_owned(), - None, - true, - ) - .await; - match containers_json_res { - Ok(res) => { - if let Some(containers) = res.as_array() { - for container in containers { - let names_opt = container.get("Names"); - if names_opt.is_none() { - continue; - } - let names_untyped = names_opt.unwrap(); - let names_typed_opt = names_untyped.as_array(); - if names_typed_opt.is_none() { - continue; - } - let names = names_typed_opt.unwrap(); - - let mut dl_name = String::new(); - for name in names { - if let Some(name_str) = name.as_str() { - if name_str.starts_with("/dl-") { - dl_name = name_str.to_owned(); - } - } - } - - if dl_name.is_empty() { - continue; - } - - debug!("Found dev-loop container: [{}]", dl_name); - // The container may already be stopped so ignore kill. - let _ = Self::docker_api_post( - &client, - &format!("/containers{}/kill", dl_name), - "Docker is not killing the container in a timely manner. Will wait up to 30 seconds.".to_owned(), - None, - None, - false, - ) - .await; - // The container may have been launched with --rm. - let _ = Self::docker_api_delete( - &client, - &format!("/containers{}?v=true&force=true&link=true", dl_name,), - "Docker is taking awhile to remove the container. Will wait up to 30 seconds.".to_owned(), - None, - None, - false, - ) - .await; - } - } - } - Err(container_json_err) => { - error!( - "{:?}", - Err::<(), Report>(container_json_err) - .note("Will not clean up docker containers due to this error.") - .suggestion("To manually clean up containers use `docker ps -a` to list containers, and `docker kill ${container name that starts with `dl-`}`.") - .unwrap_err() - ); - } + for container in list_devloop_containers(&client).await.wrap_err("Failed to list containers").note("Will not clean up docker containers due to this error.").suggestion("To manually clean up containers use `docker ps -a` to list containers, and `docker kill ${container name that starts with `dl-`}`")? { + debug!("Found dev-loop container: [{}]", container); + delete_container(&client, &container).await; } - // Next checkout networks... - let network_json_res = Self::docker_api_get( - &client, - "/networks", - "Taking ahwile to query networks from docker. Will wait up til 30 seconds.".to_owned(), - None, - true, - ) - .await; - match network_json_res { - Ok(res) => { - if let Some(networks) = res.as_array() { - for network in networks { - if let Some(name_untyped) = network.get("Name") { - if let Some(name_str) = name_untyped.as_str() { - if name_str.starts_with("dl-") { - debug!("Found dev-loop network: [{}]", name_str); - - if let Err(delete_err) = Self::docker_api_delete( - &client, - &format!("/networks/{}", name_str), - "Docker is taking awhile to delete a docker network. 
Will wait up to 30 seconds.".to_owned(), - None, - None, - false, - ) - .await - { - error!( - "{:?}", - Err::<(), Report>(delete_err) - .note(format!("Failed to delete docker network: [{}]", name_str)) - .suggestion(format!("You can also try deleting the network manually with: `docker network rm {}`", name_str)) - .unwrap_err() - ); - } - } - } - } - } - } - } - Err(network_err) => { - error!( - "{:?}", - Err::<(), Report>(network_err) - .note("Will not clean up docker networks due to this error.") - .suggestion("To manually clean up networks use `docker network ls` to list networks, and `docker network rm ${network name that starts with `dl-`}`.") - .unwrap_err() - ); - } + for network in list_devloop_networks(&client) + .await + .wrap_err("Failed to list networks") + .note("Will not delete docker networks due to this error.")? + { + debug!("Found dev-loop network: [{}]", network); + delete_network(&client, &network).await; } // Done! \o/ Ok(()) } - /// Determines if this `DockerExecutor` is compatible with the system. - #[allow(clippy::single_match_else)] + /// Determines if this `Executor` is compatible with the system. pub async fn is_compatible() -> CompatibilityStatus { let client = if cfg!(target_os = "windows") { HttpClientBuilder::new() @@ -805,23 +192,15 @@ impl DockerExecutor { } let client = client.unwrap(); - let version_resp_res = Self::docker_api_get( - &client, - "/version", - "Taking awhile to query version from docker. Will wait up to 30 seconds.".to_owned(), - None, - true, - ) - .await; - - match version_resp_res { - Ok(data) => match data.get("Version") { - Some(_) => CompatibilityStatus::Compatible, - None => { + match docker_version_check(&client).await { + Ok(data) => { + if data.get("Version").is_some() { + CompatibilityStatus::Compatible + } else { debug!("Failed to get key: `Version` from docker executor api!"); CompatibilityStatus::CouldBeCompatible("install docker".to_owned()) } - }, + } Err(http_err) => { debug!( "{:?}", @@ -834,632 +213,13 @@ impl DockerExecutor { } } - /// Get the container name used for this Docker Executor. - #[must_use] pub fn get_container_name(&self) -> &str { - &self.container_name - } - - /// Ensure a particular network exists. - /// - /// `client`: the http client. - /// `pipeline_id`: the pipeline id for the task. - /// - /// # Errors - /// - /// If we cannot talk to the docker socket, or there is an error creating the network. - pub async fn ensure_network_exists(client: &HttpClient, pipeline_id: &str) -> Result<()> { - let network_id = format!("dl-{}", pipeline_id); - let network_url = format!("/networks/{}", network_id); - let res = Self::docker_api_get( - client, - &network_url, - "Taking awhile to query network existance status from docker. Will wait up to 30 seconds.".to_owned(), - None, - true - ).await - .wrap_err(format!("Failed to query network information: {}", network_id)); - - if res.is_err() { - let json_body = serde_json::json!({ - "Name": network_id, - }); - let _ = Self::docker_api_post( - client, - "/networks/create", - "Docker is not creating the network in a timely manner. Will wait up to 30 seconds.".to_owned(), - Some(json_body), - None, - false - ) - .await - .wrap_err(format!("Failed to create docker network: {}", network_id))?; - } - - Ok(()) - } - - /// Download the Image for this docker executor. - /// - /// # Errors - /// - /// Errors when it the docker api cannot be talked too, or the image cannot be downloaded. 
-	pub async fn download_image(&self) -> Result<()> {
-		let image_tag_split = self.image.rsplitn(2, ':').collect::<Vec<&str>>();
-		let (image_name, tag_name) = if image_tag_split.len() == 2 {
-			(image_tag_split[1], image_tag_split[0])
-		} else {
-			(image_tag_split[0], "latest")
-		};
-		let url = format!("/images/create?fromImage={}&tag={}", image_name, tag_name);
-
-		let _ = Self::docker_api_post(
-			&self.client,
-			&url,
-			format!(
-				"Downloading the docker_image: [{}:{}]",
-				image_name, tag_name
-			),
-			None,
-			Some(Duration::from_secs(3600)),
-			false,
-		)
-		.await
-		.wrap_err(format!(
-			"Failed to download image: [{}:{}]",
-			image_name, tag_name
-		))?;
-
-		Ok(())
-	}
-
-	/// Determine if the container is created, and then if it's wondering.
-	///
-	/// # Errors
-	///
-	/// Errors when the docker api cannot be talked to.
-	pub async fn is_container_created_and_running(&self) -> Result<(bool, bool)> {
-		let url = format!("/containers/{}/json?size=false", &self.container_name);
-		let mut is_created = false;
-		let mut is_running = false;
-
-		// Ignore errors since a 404 for no container is an Error.
-		if let Ok(value) = Self::docker_api_get(
-			&self.client,
-			&url,
-			"Taking awhile to query container status from docker. Will wait up to 30 seconds."
-				.to_owned(),
-			None,
-			true,
-		)
-		.await
-		{
-			is_created = true;
-			let is_running_status = &value["State"]["Running"];
-			if is_running_status.is_boolean() {
-				is_running = is_running_status.as_bool().unwrap();
-			}
-		}
-
-		Ok((is_created, is_running))
-	}
-
-	/// Creates the container, should only be called when it does not yet exist.
-	///
-	/// # Errors
-	///
-	/// Errors when the docker socket cannot be talked too, or there is a conflict
-	/// creating the container.
-	pub async fn create_container(&self) -> Result<()> {
-		let tmp_dir = get_tmp_dir();
-		let tmp_path = tmp_dir.to_str().unwrap();
-
-		let mut mounts = Vec::new();
-		mounts.push(serde_json::json!({
-			"Source": self.project_root,
-			"Target": "/mnt/dl-root",
-			"Type": "bind",
-			"Consistency": "consistent",
-		}));
-		mounts.push(serde_json::json!({
-			"Source": tmp_path,
-			"Target": "/tmp",
-			"Type": "bind",
-			"Consistency": "consistent",
-		}));
-		for emount in &self.extra_mounts {
-			let mut split = emount.split(':');
-			let source: &str = split.next().unwrap();
-			let target: &str = split.next().unwrap();
-			mounts.push(serde_json::json!({
-				"Source": source,
-				"Target": target,
-				"Type": "bind",
-				"Consistency": "consistent",
-			}));
-		}
-
-		let mut port_mapping = serde_json::map::Map::<String, JsonValue>::new();
-		let mut host_config_mapping = serde_json::map::Map::<String, JsonValue>::new();
-
-		for tcp_port in &self.tcp_ports_to_expose {
-			port_mapping.insert(format!("{}/tcp", tcp_port), serde_json::json!({}));
-			host_config_mapping.insert(
-				format!("{}/tcp", tcp_port),
-				serde_json::json!([serde_json::json!({ "HostPort": format!("{}", tcp_port) }),]),
-			);
-		}
-		for udp_port in &self.udp_ports_to_expose {
-			port_mapping.insert(format!("{}/udp", udp_port), serde_json::json!({}));
-			host_config_mapping.insert(
-				format!("{}/udp", udp_port),
-				serde_json::json!([serde_json::json!({ "HostPort": format!("{}", udp_port) }),]),
-			);
-		}
-
-		let url = format!("/containers/create?name={}", &self.container_name);
-		let body = serde_json::json!({
-			"Cmd": ["tail", "-f", "/dev/null"],
-			"Entrypoint": "",
-			"Image": self.image,
-			"Hostname": self.hostname,
-			"User": self.user,
-			"HostConfig": {
-				"AutoRemove": true,
-				"Mounts": mounts,
-				"Privileged": true,
-				"PortBindings": host_config_mapping,
-			},
-			"WorkingDir": "/mnt/dl-root",
"AttachStdout": true, - "AttachStderr": true, - "Privileged": true, - "Tty": true, - "ExposedPorts": port_mapping, - }); - let _ = Self::docker_api_post( - &self.client, - &url, - "Docker is not creating the container in a timely manner. Will wait up to 30 seconds." - .to_owned(), - Some(body), - None, - false, - ) - .await - .wrap_err("Failed to create the docker container")?; - - Ok(()) - } - - /// Execute a raw command, returning the "execution id" to check back in on it. - /// - /// `command`: the command to execute. - /// `use_user_ids`: if to use the setup user ids (always true during non-setup). - /// - /// # Errors - /// - /// Errors if we fail to create an exec instance with docker. - pub async fn raw_execute(&self, command: &[String], use_user_ids: bool) -> Result { - let url = format!("/containers/{}/exec", &self.container_name); - let body = if use_user_ids && (self.run_as_user_id != 0 || self.run_as_group_id != 0) { - serde_json::json!({ - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "User": &format!("{}:{}", self.run_as_user_id, self.run_as_group_id), - "Privileged": true, - "Cmd": command, - "Env": &self.environment_to_export, - }) - } else { - serde_json::json!({ - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "User": &self.user, - "Privileged": true, - "Cmd": command, - "Env": &self.environment_to_export, - }) - }; - - let resp = Self::docker_api_post( - &self.client, - &url, - "Docker is taking awhile to start running a new command. Will wait up to 30 seconds." - .to_owned(), - Some(body), - None, - true, - ) - .await - .wrap_err("Failed to send new command to Docker container")?; - let potential_id = &resp["Id"]; - if !potential_id.is_string() { - return Err(eyre!( - "Failed to find \"Id\" in response from docker: [{:?}]", - resp, - )); - } - let exec_id = potential_id.as_str().unwrap().to_owned(); - - let start_url = format!("/exec/{}/start", &exec_id); - let start_body = serde_json::json!({ - "Detach": true, - "Tty": false, - }); - - let _ = Self::docker_api_post( - &self.client, - &start_url, - "Docker is taking awhile to start running a new command. Will wait up to 30 seconds." - .to_owned(), - Some(start_body), - None, - false, - ) - .await - .wrap_err("Failed to tell Docker container to start executing command")?; - - Ok(exec_id) - } - - /// Execute a raw command, and wait til it's finished. Returns execution id so you can checkup on it. - /// - /// `command`: the command to execute. - /// `use_user_ids`: if to use the setup user ids (always true during non-setup). - /// - /// # Errors - /// - /// Errors if we cannot talk to docker to create an exec instance. - pub async fn raw_execute_and_wait( - &self, - command: &[String], - use_user_ids: bool, - ) -> Result { - let execution_id = self.raw_execute(command, use_user_ids).await?; - - loop { - if Self::has_execution_finished(&self.client, &execution_id).await { - break; - } - - async_std::task::sleep(Duration::from_micros(10)).await; - } - - Ok(execution_id) - } - - /// Determine if a particular execution ID has finished executing. - /// - /// `client`: the http client instance to query the docker socket. - /// `execution_id`: the execution id to check if it's finished. - pub async fn has_execution_finished(client: &HttpClient, execution_id: &str) -> bool { - let url = format!("/exec/{}/json", execution_id); - let resp_res = Self::docker_api_get( - client, - &url, - "Taking awhile to determine if command has finished running in docker. 
-			None,
-			true
-		).await;
-		if resp_res.is_err() {
-			return false;
-		}
-		let resp = resp_res.unwrap();
-		let is_running_opt = &resp["Running"];
-		if !is_running_opt.is_boolean() {
-			return false;
-		}
-
-		!is_running_opt.as_bool().unwrap()
-	}
-
-	/// Get the exit code for a particular execution
-	///
-	/// `client`: the HTTP Client to talk to the docker engine api.
-	/// `execution_id`: the execution id.
-	///
-	/// # Errors
-	///
-	/// If we cannot find an `ExitCode` in the docker response, or talk to the docker socket.
-	pub async fn get_execution_status_code(client: &HttpClient, execution_id: &str) -> Result<i64> {
-		let url = format!("/exec/{}/json", execution_id);
-		let resp = Self::docker_api_get(
-			client,
-			&url,
-			"Taking awhile to query exit code of command from docker. Will wait up to 30 seconds."
-				.to_owned(),
-			None,
-			true,
-		)
-		.await
-		.wrap_err("Failed to query exit code from Docker")?;
-		let exit_code_opt = &resp["ExitCode"];
-		if !exit_code_opt.is_i64() {
-			return Err(eyre!(
-				"Failed to find integer ExitCode in response: [{:?}]",
-				resp,
-			))
-			.wrap_err("Failure querying exit code")
-			.suggestion("This is an internal error, please file an issue.");
-		}
-
-		Ok(exit_code_opt.as_i64().unwrap())
-	}
-
-	/// Setup the permission helper for this docker container if it's been configured.
-	///
-	/// # Errors
-	///
-	/// If we cannot talk to the docker socket, or cannot create the user.
-	pub async fn setup_permission_helper(&self) -> Result<()> {
-		let _guard = DOCK_USER_LOCK.lock().await;
-
-		if self.run_as_user_id != 0 || self.run_as_group_id != 0 {
-			let sudo_execution_id = self
-				.raw_execute_and_wait(
-					&[
-						"/usr/bin/env".to_owned(),
-						"bash".to_owned(),
-						"-c".to_owned(),
-						"hash sudo".to_owned(),
-					],
-					false,
-				)
-				.await
-				.wrap_err("Failure Checking for sudo existance inside docker container for permissions helper.")?;
-			let has_sudo =
-				Self::get_execution_status_code(&self.client, &sudo_execution_id).await
-					.wrap_err("Failure Checking for sudo existance inside docker container for permissions helper.")? == 0;
-
-			// This may be a re-used docker container in which case a user with 'dl'
-			// already exists.
-			let user_exist_id = self
-				.raw_execute_and_wait(
-					&[
-						"/usr/bin/env".to_owned(),
-						"bash".to_owned(),
-						"-c".to_owned(),
-						"getent passwd dl".to_owned(),
-					],
-					false,
-				)
-				.await
-				.wrap_err(
-					"Failure checking if user has already been created for permissions helper.",
-				)?;
-			if Self::get_execution_status_code(&self.client, &user_exist_id)
-				.await
-				.wrap_err(
-					"Failure checking if user has already been created for permissions helper.",
-				)? == 0
-			{
-				return Ok(());
-			}
-
-			// Create the user.
-			let creation_execution_id = match (self.user == "root", has_sudo) {
-				(true, _) | (false, false) => {
-					self.raw_execute_and_wait(&[
-						"/usr/bin/env".to_owned(),
-						"bash".to_owned(),
-						"-c".to_owned(),
-						format!("groupadd -g {} -o dl && useradd -u {} -g {} -o -c '' -m dl", self.run_as_group_id, self.run_as_user_id, self.run_as_group_id)
-					], false).await
-					.wrap_err("Failure creating user for permissions helper")?
-				},
-				(false, true) => {
-					self.raw_execute_and_wait(&[
-						"/usr/bin/env".to_owned(),
-						"bash".to_owned(),
-						"-c".to_owned(),
-						format!("sudo -n groupadd -g {} -o dl && sudo -n useradd -u {} -g {} -o -c '' -m dl", self.run_as_group_id, self.run_as_user_id, self.run_as_group_id)
-					], false).await
-					.wrap_err("Failure creating user for permissions helper")?
- }, - }; - if Self::get_execution_status_code(&self.client, &creation_execution_id).await? != 0 { - return Err(eyre!( - "Failed to get successful ExitCode from docker on user creation for permissions helper" - )); - } - - // Allow the user to sudo, if sudo is installed. - if has_sudo { - let sudo_user_creation_id = if self.user == "root" { - self.raw_execute_and_wait(&[ - "/usr/bin/env".to_owned(), - "bash".to_owned(), - "-c".to_owned(), - "mkdir -p /etc/sudoers.d && echo \"dl ALL=(root) NOPASSWD:ALL\" > /etc/sudoers.d/dl && chmod 0440 /etc/sudoers.d/dl".to_owned() - ], false).await - .wrap_err("Failure adding user to sudoers for permissions helper")? - } else { - self.raw_execute_and_wait(&[ - "/usr/bin/env".to_owned(), - "bash".to_owned(), - "-c".to_owned(), - "sudo -n mkdir -p /etc/sudoers.d && echo \"dl ALL=(root) NOPASSWD:ALL\" | sudo -n tee /etc/sudoers.d/dl && sudo -n chmod 0440 /etc/sudoers.d/dl".to_owned() - ], false).await - .wrap_err("Failure adding user to sudoers for permissions helper")? - }; - - if Self::get_execution_status_code(&self.client, &sudo_user_creation_id) - .await - .wrap_err("Failure adding user to sudoers for permissions helper.")? - != 0 - { - return Err(eyre!( - "Failed to setup passwordless sudo access for permissions helper!" - )); - } - } - - Ok(()) - } else { - Ok(()) - } - } - - /// Ensure the docker container exists. - /// - /// # Errors - /// - /// If we cannot talk to the docker socket, or there is a conflict creating the container. - pub async fn ensure_docker_container(&self) -> Result<()> { - let image_exists_url = format!("/images/{}/json", &self.image); - let image_exists = Self::docker_api_get( - &self.client, - &image_exists_url, - "Taking awhile to query if image is downloaded from docker. Will wait up to 30 seconds.".to_owned(), - None, - false - ) - .await - .wrap_err("Failed to check if image has downloaded.") - .is_ok(); - - if !image_exists { - self.download_image().await?; - } - let (container_exists, container_running) = self.is_container_created_and_running().await?; - - if !container_exists { - self.create_container().await?; - } - - if !container_running { - let url = format!("/containers/{}/start", self.container_name); - let _ = Self::docker_api_post( - &self.client, - &url, - "Docker is taking awhile to start the container. Will wait up to 30 seconds." - .to_owned(), - None, - None, - false, - ) - .await - .wrap_err("Failed to tell docker to start running the Docker container")?; - } - - let execution_id = self - .raw_execute_and_wait( - &[ - "/usr/bin/env".to_owned(), - "bash".to_owned(), - "-c".to_owned(), - "hash bash".to_owned(), - ], - false, - ) - .await - .wrap_err("Failed to check for existance of bash in Docker container")?; - - let has_bash = Self::get_execution_status_code(&self.client, &execution_id).await?; - if has_bash != 0 { - return Err(eyre!( - "Docker Image: [{}] does not have bash! This is required for dev-loop!", - self.image, - )) - .note(format!("To replicate you can run: `docker run --rm -it {} /usr/bin/env bash -c \"hash bash\"`", self.image)) - .note(format!("The container is also still running with the name: [{}]", self.container_name)); - } - - let perm_helper_setup = self - .setup_permission_helper() - .await - .suggestion(_PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION); - if let Err(perm_helper_err) = perm_helper_setup { - return Err(perm_helper_err); - } - - Ok(()) - } - - /// Determine if the container is attached to a particular network. - /// - /// `network_id`: the id of the network to attach. 
- pub async fn is_network_attached(&self, network_id: &str) -> bool { - let url = format!("/containers/{}/json", self.container_name); - let body_res = Self::docker_api_get( - &self.client, - &url, - "Taking awhile to get container status from docker. Will wait up to 30 seconds." - .to_owned(), - None, - true, - ) - .await; - if body_res.is_err() { - return false; - } - let body = body_res.unwrap(); - let id_as_opt = body["Id"].as_str(); - if id_as_opt.is_none() { - return false; - } - let id = id_as_opt.unwrap(); - - let network_url = format!("/networks/dl-{}", network_id); - let network_body_res = Self::docker_api_get( - &self.client, - &network_url, - "Taking awhile to get network status from docker. Will wait up to 30 seconds." - .to_owned(), - None, - true, - ) - .await; - if network_body_res.is_err() { - return false; - } - let network_body = network_body_res.unwrap(); - let networks_obj_opt = network_body["Containers"].as_object(); - if networks_obj_opt.is_none() { - return false; - } - let networks_obj = networks_obj_opt.unwrap(); - - networks_obj.contains_key(id) - } - - /// Ensure a particular network has been attached to this container. - /// - /// `network_id`: The Network ID to attach too. - /// - /// # Errors - /// - /// If we fail to talk to the docker socket, or connect the container to the network. - pub async fn ensure_network_attached(&self, network_id: &str) -> Result<()> { - if !self.is_network_attached(network_id).await { - let url = format!("/networks/dl-{}/connect", network_id); - let body = serde_json::json!({ - "Container": self.container_name, - "EndpointConfig": { - "Aliases": [self.hostname], - } - }); - - let _ = Self::docker_api_post( - &self.client, - &url, - "Docker is taking awhile to attach a network to the container. Will wait up to 30 seconds.".to_owned(), - Some(body), - None, - false - ).await - .wrap_err("Failed to attach network to Docker Container.")?; - } - - Ok(()) + self.container.get_container_name() } } #[async_trait::async_trait] -impl Executor for DockerExecutor { +impl ExecutorTrait for Executor { #[must_use] fn meets_requirements(&self, reqs: &[NeedsRequirement]) -> bool { let mut met = true; @@ -1489,13 +249,6 @@ impl Executor for DockerExecutor { met } - #[allow( - clippy::cast_possible_truncation, - clippy::cognitive_complexity, - clippy::too_many_lines, - clippy::used_underscore_binding, - unused_assignments - )] #[must_use] async fn execute( &self, @@ -1504,193 +257,64 @@ impl Executor for DockerExecutor { helper_src_line: &str, task: &ExecutableTask, worker_count: usize, - ) -> isize { - // Execute a particular task inside the docker executor. - // - // 1. Create the network, and stand it up if not. - // 2. Create the container, and stand it up if not. - // 3. Connect the container to the appropriate network. - // 4. Create a temporary directory for the pipeline id, and the task name. - // 5. Write the task file the user specified. - // 6. Write an "entrypoint" that sources in the helpers, and calls the script. - // 7. Execute the script, and wait for it to finish. 
- - let res = Self::ensure_network_exists(&self.client, task.get_pipeline_id()).await; - if let Err(network_creation_error) = res { - error!("{:?}", network_creation_error); - return 10 as isize; - } - let container_res = self.ensure_docker_container().await; - if let Err(container_err) = container_res { - error!("{:?}", container_err); - return 10 as isize; - } - let attach_res = self.ensure_network_attached(task.get_pipeline_id()).await; - if let Err(attach_err) = attach_res { - error!("{:?}", attach_err); - return 10 as isize; - } - - let mut tmp_path = get_tmp_dir(); - let mut tmp_path_in_docker = async_std::path::PathBuf::from("/tmp"); - tmp_path.push(task.get_pipeline_id().to_owned() + "-dl-host"); - tmp_path_in_docker.push(task.get_pipeline_id().to_owned() + "-dl-host"); - let res = async_std::fs::create_dir_all(tmp_path.clone()).await - .wrap_err("Failed to create temporary directory for dev-loop state") - .note("If you're not able to tell what the issue is (e.g. disk full), please file an issue."); - if let Err(dir_err) = res { - error!("{:?}", dir_err,); - return 10; - } - - let mut regular_task = tmp_path.clone(); - let mut regular_task_in_docker = tmp_path_in_docker.clone(); - regular_task.push(task.get_task_name().to_owned() + ".sh"); - regular_task_in_docker.push(task.get_task_name().to_owned() + ".sh"); - debug!("Docker task writing to path: {:?}", regular_task); - let write_res = - async_std::fs::write(®ular_task, task.get_contents().get_contents()).await - .wrap_err("Failed to write script to run for dev-loop") - .note("If you're not able to tell what the issue is (e.g. disk full), please file an issue."); - if let Err(write_err) = write_res { - error!("{:?}", write_err); - return 10; - } - let path_as_str = regular_task_in_docker.to_str().unwrap(); - - let epoch = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(); - let mut stdout_log_path = tmp_path.clone(); - let mut stdout_log_path_in_docker = tmp_path_in_docker.clone(); - stdout_log_path.push(format!("{}-{}-out.log", epoch, task.get_task_name())); - stdout_log_path_in_docker.push(format!("{}-{}-out.log", epoch, task.get_task_name())); - let mut stderr_log_path = tmp_path.clone(); - let mut stderr_log_path_in_docker = tmp_path_in_docker.clone(); - stderr_log_path.push(format!("{}-{}-err.log", epoch, task.get_task_name())); - stderr_log_path_in_docker.push(format!("{}-{}-err.log", epoch, task.get_task_name())); - { - if let Err(log_err) = async_std::fs::File::create(&stdout_log_path) - .await - .wrap_err("Failed to create file for logs to stdout") - .note( - "If the issue isn't immediately clear (e.g. disk full), please file an issue.", - ) { - error!("{:?}", log_err); - return 10; - } - } - { - if let Err(log_err) = async_std::fs::File::create(&stderr_log_path) - .await - .wrap_err("Failed to create file for logs to stdout") - .note( - "If the issue isn't immediately clear (e.g. disk full), please file an issue.", - ) { - error!("Failed to create stderr log file: {:?}", log_err); - return 10; - } - } - if cfg!(target_family = "unix") { - { - use std::os::unix::fs::PermissionsExt; - let log_permissions = std::fs::Permissions::from_mode(0o666); - - if std::fs::set_permissions(&stdout_log_path, log_permissions.clone()).is_err() { - warn!("Failed to mark stdout_log as world writable! May cause errors if using a lower-priveleged user!"); - } - if std::fs::set_permissions(&stderr_log_path, log_permissions).is_err() { - warn!("Failed to mark stderr_log as world writable! 
May cause errors if using a lower-priveleged user!"); - } - } - } - - let stdout_path_as_str = stdout_log_path_in_docker.to_str().unwrap(); - let stderr_path_as_str = stderr_log_path_in_docker.to_str().unwrap(); - - let entry_point_file = format!( - "#!/usr/bin/env bash - -{opening_bracket} - -cd /mnt/dl-root/ - -# Source Helpers -{helper} -eval \"$(declare -F | sed -e 's/-f /-fx /')\" - -{script} {arg_str} - -{closing_bracket} >{stdout_log_path} 2>{stderr_log_path}", - helper = helper_src_line, - script = path_as_str, - arg_str = task.get_arg_string(), - opening_bracket = "{", - closing_bracket = "}", - stdout_log_path = stdout_path_as_str, - stderr_log_path = stderr_path_as_str, - ); - tmp_path.push(task.get_task_name().to_owned() + "-entrypoint.sh"); - tmp_path_in_docker.push(task.get_task_name().to_owned() + "-entrypoint.sh"); - debug!("Task entrypoint is being written too: {:?}", tmp_path); - let write_res = std::fs::write(&tmp_path, entry_point_file) - .wrap_err("Failed to write entrypoint script for dev-loop.") - .note("If the issue isn't immediately clear (e.g. disk full), please file an issue."); - if let Err(write_err) = write_res { - error!("{:?}", write_err); - return 10; - } - - if cfg!(target_family = "unix") { - use std::os::unix::fs::PermissionsExt; - let executable_permissions = std::fs::Permissions::from_mode(0o777); + ) -> Result { + ensure_network_exists(&self.client, task.get_pipeline_id()).await?; + ensure_docker_container( + &self.client, + &self.project_root, + &self.tmp_dir, + &self.container, + ) + .await?; + ensure_network_attached( + &self.client, + self.container.get_container_name(), + self.container.get_hostname(), + task.get_pipeline_id(), + ) + .await?; - if let Err(exec_err) = - std::fs::set_permissions(&tmp_path, executable_permissions.clone()) - { - error!( - "{:?}", - Err::<(), IoError>(exec_err) - .wrap_err("Failed to mark script as executable") - .note("If the issue is not clear pleas file an issue.") - .unwrap_err() - ); - } - if let Err(exec_err) = std::fs::set_permissions(®ular_task, executable_permissions) { - error!( - "{:?}", - Err::<(), IoError>(exec_err) - .wrap_err("Failed to mark task script as executable") - .note("If the issue is not clear pleas file an issue.") - .unwrap_err() - ); - } - } + let shared_dir = create_executor_shared_dir(task.get_pipeline_id())?; - let entrypoint_as_str = tmp_path_in_docker.to_str().unwrap(); + let (stdout_host_log_path, stderr_host_log_path) = + create_log_proxy_files(&shared_dir, task)?; + let stdout_path_in_docker = rewrite_tmp_dir(&self.tmp_dir, &stdout_host_log_path); + let stderr_path_in_docker = rewrite_tmp_dir(&self.tmp_dir, &stderr_host_log_path); - let command_res = self - .raw_execute(&[entrypoint_as_str.to_owned()], true) - .await - .wrap_err("Failed to execute script inside docker container."); - if let Err(command_err) = command_res { - error!("{:?}", command_err); - return 10; - } - let exec_id = command_res.unwrap(); + let entrypoint = create_entrypoint( + "/mnt/dl-root", + &self.tmp_dir, + shared_dir, + helper_src_line, + task, + true, + Some(stdout_path_in_docker), + Some(stderr_path_in_docker), + )?; + let entrypoint_as_str = entrypoint.to_string_lossy().to_string(); + let exec_id = execute_command_in_container_async( + &self.client, + self.container.get_container_name(), + &[entrypoint_as_str], + self.container.get_environment_to_export(), + self.container.get_base_user(), + true, + self.container.get_cloned_proxy_user_id(), + self.container.get_cloned_proxy_group_id(), + ) + .await + 
.wrap_err("Failed to execute script inside docker container.")?; let has_finished = Arc::new(AtomicBool::new(false)); - let flush_channel_clone = log_channel.clone(); let flush_task_name = task.get_task_name().to_owned(); let flush_is_finished_clone = has_finished.clone(); let flush_task = async_std::task::spawn(async move { let mut line = String::new(); - let file = std::fs::File::open(stdout_log_path) + let file = File::open(stdout_host_log_path) .expect("Failed to open log file even though we created it!"); - let err_file = std::fs::File::open(stderr_log_path) + let err_file = File::open(stderr_host_log_path) .expect("Failed to open stderr log file even though we created it!"); let mut reader = BufReader::new(file); let mut stderr_reader = BufReader::new(err_file); @@ -1718,13 +342,11 @@ eval \"$(declare -F | sed -e 's/-f /-fx /')\" } }); - let mut rc = 0; + let rc: i32; - // Loop until completion... loop { - // Has the exec finished? - if Self::has_execution_finished(&self.client, &exec_id).await { - let rc_res = Self::get_execution_status_code(&self.client, &exec_id) + if has_command_finished(&self.client, &exec_id).await { + let rc_res = get_command_exit_code(&self.client, &exec_id) .await .wrap_err("Failed to check if your task has finished."); if let Err(rc_err) = rc_res { @@ -1749,7 +371,7 @@ eval \"$(declare -F | sed -e 's/-f /-fx /')\" has_finished.store(true, Ordering::Release); flush_task.await; - rc as isize + Ok(rc) } } @@ -1765,7 +387,7 @@ mod unit_tests { let pb = PathBuf::from("/tmp/non-existant"); assert!( - DockerExecutor::new(&pb, &args, &provided_conf, None,).is_err(), + Executor::new(&pb, &args, &provided_conf, None,).is_err(), "Docker Executor without a name_prefix should error.", ); } @@ -1777,7 +399,7 @@ mod unit_tests { let pb = PathBuf::from("/tmp/non-existant"); assert!( - DockerExecutor::new(&pb, &args, &provided_conf, None,).is_err(), + Executor::new(&pb, &args, &provided_conf, None,).is_err(), "Docker executor without an image should error.", ); } @@ -1790,29 +412,12 @@ mod unit_tests { let pb = PathBuf::from("/tmp/non-existant"); assert!( - DockerExecutor::new(&pb, &args, &provided_conf, None,).is_ok(), + Executor::new(&pb, &args, &provided_conf, None,).is_ok(), "Docker executor with an image/name prefix should succeed!", ); } } - #[test] - fn get_container_name() { - let mut args = HashMap::new(); - args.insert("name_prefix".to_owned(), "name-prefix-".to_owned()); - args.insert("image".to_owned(), "localhost:5000/blah:latest".to_owned()); - let provided_conf = Vec::new(); - let pb = PathBuf::from("/tmp/non-existant"); - - let de = DockerExecutor::new(&pb, &args, &provided_conf, None) - .expect("Docker Executor in get_name should be able to be constructed!"); - - assert!( - de.get_container_name().starts_with("dl-name-prefix-"), - "Docker Executor Name needs to start with dl-name-prefix-!", - ); - } - #[test] fn meets_requirements() { let mut args = HashMap::new(); @@ -1825,7 +430,7 @@ mod unit_tests { )); let pb = PathBuf::from("/tmp/non-existant"); - let de = DockerExecutor::new(&pb, &args, &provided_conf, None) + let de = Executor::new(&pb, &args, &provided_conf, None) .expect("Docker Executor in meets_requirements should be able to be constructed"); assert!( diff --git a/src/executors/docker_engine/container.rs b/src/executors/docker_engine/container.rs new file mode 100644 index 0000000..8cae42f --- /dev/null +++ b/src/executors/docker_engine/container.rs @@ -0,0 +1,305 @@ +use color_eyre::{eyre::eyre, Report, Result, Section}; +use 
std::{collections::HashMap, env::var as env_var, path::PathBuf};
+use tracing::warn;
+
+const CONTAINER_NAME_ARG: &str = "name_prefix";
+const HOSTNAME_ARG: &str = "hostname";
+const IMAGE_ARG: &str = "image";
+const ENV_TO_EXPORT_ARG: &str = "export_env";
+const MOUNTS_ARG: &str = "extra_mounts";
+const PERMISSION_HELPER_ARG: &str = "experimental_permission_helper";
+const TCP_PORTS_TO_EXPOSE_ARG: &str = "tcp_ports_to_expose";
+const USER_ARG: &str = "user";
+const UDP_PORTS_TO_EXPOSE_ARG: &str = "udp_ports_to_expose";
+
+/// Represents a `DockerContainer` managed by the docker-engine/docker executor.
+#[derive(Debug)]
+pub struct DockerContainerInfo {
+	/// The container name to use.
+	container_name: String,
+	/// The docker image.
+	image: String,
+	/// A list of environment variables to export.
+	environment_to_export: Vec<String>,
+	/// A list of extra mounts.
+	extra_mounts: Vec<String>,
+	/// The list of tcp ports to expose.
+	tcp_ports_to_expose: Vec<u32>,
+	/// The list of udp ports to expose.
+	udp_ports_to_expose: Vec<u32>,
+	/// The hostname of this container.
+	hostname: String,
+	/// The base user to use.
+	base_user: String,
+	/// The proxied user id.
+	proxy_user_id: Option<u32>,
+	/// The proxied group id.
+	proxy_group_id: Option<u32>,
+}
+
+impl DockerContainerInfo {
+	pub fn new(
+		executor_args: &HashMap<String, String>,
+		project_root_str: &str,
+		random_str: &str,
+	) -> Result<Self> {
+		let (proxy_user, proxy_group) = get_proxy_user_information(executor_args);
+
+		Ok(Self {
+			container_name: container_name_from_arg(executor_args, random_str)?,
+			image: image_from_arg(executor_args)?,
+			environment_to_export: get_env_vars_to_export(executor_args),
+			extra_mounts: get_extra_mounts(executor_args, project_root_str),
+			tcp_ports_to_expose: tcp_ports_to_expose(executor_args),
+			udp_ports_to_expose: udp_ports_to_expose(executor_args),
+			hostname: get_hostname(executor_args),
+			base_user: get_user(executor_args),
+			proxy_user_id: proxy_user,
+			proxy_group_id: proxy_group,
+		})
+	}
+
+	pub fn get_container_name(&self) -> &str {
+		&self.container_name
+	}
+
+	pub fn get_image(&self) -> &str {
+		&self.image
+	}
+
+	pub fn get_environment_to_export(&self) -> &[String] {
+		&self.environment_to_export
+	}
+
+	pub fn get_extra_mounts(&self) -> &[String] {
+		&self.extra_mounts
+	}
+
+	pub fn get_tcp_ports_to_expose(&self) -> &[u32] {
+		&self.tcp_ports_to_expose
+	}
+
+	pub fn get_udp_ports_to_expose(&self) -> &[u32] {
+		&self.udp_ports_to_expose
+	}
+
+	pub fn get_hostname(&self) -> &str {
+		&self.hostname
+	}
+
+	pub fn get_base_user(&self) -> &str {
+		&self.base_user
+	}
+
+	pub fn get_proxy_user_id(&self) -> Option<&u32> {
+		self.proxy_user_id.as_ref()
+	}
+
+	pub fn get_cloned_proxy_user_id(&self) -> Option<u32> {
+		self.proxy_user_id
+	}
+
+	pub fn get_proxy_group_id(&self) -> Option<&u32> {
+		self.proxy_group_id.as_ref()
+	}
+
+	pub fn get_cloned_proxy_group_id(&self) -> Option<u32> {
+		self.proxy_group_id
+	}
+}
+
+fn container_name_from_arg(args: &HashMap<String, String>, random_str: &str) -> Result<String> {
+	let mut container_name = "dl-".to_owned();
+	if let Some(user_specified_prefix) = args.get(CONTAINER_NAME_ARG) {
+		container_name += user_specified_prefix;
+	} else {
+		return Err(eyre!(
+			"Docker Containers require a `name_prefix` field to know how to name containers!"
+		)).suggestion("Add a `name_prefix` field to `params` that specifies the name prefix for containers")
+		.note("You can find the full list of fields here: https://dev-loop.kungfury.io/docs/schemas/executor-conf");
+	}
+	container_name += &random_str;
+
+	Ok(container_name)
+}
+
+fn image_from_arg(args: &HashMap<String, String>) -> Result<String> {
+	let image;
+	if let Some(image_identifier) = args.get(IMAGE_ARG) {
+		image = image_identifier.to_owned();
+	} else {
+		return Err(eyre!(
+			"Docker Container requires an `image` to know which docker image to use."
+		)).suggestion("Add an `image` field to `params` that specifies the docker image to use.")
+		.note("You can find the full list of fields here: https://dev-loop.kungfury.io/docs/schemas/executor-conf");
+	}
+
+	Ok(image)
+}
+
+fn get_env_vars_to_export(args: &HashMap<String, String>) -> Vec<String> {
+	let mut env_vars = Vec::new();
+
+	if let Some(envs_to_export) = args.get(ENV_TO_EXPORT_ARG) {
+		env_vars = envs_to_export
+			.split(',')
+			.map(|the_str| {
+				env_var(the_str)
+					.map_or_else(|_| the_str.to_owned(), |val| format!("{}={}", the_str, val))
+			})
+			.collect::<Vec<String>>();
+	}
+
+	env_vars
+}
+
+fn get_extra_mounts(args: &HashMap<String, String>, project_root_str: &str) -> Vec<String> {
+	let mut extra_mounts = Vec::new();
+
+	if let Some(mount_str_ref) = args.get(MOUNTS_ARG) {
+		extra_mounts = mount_str_ref
+			.split(',')
+			.filter_map(|item| {
+				let mounts = item.split(':').collect::<Vec<&str>>();
+				if mounts.len() != 2 {
+					warn!(
+						"{:?}",
+						Err::<(), Report>(eyre!(
+							"Mount String for Docker Container: [{}] is invalid, missing path for container. Will not mount.",
+							item,
+						))
+						.note("Mounts should be in the format: `host_path:path_in_container`")
+						.unwrap_err()
+					);
+					return None;
+				}
+
+				let src = mounts[0];
+				let dest = mounts[1];
+
+				let src =
+					if src.starts_with('~') {
+						let potential_home_dir = crate::dirs::home_dir();
+						if potential_home_dir.is_none() {
+							warn!(
+								"{:?}",
+								Err::<(), Report>(eyre!(
+									"Mount String: [{}] for Docker Container's source path is relative to the home directory, but the home directory couldn't be found. Will not mount.",
+									item,
+								))
+								.suggestion("You can manually specify the home directory with the `HOME` environment variable.")
+								.unwrap_err()
+							);
+							return None;
+						}
+						let home_dir = potential_home_dir.unwrap();
+						let home_dir = home_dir.to_str();
+						if home_dir.is_none() {
+							warn!(
+								"{:?}",
+								Err::<(), Report>(eyre!(
+									"Home directory is not set to a UTF-8 only string."
+								)).note("If you're not sure how to solve this error, please open an issue.").unwrap_err(),
+							);
+							return None;
+						}
+						let home_dir = home_dir.unwrap();
+
+						src.replace("~", home_dir)
+					} else if src.starts_with('/') {
+						src.to_owned()
+					} else {
+						project_root_str.to_owned() + "/" + src
+					};
+
+				let src_as_pb = PathBuf::from(&src);
+				if !src_as_pb.exists() {
+					warn!(
+						"{:?}",
+						Err::<(), Report>(eyre!(
+							"Mount String: [{}] specified a source directory: [{}] that does not exist. Will not mount.",
Will not mount.", + item, + src, + )).unwrap_err(), + ); + return None; + } + + Some(format!("{}:{}", src, dest)) + }) + .collect::>(); + } + + extra_mounts +} + +fn tcp_ports_to_expose(args: &HashMap) -> Vec { + let mut tcp_ports_to_expose = Vec::new(); + if let Some(ports_to_expose) = args.get(TCP_PORTS_TO_EXPOSE_ARG) { + tcp_ports_to_expose = ports_to_expose + .split(',') + .filter_map(|item| { + let item_pr = item.parse::(); + if item_pr.is_err() { + warn!( + "Not exposing tcp port: [{}] as it is not a valid positive number.", + item + ); + } + item_pr.ok() + }) + .collect::>(); + } + + tcp_ports_to_expose +} + +fn udp_ports_to_expose(args: &HashMap) -> Vec { + let mut udp_ports_to_expose = Vec::new(); + if let Some(ports_to_expose) = args.get(UDP_PORTS_TO_EXPOSE_ARG) { + udp_ports_to_expose = ports_to_expose + .split(',') + .filter_map(|item| { + let item_pr = item.parse::(); + if item_pr.is_err() { + warn!( + "Not exposing udp port: [{}] as it is not a valid positive number.", + item + ); + } + + item_pr.ok() + }) + .collect::>(); + } + + udp_ports_to_expose +} + +fn get_hostname(args: &HashMap) -> String { + if let Some(hostname_ref) = args.get(HOSTNAME_ARG) { + hostname_ref.to_owned() + } else { + let mut string = args.get(CONTAINER_NAME_ARG).unwrap().to_owned(); + string.pop(); + string + } +} + +fn get_user(args: &HashMap) -> String { + args.get(USER_ARG) + .map_or_else(|| "root".to_owned(), String::from) +} + +fn get_proxy_user_information(args: &HashMap) -> (Option, Option) { + let mut proxy_user_id = None; + let mut proxy_group_id = None; + if let Some(permission_helper_active) = args.get(PERMISSION_HELPER_ARG) { + if &permission_helper_active.to_ascii_lowercase() == "true" { + proxy_user_id = Some(users::get_effective_uid()); + proxy_group_id = Some(users::get_effective_gid()); + } + } + + (proxy_user_id, proxy_group_id) +} diff --git a/src/executors/docker_engine/container_api.rs b/src/executors/docker_engine/container_api.rs new file mode 100644 index 0000000..a21ad00 --- /dev/null +++ b/src/executors/docker_engine/container_api.rs @@ -0,0 +1,294 @@ +use super::{ + docker_api_delete, docker_api_get, docker_api_post, download_image, + execute_command_in_container, get_command_exit_code, setup_permission_helper, + DockerContainerInfo, +}; + +use color_eyre::{ + eyre::{eyre, WrapErr}, + Result, Section, +}; +use isahc::HttpClient; + +/// List all the devloop containers. +pub async fn list_devloop_containers(client: &HttpClient) -> Result> { + let resp = docker_api_get( + client, + "/containers/json?all=true", + "Taking awhile to query containers from docker. 
+		None,
+		true,
+	)
+	.await?;
+	let mut container_names = Vec::new();
+
+	if let Some(containers) = resp.as_array() {
+		for container in containers {
+			let names_opt = container.get("Names");
+			if names_opt.is_none() {
+				continue;
+			}
+
+			let names_untyped = names_opt.unwrap();
+			let names_typed_opt = names_untyped.as_array();
+			if names_typed_opt.is_none() {
+				continue;
+			}
+			let names = names_typed_opt.unwrap();
+
+			let mut dl_name = String::new();
+			for name in names {
+				if let Some(name_str) = name.as_str() {
+					if name_str.starts_with("/dl-") {
+						dl_name = name_str.to_owned();
+					}
+				}
+			}
+
+			if dl_name.is_empty() {
+				continue;
+			}
+
+			container_names.push(dl_name);
+		}
+	}
+
+	Ok(container_names)
+}
+
+pub async fn delete_container(client: &HttpClient, container_name: &str) {
+	let _ = docker_api_post(
+		client,
+		&format!("/containers{}/kill", container_name),
+		"Docker is not killing the container in a timely manner. Will wait up to 30 seconds."
+			.to_owned(),
+		None,
+		None,
+		false,
+	)
+	.await;
+	let _ = docker_api_delete(
+		&client,
+		&format!("/containers{}?v=true&force=true&link=true", container_name),
+		"Docker is taking awhile to remove the container. Will wait up to 30 seconds.".to_owned(),
+		None,
+		None,
+		false,
+	)
+	.await;
+}
+
+/// Determine if the container is created, and then if it's running.
+///
+/// # Errors
+///
+/// Errors when the docker api cannot be talked to.
+pub async fn is_container_created_and_running(
+	client: &HttpClient,
+	container_name: &str,
+) -> Result<(bool, bool)> {
+	let url = format!("/containers/{}/json?size=false", container_name);
+	let mut is_created = false;
+	let mut is_running = false;
+
+	// Ignore errors since a 404 for no container is an Error.
+	if let Ok(value) = docker_api_get(
+		client,
+		&url,
+		"Taking awhile to query container status from docker. Will wait up to 30 seconds."
+			.to_owned(),
+		None,
+		true,
+	)
+	.await
+	{
+		is_created = true;
+		let is_running_status = &value["State"]["Running"];
+		if is_running_status.is_boolean() {
+			is_running = is_running_status.as_bool().unwrap();
+		}
+	}
+
+	Ok((is_created, is_running))
+}
+
+/// Creates the container, should only be called when it does not yet exist.
+///
+/// # Errors
+///
+/// Errors when the docker socket cannot be talked to, or there is a conflict
+/// creating the container.
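+///
+/// A minimal usage sketch (hypothetical caller; `client` is an already
+/// connected `HttpClient`, `info` a parsed `DockerContainerInfo`, and the
+/// paths are made up for illustration):
+///
+/// ```ignore
+/// // Binds the project root at /mnt/dl-root, and the tmp dir at /tmp,
+/// // inside the container.
+/// create_container(&client, "/path/to/project", "/tmp/dl", &info).await?;
+/// ```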
+pub async fn create_container(
+	client: &HttpClient,
+	project_root: &str,
+	tmp_dir: &str,
+	docker_container: &DockerContainerInfo,
+) -> Result<()> {
+	let mut mounts = Vec::new();
+	mounts.push(serde_json::json!({
+		"Source": project_root,
+		"Target": "/mnt/dl-root",
+		"Type": "bind",
+		"Consistency": "consistent",
+	}));
+	mounts.push(serde_json::json!({
+		"Source": tmp_dir,
+		"Target": "/tmp",
+		"Type": "bind",
+		"Consistency": "consistent",
+	}));
+	for emount in docker_container.get_extra_mounts() {
+		let mut split = emount.split(':');
+		let source: &str = split.next().unwrap();
+		let target: &str = split.next().unwrap();
+		mounts.push(serde_json::json!({
+			"Source": source,
+			"Target": target,
+			"Type": "bind",
+			"Consistency": "consistent",
+		}));
+	}
+
+	let mut port_mapping = serde_json::map::Map::<String, serde_json::Value>::new();
+	let mut host_config_mapping = serde_json::map::Map::<String, serde_json::Value>::new();
+
+	for tcp_port in docker_container.get_tcp_ports_to_expose() {
+		port_mapping.insert(format!("{}/tcp", tcp_port), serde_json::json!({}));
+		host_config_mapping.insert(
+			format!("{}/tcp", tcp_port),
+			serde_json::json!([serde_json::json!({ "HostPort": format!("{}", tcp_port) }),]),
+		);
+	}
+	for udp_port in docker_container.get_udp_ports_to_expose() {
+		port_mapping.insert(format!("{}/udp", udp_port), serde_json::json!({}));
+		host_config_mapping.insert(
+			format!("{}/udp", udp_port),
+			serde_json::json!([serde_json::json!({ "HostPort": format!("{}", udp_port) }),]),
+		);
+	}
+
+	let url = format!(
+		"/containers/create?name={}",
+		docker_container.get_container_name()
+	);
+	let body = serde_json::json!({
+		"Cmd": ["tail", "-f", "/dev/null"],
+		"Entrypoint": "",
+		"Image": docker_container.get_image(),
+		"Hostname": docker_container.get_hostname(),
+		"User": docker_container.get_base_user(),
+		"HostConfig": {
+			"AutoRemove": true,
+			"Mounts": mounts,
+			"Privileged": true,
+			"PortBindings": host_config_mapping,
+		},
+		"WorkingDir": "/mnt/dl-root",
+		"AttachStdout": true,
+		"AttachStderr": true,
+		"Privileged": true,
+		"Tty": true,
+		"ExposedPorts": port_mapping,
+	});
+	let _ = docker_api_post(
+		client,
+		&url,
+		"Docker is not creating the container in a timely manner. Will wait up to 30 seconds."
+			.to_owned(),
+		Some(body),
+		None,
+		false,
+	)
+	.await
+	.wrap_err("Failed to create the docker container")?;
+
+	Ok(())
+}
+
+/// Ensure the docker container exists.
+///
+/// # Errors
+///
+/// If we cannot talk to the docker socket, or there is a conflict creating the container.
+pub async fn ensure_docker_container(
+	client: &HttpClient,
+	project_root: &str,
+	tmp_dir: &str,
+	container: &DockerContainerInfo,
+) -> Result<()> {
+	let image_exists_url = format!("/images/{}/json", container.get_image());
+	let image_exists = docker_api_get(
+		client,
+		&image_exists_url,
+		"Taking awhile to query if image is downloaded from docker. Will wait up to 30 seconds."
+			.to_owned(),
+		None,
+		false,
+	)
+	.await
+	.wrap_err("Failed to check if image has downloaded.")
+	.is_ok();
+
+	if !image_exists {
+		download_image(client, container.get_image()).await?;
+	}
+	let (container_exists, container_running) =
+		is_container_created_and_running(client, container.get_container_name()).await?;
+
+	if !container_exists {
+		create_container(client, project_root, tmp_dir, container).await?;
+	}
+
+	if !container_running {
+		let url = format!("/containers/{}/start", container.get_container_name());
+		let _ = docker_api_post(
+			client,
+			&url,
+			"Docker is taking awhile to start the container. Will wait up to 30 seconds."
+				.to_owned(),
+			None,
+			None,
+			false,
+		)
+		.await
+		.wrap_err("Failed to tell docker to start running the Docker container")?;
+	}
+
+	let execution_id = execute_command_in_container(
+		client,
+		container.get_container_name(),
+		&[
+			"/usr/bin/env".to_owned(),
+			"bash".to_owned(),
+			"-c".to_owned(),
+			"hash bash".to_owned(),
+		],
+		&[],
+		container.get_base_user(),
+		false,
+		None,
+		None,
+	)
+	.await
+	.wrap_err("Failed to check for existence of bash in Docker container")?;
+
+	let has_bash = get_command_exit_code(client, &execution_id).await?;
+	if has_bash != 0 {
+		return Err(eyre!(
+			"Docker Image: [{}] does not have bash! This is required for dev-loop!",
+			container.get_image(),
+		))
+		.note(format!(
+			"To replicate you can run: `docker run --rm -it {} /usr/bin/env bash -c \"hash bash\"`",
+			container.get_image()
+		))
+		.note(format!(
+			"The container is also still running with the name: [{}]",
+			container.get_container_name()
+		));
+	}
+
+	setup_permission_helper(client, container).await?;
+
+	Ok(())
+}
diff --git a/src/executors/docker_engine/execution_api.rs b/src/executors/docker_engine/execution_api.rs
new file mode 100644
index 0000000..9cbd9dc
--- /dev/null
+++ b/src/executors/docker_engine/execution_api.rs
@@ -0,0 +1,182 @@
+use super::{docker_api_get, docker_api_post};
+
+use color_eyre::{
+	eyre::{eyre, WrapErr},
+	Result, Section,
+};
+use isahc::HttpClient;
+use std::{convert::TryFrom, time::Duration};
+
+/// Execute a command, and return the "execution id" to check back on it.
+///
+/// # Errors
+///
+/// Errors if we fail to create an exec instance with docker.
+#[allow(clippy::too_many_arguments)]
+pub async fn execute_command_in_container_async(
+	client: &HttpClient,
+	container_name: &str,
+	command: &[String],
+	environment_to_export: &[String],
+	user: &str,
+	needs_forced_ids: bool,
+	force_user_id: Option<u32>,
+	force_group_id: Option<u32>,
+) -> Result<String> {
+	let url = format!("/containers/{}/exec", container_name);
+	let body = if needs_forced_ids && (force_user_id.is_some() && force_group_id.is_some()) {
+		serde_json::json!({
+			"AttachStdout": true,
+			"AttachStderr": true,
+			"Tty": false,
+			"User": &format!("{}:{}", force_user_id.unwrap(), force_group_id.unwrap()),
+			"Privileged": true,
+			"Cmd": command,
+			"Env": environment_to_export,
+		})
+	} else {
+		serde_json::json!({
+			"AttachStdout": true,
+			"AttachStderr": true,
+			"Tty": false,
+			"User": user,
+			"Privileged": true,
+			"Cmd": command,
+			"Env": environment_to_export,
+		})
+	};
+
+	let resp = docker_api_post(
+		client,
+		&url,
+		"Docker is taking awhile to start running a new command. Will wait up to 30 seconds."
+			.to_owned(),
+		Some(body),
+		None,
+		true,
+	)
+	.await
+	.wrap_err("Failed to send new command to Docker container")?;
+
+	let potential_id = &resp["Id"];
+	if !potential_id.is_string() {
+		return Err(eyre!(
+			"Failed to find \"Id\" in response from docker: [{:?}]",
+			resp,
+		));
+	}
+	let exec_id = potential_id.as_str().unwrap().to_owned();
+
+	let start_url = format!("/exec/{}/start", &exec_id);
+	let start_body = serde_json::json!({
+		"Detach": true,
+		"Tty": false,
+	});
+
+	let _ = docker_api_post(
+		client,
+		&start_url,
+		"Docker is taking awhile to start running a new command. Will wait up to 30 seconds."
+			.to_owned(),
+		Some(start_body),
+		None,
+		false,
+	)
+	.await
+	.wrap_err("Failed to tell Docker container to start executing command")?;
+
+	Ok(exec_id)
+}
+
+/// Determine if a particular execution ID has finished executing.
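+///
+/// A sketch of the polling pattern this is meant for (hypothetical caller;
+/// `exec_id` comes from `execute_command_in_container_async` above):
+///
+/// ```ignore
+/// while !has_command_finished(&client, &exec_id).await {
+/// 	async_std::task::sleep(std::time::Duration::from_micros(10)).await;
+/// }
+/// let exit_code = get_command_exit_code(&client, &exec_id).await?;
+/// ```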
+pub async fn has_command_finished(client: &HttpClient, execution_id: &str) -> bool {
+	let url = format!("/exec/{}/json", execution_id);
+	let resp_res = docker_api_get(
+		client,
+		&url,
+		"Taking awhile to determine if command has finished running in docker. Will wait up to 30 seconds.".to_owned(),
+		None,
+		true
+	).await;
+	if resp_res.is_err() {
+		return false;
+	}
+	let resp = resp_res.unwrap();
+	let is_running_opt = &resp["Running"];
+	if !is_running_opt.is_boolean() {
+		return false;
+	}
+
+	!is_running_opt.as_bool().unwrap()
+}
+
+/// Execute a raw command, and wait until it's finished. Returns the execution id so you can check up on it.
+///
+/// # Errors
+///
+/// Errors if we cannot talk to docker to create an exec instance.
+#[allow(clippy::too_many_arguments)]
+pub async fn execute_command_in_container(
+	client: &HttpClient,
+	container_name: &str,
+	command: &[String],
+	environment_to_export: &[String],
+	user: &str,
+	needs_forced_ids: bool,
+	force_user_id: Option<u32>,
+	force_group_id: Option<u32>,
+) -> Result<String> {
+	let execution_id = execute_command_in_container_async(
+		client,
+		container_name,
+		command,
+		environment_to_export,
+		user,
+		needs_forced_ids,
+		force_user_id,
+		force_group_id,
+	)
+	.await?;
+
+	loop {
+		if has_command_finished(client, &execution_id).await {
+			break;
+		}
+
+		async_std::task::sleep(Duration::from_micros(10)).await;
+	}
+
+	Ok(execution_id)
+}
+
+/// Get the exit code for a particular execution.
+///
+/// # Errors
+///
+/// If we cannot find an `ExitCode` in the docker response, or talk to the docker socket.
+pub async fn get_command_exit_code(client: &HttpClient, execution_id: &str) -> Result<i32> {
+	let url = format!("/exec/{}/json", execution_id);
+	let resp = docker_api_get(
+		client,
+		&url,
+		"Taking awhile to query exit code of command from docker. Will wait up to 30 seconds."
+			.to_owned(),
+		None,
+		true,
+	)
+	.await
+	.wrap_err("Failed to query exit code from Docker")?;
+	let exit_code_opt = &resp["ExitCode"];
+	if !exit_code_opt.is_i64() {
+		return Err(eyre!(
+			"Failed to find integer ExitCode in response: [{:?}]",
+			resp,
+		))
+		.wrap_err("Failure querying exit code")
+		.suggestion("This is an internal error, please file an issue.");
+	}
+
+	Ok(i32::try_from(exit_code_opt.as_i64().unwrap())
+		.ok()
+		.unwrap_or(255))
+}
diff --git a/src/executors/docker_engine/image_api.rs b/src/executors/docker_engine/image_api.rs
new file mode 100644
index 0000000..16f595a
--- /dev/null
+++ b/src/executors/docker_engine/image_api.rs
@@ -0,0 +1,39 @@
+use super::docker_api_post;
+
+use color_eyre::{eyre::WrapErr, Result};
+use isahc::HttpClient;
+use std::time::Duration;
+
+/// Download the Image for this docker executor.
+///
+/// # Errors
+///
+/// Errors when the docker api cannot be talked to, or the image cannot be downloaded.
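+///
+/// The reference is split on its last `:` to find the tag, for example
+/// (hypothetical caller):
+///
+/// ```ignore
+/// // "ubuntu" is pulled as "ubuntu:latest"; an explicit tag is kept as-is.
+/// download_image(&client, "ubuntu:20.04").await?;
+/// ```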
+pub async fn download_image(client: &HttpClient, image: &str) -> Result<()> {
+	let image_tag_split = image.rsplitn(2, ':').collect::<Vec<&str>>();
+	let (image_name, tag_name) = if image_tag_split.len() == 2 {
+		(image_tag_split[1], image_tag_split[0])
+	} else {
+		(image_tag_split[0], "latest")
+	};
+	let url = format!("/images/create?fromImage={}&tag={}", image_name, tag_name);
+
+	let _ = docker_api_post(
+		client,
+		&url,
+		format!(
+			"Downloading the docker_image: [{}:{}]",
+			image_name, tag_name
+		),
+		None,
+		Some(Duration::from_secs(3600)),
+		false,
+	)
+	.await
+	.wrap_err(format!(
+		"Failed to download image: [{}:{}]",
+		image_name, tag_name
+	))?;
+
+	Ok(())
+}
diff --git a/src/executors/docker_engine/mod.rs b/src/executors/docker_engine/mod.rs
new file mode 100644
index 0000000..b5243cb
--- /dev/null
+++ b/src/executors/docker_engine/mod.rs
@@ -0,0 +1,223 @@
+//! Represents interacting with the Docker Engine API.
+
+use crate::future_helper::timeout_with_log_msg;
+
+use color_eyre::{
+	eyre::{eyre, WrapErr},
+	Result, Section,
+};
+use isahc::{http::request::Request, prelude::*, Body, HttpClient};
+use once_cell::sync::Lazy;
+use serde_json::Value as JsonValue;
+use std::time::Duration;
+use tracing::debug;
+
+/// This is the api version we use for talking to the docker socket.
+///
+/// The docker socket allows us to choose a versioned api like this, which is
+/// why we use it as opposed to using a terminal command (not to mention we
+/// don't have to worry about escaping correctly).
+///
+/// `v1.40` is chosen because as of the time of writing this
+/// `v1.40` is the version for Docker Engine 19.03, which at
+/// the time of writing this (July 3rd, 2020) is the lowest supported
+/// version according to docker:
+///
+///
+/// We can bump this in the future when we know it won't run into anyone.
+const DOCKER_API_VERSION: &str = "/v1.40";
+const DOCKER_STATUS_CODES_ERR_NOTE: &str = "To find out what the status code means you can check the Docker documentation: https://docs.docker.com/engine/api/v1.40/.";
+
+cfg_if::cfg_if! {
+	if #[cfg(unix)] {
+		pub const SOCKET_PATH: &str = "/var/run/docker.sock";
+	} else if #[cfg(win)] {
+		// TODO(xxx): named pipes? url?
+		pub const SOCKET_PATH: &str = "UNIMPLEMENTED";
+	}
+}
+
+// A global lock for the unix socket since it can't have multiple things communicating
+// at the same time.
+//
+// You can technically have multiple writers on windows but only up to a particular buffer
+// size, and it's just much easier to have just a global lock, and take the extra bit. Really
+// the only time this is truly slow is when we're downloading a docker image.
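+//
+// Every helper below takes this lock before touching the socket, i.e. the
+// pattern in `docker_api_call` is simply:
+//
+//   let _guard = DOCK_SOCK_LOCK.lock().await;
+//   // ... send the request, and read the response, while the guard is held ...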
+static DOCK_SOCK_LOCK: Lazy<async_std::sync::Mutex<()>> =
+	Lazy::new(|| async_std::sync::Mutex::new(()));
+
+async fn docker_api_call<B: Into<Body>>(
+	client: &HttpClient,
+	req: Request<B>,
+	long_call_msg: String,
+	timeout: Option<Duration>,
+	is_json: bool,
+) -> Result<JsonValue> {
+	let _guard = DOCK_SOCK_LOCK.lock().await;
+	let uri = req.uri().to_string();
+
+	let log_timeout = Duration::from_secs(3);
+	let timeout_frd = timeout.unwrap_or_else(|| Duration::from_secs(30));
+	let mut resp = timeout_with_log_msg(
+		long_call_msg,
+		log_timeout,
+		timeout_frd,
+		client.send_async(req),
+	)
+	.await??;
+
+	let status = resp.status().as_u16();
+	if status < 200 || status > 299 {
+		return Err(eyre!(
+			"Docker responded with a status code: [{}] which is not in the 200-300 range.",
+			status,
+		))
+		.note(DOCKER_STATUS_CODES_ERR_NOTE)
+		.context(uri);
+	}
+
+	if is_json {
+		Ok(resp
+			.json()
+			.wrap_err("Failure to parse Docker response as JSON")
+			.context(uri)?)
+	} else {
+		// Ensure the response body is read in its entirety. Otherwise
+		// the body could still be writing, but we think we're done with the
+		// request, and all of a sudden we're writing to a socket while
+		// a response body is all being written and it's all bad.
+		let _ = resp.text();
+		Ok(serde_json::Value::default())
+	}
+}
+
+/// Call the docker engine api using the GET http method.
+///
+/// `client`: the http client to use.
+/// `path`: the path to call (along with Query Args).
+/// `long_call_msg`: the message to print when docker is taking awhile to respond.
+/// `timeout`: The optional timeout. Defaults to 30 seconds.
+/// `is_json`: whether or not to parse the response as json.
+pub(self) async fn docker_api_get(
+	client: &HttpClient,
+	path: &str,
+	long_call_msg: String,
+	timeout: Option<Duration>,
+	is_json: bool,
+) -> Result<JsonValue> {
+	let url = format!("http://localhost{}{}", DOCKER_API_VERSION, path);
+	debug!("URL for get will be: {}", url);
+	let req = Request::get(url)
+		.header("Accept", "application/json; charset=UTF-8")
+		.header("Content-Type", "application/json; charset=UTF-8")
+		.body(())
+		.wrap_err("Internal-Error: Failed to construct http request.")
+		.suggestion("Please report this as an issue so it can be fixed.")?;
+
+	docker_api_call(client, req, long_call_msg, timeout, is_json)
+		.await
+		.context(format!("URL: {}", path))
+}
+
+/// Call the docker engine api using the POST http method.
+///
+/// `client`: the http client to use.
+/// `path`: the path to call (along with Query Args).
+/// `long_call_msg`: the message to print when docker is taking awhile to respond.
+/// `body`: The body to send to the remote endpoint.
+/// `timeout`: the optional timeout. Defaults to 30 seconds.
+/// `is_json`: whether to attempt to read the response body as json.
+pub(self) async fn docker_api_post(
+	client: &HttpClient,
+	path: &str,
+	long_call_msg: String,
+	body: Option<JsonValue>,
+	timeout: Option<Duration>,
+	is_json: bool,
+) -> Result<JsonValue> {
+	let url = format!("http://localhost{}{}", DOCKER_API_VERSION, path);
+	debug!("URL for post will be: {}", url);
+	let req_part = Request::post(url)
+		.header("Accept", "application/json; charset=UTF-8")
+		.header("Content-Type", "application/json; charset=UTF-8")
+		.header("Expect", "");
+	let req = if let Some(body_data) = body {
+		req_part
+			.body(
+				serde_json::to_vec(&body_data)
+					.wrap_err("Failure converting HTTP Request Body to JSON")
+					.suggestion("This is an internal error, please report this issue.")?,
+			)
+			.wrap_err("Failed to write body to request")
+			.suggestion("This is an internal error, please report this issue.")?
+	} else {
+		req_part
+			.body(Vec::new())
+			.wrap_err("Failed to write body to request")
+			.suggestion("This is an internal error, please report this issue.")?
+	};
+
+	docker_api_call(client, req, long_call_msg, timeout, is_json)
+		.await
+		.context(format!("URL: {}", path))
+}
+
+/// Call the docker engine api using the DELETE http method.
+///
+/// `client`: the http client to use.
+/// `path`: the path to call (along with Query Args).
+/// `long_call_msg`: the message to print when docker is taking awhile to respond.
+/// `body`: The body to send to the remote endpoint.
+/// `timeout`: the timeout for this request, defaults to 30 seconds.
+/// `is_json`: whether to actually try to read the response body as json.
+pub(self) async fn docker_api_delete(
+	client: &HttpClient,
+	path: &str,
+	long_call_msg: String,
+	body: Option<JsonValue>,
+	timeout: Option<Duration>,
+	is_json: bool,
+) -> Result<JsonValue> {
+	let url = format!("http://localhost{}{}", DOCKER_API_VERSION, path);
+	debug!("URL for delete will be: {}", url);
+	let req_part = Request::delete(url)
+		.header("Accept", "application/json; charset=UTF-8")
+		.header("Content-Type", "application/json; charset=UTF-8")
+		.header("Expect", "");
+	let req = if let Some(body_data) = body {
+		req_part
+			.body(
+				serde_json::to_vec(&body_data)
+					.wrap_err("Failure converting HTTP Request Body to JSON")
+					.suggestion("This is an internal error, please report this issue.")?,
+			)
+			.wrap_err("Failed to write body to request")
+			.suggestion("This is an internal error, please report this issue.")?
+	} else {
+		req_part
+			.body(Vec::new())
+			.wrap_err("Failed to write body to request")
+			.suggestion("This is an internal error, please report this issue.")?
+	};
+
+	docker_api_call(client, req, long_call_msg, timeout, is_json)
+		.await
+		.context(format!("URL: {}", path))
+}
+
+pub(crate) mod container;
+pub(crate) mod container_api;
+pub(crate) mod execution_api;
+pub(crate) mod image_api;
+pub(crate) mod network_api;
+pub(crate) mod permissions_helper;
+pub(crate) mod version_api;
+
+pub use container::*;
+pub use container_api::*;
+pub use execution_api::*;
+pub use image_api::*;
+pub use network_api::*;
+pub use permissions_helper::*;
+pub use version_api::*;
diff --git a/src/executors/docker_engine/network_api.rs b/src/executors/docker_engine/network_api.rs
new file mode 100644
index 0000000..f0f207a
--- /dev/null
+++ b/src/executors/docker_engine/network_api.rs
@@ -0,0 +1,182 @@
+use super::{docker_api_delete, docker_api_get, docker_api_post};
+
+use color_eyre::{eyre::WrapErr, Result, Section};
+use isahc::HttpClient;
+use tracing::error;
+
+pub async fn list_devloop_networks(client: &HttpClient) -> Result<Vec<String>> {
+	let json_networks = docker_api_get(
+		&client,
+		"/networks",
+		"Taking awhile to query networks from docker. Will wait up to 30 seconds.".to_owned(),
+		None,
+		true,
+	)
+	.await?;
+
+	let mut devloop_networks = Vec::new();
+	if let Some(networks) = json_networks.as_array() {
+		for network in networks {
+			if let Some(name_untyped) = network.get("Name") {
+				if let Some(name_str) = name_untyped.as_str() {
+					if name_str.starts_with("dl-") {
+						devloop_networks.push(name_str.to_owned());
+					}
+				}
+			}
+		}
+	}
+
+	Ok(devloop_networks)
+}
+
+pub async fn delete_network(client: &HttpClient, network: &str) {
+	let err = docker_api_delete(
+		&client,
+		&format!("/networks/{}", network),
+		"Docker is taking awhile to delete a docker network. Will wait up to 30 seconds."
+			.to_owned(),
+		None,
+		None,
+		false,
+	)
+	.await;
+
+	if err.is_err() {
+		error!(
+			"{:?}",
+			err.wrap_err(format!("Failed to delete docker network: [{}]", network))
+				.suggestion(format!(
+					"You can try deleting the network manually with: `docker network rm {}`",
+					network
+				))
+				.unwrap_err(),
+		);
+	}
+}
+
+/// Ensure a particular network exists.
+///
+/// # Errors
+///
+/// If we cannot talk to the docker socket, or there is an error creating the network.
+pub async fn ensure_network_exists(client: &HttpClient, pipeline_id: &str) -> Result<()> {
+	let network_id = format!("dl-{}", pipeline_id);
+	let network_url = format!("/networks/{}", network_id);
+	let res = docker_api_get(
+		client,
+		&network_url,
+		"Taking awhile to query network existence status from docker. Will wait up to 30 seconds."
+			.to_owned(),
+		None,
+		true,
+	)
+	.await
+	.wrap_err(format!(
+		"Failed to query network information: {}",
+		network_id
+	));
+
+	if res.is_err() {
+		let json_body = serde_json::json!({
+			"Name": network_id,
+		});
+
+		let _ = docker_api_post(
+			client,
+			"/networks/create",
+			"Docker is not creating the network in a timely manner. Will wait up to 30 seconds."
+				.to_owned(),
+			Some(json_body),
+			None,
+			false,
+		)
+		.await
+		.wrap_err(format!("Failed to create docker network: {}", network_id))?;
+	}
+
+	Ok(())
+}
+
+/// Determine if the container is attached to a particular network.
+///
+/// If there were any errors we'll just return false.
+pub async fn is_network_attached(
+	client: &HttpClient,
+	container_name: &str,
+	pipeline_id: &str,
+) -> bool {
+	let url = format!("/containers/{}/json", container_name);
+	let body_res = docker_api_get(
+		&client,
+		&url,
+		"Taking awhile to get container status from docker. Will wait up to 30 seconds.".to_owned(),
+		None,
+		true,
+	)
+	.await;
+	if body_res.is_err() {
+		return false;
+	}
+	let body = body_res.unwrap();
+	let id_as_opt = body["Id"].as_str();
+	if id_as_opt.is_none() {
+		return false;
+	}
+	let id = id_as_opt.unwrap();
+
+	let network_url = format!("/networks/dl-{}", pipeline_id);
+	let network_body_res = docker_api_get(
+		client,
+		&network_url,
+		"Taking awhile to get network status from docker. Will wait up to 30 seconds.".to_owned(),
+		None,
+		true,
+	)
+	.await;
+	if network_body_res.is_err() {
+		return false;
+	}
+	let network_body = network_body_res.unwrap();
+	let networks_obj_opt = network_body["Containers"].as_object();
+	if networks_obj_opt.is_none() {
+		return false;
+	}
+	let networks_obj = networks_obj_opt.unwrap();
+
+	networks_obj.contains_key(id)
+}
+
+/// Ensure a particular network has been attached to this container.
+///
+/// # Errors
+///
+/// If we fail to talk to the docker socket, or connect the container to the network.
+pub async fn ensure_network_attached(
+	client: &HttpClient,
+	container_name: &str,
+	hostname: &str,
+	pipeline_id: &str,
+) -> Result<()> {
+	if !is_network_attached(client, container_name, pipeline_id).await {
+		let url = format!("/networks/dl-{}/connect", pipeline_id);
+		let body = serde_json::json!({
+			"Container": container_name,
+			"EndpointConfig": {
+				"Aliases": [hostname],
+			}
+		});
+
+		let _ = docker_api_post(
+			client,
+			&url,
+			"Docker is taking awhile to attach a network to the container. Will wait up to 30 seconds.".to_owned(),
+			Some(body),
+			None,
+			false
+		).await
+		.wrap_err("Failed to attach network to Docker Container.")?;
+	}
+
+	Ok(())
+}
diff --git a/src/executors/docker_engine/permissions_helper.rs b/src/executors/docker_engine/permissions_helper.rs
new file mode 100644
index 0000000..b26950b
--- /dev/null
+++ b/src/executors/docker_engine/permissions_helper.rs
@@ -0,0 +1,245 @@
+use super::{execute_command_in_container, get_command_exit_code, DockerContainerInfo};
+
+use color_eyre::{
+	eyre::{eyre, WrapErr},
+	Result, Section,
+};
+use isahc::HttpClient;
+use once_cell::sync::Lazy;
+
+static DOCK_USER_LOCK: Lazy<async_std::sync::Mutex<()>> =
+	Lazy::new(|| async_std::sync::Mutex::new(()));
+const _PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION: &str = "The permissions helper is still experimental. Please report this so it can be fixed before stabilization.";
+
+/// Setup the permission helper for this docker container if it's been configured.
+///
+/// # Errors
+///
+/// If we cannot talk to the docker socket, or cannot create the user.
+pub async fn setup_permission_helper(
+	client: &HttpClient,
+	container: &DockerContainerInfo,
+) -> Result<()> {
+	let _guard = DOCK_USER_LOCK.lock().await;
+	if container.get_proxy_user_id().is_none() || container.get_proxy_group_id().is_none() {
+		return Ok(());
+	}
+
+	let forced_user_id = container.get_proxy_user_id().unwrap();
+	let forced_group_id = container.get_proxy_group_id().unwrap();
+	let has_sudo = container_has_sudo(
+		client,
+		container.get_container_name(),
+		container.get_base_user(),
+	)
+	.await
+	.suggestion(_PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION)?;
+	if has_created_proxy_user_before(
+		client,
+		container.get_container_name(),
+		container.get_base_user(),
+	)
+	.await
+	.suggestion(_PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION)?
+	{
+		return Ok(());
+	}
+	create_permissions_proxy_user(
+		client,
+		container.get_container_name(),
+		container.get_base_user(),
+		*forced_user_id,
+		*forced_group_id,
+		has_sudo,
+	)
+	.await
+	.suggestion(_PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION)?;
+
+	// Allow the user to sudo, if sudo is installed.
+	if has_sudo {
+		allow_proxy_user_to_sudo(
+			client,
+			container.get_container_name(),
+			container.get_base_user(),
+		)
+		.await
+		.suggestion(_PERMISSIONS_HELPER_EXPERIMENTAL_SUGGESTION)?;
+	}
+
+	Ok(())
+}
+
+/// Perform a very simple execute and wait for command to finish.
+async fn execute_and_wait_simple(
+	client: &HttpClient,
+	container_name: &str,
+	user: &str,
+	command: &[String],
+) -> Result<String> {
+	execute_command_in_container(
+		client,
+		container_name,
+		command,
+		&[],
+		user,
+		false,
+		None,
+		None,
+	)
+	.await
+}
+
+/// Check if a container has sudo installed.
+async fn container_has_sudo(client: &HttpClient, container_name: &str, user: &str) -> Result<bool> {
+	let sudo_execution_id = execute_and_wait_simple(
+		client,
+		container_name,
+		user,
+		&[
+			"/usr/bin/env".to_owned(),
+			"bash".to_owned(),
+			"-c".to_owned(),
+			"hash sudo".to_owned(),
+		],
+	)
+	.await
+	.wrap_err(
+		"Failure checking for sudo existence inside docker container for permissions helper.",
+	)?;
+
+	Ok(get_command_exit_code(client, &sudo_execution_id)
+		.await
+		.wrap_err(
+			"Failure checking for sudo existence inside docker container for permissions helper.",
+		)? == 0)
+}
+
+/// Check if this user has already created the dev-loop permissions helper user.
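+///
+/// The check is just `getent passwd dl` inside the container, so a zero exit
+/// code means the `dl` proxy user already exists (e.g. on a re-used container).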
+async fn has_created_proxy_user_before( + client: &HttpClient, + container_name: &str, + user: &str, +) -> Result { + let user_exist_id = execute_and_wait_simple( + client, + container_name, + user, + &[ + "/usr/bin/env".to_owned(), + "bash".to_owned(), + "-c".to_owned(), + "getent passwd dl".to_owned(), + ], + ) + .await + .wrap_err("Failure checking if user has already been created for permissions helper.")?; + + if get_command_exit_code(client, &user_exist_id) + .await + .wrap_err("Failure checking if user has already been created for permissions helper.")? + == 0 + { + Ok(true) + } else { + Ok(false) + } +} + +/// Create a user in the docker container that has the same user id/group id +/// as the user on the host so we can proxy permissions. +async fn create_permissions_proxy_user( + client: &HttpClient, + container_name: &str, + user: &str, + forced_user_id: u32, + forced_group_id: u32, + has_sudo: bool, +) -> Result<()> { + let creation_execution_id = match (user == "root", has_sudo) { + (true, _) | (false, false) => execute_and_wait_simple( + client, + container_name, + user, + &[ + "/usr/bin/env".to_owned(), + "bash".to_owned(), + "-c".to_owned(), + format!( + "groupadd -g {} -o dl && useradd -u {} -g {} -o -c '' -m dl", + forced_group_id, forced_user_id, forced_group_id + ), + ], + ) + .await + .wrap_err("Failure creating user for permissions helper")?, + (false, true) => execute_and_wait_simple( + client, + container_name, + user, + &[ + "/usr/bin/env".to_owned(), + "bash".to_owned(), + "-c".to_owned(), + format!( + "sudo -n groupadd -g {} -o dl && sudo -n useradd -u {} -g {} -o -c '' -m dl", + forced_group_id, forced_user_id, forced_group_id + ), + ], + ) + .await + .wrap_err("Failure creating user for permissions helper")?, + }; + + if get_command_exit_code(client, &creation_execution_id).await? != 0 { + return Err(eyre!( + "Failed to get successful ExitCode from docker on user creation for permissions helper" + )); + } + + Ok(()) +} + +/// Allow the permissions proxy user to sudo. +async fn allow_proxy_user_to_sudo( + client: &HttpClient, + container_name: &str, + user: &str, +) -> Result<()> { + let sudo_user_creation_id = if user == "root" { + execute_and_wait_simple( + client, + container_name, + user, + &[ + "/usr/bin/env".to_owned(), + "bash".to_owned(), + "-c".to_owned(), + "mkdir -p /etc/sudoers.d && echo \"dl ALL=(root) NOPASSWD:ALL\" > /etc/sudoers.d/dl && chmod 0440 /etc/sudoers.d/dl".to_owned() + ], + ).await.wrap_err("Failure adding user to sudoers for permissions helper")? + } else { + execute_and_wait_simple( + client, + container_name, + user, + &[ + "/usr/bin/env".to_owned(), + "bash".to_owned(), + "-c".to_owned(), + "sudo -n mkdir -p /etc/sudoers.d && echo \"dl ALL=(root) NOPASSWD:ALL\" | sudo -n tee /etc/sudoers.d/dl && sudo -n chmod 0440 /etc/sudoers.d/dl".to_owned() + ], + ).await.wrap_err("Failure adding user to sudoers for permissions helper")? + }; + + if get_command_exit_code(client, &sudo_user_creation_id) + .await + .wrap_err("Failure adding user to sudoers for permissions helper.")? + != 0 + { + return Err(eyre!( + "Failed to setup passwordless sudo access for permissions helper!" 
+ )); + } + + Ok(()) +} diff --git a/src/executors/docker_engine/version_api.rs b/src/executors/docker_engine/version_api.rs new file mode 100644 index 0000000..ba79f9d --- /dev/null +++ b/src/executors/docker_engine/version_api.rs @@ -0,0 +1,16 @@ +use super::docker_api_get; + +use color_eyre::Result; +use isahc::HttpClient; +use serde_json::Value as JsonValue; + +pub async fn docker_version_check(client: &HttpClient) -> Result { + docker_api_get( + client, + "/version", + "Taking awhile to query version from docker. Will wait up to 30 seconds.".to_owned(), + None, + true, + ) + .await +} diff --git a/src/executors/host.rs b/src/executors/host.rs index 398e137..1d43fda 100644 --- a/src/executors/host.rs +++ b/src/executors/host.rs @@ -4,20 +4,20 @@ use crate::{ config::types::NeedsRequirement, dirs::get_tmp_dir, - executors::{CompatibilityStatus, Executor}, + executors::{ + shared::{create_entrypoint, create_executor_shared_dir}, + CompatibilityStatus, Executor as ExecutorTrait, + }, tasks::execution::preparation::ExecutableTask, }; -use async_std::{ - fs::{read_dir, remove_dir_all}, - prelude::*, -}; use color_eyre::{ eyre::{eyre, WrapErr}, Result, Section, }; use crossbeam_channel::Sender; use std::{ + fs::{read_dir, remove_dir_all}, io::{BufRead, BufReader, Error as IoError}, path::PathBuf, process::{Command, Stdio}, @@ -30,45 +30,61 @@ use tracing::{debug, error, warn}; /// Determine if an error is an "ETXTFILEBUSY" error, e.g. someone /// else is actively executing bash. +#[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + target_os = "netbsd" +))] fn is_etxtfilebusy(os_err: &IoError) -> bool { - if cfg!(target_os = "macos") - || cfg!(target_os = "ios") - || cfg!(target_os = "linux") - || cfg!(target_os = "android") - || cfg!(target_os = "freebsd") - || cfg!(target_os = "dragonfly") - || cfg!(target_os = "openbsd") - || cfg!(target_os = "netbsd") - { - if let Some(os_err_code) = os_err.raw_os_error() { - // This stands for ETXTBUSY, since it's pretty weird to match on - // message of rust. - // - // This seems to be correct for all OSs, listed above. - // - Linux: https://mariadb.com/kb/en/operating-system-error-codes/ - // - OSX/iOS: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/intro.2.html - // - FreeBSD: https://www.freebsd.org/cgi/man.cgi?query=errno&sektion=2&manpath=freebsd-release-ports - // - Android: https://android.googlesource.com/kernel/lk/+/dima/for-travis/include/errno.h - // - DragonflyBSD: https://man.dragonflybsd.org/?command=errno§ion=2 - // - OpenBSD: https://man.openbsd.org/errno.2 - // - NetBSD: https://netbsd.gw.com/cgi-bin/man-cgi?errno+2+NetBSD-6.0 - if os_err_code == 26 { - return true; - } + if let Some(os_err_code) = os_err.raw_os_error() { + // This stands for ETXTBUSY, since it's pretty weird to match on + // message of rust. + // + // This seems to be correct for all OSs, listed above. 
+ // - Linux: https://mariadb.com/kb/en/operating-system-error-codes/ + // - OSX/iOS: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/intro.2.html + // - FreeBSD: https://www.freebsd.org/cgi/man.cgi?query=errno&sektion=2&manpath=freebsd-release-ports + // - Android: https://android.googlesource.com/kernel/lk/+/dima/for-travis/include/errno.h + // - DragonflyBSD: https://man.dragonflybsd.org/?command=errno§ion=2 + // - OpenBSD: https://man.openbsd.org/errno.2 + // - NetBSD: https://netbsd.gw.com/cgi-bin/man-cgi?errno+2+NetBSD-6.0 + if os_err_code == 26 { + return true; } } false } -/// Represents the actual executor for the host system. +/// Determine if an error is an "ETXTFILEBUSY" error, e.g. someone +/// else is actively executing bash. +#[cfg(not(any( + target_os = "macos", + target_os = "ios", + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + target_os = "netbsd" +)))] +fn is_etxtfilebusy(os_err: &IoError) -> bool { + false +} + +/// Represents the actual `Executor` for the host system. #[derive(Debug)] -pub struct HostExecutor { +pub struct Executor { /// The root of the project, so we know where to "cd" into. project_root: String, } -impl HostExecutor { +impl Executor { /// Create a new host executor, with nothing but the project root. /// /// # Errors @@ -92,17 +108,16 @@ impl HostExecutor { } /// Performs a clean up of all host resources. - #[allow(clippy::cognitive_complexity)] pub async fn clean() { // To clean all we would possibly have leftover is files in $TMPDIR. // So we iterate through everything in the temporary directory... - if let Ok(mut entries) = read_dir(get_tmp_dir()).await { - while let Some(resulting_entry) = entries.next().await { + if let Ok(entries) = read_dir(get_tmp_dir()) { + for resulting_entry in entries { // Did we get something? if let Ok(entry_de) = resulting_entry { let entry = entry_de.path(); // If it's not a directory ignore it. - if !entry.is_dir().await { + if !entry.is_dir() { debug!( "Found a non-directory in your temporary directory, skipping: [{:?}]", entry @@ -132,7 +147,7 @@ impl HostExecutor { } // If it is... remove the directory and everything underneath it. - if let Err(remove_err) = remove_dir_all(&entry).await { + if let Err(remove_err) = remove_dir_all(&entry) { warn!( "{:?}", Err::<(), IoError>(remove_err) @@ -147,7 +162,7 @@ impl HostExecutor { } } - /// Determines if this `HostExecutor` is compatible with the system. + /// Determines if this `Executor` is compatible with the system. #[must_use] pub fn is_compatible() -> CompatibilityStatus { // This command expands to: `bash -c "hash bash"`, while this may sound like @@ -179,7 +194,7 @@ impl HostExecutor { } #[async_trait::async_trait] -impl Executor for HostExecutor { +impl ExecutorTrait for Executor { #[must_use] fn meets_requirements(&self, reqs: &[NeedsRequirement]) -> bool { let mut meets_reqs = true; @@ -194,13 +209,6 @@ impl Executor for HostExecutor { meets_reqs } - #[allow( - clippy::cognitive_complexity, - clippy::suspicious_else_formatting, - clippy::too_many_lines, - clippy::unnecessary_unwrap, - unused_assignments - )] #[must_use] async fn execute( &self, @@ -209,90 +217,30 @@ impl Executor for HostExecutor { helper_src_line: &str, task: &ExecutableTask, worker_count: usize, - ) -> isize { - // Execute a particular task: - // - // 1. Create a temporary directory for the pipeline id, and the task name. - // 2. 
Write the task file the user specified.
-	//  3. Write an "entrypoint" that sources in the helpers, and calls
-	//     the script.
-	//  4. Execute the script and wait for it to finish.
-
+	) -> Result<isize> {
 		debug!("Host Executor executing task: [{}]", task.get_task_name());
 
-		// Create the pipeline directory.
-		let mut tmp_path = get_tmp_dir();
-		tmp_path.push(task.get_pipeline_id().to_owned() + "-dl-host");
-		let res = async_std::fs::create_dir_all(tmp_path.clone()).await;
-		if let Err(dir_err) = res {
-			error!(
-				"Failed to create pipeline directory due to: [{:?}]",
-				dir_err
-			);
-			return 10;
-		}
-
-		// Write the task file.
-		let mut regular_task = tmp_path.clone();
-		regular_task.push(task.get_task_name().to_owned() + ".sh");
-		debug!("Host Executor task writing to path: [{:?}]", regular_task);
-		let write_res =
-			async_std::fs::write(&regular_task, task.get_contents().get_contents()).await;
-		if let Err(write_err) = write_res {
-			error!("Failed to write script file due to: [{:?}]", write_err);
-			return 10;
-		}
-		let path_as_str = regular_task.to_str().unwrap();
-
-		// Write the entrypoint script.
-		let entry_point_file = format!(
-			"#!/usr/bin/env bash
-
-cd {project_root}
-
-# Source Helpers
-{helper}
-
-eval \"$(declare -F | sed -e 's/-f /-fx /')\"
-
-{script} {arg_str}",
-			project_root = self.project_root,
-			helper = helper_src_line,
-			script = path_as_str,
-			arg_str = task.get_arg_string(),
-		);
-		tmp_path.push(task.get_task_name().to_owned() + "-entrypoint.sh");
+		// Write out the small wrapper script that sources in the helpers, and runs the task.
+		let shared_dir = create_executor_shared_dir(task.get_pipeline_id())
+			.wrap_err("Failed to create pipeline directory")?;
 		debug!(
-			"Host task entrypoint is being written too: [{:?}]",
-			tmp_path
+			"Host Executor will be using temporary directory: [{:?}]",
+			shared_dir
 		);
-		let write_res = async_std::fs::write(&tmp_path, entry_point_file).await;
-		if let Err(write_err) = write_res {
-			error!("Failed to write entrypoint file due to: [{:?}]", write_err);
-			return 10;
-		}
-
-		if cfg!(target_family = "unix") {
-			use std::os::unix::fs::PermissionsExt;
-			let executable_permissions = std::fs::Permissions::from_mode(0o777);
-
-			if let Err(permission_err) = std::fs::set_permissions(&tmp_path, executable_permissions.clone()).wrap_err(
-				"Failed to mark temporary path as world-wide readable/writable/executable."
-			).suggestion("If the error isn't immediately clear, please file an issue as it's probably a bug in dev-loop with your system.") {
-				error!("{:?}", permission_err);
-				return 10;
-			}
-			if let Err(permission_err) = std::fs::set_permissions(&regular_task, executable_permissions).wrap_err(
-				"Failed to mark task file as world-wide readable/writable/executable."
-			).suggestion("If the error isn't immediately clear, please file an issue as it's probably a bug in dev-loop with your system.") {
-				error!("{:?}", permission_err);
-				return 10;
-			}
-		}
-
-		let entrypoint_as_str = tmp_path.to_str().unwrap();
-
-		// Spawn the task.
+		let entrypoint_path = create_entrypoint(
+			&self.project_root,
+			&get_tmp_dir().to_string_lossy().to_string(),
+			shared_dir,
+			helper_src_line,
+			task,
+			false,
+			None,
+			None,
+		)?;
+		let entrypoint_as_str = entrypoint_path.to_str().unwrap();
+
+		// Spawn the command itself, retry if we get an ETXTFILEBUSY error in case we try to start two
+		// bash processes at the same time.
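ETXTBSY (errno 26) shows up when one worker `exec`s an entrypoint script while another worker still holds a write handle to its own freshly written script. A hedged sketch of the retry loop that follows; `spawn_with_retry` and the 10ms back-off are illustrative, the real code inlines the loop in `execute` and the delay is not visible in this hunk:

```rust
use std::process::{Child, Command, Stdio};
use std::{thread, time::Duration};

/// Errno 26 (ETXTBSY) on the unix targets listed earlier in this file.
const ETXTBSY: i32 = 26;

/// Keep trying to spawn `path` while the text file is busy because
/// another worker is spawning its own entrypoint at the same moment.
fn spawn_with_retry(path: &str) -> std::io::Result<Child> {
    loop {
        match Command::new(path)
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
        {
            Ok(child) => return Ok(child),
            Err(err) if err.raw_os_error() == Some(ETXTBSY) => {
                thread::sleep(Duration::from_millis(10));
            }
            Err(err) => return Err(err),
        }
    }
}
```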
let mut command_res = Command::new(entrypoint_as_str) .stdin(Stdio::null()) .stdout(Stdio::piped()) @@ -307,22 +255,16 @@ eval \"$(declare -F | sed -e 's/-f /-fx /')\" .stderr(Stdio::piped()) .spawn(); } else { - error!( - "{:?}", - Err::<(), IoError>(command_err) - .wrap_err("Failed to run bash script on the host system") - .note(format!("The script is located at: [{}]", entrypoint_as_str,)) - .unwrap_err() - ); - return 10; + return Err(command_err) + .wrap_err("Failed to run bash script on the host system") + .note(format!("The script is located at: [{}]", entrypoint_as_str,)); } } - let mut child = command_res.unwrap(); + let mut command_pid = command_res.unwrap(); let has_finished = Arc::new(AtomicBool::new(false)); - - let mut child_stdout = BufReader::new(child.stdout.take().unwrap()); - let mut child_stderr = BufReader::new(child.stderr.take().unwrap()); + let mut child_stdout = BufReader::new(command_pid.stdout.take().unwrap()); + let mut child_stderr = BufReader::new(command_pid.stderr.take().unwrap()); let flush_channel_clone = log_channel.clone(); let flush_task_name = task.get_task_name().to_owned(); @@ -356,15 +298,15 @@ eval \"$(declare -F | sed -e 's/-f /-fx /')\" } }); - let mut rc = 0; + let rc; // Loop until completion. loop { // Has the child exited? - let child_opt_res = child.try_wait(); + let child_opt_res = command_pid.try_wait(); if let Err(child_err) = child_opt_res { error!("Failed to read child status: [{:?}]", child_err); rc = 10; - let _ = child.kill(); + let _ = command_pid.kill(); break; } let child_opt = child_opt_res.unwrap(); @@ -375,9 +317,9 @@ eval \"$(declare -F | sed -e 's/-f /-fx /')\" // Have we been requested to stop? if should_stop.load(Ordering::Acquire) { - error!("HostExecutor was told to stop! killing child..."); + error!("Executor was told to stop! killing child..."); rc = 10; - let _ = child.kill(); + let _ = command_pid.kill(); break; } @@ -387,8 +329,7 @@ eval \"$(declare -F | sed -e 's/-f /-fx /')\" has_finished.store(true, Ordering::Release); flush_task.await; - // Return exit code. - rc as isize + Ok(rc) } } @@ -398,14 +339,14 @@ mod unit_tests { #[test] fn is_compatible() { - let compat = HostExecutor::is_compatible(); + let compat = Executor::is_compatible(); assert_eq!(compat, CompatibilityStatus::Compatible); } #[test] fn meets_requirements() { let pb = PathBuf::from("/tmp/non-existant"); - let he = HostExecutor::new(&pb).expect("Should always be able to construct HostExecutor"); + let he = Executor::new(&pb).expect("Should always be able to construct Executor for host."); assert!( he.meets_requirements(&vec![crate::config::types::NeedsRequirement::new( diff --git a/src/executors/mod.rs b/src/executors/mod.rs index cb50bc7..1255faa 100644 --- a/src/executors/mod.rs +++ b/src/executors/mod.rs @@ -10,7 +10,7 @@ use crate::{ }, fetch::FetcherRepository, tasks::execution::preparation::ExecutableTask, - yaml_err::contextualize_yaml_err, + yaml_err::contextualize, }; use color_eyre::{ @@ -66,11 +66,13 @@ pub trait Executor { helper_src_line: &str, task: &ExecutableTask, worker_count: usize, - ) -> isize; + ) -> Result; } -pub mod docker; -pub mod host; +pub(crate) mod docker; +pub(crate) mod docker_engine; +pub(crate) mod host; +pub(crate) mod shared; /// Describes a "repository" of executors, or more accurately a set of all /// the executors that could potentially run, or are running right now. @@ -129,7 +131,6 @@ impl ExecutorRepository { /// /// - When there is an error fetching the executor yaml files from disk. 
/// - When the executor yaml files contain invalid yaml. - #[allow(clippy::cognitive_complexity, clippy::map_entry)] pub async fn new(tlc: &TopLevelConf, fr: &FetcherRepository, rd: &PathBuf) -> Result { // Keep track of any executors we can construct outside of a custom_executor // for a task. Which will be constructed when the task is run. @@ -190,7 +191,7 @@ impl ExecutorRepository { let exec_yaml_res = serde_yaml::from_slice::(&exec_conf_file.get_contents()); if let Err(exec_err) = exec_yaml_res { - return contextualize_yaml_err( + return contextualize( Err(exec_err), exec_conf_file.get_source(), &String::from_utf8_lossy(exec_conf_file.get_contents()).to_string() @@ -222,10 +223,7 @@ impl ExecutorRepository { let (mut potential_id, executor) = exec_res.unwrap(); if &potential_id == "host" { - if !executors.contains_key(&potential_id) { - debug!("Inserting host executor!"); - executors.insert(potential_id, executor); - } + executors.entry(potential_id).or_insert(executor); continue; } while executors.contains_key(&potential_id) { @@ -249,7 +247,6 @@ impl ExecutorRepository { /// Perform selection of a particular executor for a task. /// /// `task`: The actual task configuration. - #[allow(clippy::cognitive_complexity)] pub async fn select_executor( &mut self, task: &TaskConf, @@ -418,7 +415,7 @@ impl ExecutorRepository { // Help the type checker out. let ret_v: Result<(String, Arc)> = match *conf.get_type() { ExecutorType::Host => { - let compatibility = host::HostExecutor::is_compatible(); + let compatibility = host::Executor::is_compatible(); match compatibility { CompatibilityStatus::Compatible => {} CompatibilityStatus::CouldBeCompatible(how_to_install) => { @@ -440,11 +437,11 @@ impl ExecutorRepository { )); } } - let he = host::HostExecutor::new(rd)?; + let he = host::Executor::new(rd)?; Ok(("host".to_owned(), Arc::new(he))) } ExecutorType::Docker => { - let compatibility = docker::DockerExecutor::is_compatible().await; + let compatibility = docker::Executor::is_compatible().await; match compatibility { CompatibilityStatus::Compatible => {} CompatibilityStatus::CouldBeCompatible(how_to_install) => { @@ -469,7 +466,7 @@ impl ExecutorRepository { let params = conf.get_parameters(); let provides = conf.get_provided(); - let de = docker::DockerExecutor::new(rd, ¶ms, &provides, None)?; + let de = docker::Executor::new(rd, ¶ms, &provides, None)?; Ok((de.get_container_name().to_owned(), Arc::new(de))) } }; diff --git a/src/executors/shared.rs b/src/executors/shared.rs new file mode 100644 index 0000000..4aa3c63 --- /dev/null +++ b/src/executors/shared.rs @@ -0,0 +1,132 @@ +use crate::{ + dirs::{get_tmp_dir, mark_as_world_editable, mark_file_as_executable, rewrite_tmp_dir}, + executors::ExecutableTask, +}; + +use color_eyre::{eyre::WrapErr, Result, Section}; +use std::{ + fs::{create_dir_all, write as write_file, File}, + path::PathBuf, + time::{SystemTime, UNIX_EPOCH}, +}; +use tracing::warn; + +/// Create the shared directory to execute in. +pub fn create_executor_shared_dir(pipeline_id: &str) -> Result { + let mut tmp_path = get_tmp_dir(); + tmp_path.push(format!("{}-dl-host", pipeline_id)); + create_dir_all(tmp_path.clone())?; + Ok(tmp_path) +} + +/// Create a series of files that can be used to capture logs for an entrypoint. +pub fn create_log_proxy_files( + shared_dir: &PathBuf, + task: &ExecutableTask, +) -> Result<(PathBuf, PathBuf)> { + let epoch = SystemTime::now() + .duration_since(UNIX_EPOCH) + .wrap_err( + "System Clock is before unix start time? 
Please make sure your clock is accurate.", + )? + .as_secs(); + + let mut stdout_log_path = shared_dir.clone(); + stdout_log_path.push(format!("{}-{}-out.log", epoch, task.get_task_name())); + let mut stderr_log_path = shared_dir.clone(); + stderr_log_path.push(format!("{}-{}-err.log", epoch, task.get_task_name())); + + File::create(&stdout_log_path) + .wrap_err("Failed to create file for logs to stdout") + .note("If the issue isn't immediately clear (e.g. disk full), please file an issue.")?; + File::create(&stderr_log_path) + .wrap_err("Failed to create file for logs to stderr") + .note("If the issue isn't immediately clear (e.g. disk full), please file an issue.")?; + + if let Err(err) = mark_as_world_editable(&stdout_log_path) { + warn!("NOTE Failed to mark stdout log file: [{:?}] as world writable, this may cause a lack of logs to be written.\n{:?}", stdout_log_path, err); + } + if let Err(err) = mark_as_world_editable(&stderr_log_path) { + warn!("NOTE Failed to mark stderr log file: [{:?}] as world writable, this may cause a lack of logs to be written.\n{:?}", stderr_log_path, err); + } + + Ok((stdout_log_path, stderr_log_path)) +} + +/// Create an entrypoint to run for tasks. +#[allow(clippy::too_many_arguments)] +pub fn create_entrypoint( + project_root: &str, + tmp_dir: &str, + shared_dir: PathBuf, + helper_src_line: &str, + task: &ExecutableTask, + rewrite_tmp: bool, + stdout_log_path: Option, + stderr_log_path: Option, +) -> Result { + let mut task_path = shared_dir.clone(); + task_path.push(format!("{}.sh", task.get_task_name())); + + let script_to_run = if rewrite_tmp { + rewrite_tmp_dir(tmp_dir, &task_path) + } else { + task_path.to_string_lossy().to_string() + }; + + let mut entrypoint_path = shared_dir; + entrypoint_path.push(format!("{}-entrypoint.sh", task.get_task_name())); + + write_file(&task_path, task.get_contents().get_contents()) + .wrap_err("Failed to copy your task script to temporary directory")?; + + let mut entrypoint_script = format!( + "#!/usr/bin/env bash + +{opening_bracket} + +cd {project_root} + +# Source Helpers +{helper} + +eval \"$(declare -F | sed -e 's/-f /-fx /')\" + +{script} {arg_str} + +{closing_bracket}", + opening_bracket = "{", + project_root = project_root, + helper = helper_src_line, + script = script_to_run, + arg_str = task.get_arg_string(), + closing_bracket = "}", + ); + match (stdout_log_path.is_some(), stderr_log_path.is_some()) { + (true, true) => { + entrypoint_script += &format!( + " >{} 2>{}", + stdout_log_path.unwrap(), + stderr_log_path.unwrap() + ); + } + (true, false) => { + entrypoint_script += &format!(" >{}", stdout_log_path.unwrap()); + } + (false, true) => { + entrypoint_script += &format!(" 2>{}", stderr_log_path.unwrap()); + } + (false, false) => {} + } + + write_file(&entrypoint_path, entrypoint_script).wrap_err("Failed to write entrypoint file")?; + + mark_file_as_executable(&task_path)?; + mark_file_as_executable(&entrypoint_path)?; + + if rewrite_tmp { + Ok(entrypoint_path) + } else { + Ok(PathBuf::from(rewrite_tmp_dir(tmp_dir, &entrypoint_path))) + } +} diff --git a/src/fetch/fs.rs b/src/fetch/fs.rs index c5ce7a8..260df56 100644 --- a/src/fetch/fs.rs +++ b/src/fetch/fs.rs @@ -83,11 +83,7 @@ fn read_path_as_item_blocking(file: &PathBuf, project_root: &PathBuf) -> Result< let mut contents = Vec::new(); fh.read_to_end(&mut contents)?; - Ok(FetchedItem::new( - contents, - LocationType::Path, - source_location, - )) + Ok(FetchedItem::new(contents, source_location)) } /// Handles all fetching based on the 'path' 
directive. diff --git a/src/fetch/mod.rs b/src/fetch/mod.rs index 12be879..7595989 100644 --- a/src/fetch/mod.rs +++ b/src/fetch/mod.rs @@ -18,8 +18,6 @@ use std::{ pub struct FetchedItem { /// The contents of whatever was fetched. contents: Vec, - /// The fetcher that fetched this item. - fetched_by: LocationType, /// An end-user understood idea of where this item came from. source: String, } @@ -31,12 +29,8 @@ impl FetchedItem { /// `fetched_by`: The fetcher that fetched this task. /// `source`: The source of where this came from. #[must_use] - pub fn new(contents: Vec, fetched_by: LocationType, source: String) -> Self { - Self { - contents, - fetched_by, - source, - } + pub fn new(contents: Vec, source: String) -> Self { + Self { contents, source } } /// Get the contents of this fetched item. @@ -45,12 +39,6 @@ impl FetchedItem { &self.contents } - /// Get who fetched this particular item. - #[must_use] - pub fn get_fetched_by(&self) -> &LocationType { - &self.fetched_by - } - /// Get the source location. #[must_use] pub fn get_source(&self) -> &str { @@ -58,8 +46,8 @@ impl FetchedItem { } } -pub mod fs; -pub mod remote; +pub(crate) mod fs; +pub(crate) mod remote; /// A wrapper around all the fetchers at once, so you just have one type to /// deal with. @@ -92,23 +80,6 @@ impl FetcherRepository { }) } - /// Fetch from a particular location. - /// - /// # Errors - /// - /// - Bubbled error from underlying fetchers when there is an error fetching - /// the item. - pub async fn fetch(&self, location: &LocationConf) -> Result> { - match *location.get_type() { - LocationType::HTTP => self.http_fetcher.fetch_http(location).await, - LocationType::Path => { - self.path_fetcher - .fetch_from_fs(location, &self.project_root, &self.project_root, None) - .await - } - } - } - /// Fetch from a particular location, while filtering on filename. /// /// # Errors diff --git a/src/fetch/remote.rs b/src/fetch/remote.rs index 00fbc79..635f0d3 100644 --- a/src/fetch/remote.rs +++ b/src/fetch/remote.rs @@ -65,11 +65,7 @@ impl HttpFetcher { let mut results = Vec::with_capacity(1); let string = resp.text()?; let bytes = Vec::from(string.as_bytes()); - results.push(FetchedItem::new( - bytes, - LocationType::HTTP, - location.get_at().to_owned(), - )); + results.push(FetchedItem::new(bytes, location.get_at().to_owned())); Ok(results) } diff --git a/src/log.rs b/src/log.rs index 213091d..8179c29 100644 --- a/src/log.rs +++ b/src/log.rs @@ -1,7 +1,7 @@ //! Handles any logging utilities that we need in our crate for dev-loop. use color_eyre::{config::HookBuilder, Result}; -use lazy_static::*; +use lazy_static::lazy_static; use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -17,7 +17,7 @@ use tracing_subscriber::{ }; lazy_static! 
{ - pub static ref HAS_OUTPUT_LOG_MSG: Arc = Arc::new(AtomicBool::new(true)); + pub static ref HAS_OUTPUT_LOG_MSG: Arc = Arc::new(AtomicBool::new(false)); } struct TracingSubscriber {} diff --git a/src/main.rs b/src/main.rs index b89efcd..a51c50b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,26 +1,20 @@ -#![allow( - clippy::module_name_repetitions, - clippy::result_map_unwrap_or_else, - clippy::wildcard_imports -)] - use crate::config::types::TopLevelConf; use color_eyre::{eyre::eyre, Report, Section}; use tracing::warn; -pub mod commands; -pub mod config; -pub mod dirs; -pub mod executors; -pub mod fetch; -pub mod future_helper; -pub mod log; -pub mod sigint; -pub mod strsim; -pub mod tasks; -pub mod terminal; -pub mod yaml_err; +pub(crate) mod commands; +pub(crate) mod config; +pub(crate) mod dirs; +pub(crate) mod executors; +pub(crate) mod fetch; +pub(crate) mod future_helper; +pub(crate) mod log; +pub(crate) mod sigint; +pub(crate) mod strsim; +pub(crate) mod tasks; +pub(crate) mod terminal; +pub(crate) mod yaml_err; /// The entrypoint to the application. /// @@ -51,7 +45,7 @@ fn main() -> Result<(), Report> { action = "list".to_owned(); } - let tlc_res = config::get_top_level_config(); + let tlc_res = config::get_top_level(); let errord_on_tlc = tlc_res.is_err(); let tlc = if let Err(tlc_err) = tlc_res { // NOTE(cynthia): if you change this print statement, make sure it looks diff --git a/src/sigint.rs b/src/sigint.rs index eb5f4f6..e29a4fc 100644 --- a/src/sigint.rs +++ b/src/sigint.rs @@ -1,5 +1,5 @@ use color_eyre::{eyre::WrapErr, Result, Section}; -use lazy_static::*; +use lazy_static::lazy_static; lazy_static! { pub static ref RUNNING: Arc = Arc::new(AtomicBool::new(true)); diff --git a/src/tasks/execution/mod.rs b/src/tasks/execution/mod.rs index bef7c3a..3cff5a6 100644 --- a/src/tasks/execution/mod.rs +++ b/src/tasks/execution/mod.rs @@ -2,7 +2,7 @@ //! are thins like building the full list of "Task's" to run into an ordered //! vector. -pub mod preparation; +pub(crate) mod preparation; use crate::{ dirs::get_tmp_dir, @@ -17,19 +17,18 @@ use crossbeam_deque::{Stealer, Worker}; use std::{ fs::create_dir_all, sync::{ - atomic::{AtomicBool, AtomicIsize, Ordering}, + atomic::{AtomicBool, AtomicI32, Ordering}, Arc, }, time::{SystemTime, UNIX_EPOCH}, }; -use tracing::debug; +use tracing::{debug, error}; /// Execute a particular "line" of tasks. 
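The `HAS_OUTPUT_LOG_MSG` default flipping from `true` to `false` above is the heart of the output fix in this commit: the task indicator no longer believes a log line was printed before it ever started. The protocol between the logger and the indicator, sketched standalone (`note_log_line` and `log_line_flew_by` are illustrative names; the real call sites live in `log.rs` and `task_indicator.rs`):

```rust
use lazy_static::lazy_static;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

lazy_static! {
    /// Set by the logger whenever a log line is emitted; starts false
    /// so lines printed before the indicator exists are not counted.
    static ref HAS_OUTPUT_LOG_MSG: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
}

/// Called from the tracing subscriber after writing a log line.
fn note_log_line() {
    HAS_OUTPUT_LOG_MSG.store(true, Ordering::Release);
}

/// Called by the task indicator before erasing its status lines:
/// swap() reads and clears the flag in one atomic step, so a log line
/// that "flew by" suppresses exactly one erase.
fn log_line_flew_by() -> bool {
    HAS_OUTPUT_LOG_MSG.swap(false, Ordering::AcqRel)
}
```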
-#[allow(clippy::too_many_arguments)] async fn execute_task_line( src_string: Arc, stealer: Stealer, - rc: Arc, + rc: Arc, should_stop: Arc, log_channel: Sender<(String, String, bool)>, task_channel: Sender, @@ -68,7 +67,7 @@ async fn execute_task_line( worker_count, task.get_task_name() ))); - let task_rc = task + let task_rc_res = task .get_executor() .execute( log_channel.clone(), @@ -78,7 +77,15 @@ async fn execute_task_line( worker_count, ) .await; - new_rc = task_rc; + match task_rc_res { + Ok(rc) => { + new_rc = rc; + } + Err(error) => { + error!("{:?}", error); + new_rc = 10; + } + } let _ = task_channel.send(TaskChange::FinishedTask(format!( "{}-{}", worker_count, @@ -92,7 +99,7 @@ async fn execute_task_line( worker_count, task.get_task_name() ))); - let task_rc = task + let task_rc_res = task .get_executor() .execute( log_channel.clone(), @@ -102,7 +109,15 @@ async fn execute_task_line( worker_count, ) .await; - new_rc = task_rc; + match task_rc_res { + Ok(rc) => { + new_rc = rc; + } + Err(error) => { + error!("{:?}", error); + new_rc = 10; + } + } let _ = task_channel.send(TaskChange::FinishedTask(format!( "{}-{}", worker_count, @@ -179,7 +194,6 @@ fn build_helpers_source_string(helpers: Vec) -> Result { /// # Errors /// /// If we could not execute the tasks in parallel. -#[allow(clippy::cast_possible_truncation)] pub async fn execute_tasks_in_parallel( helpers: Vec, tasks: Worker, @@ -200,7 +214,7 @@ pub async fn execute_tasks_in_parallel( let cloned_task_sender = task_sender.clone(); let stealer = tasks.stealer(); - let finished_line = Arc::new(AtomicIsize::new(-1)); + let finished_line = Arc::new(AtomicI32::new(-1)); let finished_clone = finished_line.clone(); async_std::task::spawn(async move { @@ -234,7 +248,7 @@ pub async fn execute_tasks_in_parallel( let mut any_more = false; for potential_rc in &rc_indicators { - let new_rc = potential_rc.load(Ordering::Acquire); + let mut new_rc = potential_rc.load(Ordering::Acquire); if new_rc == -1 { any_more = true; break; @@ -242,15 +256,14 @@ pub async fn execute_tasks_in_parallel( debug!("Found finished task rc: [{}]", new_rc); // If it's already not equal to 0 preserve the original exit code. if rc == 0 { - let mut new_rc_as_i32 = new_rc as i32; - if new_rc_as_i32 > 255 { - new_rc_as_i32 = 255; + if new_rc > 255 { + new_rc = 255; } - if new_rc_as_i32 < 0 { - new_rc_as_i32 = 255; + if new_rc < 0 { + new_rc = 255; } - rc += new_rc_as_i32; + rc += new_rc; } } } diff --git a/src/tasks/execution/preparation.rs b/src/tasks/execution/preparation.rs index 01c9510..1b59ebd 100644 --- a/src/tasks/execution/preparation.rs +++ b/src/tasks/execution/preparation.rs @@ -223,20 +223,329 @@ where iter.into_iter().all(move |x| uniq.insert(x)) } +/// adds a command type task to the ordered execution list. 
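One note on the error-handling change that runs through this whole hunk: `Executor::execute` now returns `Result<isize>` instead of a bare `isize`, and `execute_task_line` maps any `Err` to the sentinel exit code 10 while the collector clamps codes into `0..=255`. Distilled into one function (this helper does not exist in the patch; it just condenses the two match arms and the clamping loop):

```rust
/// Collapse an executor result into the exit-code bookkeeping used by
/// `execute_task_line`: errors become the sentinel rc 10, and anything
/// outside 0..=255 is clamped to 255 before being accumulated.
fn result_to_rc(res: Result<i32, Box<dyn std::error::Error>>) -> i32 {
    match res {
        Ok(rc) if (0..=255).contains(&rc) => rc,
        Ok(_) => 255,
        Err(err) => {
            eprintln!("{:?}", err);
            10
        }
    }
}

fn main() {
    assert_eq!(result_to_rc(Ok(0)), 0);
    assert_eq!(result_to_rc(Ok(512)), 255);
    assert_eq!(result_to_rc(Err("boom".into())), 10);
}
```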
+async fn add_command_task_to_execution_list<'a, 'b>(
+	task: &'a TaskConf,
+	fetcher: &'a FetcherRepository,
+	executors: &'a mut ExecutorRepository,
+	root_directory: PathBuf,
+	arguments: &'a [String],
+	pipeline_id: String,
+	work_queue: &'a mut WorkQueue<'b>,
+) -> Result<usize> {
+	match work_queue {
+		WorkQueue::Queue(queue) => queue.push(WorkUnit::SingleTask(
+			command_to_executable_task(
+				pipeline_id,
+				task,
+				fetcher,
+				executors,
+				root_directory,
+				Vec::from(arguments),
+			)
+			.await?,
+		)),
+		WorkQueue::VecQueue(vec) => vec.push(
+			command_to_executable_task(
+				pipeline_id,
+				task,
+				fetcher,
+				executors,
+				root_directory,
+				Vec::from(arguments),
+			)
+			.await?,
+		),
+	};
+
+	Ok(1)
+}
+
+/// adds a oneof type task to the ordered execution list.
+#[allow(clippy::too_many_arguments)]
+async fn add_oneof_task_to_execution_list<'a, 'b, H: BuildHasher>(
+	tasks: &'a HashMap<String, TaskConf, H>,
+	starting_task: &'a TaskConf,
+	fetcher: &'a FetcherRepository,
+	executors: &'a mut ExecutorRepository,
+	root_directory: PathBuf,
+	arguments: &'a [String],
+	pipeline_id: String,
+	work_queue: &'a mut WorkQueue<'b>,
+) -> Result<(bool, usize)> {
+	// Parse a `oneof` type into a list of tasks.
+	// This _will_ recurse if an option is selected that is not a command task.
+
+	// First make sure someone has specified an options block for a oneof type.
+	let options = starting_task.get_options();
+	if options.is_none() {
+		return Err(eyre!(
+			"Task type is marked oneof but has no options: [{}]",
+			starting_task.get_name()
+		))
+		.suggestion("If you really meant to have no options specify an empty array: `[]`.");
+	}
+	let options = options.unwrap();
+
+	// If someone specified an empty options array, assume it's intentional.
+	if options.is_empty() {
+		return Ok((true, 0));
+	}
+	// If it's not an empty set of options we need to know how to choose one of the tasks.
+	if arguments.is_empty() {
+		return Err(eyre!(
+			"The OneOf task: [{}] was selected, but was provided no arguments to know which option to choose.",
+			starting_task.get_name(),
+		));
+	}
+
+	// Try to grab the option based on the first argument.
+	// The other arguments are dropped on purpose.
+	let potential_option = options
+		.iter()
+		.find(|option| option.get_name() == arguments[0]);
+	if potential_option.is_none() {
+		return Err(eyre!(
+			"The OneOf task: [{}] was selected, and attempted to find the option: [{}], but that option was not found.",
+			starting_task.get_name(),
+			arguments[0],
+		));
+	}
+	let selected_option = potential_option.unwrap();
+
+	// Try to turn that option into a relevant task.
+	//
+	// Remember we may have failed fetching from a remote endpoint,
+	// so it may not be in the TaskGraph.
+	let potential_option_as_task = tasks.get(selected_option.get_task_name());
+	if potential_option_as_task.is_none() {
+		return Err(eyre!(
+			"The OneOf task: [{}], selected the option: [{}], but failed to find the task associated to it: [{}]",
+			starting_task.get_name(),
+			selected_option.get_name(),
+			selected_option.get_task_name(),
+		)).suggestion("Please consult the log above to ensure no fetch errors were encountered.");
+	}
+	let task = potential_option_as_task.unwrap();
+
+	let final_args = if let Some(args_ref) = selected_option.get_args() {
+		args_ref.clone()
+	} else {
+		Vec::new()
+	};
+
+	let mut size = 0;
+
+	// Now let's add this task to the list of things to run.
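Since option selection is easy to get wrong, here is the lookup above reduced to its core: the first argument names the option, and any remaining arguments are dropped on purpose. `SimpleOption` is a pared-down stand-in for `config::types::OneofOption`:

```rust
struct SimpleOption {
    name: String,
    task_name: String,
}

/// Resolve a oneof the way `add_oneof_task_to_execution_list` does:
/// match the first argument against option names; everything after
/// `arguments[0]` is deliberately ignored.
fn select_option<'a>(
    options: &'a [SimpleOption],
    arguments: &[String],
) -> Option<&'a SimpleOption> {
    let first = arguments.first()?;
    options.iter().find(|opt| &opt.name == first)
}

fn main() {
    let options = vec![
        SimpleOption { name: "lint".into(), task_name: "run-lint".into() },
        SimpleOption { name: "build".into(), task_name: "run-build".into() },
    ];
    let args = vec!["build".to_owned(), "ignored-extra".to_owned()];
    let chosen = select_option(&options, &args).expect("option should exist");
    assert_eq!(chosen.task_name, "run-build");
}
```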
+ match *task.get_type() { + TaskType::Command => { + size += add_command_task_to_execution_list( + task, + fetcher, + executors, + root_directory, + &final_args, + pipeline_id, + work_queue, + ) + .await? + } + TaskType::Oneof | TaskType::Pipeline | TaskType::ParallelPipeline => { + size += build_ordered_execution_list( + tasks, + task, + fetcher, + executors, + root_directory, + &final_args, + pipeline_id, + work_queue, + ) + .await? + } + }; + + Ok((false, size)) +} + +/// Add a pipeline type task to the current execution list. +async fn add_pipeline_to_execution_list<'a, 'b, H: BuildHasher>( + tasks: &'a HashMap, + starting_task: &'a TaskConf, + fetcher: &'a FetcherRepository, + executors: &'a mut ExecutorRepository, + root_directory: PathBuf, + work_queue: &'a mut WorkQueue<'b>, +) -> Result { + let mut size = 0; + + let optional_steps = starting_task.get_steps(); + if optional_steps.is_none() { + return Err(eyre!( + "Pipeline task: [{}] does not have any steps.", + starting_task.get_name(), + )) + .suggestion("If you meant to have a pipeline with no steps use an empty array: `[]`."); + } + + let steps = optional_steps.unwrap(); + let my_pid = new_pipeline_id(); + debug!( + "Pipeline task: [{}] has been given the pipeline-id: [{}]", + starting_task.get_name(), + my_pid, + ); + + let mut executable_steps = Vec::new(); + let mut executable_steps_as_queue = WorkQueue::VecQueue(&mut executable_steps); + + for step in steps { + let potential_task = tasks.get(step.get_task_name()); + if potential_task.is_none() { + return Err(eyre!( + "The Pipeline task: [{}], on step: [{}], failed to find the task associated to it: [{}]", + starting_task.get_name(), + step.get_name(), + step.get_task_name() + )) + .suggestion( + "Please consult the log above to ensure no fetch errors were encountered.", + ); + } + let task = potential_task.unwrap(); + + let final_args = if let Some(args_ref) = step.get_args() { + args_ref.clone() + } else { + Vec::new() + }; + + match *task.get_type() { + TaskType::Command => { + add_command_task_to_execution_list( + task, + fetcher, + executors, + root_directory.clone(), + &final_args, + my_pid.clone(), + &mut executable_steps_as_queue, + ) + .await?; + } + TaskType::Oneof | TaskType::Pipeline | TaskType::ParallelPipeline => { + build_ordered_execution_list( + tasks, + task, + fetcher, + executors, + root_directory.clone(), + &final_args, + my_pid.clone(), + &mut executable_steps_as_queue, + ) + .await?; + } + } + } + + size += executable_steps.len(); + match work_queue { + WorkQueue::Queue(queue) => queue.push(WorkUnit::Pipeline(executable_steps)), + WorkQueue::VecQueue(vec) => vec.extend(executable_steps), + } + + Ok(size) +} + +async fn add_parallel_pipeline_to_execution_list<'a, 'b, H: BuildHasher>( + tasks: &'a HashMap, + starting_task: &'a TaskConf, + fetcher: &'a FetcherRepository, + executors: &'a mut ExecutorRepository, + root_directory: PathBuf, + work_queue: &'a mut WorkQueue<'b>, +) -> Result { + let mut size = 0; + + let optional_steps = starting_task.get_steps(); + if optional_steps.is_none() { + return Err(eyre!( + "Parallel-Pipeline task: [{}] does not have any steps.", + starting_task.get_name(), + )) + .suggestion( + "If you meant to have a parallel-pipeline with no steps use an empty array: `[]`.", + ); + } + + let steps = optional_steps.unwrap(); + for step in steps { + let potential_task = tasks.get(step.get_task_name()); + if potential_task.is_none() { + return Err(eyre!( + "The Parallel-Pipeline task: [{}], on step: [{}], failed to find the task 
associated to it: [{}]", + starting_task.get_name(), + step.get_name(), + step.get_task_name() + )) + .suggestion( + "Please consult the log above to ensure no fetch errors were encountered.", + ); + } + let task = potential_task.unwrap(); + + let final_args = if let Some(args_ref) = step.get_args() { + args_ref.clone() + } else { + Vec::new() + }; + + let task_pid = new_pipeline_id(); + debug!( + "Parallel-Pipeline task: [{}], inner task: [{}] has been given the pipeline-id: [{}]", + starting_task.get_name(), + task.get_name(), + task_pid, + ); + + match *task.get_type() { + TaskType::Command => { + size += add_command_task_to_execution_list( + task, + fetcher, + executors, + root_directory.clone(), + &final_args, + task_pid, + work_queue, + ) + .await? + } + TaskType::Oneof | TaskType::Pipeline | TaskType::ParallelPipeline => { + size += build_ordered_execution_list( + tasks, + task, + fetcher, + executors, + root_directory.clone(), + &final_args, + task_pid, + work_queue, + ) + .await? + } + } + } + + Ok(size) +} + /// Taking the full (valid) map of tasks, and a `starting_task` to start with /// build a full list of the tasks that need to be executed, in which order. -/// -/// `tasks`: the consumed state of a once valid `TaskDag`. -/// `starting_task`: the task to actually start with. -/// `fetcher`: Used to fetch the actual task contents. -/// `executors`: The repository of executors. -/// `root_directory`: The root directory of the filesystem. -/// `arguments`: The arguments provided (from wherever) to know how to -/// properly traverse oneof's. -/// `pipeline_id`: the id of this pipeline. -#[allow(clippy::too_many_lines, clippy::too_many_arguments)] +#[allow(clippy::too_many_arguments)] #[must_use] -pub fn build_ordered_execution_list<'a, H: BuildHasher>( +pub fn build_ordered_execution_list<'a, 'b, H: BuildHasher>( tasks: &'a HashMap, starting_task: &'a TaskConf, fetcher: &'a FetcherRepository, @@ -244,323 +553,63 @@ pub fn build_ordered_execution_list<'a, H: BuildHasher>( root_directory: PathBuf, arguments: &'a [String], pipeline_id: String, - work_queue: &'a mut WorkQueue, + work_queue: &'a mut WorkQueue<'b>, ) -> Pin>>> { Box::pin(async move { let mut size = 0; match *starting_task.get_type() { TaskType::Command => { - match work_queue { - WorkQueue::Queue(queue) => queue.push(WorkUnit::SingleTask( - command_to_executable_task( - pipeline_id, - starting_task, - fetcher, - executors, - root_directory, - Vec::from(arguments), - ) - .await?, - )), - WorkQueue::VecQueue(vec) => vec.push( - command_to_executable_task( - pipeline_id, - starting_task, - fetcher, - executors, - root_directory, - Vec::from(arguments), - ) - .await?, - ), - } - size += 1; + size += add_command_task_to_execution_list( + starting_task, + fetcher, + executors, + root_directory, + arguments, + pipeline_id, + work_queue, + ) + .await? } TaskType::Oneof => { - // Parse a `oneof` type into a list of tasks. - // This _will_ recurse if an option is selected that is not a command task. - - // First make sure someone has specified an options block for a oneof type. - let options = starting_task.get_options(); - if options.is_none() { - return Err(eyre!( - "Task type is marked oneof but has no options: [{}]", - starting_task.get_name() - )) - .suggestion( - "If you really meant to have no options specify an empty array: `[]`.", - ); - } - let options = options.unwrap(); - - // If someone specified an empty options array, assume it's intentional. 
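The `WorkQueue<'b>` type threading through every helper above is easiest to see in isolation: top-level units go straight onto the crossbeam deque the workers steal from, while a pipeline's steps are first collected into a plain `Vec` so they stay ordered inside one `WorkUnit::Pipeline`. A minimal sketch, with `String` standing in for `ExecutableTask` and the enum shapes simplified from the match arms above:

```rust
use crossbeam_deque::Worker;

/// Simplified stand-ins for the real work-unit shapes.
enum WorkUnit {
    SingleTask(String),
    Pipeline(Vec<String>),
}

/// The two queue backings the builders can push into.
enum WorkQueue<'a> {
    Queue(&'a Worker<WorkUnit>),
    VecQueue(&'a mut Vec<String>),
}

impl<'a> WorkQueue<'a> {
    /// Mirrors the match arms in `add_command_task_to_execution_list`.
    fn push_single(&mut self, task: String) {
        match self {
            WorkQueue::Queue(queue) => queue.push(WorkUnit::SingleTask(task)),
            WorkQueue::VecQueue(vec) => vec.push(task),
        }
    }
}

fn main() {
    let worker: Worker<WorkUnit> = Worker::new_fifo();
    WorkQueue::Queue(&worker).push_single("top-level".to_owned());

    let mut steps = Vec::new();
    WorkQueue::VecQueue(&mut steps).push_single("pipeline-step".to_owned());
    assert_eq!(steps, vec!["pipeline-step".to_owned()]);
}
```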
- if options.is_empty() { + let (exit_early, extra_size) = add_oneof_task_to_execution_list( + tasks, + starting_task, + fetcher, + executors, + root_directory, + arguments, + pipeline_id, + work_queue, + ) + .await?; + + size += extra_size; + if exit_early { return Ok(size); } - // If it's not an empty set of options we need to know how to choose one of the tasks. - if arguments.is_empty() { - return Err(eyre!( - "The OneOf task: [{}] was selected, but was provided no arguments to know which option to choose.", - starting_task.get_name(), - )); - } - - // Try to grab the option based on the first argument. - // The other arguments are dropped on purpose. - let potential_option = options - .iter() - .find(|option| option.get_name() == arguments[0]); - if potential_option.is_none() { - return Err(eyre!( - "The OneOf task: [{}] was selected, and attempted to find the option: [{}], but that option was not found.", - starting_task.get_name(), - arguments[0], - )); - } - let selected_option = potential_option.unwrap(); - - // Try to turn that option into a relevant task. - // - // Remember we may have failed fetching from a remote endpoint. - // so it may not be in the TaskGraph. - let potential_option_as_task = tasks.get(selected_option.get_task_name()); - if potential_option_as_task.is_none() { - return Err(eyre!( - "The OneOf task: [{}], selected the option: [{}], but failed to find the task associated to it: [{}]", - starting_task.get_name(), - selected_option.get_name(), - selected_option.get_task_name(), - )).suggestion("Please consult the log above to ensure no fetch errors were enounctered."); - } - let task = potential_option_as_task.unwrap(); - - let final_args = if let Some(args_ref) = selected_option.get_args() { - args_ref.clone() - } else { - Vec::new() - }; - - // Now let's add this task to the list of things to run. 
- match *task.get_type() { - TaskType::Command => { - match work_queue { - WorkQueue::Queue(queue) => queue.push(WorkUnit::SingleTask( - command_to_executable_task( - pipeline_id, - task, - fetcher, - executors, - root_directory, - final_args, - ) - .await?, - )), - WorkQueue::VecQueue(vec) => vec.push( - command_to_executable_task( - pipeline_id, - task, - fetcher, - executors, - root_directory, - final_args, - ) - .await?, - ), - } - - size += 1; - } - TaskType::Oneof | TaskType::Pipeline | TaskType::ParallelPipeline => { - size += build_ordered_execution_list( - tasks, - task, - fetcher, - executors, - root_directory, - &final_args, - pipeline_id, - work_queue, - ) - .await?; - } - } } TaskType::Pipeline => { - let optional_steps = starting_task.get_steps(); - if optional_steps.is_none() { - return Err(eyre!( - "Pipeline task: [{}] does not have any steps.", - starting_task.get_name(), - )) - .suggestion( - "If you meant to have a pipeline with no steps use an empty array: `[]`.", - ); - } - - let steps = optional_steps.unwrap(); - let my_pid = new_pipeline_id(); - debug!( - "Pipeline task: [{}] has been given the pipeline-id: [{}]", - starting_task.get_name(), - my_pid, - ); - - let mut executable_steps = Vec::new(); - let mut executable_steps_as_queue = WorkQueue::VecQueue(&mut executable_steps); - - for step in steps { - let potential_task = tasks.get(step.get_task_name()); - if potential_task.is_none() { - return Err(eyre!( - "The Pipeline task: [{}], on step: [{}], failed to find the task associated to it: [{}]", - starting_task.get_name(), - step.get_name(), - step.get_task_name() - )).suggestion("Please consult the log above to ensure no fetch errors were encountered."); - } - let task = potential_task.unwrap(); - - let final_args = if let Some(args_ref) = step.get_args() { - args_ref.clone() - } else { - Vec::new() - }; - - match *task.get_type() { - TaskType::Command => { - match executable_steps_as_queue { - WorkQueue::Queue(_) => unreachable!(), - WorkQueue::VecQueue(ref mut vec) => vec.push( - command_to_executable_task( - my_pid.clone(), - task, - fetcher, - executors, - root_directory.clone(), - final_args, - ) - .await?, - ), - }; - } - TaskType::Oneof | TaskType::Pipeline | TaskType::ParallelPipeline => { - if let Some(args) = step.get_args() { - build_ordered_execution_list( - tasks, - task, - fetcher, - executors, - root_directory.clone(), - &args, - my_pid.clone(), - &mut executable_steps_as_queue, - ) - .await?; - } else { - let args = Vec::new(); - build_ordered_execution_list( - tasks, - task, - fetcher, - executors, - root_directory.clone(), - &args, - my_pid.clone(), - &mut executable_steps_as_queue, - ) - .await?; - } - } - } - } - - size += executable_steps.len(); - match work_queue { - WorkQueue::Queue(queue) => queue.push(WorkUnit::Pipeline(executable_steps)), - WorkQueue::VecQueue(vec) => vec.extend(executable_steps), - } + size += add_pipeline_to_execution_list( + tasks, + starting_task, + fetcher, + executors, + root_directory, + work_queue, + ) + .await?; } TaskType::ParallelPipeline => { - let optional_steps = starting_task.get_steps(); - if optional_steps.is_none() { - return Err(eyre!( - "Parallel-Pipeline task: [{}] does not have any steps.", - starting_task.get_name(), - )).suggestion("If you meant to have a parallel-pipeline with no steps use an empty array: `[]`."); - } - - let steps = optional_steps.unwrap(); - for step in steps { - let potential_task = tasks.get(step.get_task_name()); - if potential_task.is_none() { - return Err(eyre!( - "The 
Parallel-Pipeline task: [{}], on step: [{}], failed to find the task associated to it: [{}]", - starting_task.get_name(), - step.get_name(), - step.get_task_name() - )).suggestion("Please consult the log above to ensure no fetch errors were encountered."); - } - let task = potential_task.unwrap(); - - let final_args = if let Some(args_ref) = step.get_args() { - args_ref.clone() - } else { - Vec::new() - }; - - let task_pid = new_pipeline_id(); - debug!( - "Parallel-Pipeline task: [{}], inner task: [{}] has been given the pipeline-id: [{}]", - starting_task.get_name(), - task.get_name(), - task_pid, - ); - - match *task.get_type() { - TaskType::Command => { - match work_queue { - WorkQueue::Queue(queue) => queue.push(WorkUnit::SingleTask( - command_to_executable_task( - task_pid, - task, - fetcher, - executors, - root_directory.clone(), - final_args, - ) - .await?, - )), - WorkQueue::VecQueue(vec) => vec.push( - command_to_executable_task( - task_pid, - task, - fetcher, - executors, - root_directory.clone(), - final_args, - ) - .await?, - ), - }; - - size += 1; - } - TaskType::Oneof | TaskType::Pipeline | TaskType::ParallelPipeline => { - size += build_ordered_execution_list( - tasks, - task, - fetcher, - executors, - root_directory.clone(), - &final_args, - task_pid, - work_queue, - ) - .await?; - } - } - } + size += add_parallel_pipeline_to_execution_list( + tasks, + starting_task, + fetcher, + executors, + root_directory, + work_queue, + ) + .await?; } } @@ -603,7 +652,6 @@ pub async fn fetch_helpers(tlc: &TopLevelConf, fr: &FetcherRepository) -> Result /// `fetcher`: used for fetching particular files/executors/etc. /// `executors`: the list of executors. /// `root_directory`: the root directory of the project. -#[allow(clippy::cast_possible_wrap, clippy::type_complexity)] #[must_use] pub fn build_concurrent_execution_list<'a, H: BuildHasher>( tasks: &'a HashMap, @@ -640,6 +688,30 @@ pub fn build_concurrent_execution_list<'a, H: BuildHasher>( &mut as_queue, ) .await?; + } else if *task.get_type() == TaskType::Oneof && task.get_options().is_some() { + for option in task.get_options().unwrap() { + if option.get_tags().is_none() { + continue; + } + + let uniq_tags_on_option: HashSet<&String> = + HashSet::from_iter(option.get_tags().unwrap().iter()); + if !has_unique_elements( + unique_tags.iter().chain(uniq_tags_on_option.iter()), + ) { + size += build_ordered_execution_list( + tasks, + task, + fetcher, + executors, + root_directory.clone(), + &[option.get_name().to_owned()], + new_pipeline_id(), + &mut as_queue, + ) + .await?; + } + } } } } diff --git a/src/tasks/mod.rs b/src/tasks/mod.rs index 44d491d..923c0b4 100644 --- a/src/tasks/mod.rs +++ b/src/tasks/mod.rs @@ -6,7 +6,7 @@ use crate::{ config::types::{LocationType, TaskConf, TaskConfFile, TaskType, TopLevelConf}, fetch::FetcherRepository, strsim::add_did_you_mean_text, - yaml_err::contextualize_yaml_err, + yaml_err::contextualize, }; use color_eyre::{ @@ -16,8 +16,8 @@ use color_eyre::{ use std::collections::{HashMap, HashSet}; use tracing::warn; -pub mod execution; -pub mod fs; +pub(crate) mod execution; +pub(crate) mod fs; /// Describes the full "graph" of tasks. /// @@ -31,6 +31,75 @@ pub struct TaskGraph { } impl TaskGraph { + fn parse_task( + task_conf_file_src: &str, + task_conf: TaskConf, + internal_task_names: &mut HashSet, + unsatisfied_task_names: &mut HashSet, + flatenned_tasks: &mut HashMap, + ) -> Result<()> { + let task_name = task_conf.get_name(); + + // If we've already seen this task... it's an error. 
+ // Task names need to be globally unique. + if let Some(other_task_conf) = flatenned_tasks.get(task_name) { + return Err(eyre!( + "Found duplicate task named: [{}]. Originally defined in config at: [{}], found again in config at: [{}]", + task_name, + other_task_conf.get_source_path(), + task_conf_file_src, + )); + } + + // If it's a 'oneof', 'parallel-pipeline', or 'pipeline' type, we + // need to parse it's children so we can ensure everything is valid. + // + // We call `internal_task_names.remove()` always (it'll be a no-op if it + // doesn't contain the key). The `internal_task_names` are tasks that are marked + // `internal: true`, but don't yet have a reference. By being in a + // oneof/parallel-pipeline/pipeline they themselves have a reference. + // + // Next we check if the option "exists", if not. we add it to `unsatisfied_task_names` + // so it can be checked later. + let ttype = task_conf.get_type(); + match ttype { + TaskType::Oneof => { + if let Some(options) = task_conf.get_options() { + for option in options { + internal_task_names.remove(option.get_task_name()); + if !flatenned_tasks.contains_key(option.get_task_name()) { + unsatisfied_task_names.insert(option.get_task_name().to_owned()); + } + } + } + } + TaskType::Pipeline | TaskType::ParallelPipeline => { + if let Some(steps) = task_conf.get_steps() { + for step in steps { + internal_task_names.remove(step.get_task_name()); + if !flatenned_tasks.contains_key(step.get_task_name()) { + unsatisfied_task_names.insert(step.get_task_name().to_owned()); + } + } + } + } + TaskType::Command => {} + } + + // If we're an internal task, and someone hasn't referenced us already + // go ahead and add ourselves to the list of "waiting for a ref" set. + if task_conf.is_internal() && !unsatisfied_task_names.contains(task_name) { + internal_task_names.insert(task_name.to_owned()); + } + // NO-OP if we're not there, otherwise let people know we exist. + unsatisfied_task_names.remove(task_name); + + // Add ourselves to the final map. + flatenned_tasks.insert(task_name.to_owned(), task_conf); + + Ok(()) + } + /// Create a new `TaskGraph`. /// /// NOTE: this will completely parse all the task files (remote or otherwise), @@ -44,7 +113,6 @@ impl TaskGraph { /// - When there is an error fetching the tasks yaml files. /// - When the task yaml files are invalid yaml. /// - When the task yaml file has some sort of invariant error. - #[allow(clippy::cognitive_complexity, clippy::too_many_lines)] pub async fn new(tlc: &TopLevelConf, fetcher: &FetcherRepository) -> Result { let span = tracing::info_span!("finding_tasks"); let _guard = span.enter(); @@ -75,7 +143,7 @@ impl TaskGraph { .await .wrap_err(format!( "Failed fetching tasks specified at `.dl/config.yml:task_locations:{}`", - tl_idx + tl_idx, )); // For HTTP errors we're going to try to continue, if your FS fails @@ -85,18 +153,17 @@ impl TaskGraph { warn!("{:?}", err); warn!("Trying to continue, incase the failing remote endpoint doesn't matter for this run."); allowing_dag_errors = true; - } else { - warn!("Failed to fetch a file from the filesystem! Assuming this is a critical error."); - return Err(err.wrap_err(format!( - "Failed to read the file: [{}] from the filesystem", - task_location.get_at() - ))); + continue; } - continue; + + warn!("Failed to fetch a file from the filesystem! 
Assuming this is a critical error."); + return Err(err.wrap_err(format!( + "Failed to read the file: [{}] from the filesystem", + task_location.get_at() + ))); } - let fetched_tasks = resulting_fetched_tasks.unwrap(); - for task_conf_file in fetched_tasks { + for task_conf_file in resulting_fetched_tasks.unwrap() { let task_yaml_res = serde_yaml::from_slice::(&task_conf_file.get_contents()); if let Err(tye) = task_yaml_res { @@ -107,7 +174,7 @@ impl TaskGraph { continue; } - return contextualize_yaml_err( + return contextualize( Err(tye), task_conf_file.get_source(), &String::from_utf8_lossy(task_conf_file.get_contents()).to_string() @@ -122,66 +189,13 @@ impl TaskGraph { // we're reading. So we have to allow for cases where a task _may_ not // be parsed yet. for task_conf in task_yaml.consume_tasks() { - let task_name = task_conf.get_name(); - - // If we've already seen this task... it's an error. - // Task names need to be globally unique. - if let Some(other_task_conf) = flatenned_tasks.get(task_name) { - return Err(eyre!( - "Found duplicate task named: [{}]. Originally defined in config at: [{}], found again in config at: [{}]", - task_name, - other_task_conf.get_source_path(), - task_conf_file.get_source(), - )); - } - - // If it's a 'oneof', 'parallel-pipeline', or 'pipeline' type, we - // need to parse it's children so we can ensure everything is valid. - // - // We call `internal_task_names.remove()` always (it'll be a no-op if it - // doesn't contain the key). The `internal_task_names` are tasks that are marked - // `internal: true`, but don't yet have a reference. By being in a - // oneof/parallel-pipeline/pipeline they themselves have a reference. - // - // Next we check if the option "exists", if not. we add it to `unsatisfied_task_names` - // so it can be checked later. - let ttype = task_conf.get_type(); - match ttype { - TaskType::Oneof => { - if let Some(options) = task_conf.get_options() { - for option in options { - internal_task_names.remove(option.get_task_name()); - if !flatenned_tasks.contains_key(option.get_task_name()) { - unsatisfied_task_names - .insert(option.get_task_name().to_owned()); - } - } - } - } - TaskType::Pipeline | TaskType::ParallelPipeline => { - if let Some(steps) = task_conf.get_steps() { - for step in steps { - internal_task_names.remove(step.get_task_name()); - if !flatenned_tasks.contains_key(step.get_task_name()) { - unsatisfied_task_names - .insert(step.get_task_name().to_owned()); - } - } - } - } - TaskType::Command => {} - } - - // If we're an internal task, and someone hasn't referenced us already - // go ahead and add ourselves to the list of "waiting for a ref" set. - if task_conf.is_internal() && !unsatisfied_task_names.contains(task_name) { - internal_task_names.insert(task_name.to_owned()); - } - // NO-OP if we're not there, otherwise let people know we exist. - unsatisfied_task_names.remove(task_name); - - // Add ourselves to the final map. - flatenned_tasks.insert(task_name.to_owned(), task_conf); + Self::parse_task( + task_conf_file.get_source(), + task_conf, + &mut internal_task_names, + &mut unsatisfied_task_names, + &mut flatenned_tasks, + )?; } } } @@ -233,12 +247,6 @@ impl TaskGraph { } } - /// Get a full list of the tasks that are available. - #[must_use] - pub fn get_all_tasks(&self) -> &HashMap { - &self.flattened_tasks - } - /// Consume the overlying tasks type, and get all the tasks. 
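`get_all_tasks()` is dropped here, presumably dead after the rework; the consuming accessor below is what execution actually uses once the graph has finished validating. The pattern in miniature, with `String` again standing in for `TaskConf`:

```rust
use std::collections::HashMap;

struct TaskGraph {
    flattened_tasks: HashMap<String, String>,
}

impl TaskGraph {
    /// Consuming getter: the graph is only a validation stage, so once
    /// it has done its job the tasks are handed out by value instead
    /// of through a borrowed `get_all_tasks()` accessor.
    fn consume_and_get_tasks(self) -> HashMap<String, String> {
        self.flattened_tasks
    }
}

fn main() {
    let graph = TaskGraph {
        flattened_tasks: HashMap::from([("build".to_owned(), "command".to_owned())]),
    };
    let tasks = graph.consume_and_get_tasks();
    assert!(tasks.contains_key("build"));
    // `graph` has been moved; the map now lives with the executor side.
}
```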
#[must_use] pub fn consume_and_get_tasks(self) -> HashMap { diff --git a/src/terminal/mod.rs b/src/terminal/mod.rs index 5581a94..d801cb1 100644 --- a/src/terminal/mod.rs +++ b/src/terminal/mod.rs @@ -5,14 +5,14 @@ //! command ever needs to do something fancy. use atty::Stream; -use colored::*; +use colored::Colorize; use crossbeam_channel::Sender; -use lazy_static::*; +use lazy_static::lazy_static; use std::sync::Arc; use term_size::dimensions as terminal_dimensions; -pub mod task_indicator; -pub mod throttle; +pub(crate) mod task_indicator; +pub(crate) mod throttle; lazy_static! { pub static ref TERM: Arc = Arc::new(Term::new()); @@ -142,7 +142,6 @@ impl Term { /// Render a list of items with a particular description. /// /// `list_with_descriptions`: A pair of . - #[allow(unused_assignments)] #[must_use] pub fn render_list_with_description( &self, @@ -166,7 +165,7 @@ impl Term { let mut result = String::new(); for (key, description) in list_with_descriptions { - let mut actual_key = String::new(); + let mut actual_key; if key.len() > 15 { actual_key = key.chars().take(12).collect(); actual_key += "..."; @@ -213,7 +212,6 @@ impl Term { /// 1. The task indicator instance. /// 2. A channel sender to send logs (and the task that created them). /// 3. A channel sender to send task changes too. - #[allow(clippy::type_complexity)] #[must_use] pub fn create_task_indicator( &self, @@ -223,6 +221,10 @@ impl Term { Sender<(String, String, bool)>, Sender, ) { - task_indicator::TaskIndicator::new(task_count, self.is_colour, self.is_colour_err) + task_indicator::TaskIndicator::new( + task_count, + self.should_color_stdout(), + self.should_color_stderr(), + ) } } diff --git a/src/terminal/task_indicator.rs b/src/terminal/task_indicator.rs index 2865274..397bd1f 100644 --- a/src/terminal/task_indicator.rs +++ b/src/terminal/task_indicator.rs @@ -1,6 +1,6 @@ use crate::{log::HAS_OUTPUT_LOG_MSG, terminal::throttle::Throttle}; -use colored::*; +use colored::Colorize; use crossbeam_channel::{unbounded, Receiver, Sender}; use std::collections::{HashMap, HashSet}; use term_size::dimensions as terminal_dimensions; @@ -118,7 +118,6 @@ impl TaskIndicator { /// 1. The task indicator instance. /// 2. A channel sender to send logs (and the task that created them). /// 3. A channel sender to send task changes too. - #[allow(clippy::type_complexity)] #[must_use] pub fn new( task_count: usize, @@ -259,7 +258,6 @@ impl TaskIndicator { } /// Stop this task indicator, and flush all remaining logs. - #[allow(clippy::map_entry)] pub fn stop_and_flush(mut self) { // Ignore task status updates. while let Ok(_) = self.task_changes.try_recv() {} @@ -333,10 +331,11 @@ impl TaskIndicator { fn erase_task_lines(&mut self) { // For each line we previously rendered. if self.lines_previously_rendered == 0 { + HAS_OUTPUT_LOG_MSG.store(false, std::sync::atomic::Ordering::Release); return; } // Don't earse if a log line flew by. 
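The erase/redraw dance this hunk adjusts: the renderer now always ends its frame with a trailing `\n` and counts it (`lines_previously_rendered = self.tasks_running.len() + 2`), so the eraser removes exactly what was drawn. The escape sequences themselves are not visible in this hunk; a generic sketch of the technique:

```rust
/// Erase the status block rendered on the previous tick by moving the
/// cursor up one line and clearing it, once per rendered line.
fn erase_previous_lines(lines_previously_rendered: usize) {
    for _ in 0..lines_previously_rendered {
        // ESC[1A: cursor up one line; ESC[2K: clear the entire line.
        eprint!("\x1B[1A\x1B[2K");
    }
}

fn main() {
    eprintln!("[0/3] 2 Tasks Running...");
    eprintln!("  build");
    // Pretend a tick passed, then erase the two lines we just drew.
    erase_previous_lines(2);
    eprintln!("[1/3] 1 Tasks Running...");
}
```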
- if HAS_OUTPUT_LOG_MSG.swap(false, std::sync::atomic::Ordering::Release) { + if HAS_OUTPUT_LOG_MSG.swap(false, std::sync::atomic::Ordering::AcqRel) { return; } @@ -355,11 +354,11 @@ impl TaskIndicator { fn print_tasks_colour(&mut self) { if self.tasks_running.is_empty() { eprint!( - "[{}/{}] {} Tasks Running...", + "[{}/{}] {} Tasks Running...\n", self.tasks_ran, self.task_count, 0 ); - self.lines_previously_rendered = 1; + self.lines_previously_rendered = 2; } else { let mut task_output_str = String::new(); for running_task in &self.tasks_running { @@ -372,13 +371,13 @@ impl TaskIndicator { } eprint!( - "{} {} Tasks Running...\n{}", + "{} {} Tasks Running...\n{}\n", &format!("[{}/{}]", self.tasks_ran, self.task_count).green(), self.tasks_running.len(), task_output_str, ); - self.lines_previously_rendered = self.tasks_running.len() + 1; + self.lines_previously_rendered = self.tasks_running.len() + 2; } } } diff --git a/src/yaml_err.rs b/src/yaml_err.rs index e811650..af301ef 100644 --- a/src/yaml_err.rs +++ b/src/yaml_err.rs @@ -88,7 +88,7 @@ fn did_you_mean_variant(err_msg: &str) -> Option> { /// # Errors /// /// - If the first parameter error'd. -pub fn contextualize_yaml_err( +pub fn contextualize( result: Result, src_filepath: &str, src_data: &str,