From 9420f519fa9e18ec46e9e5eb79f474fc1586309d Mon Sep 17 00:00:00 2001 From: Cheick Keita Date: Tue, 15 Aug 2023 13:03:20 -0700 Subject: [PATCH 01/20] Deserialize the coverage files directly into the output files (#3410) * deserialize the coverage files directly into the output files * reduce the amount of data structure cloning individual save function for each formats * cleanup * unformatted json * bring back spawn * remove stray comment * build fix * fix build --- src/agent/cobertura/src/lib.rs | 71 ++++++----- src/agent/coverage/examples/cobertura.rs | 2 +- src/agent/coverage/examples/record.rs | 6 +- src/agent/coverage/src/cobertura.rs | 6 +- src/agent/coverage/src/source.rs | 3 +- src/agent/coverage/tests/snapshot.rs | 2 +- .../src/coverage/binary.rs | 4 +- .../src/coverage/binary/v1.rs | 4 +- .../src/coverage/source.rs | 4 +- .../src/coverage/source/v1.rs | 8 +- .../src/tasks/coverage/generic.rs | 119 ++++++++++++------ 11 files changed, 137 insertions(+), 92 deletions(-) diff --git a/src/agent/cobertura/src/lib.rs b/src/agent/cobertura/src/lib.rs index 9becf4a113..e919e08733 100644 --- a/src/agent/cobertura/src/lib.rs +++ b/src/agent/cobertura/src/lib.rs @@ -12,15 +12,20 @@ impl CoberturaCoverage { let mut writer = Writer::new_with_indent(cursor, b' ', 2); - self.write_xml(&mut writer)?; + self._write_xml(&mut writer)?; let text = String::from_utf8(data)?; Ok(text) } } -trait WriteXml { - fn write_xml(&self, writer: &mut Writer) -> Result<()>; +pub trait WriteXml { + fn _write_xml(&self, writer: &mut Writer) -> Result<()>; + + fn write_xml(&self, writer: W) -> Result<()> { + let mut writer = Writer::new(writer); + self._write_xml(&mut writer) + } } // Only write optional fields if present. @@ -28,9 +33,9 @@ impl WriteXml for Option where T: WriteXml, { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { if let Some(value) = self { - value.write_xml(writer)?; + value._write_xml(writer)?; } Ok(()) @@ -41,9 +46,9 @@ impl WriteXml for Vec where T: WriteXml, { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { for value in self { - value.write_xml(writer)?; + value._write_xml(writer)?; } Ok(()) @@ -101,7 +106,7 @@ pub struct CoberturaCoverage { } impl WriteXml for CoberturaCoverage { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("coverage") .with_attributes([ @@ -116,8 +121,8 @@ impl WriteXml for CoberturaCoverage { ("timestamp", uint!(self.timestamp)), ]) .write_inner_content(|w| { - self.sources.write_xml(w)?; - self.packages.write_xml(w)?; + self.sources._write_xml(w)?; + self.packages._write_xml(w)?; Ok(()) })?; @@ -133,10 +138,10 @@ pub struct Sources { } impl WriteXml for Sources { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("sources") - .write_inner_content(|w| self.sources.write_xml(w))?; + .write_inner_content(|w| self.sources._write_xml(w))?; Ok(()) } @@ -149,7 +154,7 @@ pub struct Source { } impl WriteXml for Source { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("source") .with_attributes([("path", string!(self.path))]) @@ -166,10 +171,10 @@ pub struct Packages { } impl WriteXml for Packages { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn 
_write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("packages") - .write_inner_content(|w| self.packages.write_xml(w))?; + .write_inner_content(|w| self.packages._write_xml(w))?; Ok(()) } @@ -191,7 +196,7 @@ pub struct Package { } impl WriteXml for Package { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("package") .with_attributes([ @@ -200,7 +205,7 @@ impl WriteXml for Package { ("branch-rate", float!(self.branch_rate)), ("complexity", uint!(self.complexity)), ]) - .write_inner_content(|w| self.classes.write_xml(w))?; + .write_inner_content(|w| self.classes._write_xml(w))?; Ok(()) } @@ -213,10 +218,10 @@ pub struct Classes { } impl WriteXml for Classes { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("classes") - .write_inner_content(|w| self.classes.write_xml(w))?; + .write_inner_content(|w| self.classes._write_xml(w))?; Ok(()) } @@ -241,7 +246,7 @@ pub struct Class { } impl WriteXml for Class { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("class") .with_attributes([ @@ -252,8 +257,8 @@ impl WriteXml for Class { ("complexity", uint!(self.complexity)), ]) .write_inner_content(|w| { - self.methods.write_xml(w)?; - self.lines.write_xml(w)?; + self.methods._write_xml(w)?; + self.lines._write_xml(w)?; Ok(()) })?; @@ -268,10 +273,10 @@ pub struct Methods { } impl WriteXml for Methods { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("methods") - .write_inner_content(|w| self.methods.write_xml(w))?; + .write_inner_content(|w| self.methods._write_xml(w))?; Ok(()) } @@ -293,7 +298,7 @@ pub struct Method { } impl WriteXml for Method { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("method") .with_attributes([ @@ -302,7 +307,7 @@ impl WriteXml for Method { ("line-rate", float!(self.line_rate)), ("branch-rate", float!(self.branch_rate)), ]) - .write_inner_content(|w| self.lines.write_xml(w))?; + .write_inner_content(|w| self.lines._write_xml(w))?; Ok(()) } @@ -315,10 +320,10 @@ pub struct Lines { } impl WriteXml for Lines { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("lines") - .write_inner_content(|w| self.lines.write_xml(w))?; + .write_inner_content(|w| self.lines._write_xml(w))?; Ok(()) } @@ -340,7 +345,7 @@ pub struct Line { } impl WriteXml for Line { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { let condition_coverage = if let Some(s) = &self.condition_coverage { s.as_str() } else { @@ -355,7 +360,7 @@ impl WriteXml for Line { ("branch", boolean!(self.branch.unwrap_or_default())), ("condition-coverage", condition_coverage), ]) - .write_inner_content(|w| self.conditions.write_xml(w))?; + .write_inner_content(|w| self.conditions._write_xml(w))?; Ok(()) } @@ -368,10 +373,10 @@ pub struct Conditions { } impl WriteXml for Conditions { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("conditions") - .write_inner_content(|w| 
self.conditions.write_xml(w))?; + .write_inner_content(|w| self.conditions._write_xml(w))?; Ok(()) } @@ -389,7 +394,7 @@ pub struct Condition { } impl WriteXml for Condition { - fn write_xml(&self, writer: &mut Writer) -> Result<()> { + fn _write_xml(&self, writer: &mut Writer) -> Result<()> { writer .create_element("condition") .with_attributes([ diff --git a/src/agent/coverage/examples/cobertura.rs b/src/agent/coverage/examples/cobertura.rs index 8d999d6a0f..2ba5ec2f6a 100644 --- a/src/agent/coverage/examples/cobertura.rs +++ b/src/agent/coverage/examples/cobertura.rs @@ -40,7 +40,7 @@ fn generate_output() -> Result { coverage.files.insert(file_path, file); } - CoberturaCoverage::from(coverage).to_string() + CoberturaCoverage::from(&coverage).to_string() } #[cfg(test)] diff --git a/src/agent/coverage/examples/record.rs b/src/agent/coverage/examples/record.rs index 4d2ec46922..e2cc34dcd2 100644 --- a/src/agent/coverage/examples/record.rs +++ b/src/agent/coverage/examples/record.rs @@ -190,7 +190,7 @@ fn dump_modoff(coverage: &BinaryCoverage) -> Result<()> { } fn dump_source_line(binary: &BinaryCoverage, allowlist: AllowList) -> Result<()> { - let source = coverage::source::binary_to_source_coverage(binary, allowlist)?; + let source = coverage::source::binary_to_source_coverage(binary, &allowlist)?; for (path, file) in &source.files { for (line, count) in &file.lines { @@ -202,8 +202,8 @@ fn dump_source_line(binary: &BinaryCoverage, allowlist: AllowList) -> Result<()> } fn dump_cobertura(binary: &BinaryCoverage, allowlist: AllowList) -> Result<()> { - let source = coverage::source::binary_to_source_coverage(binary, allowlist)?; - let cobertura: CoberturaCoverage = source.into(); + let source = coverage::source::binary_to_source_coverage(binary, &allowlist)?; + let cobertura: CoberturaCoverage = (&source).into(); println!("{}", cobertura.to_string()?); diff --git a/src/agent/coverage/src/cobertura.rs b/src/agent/coverage/src/cobertura.rs index 669e787fb2..d9175c5e38 100644 --- a/src/agent/coverage/src/cobertura.rs +++ b/src/agent/coverage/src/cobertura.rs @@ -16,8 +16,8 @@ use crate::source::SourceCoverage; // Dir -> Set type FileMap<'a> = BTreeMap<&'a str, BTreeSet<&'a FilePath>>; -impl From for CoberturaCoverage { - fn from(source: SourceCoverage) -> Self { +impl From<&SourceCoverage> for CoberturaCoverage { + fn from(source: &SourceCoverage) -> Self { // The Cobertura data model is organized around `classes` and `methods` contained // in `packages`. Our source coverage has no language-level assumptions. // @@ -51,7 +51,7 @@ impl From for CoberturaCoverage { // Iterate through the grouped files, accumulating `` elements. 
let (packages, hit_counts): (Vec, Vec) = file_map .into_iter() - .map(|(directory, files)| directory_to_package(&source, directory, files)) + .map(|(directory, files)| directory_to_package(source, directory, files)) .unzip(); let hit_count: HitCounts = hit_counts.into_iter().sum(); diff --git a/src/agent/coverage/src/source.rs b/src/agent/coverage/src/source.rs index 735e7c6b75..b556fe447a 100644 --- a/src/agent/coverage/src/source.rs +++ b/src/agent/coverage/src/source.rs @@ -51,14 +51,13 @@ impl From for u32 { pub fn binary_to_source_coverage( binary: &BinaryCoverage, - source_allowlist: impl Into>, + source_allowlist: &AllowList, ) -> Result { use std::collections::btree_map::Entry; use symbolic::debuginfo::Object; use symbolic::symcache::{SymCache, SymCacheConverter}; - let source_allowlist = source_allowlist.into().unwrap_or_default(); let loader = Loader::new(); let mut source = SourceCoverage::default(); diff --git a/src/agent/coverage/tests/snapshot.rs b/src/agent/coverage/tests/snapshot.rs index 0b44bf1dd6..75d524e2da 100644 --- a/src/agent/coverage/tests/snapshot.rs +++ b/src/agent/coverage/tests/snapshot.rs @@ -54,7 +54,7 @@ fn windows_snapshot_tests() { // generate source-line coverage info let source = - coverage::source::binary_to_source_coverage(&recorded.coverage, source_allowlist) + coverage::source::binary_to_source_coverage(&recorded.coverage, &source_allowlist) .expect("binary_to_source_coverage"); let file_coverage = source diff --git a/src/agent/onefuzz-file-format/src/coverage/binary.rs b/src/agent/onefuzz-file-format/src/coverage/binary.rs index 5fc6346536..a0fc75cb02 100644 --- a/src/agent/onefuzz-file-format/src/coverage/binary.rs +++ b/src/agent/onefuzz-file-format/src/coverage/binary.rs @@ -32,8 +32,8 @@ impl BinaryCoverageJson { } // Convert into the latest format. -impl From for BinaryCoverageJson { - fn from(source: BinaryCoverage) -> Self { +impl From<&BinaryCoverage> for BinaryCoverageJson { + fn from(source: &BinaryCoverage) -> Self { v1::BinaryCoverageJson::from(source).into() } } diff --git a/src/agent/onefuzz-file-format/src/coverage/binary/v1.rs b/src/agent/onefuzz-file-format/src/coverage/binary/v1.rs index 154a056bcd..4f79780466 100644 --- a/src/agent/onefuzz-file-format/src/coverage/binary/v1.rs +++ b/src/agent/onefuzz-file-format/src/coverage/binary/v1.rs @@ -21,8 +21,8 @@ pub struct ModuleCoverageJson { pub blocks: BTreeMap, } -impl From for BinaryCoverageJson { - fn from(binary: BinaryCoverage) -> Self { +impl From<&BinaryCoverage> for BinaryCoverageJson { + fn from(binary: &BinaryCoverage) -> Self { let mut modules = BTreeMap::new(); for (path, offsets) in &binary.modules { diff --git a/src/agent/onefuzz-file-format/src/coverage/source.rs b/src/agent/onefuzz-file-format/src/coverage/source.rs index 74e7adcb10..779e572492 100644 --- a/src/agent/onefuzz-file-format/src/coverage/source.rs +++ b/src/agent/onefuzz-file-format/src/coverage/source.rs @@ -32,8 +32,8 @@ impl SourceCoverageJson { } // Convert into the latest format. 
-impl From for SourceCoverageJson { - fn from(source: SourceCoverage) -> Self { +impl From<&SourceCoverage> for SourceCoverageJson { + fn from(source: &SourceCoverage) -> Self { v1::SourceCoverageJson::from(source).into() } } diff --git a/src/agent/onefuzz-file-format/src/coverage/source/v1.rs b/src/agent/onefuzz-file-format/src/coverage/source/v1.rs index 0dc4ca6b1a..9b6e4fa477 100644 --- a/src/agent/onefuzz-file-format/src/coverage/source/v1.rs +++ b/src/agent/onefuzz-file-format/src/coverage/source/v1.rs @@ -23,14 +23,14 @@ pub struct FileCoverageJson { pub lines: BTreeMap, } -impl From for SourceCoverageJson { - fn from(source: SourceCoverage) -> Self { +impl From<&SourceCoverage> for SourceCoverageJson { + fn from(source: &SourceCoverage) -> Self { let mut json = SourceCoverageJson::default(); - for (path, file) in source.files { + for (path, file) in &source.files { let mut file_json = FileCoverageJson::default(); - for (line, count) in file.lines { + for (line, count) in &file.lines { let line_number = LineNumber(line.number()); let hit_count = count.0; file_json.lines.insert(line_number, hit_count); diff --git a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs index 17ece8c017..b112cfefbe 100644 --- a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs @@ -10,7 +10,7 @@ use std::time::Duration; use anyhow::{bail, Context, Result}; use async_trait::async_trait; -use cobertura::CoberturaCoverage; +use cobertura::{CoberturaCoverage, WriteXml}; use coverage::allowlist::AllowList; use coverage::binary::{BinaryCoverage, DebugInfoCache}; use coverage::record::CoverageRecorder; @@ -29,6 +29,7 @@ use onefuzz_file_format::coverage::{ use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData}; use storage_queue::{Message, QueueClient}; use tokio::fs; +use tokio::sync::RwLock; use tokio::task::spawn_blocking; use tokio_stream::wrappers::ReadDirStream; use url::Url; @@ -214,9 +215,9 @@ struct TargetAllowList { struct TaskContext<'a> { config: &'a Config, - coverage: BinaryCoverage, + coverage: RwLock, module_allowlist: AllowList, - source_allowlist: AllowList, + source_allowlist: Arc, heartbeat: Option, cache: Arc, } @@ -242,9 +243,9 @@ impl<'a> TaskContext<'a> { Ok(Self { config, - coverage, + coverage: RwLock::new(coverage), module_allowlist: allowlist.modules, - source_allowlist: allowlist.source_files, + source_allowlist: Arc::new(allowlist.source_files), heartbeat, cache: Arc::new(cache), }) @@ -287,8 +288,8 @@ impl<'a> TaskContext<'a> { async fn try_record_input(&mut self, input: &Path) -> Result<()> { let coverage = self.record_impl(input).await?; - self.coverage.merge(&coverage); - + let mut self_coverage = RwLock::write(&self.coverage).await; + self_coverage.merge(&coverage); Ok(()) } @@ -450,56 +451,96 @@ impl<'a> TaskContext<'a> { pub async fn report_coverage_stats(&self) -> Result<()> { use EventData::*; - let s = CoverageStats::new(&self.coverage); + let coverage = RwLock::read(&self.coverage).await; + let s = CoverageStats::new(&coverage); event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate); metric!(coverage_data; 1.0; Covered = s.covered, Features = s.features, Rate = s.rate); Ok(()) } - pub async fn save_and_sync_coverage(&self) -> Result<()> { - // JSON binary coverage. 
- let binary = self.coverage.clone(); - let json = BinaryCoverageJson::V1(BinaryCoverageJsonV1::from(binary)); - let text = serde_json::to_string(&json).context("serializing binary coverage")?; - let path = self.config.coverage.local_path.join(COVERAGE_FILE); - fs::write(&path, &text) - .await - .with_context(|| format!("writing coverage to {}", path.display()))?; - - // JSON source coverage. - let source = self.source_coverage().await?; - let json = SourceCoverageJson::V1(SourceCoverageJsonV1::from(source.clone())); - let text = serde_json::to_string(&json).context("serializing source coverage")?; - let path = self.config.coverage.local_path.join(SOURCE_COVERAGE_FILE); - fs::write(&path, &text) - .await - .with_context(|| format!("writing source coverage to {}", path.display()))?; + pub async fn save_coverage( + coverage: &RwLock, + source_allowlist: &Arc, + binary_coverage_path: &Path, + source_coverage_path: &Path, + copbertura_file_path: &Path, + ) -> Result<()> { + let source = Self::source_coverage(coverage, source_allowlist.clone()).await?; + let coverage = coverage.read().await; + + Self::save_binary_coverage(&coverage, binary_coverage_path)?; + Self::save_source_coverage(&source, source_coverage_path).await?; + Self::save_cobertura_xml(&source, copbertura_file_path).await?; + Ok(()) + } + + async fn source_coverage( + coverage: &RwLock, + source_allowlist: Arc, + ) -> Result { + // Must be owned due to `spawn_blocking()` lifetimes. + let allowlist = source_allowlist.clone(); + let binary = Arc::new(coverage.read().await.clone()); + // Conversion to source coverage heavy on blocking I/O. + spawn_blocking(move || binary_to_source_coverage(&binary, &allowlist)).await? + } - // Cobertura XML source coverage. - let cobertura = CoberturaCoverage::from(source.clone()); - let text = cobertura.to_string()?; - let path = self + pub async fn save_and_sync_coverage(&self) -> Result<()> { + let copbertura_file_path = self .config .coverage .local_path .join(COBERTURA_COVERAGE_FILE); - fs::write(&path, &text) - .await - .with_context(|| format!("writing cobertura source coverage to {}", path.display()))?; + let source_coverage_path = self.config.coverage.local_path.join(SOURCE_COVERAGE_FILE); + let binary_coverage_path = self.config.coverage.local_path.join(COVERAGE_FILE); + + Self::save_coverage( + &self.coverage, + &self.source_allowlist, + &binary_coverage_path, + &source_coverage_path, + &copbertura_file_path, + ) + .await?; self.config.coverage.sync_push().await?; + Ok(()) + } + async fn save_cobertura_xml(source: &SourceCoverage, path: &Path) -> Result<(), anyhow::Error> { + let cobertura = CoberturaCoverage::from(source); + let cobertura_coverage_file = std::fs::File::create(path) + .with_context(|| format!("creating cobertura coverage file {}", path.display()))?; + let cobertura_coverage_file_writer = std::io::BufWriter::new(cobertura_coverage_file); + cobertura + .write_xml(cobertura_coverage_file_writer) + .with_context(|| format!("serializing cobertura coverage to {}", path.display()))?; Ok(()) } - async fn source_coverage(&self) -> Result { - // Must be owned due to `spawn_blocking()` lifetimes. 
- let allowlist = self.source_allowlist.clone(); - let binary = self.coverage.clone(); + async fn save_source_coverage(source: &SourceCoverage, path: &Path) -> Result<()> { + let json = SourceCoverageJson::V1(SourceCoverageJsonV1::from(source)); + let source_coverage_file = std::fs::File::create(path) + .with_context(|| format!("creating source coverage file {}", path.display()))?; + let source_coverage_file_writer = std::io::BufWriter::new(source_coverage_file); + serde_json::to_writer_pretty(source_coverage_file_writer, &json) + .with_context(|| format!("serializing source coverage to {}", path.display()))?; + Ok(()) + } - // Conversion to source coverage heavy on blocking I/O. - spawn_blocking(move || binary_to_source_coverage(&binary, allowlist)).await? + fn save_binary_coverage( + binary_coverage: &BinaryCoverage, + path: &Path, + ) -> Result<(), anyhow::Error> { + let json = BinaryCoverageJson::V1(BinaryCoverageJsonV1::from(binary_coverage)); + + let coverage_file = std::fs::File::create(path) + .with_context(|| format!("creating coverage file {}", path.display()))?; + let coverage_file_writer = std::io::BufWriter::new(coverage_file); + serde_json::to_writer(coverage_file_writer, &json) + .with_context(|| format!("serializing binary coverage to {}", path.display()))?; + Ok(()) } } From b45446ac52f7538fb5753c17ff3d4454fa15d4d3 Mon Sep 17 00:00:00 2001 From: George Pollard Date: Thu, 17 Aug 2023 16:33:20 +1200 Subject: [PATCH 02/20] Make crashdumps optional in config (#3409) --- src/ApiService/ApiService/onefuzzlib/Defs.cs | 4 +- .../onefuzz-task/src/local/libfuzzer_fuzz.rs | 2 +- src/agent/onefuzz-task/src/local/template.rs | 4 +- .../src/tasks/fuzz/libfuzzer/common.rs | 120 ++++++++++-------- .../onefuzz-task/src/tasks/fuzz/supervisor.rs | 41 ++++-- 5 files changed, 101 insertions(+), 70 deletions(-) diff --git a/src/ApiService/ApiService/onefuzzlib/Defs.cs b/src/ApiService/ApiService/onefuzzlib/Defs.cs index 9a2337f1b7..1a6e5797e5 100644 --- a/src/ApiService/ApiService/onefuzzlib/Defs.cs +++ b/src/ApiService/ApiService/onefuzzlib/Defs.cs @@ -192,7 +192,7 @@ public static class Defs { ), new ContainerDefinition( Type:ContainerType.Crashdumps, - Compare: Compare.Equal, + Compare: Compare.AtMost, Value:1, Permissions: ContainerPermission.Write ), @@ -287,7 +287,7 @@ public static class Defs { ), new ContainerDefinition( Type:ContainerType.Crashdumps, - Compare: Compare.Equal, + Compare: Compare.AtMost, Value:1, Permissions: ContainerPermission.Write ), diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_fuzz.rs b/src/agent/onefuzz-task/src/local/libfuzzer_fuzz.rs index 80a083009b..a3cf9d3258 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_fuzz.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_fuzz.rs @@ -51,7 +51,7 @@ pub fn build_fuzz_config( inputs, readonly_inputs, crashes, - crashdumps, + crashdumps: Some(crashdumps), target_exe, target_env, target_options, diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index 978dba74fa..ce0908fd58 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -144,7 +144,9 @@ impl TaskConfig { inputs: context.to_monitored_sync_dir("inputs", &config.inputs)?, readonly_inputs: Some(ri?), crashes: context.to_monitored_sync_dir("crashes", &config.crashes)?, - crashdumps: context.to_monitored_sync_dir("crashdumps", &config.crashdumps)?, + crashdumps: Some( + context.to_monitored_sync_dir("crashdumps", &config.crashdumps)?, + ), 
target_exe: config.target_exe.clone(), target_env: config.target_env.clone(), target_options: config.target_options.clone(), diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs index ca51f5f5d7..4f8c67ae8e 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs @@ -74,7 +74,7 @@ pub struct Config { pub inputs: SyncedDir, pub readonly_inputs: Option>, pub crashes: SyncedDir, - pub crashdumps: SyncedDir, + pub crashdumps: Option, pub target_exe: PathBuf, pub target_env: HashMap, pub target_options: Vec, @@ -131,7 +131,13 @@ where let resync = self.continuous_sync_inputs(); let new_inputs = self.config.inputs.monitor_results(new_coverage, true); let new_crashes = self.config.crashes.monitor_results(new_result, true); - let new_crashdumps = self.config.crashdumps.monitor_results(new_crashdump, true); + let new_crashdumps = async { + if let Some(crashdumps) = &self.config.crashdumps { + crashdumps.monitor_results(new_crashdump, true).await + } else { + Ok(()) + } + }; let (stats_sender, stats_receiver) = mpsc::unbounded_channel(); let report_stats = report_runtime_stats(stats_receiver, hb_client); @@ -350,64 +356,66 @@ where } } - // check for core dumps on Linux: - // note that collecting the dumps must be enabled by the template - #[cfg(target_os = "linux")] - if let Some(pid) = pid { - // expect crash dump to exist in CWD - let filename = format!("core.{pid}"); - let dest_filename = dump_file_name.as_deref().unwrap_or(OsStr::new(&filename)); - let dest_path = self.config.crashdumps.local_path.join(dest_filename); - match tokio::fs::rename(&filename, &dest_path).await { - Ok(()) => { - info!( - "moved crash dump {} to output directory: {}", - filename, - dest_path.display() - ); - } - Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { - // okay, no crash dump found - info!("no crash dump found with name: {}", filename); - } else { - return Err(e).context("moving crash dump to output directory"); + if let Some(crashdumps) = &self.config.crashdumps { + // check for core dumps on Linux: + // note that collecting the dumps must be enabled by the template + #[cfg(target_os = "linux")] + if let Some(pid) = pid { + // expect crash dump to exist in CWD + let filename = format!("core.{pid}"); + let dest_filename = dump_file_name.as_deref().unwrap_or(OsStr::new(&filename)); + let dest_path = crashdumps.local_path.join(dest_filename); + match tokio::fs::rename(&filename, &dest_path).await { + Ok(()) => { + info!( + "moved crash dump {} to output directory: {}", + filename, + dest_path.display() + ); + } + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + // okay, no crash dump found + info!("no crash dump found with name: {}", filename); + } else { + return Err(e).context("moving crash dump to output directory"); + } } } + } else { + warn!("no PID found for libfuzzer process"); } - } else { - warn!("no PID found for libfuzzer process"); - } - // check for crash dumps on Windows: - #[cfg(target_os = "windows")] - { - let dumpfile_extension = Some(std::ffi::OsStr::new("dmp")); - - let mut working_dir = tokio::fs::read_dir(".").await?; - let mut found_dump = false; - while let Some(next) = working_dir.next_entry().await? 
{ - if next.path().extension() == dumpfile_extension { - // Windows dumps get a fixed filename so we will generate a random one, - // if there's no valid target crash name: - let dest_filename = - dump_file_name.unwrap_or_else(|| uuid::Uuid::new_v4().to_string().into()); - let dest_path = self.config.crashdumps.local_path.join(&dest_filename); - tokio::fs::rename(next.path(), &dest_path) - .await - .context("moving crash dump to output directory")?; - info!( - "moved crash dump {} to output directory: {}", - next.path().display(), - dest_path.display() - ); - found_dump = true; - break; + // check for crash dumps on Windows: + #[cfg(target_os = "windows")] + { + let dumpfile_extension = Some(std::ffi::OsStr::new("dmp")); + + let mut working_dir = tokio::fs::read_dir(".").await?; + let mut found_dump = false; + while let Some(next) = working_dir.next_entry().await? { + if next.path().extension() == dumpfile_extension { + // Windows dumps get a fixed filename so we will generate a random one, + // if there's no valid target crash name: + let dest_filename = dump_file_name + .unwrap_or_else(|| uuid::Uuid::new_v4().to_string().into()); + let dest_path = crashdumps.local_path.join(&dest_filename); + tokio::fs::rename(next.path(), &dest_path) + .await + .context("moving crash dump to output directory")?; + info!( + "moved crash dump {} to output directory: {}", + next.path().display(), + dest_path.display() + ); + found_dump = true; + break; + } } - } - if !found_dump { - info!("no crash dump found with extension .dmp"); + if !found_dump { + info!("no crash dump found with extension .dmp"); + } } } @@ -425,7 +433,9 @@ where // output directories (init): self.config.crashes.init().await?; - self.config.crashdumps.init().await?; + if let Some(crashdumps) = &self.config.crashdumps { + crashdumps.init().await?; + } Ok(()) } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs index f1a750ccf7..de1e1106ba 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs @@ -41,7 +41,7 @@ use futures::TryFutureExt; pub struct SupervisorConfig { pub inputs: SyncedDir, pub crashes: SyncedDir, - pub crashdumps: SyncedDir, + pub crashdumps: Option, pub supervisor_exe: String, pub supervisor_env: HashMap, pub supervisor_options: Vec, @@ -82,12 +82,29 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { let monitor_crashes = crashes.monitor_results(new_result, false); // setup crashdumps - let crashdumps = SyncedDir { - local_path: runtime_dir.path().join("crashdumps"), - remote_path: config.crashdumps.remote_path.clone(), + let (crashdump_dir, monitor_crashdumps) = { + let crashdump_dir = if let Some(crashdumps) = &config.crashdumps { + let dir = SyncedDir { + local_path: runtime_dir.path().join("crashdumps"), + remote_path: crashdumps.remote_path.clone(), + }; + dir.init().await?; + Some(dir) + } else { + None + }; + + let monitor_dir = crashdump_dir.clone(); + let monitor_crashdumps = async move { + if let Some(crashdumps) = monitor_dir { + crashdumps.monitor_results(new_crashdump, false).await + } else { + Ok(()) + } + }; + + (crashdump_dir, monitor_crashdumps) }; - crashdumps.init().await?; - let monitor_crashdumps = crashdumps.monitor_results(new_crashdump, false); // setup coverage if let Some(coverage) = &config.coverage { @@ -148,7 +165,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { &runtime_dir.path(), &config, &crashes, - 
&crashdumps, + crashdump_dir.as_ref(), &inputs, reports_dir.path().to_path_buf(), ) @@ -217,7 +234,7 @@ async fn start_supervisor( runtime_dir: impl AsRef, config: &SupervisorConfig, crashes: &SyncedDir, - crashdumps: &SyncedDir, + crashdumps: Option<&SyncedDir>, inputs: &SyncedDir, reports_dir: PathBuf, ) -> Result { @@ -233,7 +250,9 @@ async fn start_supervisor( .supervisor_options(&config.supervisor_options) .runtime_dir(&runtime_dir) .crashes(&crashes.local_path) - .crashdumps(&crashdumps.local_path) + .set_optional_ref(&crashdumps, |expand, crashdumps| { + expand.crashdumps(&crashdumps.local_path) + }) .input_corpus(&inputs.local_path) .reports_dir(reports_dir) .setup_dir(&config.common.setup_dir) @@ -410,7 +429,7 @@ mod tests { target_options, inputs: corpus_dir.clone(), crashes: crashes.clone(), - crashdumps: crashdumps.clone(), + crashdumps: Some(crashdumps.clone()), tools: None, wait_for_files: None, stats_file: None, @@ -447,7 +466,7 @@ mod tests { runtime_dir, &config, &crashes, - &crashdumps, + Some(&crashdumps), &corpus_dir, reports_dir, ) From 98e986e3e72af2227f153e3d447a5e9b1aac8c6d Mon Sep 17 00:00:00 2001 From: Teo Voinea <58236992+tevoinea@users.noreply.github.com> Date: Thu, 17 Aug 2023 09:36:38 -0400 Subject: [PATCH 03/20] Tevoinea/migrate task types to template (#3397) * Add analysis task * generic report * generator task * Libfuzzer crash report * Add libfuzzer merge * libfuzzer regression * Add libfuzzer test input * Cleanup * Update schema * Some light docs and example templates * Add testinput task * Reorganize the code * Cleanup * Improve test, update model --- src/agent/onefuzz-task/src/local/coverage.rs | 67 ++- .../libfuzzer_basic.yml | 9 +- .../src/local/example_templates/radamsa.yml | 21 + .../src/local/generic_analysis.rs | 76 ++- .../src/local/generic_crash_report.rs | 96 +++- .../src/local/generic_generator.rs | 80 ++- src/agent/onefuzz-task/src/local/libfuzzer.rs | 79 ++- .../src/local/libfuzzer_crash_report.rs | 92 ++- .../onefuzz-task/src/local/libfuzzer_merge.rs | 68 ++- .../src/local/libfuzzer_regression.rs | 91 ++- .../src/local/libfuzzer_test_input.rs | 57 +- src/agent/onefuzz-task/src/local/readme.md | 6 + src/agent/onefuzz-task/src/local/schema.json | 540 +++++++++++++++++- src/agent/onefuzz-task/src/local/template.rs | 295 +++------- .../onefuzz-task/src/local/test_input.rs | 62 +- src/agent/onefuzz/src/input_tester.rs | 4 +- 16 files changed, 1398 insertions(+), 245 deletions(-) rename src/agent/onefuzz-task/src/local/{ => example_templates}/libfuzzer_basic.yml (88%) create mode 100644 src/agent/onefuzz-task/src/local/example_templates/radamsa.yml create mode 100644 src/agent/onefuzz-task/src/local/readme.md diff --git a/src/agent/onefuzz-task/src/local/coverage.rs b/src/agent/onefuzz-task/src/local/coverage.rs index a5d53a09f0..d091b70695 100644 --- a/src/agent/onefuzz-task/src/local/coverage.rs +++ b/src/agent/onefuzz-task/src/local/coverage.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -15,11 +15,17 @@ use crate::{ }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; +use onefuzz::syncdir::SyncedDir; +use schemars::JsonSchema; use storage_queue::QueueClient; -use super::common::{SyncCountDirMonitor, UiEvent}; +use super::{ + common::{SyncCountDirMonitor, UiEvent}, + template::{RunContext, Template}, +}; pub fn build_coverage_config( args: &clap::ArgMatches, @@ -127,3 +133,60 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only coverage task") .args(&build_shared_args(false)) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct Coverage { + target_exe: PathBuf, + target_env: HashMap, + target_options: Vec, + target_timeout: Option, + module_allowlist: Option, + source_allowlist: Option, + input_queue: Option, + readonly_inputs: Vec, + coverage: PathBuf, +} + +#[async_trait] +impl Template for Coverage { + async fn run(&self, context: &RunContext) -> Result<()> { + let ri: Result> = self + .readonly_inputs + .iter() + .enumerate() + .map(|(index, input)| context.to_sync_dir(format!("readonly_inputs_{index}"), input)) + .collect(); + + let input_q = if let Some(w) = &self.input_queue { + Some(context.monitor_dir(w).await?) + } else { + None + }; + + let coverage_config = crate::tasks::coverage::generic::Config { + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + target_timeout: None, + readonly_inputs: ri?, + input_queue: input_q, + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + coverage_filter: None, + coverage: context.to_monitored_sync_dir("coverage", self.coverage.clone())?, + module_allowlist: self.module_allowlist.clone(), + source_allowlist: self.source_allowlist.clone(), + }; + + context + .spawn(async move { + let mut coverage = + crate::tasks::coverage::generic::CoverageTask::new(coverage_config); + coverage.run().await + }) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_basic.yml b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml similarity index 88% rename from src/agent/onefuzz-task/src/local/libfuzzer_basic.yml rename to src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml index da7100f89d..7210893809 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_basic.yml +++ b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml @@ -1,4 +1,4 @@ -# yaml-language-server: $schema=schema.json +# yaml-language-server: $schema=../schema.json # What I had to do to get this working: # 1. 
Update target_exe to point to the target exe @@ -26,7 +26,7 @@ tasks: crashes: *crash reports: "./reports" unique_reports: "./unique_reports" - no_repro: "./noe_repro" + no_repro: "./no_repro" check_fuzzer_help: true - type: "Coverage" @@ -36,8 +36,3 @@ tasks: input_queue: *inputs readonly_inputs: [*inputs] coverage: "./coverage" - - # - type: Analysis - # <<: *target_args - - diff --git a/src/agent/onefuzz-task/src/local/example_templates/radamsa.yml b/src/agent/onefuzz-task/src/local/example_templates/radamsa.yml new file mode 100644 index 0000000000..fc73c48e9d --- /dev/null +++ b/src/agent/onefuzz-task/src/local/example_templates/radamsa.yml @@ -0,0 +1,21 @@ +# yaml-language-server: $schema=../schema.json + +# This template file demonstrates how to configure a radamsa task + +target_args: &target_args + target_env: {} + target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe" + target_options: [] + +tasks: + - type: Generator + <<: *target_args + crashes: "./crashes" + generator_env: {} + generator_exe: "./path/to/generator" + generator_options: [] + readonly_inputs: ["./path/to/readonly-inputs"] + rename_output: true + + - type: Report + <<: *target_args diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index fb202a9432..3d3e2fafc8 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -16,10 +16,14 @@ use crate::{ }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, Command}; use flume::Sender; +use schemars::JsonSchema; use storage_queue::QueueClient; +use super::template::{RunContext, Template}; + pub fn build_analysis_config( args: &clap::ArgMatches, input_queue: Option, @@ -131,3 +135,73 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only generic analysis") .args(&build_shared_args(true)) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct Analysis { + analyzer_exe: String, + analyzer_options: Vec, + analyzer_env: HashMap, + target_exe: PathBuf, + target_options: Vec, + input_queue: Option, + crashes: Option, + analysis: PathBuf, + tools: PathBuf, + reports: Option, + unique_reports: Option, + no_repro: Option, +} + +#[async_trait] +impl Template for Analysis { + async fn run(&self, context: &RunContext) -> Result<()> { + let input_q = if let Some(w) = &self.input_queue { + Some(context.monitor_dir(w).await?) 
+ } else { + None + }; + + let analysis_config = crate::tasks::analysis::generic::Config { + analyzer_exe: self.analyzer_exe.clone(), + analyzer_options: self.analyzer_options.clone(), + analyzer_env: self.analyzer_env.clone(), + + target_exe: self.target_exe.clone(), + target_options: self.target_options.clone(), + input_queue: input_q, + crashes: self + .crashes + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()), + + analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?, + tools: context + .to_monitored_sync_dir("tools", self.tools.clone()) + .ok(), + + reports: self + .reports + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("reports", path).ok()), + unique_reports: self + .unique_reports + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("unique_reports", path).ok()), + no_repro: self + .no_repro + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("no_repro", path).ok()), + + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + }; + + context + .spawn(async move { crate::tasks::analysis::generic::run(analysis_config).await }) + .await; + + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/generic_crash_report.rs b/src/agent/onefuzz-task/src/local/generic_crash_report.rs index aebd45aa08..6b0e2fccad 100644 --- a/src/agent/onefuzz-task/src/local/generic_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/generic_crash_report.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -13,13 +13,19 @@ use crate::{ tasks::{ config::CommonConfig, report::generic::{Config, ReportTask}, + utils::default_bool_true, }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; +use futures::future::OptionFuture; +use schemars::JsonSchema; use storage_queue::QueueClient; +use super::template::{RunContext, Template}; + pub fn build_report_config( args: &clap::ArgMatches, input_queue: Option, @@ -140,3 +146,91 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only generic crash report") .args(&build_shared_args()) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct CrashReport { + target_exe: PathBuf, + target_options: Vec, + target_env: HashMap, + + input_queue: Option, + crashes: Option, + reports: Option, + unique_reports: Option, + no_repro: Option, + + target_timeout: Option, + + #[serde(default)] + check_asan_log: bool, + #[serde(default = "default_bool_true")] + check_debugger: bool, + #[serde(default)] + check_retry_count: u64, + + #[serde(default = "default_bool_true")] + check_queue: bool, + + #[serde(default)] + minimized_stack_depth: Option, +} +#[async_trait] +impl Template for CrashReport { + async fn run(&self, context: &RunContext) -> Result<()> { + let input_q_fut: OptionFuture<_> = self + .input_queue + .iter() + .map(|w| context.monitor_dir(w)) + .next() + .into(); + let input_q = input_q_fut.await.transpose()?; + + let crash_report_config = crate::tasks::report::generic::Config { + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + target_timeout: self.target_timeout, + + input_queue: input_q, + crashes: self + .crashes + .clone() + .map(|c| context.to_monitored_sync_dir("crashes", c)) + .transpose()?, + reports: self + .reports + 
.clone() + .map(|c| context.to_monitored_sync_dir("reports", c)) + .transpose()?, + unique_reports: self + .unique_reports + .clone() + .map(|c| context.to_monitored_sync_dir("unique_reports", c)) + .transpose()?, + no_repro: self + .no_repro + .clone() + .map(|c| context.to_monitored_sync_dir("no_repro", c)) + .transpose()?, + + check_asan_log: self.check_asan_log, + check_debugger: self.check_debugger, + check_retry_count: self.check_retry_count, + check_queue: self.check_queue, + minimized_stack_depth: self.minimized_stack_depth, + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + }; + + context + .spawn(async move { + let mut report = + crate::tasks::report::generic::ReportTask::new(crash_report_config); + report.managed_run().await + }) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/generic_generator.rs b/src/agent/onefuzz-task/src/local/generic_generator.rs index a6b0777348..823ba221d6 100644 --- a/src/agent/onefuzz-task/src/local/generic_generator.rs +++ b/src/agent/onefuzz-task/src/local/generic_generator.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -14,11 +14,17 @@ use crate::{ tasks::{ config::CommonConfig, fuzz::generator::{Config, GeneratorTask}, + utils::default_bool_true, }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; +use onefuzz::syncdir::SyncedDir; +use schemars::JsonSchema; + +use super::template::{RunContext, Template}; pub fn build_fuzz_config( args: &clap::ArgMatches, @@ -144,3 +150,75 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only generator fuzzing task") .args(&build_shared_args()) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct Generator { + generator_exe: String, + generator_env: HashMap, + generator_options: Vec, + readonly_inputs: Vec, + crashes: PathBuf, + tools: Option, + + target_exe: PathBuf, + target_env: HashMap, + target_options: Vec, + target_timeout: Option, + #[serde(default)] + check_asan_log: bool, + #[serde(default = "default_bool_true")] + check_debugger: bool, + #[serde(default)] + check_retry_count: u64, + rename_output: bool, + ensemble_sync_delay: Option, +} + +#[async_trait] +impl Template for Generator { + async fn run(&self, context: &RunContext) -> Result<()> { + let generator_config = crate::tasks::fuzz::generator::Config { + generator_exe: self.generator_exe.clone(), + generator_env: self.generator_env.clone(), + generator_options: self.generator_options.clone(), + + readonly_inputs: self + .readonly_inputs + .iter() + .enumerate() + .map(|(index, roi_pb)| { + context.to_monitored_sync_dir(format!("read_only_inputs_{index}"), roi_pb) + }) + .collect::>>()?, + crashes: context.to_monitored_sync_dir("crashes", self.crashes.clone())?, + tools: self + .tools + .as_ref() + .and_then(|path_buf| context.to_monitored_sync_dir("tools", path_buf).ok()), + + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + target_timeout: self.target_timeout, + + check_asan_log: self.check_asan_log, + check_debugger: self.check_debugger, + check_retry_count: self.check_retry_count, + + rename_output: self.rename_output, + ensemble_sync_delay: self.ensemble_sync_delay, + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + 
}, + }; + + context + .spawn(async move { + let generator = crate::tasks::fuzz::generator::GeneratorTask::new(generator_config); + generator.run().await + }) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/libfuzzer.rs b/src/agent/onefuzz-task/src/local/libfuzzer.rs index f3dc1778e0..56dff7dbe3 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer.rs @@ -20,19 +20,29 @@ use crate::{ }, }, tasks::{ - analysis::generic::run as run_analysis, config::CommonConfig, - fuzz::libfuzzer::generic::LibFuzzerFuzzTask, - regression::libfuzzer::LibFuzzerRegressionTask, report::libfuzzer_report::ReportTask, + analysis::generic::run as run_analysis, + config::CommonConfig, + fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, + regression::libfuzzer::LibFuzzerRegressionTask, + report::libfuzzer_report::ReportTask, + utils::default_bool_true, }, }; use anyhow::Result; +use async_trait::async_trait; use clap::Command; use flume::Sender; -use onefuzz::utils::try_wait_all_join_handles; -use std::collections::HashSet; +use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles}; +use schemars::JsonSchema; +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, +}; use tokio::task::spawn; use uuid::Uuid; +use super::template::{RunContext, Template}; + pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { let context = build_local_context(args, true, event_sender.clone()).await?; let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; @@ -152,3 +162,62 @@ pub fn args(name: &'static str) -> Command { app } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct LibFuzzer { + inputs: PathBuf, + readonly_inputs: Vec, + crashes: PathBuf, + crashdumps: Option, + target_exe: PathBuf, + target_env: HashMap, + target_options: Vec, + target_workers: Option, + ensemble_sync_delay: Option, + #[serde(default = "default_bool_true")] + check_fuzzer_help: bool, + #[serde(default)] + expect_crash_on_failure: bool, +} + +#[async_trait] +impl Template for LibFuzzer { + async fn run(&self, context: &RunContext) -> Result<()> { + let ri: Result> = self + .readonly_inputs + .iter() + .enumerate() + .map(|(index, input)| context.to_sync_dir(format!("readonly_inputs_{index}"), input)) + .collect(); + + let libfuzzer_config = crate::tasks::fuzz::libfuzzer::generic::Config { + inputs: context.to_monitored_sync_dir("inputs", &self.inputs)?, + readonly_inputs: Some(ri?), + crashes: context.to_monitored_sync_dir("crashes", &self.crashes)?, + crashdumps: self + .crashdumps + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("crashdumps", path).ok()), + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + target_workers: self.target_workers.unwrap_or(default_workers()), + ensemble_sync_delay: self.ensemble_sync_delay, + check_fuzzer_help: self.check_fuzzer_help, + expect_crash_on_failure: self.expect_crash_on_failure, + extra: (), + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + }; + + context + .spawn(async move { + let fuzzer = LibFuzzerFuzzTask::new(libfuzzer_config)?; + fuzzer.run().await + }) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs index be6ac1cefd..c1ab283575 100644 --- 
a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -13,13 +13,19 @@ use crate::{ tasks::{ config::CommonConfig, report::libfuzzer_report::{Config, ReportTask}, + utils::default_bool_true, }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; +use futures::future::OptionFuture; +use schemars::JsonSchema; use storage_queue::QueueClient; +use super::template::{RunContext, Template}; + pub fn build_report_config( args: &clap::ArgMatches, input_queue: Option, @@ -129,3 +135,87 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only libfuzzer crash report task") .args(&build_shared_args()) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct LibfuzzerCrashReport { + target_exe: PathBuf, + target_env: HashMap, + target_options: Vec, + target_timeout: Option, + input_queue: Option, + crashes: Option, + reports: Option, + unique_reports: Option, + no_repro: Option, + + #[serde(default = "default_bool_true")] + check_fuzzer_help: bool, + + #[serde(default)] + check_retry_count: u64, + + #[serde(default)] + minimized_stack_depth: Option, + + #[serde(default = "default_bool_true")] + check_queue: bool, +} + +#[async_trait] +impl Template for LibfuzzerCrashReport { + async fn run(&self, context: &RunContext) -> Result<()> { + let input_q_fut: OptionFuture<_> = self + .input_queue + .iter() + .map(|w| context.monitor_dir(w)) + .next() + .into(); + let input_q = input_q_fut.await.transpose()?; + + let libfuzzer_crash_config = crate::tasks::report::libfuzzer_report::Config { + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + target_timeout: self.target_timeout, + input_queue: input_q, + crashes: self + .crashes + .clone() + .map(|c| context.to_monitored_sync_dir("crashes", c)) + .transpose()?, + reports: self + .reports + .clone() + .map(|c| context.to_monitored_sync_dir("reports", c)) + .transpose()?, + unique_reports: self + .unique_reports + .clone() + .map(|c| context.to_monitored_sync_dir("unique_reports", c)) + .transpose()?, + no_repro: self + .no_repro + .clone() + .map(|c| context.to_monitored_sync_dir("no_repro", c)) + .transpose()?, + + check_fuzzer_help: self.check_fuzzer_help, + check_retry_count: self.check_retry_count, + minimized_stack_depth: self.minimized_stack_depth, + check_queue: self.check_queue, + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + }; + + context + .spawn(async move { + let mut libfuzzer_report = + crate::tasks::report::libfuzzer_report::ReportTask::new(libfuzzer_crash_config); + libfuzzer_report.managed_run().await + }) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs index dff3a106e0..69c9df820b 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -13,13 +13,20 @@ use crate::{ tasks::{ config::CommonConfig, merge::libfuzzer_merge::{spawn, Config}, + utils::default_bool_true, }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; +use futures::future::OptionFuture; +use onefuzz::syncdir::SyncedDir; +use schemars::JsonSchema; use storage_queue::QueueClient; +use super::template::{RunContext, Template}; + pub fn build_merge_config( args: &clap::ArgMatches, input_queue: Option, @@ -86,3 +93,62 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only libfuzzer crash report task") .args(&build_shared_args()) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct LibfuzzerMerge { + target_exe: PathBuf, + target_env: HashMap, + target_options: Vec, + input_queue: Option, + inputs: Vec, + unique_inputs: PathBuf, + preserve_existing_outputs: bool, + + #[serde(default = "default_bool_true")] + check_fuzzer_help: bool, +} + +#[async_trait] +impl Template for LibfuzzerMerge { + async fn run(&self, context: &RunContext) -> Result<()> { + let input_q_fut: OptionFuture<_> = self + .input_queue + .iter() + .map(|w| context.monitor_dir(w)) + .next() + .into(); + let input_q = input_q_fut.await.transpose()?; + + let libfuzzer_merge = crate::tasks::merge::libfuzzer_merge::Config { + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + input_queue: input_q, + inputs: self + .inputs + .iter() + .enumerate() + .map(|(index, roi_pb)| { + context.to_monitored_sync_dir(format!("inputs_{index}"), roi_pb) + }) + .collect::>>()?, + unique_inputs: context + .to_monitored_sync_dir("unique_inputs", self.unique_inputs.clone())?, + preserve_existing_outputs: self.preserve_existing_outputs, + + check_fuzzer_help: self.check_fuzzer_help, + + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + }; + + context + .spawn( + async move { crate::tasks::merge::libfuzzer_merge::spawn(libfuzzer_merge).await }, + ) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs index da5af6d1b1..501d2385e2 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use std::path::PathBuf; +use std::{collections::HashMap, path::PathBuf}; use crate::{ local::common::{ @@ -13,11 +13,16 @@ use crate::{ tasks::{ config::CommonConfig, regression::libfuzzer::{Config, LibFuzzerRegressionTask}, + utils::default_bool_true, }, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; +use schemars::JsonSchema; + +use super::template::{RunContext, Template}; const REPORT_NAMES: &str = "report_names"; @@ -136,3 +141,87 @@ pub fn args(name: &'static str) -> Command { .about("execute a local-only libfuzzer regression task") .args(&build_shared_args(true)) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct LibfuzzerRegression { + target_exe: PathBuf, + + #[serde(default)] + target_options: Vec, + + #[serde(default)] + target_env: HashMap, + + target_timeout: Option, + + crashes: PathBuf, + regression_reports: PathBuf, + report_list: Option>, + unique_reports: Option, + reports: Option, + no_repro: Option, + readonly_inputs: Option, + + #[serde(default = "default_bool_true")] + check_fuzzer_help: bool, + #[serde(default)] + check_retry_count: u64, + + #[serde(default)] + minimized_stack_depth: Option, +} + +#[async_trait] +impl Template for LibfuzzerRegression { + async fn run(&self, context: &RunContext) -> Result<()> { + let libfuzzer_regression = crate::tasks::regression::libfuzzer::Config { + target_exe: self.target_exe.clone(), + target_env: self.target_env.clone(), + target_options: self.target_options.clone(), + target_timeout: self.target_timeout, + crashes: context.to_monitored_sync_dir("crashes", self.crashes.clone())?, + regression_reports: context + .to_monitored_sync_dir("regression_reports", self.regression_reports.clone())?, + report_list: self.report_list.clone(), + + unique_reports: self + .unique_reports + .clone() + .map(|c| context.to_monitored_sync_dir("unique_reports", c)) + .transpose()?, + reports: self + .reports + .clone() + .map(|c| context.to_monitored_sync_dir("reports", c)) + .transpose()?, + no_repro: self + .no_repro + .clone() + .map(|c| context.to_monitored_sync_dir("no_repro", c)) + .transpose()?, + readonly_inputs: self + .readonly_inputs + .clone() + .map(|c| context.to_monitored_sync_dir("readonly_inputs", c)) + .transpose()?, + + check_fuzzer_help: self.check_fuzzer_help, + check_retry_count: self.check_retry_count, + minimized_stack_depth: self.minimized_stack_depth, + + common: CommonConfig { + task_id: uuid::Uuid::new_v4(), + ..context.common.clone() + }, + }; + context + .spawn(async move { + let regression = crate::tasks::regression::libfuzzer::LibFuzzerRegressionTask::new( + libfuzzer_regression, + ); + regression.run().await + }) + .await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs index 7a28e3e4a9..9c6f16094e 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs @@ -9,9 +9,14 @@ use crate::{ tasks::report::libfuzzer_report::{test_input, TestInputArgs}, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, Command}; use flume::Sender; -use std::path::PathBuf; +use onefuzz::machine_id::MachineIdentity; +use schemars::JsonSchema; +use std::{collections::HashMap, path::PathBuf}; + +use super::template::{RunContext, Template}; pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { let context = build_local_context(args, true, 
event_sender).await?; @@ -86,3 +91,53 @@ pub fn args(name: &'static str) -> Command { .about("test a libfuzzer application with a specific input") .args(&build_shared_args()) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct LibfuzzerTestInput { + input: PathBuf, + target_exe: PathBuf, + target_options: Vec, + target_env: HashMap, + setup_dir: PathBuf, + extra_setup_dir: Option, + extra_output_dir: Option, + target_timeout: Option, + check_retry_count: u64, + minimized_stack_depth: Option, +} + +#[async_trait] +impl Template for LibfuzzerTestInput { + async fn run(&self, context: &RunContext) -> Result<()> { + let c = self.clone(); + let t = tokio::spawn(async move { + let libfuzzer_test_input = crate::tasks::report::libfuzzer_report::TestInputArgs { + input_url: None, + input: c.input.as_path(), + target_exe: c.target_exe.as_path(), + target_options: &c.target_options, + target_env: &c.target_env, + setup_dir: &c.setup_dir, + extra_output_dir: c.extra_output_dir.as_deref(), + extra_setup_dir: c.extra_setup_dir.as_deref(), + task_id: uuid::Uuid::new_v4(), + job_id: uuid::Uuid::new_v4(), + target_timeout: c.target_timeout, + check_retry_count: c.check_retry_count, + minimized_stack_depth: c.minimized_stack_depth, + machine_identity: MachineIdentity { + machine_id: uuid::Uuid::new_v4(), + machine_name: "local".to_string(), + scaleset_name: None, + }, + }; + + crate::tasks::report::libfuzzer_report::test_input(libfuzzer_test_input) + .await + .map(|_| ()) + }); + + context.add_handle(t).await; + Ok(()) + } +} diff --git a/src/agent/onefuzz-task/src/local/readme.md b/src/agent/onefuzz-task/src/local/readme.md new file mode 100644 index 0000000000..8c760d9c88 --- /dev/null +++ b/src/agent/onefuzz-task/src/local/readme.md @@ -0,0 +1,6 @@ +Example templates: `./example_templates` + +Updating schema: + +1. Run the test at the bottome of `template.rs` +1. 
Copy the output into `schema.json` diff --git a/src/agent/onefuzz-task/src/local/schema.json b/src/agent/onefuzz-task/src/local/schema.json index e21cd2817d..0a1f128e67 100644 --- a/src/agent/onefuzz-task/src/local/schema.json +++ b/src/agent/onefuzz-task/src/local/schema.json @@ -56,6 +56,12 @@ "default": true, "type": "boolean" }, + "crashdumps": { + "type": [ + "string", + "null" + ] + }, "crashes": { "type": "string" }, @@ -261,6 +267,204 @@ } } }, + { + "type": "object", + "required": [ + "target_env", + "target_exe", + "target_options", + "type" + ], + "properties": { + "check_asan_log": { + "default": false, + "type": "boolean" + }, + "check_debugger": { + "default": true, + "type": "boolean" + }, + "check_queue": { + "default": true, + "type": "boolean" + }, + "check_retry_count": { + "default": 0, + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "crashes": { + "type": [ + "string", + "null" + ] + }, + "input_queue": { + "type": [ + "string", + "null" + ] + }, + "minimized_stack_depth": { + "default": null, + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + }, + "no_repro": { + "type": [ + "string", + "null" + ] + }, + "reports": { + "type": [ + "string", + "null" + ] + }, + "target_env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "target_exe": { + "type": "string" + }, + "target_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "target_timeout": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "type": { + "type": "string", + "enum": [ + "CrashReport" + ] + }, + "unique_reports": { + "type": [ + "string", + "null" + ] + } + } + }, + { + "type": "object", + "required": [ + "crashes", + "generator_env", + "generator_exe", + "generator_options", + "readonly_inputs", + "rename_output", + "target_env", + "target_exe", + "target_options", + "type" + ], + "properties": { + "check_asan_log": { + "default": false, + "type": "boolean" + }, + "check_debugger": { + "default": true, + "type": "boolean" + }, + "check_retry_count": { + "default": 0, + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "crashes": { + "type": "string" + }, + "ensemble_sync_delay": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "generator_env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "generator_exe": { + "type": "string" + }, + "generator_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "readonly_inputs": { + "type": "array", + "items": { + "type": "string" + } + }, + "rename_output": { + "type": "boolean" + }, + "target_env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "target_exe": { + "type": "string" + }, + "target_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "target_timeout": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "tools": { + "type": [ + "string", + "null" + ] + }, + "type": { + "type": "string", + "enum": [ + "Generator" + ] + } + } + }, { "type": "object", "required": [ @@ -343,7 +547,7 @@ "type": { "type": "string", "enum": [ - "Report" + "LibfuzzerCrashReport" ] }, "unique_reports": { @@ -353,8 +557,340 @@ ] } } + }, + { + "type": "object", + "required": [ + "inputs", + "preserve_existing_outputs", + "target_env", + "target_exe", + "target_options", + "type", + "unique_inputs" + ], + "properties": { + "check_fuzzer_help": { + "default": 
true, + "type": "boolean" + }, + "input_queue": { + "type": [ + "string", + "null" + ] + }, + "inputs": { + "type": "array", + "items": { + "type": "string" + } + }, + "preserve_existing_outputs": { + "type": "boolean" + }, + "target_env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "target_exe": { + "type": "string" + }, + "target_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "LibfuzzerMerge" + ] + }, + "unique_inputs": { + "type": "string" + } + } + }, + { + "type": "object", + "required": [ + "crashes", + "regression_reports", + "target_exe", + "type" + ], + "properties": { + "check_fuzzer_help": { + "default": true, + "type": "boolean" + }, + "check_retry_count": { + "default": 0, + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "crashes": { + "type": "string" + }, + "minimized_stack_depth": { + "default": null, + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + }, + "no_repro": { + "type": [ + "string", + "null" + ] + }, + "readonly_inputs": { + "type": [ + "string", + "null" + ] + }, + "regression_reports": { + "type": "string" + }, + "report_list": { + "type": [ + "array", + "null" + ], + "items": { + "type": "string" + } + }, + "reports": { + "type": [ + "string", + "null" + ] + }, + "target_env": { + "default": {}, + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "target_exe": { + "type": "string" + }, + "target_options": { + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "target_timeout": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "type": { + "type": "string", + "enum": [ + "LibfuzzerRegression" + ] + }, + "unique_reports": { + "type": [ + "string", + "null" + ] + } + } + }, + { + "type": "object", + "required": [ + "check_retry_count", + "input", + "setup_dir", + "target_env", + "target_exe", + "target_options", + "type" + ], + "properties": { + "check_retry_count": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "extra_output_dir": { + "type": [ + "string", + "null" + ] + }, + "extra_setup_dir": { + "type": [ + "string", + "null" + ] + }, + "input": { + "type": "string" + }, + "minimized_stack_depth": { + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + }, + "setup_dir": { + "type": "string" + }, + "target_env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "target_exe": { + "type": "string" + }, + "target_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "target_timeout": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "type": { + "type": "string", + "enum": [ + "LibfuzzerTestInput" + ] + } + } + }, + { + "type": "object", + "required": [ + "check_asan_log", + "check_debugger", + "check_retry_count", + "input", + "job_id", + "setup_dir", + "target_env", + "target_exe", + "target_options", + "task_id", + "type" + ], + "properties": { + "check_asan_log": { + "type": "boolean" + }, + "check_debugger": { + "type": "boolean" + }, + "check_retry_count": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "extra_setup_dir": { + "type": [ + "string", + "null" + ] + }, + "input": { + "type": "string" + }, + "job_id": { + "type": "string", + "format": "uuid" + }, + "minimized_stack_depth": { + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + }, + 
"setup_dir": { + "type": "string" + }, + "target_env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "target_exe": { + "type": "string" + }, + "target_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "target_timeout": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "task_id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "TestInput" + ] + } + } + }, + { + "description": "The radamsa task can be represented via a combination of the `Generator` and `Report` tasks. Please see `src/agent/onefuzz-task/src/local/example_templates/radamsa.yml` for an example template", + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "Radamsa" + ] + } + } } ] } } -} \ No newline at end of file +} diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index ce0908fd58..b2e0c425ff 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -1,29 +1,25 @@ +use async_trait::async_trait; use flume::Sender; use onefuzz::{blob::BlobContainerUrl, syncdir::SyncedDir, utils::try_wait_all_join_handles}; use path_absolutize::Absolutize; use serde::Deserialize; -use std::{ - collections::HashMap, - path::{Path, PathBuf}, -}; +use std::path::{Path, PathBuf}; use storage_queue::QueueClient; use tokio::{sync::Mutex, task::JoinHandle}; use url::Url; use uuid::Uuid; -use crate::tasks::{ - config::CommonConfig, - fuzz::{ - self, - libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, - }, - report, +use crate::local::{ + coverage::Coverage, generic_analysis::Analysis, generic_crash_report::CrashReport, + generic_generator::Generator, libfuzzer::LibFuzzer, + libfuzzer_crash_report::LibfuzzerCrashReport, libfuzzer_merge::LibfuzzerMerge, + libfuzzer_regression::LibfuzzerRegression, libfuzzer_test_input::LibfuzzerTestInput, + test_input::TestInput, }; +use crate::tasks::config::CommonConfig; use super::common::{DirectoryMonitorQueue, SyncCountDirMonitor, UiEvent}; -use anyhow::Result; - -use futures::future::OptionFuture; +use anyhow::{Error, Result}; use schemars::JsonSchema; @@ -46,232 +42,73 @@ struct CommonProperties { pub create_job_dir: bool, } -#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] -struct LibFuzzer { - inputs: PathBuf, - readonly_inputs: Vec, - crashes: PathBuf, - crashdumps: PathBuf, - target_exe: PathBuf, - target_env: HashMap, - target_options: Vec, - target_workers: Option, - ensemble_sync_delay: Option, - #[serde(default = "default_bool_true")] - check_fuzzer_help: bool, - #[serde(default)] - expect_crash_on_failure: bool, -} - -#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] -struct Analysis { - analyzer_exe: String, - analyzer_options: Vec, - analyzer_env: HashMap, - target_exe: PathBuf, - target_options: Vec, - input_queue: Option, - crashes: Option, - analysis: PathBuf, - tools: PathBuf, - reports: Option, - unique_reports: Option, - no_repro: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] -struct Report { - target_exe: PathBuf, - target_env: HashMap, - // TODO: options are not yet used for crash reporting - target_options: Vec, - target_timeout: Option, - input_queue: Option, - crashes: Option, - reports: Option, - unique_reports: Option, - no_repro: Option, - #[serde(default = "default_bool_true")] - check_fuzzer_help: bool, - 
#[serde(default)] - check_retry_count: u64, - #[serde(default)] - minimized_stack_depth: Option, - #[serde(default = "default_bool_true")] - check_queue: bool, -} - -pub fn default_bool_true() -> bool { - true -} - -#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] -struct Coverage { - target_exe: PathBuf, - target_env: HashMap, - target_options: Vec, - target_timeout: Option, - module_allowlist: Option, - source_allowlist: Option, - input_queue: Option, - readonly_inputs: Vec, - coverage: PathBuf, -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] #[serde(tag = "type")] enum TaskConfig { LibFuzzer(LibFuzzer), Analysis(Analysis), Coverage(Coverage), - Report(Report), + CrashReport(CrashReport), + Generator(Generator), + LibfuzzerCrashReport(LibfuzzerCrashReport), + LibfuzzerMerge(LibfuzzerMerge), + LibfuzzerRegression(LibfuzzerRegression), + LibfuzzerTestInput(LibfuzzerTestInput), + TestInput(TestInput), + /// The radamsa task can be represented via a combination of the `Generator` and `Report` tasks. + /// Please see `src/agent/onefuzz-task/src/local/example_templates/radamsa.yml` for an example template + Radamsa, +} + +#[async_trait] +pub trait Template { + async fn run(&self, context: &RunContext) -> Result<()>; } impl TaskConfig { async fn launch(&self, context: RunContext) -> Result { match self { TaskConfig::LibFuzzer(config) => { - let ri: Result> = config - .readonly_inputs - .iter() - .enumerate() - .map(|(index, input)| { - context.to_sync_dir(format!("readonly_inputs_{index}"), input) - }) - .collect(); - - let libfuzzer_config = fuzz::libfuzzer::generic::Config { - inputs: context.to_monitored_sync_dir("inputs", &config.inputs)?, - readonly_inputs: Some(ri?), - crashes: context.to_monitored_sync_dir("crashes", &config.crashes)?, - crashdumps: Some( - context.to_monitored_sync_dir("crashdumps", &config.crashdumps)?, - ), - target_exe: config.target_exe.clone(), - target_env: config.target_env.clone(), - target_options: config.target_options.clone(), - target_workers: config.target_workers.unwrap_or(default_workers()), - ensemble_sync_delay: config.ensemble_sync_delay, - check_fuzzer_help: config.check_fuzzer_help, - expect_crash_on_failure: config.expect_crash_on_failure, - extra: (), - common: CommonConfig { - task_id: uuid::Uuid::new_v4(), - ..context.common.clone() - }, - }; - - context - .spawn(async move { - let fuzzer = LibFuzzerFuzzTask::new(libfuzzer_config)?; - fuzzer.run().await - }) - .await; + config.run(&context).await?; + } + TaskConfig::Analysis(config) => { + config.run(&context).await?; } - TaskConfig::Analysis(_analysis) => {} TaskConfig::Coverage(config) => { - let ri: Result> = config - .readonly_inputs - .iter() - .enumerate() - .map(|(index, input)| { - context.to_sync_dir(format!("readonly_inputs_{index}"), input) - }) - .collect(); - - let input_q = if let Some(w) = &config.input_queue { - Some(context.monitor_dir(w).await?) 
- } else { - None - }; - - let coverage_config = crate::tasks::coverage::generic::Config { - target_exe: config.target_exe.clone(), - target_env: config.target_env.clone(), - target_options: config.target_options.clone(), - target_timeout: None, - readonly_inputs: ri?, - input_queue: input_q, - common: CommonConfig { - task_id: uuid::Uuid::new_v4(), - ..context.common.clone() - }, - coverage_filter: None, - coverage: context.to_monitored_sync_dir("coverage", config.coverage.clone())?, - module_allowlist: config.module_allowlist.clone(), - source_allowlist: config.source_allowlist.clone(), - }; - - context - .spawn(async move { - let mut coverage = - crate::tasks::coverage::generic::CoverageTask::new(coverage_config); - coverage.run().await - }) - .await; + config.run(&context).await?; } - TaskConfig::Report(config) => { - let input_q_fut: OptionFuture<_> = config - .input_queue - .iter() - .map(|w| context.monitor_dir(w)) - .next() - .into(); - - let input_q = input_q_fut.await.transpose()?; - let report_config = report::libfuzzer_report::Config { - target_exe: config.target_exe.clone(), - target_env: config.target_env.clone(), - target_options: config.target_options.clone(), - target_timeout: config.target_timeout, - input_queue: input_q, - crashes: config - .crashes - .clone() - .map(|c| context.to_monitored_sync_dir("crashes", c)) - .transpose()?, - reports: config - .reports - .clone() - .map(|c| context.to_monitored_sync_dir("reports", c)) - .transpose()?, - unique_reports: config - .unique_reports - .clone() - .map(|c| context.to_monitored_sync_dir("unique_reports", c)) - .transpose()?, - no_repro: config - .no_repro - .clone() - .map(|c| context.to_monitored_sync_dir("no_repro", c)) - .transpose()?, - check_fuzzer_help: config.check_fuzzer_help, - check_retry_count: config.check_retry_count, - minimized_stack_depth: config.minimized_stack_depth, - check_queue: config.check_queue, - common: CommonConfig { - task_id: uuid::Uuid::new_v4(), - ..context.common.clone() - }, - }; - - context - .spawn(async move { - let mut report = report::libfuzzer_report::ReportTask::new(report_config); - report.managed_run().await - }) - .await; + TaskConfig::CrashReport(config) => { + config.run(&context).await?; + } + TaskConfig::Generator(config) => { + config.run(&context).await?; } + TaskConfig::LibfuzzerCrashReport(config) => { + config.run(&context).await?; + } + TaskConfig::LibfuzzerMerge(config) => { + config.run(&context).await?; + } + TaskConfig::LibfuzzerRegression(config) => { + config.run(&context).await?; + } + TaskConfig::LibfuzzerTestInput(config) => { + config.run(&context).await?; + } + TaskConfig::TestInput(config) => { + config.run(&context).await?; + } + TaskConfig::Radamsa => {} } Ok(context) } } -struct RunContext { +pub struct RunContext { monitor_queues: Mutex>, tasks_handle: Mutex>>>, - common: CommonConfig, + pub common: CommonConfig, event_sender: Option>, create_job_dir: bool, } @@ -287,14 +124,14 @@ impl RunContext { } } - async fn monitor_dir(&self, watch: impl AsRef) -> Result { + pub async fn monitor_dir(&self, watch: impl AsRef) -> Result { let monitor_q = DirectoryMonitorQueue::start_monitoring(watch).await?; let q_client = monitor_q.queue_client.clone(); self.monitor_queues.lock().await.push(monitor_q); Ok(q_client) } - fn to_monitored_sync_dir( + pub fn to_monitored_sync_dir( &self, name: impl AsRef, path: impl AsRef, @@ -307,7 +144,7 @@ impl RunContext { .monitor_count(&self.event_sender) } - fn to_sync_dir(&self, name: impl AsRef, path: impl AsRef) -> Result { + 
pub fn to_sync_dir(&self, name: impl AsRef, path: impl AsRef) -> Result { let path = path.as_ref(); let name = name.as_ref(); let current_dir = std::env::current_dir()?; @@ -335,6 +172,10 @@ impl RunContext { future: impl futures::Future> + std::marker::Send + 'static, ) { let handle = tokio::spawn(future); + self.add_handle(handle).await; + } + + pub async fn add_handle(&self, handle: JoinHandle>) { self.tasks_handle.lock().await.push(handle); } } @@ -392,6 +233,20 @@ mod test { #[test] fn test() { let schema = schemars::schema_for!(super::TaskGroup); - println!("{}", serde_json::to_string_pretty(&schema).unwrap()); + let schema_str = serde_json::to_string_pretty(&schema) + .unwrap() + .replace("\r\n", "\n"); + + let checked_in_schema = std::fs::read_to_string("src/local/schema.json") + .expect("Couldn't find checked-in schema.json") + .replace("\r\n", "\n"); + + println!("{}", schema_str); + + assert_eq!( + schema_str.replace('\n', ""), + checked_in_schema.replace('\n', ""), + "The checked-in local fuzzing schema did not match the generated schema." + ); } } diff --git a/src/agent/onefuzz-task/src/local/test_input.rs b/src/agent/onefuzz-task/src/local/test_input.rs index 715e1a5141..4077bd08f8 100644 --- a/src/agent/onefuzz-task/src/local/test_input.rs +++ b/src/agent/onefuzz-task/src/local/test_input.rs @@ -10,9 +10,15 @@ use crate::{ tasks::report::generic::{test_input, TestInputArgs}, }; use anyhow::Result; +use async_trait::async_trait; use clap::{Arg, ArgAction, Command}; use flume::Sender; -use std::path::PathBuf; +use onefuzz::machine_id::MachineIdentity; +use schemars::JsonSchema; +use std::{collections::HashMap, path::PathBuf}; +use uuid::Uuid; + +use super::template::{RunContext, Template}; pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { let context = build_local_context(args, false, event_sender).await?; @@ -89,3 +95,57 @@ pub fn args(name: &'static str) -> Command { .about("test an application with a specific input") .args(&build_shared_args()) } + +#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] +pub struct TestInput { + input: PathBuf, + target_exe: PathBuf, + target_options: Vec, + target_env: HashMap, + setup_dir: PathBuf, + extra_setup_dir: Option, + task_id: Uuid, + job_id: Uuid, + target_timeout: Option, + check_retry_count: u64, + check_asan_log: bool, + check_debugger: bool, + minimized_stack_depth: Option, +} + +#[async_trait] +impl Template for TestInput { + async fn run(&self, context: &RunContext) -> Result<()> { + let c = self.clone(); + let t = tokio::spawn(async move { + let libfuzzer_test_input = crate::tasks::report::generic::TestInputArgs { + input_url: None, + input: c.input.as_path(), + target_exe: c.target_exe.as_path(), + target_options: &c.target_options, + target_env: &c.target_env, + setup_dir: &c.setup_dir, + extra_setup_dir: c.extra_setup_dir.as_deref(), + task_id: uuid::Uuid::new_v4(), + job_id: uuid::Uuid::new_v4(), + target_timeout: c.target_timeout, + check_retry_count: c.check_retry_count, + check_asan_log: c.check_asan_log, + check_debugger: c.check_debugger, + minimized_stack_depth: c.minimized_stack_depth, + machine_identity: MachineIdentity { + machine_id: uuid::Uuid::new_v4(), + machine_name: "local".to_string(), + scaleset_name: None, + }, + }; + + crate::tasks::report::generic::test_input(libfuzzer_test_input) + .await + .map(|_| ()) + }); + + context.add_handle(t).await; + Ok(()) + } +} diff --git a/src/agent/onefuzz/src/input_tester.rs b/src/agent/onefuzz/src/input_tester.rs index 
dc0775ed28..331e466280 100644 --- a/src/agent/onefuzz/src/input_tester.rs +++ b/src/agent/onefuzz/src/input_tester.rs @@ -215,7 +215,9 @@ impl<'a> Tester<'a> { let triage = crate::triage::TriageCommand::new(cmd)?; // Share the new child ID with main thread. - let Ok(()) = sender.send(triage.pid()) else { bail!("unable to send PID") }; + let Ok(()) = sender.send(triage.pid()) else { + bail!("unable to send PID") + }; // The target run is blocking, and may hang. triage.run() From 4e2b3fc648bcbec4f42c3679e3c108f6c49208b4 Mon Sep 17 00:00:00 2001 From: Adam <103067949+AdamL-Microsoft@users.noreply.github.com> Date: Thu, 17 Aug 2023 12:36:08 -0700 Subject: [PATCH 04/20] Update default windows image for windows 11 (#3374) * Update all win10 image references to win11 as default --- docs/supported-platforms.md | 14 +++++++------- docs/webhook_events.md | 6 +++--- .../ApiService/onefuzzlib/ImageReference.cs | 2 +- src/pytypes/onefuzztypes/models.py | 2 +- src/runtime-tools/win64/onefuzz.ps1 | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/supported-platforms.md b/docs/supported-platforms.md index 418043cdff..b311301d1f 100644 --- a/docs/supported-platforms.md +++ b/docs/supported-platforms.md @@ -4,18 +4,18 @@ OneFuzz is cross-platform, and the actively-supported platforms vary by componen ### CLI -We continuously test the CLI on Windows 10 Pro and Ubuntu 20.04 LTS, both on the -x64 architecture. The CLI client is written in Python 3, and targets Python 3.7 -and up. We distribute a self-contained executable CLI build for Windows which +We continuously test the CLI on Windows 11 Pro and Ubuntu 20.04 LTS, both on the +x64 architecture. The CLI client is written in Python 3, and targets Python 3.7 +and up. We distribute a self-contained executable CLI build for Windows which bundles a Python interpreter. ### Virtual Machine Scale Sets OneFuzz deploys targets into Azure Virtual Machine Scale Sets for fuzzing (and -supporting tasks). OneFuzz permits arbitrary choice of VM SKU and OS Image, -including custom images. We continuously test on Window 10 Pro x64 (using the -Azure OS image URN `MicrosoftWindowsDesktop:Windows-10:win10-21h2-pro:latest`) -and Ubuntu 20.04 LTS x64 (using the Azure OS image URN +supporting tasks). OneFuzz permits arbitrary choice of VM SKU and OS Image, +including custom images. We continuously test on Window 11 Pro x64 (using the +Azure OS image URN `MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest`) +and Ubuntu 20.04 LTS x64 (using the Azure OS image URN `Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest`). 
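
For reference, the image URNs documented above are what a scaleset ends up referencing when it is created through the SDK or CLI. A minimal sketch, assuming the SDK surface used by the integration tests later in this series (`pools.create`, `scalesets.create` with `initial_size` and `image` keywords); the pool name and sizes are placeholders, not values taken from this patch:

    from onefuzztypes.enums import OS
    from onefuzz.api import Onefuzz

    # Illustrative sketch only: creates a Windows pool and a scaleset pinned
    # to the default Windows 11 image URN described above. Pool name and
    # sizes are placeholders; the keyword arguments mirror the
    # scalesets.create(...) call in the integration tests in this series.
    of = Onefuzz()
    pool_name = "example-windows-pool"
    of.pools.create(pool_name, OS.windows)
    of.scalesets.create(
        pool_name,
        2,
        initial_size=2,
        image="MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest",
    )

Custom images follow the same pattern; only the image string changes.
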
### LibFuzzer Compilation diff --git a/docs/webhook_events.md b/docs/webhook_events.md index 633703fb85..a417b7465f 100644 --- a/docs/webhook_events.md +++ b/docs/webhook_events.md @@ -721,7 +721,7 @@ If webhook is set to have Event Grid message format then the payload will look a "00000000-0000-0000-0000-000000000000" ], "default_linux_vm_image": "Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest", - "default_windows_vm_image": "MicrosoftWindowsDesktop:Windows-10:win10-21h2-pro:latest", + "default_windows_vm_image": "MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest", "network_config": { "address_space": "10.0.0.0/8", "subnet": "10.0.0.0/16" @@ -868,7 +868,7 @@ If webhook is set to have Event Grid message format then the payload will look a "type": "string" }, "default_windows_vm_image": { - "default": "MicrosoftWindowsDesktop:Windows-10:win10-21h2-pro:latest", + "default": "MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest", "title": "Default Windows Vm Image", "type": "string" }, @@ -6181,7 +6181,7 @@ If webhook is set to have Event Grid message format then the payload will look a "type": "string" }, "default_windows_vm_image": { - "default": "MicrosoftWindowsDesktop:Windows-10:win10-21h2-pro:latest", + "default": "MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest", "title": "Default Windows Vm Image", "type": "string" }, diff --git a/src/ApiService/ApiService/onefuzzlib/ImageReference.cs b/src/ApiService/ApiService/onefuzzlib/ImageReference.cs index 68b26c3aaa..3403d75697 100644 --- a/src/ApiService/ApiService/onefuzzlib/ImageReference.cs +++ b/src/ApiService/ApiService/onefuzzlib/ImageReference.cs @@ -13,7 +13,7 @@ namespace Microsoft.OneFuzz.Service; public static class DefaultImages { - public static readonly ImageReference Windows = ImageReference.MustParse("MicrosoftWindowsDesktop:Windows-10:win10-21h2-pro:latest"); + public static readonly ImageReference Windows = ImageReference.MustParse("MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest"); public static readonly ImageReference Linux = ImageReference.MustParse("Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest"); } diff --git a/src/pytypes/onefuzztypes/models.py b/src/pytypes/onefuzztypes/models.py index 30fbf0f403..a5f8139e97 100644 --- a/src/pytypes/onefuzztypes/models.py +++ b/src/pytypes/onefuzztypes/models.py @@ -837,7 +837,7 @@ class InstanceConfig(BaseModel): ) extensions: Optional[AzureVmExtensionConfig] default_windows_vm_image: str = Field( - default="MicrosoftWindowsDesktop:Windows-10:win10-21h2-pro:latest" + default="MicrosoftWindowsDesktop:Windows-11:win11-22h2-pro:latest" ) default_linux_vm_image: str = Field( default="Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest" diff --git a/src/runtime-tools/win64/onefuzz.ps1 b/src/runtime-tools/win64/onefuzz.ps1 index 51524df7d2..b33f35a5a1 100644 --- a/src/runtime-tools/win64/onefuzz.ps1 +++ b/src/runtime-tools/win64/onefuzz.ps1 @@ -139,7 +139,7 @@ function Install-Debugger { # Windows PowerShell reports progress on every byte downloaded causing performance issues. 
$ProgressPreference = 'SilentlyContinue' - Invoke-WebRequest -Uri 'https://download.microsoft.com/download/4/2/2/42245968-6A79-4DA7-A5FB-08C0AD0AE661/windowssdk/winsdksetup.exe' -OutFile debug-setup.exe + Invoke-WebRequest -Uri 'https://download.microsoft.com/download/b/8/5/b85bd06f-491c-4c1c-923e-75ce2fe2378e/windowssdk/winsdksetup.exe' -OutFile debug-setup.exe Start-Process "./debug-setup.exe" -ArgumentList "/norestart /quiet /features OptionId.WindowsDesktopDebuggers /log dbg-install.log" -Wait # New-NetFirewallRule -Name onefuzzdebug -DisplayName 'CDB listener' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 1337 From bdb2f1337d8934e78689f707851822bbd15811ef Mon Sep 17 00:00:00 2001 From: Teo Voinea <58236992+tevoinea@users.noreply.github.com> Date: Fri, 18 Aug 2023 09:52:53 -0400 Subject: [PATCH 05/20] Add mariner support (#3306) * Updating setup.sh * logger works * Install omi * syntax * Add option to create mariner pool in checkpr * . * Need to install sudo * . * Include both logging extensions * Revert because we already isntall the azure monitor for linux extension * Downgrade type handler version * Add data collection rules and fuzzing articacts for integration test * TODOs * Fix linux jobs getting sent to mariner * Fix linux jobs going to marienr pool * Fix pools * Remove the old logging extension on linux * try to retain syslog * Value to be set was not clear * Trying to route logs * Maybe we need to specify properties even if we don't set anything * Start adding a data collection association * Create association * update packages.lock * . * Remove auto config and add dependency map * Bring back GCS autoconfig * Undo DCR stuff * Undo package version bump * Fix up files * Remove TODO --- .github/workflows/ci.yml | 2 + .../ApiService/onefuzzlib/Extension.cs | 44 ++++------- src/integration-tests/integration-test.py | 77 +++++++++++++++---- src/runtime-tools/linux/setup.sh | 64 +++++++++++---- 4 files changed, 126 insertions(+), 61 deletions(-) mode change 100755 => 100644 src/runtime-tools/linux/setup.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0740414dcd..9b8eb43826 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -538,9 +538,11 @@ jobs: mkdir -p artifacts/linux-libfuzzer mkdir -p artifacts/linux-libfuzzer-with-options + mkdir -p artifacts/mariner-libfuzzer (cd libfuzzer ; make ) cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer-with-options + cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/mariner-libfuzzer mkdir -p artifacts/linux-libfuzzer-regression (cd libfuzzer-regression ; make ) diff --git a/src/ApiService/ApiService/onefuzzlib/Extension.cs b/src/ApiService/ApiService/onefuzzlib/Extension.cs index 7995026eca..fbf62dd343 100644 --- a/src/ApiService/ApiService/onefuzzlib/Extension.cs +++ b/src/ApiService/ApiService/onefuzzlib/Extension.cs @@ -36,7 +36,9 @@ public async Async.Task> GenericExtensions(AzureLocati var extensions = new List(); var instanceConfig = await _context.ConfigOperations.Fetch(); - extensions.Add(await MonitorExtension(region, vmOs)); + if (vmOs == Os.Windows) { + extensions.Add(await MonitorExtension(region)); + } var depenency = DependencyExtension(region, vmOs); if (depenency is not null) { @@ -329,37 +331,21 @@ public async Async.Task AgentConfig(AzureLocation region, Os throw new NotSupportedException($"unsupported OS: {vmOs}"); } - public async Async.Task MonitorExtension(AzureLocation 
region, Os vmOs) { + public async Async.Task MonitorExtension(AzureLocation region) { var settings = await _context.LogAnalytics.GetMonitorSettings(); var extensionSettings = JsonSerializer.Serialize(new { WorkspaceId = settings.Id }, _extensionSerializerOptions); var protectedExtensionSettings = JsonSerializer.Serialize(new { WorkspaceKey = settings.Key }, _extensionSerializerOptions); - if (vmOs == Os.Windows) { - return new VMExtensionWrapper { - Location = region, - Name = "OMSExtension", - TypePropertiesType = "MicrosoftMonitoringAgent", - Publisher = "Microsoft.EnterpriseCloud.Monitoring", - TypeHandlerVersion = "1.0", - AutoUpgradeMinorVersion = true, - Settings = new BinaryData(extensionSettings), - ProtectedSettings = new BinaryData(protectedExtensionSettings), - EnableAutomaticUpgrade = false - }; - } else if (vmOs == Os.Linux) { - return new VMExtensionWrapper { - Location = region, - Name = "OmsAgentForLinux", - TypePropertiesType = "OmsAgentForLinux", - Publisher = "Microsoft.EnterpriseCloud.Monitoring", - TypeHandlerVersion = "1.0", - AutoUpgradeMinorVersion = true, - Settings = new BinaryData(extensionSettings), - ProtectedSettings = new BinaryData(protectedExtensionSettings), - EnableAutomaticUpgrade = false - }; - } else { - throw new NotSupportedException($"unsupported os: {vmOs}"); - } + return new VMExtensionWrapper { + Location = region, + Name = "OMSExtension", + TypePropertiesType = "MicrosoftMonitoringAgent", + Publisher = "Microsoft.EnterpriseCloud.Monitoring", + TypeHandlerVersion = "1.0", + AutoUpgradeMinorVersion = true, + Settings = new BinaryData(extensionSettings), + ProtectedSettings = new BinaryData(protectedExtensionSettings), + EnableAutomaticUpgrade = false + }; } diff --git a/src/integration-tests/integration-test.py b/src/integration-tests/integration-test.py index 057404ceff..15ffcfb9fe 100755 --- a/src/integration-tests/integration-test.py +++ b/src/integration-tests/integration-test.py @@ -88,6 +88,7 @@ class Integration(BaseModel): target_method: Optional[str] setup_dir: Optional[str] target_env: Optional[Dict[str, str]] + pool: PoolName TARGETS: Dict[str, Integration] = { @@ -97,6 +98,7 @@ class Integration(BaseModel): target_exe="fuzz.exe", inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, + pool="linux", ), "linux-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -124,6 +126,7 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], + pool="linux", ), "linux-libfuzzer-with-options": Integration( template=TemplateType.libfuzzer, @@ -137,6 +140,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, fuzzing_target_options=["-runs=10000000"], + pool="linux", ), "linux-libfuzzer-dlopen": Integration( template=TemplateType.libfuzzer, @@ -150,6 +154,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, + pool="linux", ), "linux-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -163,6 +168,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, + pool="linux", ), "linux-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -180,6 +186,7 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, + pool="linux", ), "linux-libfuzzer-aarch64-crosscompile": Integration( template=TemplateType.libfuzzer_qemu_user, @@ -189,6 +196,7 @@ class Integration(BaseModel): use_setup=True, wait_for_files={ContainerType.inputs: 2, ContainerType.crashes: 1}, 
test_repro=False, + pool="linux", ), "linux-libfuzzer-rust": Integration( template=TemplateType.libfuzzer, @@ -196,6 +204,7 @@ class Integration(BaseModel): target_exe="fuzz_target_1", wait_for_files={ContainerType.unique_reports: 1, ContainerType.coverage: 1}, fuzzing_target_options=["--test:{extra_setup_dir}"], + pool="linux", ), "linux-trivial-crash": Integration( template=TemplateType.radamsa, @@ -204,6 +213,7 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, + pool="linux", ), "linux-trivial-crash-asan": Integration( template=TemplateType.radamsa, @@ -213,6 +223,28 @@ class Integration(BaseModel): wait_for_files={ContainerType.unique_reports: 1}, check_asan_log=True, disable_check_debugger=True, + pool="linux", + ), + # TODO: Don't install OMS extension on linux anymore + # TODO: Figure out why non mariner work is being scheduled to the mariner pool + "mariner-libfuzzer": Integration( + template=TemplateType.libfuzzer, + os=OS.linux, + target_exe="fuzz.exe", + inputs="seeds", + wait_for_files={ + ContainerType.unique_reports: 1, + ContainerType.coverage: 1, + ContainerType.inputs: 2, + ContainerType.extra_output: 1, + }, + reboot_after_setup=True, + inject_fake_regression=True, + fuzzing_target_options=[ + "--test:{extra_setup_dir}", + "--write_test_file={extra_output_dir}/test.txt", + ], + pool=PoolName("mariner") ), "windows-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -234,6 +266,7 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], + pool="windows", ), "windows-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -246,6 +279,7 @@ class Integration(BaseModel): ContainerType.coverage: 1, }, use_setup=True, + pool="windows", ), "windows-libfuzzer-load-library": Integration( template=TemplateType.libfuzzer, @@ -258,6 +292,7 @@ class Integration(BaseModel): ContainerType.coverage: 1, }, use_setup=True, + pool="windows", ), "windows-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -275,6 +310,7 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, + pool="windows", ), "windows-trivial-crash": Integration( template=TemplateType.radamsa, @@ -283,6 +319,7 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, + pool="windows", ), } @@ -351,7 +388,7 @@ def try_info_get(data: Any) -> None: self.inject_log(self.start_log_marker) for entry in os_list: - name = PoolName(f"testpool-{entry.name}-{self.test_id}") + name = self.build_pool_name(entry.name) self.logger.info("creating pool: %s:%s", entry.name, name) self.of.pools.create(name, entry) self.logger.info("creating scaleset for pool: %s", name) @@ -359,6 +396,15 @@ def try_info_get(data: Any) -> None: name, pool_size, region=region, initial_size=pool_size ) + name = self.build_pool_name("mariner") + self.logger.info("creating pool: %s:%s", "mariner", name) + self.of.pools.create(name, OS.linux) + self.logger.info("creating scaleset for pool: %s", name) + self.of.scalesets.create( + name, pool_size, region=region, initial_size=pool_size, image="MicrosoftCBLMariner:cbl-mariner:cbl-mariner-2-gen2:latest" + ) + + class UnmanagedPool: def __init__( self, @@ -560,12 +606,9 @@ def launch( ) -> List[UUID]: """Launch all of the fuzzing templates""" - pools: Dict[OS, Pool] = {} + pool = None if unmanaged_pool is not None: - 
pools[unmanaged_pool.the_os] = self.of.pools.get(unmanaged_pool.pool_name) - else: - for pool in self.of.pools.list(): - pools[pool.os] = pool + pool = unmanaged_pool.pool_name job_ids = [] @@ -576,8 +619,8 @@ def launch( if config.os not in os_list: continue - if config.os not in pools.keys(): - raise Exception(f"No pool for target: {target} ,os: {config.os}") + if pool is None: + pool = self.build_pool_name(config.pool) self.logger.info("launching: %s", target) @@ -601,8 +644,9 @@ def launch( setup = Directory(os.path.join(setup, config.nested_setup_dir)) job: Optional[Job] = None + job = self.build_job( - duration, pools, target, config, setup, target_exe, inputs + duration, pool, target, config, setup, target_exe, inputs ) if config.inject_fake_regression and job is not None: @@ -618,7 +662,7 @@ def launch( def build_job( self, duration: int, - pools: Dict[OS, Pool], + pool: PoolName, target: str, config: Integration, setup: Optional[Directory], @@ -634,7 +678,7 @@ def build_job( self.project, target, BUILD, - pools[config.os].name, + pool, target_exe=target_exe, inputs=inputs, setup_dir=setup, @@ -659,7 +703,7 @@ def build_job( self.project, target, BUILD, - pools[config.os].name, + pool, target_dll=File(config.target_exe), inputs=inputs, setup_dir=setup, @@ -675,7 +719,7 @@ def build_job( self.project, target, BUILD, - pools[config.os].name, + pool, inputs=inputs, target_exe=target_exe, duration=duration, @@ -688,7 +732,7 @@ def build_job( self.project, target, BUILD, - pool_name=pools[config.os].name, + pool_name=pool, target_exe=target_exe, inputs=inputs, setup_dir=setup, @@ -703,7 +747,7 @@ def build_job( self.project, target, BUILD, - pool_name=pools[config.os].name, + pool_name=pool, target_exe=target_exe, inputs=inputs, setup_dir=setup, @@ -1233,6 +1277,9 @@ def check_logs_for_errors(self) -> None: if seen_errors: raise Exception("logs included errors") + + def build_pool_name(self, os_type: str) -> PoolName: + return PoolName(f"testpool-{os_type}-{self.test_id}") class Run(Command): diff --git a/src/runtime-tools/linux/setup.sh b/src/runtime-tools/linux/setup.sh old mode 100755 new mode 100644 index f6859003b4..794e827f4d --- a/src/runtime-tools/linux/setup.sh +++ b/src/runtime-tools/linux/setup.sh @@ -18,6 +18,14 @@ export DOTNET_CLI_HOME="$DOTNET_ROOT" export ONEFUZZ_ROOT=/onefuzz export LLVM_SYMBOLIZER_PATH=/onefuzz/bin/llvm-symbolizer +# `logger` won't work on mariner unless we install this package first +if type yum > /dev/null 2> /dev/null; then + until yum install -y util-linux sudo; do + echo "yum failed. sleep 10s, then retrying" + sleep 10 + done +fi + logger "onefuzz: making directories" sudo mkdir -p /onefuzz/downloaded sudo chown -R $(whoami) /onefuzz @@ -134,31 +142,53 @@ if type apt > /dev/null 2> /dev/null; then sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH fi - # Install dotnet + # Needed to install dotnet until sudo apt install -y curl libicu-dev; do logger "apt failed, sleeping 10s then retrying" sleep 10 done +elif type yum > /dev/null 2> /dev/null; then + until yum install -y gdb gdb-gdbserver libunwind awk ca-certificates tar yum-utils shadow-utils cronie procps; do + echo "yum failed. 
sleep 10s, then retrying" + sleep 10 + done + + # Install updated Microsoft Open Management Infrastructure - github.com/microsoft/omi + yum-config-manager --add-repo=https://packages.microsoft.com/config/rhel/8/prod.repo 2>&1 | logger -s -i -t 'onefuzz-OMI-add-MS-repo' + yum install -y omi 2>&1 | logger -s -i -t 'onefuzz-OMI-install' - logger "downloading dotnet install" - curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' - chmod +x dotnet-install.sh - for version in "${DOTNET_VERSIONS[@]}"; do - logger "running dotnet install $version" - /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' - done - rm dotnet-install.sh - - logger "install dotnet tools" - pushd "$DOTNET_ROOT" - ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - "$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - "$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - "$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - popd + if ! [ -f ${LLVM_SYMBOLIZER_PATH} ]; then + until yum install -y llvm-12.0.1; do + echo "yum failed, sleeping 10s then retrying" + sleep 10 + done + + # If specifying symbolizer, exe name must be a "known symbolizer". + # Using `llvm-symbolizer` works for clang 8 .. 12. + sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH + fi fi +# Install dotnet +logger "downloading dotnet install" +curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' +chmod +x dotnet-install.sh + +for version in "${DOTNET_VERSIONS[@]}"; do + logger "running dotnet install $version" + /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' +done +rm dotnet-install.sh + +logger "install dotnet tools" +pushd "$DOTNET_ROOT" +ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +popd + if [ -v DOCKER_BUILD ]; then echo "building for docker" elif [ -d /etc/systemd/system ]; then From 804b2993b7681e39c2915116a589dd65949b218b Mon Sep 17 00:00:00 2001 From: Noah McGregor Harper <74685766+nharper285@users.noreply.github.com> Date: Fri, 18 Aug 2023 10:01:32 -0700 Subject: [PATCH 06/20] Job Result Round #2 (#3380) * Attempting new background process. * Formatting. * Adding JobResultOperations. * Fixing. * Adding strorage tests. * Still trying. * Adding. * Adding to rest of queue. * Adding arg. * Fixing. * Updating job result. * Adding cargo lock. * Restoring queuetaskheartbeat. * Fixing job format. * Properly naming function. * Adding to program.cs. * Removing uning unneccessary code. * More small fixes. * Adding regular crash results. * Resolving issues. * More fixes. * Debugging crashing input. * Adding * Remove * Fixed. * Handling other cases. 
* Adding value to data. * Adding values to queue messages. * Adidng values. * Adidng await. * Adding runtimestats. * Fixing runtime_stats. * Updating types. * Fixing types. * Resolve. * Resolve type. * Updading onefuzz-result cargo. * Responding to comments. * More comments. * Removing unused params. * Respond to more comments. * Updating cargo. * Fixing return type. * UPdating keys." * UPdating return type. * Updating JobResult with constructor. * Remove debug logging. * Adding warning. * Making generic into TaskContext. * Adding crash dump. * Updating for crash dumps. * Formatting. * Updating to warn. * Formatting. * Formatting. * Borrowing new client. --- .../ApiService/Functions/QueueJobResult.cs | 60 ++++++++ .../ApiService/OneFuzzTypes/Model.cs | 45 ++++++ src/ApiService/ApiService/Program.cs | 1 + .../ApiService/onefuzzlib/Config.cs | 1 + .../onefuzzlib/JobResultOperations.cs | 125 +++++++++++++++++ .../ApiService/onefuzzlib/OnefuzzContext.cs | 2 + .../IntegrationTests/Fakes/TestContext.cs | 3 + src/agent/Cargo.lock | 16 +++ src/agent/Cargo.toml | 1 + src/agent/onefuzz-agent/src/config.rs | 12 ++ src/agent/onefuzz-result/Cargo.toml | 18 +++ src/agent/onefuzz-result/src/job_result.rs | 129 ++++++++++++++++++ src/agent/onefuzz-result/src/lib.rs | 4 + src/agent/onefuzz-task/Cargo.toml | 1 + src/agent/onefuzz-task/src/local/common.rs | 1 + src/agent/onefuzz-task/src/local/template.rs | 1 + .../src/tasks/analysis/generic.rs | 3 + src/agent/onefuzz-task/src/tasks/config.rs | 20 +++ .../src/tasks/coverage/generic.rs | 19 ++- .../onefuzz-task/src/tasks/fuzz/generator.rs | 7 +- .../src/tasks/fuzz/libfuzzer/common.rs | 49 +++++-- .../onefuzz-task/src/tasks/fuzz/supervisor.rs | 15 +- src/agent/onefuzz-task/src/tasks/heartbeat.rs | 2 +- .../src/tasks/regression/common.rs | 15 +- .../src/tasks/regression/generic.rs | 3 +- .../src/tasks/regression/libfuzzer.rs | 3 +- .../src/tasks/report/crash_report.rs | 45 +++++- .../src/tasks/report/dotnet/generic.rs | 22 +-- .../onefuzz-task/src/tasks/report/generic.rs | 14 +- .../src/tasks/report/libfuzzer_report.rs | 5 + src/agent/onefuzz/Cargo.toml | 1 + src/agent/onefuzz/src/syncdir.rs | 66 ++++++++- .../bicep-templates/storageAccounts.bicep | 2 +- 33 files changed, 671 insertions(+), 40 deletions(-) create mode 100644 src/ApiService/ApiService/Functions/QueueJobResult.cs create mode 100644 src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs create mode 100644 src/agent/onefuzz-result/Cargo.toml create mode 100644 src/agent/onefuzz-result/src/job_result.rs create mode 100644 src/agent/onefuzz-result/src/lib.rs diff --git a/src/ApiService/ApiService/Functions/QueueJobResult.cs b/src/ApiService/ApiService/Functions/QueueJobResult.cs new file mode 100644 index 0000000000..d781a4d1e1 --- /dev/null +++ b/src/ApiService/ApiService/Functions/QueueJobResult.cs @@ -0,0 +1,60 @@ +using System.Text.Json; +using Microsoft.Azure.Functions.Worker; +using Microsoft.Extensions.Logging; +using Microsoft.OneFuzz.Service.OneFuzzLib.Orm; +namespace Microsoft.OneFuzz.Service.Functions; + + +public class QueueJobResult { + private readonly ILogger _log; + private readonly IOnefuzzContext _context; + + public QueueJobResult(ILogger logTracer, IOnefuzzContext context) { + _log = logTracer; + _context = context; + } + + [Function("QueueJobResult")] + public async Async.Task Run([QueueTrigger("job-result", Connection = "AzureWebJobsStorage")] string msg) { + + var _tasks = _context.TaskOperations; + var _jobs = _context.JobOperations; + + _log.LogInformation("job result: 
{msg}", msg); + var jr = JsonSerializer.Deserialize(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}"); + + var task = await _tasks.GetByTaskId(jr.TaskId); + if (task == null) { + _log.LogWarning("invalid {TaskId}", jr.TaskId); + return; + } + + var job = await _jobs.Get(task.JobId); + if (job == null) { + _log.LogWarning("invalid {JobId}", task.JobId); + return; + } + + JobResultData? data = jr.Data; + if (data == null) { + _log.LogWarning($"job result data is empty, throwing out: {jr}"); + return; + } + + var jobResultType = data.Type; + _log.LogInformation($"job result data type: {jobResultType}"); + + Dictionary value; + if (jr.Value.Count > 0) { + value = jr.Value; + } else { + _log.LogWarning($"job result data is empty, throwing out: {jr}"); + return; + } + + var jobResult = await _context.JobResultOperations.CreateOrUpdate(job.JobId, jobResultType, value); + if (!jobResult.IsOk) { + _log.LogError("failed to create or update with job result {JobId}", job.JobId); + } + } +} diff --git a/src/ApiService/ApiService/OneFuzzTypes/Model.cs b/src/ApiService/ApiService/OneFuzzTypes/Model.cs index e430c1448c..b839f52ddc 100644 --- a/src/ApiService/ApiService/OneFuzzTypes/Model.cs +++ b/src/ApiService/ApiService/OneFuzzTypes/Model.cs @@ -33,6 +33,19 @@ public enum HeartbeatType { TaskAlive, } +[SkipRename] +public enum JobResultType { + NewCrashingInput, + NoReproCrashingInput, + NewReport, + NewUniqueReport, + NewRegressionReport, + NewCoverage, + NewCrashDump, + CoverageData, + RuntimeStats, +} + public record HeartbeatData(HeartbeatType Type); public record TaskHeartbeatEntry( @@ -41,6 +54,16 @@ public record TaskHeartbeatEntry( Guid MachineId, HeartbeatData[] Data); +public record JobResultData(JobResultType Type); + +public record TaskJobResultEntry( + Guid TaskId, + Guid? JobId, + Guid MachineId, + JobResultData Data, + Dictionary Value + ); + public record NodeHeartbeatEntry(Guid NodeId, HeartbeatData[] Data); public record NodeCommandStopIfFree(); @@ -892,6 +915,27 @@ public record SecretAddress(Uri Url) : ISecret { public record SecretData(ISecret Secret) { } +public record JobResult( + [PartitionKey][RowKey] Guid JobId, + string Project, + string Name, + double NewCrashingInput = 0, + double NoReproCrashingInput = 0, + double NewReport = 0, + double NewUniqueReport = 0, + double NewRegressionReport = 0, + double NewCrashDump = 0, + double InstructionsCovered = 0, + double TotalInstructions = 0, + double CoverageRate = 0, + double IterationCount = 0 +) : EntityBase() { + public JobResult(Guid JobId, string Project, string Name) : this( + JobId: JobId, + Project: Project, + Name: Name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) { } +} + public record JobConfig( string Project, string Name, @@ -1056,6 +1100,7 @@ public record TaskUnitConfig( string? InstanceTelemetryKey, string? MicrosoftTelemetryKey, Uri HeartbeatQueue, + Uri JobResultQueue, Dictionary Tags ) { public Uri? 
inputQueue { get; set; } diff --git a/src/ApiService/ApiService/Program.cs b/src/ApiService/ApiService/Program.cs index f425c00809..d5ee30b45e 100644 --- a/src/ApiService/ApiService/Program.cs +++ b/src/ApiService/ApiService/Program.cs @@ -118,6 +118,7 @@ public static async Async.Task Main() { .AddScoped() .AddScoped() .AddScoped() + .AddScoped() .AddScoped() .AddScoped() .AddScoped() diff --git a/src/ApiService/ApiService/onefuzzlib/Config.cs b/src/ApiService/ApiService/onefuzzlib/Config.cs index 71af317348..872cedbc01 100644 --- a/src/ApiService/ApiService/onefuzzlib/Config.cs +++ b/src/ApiService/ApiService/onefuzzlib/Config.cs @@ -71,6 +71,7 @@ private static BlobContainerSasPermissions ConvertPermissions(ContainerPermissio InstanceTelemetryKey: _serviceConfig.ApplicationInsightsInstrumentationKey, MicrosoftTelemetryKey: _serviceConfig.OneFuzzTelemetry, HeartbeatQueue: await _queue.GetQueueSas("task-heartbeat", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get heartbeat queue sas"), + JobResultQueue: await _queue.GetQueueSas("job-result", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get heartbeat queue sas"), Tags: task.Config.Tags ?? new Dictionary() ); diff --git a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs new file mode 100644 index 0000000000..6e33890500 --- /dev/null +++ b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs @@ -0,0 +1,125 @@ +using ApiService.OneFuzzLib.Orm; +using Microsoft.Extensions.Logging; +using Polly; +namespace Microsoft.OneFuzz.Service; + +public interface IJobResultOperations : IOrm { + + Async.Task GetJobResult(Guid jobId); + Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue); + +} +public class JobResultOperations : Orm, IJobResultOperations { + + public JobResultOperations(ILogger log, IOnefuzzContext context) + : base(log, context) { + } + + public async Async.Task GetJobResult(Guid jobId) { + return await SearchByPartitionKeys(new[] { jobId.ToString() }).SingleOrDefaultAsync(); + } + + private JobResult UpdateResult(JobResult result, JobResultType type, Dictionary resultValue) { + + var newResult = result; + double newValue; + switch (type) { + case JobResultType.NewCrashingInput: + newValue = result.NewCrashingInput + resultValue["count"]; + newResult = result with { NewCrashingInput = newValue }; + break; + case JobResultType.NewReport: + newValue = result.NewReport + resultValue["count"]; + newResult = result with { NewReport = newValue }; + break; + case JobResultType.NewUniqueReport: + newValue = result.NewUniqueReport + resultValue["count"]; + newResult = result with { NewUniqueReport = newValue }; + break; + case JobResultType.NewRegressionReport: + newValue = result.NewRegressionReport + resultValue["count"]; + newResult = result with { NewRegressionReport = newValue }; + break; + case JobResultType.NewCrashDump: + newValue = result.NewCrashDump + resultValue["count"]; + newResult = result with { NewCrashDump = newValue }; + break; + case JobResultType.CoverageData: + double newCovered = resultValue["covered"]; + double newTotalCovered = resultValue["features"]; + double newCoverageRate = resultValue["rate"]; + newResult = result with { InstructionsCovered = newCovered, TotalInstructions = newTotalCovered, CoverageRate = newCoverageRate }; + break; + case JobResultType.RuntimeStats: + double newTotalIterations = resultValue["total_count"]; + newResult = 
result with { IterationCount = newTotalIterations }; + break; + default: + _logTracer.LogWarning($"Invalid Field {type}."); + break; + } + _logTracer.LogInformation($"Attempting to log new result: {newResult}"); + return newResult; + } + + private async Async.Task TryUpdate(Job job, JobResultType resultType, Dictionary resultValue) { + var jobId = job.JobId; + + var jobResult = await GetJobResult(jobId); + + if (jobResult == null) { + _logTracer.LogInformation("Creating new JobResult for Job {JobId}", jobId); + + var entry = new JobResult(JobId: jobId, Project: job.Config.Project, Name: job.Config.Name); + + jobResult = UpdateResult(entry, resultType, resultValue); + + var r = await Insert(jobResult); + if (!r.IsOk) { + _logTracer.AddHttpStatus(r.ErrorV); + _logTracer.LogError("failed to insert job result {JobId}", jobResult.JobId); + throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); + } + _logTracer.LogInformation("created job result {JobId}", jobResult.JobId); + } else { + _logTracer.LogInformation("Updating existing JobResult entry for Job {JobId}", jobId); + + jobResult = UpdateResult(jobResult, resultType, resultValue); + + var r = await Update(jobResult); + if (!r.IsOk) { + _logTracer.AddHttpStatus(r.ErrorV); + _logTracer.LogError("failed to update job result {JobId}", jobResult.JobId); + throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); + } + _logTracer.LogInformation("updated job result {JobId}", jobResult.JobId); + } + + return true; + } + + public async Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue) { + + var job = await _context.JobOperations.Get(jobId); + if (job == null) { + return OneFuzzResultVoid.Error(ErrorCode.INVALID_REQUEST, "invalid job"); + } + + var success = false; + try { + _logTracer.LogInformation("attempt to update job result {JobId}", job.JobId); + var policy = Policy.Handle().WaitAndRetryAsync(50, _ => new TimeSpan(0, 0, 5)); + await policy.ExecuteAsync(async () => { + success = await TryUpdate(job, resultType, resultValue); + _logTracer.LogInformation("attempt {success}", success); + }); + return OneFuzzResultVoid.Ok; + } catch (Exception e) { + return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, new string[] { + $"Unexpected failure when attempting to update job result for {job.JobId}", + $"Exception: {e}" + }); + } + } +} + diff --git a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs index d877bfddbb..03c6322663 100644 --- a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs +++ b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs @@ -19,6 +19,7 @@ public interface IOnefuzzContext { IExtensions Extensions { get; } IIpOperations IpOperations { get; } IJobOperations JobOperations { get; } + IJobResultOperations JobResultOperations { get; } ILogAnalytics LogAnalytics { get; } INodeMessageOperations NodeMessageOperations { get; } INodeOperations NodeOperations { get; } @@ -83,6 +84,7 @@ public OnefuzzContext(IServiceProvider serviceProvider) { public IVmOperations VmOperations => _serviceProvider.GetRequiredService(); public ISecretsOperations SecretsOperations => _serviceProvider.GetRequiredService(); public IJobOperations JobOperations => _serviceProvider.GetRequiredService(); + public IJobResultOperations JobResultOperations => _serviceProvider.GetRequiredService(); public IScheduler Scheduler => _serviceProvider.GetRequiredService(); public IConfig Config => 
_serviceProvider.GetRequiredService(); public ILogAnalytics LogAnalytics => _serviceProvider.GetRequiredService(); diff --git a/src/ApiService/IntegrationTests/Fakes/TestContext.cs b/src/ApiService/IntegrationTests/Fakes/TestContext.cs index c46ff5fce7..66d121e746 100644 --- a/src/ApiService/IntegrationTests/Fakes/TestContext.cs +++ b/src/ApiService/IntegrationTests/Fakes/TestContext.cs @@ -32,6 +32,7 @@ public TestContext(IHttpClientFactory httpClientFactory, OneFuzzLoggerProvider p TaskOperations = new TaskOperations(provider.CreateLogger(), Cache, this); NodeOperations = new NodeOperations(provider.CreateLogger(), this); JobOperations = new JobOperations(provider.CreateLogger(), this); + JobResultOperations = new JobResultOperations(provider.CreateLogger(), this); NodeTasksOperations = new NodeTasksOperations(provider.CreateLogger(), this); TaskEventOperations = new TaskEventOperations(provider.CreateLogger(), this); NodeMessageOperations = new NodeMessageOperations(provider.CreateLogger(), this); @@ -57,6 +58,7 @@ public Async.Task InsertAll(params EntityBase[] objs) Node n => NodeOperations.Insert(n), Pool p => PoolOperations.Insert(p), Job j => JobOperations.Insert(j), + JobResult jr => JobResultOperations.Insert(jr), Repro r => ReproOperations.Insert(r), Scaleset ss => ScalesetOperations.Insert(ss), NodeTasks nt => NodeTasksOperations.Insert(nt), @@ -84,6 +86,7 @@ public Async.Task InsertAll(params EntityBase[] objs) public ITaskOperations TaskOperations { get; } public IJobOperations JobOperations { get; } + public IJobResultOperations JobResultOperations { get; } public INodeOperations NodeOperations { get; } public INodeTasksOperations NodeTasksOperations { get; } public ITaskEventOperations TaskEventOperations { get; } diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index a1d86e7d25..254684be97 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -2123,6 +2123,7 @@ dependencies = [ "log", "nix", "notify", + "onefuzz-result", "onefuzz-telemetry", "pete", "pretty_assertions", @@ -2197,6 +2198,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "onefuzz-result" +version = "0.2.0" +dependencies = [ + "anyhow", + "async-trait", + "log", + "onefuzz-telemetry", + "reqwest", + "serde", + "storage-queue", + "uuid", +] + [[package]] name = "onefuzz-task" version = "0.2.0" @@ -2226,6 +2241,7 @@ dependencies = [ "num_cpus", "onefuzz", "onefuzz-file-format", + "onefuzz-result", "onefuzz-telemetry", "path-absolutize", "pretty_assertions", diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index 2f4cea41a4..ce01ae880c 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,6 +10,7 @@ members = [ "onefuzz", "onefuzz-task", "onefuzz-agent", + "onefuzz-result", "onefuzz-file-format", "onefuzz-telemetry", "reqwest-retry", diff --git a/src/agent/onefuzz-agent/src/config.rs b/src/agent/onefuzz-agent/src/config.rs index 87edfb2c1b..fc623e72af 100644 --- a/src/agent/onefuzz-agent/src/config.rs +++ b/src/agent/onefuzz-agent/src/config.rs @@ -34,6 +34,8 @@ pub struct StaticConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -71,6 +73,8 @@ struct RawStaticConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -117,6 +121,7 @@ impl StaticConfig { microsoft_telemetry_key: config.microsoft_telemetry_key, instance_telemetry_key: config.instance_telemetry_key, heartbeat_queue: config.heartbeat_queue, + 
job_result_queue: config.job_result_queue, instance_id: config.instance_id, managed: config.managed, machine_identity, @@ -152,6 +157,12 @@ impl StaticConfig { None }; + let job_result_queue = if let Ok(key) = std::env::var("ONEFUZZ_JOB_RESULT") { + Some(Url::parse(&key)?) + } else { + None + }; + let instance_telemetry_key = if let Ok(key) = std::env::var("ONEFUZZ_INSTANCE_TELEMETRY_KEY") { Some(InstanceTelemetryKey::new(Uuid::parse_str(&key)?)) @@ -183,6 +194,7 @@ impl StaticConfig { instance_telemetry_key, microsoft_telemetry_key, heartbeat_queue, + job_result_queue, instance_id, managed: !is_unmanaged, machine_identity, diff --git a/src/agent/onefuzz-result/Cargo.toml b/src/agent/onefuzz-result/Cargo.toml new file mode 100644 index 0000000000..7c7de6615c --- /dev/null +++ b/src/agent/onefuzz-result/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "onefuzz-result" +version = "0.2.0" +authors = ["fuzzing@microsoft.com"] +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +anyhow = { version = "1.0", features = ["backtrace"] } +async-trait = "0.1" +reqwest = "0.11" +serde = "1.0" +storage-queue = { path = "../storage-queue" } +uuid = { version = "1.4", features = ["serde", "v4"] } +onefuzz-telemetry = { path = "../onefuzz-telemetry" } +log = "0.4" + diff --git a/src/agent/onefuzz-result/src/job_result.rs b/src/agent/onefuzz-result/src/job_result.rs new file mode 100644 index 0000000000..b305eca2cb --- /dev/null +++ b/src/agent/onefuzz-result/src/job_result.rs @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use anyhow::Result; +use async_trait::async_trait; +use onefuzz_telemetry::warn; +use reqwest::Url; +use serde::{self, Deserialize, Serialize}; +use std::collections::HashMap; +use std::sync::Arc; +use storage_queue::QueueClient; +use uuid::Uuid; + +#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] +#[serde(tag = "type")] +pub enum JobResultData { + NewCrashingInput, + NoReproCrashingInput, + NewReport, + NewUniqueReport, + NewRegressionReport, + NewCoverage, + NewCrashDump, + CoverageData, + RuntimeStats, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +struct JobResult { + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, + data: JobResultData, + value: HashMap, +} + +#[derive(Clone)] +pub struct TaskContext { + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, +} + +pub struct JobResultContext { + pub state: TaskContext, + pub queue_client: QueueClient, +} + +pub struct JobResultClient { + pub context: Arc>, +} + +impl JobResultClient { + pub fn init_job_result( + context: TaskContext, + queue_url: Url, + ) -> Result> + where + TaskContext: Send + Sync + 'static, + { + let context = Arc::new(JobResultContext { + state: context, + queue_client: QueueClient::new(queue_url)?, + }); + + Ok(JobResultClient { context }) + } +} + +pub type TaskJobResultClient = JobResultClient; + +pub async fn init_job_result( + queue_url: Url, + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, +) -> Result { + let hb = JobResultClient::init_job_result( + TaskContext { + task_id, + job_id, + machine_id, + machine_name, + }, + queue_url, + )?; + Ok(hb) +} + +#[async_trait] +pub trait JobResultSender { + async fn send_direct(&self, data: JobResultData, value: HashMap); +} + +#[async_trait] +impl JobResultSender for TaskJobResultClient { + async fn send_direct(&self, data: JobResultData, value: HashMap) { + let task_id = 
self.context.state.task_id; + let job_id = self.context.state.job_id; + let machine_id = self.context.state.machine_id; + let machine_name = self.context.state.machine_name.clone(); + + let _ = self + .context + .queue_client + .enqueue(JobResult { + task_id, + job_id, + machine_id, + machine_name, + data, + value, + }) + .await; + } +} + +#[async_trait] +impl JobResultSender for Option { + async fn send_direct(&self, data: JobResultData, value: HashMap) { + match self { + Some(client) => client.send_direct(data, value).await, + None => warn!("Failed to send Job Result message data from agent."), + } + } +} diff --git a/src/agent/onefuzz-result/src/lib.rs b/src/agent/onefuzz-result/src/lib.rs new file mode 100644 index 0000000000..dae666ca9a --- /dev/null +++ b/src/agent/onefuzz-result/src/lib.rs @@ -0,0 +1,4 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod job_result; diff --git a/src/agent/onefuzz-task/Cargo.toml b/src/agent/onefuzz-task/Cargo.toml index 0ad2f9aa4f..4e0bd381b0 100644 --- a/src/agent/onefuzz-task/Cargo.toml +++ b/src/agent/onefuzz-task/Cargo.toml @@ -39,6 +39,7 @@ serde_json = "1.0" serde_yaml = "0.9.21" onefuzz = { path = "../onefuzz" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } +onefuzz-result = { path = "../onefuzz-result" } path-absolutize = "3.1" reqwest-retry = { path = "../reqwest-retry" } strum = "0.25" diff --git a/src/agent/onefuzz-task/src/local/common.rs b/src/agent/onefuzz-task/src/local/common.rs index f8d7949e80..ce31c7401b 100644 --- a/src/agent/onefuzz-task/src/local/common.rs +++ b/src/agent/onefuzz-task/src/local/common.rs @@ -265,6 +265,7 @@ pub async fn build_local_context( }, instance_telemetry_key: None, heartbeat_queue: None, + job_result_queue: None, microsoft_telemetry_key: None, logs: None, min_available_memory_mb: 0, diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index b2e0c425ff..f85bcf627a 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -196,6 +196,7 @@ pub async fn launch( job_id: Uuid::new_v4(), instance_id: Uuid::new_v4(), heartbeat_queue: None, + job_result_queue: None, instance_telemetry_key: None, microsoft_telemetry_key: None, logs: None, diff --git a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs index 3ba068a614..d89ab30bd3 100644 --- a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs @@ -65,6 +65,8 @@ pub async fn run(config: Config) -> Result<()> { tools.init_pull().await?; } + let job_result_client = config.common.init_job_result().await?; + // the tempdir is always created, however, the reports_path and // reports_monitor_future are only created if we have one of the three // report SyncedDir. 
The idea is that the option for where to write reports @@ -88,6 +90,7 @@ pub async fn run(config: Config) -> Result<()> { &config.unique_reports, &config.reports, &config.no_repro, + &job_result_client, ); ( Some(reports_dir.path().to_path_buf()), diff --git a/src/agent/onefuzz-task/src/tasks/config.rs b/src/agent/onefuzz-task/src/tasks/config.rs index 0848379d73..e29e0fd60d 100644 --- a/src/agent/onefuzz-task/src/tasks/config.rs +++ b/src/agent/onefuzz-task/src/tasks/config.rs @@ -14,6 +14,7 @@ use onefuzz::{ machine_id::MachineIdentity, syncdir::{SyncOperation, SyncedDir}, }; +use onefuzz_result::job_result::{init_job_result, TaskJobResultClient}; use onefuzz_telemetry::{ self as telemetry, Event::task_start, EventData, InstanceTelemetryKey, MicrosoftTelemetryKey, Role, @@ -50,6 +51,8 @@ pub struct CommonConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_telemetry_key: Option, pub microsoft_telemetry_key: Option, @@ -103,6 +106,23 @@ impl CommonConfig { None => Ok(None), } } + + pub async fn init_job_result(&self) -> Result> { + match &self.job_result_queue { + Some(url) => { + let result = init_job_result( + url.clone(), + self.task_id, + self.job_id, + self.machine_identity.machine_id, + self.machine_identity.machine_name.clone(), + ) + .await?; + Ok(Some(result)) + } + None => Ok(None), + } + } } #[derive(Debug, Deserialize)] diff --git a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs index b112cfefbe..4fde9efb31 100644 --- a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs @@ -26,6 +26,8 @@ use onefuzz_file_format::coverage::{ binary::{v1::BinaryCoverageJson as BinaryCoverageJsonV1, BinaryCoverageJson}, source::{v1::SourceCoverageJson as SourceCoverageJsonV1, SourceCoverageJson}, }; +use onefuzz_result::job_result::JobResultData; +use onefuzz_result::job_result::{JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData}; use storage_queue::{Message, QueueClient}; use tokio::fs; @@ -114,7 +116,7 @@ impl CoverageTask { let allowlist = self.load_target_allowlist().await?; let heartbeat = self.config.common.init_heartbeat(None).await?; - + let job_result = self.config.common.init_job_result().await?; let mut seen_inputs = false; let target_exe_path = @@ -129,6 +131,7 @@ impl CoverageTask { coverage, allowlist, heartbeat, + job_result, target_exe.to_string(), )?; @@ -219,6 +222,7 @@ struct TaskContext<'a> { module_allowlist: AllowList, source_allowlist: Arc, heartbeat: Option, + job_result: Option, cache: Arc, } @@ -228,6 +232,7 @@ impl<'a> TaskContext<'a> { coverage: BinaryCoverage, allowlist: TargetAllowList, heartbeat: Option, + job_result: Option, target_exe: String, ) -> Result { let cache = DebugInfoCache::new(allowlist.source_files.clone()); @@ -247,6 +252,7 @@ impl<'a> TaskContext<'a> { module_allowlist: allowlist.modules, source_allowlist: Arc::new(allowlist.source_files), heartbeat, + job_result, cache: Arc::new(cache), }) } @@ -455,7 +461,16 @@ impl<'a> TaskContext<'a> { let s = CoverageStats::new(&coverage); event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate); metric!(coverage_data; 1.0; Covered = s.covered, Features = s.features, Rate = s.rate); - + self.job_result + .send_direct( + JobResultData::CoverageData, + HashMap::from([ + ("covered".to_string(), s.covered as f64), + ("features".to_string(), s.features as f64), 
+ ("rate".to_string(), s.rate), + ]), + ) + .await; Ok(()) } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs index d9116a1ed2..bd7511cac2 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs @@ -73,6 +73,7 @@ impl GeneratorTask { } let hb_client = self.config.common.init_heartbeat(None).await?; + let jr_client = self.config.common.init_job_result().await?; for dir in &self.config.readonly_inputs { dir.init_pull().await?; @@ -84,7 +85,10 @@ impl GeneratorTask { self.config.ensemble_sync_delay, ); - let crash_dir_monitor = self.config.crashes.monitor_results(new_result, false); + let crash_dir_monitor = self + .config + .crashes + .monitor_results(new_result, false, &jr_client); let fuzzer = self.fuzzing_loop(hb_client); @@ -298,6 +302,7 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), + job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs index 4f8c67ae8e..bfd9f3f5cc 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs @@ -1,7 +1,11 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender, utils::default_bool_true}; +use crate::tasks::{ + config::CommonConfig, + heartbeat::{HeartbeatSender, TaskHeartbeatClient}, + utils::default_bool_true, +}; use anyhow::{Context, Result}; use arraydeque::{ArrayDeque, Wrapping}; use async_trait::async_trait; @@ -12,6 +16,7 @@ use onefuzz::{ process::ExitStatus, syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir}, }; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{new_coverage, new_crashdump, new_result, runtime_stats}, EventData, @@ -126,21 +131,31 @@ where self.verify().await?; let hb_client = self.config.common.init_heartbeat(None).await?; + let jr_client = self.config.common.init_job_result().await?; // To be scheduled. 
let resync = self.continuous_sync_inputs(); - let new_inputs = self.config.inputs.monitor_results(new_coverage, true); - let new_crashes = self.config.crashes.monitor_results(new_result, true); + + let new_inputs = self + .config + .inputs + .monitor_results(new_coverage, true, &jr_client); + let new_crashes = self + .config + .crashes + .monitor_results(new_result, true, &jr_client); let new_crashdumps = async { if let Some(crashdumps) = &self.config.crashdumps { - crashdumps.monitor_results(new_crashdump, true).await + crashdumps + .monitor_results(new_crashdump, true, &jr_client) + .await } else { Ok(()) } }; let (stats_sender, stats_receiver) = mpsc::unbounded_channel(); - let report_stats = report_runtime_stats(stats_receiver, hb_client); + let report_stats = report_runtime_stats(stats_receiver, &hb_client, &jr_client); let fuzzers = self.run_fuzzers(Some(&stats_sender)); futures::try_join!( resync, @@ -183,7 +198,7 @@ where .inputs .local_path .parent() - .ok_or_else(|| anyhow!("Invalid input path"))?; + .ok_or_else(|| anyhow!("invalid input path"))?; let temp_path = task_dir.join(".temp"); tokio::fs::create_dir_all(&temp_path).await?; let temp_dir = tempdir_in(temp_path)?; @@ -501,7 +516,7 @@ impl TotalStats { self.execs_sec = self.worker_stats.values().map(|x| x.execs_sec).sum(); } - fn report(&self) { + async fn report(&self, jr_client: &Option) { event!( runtime_stats; EventData::Count = self.count, @@ -513,6 +528,17 @@ impl TotalStats { EventData::Count = self.count, EventData::ExecsSecond = self.execs_sec ); + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::RuntimeStats, + HashMap::from([ + ("total_count".to_string(), self.count as f64), + ("execs_sec".to_string(), self.execs_sec), + ]), + ) + .await; + } } } @@ -542,7 +568,8 @@ impl Timer { // are approximating nearest-neighbor interpolation on the runtime stats time series. async fn report_runtime_stats( mut stats_channel: mpsc::UnboundedReceiver, - heartbeat_client: impl HeartbeatSender, + heartbeat_client: &Option, + jr_client: &Option, ) -> Result<()> { // Cache the last-reported stats for a given worker. 
// @@ -551,7 +578,7 @@ async fn report_runtime_stats( let mut total = TotalStats::default(); // report all zeros to start - total.report(); + total.report(jr_client).await; let timer = Timer::new(RUNTIME_STATS_PERIOD); @@ -560,10 +587,10 @@ async fn report_runtime_stats( Some(stats) = stats_channel.recv() => { heartbeat_client.alive(); total.update(stats); - total.report() + total.report(jr_client).await } _ = timer.wait() => { - total.report() + total.report(jr_client).await } } } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs index de1e1106ba..3f00e20b8d 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs @@ -79,7 +79,10 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { remote_path: config.crashes.remote_path.clone(), }; crashes.init().await?; - let monitor_crashes = crashes.monitor_results(new_result, false); + + let jr_client = config.common.init_job_result().await?; + + let monitor_crashes = crashes.monitor_results(new_result, false, &jr_client); // setup crashdumps let (crashdump_dir, monitor_crashdumps) = { @@ -95,9 +98,12 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { }; let monitor_dir = crashdump_dir.clone(); + let monitor_jr_client = config.common.init_job_result().await?; let monitor_crashdumps = async move { if let Some(crashdumps) = monitor_dir { - crashdumps.monitor_results(new_crashdump, false).await + crashdumps + .monitor_results(new_crashdump, false, &monitor_jr_client) + .await } else { Ok(()) } @@ -129,11 +135,13 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { if let Some(no_repro) = &config.no_repro { no_repro.init().await?; } + let monitor_reports_future = monitor_reports( reports_dir.path(), &config.unique_reports, &config.reports, &config.no_repro, + &jr_client, ); let inputs = SyncedDir { @@ -156,7 +164,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { delay_with_jitter(delay).await; } } - let monitor_inputs = inputs.monitor_results(new_coverage, false); + let monitor_inputs = inputs.monitor_results(new_coverage, false, &jr_client); let inputs_sync_cancellation = CancellationToken::new(); // never actually cancelled let inputs_sync_task = inputs.continuous_sync(Pull, config.ensemble_sync_delay, &inputs_sync_cancellation); @@ -444,6 +452,7 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), + job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/heartbeat.rs b/src/agent/onefuzz-task/src/tasks/heartbeat.rs index 515fa39d0c..e13b661909 100644 --- a/src/agent/onefuzz-task/src/tasks/heartbeat.rs +++ b/src/agent/onefuzz-task/src/tasks/heartbeat.rs @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::onefuzz::heartbeat::HeartbeatClient; use anyhow::Result; +use onefuzz::heartbeat::HeartbeatClient; use reqwest::Url; use serde::{self, Deserialize, Serialize}; use std::time::Duration; diff --git a/src/agent/onefuzz-task/src/tasks/regression/common.rs b/src/agent/onefuzz-task/src/tasks/regression/common.rs index 60023cfa6e..b61a97df4c 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/common.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/common.rs @@ -2,12 +2,14 @@ // Licensed under the MIT License. use crate::tasks::{ + config::CommonConfig, heartbeat::{HeartbeatSender, TaskHeartbeatClient}, report::crash_report::{parse_report_file, CrashTestResult, RegressionReport}, }; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::syncdir::SyncedDir; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use std::path::PathBuf; @@ -24,7 +26,7 @@ pub trait RegressionHandler { /// Runs the regression task pub async fn run( - heartbeat_client: Option, + common_config: &CommonConfig, regression_reports: &SyncedDir, crashes: &SyncedDir, report_dirs: &[&SyncedDir], @@ -35,6 +37,9 @@ pub async fn run( info!("starting regression task"); regression_reports.init().await?; + let heartbeat_client = common_config.init_heartbeat(None).await?; + let job_result_client = common_config.init_job_result().await?; + handle_crash_reports( handler, crashes, @@ -42,6 +47,7 @@ pub async fn run( report_list, regression_reports, &heartbeat_client, + &job_result_client, ) .await .context("handling crash reports")?; @@ -52,6 +58,7 @@ pub async fn run( readonly_inputs, regression_reports, &heartbeat_client, + &job_result_client, ) .await .context("handling inputs")?; @@ -71,6 +78,7 @@ pub async fn handle_inputs( readonly_inputs: &SyncedDir, regression_reports: &SyncedDir, heartbeat_client: &Option, + job_result_client: &Option, ) -> Result<()> { readonly_inputs.init_pull().await?; let mut input_files = tokio::fs::read_dir(&readonly_inputs.local_path).await?; @@ -95,7 +103,7 @@ pub async fn handle_inputs( crash_test_result, original_crash_test_result: None, } - .save(None, regression_reports) + .save(None, regression_reports, job_result_client) .await? } @@ -109,6 +117,7 @@ pub async fn handle_crash_reports( report_list: &Option>, regression_reports: &SyncedDir, heartbeat_client: &Option, + job_result_client: &Option, ) -> Result<()> { // without crash report containers, skip this method if report_dirs.is_empty() { @@ -158,7 +167,7 @@ pub async fn handle_crash_reports( crash_test_result, original_crash_test_result: Some(original_crash_test_result), } - .save(Some(file_name), regression_reports) + .save(Some(file_name), regression_reports, job_result_client) .await? 
} } diff --git a/src/agent/onefuzz-task/src/tasks/regression/generic.rs b/src/agent/onefuzz-task/src/tasks/regression/generic.rs index 640e80db9a..8570208d59 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/generic.rs @@ -89,7 +89,6 @@ impl GenericRegressionTask { pub async fn run(&self) -> Result<()> { info!("Starting generic regression task"); - let heartbeat_client = self.config.common.init_heartbeat(None).await?; let mut report_dirs = vec![]; for dir in vec![ @@ -103,7 +102,7 @@ impl GenericRegressionTask { report_dirs.push(dir); } common::run( - heartbeat_client, + &self.config.common, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs index 06dd7c00d9..e65f46bb64 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs @@ -103,9 +103,8 @@ impl LibFuzzerRegressionTask { report_dirs.push(dir); } - let heartbeat_client = self.config.common.init_heartbeat(None).await?; common::run( - heartbeat_client, + &self.config.common, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs index 23171bc432..290b98ccde 100644 --- a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs @@ -3,6 +3,7 @@ use anyhow::{Context, Result}; use onefuzz::{blob::BlobUrl, monitor::DirectoryMonitor, syncdir::SyncedDir}; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{ new_report, new_unable_to_reproduce, new_unique_report, regression_report, @@ -12,6 +13,7 @@ use onefuzz_telemetry::{ }; use serde::{Deserialize, Serialize}; use stacktrace_parser::CrashLog; +use std::collections::HashMap; use std::path::{Path, PathBuf}; use uuid::Uuid; @@ -111,6 +113,7 @@ impl RegressionReport { self, report_name: Option, regression_reports: &SyncedDir, + jr_client: &Option, ) -> Result<()> { let (event, name) = match &self.crash_test_result { CrashTestResult::CrashReport(report) => { @@ -126,6 +129,15 @@ impl RegressionReport { if upload_or_save_local(&self, &name, regression_reports).await? { event!(event; EventData::Path = name.clone()); metric!(event; 1.0; EventData::Path = name.clone()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NewRegressionReport, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } Ok(()) } @@ -149,6 +161,7 @@ impl CrashTestResult { unique_reports: &Option, reports: &Option, no_repro: &Option, + jr_client: &Option, ) -> Result<()> { match self { Self::CrashReport(report) => { @@ -158,6 +171,15 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, unique_reports).await? { event!(new_unique_report; EventData::Path = report.unique_blob_name()); metric!(new_unique_report; 1.0; EventData::Path = report.unique_blob_name()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NewUniqueReport, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } } @@ -166,6 +188,15 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, reports).await? 
{ event!(new_report; EventData::Path = report.blob_name()); metric!(new_report; 1.0; EventData::Path = report.blob_name()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NewReport, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } } } @@ -176,6 +207,15 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, no_repro).await? { event!(new_unable_to_reproduce; EventData::Path = report.blob_name()); metric!(new_unable_to_reproduce; 1.0; EventData::Path = report.blob_name()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NoReproCrashingInput, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } } } @@ -324,6 +364,7 @@ pub async fn monitor_reports( unique_reports: &Option, reports: &Option, no_crash: &Option, + jr_client: &Option, ) -> Result<()> { if unique_reports.is_none() && reports.is_none() && no_crash.is_none() { debug!("no report directories configured"); @@ -334,7 +375,9 @@ pub async fn monitor_reports( while let Some(file) = monitor.next_file().await? { let result = parse_report_file(file).await?; - result.save(unique_reports, reports, no_crash).await?; + result + .save(unique_reports, reports, no_crash, jr_client) + .await?; } Ok(()) diff --git a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs index 9b626a7d89..b8659845de 100644 --- a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs @@ -8,25 +8,25 @@ use std::{ sync::Arc, }; +use crate::tasks::report::crash_report::*; +use crate::tasks::report::dotnet::common::collect_exception_info; +use crate::tasks::{ + config::CommonConfig, + generic::input_poller::*, + heartbeat::{HeartbeatSender, TaskHeartbeatClient}, + utils::{default_bool_true, try_resolve_setup_relative_path}, +}; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::expand::Expand; use onefuzz::fs::set_executable; use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir}; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use storage_queue::{Message, QueueClient}; use tokio::fs; -use crate::tasks::report::crash_report::*; -use crate::tasks::report::dotnet::common::collect_exception_info; -use crate::tasks::{ - config::CommonConfig, - generic::input_poller::*, - heartbeat::{HeartbeatSender, TaskHeartbeatClient}, - utils::{default_bool_true, try_resolve_setup_relative_path}, -}; - const DOTNET_DUMP_TOOL_NAME: &str = "dotnet-dump"; #[derive(Debug, Deserialize)] @@ -114,15 +114,18 @@ impl DotnetCrashReportTask { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, + job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; + let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, + job_result_client, }) } @@ -260,6 +263,7 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await; diff --git a/src/agent/onefuzz-task/src/tasks/report/generic.rs b/src/agent/onefuzz-task/src/tasks/report/generic.rs index 9088f98acc..8ad259f0a5 100644 --- a/src/agent/onefuzz-task/src/tasks/report/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/generic.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use 
onefuzz::{ blob::BlobUrl, input_tester::Tester, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -73,7 +74,9 @@ impl ReportTask { pub async fn managed_run(&mut self) -> Result<()> { info!("Starting generic crash report task"); let heartbeat_client = self.config.common.init_heartbeat(None).await?; - let mut processor = GenericReportProcessor::new(&self.config, heartbeat_client); + let job_result_client = self.config.common.init_job_result().await?; + let mut processor = + GenericReportProcessor::new(&self.config, heartbeat_client, job_result_client); #[allow(clippy::manual_flatten)] for entry in [ @@ -183,13 +186,19 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct GenericReportProcessor<'a> { config: &'a Config, heartbeat_client: Option, + job_result_client: Option, } impl<'a> GenericReportProcessor<'a> { - pub fn new(config: &'a Config, heartbeat_client: Option) -> Self { + pub fn new( + config: &'a Config, + heartbeat_client: Option, + job_result_client: Option, + ) -> Self { Self { config, heartbeat_client, + job_result_client, } } @@ -239,6 +248,7 @@ impl<'a> Processor for GenericReportProcessor<'a> { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await .context("saving report failed") diff --git a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs index f18f638fa3..587ed2e3dc 100644 --- a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, libfuzzer::LibFuzzer, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -196,15 +197,18 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, + job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; + let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, + job_result_client, }) } @@ -257,6 +261,7 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await } diff --git a/src/agent/onefuzz/Cargo.toml b/src/agent/onefuzz/Cargo.toml index c096c8ddfc..1f3c27985c 100644 --- a/src/agent/onefuzz/Cargo.toml +++ b/src/agent/onefuzz/Cargo.toml @@ -44,6 +44,7 @@ tempfile = "3.7.0" process_control = "4.0" reqwest-retry = { path = "../reqwest-retry" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } +onefuzz-result = { path = "../onefuzz-result" } stacktrace-parser = { path = "../stacktrace-parser" } backoff = { version = "0.4", features = ["tokio"] } diff --git a/src/agent/onefuzz/src/syncdir.rs b/src/agent/onefuzz/src/syncdir.rs index 0252099561..2e73b7a694 100644 --- a/src/agent/onefuzz/src/syncdir.rs +++ b/src/agent/onefuzz/src/syncdir.rs @@ -11,10 +11,12 @@ use crate::{ }; use anyhow::{Context, Result}; use dunce::canonicalize; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{Event, EventData}; use reqwest::{StatusCode, Url}; use 
reqwest_retry::{RetryCheck, SendRetry, DEFAULT_RETRY_PERIOD, MAX_RETRY_ATTEMPTS}; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; use std::{env::current_dir, path::PathBuf, str, time::Duration}; use tokio::{fs, select}; use tokio_util::sync::CancellationToken; @@ -241,6 +243,7 @@ impl SyncedDir { url: BlobContainerUrl, event: Event, ignore_dotfiles: bool, + jr_client: &Option, ) -> Result<()> { debug!("monitoring {}", path.display()); @@ -265,9 +268,39 @@ impl SyncedDir { if ignore_dotfiles && file_name_event_str.starts_with('.') { continue; } - event!(event.clone(); EventData::Path = file_name_event_str); metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str); + if let Some(jr_client) = jr_client { + match event { + Event::new_result => { + jr_client + .send_direct( + JobResultData::NewCrashingInput, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + Event::new_coverage => { + jr_client + .send_direct( + JobResultData::CoverageData, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + Event::new_crashdump => { + jr_client + .send_direct( + JobResultData::NewCrashDump, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + _ => { + warn!("Unhandled job result!"); + } + } + } let destination = path.join(file_name); if let Err(err) = fs::copy(&item, &destination).await { let error_message = format!( @@ -305,6 +338,29 @@ impl SyncedDir { event!(event.clone(); EventData::Path = file_name_event_str); metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str); + if let Some(jr_client) = jr_client { + match event { + Event::new_result => { + jr_client + .send_direct( + JobResultData::NewCrashingInput, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + Event::new_coverage => { + jr_client + .send_direct( + JobResultData::CoverageData, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + _ => { + warn!("Unhandled job result!"); + } + } + } if let Err(err) = uploader.upload(item.clone()).await { let error_message = format!( "Couldn't upload file. path:{} dir:{} err:{:?}", @@ -336,7 +392,12 @@ impl SyncedDir { /// The intent of this is to support use cases where we usually want a directory /// to be initialized, but a user-supplied binary, (such as AFL) logically owns /// a directory, and may reset it. 
- pub async fn monitor_results(&self, event: Event, ignore_dotfiles: bool) -> Result<()> { + pub async fn monitor_results( + &self, + event: Event, + ignore_dotfiles: bool, + job_result_client: &Option, + ) -> Result<()> { if let Some(url) = self.remote_path.clone() { loop { debug!("waiting to monitor {}", self.local_path.display()); @@ -355,6 +416,7 @@ impl SyncedDir { url.clone(), event.clone(), ignore_dotfiles, + job_result_client, ) .await?; } diff --git a/src/deployment/bicep-templates/storageAccounts.bicep b/src/deployment/bicep-templates/storageAccounts.bicep index 6a96cea6a0..27f2da21d8 100644 --- a/src/deployment/bicep-templates/storageAccounts.bicep +++ b/src/deployment/bicep-templates/storageAccounts.bicep @@ -33,7 +33,7 @@ var storageAccountFuncQueuesParams = [ 'update-queue' 'webhooks' 'signalr-events' - 'custom-metrics' + 'job-result' ] var fileChangesQueueIndex = 0 From c0c5b8324fd1b9b57ff3f4e0576c7be2c0af774b Mon Sep 17 00:00:00 2001 From: Adam <103067949+AdamL-Microsoft@users.noreply.github.com> Date: Fri, 18 Aug 2023 13:11:26 -0700 Subject: [PATCH 07/20] Release 8.7.0 (#3403) * Release-8.7.0 --- CHANGELOG.md | 23 +++++++++++++++++++++++ CURRENT_VERSION | 2 +- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5082cde88b..be4779ad77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,29 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 8.7.0 + +### Added + +* Agent: Added a snapshot-based test to coverage implementation [#3368](https://github.com/microsoft/onefuzz/pull/3368) +* Agent/CLI/Service: Added ability to capture crash dumps from libfuzzer, when provided [#2793](https://github.com/microsoft/onefuzz/pull/2793) [#3409](https://github.com/microsoft/onefuzz/pull/3409) +* CLI/Service: Implemented `--with_tasks ` option for `onefuzz jobs get` command to expand the task information [#3343](https://github.com/microsoft/onefuzz/pull/3343) + +### Changed + +* Agent: Migrated all the task types to the template model [#3397](https://github.com/microsoft/onefuzz/pull/3307) +* Agent: Removed `srcview` code from OneFuzz since it is not currently utilized [#3376](https://github.com/microsoft/onefuzz/pull/3376) +* Agent: Updated default windows VM image to windows 11 [#3374](https://github.com/microsoft/onefuzz/pull/3374) +* Agent: Migrated `winapi` to `windows-rs`, the newer Microsoft supported version of the Windows API bindings for Rust [#3050](https://github.com/microsoft/onefuzz/pull/3050) +* Deployment: Updated the default deployment option for `EnableWorkItemCreation` feature flag to be enabled [#3387](https://github.com/microsoft/onefuzz/pull/3387) + +### Fixed + +* Agent: Deserialize the coverage files directly into the output files [#3410](https://github.com/microsoft/onefuzz/pull/3410) +* Agent/Deployment/Service: Bumped several C#, Python, and Rust dependencies as well as the Rust edition across all Rust crates [#3396](https://github.com/microsoft/onefuzz/pull/3396), [#3161](https://github.com/microsoft/onefuzz/pull/3161), [#3346](https://github.com/microsoft/onefuzz/pull/3346), [#3391](https://github.com/microsoft/onefuzz/pull/3391), [#2870](https://github.com/microsoft/onefuzz/pull/2870), [#3392](https://github.com/microsoft/onefuzz/pull/3392), [#3402](https://github.com/microsoft/onefuzz/pull/3402) +* Agent: Fixed a bug in 
agent `DirectoryMonitor` by adding error tolerance when attempting to fetch metadata for `CreateKind::Any` or `CreateKind::Other` events [#3393](https://github.com/microsoft/onefuzz/pull/3393) +* Service: Fixed tag shadowing in logging by giving precedence to the tags produced by log messages over the tags added prior to the call, when the tag names clashed [#3388](https://github.com/microsoft/onefuzz/pull/3388) + ## 8.6.3 ### Fixed diff --git a/CURRENT_VERSION b/CURRENT_VERSION index 1de56fec00..c0bcaebe8f 100644 --- a/CURRENT_VERSION +++ b/CURRENT_VERSION @@ -1 +1 @@ -8.6.3 \ No newline at end of file +8.7.0 \ No newline at end of file From a5517099e1cd862cd712b584dee6373489afd650 Mon Sep 17 00:00:00 2001 From: Teo Voinea <58236992+tevoinea@users.noreply.github.com> Date: Mon, 21 Aug 2023 09:16:17 -0400 Subject: [PATCH 08/20] Only keep coverage, libfuzzer and template commands (#3428) * Only keep coverage, libfuzzer and template commands * Remove unused code --- src/agent/onefuzz-task/src/local/cmd.rs | 42 +---- src/agent/onefuzz-task/src/local/common.rs | 25 --- .../src/local/generic_analysis.rs | 128 +------------- .../src/local/generic_crash_report.rs | 138 +-------------- .../src/local/generic_generator.rs | 142 +-------------- src/agent/onefuzz-task/src/local/libfuzzer.rs | 161 +----------------- .../src/local/libfuzzer_crash_report.rs | 128 +------------- .../onefuzz-task/src/local/libfuzzer_merge.rs | 84 +-------- .../src/local/libfuzzer_regression.rs | 134 +-------------- .../src/local/libfuzzer_test_input.rs | 83 --------- src/agent/onefuzz-task/src/local/mod.rs | 1 - src/agent/onefuzz-task/src/local/radamsa.rs | 78 --------- .../onefuzz-task/src/local/test_input.rs | 86 ---------- 13 files changed, 13 insertions(+), 1217 deletions(-) delete mode 100644 src/agent/onefuzz-task/src/local/radamsa.rs diff --git a/src/agent/onefuzz-task/src/local/cmd.rs b/src/agent/onefuzz-task/src/local/cmd.rs index 80fd51a96b..eabefb71ee 100644 --- a/src/agent/onefuzz-task/src/local/cmd.rs +++ b/src/agent/onefuzz-task/src/local/cmd.rs @@ -3,11 +3,7 @@ #[cfg(any(target_os = "linux", target_os = "windows"))] use crate::local::coverage; -use crate::local::{ - common::add_common_config, generic_analysis, generic_crash_report, generic_generator, - libfuzzer, libfuzzer_crash_report, libfuzzer_fuzz, libfuzzer_merge, libfuzzer_regression, - libfuzzer_test_input, radamsa, test_input, tui::TerminalUi, -}; +use crate::local::{common::add_common_config, libfuzzer_fuzz, tui::TerminalUi}; use anyhow::{Context, Result}; use clap::{Arg, ArgAction, Command}; use std::time::Duration; @@ -21,19 +17,9 @@ use super::template; #[derive(Debug, PartialEq, Eq, EnumString, IntoStaticStr, EnumIter)] #[strum(serialize_all = "kebab-case")] enum Commands { - Radamsa, #[cfg(any(target_os = "linux", target_os = "windows"))] Coverage, LibfuzzerFuzz, - LibfuzzerMerge, - LibfuzzerCrashReport, - LibfuzzerTestInput, - LibfuzzerRegression, - Libfuzzer, - CrashReport, - Generator, - Analysis, - TestInput, Template, } @@ -68,23 +54,7 @@ pub async fn run(args: clap::ArgMatches) -> Result<()> { match command { #[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::run(&sub_args, event_sender).await, - Commands::Radamsa => radamsa::run(&sub_args, event_sender).await, - Commands::LibfuzzerCrashReport => { - libfuzzer_crash_report::run(&sub_args, event_sender).await - } Commands::LibfuzzerFuzz => libfuzzer_fuzz::run(&sub_args, event_sender).await, - Commands::LibfuzzerMerge => libfuzzer_merge::run(&sub_args, 
event_sender).await, - Commands::LibfuzzerTestInput => { - libfuzzer_test_input::run(&sub_args, event_sender).await - } - Commands::LibfuzzerRegression => { - libfuzzer_regression::run(&sub_args, event_sender).await - } - Commands::Libfuzzer => libfuzzer::run(&sub_args, event_sender).await, - Commands::CrashReport => generic_crash_report::run(&sub_args, event_sender).await, - Commands::Generator => generic_generator::run(&sub_args, event_sender).await, - Commands::Analysis => generic_analysis::run(&sub_args, event_sender).await, - Commands::TestInput => test_input::run(&sub_args, event_sender).await, Commands::Template => { let config = sub_args .get_one::("config") @@ -140,17 +110,7 @@ pub fn args(name: &'static str) -> Command { let app = match subcommand { #[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::args(subcommand.into()), - Commands::Radamsa => radamsa::args(subcommand.into()), - Commands::LibfuzzerCrashReport => libfuzzer_crash_report::args(subcommand.into()), Commands::LibfuzzerFuzz => libfuzzer_fuzz::args(subcommand.into()), - Commands::LibfuzzerMerge => libfuzzer_merge::args(subcommand.into()), - Commands::LibfuzzerTestInput => libfuzzer_test_input::args(subcommand.into()), - Commands::LibfuzzerRegression => libfuzzer_regression::args(subcommand.into()), - Commands::Libfuzzer => libfuzzer::args(subcommand.into()), - Commands::CrashReport => generic_crash_report::args(subcommand.into()), - Commands::Generator => generic_generator::args(subcommand.into()), - Commands::Analysis => generic_analysis::args(subcommand.into()), - Commands::TestInput => test_input::args(subcommand.into()), Commands::Template => Command::new("template") .about("uses the template to generate a run") .args(vec![Arg::new("config") diff --git a/src/agent/onefuzz-task/src/local/common.rs b/src/agent/onefuzz-task/src/local/common.rs index ce31c7401b..17940d799f 100644 --- a/src/agent/onefuzz-task/src/local/common.rs +++ b/src/agent/onefuzz-task/src/local/common.rs @@ -26,20 +26,10 @@ pub const INPUTS_DIR: &str = "inputs_dir"; pub const CRASHES_DIR: &str = "crashes_dir"; pub const CRASHDUMPS_DIR: &str = "crashdumps_dir"; pub const TARGET_WORKERS: &str = "target_workers"; -pub const REPORTS_DIR: &str = "reports_dir"; -pub const NO_REPRO_DIR: &str = "no_repro_dir"; pub const TARGET_TIMEOUT: &str = "target_timeout"; -pub const CHECK_RETRY_COUNT: &str = "check_retry_count"; -pub const DISABLE_CHECK_QUEUE: &str = "disable_check_queue"; -pub const UNIQUE_REPORTS_DIR: &str = "unique_reports_dir"; pub const COVERAGE_DIR: &str = "coverage_dir"; pub const READONLY_INPUTS: &str = "readonly_inputs_dir"; -pub const CHECK_ASAN_LOG: &str = "check_asan_log"; -pub const TOOLS_DIR: &str = "tools_dir"; -pub const RENAME_OUTPUT: &str = "rename_output"; pub const CHECK_FUZZER_HELP: &str = "check_fuzzer_help"; -pub const DISABLE_CHECK_DEBUGGER: &str = "disable_check_debugger"; -pub const REGRESSION_REPORTS_DIR: &str = "regression_reports_dir"; pub const TARGET_EXE: &str = "target_exe"; pub const TARGET_ENV: &str = "target_env"; @@ -47,17 +37,6 @@ pub const TARGET_OPTIONS: &str = "target_options"; // pub const SUPERVISOR_EXE: &str = "supervisor_exe"; // pub const SUPERVISOR_ENV: &str = "supervisor_env"; // pub const SUPERVISOR_OPTIONS: &str = "supervisor_options"; -pub const GENERATOR_EXE: &str = "generator_exe"; -pub const GENERATOR_ENV: &str = "generator_env"; -pub const GENERATOR_OPTIONS: &str = "generator_options"; - -pub const ANALYZER_EXE: &str = "analyzer_exe"; -pub const 
ANALYZER_OPTIONS: &str = "analyzer_options"; -pub const ANALYZER_ENV: &str = "analyzer_env"; -pub const ANALYSIS_DIR: &str = "analysis_dir"; -pub const ANALYSIS_INPUTS: &str = "analysis_inputs"; -pub const ANALYSIS_UNIQUE_INPUTS: &str = "analysis_unique_inputs"; -pub const PRESERVE_EXISTING_OUTPUTS: &str = "preserve_existing_outputs"; pub const CREATE_JOB_DIR: &str = "create_job_dir"; @@ -66,7 +45,6 @@ const WAIT_FOR_DIR_DELAY: Duration = Duration::from_secs(1); pub enum CmdType { Target, - Generator, // Supervisor, } @@ -90,7 +68,6 @@ pub fn get_cmd_exe(cmd_type: CmdType, args: &clap::ArgMatches) -> Result let name = match cmd_type { CmdType::Target => TARGET_EXE, // CmdType::Supervisor => SUPERVISOR_EXE, - CmdType::Generator => GENERATOR_EXE, }; args.get_one::(name) @@ -102,7 +79,6 @@ pub fn get_cmd_arg(cmd_type: CmdType, args: &clap::ArgMatches) -> Vec { let name = match cmd_type { CmdType::Target => TARGET_OPTIONS, // CmdType::Supervisor => SUPERVISOR_OPTIONS, - CmdType::Generator => GENERATOR_OPTIONS, }; args.get_many::(name) @@ -115,7 +91,6 @@ pub fn get_cmd_env(cmd_type: CmdType, args: &clap::ArgMatches) -> Result TARGET_ENV, // CmdType::Supervisor => SUPERVISOR_ENV, - CmdType::Generator => GENERATOR_ENV, }; get_hash_map(args, env_name) } diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index 3d3e2fafc8..710c270331 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -3,139 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_exe, get_hash_map, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, ANALYSIS_DIR, ANALYZER_ENV, ANALYZER_EXE, ANALYZER_OPTIONS, - CRASHES_DIR, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TOOLS_DIR, - UNIQUE_REPORTS_DIR, - }, - tasks::{ - analysis::generic::{run as run_analysis, Config}, - config::CommonConfig, - }, -}; +use crate::tasks::config::CommonConfig; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, Command}; -use flume::Sender; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_analysis_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_options = get_cmd_arg(CmdType::Target, args); - - let analyzer_exe = args - .get_one::(ANALYZER_EXE) - .cloned() - .ok_or_else(|| format_err!("expected {ANALYZER_EXE}"))?; - - let analyzer_options = args - .get_many::(ANALYZER_OPTIONS) - .unwrap_or_default() - .map(|x| x.to_string()) - .collect(); - - let analyzer_env = get_hash_map(args, ANALYZER_ENV)?; - let analysis = get_synced_dir(ANALYSIS_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)?; - let crashes = if input_queue.is_none() { - get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)? 
- } else { - None - }; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let config = Config { - analyzer_exe, - analyzer_options, - analyzer_env, - target_exe, - target_options, - input_queue, - crashes, - analysis, - tools: Some(tools), - reports, - unique_reports, - no_repro, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_analysis_config(args, None, context.common_config.clone(), event_sender)?; - run_analysis(config).await -} - -pub fn build_shared_args(required_task: bool) -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV) - .long(TARGET_ENV) - .requires(TARGET_EXE) - .num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .default_value("{input}") - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(ANALYZER_OPTIONS) - .long(ANALYZER_OPTIONS) - .requires(ANALYZER_EXE) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(ANALYZER_ENV) - .long(ANALYZER_ENV) - .requires(ANALYZER_EXE) - .num_args(0..), - Arg::new(TOOLS_DIR) - .long(TOOLS_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(ANALYZER_EXE) - .long(ANALYZER_EXE) - .requires(ANALYSIS_DIR) - .requires(CRASHES_DIR) - .required(required_task), - Arg::new(ANALYSIS_DIR) - .long(ANALYSIS_DIR) - .requires(ANALYZER_EXE) - .requires(CRASHES_DIR) - .required(required_task), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generic analysis") - .args(&build_shared_args(true)) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Analysis { analyzer_exe: String, diff --git a/src/agent/onefuzz-task/src/local/generic_crash_report.rs b/src/agent/onefuzz-task/src/local/generic_crash_report.rs index 6b0e2fccad..347a8cac76 100644 --- a/src/agent/onefuzz-task/src/local/generic_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/generic_crash_report.rs @@ -3,150 +3,14 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, CRASHES_DIR, - DISABLE_CHECK_DEBUGGER, DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, - TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - report::generic::{Config, ReportTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_report_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - 
event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - - let crashes = Some(get_synced_dir( - CRASHES_DIR, - common.job_id, - common.task_id, - args, - )?) - .monitor_count(&event_sender)?; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let unique_reports = Some(get_synced_dir( - UNIQUE_REPORTS_DIR, - common.job_id, - common.task_id, - args, - )?) - .monitor_count(&event_sender)?; - - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_asan_log, - check_debugger, - check_retry_count, - check_queue, - crashes, - minimized_stack_depth: None, - input_queue, - no_repro, - reports, - unique_reports, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; - ReportTask::new(config).managed_run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .value_parser(value_parser!(PathBuf)) - .required(true), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .default_value("30"), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(DISABLE_CHECK_QUEUE) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_QUEUE), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_DEBUGGER), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generic crash report") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct CrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/generic_generator.rs b/src/agent/onefuzz-task/src/local/generic_generator.rs index 823ba221d6..ae9f6a3cc6 100644 --- a/src/agent/onefuzz-task/src/local/generic_generator.rs +++ b/src/agent/onefuzz-task/src/local/generic_generator.rs @@ -3,154 +3,14 @@ use 
std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, - get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, - CRASHES_DIR, DISABLE_CHECK_DEBUGGER, GENERATOR_ENV, GENERATOR_EXE, GENERATOR_OPTIONS, - READONLY_INPUTS, RENAME_OUTPUT, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, - TOOLS_DIR, - }, - tasks::{ - config::CommonConfig, - fuzz::generator::{Config, GeneratorTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; use super::template::{RunContext, Template}; -pub fn build_fuzz_config( - args: &clap::ArgMatches, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_options = get_cmd_arg(CmdType::Target, args); - let target_env = get_cmd_env(CmdType::Target, args)?; - - let generator_exe = get_cmd_exe(CmdType::Generator, args)?; - let generator_options = get_cmd_arg(CmdType::Generator, args); - let generator_env = get_cmd_env(CmdType::Generator, args)?; - let readonly_inputs = get_synced_dirs(READONLY_INPUTS, common.job_id, common.task_id, args)? - .into_iter() - .map(|sd| sd.monitor_count(&event_sender)) - .collect::>>()?; - - let rename_output = args.get_flag(RENAME_OUTPUT); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let target_timeout = Some( - args.get_one::(TARGET_TIMEOUT) - .copied() - .expect("has a default"), - ); - - let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let ensemble_sync_delay = None; - - let config = Config { - generator_exe, - generator_env, - generator_options, - readonly_inputs, - crashes, - tools, - target_exe, - target_env, - target_options, - target_timeout, - check_asan_log, - check_debugger, - check_retry_count, - rename_output, - ensemble_sync_delay, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_fuzz_config(args, context.common_config.clone(), event_sender)?; - GeneratorTask::new(config).run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(GENERATOR_EXE) - .long(GENERATOR_EXE) - .default_value("radamsa") - .required(true), - Arg::new(GENERATOR_ENV).long(GENERATOR_ENV).num_args(0..), - Arg::new(GENERATOR_OPTIONS) - .long(GENERATOR_OPTIONS) - .value_delimiter(' ') - .default_value("-H sha256 -o {generated_inputs}/input-%h.%s -n 100 -r {input_corpus}") - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - 
.required(true) - .long(CRASHES_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(READONLY_INPUTS) - .required(true) - .num_args(1..) - .value_parser(value_parser!(PathBuf)) - .long(READONLY_INPUTS), - Arg::new(TOOLS_DIR) - .long(TOOLS_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(RENAME_OUTPUT) - .action(ArgAction::SetTrue) - .long(RENAME_OUTPUT), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .default_value("30"), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_DEBUGGER), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generator fuzzing task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Generator { generator_exe: String, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer.rs b/src/agent/onefuzz-task/src/local/libfuzzer.rs index 56dff7dbe3..433636be1c 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer.rs @@ -1,168 +1,19 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#[cfg(any(target_os = "linux", target_os = "windows"))] -use crate::{ - local::{common::COVERAGE_DIR, coverage, coverage::build_shared_args as build_coverage_args}, - tasks::coverage::generic::CoverageTask, -}; -use crate::{ - local::{ - common::{ - build_local_context, wait_for_dir, DirectoryMonitorQueue, UiEvent, ANALYZER_EXE, - REGRESSION_REPORTS_DIR, UNIQUE_REPORTS_DIR, - }, - generic_analysis::{build_analysis_config, build_shared_args as build_analysis_args}, - libfuzzer_crash_report::{build_report_config, build_shared_args as build_crash_args}, - libfuzzer_fuzz::{build_fuzz_config, build_shared_args as build_fuzz_args}, - libfuzzer_regression::{ - build_regression_config, build_shared_args as build_regression_args, - }, - }, - tasks::{ - analysis::generic::run as run_analysis, - config::CommonConfig, - fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, - regression::libfuzzer::LibFuzzerRegressionTask, - report::libfuzzer_report::ReportTask, - utils::default_bool_true, - }, +use crate::tasks::{ + config::CommonConfig, + fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, + utils::default_bool_true, }; use anyhow::Result; use async_trait::async_trait; -use clap::Command; -use flume::Sender; -use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles}; +use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; -use std::{ - collections::{HashMap, HashSet}, - path::PathBuf, -}; -use tokio::task::spawn; -use uuid::Uuid; +use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; - let crash_dir = fuzz_config - .crashes - .remote_url()? 
- .as_file_path() - .expect("invalid crash dir remote location"); - - let fuzzer = LibFuzzerFuzzTask::new(fuzz_config)?; - let mut task_handles = vec![]; - - let fuzz_task = spawn(async move { fuzzer.run().await }); - - wait_for_dir(&crash_dir).await?; - - task_handles.push(fuzz_task); - - if args.contains_id(UNIQUE_REPORTS_DIR) { - let crash_report_input_monitor = - DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; - - let report_config = build_report_config( - args, - Some(crash_report_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - - let mut report = ReportTask::new(report_config); - let report_task = spawn(async move { report.managed_run().await }); - - task_handles.push(report_task); - task_handles.push(crash_report_input_monitor.handle); - } - - #[cfg(any(target_os = "linux", target_os = "windows"))] - if args.contains_id(COVERAGE_DIR) { - let coverage_input_monitor = - DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; - let coverage_config = coverage::build_coverage_config( - args, - true, - Some(coverage_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - - let mut coverage = CoverageTask::new(coverage_config); - let coverage_task = spawn(async move { coverage.run().await }); - - task_handles.push(coverage_task); - task_handles.push(coverage_input_monitor.handle); - } - - if args.contains_id(ANALYZER_EXE) { - let analysis_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?; - let analysis_config = build_analysis_config( - args, - Some(analysis_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - let analysis_task = spawn(async move { run_analysis(analysis_config).await }); - - task_handles.push(analysis_task); - task_handles.push(analysis_input_monitor.handle); - } - - if args.contains_id(REGRESSION_REPORTS_DIR) { - let regression_config = build_regression_config( - args, - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender, - )?; - let regression = LibFuzzerRegressionTask::new(regression_config); - let regression_task = spawn(async move { regression.run().await }); - task_handles.push(regression_task); - } - - try_wait_all_join_handles(task_handles).await?; - - Ok(()) -} - -pub fn args(name: &'static str) -> Command { - let mut app = Command::new(name).about("run a local libfuzzer & crash reporting task"); - - let mut used = HashSet::new(); - - for args in &[ - build_fuzz_args(), - build_crash_args(), - build_analysis_args(false), - #[cfg(any(target_os = "linux", target_os = "windows"))] - build_coverage_args(true), - build_regression_args(false), - ] { - for arg in args { - if used.insert(arg.get_id()) { - app = app.arg(arg); - } - } - } - - app -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibFuzzer { inputs: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs index c1ab283575..04ba4f9225 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs @@ -3,139 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, 
get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, CRASHES_DIR, - DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - report::libfuzzer_report::{Config, ReportTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; - -pub fn build_report_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); - - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - - let crashes = if input_queue.is_none() { crashes } else { None }; - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_retry_count, - check_fuzzer_help, - minimized_stack_depth: None, - input_queue, - check_queue, - crashes, - reports, - no_repro, - unique_reports, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; - ReportTask::new(config).managed_run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .long(TARGET_TIMEOUT), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(DISABLE_CHECK_QUEUE) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_QUEUE), - Arg::new(CHECK_FUZZER_HELP) - 
.action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer crash report task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerCrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs index 69c9df820b..4b3e4ce58f 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs @@ -3,97 +3,15 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, - get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, ANALYSIS_INPUTS, - ANALYSIS_UNIQUE_INPUTS, CHECK_FUZZER_HELP, INPUTS_DIR, PRESERVE_EXISTING_OUTPUTS, - TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - }, - tasks::{ - config::CommonConfig, - merge::libfuzzer_merge::{spawn, Config}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_merge_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - let inputs = get_synced_dirs(ANALYSIS_INPUTS, common.job_id, common.task_id, args)? - .into_iter() - .map(|sd| sd.monitor_count(&event_sender)) - .collect::>>()?; - let unique_inputs = - get_synced_dir(ANALYSIS_UNIQUE_INPUTS, common.job_id, common.task_id, args)? 
- .monitor_count(&event_sender)?; - let preserve_existing_outputs = args - .get_one::(PRESERVE_EXISTING_OUTPUTS) - .copied() - .unwrap_or_default(); - - let config = Config { - target_exe, - target_env, - target_options, - input_queue, - inputs, - unique_inputs, - preserve_existing_outputs, - check_fuzzer_help, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_merge_config(args, None, context.common_config.clone(), event_sender)?; - spawn(config).await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CHECK_FUZZER_HELP) - .action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - Arg::new(INPUTS_DIR) - .long(INPUTS_DIR) - .value_parser(value_parser!(PathBuf)) - .num_args(0..), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer crash report task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerMerge { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs index 501d2385e2..3fbb9f0bd6 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs @@ -3,145 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, COVERAGE_DIR, - CRASHES_DIR, NO_REPRO_DIR, REGRESSION_REPORTS_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, - TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - regression::libfuzzer::{Config, LibFuzzerRegressionTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use schemars::JsonSchema; use super::template::{RunContext, Template}; -const REPORT_NAMES: &str = "report_names"; - -pub fn build_regression_config( - args: &clap::ArgMatches, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let regression_reports = - get_synced_dir(REGRESSION_REPORTS_DIR, common.job_id, common.task_id, args)? 
- .monitor_count(&event_sender)?; - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default value"); - - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let report_list: Option> = args - .get_many::(REPORT_NAMES) - .map(|x| x.cloned().collect()); - - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_fuzzer_help, - check_retry_count, - crashes, - regression_reports, - reports, - no_repro, - unique_reports, - readonly_inputs: None, - report_list, - minimized_stack_depth: None, - common, - }; - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_regression_config(args, context.common_config.clone(), event_sender)?; - LibFuzzerRegressionTask::new(config).run().await -} - -pub fn build_shared_args(local_job: bool) -> Vec { - let mut args = vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(COVERAGE_DIR) - .required(!local_job) - .long(COVERAGE_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(CHECK_FUZZER_HELP) - .action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REGRESSION_REPORTS_DIR) - .long(REGRESSION_REPORTS_DIR) - .required(local_job) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .value_parser(value_parser!(PathBuf)) - .required(true), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - ]; - if local_job { - args.push(Arg::new(REPORT_NAMES).long(REPORT_NAMES).num_args(0..)) - } - args -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer regression task") - .args(&build_shared_args(true)) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerRegression { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs index 9c6f16094e..5bef2347f7 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs @@ -1,97 +1,14 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
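// Aside on the shape of this refactor: each local task module in this series drops its
// clap-based `build_*_config` / `build_shared_args` / `run` helpers and keeps only a
// serde + JsonSchema struct with a `Template` impl, so a local run is declared as data
// (the YAML template added later in this series) rather than as CLI flags. A minimal,
// self-contained sketch of that direction follows; the struct and field names are
// trimmed for illustration, and the YAML loading uses plain serde_yaml rather than the
// exact onefuzz-task plumbing.

use schemars::JsonSchema;
use serde::Deserialize;
use std::path::PathBuf;

#[derive(Debug, Deserialize, JsonSchema)]
struct CrashReportTask {
    target_exe: PathBuf,
    target_options: Vec<String>,
    check_retry_count: u64,
}

// Deserialize one task description from a YAML document.
fn parse_task(yaml: &str) -> anyhow::Result<CrashReportTask> {
    Ok(serde_yaml::from_str(yaml)?)
}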
-use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_RETRY_COUNT, - TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, - }, - tasks::report::libfuzzer_report::{test_input, TestInputArgs}, -}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, Command}; -use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender).await?; - - let target_exe = args - .get_one::(TARGET_EXE) - .expect("marked as required"); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let input = args - .get_one::("input") - .expect("marked as required"); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default value"); - - let extra_setup_dir = context.common_config.extra_setup_dir.as_deref(); - let extra_output_dir = context - .common_config - .extra_output - .as_ref() - .map(|x| x.local_path.as_path()); - - let config = TestInputArgs { - target_exe: target_exe.as_path(), - target_env: &target_env, - target_options: &target_options, - input_url: None, - input: input.as_path(), - job_id: context.common_config.job_id, - task_id: context.common_config.task_id, - target_timeout, - check_retry_count, - setup_dir: &context.common_config.setup_dir, - extra_setup_dir, - extra_output_dir, - minimized_stack_depth: None, - machine_identity: context.common_config.machine_identity, - }; - - let result = test_input(config).await?; - println!("{}", serde_json::to_string_pretty(&result)?); - Ok(()) -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).required(true), - Arg::new("input") - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("test a libfuzzer application with a specific input") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerTestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/mod.rs b/src/agent/onefuzz-task/src/local/mod.rs index 03d394bcdb..385ff8ffcd 100644 --- a/src/agent/onefuzz-task/src/local/mod.rs +++ b/src/agent/onefuzz-task/src/local/mod.rs @@ -14,7 +14,6 @@ pub mod libfuzzer_fuzz; pub mod libfuzzer_merge; pub mod libfuzzer_regression; pub mod libfuzzer_test_input; -pub mod radamsa; pub mod template; pub mod test_input; pub mod tui; diff --git a/src/agent/onefuzz-task/src/local/radamsa.rs b/src/agent/onefuzz-task/src/local/radamsa.rs deleted file mode 100644 index 4d84de027a..0000000000 --- a/src/agent/onefuzz-task/src/local/radamsa.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -use crate::{ - local::{ - common::{build_local_context, DirectoryMonitorQueue, UiEvent}, - generic_crash_report::{build_report_config, build_shared_args as build_crash_args}, - generic_generator::{build_fuzz_config, build_shared_args as build_fuzz_args}, - }, - tasks::{config::CommonConfig, fuzz::generator::GeneratorTask, report::generic::ReportTask}, -}; -use anyhow::{Context, Result}; -use clap::Command; -use flume::Sender; -use onefuzz::utils::try_wait_all_join_handles; -use std::collections::HashSet; -use tokio::task::spawn; -use uuid::Uuid; - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; - let crash_dir = fuzz_config - .crashes - .remote_url()? - .as_file_path() - .ok_or_else(|| format_err!("invalid crash directory"))?; - - tokio::fs::create_dir_all(&crash_dir) - .await - .with_context(|| { - format!( - "unable to create crashes directory: {}", - crash_dir.display() - ) - })?; - - let fuzzer = GeneratorTask::new(fuzz_config); - let fuzz_task = spawn(async move { fuzzer.run().await }); - - let crash_report_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir) - .await - .context("directory monitor failed")?; - let report_config = build_report_config( - args, - Some(crash_report_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender, - )?; - let report_task = spawn(async move { ReportTask::new(report_config).managed_run().await }); - - try_wait_all_join_handles(vec![ - fuzz_task, - report_task, - crash_report_input_monitor.handle, - ]) - .await?; - - Ok(()) -} - -pub fn args(name: &'static str) -> Command { - let mut app = Command::new(name).about("run a local generator & crash reporting job"); - - let mut used = HashSet::new(); - for args in &[build_fuzz_args(), build_crash_args()] { - for arg in args { - if used.insert(arg.get_id()) { - app = app.arg(arg); - } - } - } - - app -} diff --git a/src/agent/onefuzz-task/src/local/test_input.rs b/src/agent/onefuzz-task/src/local/test_input.rs index 4077bd08f8..b8027a7f41 100644 --- a/src/agent/onefuzz-task/src/local/test_input.rs +++ b/src/agent/onefuzz-task/src/local/test_input.rs @@ -1,18 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
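// The radamsa.rs runner deleted just above wired a generator task to a crash-report
// task by monitoring the crashes directory into a queue and then joining the spawned
// task handles; these patches fold that per-command orchestration into the shared
// template-based local runner. For reference, a stand-alone sketch of the final
// join-and-propagate step: it is illustrative only and awaits the handles in order,
// which is not necessarily how onefuzz::utils::try_wait_all_join_handles behaves.

use anyhow::Result;
use tokio::task::JoinHandle;

// Await every spawned task, surfacing panics (JoinError) and task errors alike.
async fn wait_all(handles: Vec<JoinHandle<Result<()>>>) -> Result<()> {
    for handle in handles {
        handle.await??;
    }
    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    let fuzz = tokio::spawn(async { Ok::<(), anyhow::Error>(()) });
    let report = tokio::spawn(async { Ok::<(), anyhow::Error>(()) });
    wait_all(vec![fuzz, report]).await
}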
-use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_ASAN_LOG, - CHECK_RETRY_COUNT, DISABLE_CHECK_DEBUGGER, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - TARGET_TIMEOUT, - }, - tasks::report::generic::{test_input, TestInputArgs}, -}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; @@ -20,82 +10,6 @@ use uuid::Uuid; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, false, event_sender).await?; - - let target_exe = args - .get_one::(TARGET_EXE) - .expect("is marked required"); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let input = args - .get_one::("input") - .expect("is marked required"); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has default value"); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let config = TestInputArgs { - target_exe: target_exe.as_path(), - target_env: &target_env, - target_options: &target_options, - input_url: None, - input: input.as_path(), - job_id: context.common_config.job_id, - task_id: context.common_config.task_id, - target_timeout, - check_retry_count, - setup_dir: &context.common_config.setup_dir, - extra_setup_dir: context.common_config.extra_setup_dir.as_deref(), - minimized_stack_depth: None, - check_asan_log, - check_debugger, - machine_identity: context.common_config.machine_identity.clone(), - }; - - let result = test_input(config).await?; - println!("{}", serde_json::to_string_pretty(&result)?); - Ok(()) -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).required(true), - Arg::new("input") - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long("disable_check_debugger"), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("test an application with a specific input") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct TestInput { input: PathBuf, From 8fc2f833fae0447da149efff12475b1f355c197f Mon Sep 17 00:00:00 2001 From: Teo Voinea <58236992+tevoinea@users.noreply.github.com> Date: Mon, 21 Aug 2023 09:37:22 -0400 Subject: [PATCH 09/20] Capture required args and make it match cli (#3429) * Capture required args and make it match cli * Match libfuzzer basic template --- .../example_templates/libfuzzer_basic.yml | 34 ++++++++++++------- .../src/local/generic_analysis.rs | 9 ++--- src/agent/onefuzz-task/src/local/schema.json | 8 +++-- 
src/agent/onefuzz-task/src/local/template.rs | 12 +++---- 4 files changed, 37 insertions(+), 26 deletions(-) diff --git a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml index 7210893809..aba02c7991 100644 --- a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml +++ b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml @@ -5,28 +5,31 @@ # 2. Install llvm and export LLVM_SYMBOLIZER_PATH like we do in setup.sh +required_args: &required_args + target_exe: "REPLACE_ME" # The path to your target + inputs: &inputs "REPLACE_ME" # A folder containining your inputs + crashes: &crashes "REPLACE_ME" # The folder where you want the crashing inputs to be output + crashdumps: "REPLACE_ME" # The folder where you want the crash dumps to be output + coverage: "REPLACE_ME" # The folder where you want the code coverage to be output + regression_reports: "REPLACE_ME" # The folder where you want the regression reports to be output + target_args: &target_args + <<: *required_args target_env: {} - target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe" target_options: [] -inputs: &inputs "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\seeds" - tasks: - type: LibFuzzer <<: *target_args - inputs: *inputs - crashes: &crash "./crashes" readonly_inputs: [] check_fuzzer_help: true - - type: "Report" + - type: LibfuzzerRegression <<: *target_args - input_queue: *crash - crashes: *crash - reports: "./reports" - unique_reports: "./unique_reports" - no_repro: "./no_repro" + + - type: "LibfuzzerCrashReport" + <<: *target_args + input_queue: *crashes check_fuzzer_help: true - type: "Coverage" @@ -35,4 +38,11 @@ tasks: - "{input}" input_queue: *inputs readonly_inputs: [*inputs] - coverage: "./coverage" + + # The analysis task is optional in the libfuzzer_basic template + # - type: Analysis + # <<: *target_args + # analysis: "REPLACE_ME" # The folder where you want the analysis results to be output + # analyzer_exe: "REPLACE_ME" + # analyzer_options: [] + # analyzer_env: {} diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index 710c270331..429e7b0e3b 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -20,7 +20,7 @@ pub struct Analysis { input_queue: Option, crashes: Option, analysis: PathBuf, - tools: PathBuf, + tools: Option, reports: Option, unique_reports: Option, no_repro: Option, @@ -49,9 +49,10 @@ impl Template for Analysis { .and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()), analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?, - tools: context - .to_monitored_sync_dir("tools", self.tools.clone()) - .ok(), + tools: self + .tools + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("tools", path).ok()), reports: self .reports diff --git a/src/agent/onefuzz-task/src/local/schema.json b/src/agent/onefuzz-task/src/local/schema.json index 0a1f128e67..e5b00f6e17 100644 --- a/src/agent/onefuzz-task/src/local/schema.json +++ b/src/agent/onefuzz-task/src/local/schema.json @@ -126,7 +126,6 @@ "analyzer_options", "target_exe", "target_options", - "tools", "type" ], "properties": { @@ -182,7 +181,10 @@ } }, "tools": { - "type": "string" + "type": [ + "string", + "null" + ] }, "type": { "type": "string", @@ -893,4 +895,4 @@ ] } } -} +} \ No newline at end of file diff --git 
a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index f85bcf627a..73ae6e5e48 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -242,12 +242,10 @@ mod test { .expect("Couldn't find checked-in schema.json") .replace("\r\n", "\n"); - println!("{}", schema_str); - - assert_eq!( - schema_str.replace('\n', ""), - checked_in_schema.replace('\n', ""), - "The checked-in local fuzzing schema did not match the generated schema." - ); + if schema_str.replace('\n', "") != checked_in_schema.replace('\n', "") { + std::fs::write("src/local/new.schema.json", schema_str) + .expect("The schemas did not match but failed to write new schema to file."); + panic!("The checked-in local fuzzing schema did not match the generated schema. The generated schema can be found at src/local/new.schema.json"); + } } } From ea4a1abb8aa24d250b3fb7ca816ea15ee277113f Mon Sep 17 00:00:00 2001 From: Cheick Keita Date: Mon, 21 Aug 2023 09:39:54 -0700 Subject: [PATCH 10/20] Fix local path generation (#3432) * Fix local path generation * format * fix test --- src/agent/onefuzz-agent/src/log_uploader.rs | 29 --------------------- src/agent/onefuzz/src/blob/url.rs | 23 +++++++++++++--- 2 files changed, 19 insertions(+), 33 deletions(-) diff --git a/src/agent/onefuzz-agent/src/log_uploader.rs b/src/agent/onefuzz-agent/src/log_uploader.rs index 6bccc0bef2..d424013421 100644 --- a/src/agent/onefuzz-agent/src/log_uploader.rs +++ b/src/agent/onefuzz-agent/src/log_uploader.rs @@ -210,32 +210,3 @@ async fn sync_file( blob_client.append_block(Body::from(f)).await?; Ok(len) } - -#[cfg(test)] -mod tests { - use std::io::Seek; - - use anyhow::Result; - use tokio::io::{AsyncReadExt, AsyncSeekExt}; - - #[allow(clippy::unused_io_amount)] - #[tokio::test] - #[ignore] - - async fn test_seek_behavior() -> Result<()> { - let path = "C:\\temp\\test.ps1"; - let mut std_file = std::fs::File::open(path)?; - std_file.seek(std::io::SeekFrom::Start(3))?; - - let mut tokio_file = tokio::fs::File::from_std(std_file); - - let buf = &mut [0u8; 5]; - tokio_file.read(buf).await?; - println!("******** buf {:?}", buf); - tokio_file.seek(std::io::SeekFrom::Start(0)).await?; - tokio_file.read(buf).await?; - println!("******** buf {:?}", buf); - - Ok(()) - } -} diff --git a/src/agent/onefuzz/src/blob/url.rs b/src/agent/onefuzz/src/blob/url.rs index f55ffbb23a..134b59dea0 100644 --- a/src/agent/onefuzz/src/blob/url.rs +++ b/src/agent/onefuzz/src/blob/url.rs @@ -192,10 +192,15 @@ impl BlobContainerUrl { } pub fn as_path(&self, prefix: impl AsRef) -> Result { - let dir = self - .account() - .ok_or_else(|| anyhow!("Invalid container Url"))?; - Ok(prefix.as_ref().join(dir)) + match (self.account(), self.container()) { + (Some(account), Some(container)) => { + let mut path = PathBuf::new(); + path.push(account); + path.push(container); + Ok(prefix.as_ref().join(path)) + } + _ => bail!("Invalid container Url"), + } } } @@ -526,4 +531,14 @@ mod tests { "id:000000,sig:06,src:000000,op:havoc,rep:128" ); } + + #[test] + fn test_as_path() -> Result<()> { + let root = PathBuf::from(r"/onefuzz"); + let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?; + let path = url.as_path(root)?; + assert_eq!(PathBuf::from(r"/onefuzz/myaccount/mycontainer"), path); + + Ok(()) + } } From 6d8be1a83473c941565071d8f7988ea341e0b544 Mon Sep 17 00:00:00 2001 From: Noah McGregor Harper <74685766+nharper285@users.noreply.github.com> Date: Mon, 21 Aug 2023 
11:30:33 -0700 Subject: [PATCH 11/20] Remove JobResult Retry Logging (#3439) * Replace JobResult error with warning. * Make log statement. * Leaving exception. --- src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs index 6e33890500..1166cf91d4 100644 --- a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs +++ b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs @@ -76,8 +76,6 @@ private async Async.Task TryUpdate(Job job, JobResultType resultType, Dict var r = await Insert(jobResult); if (!r.IsOk) { - _logTracer.AddHttpStatus(r.ErrorV); - _logTracer.LogError("failed to insert job result {JobId}", jobResult.JobId); throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); } _logTracer.LogInformation("created job result {JobId}", jobResult.JobId); @@ -88,8 +86,6 @@ private async Async.Task TryUpdate(Job job, JobResultType resultType, Dict var r = await Update(jobResult); if (!r.IsOk) { - _logTracer.AddHttpStatus(r.ErrorV); - _logTracer.LogError("failed to update job result {JobId}", jobResult.JobId); throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); } _logTracer.LogInformation("updated job result {JobId}", jobResult.JobId); From 38cfa9772ebe4416a49fefbdd872d86060a4c69a Mon Sep 17 00:00:00 2001 From: Justin Moore Date: Fri, 25 Aug 2023 11:41:27 -0500 Subject: [PATCH 12/20] Downgrade some debug logs from warn to debug (#3450) --- src/agent/onefuzz-task/src/tasks/analysis/generic.rs | 2 +- src/agent/onefuzz-task/src/tasks/merge/generic.rs | 2 +- src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs index d89ab30bd3..05c6c3d169 100644 --- a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs @@ -174,7 +174,7 @@ async fn poll_inputs( } message.delete().await?; } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; } } diff --git a/src/agent/onefuzz-task/src/tasks/merge/generic.rs b/src/agent/onefuzz-task/src/tasks/merge/generic.rs index 4f2e8234a8..3b6a2094d8 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/generic.rs @@ -83,7 +83,7 @@ pub async fn spawn(config: &Config) -> Result<()> { } } } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; }; } diff --git a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs index 1c334b3f18..2d53bc8c07 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs @@ -120,7 +120,7 @@ async fn process_message(config: &Config, input_queue: QueueClient) -> Result<() } Ok(()) } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; Ok(()) } From c49442facd923edd9c876dd5328efc3071e29363 Mon Sep 17 00:00:00 2001 From: Noah McGregor Harper 
<74685766+nharper285@users.noreply.github.com> Date: Fri, 25 Aug 2023 13:36:36 -0700 Subject: [PATCH 13/20] Use language features. (#3456) --- .devcontainer/devcontainer.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 4059b3d7c1..d3fcf050ed 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,6 +13,7 @@ "**/target/**": true }, "lldb.executable": "/usr/bin/lldb", + "dotnet.server.useOmnisharp": true, "omnisharp.enableEditorConfigSupport": true, "omnisharp.enableRoslynAnalyzers": true, "python.defaultInterpreterPath": "/workspaces/onefuzz/src/venv/bin/python", @@ -48,4 +49,4 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {} } -} +} \ No newline at end of file From ead63eb3ff19b6ada3c93e42a830ffa4522fd17d Mon Sep 17 00:00:00 2001 From: Cheick Keita Date: Fri, 25 Aug 2023 16:35:06 -0700 Subject: [PATCH 14/20] Remove the retention policy setting (#3452) * Remove the configuration of the autoscale diagnostic * only remove the retention policy * set retention policy to false * remove settings * remove retentionPolicy from bicep * format --- src/ApiService/ApiService/onefuzzlib/AutoScale.cs | 4 ++-- src/deployment/azuredeploy.bicep | 1 - src/deployment/bicep-templates/autoscale-settings.bicep | 9 ++------- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/ApiService/ApiService/onefuzzlib/AutoScale.cs b/src/ApiService/ApiService/onefuzzlib/AutoScale.cs index d721715737..73d5d429e5 100644 --- a/src/ApiService/ApiService/onefuzzlib/AutoScale.cs +++ b/src/ApiService/ApiService/onefuzzlib/AutoScale.cs @@ -263,8 +263,8 @@ private async Async.Task> SetupAutoSca // The field is there in github though, so need to update this code once that code is released: // https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/monitor/Azure.ResourceManager.Monitor/src/Generated/Models/LogSettings.cs // But setting logs one by one works the same as "allLogs" being set... 
- var logSettings1 = new LogSettings(true) { RetentionPolicy = new RetentionPolicy(true, 30), Category = "AutoscaleEvaluations" }; - var logSettings2 = new LogSettings(true) { RetentionPolicy = new RetentionPolicy(true, 30), Category = "AutoscaleScaleActions" }; + var logSettings1 = new LogSettings(true) { Category = "AutoscaleEvaluations" }; + var logSettings2 = new LogSettings(true) { Category = "AutoscaleScaleActions" }; var parameters = new DiagnosticSettingsData { WorkspaceId = logAnalyticsWorkspaceId diff --git a/src/deployment/azuredeploy.bicep b/src/deployment/azuredeploy.bicep index 4f655cb09a..99c175c6e7 100644 --- a/src/deployment/azuredeploy.bicep +++ b/src/deployment/azuredeploy.bicep @@ -167,7 +167,6 @@ module autoscaleSettings 'bicep-templates/autoscale-settings.bicep' = { server_farm_id: serverFarm.outputs.id owner: owner workspaceId: operationalInsights.outputs.workspaceId - logRetention: log_retention autoscale_name: 'onefuzz-autoscale-${uniqueString(resourceGroup().id)}' function_diagnostics_settings_name: 'functionDiagnosticSettings' } diff --git a/src/deployment/bicep-templates/autoscale-settings.bicep b/src/deployment/bicep-templates/autoscale-settings.bicep index e380be272e..185c603c48 100644 --- a/src/deployment/bicep-templates/autoscale-settings.bicep +++ b/src/deployment/bicep-templates/autoscale-settings.bicep @@ -2,7 +2,6 @@ param location string param server_farm_id string param owner string param workspaceId string -param logRetention int param autoscale_name string param function_diagnostics_settings_name string @@ -27,7 +26,7 @@ resource autoscaleSettings 'Microsoft.Insights/autoscalesettings@2015-04-01' = { rules: [ { metricTrigger: { - metricName: 'CpuPercentage' + metricName: 'CpuPercentage' metricResourceUri: server_farm_id operator: 'GreaterThanOrEqual' statistic: 'Average' @@ -50,7 +49,7 @@ resource autoscaleSettings 'Microsoft.Insights/autoscalesettings@2015-04-01' = { operator: 'LessThan' statistic: 'Average' threshold: 25 - timeAggregation:'Average' + timeAggregation:'Average' timeGrain: 'PT1M' timeWindow: 'PT10M' } @@ -79,10 +78,6 @@ resource functionDiagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021- { categoryGroup: 'allLogs' enabled: true - retentionPolicy: { - days: logRetention - enabled: true - } } ] workspaceId: workspaceId From 34dc7e5e7daa41a8b090bfa199fddd37d5f77cd1 Mon Sep 17 00:00:00 2001 From: Cheick Keita Date: Mon, 28 Aug 2023 13:43:26 -0700 Subject: [PATCH 15/20] install LLVm 16 before running dotnet-fuzzing-tools.ps1 (#3462) --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b8eb43826..2dd85d7c92 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -375,6 +375,10 @@ jobs: with: # use global.json to install the correct version global-json-file: global.json + - name: install llVM + run: | + choco install llvm --version 16.0.6 + shell: powershell - run: src/ci/dotnet-fuzzing-tools.ps1 shell: pwsh - uses: actions/upload-artifact@v3 From c4d0cd754bdefef45236bab9a85b288bd518c635 Mon Sep 17 00:00:00 2001 From: Cheick Keita Date: Mon, 28 Aug 2023 13:49:10 -0700 Subject: [PATCH 16/20] Revert the location of the setupFolder (#3460) * Revert the location of the setupFolder * install llvm version 16.0.6 * print version * install llvm before building fuzzing tools * revert changes in the script --- src/agent/onefuzz-agent/src/work.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/src/agent/onefuzz-agent/src/work.rs b/src/agent/onefuzz-agent/src/work.rs index b55d1d86a1..d0222744a7 100644 --- a/src/agent/onefuzz-agent/src/work.rs +++ b/src/agent/onefuzz-agent/src/work.rs @@ -91,7 +91,10 @@ impl WorkSet { pub fn setup_dir(&self) -> Result { let root = self.get_root_folder()?; - self.setup_url.as_path(root) + // Putting the setup container at the root for backward compatibility. + // The path of setup folder can be used as part of the deduplication logic in the bug filing service + let setup_root = root.parent().ok_or_else(|| anyhow!("Invalid root"))?; + self.setup_url.as_path(setup_root) } pub fn extra_setup_dir(&self) -> Result> { From 10bb79a08c628be7a24d913343e7a85c611f71db Mon Sep 17 00:00:00 2001 From: Noah McGregor Harper <74685766+nharper285@users.noreply.github.com> Date: Mon, 28 Aug 2023 14:53:32 -0700 Subject: [PATCH 17/20] Deleting `AutoScale` table entries on vmss shutdown. (#3455) * Deleting AutoScale table entries on vmss shutdown. * FIx. * Trying again * Fixing delete. * Updating autoscale to orm * Removing comments. --- src/ApiService/ApiService/onefuzzlib/AutoScale.cs | 7 ++----- src/ApiService/ApiService/onefuzzlib/ScalesetOperations.cs | 7 +++++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/ApiService/ApiService/onefuzzlib/AutoScale.cs b/src/ApiService/ApiService/onefuzzlib/AutoScale.cs index 73d5d429e5..bf3aef7e52 100644 --- a/src/ApiService/ApiService/onefuzzlib/AutoScale.cs +++ b/src/ApiService/ApiService/onefuzzlib/AutoScale.cs @@ -1,5 +1,4 @@ -using System.Net; -using System.Text.Json; +using System.Text.Json; using System.Threading.Tasks; using ApiService.OneFuzzLib.Orm; using Azure; @@ -10,9 +9,7 @@ namespace Microsoft.OneFuzz.Service; -public interface IAutoScaleOperations { - - public Async.Task> Insert(AutoScale autoScale); +public interface IAutoScaleOperations : IOrm { public Async.Task GetSettingsForScaleset(ScalesetId scalesetId); diff --git a/src/ApiService/ApiService/onefuzzlib/ScalesetOperations.cs b/src/ApiService/ApiService/onefuzzlib/ScalesetOperations.cs index 44062ca818..c48f167d4c 100644 --- a/src/ApiService/ApiService/onefuzzlib/ScalesetOperations.cs +++ b/src/ApiService/ApiService/onefuzzlib/ScalesetOperations.cs @@ -499,6 +499,13 @@ public async Async.Task Halt(Scaleset scaleset) { _logTracer.AddHttpStatus(r.ErrorV); _logTracer.LogError("Failed to delete scaleset record {ScalesetId}", scaleset.ScalesetId); } + var autoscaleEntry = await _context.AutoScaleOperations.GetSettingsForScaleset(scaleset.ScalesetId); + if (autoscaleEntry is null) { + _logTracer.LogInformation("Could not find autoscale settings for scaleset {ScalesetId}", scaleset.ScalesetId); + } else { + _logTracer.LogInformation("Deleting autoscale entry for scaleset {ScalesetId}", scaleset.ScalesetId); + _ = await _context.AutoScaleOperations.Delete(autoscaleEntry); + } } else { var r = await Update(scaleset); if (!r.IsOk) { From 2a8883816043a6e4092e6fa9bbf95f18345681fc Mon Sep 17 00:00:00 2001 From: Kanan B <32438208+kananb@users.noreply.github.com> Date: Mon, 28 Aug 2023 15:14:50 -0700 Subject: [PATCH 18/20] Kanan/semantic validation (#3386) * Throw exceptoins for missing area/iteration paths * Comment out feature flag check for work item creation * Revert "Comment out feature flag check for work item creation" This reverts commit ad961694535d06ca1f607fe261b3624f36ad1166. 
* Add some extra info to the exceptions to distinguish them * Add logging when a validation error is encountered * Add call to validate in NotificationsTest for testing * Fix area and iteration path validation * Update error messages * Fix ArgumentNullException * Add extra information to path check failure * Request classification nodes with depth equal to number of parts in the path * Pass tree structure as parameter instead of always using Areas * Remove '[PAT]' from error messages * Require project name in Area/IterationPath during validation * PR comments * Sneak in a one-line fix for empty comments * Change context messsage in response from notiifcation test --- .../ApiService/Functions/NotificationsTest.cs | 5 ++ .../ApiService/OneFuzzTypes/Enums.cs | 1 + .../onefuzzlib/NotificationOperations.cs | 1 + .../onefuzzlib/notifications/Ado.cs | 58 ++++++++++++++++++- src/pytypes/onefuzztypes/enums.py | 1 + 5 files changed, 63 insertions(+), 3 deletions(-) diff --git a/src/ApiService/ApiService/Functions/NotificationsTest.cs b/src/ApiService/ApiService/Functions/NotificationsTest.cs index 53287aa37a..a216f973b0 100644 --- a/src/ApiService/ApiService/Functions/NotificationsTest.cs +++ b/src/ApiService/ApiService/Functions/NotificationsTest.cs @@ -24,6 +24,11 @@ public async Async.Task Run([HttpTrigger(AuthorizationLevel.An } var notificationTest = request.OkV; + var validConfig = await notificationTest.Notification.Config.Validate(); + if (!validConfig.IsOk) { + return await _context.RequestHandling.NotOk(req, validConfig.ErrorV, context: "notification test"); + } + var result = await _context.NotificationOperations.TriggerNotification(notificationTest.Notification.Container, notificationTest.Notification, notificationTest.Report, isLastRetryAttempt: true); var response = req.CreateResponse(HttpStatusCode.OK); diff --git a/src/ApiService/ApiService/OneFuzzTypes/Enums.cs b/src/ApiService/ApiService/OneFuzzTypes/Enums.cs index 97c840ba7f..b1f7225b5f 100644 --- a/src/ApiService/ApiService/OneFuzzTypes/Enums.cs +++ b/src/ApiService/ApiService/OneFuzzTypes/Enums.cs @@ -48,6 +48,7 @@ public enum ErrorCode { ADO_VALIDATION_UNEXPECTED_ERROR = 491, ADO_VALIDATION_MISSING_PAT_SCOPES = 492, ADO_WORKITEM_PROCESSING_DISABLED = 494, + ADO_VALIDATION_INVALID_PATH = 495, // NB: if you update this enum, also update enums.py } diff --git a/src/ApiService/ApiService/onefuzzlib/NotificationOperations.cs b/src/ApiService/ApiService/onefuzzlib/NotificationOperations.cs index 9fd87f7669..dc133e1fba 100644 --- a/src/ApiService/ApiService/onefuzzlib/NotificationOperations.cs +++ b/src/ApiService/ApiService/onefuzzlib/NotificationOperations.cs @@ -134,6 +134,7 @@ public async Async.Task> Create(Container container, if (await _context.FeatureManagerSnapshot.IsEnabledAsync(FeatureFlagConstants.SemanticNotificationConfigValidation)) { var validConfig = await config.Validate(); if (!validConfig.IsOk) { + _logTracer.LogError($"Error(s) ocurred during template validation: {{title}}{Environment.NewLine}{{errors}}", validConfig.ErrorV.Title, string.Join(Environment.NewLine, validConfig.ErrorV.Errors ?? 
new())); return OneFuzzResult.Error(validConfig.ErrorV); } } diff --git a/src/ApiService/ApiService/onefuzzlib/notifications/Ado.cs b/src/ApiService/ApiService/onefuzzlib/notifications/Ado.cs index d6ce2aa01a..e05bb9bc24 100644 --- a/src/ApiService/ApiService/onefuzzlib/notifications/Ado.cs +++ b/src/ApiService/ApiService/onefuzzlib/notifications/Ado.cs @@ -89,6 +89,38 @@ private static bool IsTransient(Exception e) { return errorCodes.Any(errorStr.Contains); } + private static async Async.Task ValidatePath(string project, string path, TreeStructureGroup structureGroup, WorkItemTrackingHttpClient client) { + var pathType = (structureGroup == TreeStructureGroup.Areas) ? "Area" : "Iteration"; + var pathParts = path.Split('\\'); + if (!string.Equals(pathParts[0], project, StringComparison.OrdinalIgnoreCase)) { + return OneFuzzResultVoid.Error(ErrorCode.ADO_VALIDATION_INVALID_PATH, new string[] { + $"Path \"{path}\" is invalid. It must start with the project name, \"{project}\".", + $"Example: \"{project}\\{path}\".", + }); + } + + var current = await client.GetClassificationNodeAsync(project, structureGroup, depth: pathParts.Length - 1); + if (current == null) { + return OneFuzzResultVoid.Error(ErrorCode.ADO_VALIDATION_INVALID_PATH, new string[] { + $"{pathType} Path \"{path}\" is invalid. \"{project}\" is not a valid project.", + }); + } + + foreach (var part in pathParts.Skip(1)) { + var child = current.Children?.FirstOrDefault(x => string.Equals(x.Name, part, StringComparison.OrdinalIgnoreCase)); + if (child == null) { + return OneFuzzResultVoid.Error(ErrorCode.ADO_VALIDATION_INVALID_PATH, new string[] { + $"{pathType} Path \"{path}\" is invalid. \"{part}\" is not a valid child of \"{current.Name}\".", + $"Valid children of \"{current.Name}\" are: [{string.Join(',', current.Children?.Select(x => $"\"{x.Name}\"") ?? 
new List())}].", + }); + } + + current = child; + } + + return OneFuzzResultVoid.Ok; + } + public static async Async.Task Validate(AdoTemplate config) { // Validate PAT is valid for the base url VssConnection connection; @@ -124,10 +156,9 @@ await policy.ExecuteAsync(async () => { return OneFuzzResultVoid.Error(ErrorCode.ADO_VALIDATION_INVALID_PAT, "Auth token is missing or invalid"); } + var witClient = await connection.GetClientAsync(); try { // Validate unique_fields are part of the project's valid fields - var witClient = await connection.GetClientAsync(); - // The set of valid fields for this project according to ADO var projectValidFields = await GetValidFields(witClient, config.Project); @@ -162,6 +193,27 @@ await policy.ExecuteAsync(async () => { }); } + try { + // Validate AreaPath and IterationPath exist + if (config.AdoFields.TryGetValue("System.AreaPath", out var areaPathString)) { + var validateAreaPath = await ValidatePath(config.Project, areaPathString, TreeStructureGroup.Areas, witClient); + if (!validateAreaPath.IsOk) { + return validateAreaPath; + } + } + if (config.AdoFields.TryGetValue("System.IterationPath", out var iterationPathString)) { + var validateIterationPath = await ValidatePath(config.Project, iterationPathString, TreeStructureGroup.Iterations, witClient); + if (!validateIterationPath.IsOk) { + return validateIterationPath; + } + } + } catch (Exception e) { + return OneFuzzResultVoid.Error(ErrorCode.ADO_VALIDATION_UNEXPECTED_ERROR, new string[] { + "Failed to query and validate against the classification nodes for this project", + $"Exception: {e}", + }); + } + return OneFuzzResultVoid.Ok; } @@ -362,7 +414,7 @@ public async Async.Task UpdateExisting(WorkItem item, IList<(string, strin return false; } - if (_config.OnDuplicate.Comment != null) { + if (!string.IsNullOrEmpty(_config.OnDuplicate.Comment)) { var comment = _config.OnDuplicate.Comment; _ = await _client.AddCommentAsync( new CommentCreate() { diff --git a/src/pytypes/onefuzztypes/enums.py b/src/pytypes/onefuzztypes/enums.py index cf09b01824..4d59945c85 100644 --- a/src/pytypes/onefuzztypes/enums.py +++ b/src/pytypes/onefuzztypes/enums.py @@ -302,6 +302,7 @@ class ErrorCode(Enum): ADO_VALIDATION_UNEXPECTED_HTTP_EXCEPTION = 490 ADO_VALIDATION_UNEXPECTED_ERROR = 491 ADO_VALIDATION_MISSING_PAT_SCOPES = 492 + ADO_VALIDATION_INVALID_PATH = 495 # NB: if you update this enum, also update Enums.cs From c69deed50e81cc1805f6f82ebb10513a211cbbe2 Mon Sep 17 00:00:00 2001 From: Adam <103067949+AdamL-Microsoft@users.noreply.github.com> Date: Tue, 29 Aug 2023 12:57:19 -0700 Subject: [PATCH 19/20] Release 8.7.1 (hotfix) (#3459) * Remove the retention policy setting (#3452) --------- Co-authored-by: Cheick Keita --- .devcontainer/devcontainer.json | 3 +- .github/workflows/ci.yml | 2 - CHANGELOG.md | 6 + CURRENT_VERSION | 2 +- .../ApiService/Functions/QueueJobResult.cs | 60 ------- .../ApiService/OneFuzzTypes/Model.cs | 45 ----- src/ApiService/ApiService/Program.cs | 1 - .../ApiService/onefuzzlib/Config.cs | 1 - .../ApiService/onefuzzlib/Extension.cs | 44 +++-- .../onefuzzlib/JobResultOperations.cs | 121 ------------- .../ApiService/onefuzzlib/OnefuzzContext.cs | 2 - .../IntegrationTests/Fakes/TestContext.cs | 3 - src/agent/Cargo.lock | 16 -- src/agent/Cargo.toml | 1 - src/agent/onefuzz-agent/src/config.rs | 12 -- src/agent/onefuzz-agent/src/log_uploader.rs | 29 ++++ src/agent/onefuzz-agent/src/work.rs | 5 +- src/agent/onefuzz-result/Cargo.toml | 18 -- src/agent/onefuzz-result/src/job_result.rs | 129 -------------- 
src/agent/onefuzz-result/src/lib.rs | 4 - src/agent/onefuzz-task/Cargo.toml | 1 - src/agent/onefuzz-task/src/local/cmd.rs | 42 ++++- src/agent/onefuzz-task/src/local/common.rs | 26 ++- .../example_templates/libfuzzer_basic.yml | 34 ++-- .../src/local/generic_analysis.rs | 137 ++++++++++++++- .../src/local/generic_crash_report.rs | 138 ++++++++++++++- .../src/local/generic_generator.rs | 142 ++++++++++++++- src/agent/onefuzz-task/src/local/libfuzzer.rs | 161 +++++++++++++++++- .../src/local/libfuzzer_crash_report.rs | 128 +++++++++++++- .../onefuzz-task/src/local/libfuzzer_merge.rs | 84 ++++++++- .../src/local/libfuzzer_regression.rs | 134 ++++++++++++++- .../src/local/libfuzzer_test_input.rs | 83 +++++++++ src/agent/onefuzz-task/src/local/mod.rs | 1 + src/agent/onefuzz-task/src/local/radamsa.rs | 78 +++++++++ src/agent/onefuzz-task/src/local/schema.json | 8 +- src/agent/onefuzz-task/src/local/template.rs | 13 +- .../onefuzz-task/src/local/test_input.rs | 86 ++++++++++ .../src/tasks/analysis/generic.rs | 5 +- src/agent/onefuzz-task/src/tasks/config.rs | 20 --- .../src/tasks/coverage/generic.rs | 19 +-- .../onefuzz-task/src/tasks/fuzz/generator.rs | 7 +- .../src/tasks/fuzz/libfuzzer/common.rs | 49 ++---- .../onefuzz-task/src/tasks/fuzz/supervisor.rs | 15 +- src/agent/onefuzz-task/src/tasks/heartbeat.rs | 2 +- .../onefuzz-task/src/tasks/merge/generic.rs | 2 +- .../src/tasks/merge/libfuzzer_merge.rs | 2 +- .../src/tasks/regression/common.rs | 15 +- .../src/tasks/regression/generic.rs | 3 +- .../src/tasks/regression/libfuzzer.rs | 3 +- .../src/tasks/report/crash_report.rs | 45 +---- .../src/tasks/report/dotnet/generic.rs | 22 +-- .../onefuzz-task/src/tasks/report/generic.rs | 14 +- .../src/tasks/report/libfuzzer_report.rs | 5 - src/agent/onefuzz/Cargo.toml | 1 - src/agent/onefuzz/src/blob/url.rs | 23 +-- src/agent/onefuzz/src/syncdir.rs | 66 +------ .../bicep-templates/storageAccounts.bicep | 2 +- src/integration-tests/integration-test.py | 77 ++------- src/runtime-tools/linux/setup.sh | 64 ++----- 59 files changed, 1389 insertions(+), 872 deletions(-) delete mode 100644 src/ApiService/ApiService/Functions/QueueJobResult.cs delete mode 100644 src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs delete mode 100644 src/agent/onefuzz-result/Cargo.toml delete mode 100644 src/agent/onefuzz-result/src/job_result.rs delete mode 100644 src/agent/onefuzz-result/src/lib.rs create mode 100644 src/agent/onefuzz-task/src/local/radamsa.rs mode change 100644 => 100755 src/runtime-tools/linux/setup.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d3fcf050ed..4059b3d7c1 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,7 +13,6 @@ "**/target/**": true }, "lldb.executable": "/usr/bin/lldb", - "dotnet.server.useOmnisharp": true, "omnisharp.enableEditorConfigSupport": true, "omnisharp.enableRoslynAnalyzers": true, "python.defaultInterpreterPath": "/workspaces/onefuzz/src/venv/bin/python", @@ -49,4 +48,4 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {} } -} \ No newline at end of file +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2dd85d7c92..12824fd182 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -542,11 +542,9 @@ jobs: mkdir -p artifacts/linux-libfuzzer mkdir -p artifacts/linux-libfuzzer-with-options - mkdir -p artifacts/mariner-libfuzzer (cd libfuzzer ; make ) cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer cp -r libfuzzer/fuzz.exe 
libfuzzer/seeds artifacts/linux-libfuzzer-with-options - cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/mariner-libfuzzer mkdir -p artifacts/linux-libfuzzer-regression (cd libfuzzer-regression ; make ) diff --git a/CHANGELOG.md b/CHANGELOG.md index be4779ad77..8d46ea2a0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 8.7.1 + +### Fixed + +* Service: Removed deprecated Azure retention policy setting that was causing scaleset deployment errors [#3452](https://github.com/microsoft/onefuzz/pull/3452) + ## 8.7.0 ### Added diff --git a/CURRENT_VERSION b/CURRENT_VERSION index c0bcaebe8f..efeecbe2c5 100644 --- a/CURRENT_VERSION +++ b/CURRENT_VERSION @@ -1 +1 @@ -8.7.0 \ No newline at end of file +8.7.1 \ No newline at end of file diff --git a/src/ApiService/ApiService/Functions/QueueJobResult.cs b/src/ApiService/ApiService/Functions/QueueJobResult.cs deleted file mode 100644 index d781a4d1e1..0000000000 --- a/src/ApiService/ApiService/Functions/QueueJobResult.cs +++ /dev/null @@ -1,60 +0,0 @@ -using System.Text.Json; -using Microsoft.Azure.Functions.Worker; -using Microsoft.Extensions.Logging; -using Microsoft.OneFuzz.Service.OneFuzzLib.Orm; -namespace Microsoft.OneFuzz.Service.Functions; - - -public class QueueJobResult { - private readonly ILogger _log; - private readonly IOnefuzzContext _context; - - public QueueJobResult(ILogger logTracer, IOnefuzzContext context) { - _log = logTracer; - _context = context; - } - - [Function("QueueJobResult")] - public async Async.Task Run([QueueTrigger("job-result", Connection = "AzureWebJobsStorage")] string msg) { - - var _tasks = _context.TaskOperations; - var _jobs = _context.JobOperations; - - _log.LogInformation("job result: {msg}", msg); - var jr = JsonSerializer.Deserialize(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}"); - - var task = await _tasks.GetByTaskId(jr.TaskId); - if (task == null) { - _log.LogWarning("invalid {TaskId}", jr.TaskId); - return; - } - - var job = await _jobs.Get(task.JobId); - if (job == null) { - _log.LogWarning("invalid {JobId}", task.JobId); - return; - } - - JobResultData? 
data = jr.Data; - if (data == null) { - _log.LogWarning($"job result data is empty, throwing out: {jr}"); - return; - } - - var jobResultType = data.Type; - _log.LogInformation($"job result data type: {jobResultType}"); - - Dictionary value; - if (jr.Value.Count > 0) { - value = jr.Value; - } else { - _log.LogWarning($"job result data is empty, throwing out: {jr}"); - return; - } - - var jobResult = await _context.JobResultOperations.CreateOrUpdate(job.JobId, jobResultType, value); - if (!jobResult.IsOk) { - _log.LogError("failed to create or update with job result {JobId}", job.JobId); - } - } -} diff --git a/src/ApiService/ApiService/OneFuzzTypes/Model.cs b/src/ApiService/ApiService/OneFuzzTypes/Model.cs index b839f52ddc..e430c1448c 100644 --- a/src/ApiService/ApiService/OneFuzzTypes/Model.cs +++ b/src/ApiService/ApiService/OneFuzzTypes/Model.cs @@ -33,19 +33,6 @@ public enum HeartbeatType { TaskAlive, } -[SkipRename] -public enum JobResultType { - NewCrashingInput, - NoReproCrashingInput, - NewReport, - NewUniqueReport, - NewRegressionReport, - NewCoverage, - NewCrashDump, - CoverageData, - RuntimeStats, -} - public record HeartbeatData(HeartbeatType Type); public record TaskHeartbeatEntry( @@ -54,16 +41,6 @@ public record TaskHeartbeatEntry( Guid MachineId, HeartbeatData[] Data); -public record JobResultData(JobResultType Type); - -public record TaskJobResultEntry( - Guid TaskId, - Guid? JobId, - Guid MachineId, - JobResultData Data, - Dictionary Value - ); - public record NodeHeartbeatEntry(Guid NodeId, HeartbeatData[] Data); public record NodeCommandStopIfFree(); @@ -915,27 +892,6 @@ public record SecretAddress(Uri Url) : ISecret { public record SecretData(ISecret Secret) { } -public record JobResult( - [PartitionKey][RowKey] Guid JobId, - string Project, - string Name, - double NewCrashingInput = 0, - double NoReproCrashingInput = 0, - double NewReport = 0, - double NewUniqueReport = 0, - double NewRegressionReport = 0, - double NewCrashDump = 0, - double InstructionsCovered = 0, - double TotalInstructions = 0, - double CoverageRate = 0, - double IterationCount = 0 -) : EntityBase() { - public JobResult(Guid JobId, string Project, string Name) : this( - JobId: JobId, - Project: Project, - Name: Name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) { } -} - public record JobConfig( string Project, string Name, @@ -1100,7 +1056,6 @@ public record TaskUnitConfig( string? InstanceTelemetryKey, string? MicrosoftTelemetryKey, Uri HeartbeatQueue, - Uri JobResultQueue, Dictionary Tags ) { public Uri? inputQueue { get; set; } diff --git a/src/ApiService/ApiService/Program.cs b/src/ApiService/ApiService/Program.cs index d5ee30b45e..f425c00809 100644 --- a/src/ApiService/ApiService/Program.cs +++ b/src/ApiService/ApiService/Program.cs @@ -118,7 +118,6 @@ public static async Async.Task Main() { .AddScoped() .AddScoped() .AddScoped() - .AddScoped() .AddScoped() .AddScoped() .AddScoped() diff --git a/src/ApiService/ApiService/onefuzzlib/Config.cs b/src/ApiService/ApiService/onefuzzlib/Config.cs index 872cedbc01..71af317348 100644 --- a/src/ApiService/ApiService/onefuzzlib/Config.cs +++ b/src/ApiService/ApiService/onefuzzlib/Config.cs @@ -71,7 +71,6 @@ private static BlobContainerSasPermissions ConvertPermissions(ContainerPermissio InstanceTelemetryKey: _serviceConfig.ApplicationInsightsInstrumentationKey, MicrosoftTelemetryKey: _serviceConfig.OneFuzzTelemetry, HeartbeatQueue: await _queue.GetQueueSas("task-heartbeat", StorageType.Config, QueueSasPermissions.Add) ?? 
throw new Exception("unable to get heartbeat queue sas"), - JobResultQueue: await _queue.GetQueueSas("job-result", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get heartbeat queue sas"), Tags: task.Config.Tags ?? new Dictionary() ); diff --git a/src/ApiService/ApiService/onefuzzlib/Extension.cs b/src/ApiService/ApiService/onefuzzlib/Extension.cs index fbf62dd343..7995026eca 100644 --- a/src/ApiService/ApiService/onefuzzlib/Extension.cs +++ b/src/ApiService/ApiService/onefuzzlib/Extension.cs @@ -36,9 +36,7 @@ public async Async.Task> GenericExtensions(AzureLocati var extensions = new List(); var instanceConfig = await _context.ConfigOperations.Fetch(); - if (vmOs == Os.Windows) { - extensions.Add(await MonitorExtension(region)); - } + extensions.Add(await MonitorExtension(region, vmOs)); var depenency = DependencyExtension(region, vmOs); if (depenency is not null) { @@ -331,21 +329,37 @@ public async Async.Task AgentConfig(AzureLocation region, Os throw new NotSupportedException($"unsupported OS: {vmOs}"); } - public async Async.Task MonitorExtension(AzureLocation region) { + public async Async.Task MonitorExtension(AzureLocation region, Os vmOs) { var settings = await _context.LogAnalytics.GetMonitorSettings(); var extensionSettings = JsonSerializer.Serialize(new { WorkspaceId = settings.Id }, _extensionSerializerOptions); var protectedExtensionSettings = JsonSerializer.Serialize(new { WorkspaceKey = settings.Key }, _extensionSerializerOptions); - return new VMExtensionWrapper { - Location = region, - Name = "OMSExtension", - TypePropertiesType = "MicrosoftMonitoringAgent", - Publisher = "Microsoft.EnterpriseCloud.Monitoring", - TypeHandlerVersion = "1.0", - AutoUpgradeMinorVersion = true, - Settings = new BinaryData(extensionSettings), - ProtectedSettings = new BinaryData(protectedExtensionSettings), - EnableAutomaticUpgrade = false - }; + if (vmOs == Os.Windows) { + return new VMExtensionWrapper { + Location = region, + Name = "OMSExtension", + TypePropertiesType = "MicrosoftMonitoringAgent", + Publisher = "Microsoft.EnterpriseCloud.Monitoring", + TypeHandlerVersion = "1.0", + AutoUpgradeMinorVersion = true, + Settings = new BinaryData(extensionSettings), + ProtectedSettings = new BinaryData(protectedExtensionSettings), + EnableAutomaticUpgrade = false + }; + } else if (vmOs == Os.Linux) { + return new VMExtensionWrapper { + Location = region, + Name = "OmsAgentForLinux", + TypePropertiesType = "OmsAgentForLinux", + Publisher = "Microsoft.EnterpriseCloud.Monitoring", + TypeHandlerVersion = "1.0", + AutoUpgradeMinorVersion = true, + Settings = new BinaryData(extensionSettings), + ProtectedSettings = new BinaryData(protectedExtensionSettings), + EnableAutomaticUpgrade = false + }; + } else { + throw new NotSupportedException($"unsupported os: {vmOs}"); + } } diff --git a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs deleted file mode 100644 index 1166cf91d4..0000000000 --- a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs +++ /dev/null @@ -1,121 +0,0 @@ -using ApiService.OneFuzzLib.Orm; -using Microsoft.Extensions.Logging; -using Polly; -namespace Microsoft.OneFuzz.Service; - -public interface IJobResultOperations : IOrm { - - Async.Task GetJobResult(Guid jobId); - Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue); - -} -public class JobResultOperations : Orm, IJobResultOperations { - - public JobResultOperations(ILogger log, 
IOnefuzzContext context) - : base(log, context) { - } - - public async Async.Task GetJobResult(Guid jobId) { - return await SearchByPartitionKeys(new[] { jobId.ToString() }).SingleOrDefaultAsync(); - } - - private JobResult UpdateResult(JobResult result, JobResultType type, Dictionary resultValue) { - - var newResult = result; - double newValue; - switch (type) { - case JobResultType.NewCrashingInput: - newValue = result.NewCrashingInput + resultValue["count"]; - newResult = result with { NewCrashingInput = newValue }; - break; - case JobResultType.NewReport: - newValue = result.NewReport + resultValue["count"]; - newResult = result with { NewReport = newValue }; - break; - case JobResultType.NewUniqueReport: - newValue = result.NewUniqueReport + resultValue["count"]; - newResult = result with { NewUniqueReport = newValue }; - break; - case JobResultType.NewRegressionReport: - newValue = result.NewRegressionReport + resultValue["count"]; - newResult = result with { NewRegressionReport = newValue }; - break; - case JobResultType.NewCrashDump: - newValue = result.NewCrashDump + resultValue["count"]; - newResult = result with { NewCrashDump = newValue }; - break; - case JobResultType.CoverageData: - double newCovered = resultValue["covered"]; - double newTotalCovered = resultValue["features"]; - double newCoverageRate = resultValue["rate"]; - newResult = result with { InstructionsCovered = newCovered, TotalInstructions = newTotalCovered, CoverageRate = newCoverageRate }; - break; - case JobResultType.RuntimeStats: - double newTotalIterations = resultValue["total_count"]; - newResult = result with { IterationCount = newTotalIterations }; - break; - default: - _logTracer.LogWarning($"Invalid Field {type}."); - break; - } - _logTracer.LogInformation($"Attempting to log new result: {newResult}"); - return newResult; - } - - private async Async.Task TryUpdate(Job job, JobResultType resultType, Dictionary resultValue) { - var jobId = job.JobId; - - var jobResult = await GetJobResult(jobId); - - if (jobResult == null) { - _logTracer.LogInformation("Creating new JobResult for Job {JobId}", jobId); - - var entry = new JobResult(JobId: jobId, Project: job.Config.Project, Name: job.Config.Name); - - jobResult = UpdateResult(entry, resultType, resultValue); - - var r = await Insert(jobResult); - if (!r.IsOk) { - throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); - } - _logTracer.LogInformation("created job result {JobId}", jobResult.JobId); - } else { - _logTracer.LogInformation("Updating existing JobResult entry for Job {JobId}", jobId); - - jobResult = UpdateResult(jobResult, resultType, resultValue); - - var r = await Update(jobResult); - if (!r.IsOk) { - throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); - } - _logTracer.LogInformation("updated job result {JobId}", jobResult.JobId); - } - - return true; - } - - public async Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue) { - - var job = await _context.JobOperations.Get(jobId); - if (job == null) { - return OneFuzzResultVoid.Error(ErrorCode.INVALID_REQUEST, "invalid job"); - } - - var success = false; - try { - _logTracer.LogInformation("attempt to update job result {JobId}", job.JobId); - var policy = Policy.Handle().WaitAndRetryAsync(50, _ => new TimeSpan(0, 0, 5)); - await policy.ExecuteAsync(async () => { - success = await TryUpdate(job, resultType, resultValue); - _logTracer.LogInformation("attempt {success}", success); - }); - return 
OneFuzzResultVoid.Ok; - } catch (Exception e) { - return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, new string[] { - $"Unexpected failure when attempting to update job result for {job.JobId}", - $"Exception: {e}" - }); - } - } -} - diff --git a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs index 03c6322663..d877bfddbb 100644 --- a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs +++ b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs @@ -19,7 +19,6 @@ public interface IOnefuzzContext { IExtensions Extensions { get; } IIpOperations IpOperations { get; } IJobOperations JobOperations { get; } - IJobResultOperations JobResultOperations { get; } ILogAnalytics LogAnalytics { get; } INodeMessageOperations NodeMessageOperations { get; } INodeOperations NodeOperations { get; } @@ -84,7 +83,6 @@ public OnefuzzContext(IServiceProvider serviceProvider) { public IVmOperations VmOperations => _serviceProvider.GetRequiredService(); public ISecretsOperations SecretsOperations => _serviceProvider.GetRequiredService(); public IJobOperations JobOperations => _serviceProvider.GetRequiredService(); - public IJobResultOperations JobResultOperations => _serviceProvider.GetRequiredService(); public IScheduler Scheduler => _serviceProvider.GetRequiredService(); public IConfig Config => _serviceProvider.GetRequiredService(); public ILogAnalytics LogAnalytics => _serviceProvider.GetRequiredService(); diff --git a/src/ApiService/IntegrationTests/Fakes/TestContext.cs b/src/ApiService/IntegrationTests/Fakes/TestContext.cs index 66d121e746..c46ff5fce7 100644 --- a/src/ApiService/IntegrationTests/Fakes/TestContext.cs +++ b/src/ApiService/IntegrationTests/Fakes/TestContext.cs @@ -32,7 +32,6 @@ public TestContext(IHttpClientFactory httpClientFactory, OneFuzzLoggerProvider p TaskOperations = new TaskOperations(provider.CreateLogger(), Cache, this); NodeOperations = new NodeOperations(provider.CreateLogger(), this); JobOperations = new JobOperations(provider.CreateLogger(), this); - JobResultOperations = new JobResultOperations(provider.CreateLogger(), this); NodeTasksOperations = new NodeTasksOperations(provider.CreateLogger(), this); TaskEventOperations = new TaskEventOperations(provider.CreateLogger(), this); NodeMessageOperations = new NodeMessageOperations(provider.CreateLogger(), this); @@ -58,7 +57,6 @@ public Async.Task InsertAll(params EntityBase[] objs) Node n => NodeOperations.Insert(n), Pool p => PoolOperations.Insert(p), Job j => JobOperations.Insert(j), - JobResult jr => JobResultOperations.Insert(jr), Repro r => ReproOperations.Insert(r), Scaleset ss => ScalesetOperations.Insert(ss), NodeTasks nt => NodeTasksOperations.Insert(nt), @@ -86,7 +84,6 @@ public Async.Task InsertAll(params EntityBase[] objs) public ITaskOperations TaskOperations { get; } public IJobOperations JobOperations { get; } - public IJobResultOperations JobResultOperations { get; } public INodeOperations NodeOperations { get; } public INodeTasksOperations NodeTasksOperations { get; } public ITaskEventOperations TaskEventOperations { get; } diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index 254684be97..a1d86e7d25 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -2123,7 +2123,6 @@ dependencies = [ "log", "nix", "notify", - "onefuzz-result", "onefuzz-telemetry", "pete", "pretty_assertions", @@ -2198,20 +2197,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "onefuzz-result" -version = "0.2.0" -dependencies = [ - "anyhow", - 
"async-trait", - "log", - "onefuzz-telemetry", - "reqwest", - "serde", - "storage-queue", - "uuid", -] - [[package]] name = "onefuzz-task" version = "0.2.0" @@ -2241,7 +2226,6 @@ dependencies = [ "num_cpus", "onefuzz", "onefuzz-file-format", - "onefuzz-result", "onefuzz-telemetry", "path-absolutize", "pretty_assertions", diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index ce01ae880c..2f4cea41a4 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,7 +10,6 @@ members = [ "onefuzz", "onefuzz-task", "onefuzz-agent", - "onefuzz-result", "onefuzz-file-format", "onefuzz-telemetry", "reqwest-retry", diff --git a/src/agent/onefuzz-agent/src/config.rs b/src/agent/onefuzz-agent/src/config.rs index fc623e72af..87edfb2c1b 100644 --- a/src/agent/onefuzz-agent/src/config.rs +++ b/src/agent/onefuzz-agent/src/config.rs @@ -34,8 +34,6 @@ pub struct StaticConfig { pub heartbeat_queue: Option, - pub job_result_queue: Option, - pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -73,8 +71,6 @@ struct RawStaticConfig { pub heartbeat_queue: Option, - pub job_result_queue: Option, - pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -121,7 +117,6 @@ impl StaticConfig { microsoft_telemetry_key: config.microsoft_telemetry_key, instance_telemetry_key: config.instance_telemetry_key, heartbeat_queue: config.heartbeat_queue, - job_result_queue: config.job_result_queue, instance_id: config.instance_id, managed: config.managed, machine_identity, @@ -157,12 +152,6 @@ impl StaticConfig { None }; - let job_result_queue = if let Ok(key) = std::env::var("ONEFUZZ_JOB_RESULT") { - Some(Url::parse(&key)?) - } else { - None - }; - let instance_telemetry_key = if let Ok(key) = std::env::var("ONEFUZZ_INSTANCE_TELEMETRY_KEY") { Some(InstanceTelemetryKey::new(Uuid::parse_str(&key)?)) @@ -194,7 +183,6 @@ impl StaticConfig { instance_telemetry_key, microsoft_telemetry_key, heartbeat_queue, - job_result_queue, instance_id, managed: !is_unmanaged, machine_identity, diff --git a/src/agent/onefuzz-agent/src/log_uploader.rs b/src/agent/onefuzz-agent/src/log_uploader.rs index d424013421..6bccc0bef2 100644 --- a/src/agent/onefuzz-agent/src/log_uploader.rs +++ b/src/agent/onefuzz-agent/src/log_uploader.rs @@ -210,3 +210,32 @@ async fn sync_file( blob_client.append_block(Body::from(f)).await?; Ok(len) } + +#[cfg(test)] +mod tests { + use std::io::Seek; + + use anyhow::Result; + use tokio::io::{AsyncReadExt, AsyncSeekExt}; + + #[allow(clippy::unused_io_amount)] + #[tokio::test] + #[ignore] + + async fn test_seek_behavior() -> Result<()> { + let path = "C:\\temp\\test.ps1"; + let mut std_file = std::fs::File::open(path)?; + std_file.seek(std::io::SeekFrom::Start(3))?; + + let mut tokio_file = tokio::fs::File::from_std(std_file); + + let buf = &mut [0u8; 5]; + tokio_file.read(buf).await?; + println!("******** buf {:?}", buf); + tokio_file.seek(std::io::SeekFrom::Start(0)).await?; + tokio_file.read(buf).await?; + println!("******** buf {:?}", buf); + + Ok(()) + } +} diff --git a/src/agent/onefuzz-agent/src/work.rs b/src/agent/onefuzz-agent/src/work.rs index d0222744a7..b55d1d86a1 100644 --- a/src/agent/onefuzz-agent/src/work.rs +++ b/src/agent/onefuzz-agent/src/work.rs @@ -91,10 +91,7 @@ impl WorkSet { pub fn setup_dir(&self) -> Result { let root = self.get_root_folder()?; - // Putting the setup container at the root for backward compatibility. 
- // The path of setup folder can be used as part of the deduplication logic in the bug filing service - let setup_root = root.parent().ok_or_else(|| anyhow!("Invalid root"))?; - self.setup_url.as_path(setup_root) + self.setup_url.as_path(root) } pub fn extra_setup_dir(&self) -> Result> { diff --git a/src/agent/onefuzz-result/Cargo.toml b/src/agent/onefuzz-result/Cargo.toml deleted file mode 100644 index 7c7de6615c..0000000000 --- a/src/agent/onefuzz-result/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "onefuzz-result" -version = "0.2.0" -authors = ["fuzzing@microsoft.com"] -edition = "2021" -publish = false -license = "MIT" - -[dependencies] -anyhow = { version = "1.0", features = ["backtrace"] } -async-trait = "0.1" -reqwest = "0.11" -serde = "1.0" -storage-queue = { path = "../storage-queue" } -uuid = { version = "1.4", features = ["serde", "v4"] } -onefuzz-telemetry = { path = "../onefuzz-telemetry" } -log = "0.4" - diff --git a/src/agent/onefuzz-result/src/job_result.rs b/src/agent/onefuzz-result/src/job_result.rs deleted file mode 100644 index b305eca2cb..0000000000 --- a/src/agent/onefuzz-result/src/job_result.rs +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -use anyhow::Result; -use async_trait::async_trait; -use onefuzz_telemetry::warn; -use reqwest::Url; -use serde::{self, Deserialize, Serialize}; -use std::collections::HashMap; -use std::sync::Arc; -use storage_queue::QueueClient; -use uuid::Uuid; - -#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] -#[serde(tag = "type")] -pub enum JobResultData { - NewCrashingInput, - NoReproCrashingInput, - NewReport, - NewUniqueReport, - NewRegressionReport, - NewCoverage, - NewCrashDump, - CoverageData, - RuntimeStats, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -struct JobResult { - task_id: Uuid, - job_id: Uuid, - machine_id: Uuid, - machine_name: String, - data: JobResultData, - value: HashMap, -} - -#[derive(Clone)] -pub struct TaskContext { - task_id: Uuid, - job_id: Uuid, - machine_id: Uuid, - machine_name: String, -} - -pub struct JobResultContext { - pub state: TaskContext, - pub queue_client: QueueClient, -} - -pub struct JobResultClient { - pub context: Arc>, -} - -impl JobResultClient { - pub fn init_job_result( - context: TaskContext, - queue_url: Url, - ) -> Result> - where - TaskContext: Send + Sync + 'static, - { - let context = Arc::new(JobResultContext { - state: context, - queue_client: QueueClient::new(queue_url)?, - }); - - Ok(JobResultClient { context }) - } -} - -pub type TaskJobResultClient = JobResultClient; - -pub async fn init_job_result( - queue_url: Url, - task_id: Uuid, - job_id: Uuid, - machine_id: Uuid, - machine_name: String, -) -> Result { - let hb = JobResultClient::init_job_result( - TaskContext { - task_id, - job_id, - machine_id, - machine_name, - }, - queue_url, - )?; - Ok(hb) -} - -#[async_trait] -pub trait JobResultSender { - async fn send_direct(&self, data: JobResultData, value: HashMap); -} - -#[async_trait] -impl JobResultSender for TaskJobResultClient { - async fn send_direct(&self, data: JobResultData, value: HashMap) { - let task_id = self.context.state.task_id; - let job_id = self.context.state.job_id; - let machine_id = self.context.state.machine_id; - let machine_name = self.context.state.machine_name.clone(); - - let _ = self - .context - .queue_client - .enqueue(JobResult { - task_id, - job_id, - machine_id, - machine_name, - data, - value, - }) - .await; - } -} - -#[async_trait] 
-impl JobResultSender for Option { - async fn send_direct(&self, data: JobResultData, value: HashMap) { - match self { - Some(client) => client.send_direct(data, value).await, - None => warn!("Failed to send Job Result message data from agent."), - } - } -} diff --git a/src/agent/onefuzz-result/src/lib.rs b/src/agent/onefuzz-result/src/lib.rs deleted file mode 100644 index dae666ca9a..0000000000 --- a/src/agent/onefuzz-result/src/lib.rs +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -pub mod job_result; diff --git a/src/agent/onefuzz-task/Cargo.toml b/src/agent/onefuzz-task/Cargo.toml index 4e0bd381b0..0ad2f9aa4f 100644 --- a/src/agent/onefuzz-task/Cargo.toml +++ b/src/agent/onefuzz-task/Cargo.toml @@ -39,7 +39,6 @@ serde_json = "1.0" serde_yaml = "0.9.21" onefuzz = { path = "../onefuzz" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } -onefuzz-result = { path = "../onefuzz-result" } path-absolutize = "3.1" reqwest-retry = { path = "../reqwest-retry" } strum = "0.25" diff --git a/src/agent/onefuzz-task/src/local/cmd.rs b/src/agent/onefuzz-task/src/local/cmd.rs index eabefb71ee..80fd51a96b 100644 --- a/src/agent/onefuzz-task/src/local/cmd.rs +++ b/src/agent/onefuzz-task/src/local/cmd.rs @@ -3,7 +3,11 @@ #[cfg(any(target_os = "linux", target_os = "windows"))] use crate::local::coverage; -use crate::local::{common::add_common_config, libfuzzer_fuzz, tui::TerminalUi}; +use crate::local::{ + common::add_common_config, generic_analysis, generic_crash_report, generic_generator, + libfuzzer, libfuzzer_crash_report, libfuzzer_fuzz, libfuzzer_merge, libfuzzer_regression, + libfuzzer_test_input, radamsa, test_input, tui::TerminalUi, +}; use anyhow::{Context, Result}; use clap::{Arg, ArgAction, Command}; use std::time::Duration; @@ -17,9 +21,19 @@ use super::template; #[derive(Debug, PartialEq, Eq, EnumString, IntoStaticStr, EnumIter)] #[strum(serialize_all = "kebab-case")] enum Commands { + Radamsa, #[cfg(any(target_os = "linux", target_os = "windows"))] Coverage, LibfuzzerFuzz, + LibfuzzerMerge, + LibfuzzerCrashReport, + LibfuzzerTestInput, + LibfuzzerRegression, + Libfuzzer, + CrashReport, + Generator, + Analysis, + TestInput, Template, } @@ -54,7 +68,23 @@ pub async fn run(args: clap::ArgMatches) -> Result<()> { match command { #[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::run(&sub_args, event_sender).await, + Commands::Radamsa => radamsa::run(&sub_args, event_sender).await, + Commands::LibfuzzerCrashReport => { + libfuzzer_crash_report::run(&sub_args, event_sender).await + } Commands::LibfuzzerFuzz => libfuzzer_fuzz::run(&sub_args, event_sender).await, + Commands::LibfuzzerMerge => libfuzzer_merge::run(&sub_args, event_sender).await, + Commands::LibfuzzerTestInput => { + libfuzzer_test_input::run(&sub_args, event_sender).await + } + Commands::LibfuzzerRegression => { + libfuzzer_regression::run(&sub_args, event_sender).await + } + Commands::Libfuzzer => libfuzzer::run(&sub_args, event_sender).await, + Commands::CrashReport => generic_crash_report::run(&sub_args, event_sender).await, + Commands::Generator => generic_generator::run(&sub_args, event_sender).await, + Commands::Analysis => generic_analysis::run(&sub_args, event_sender).await, + Commands::TestInput => test_input::run(&sub_args, event_sender).await, Commands::Template => { let config = sub_args .get_one::("config") @@ -110,7 +140,17 @@ pub fn args(name: &'static str) -> Command { let app = match subcommand { #[cfg(any(target_os 
= "linux", target_os = "windows"))] Commands::Coverage => coverage::args(subcommand.into()), + Commands::Radamsa => radamsa::args(subcommand.into()), + Commands::LibfuzzerCrashReport => libfuzzer_crash_report::args(subcommand.into()), Commands::LibfuzzerFuzz => libfuzzer_fuzz::args(subcommand.into()), + Commands::LibfuzzerMerge => libfuzzer_merge::args(subcommand.into()), + Commands::LibfuzzerTestInput => libfuzzer_test_input::args(subcommand.into()), + Commands::LibfuzzerRegression => libfuzzer_regression::args(subcommand.into()), + Commands::Libfuzzer => libfuzzer::args(subcommand.into()), + Commands::CrashReport => generic_crash_report::args(subcommand.into()), + Commands::Generator => generic_generator::args(subcommand.into()), + Commands::Analysis => generic_analysis::args(subcommand.into()), + Commands::TestInput => test_input::args(subcommand.into()), Commands::Template => Command::new("template") .about("uses the template to generate a run") .args(vec![Arg::new("config") diff --git a/src/agent/onefuzz-task/src/local/common.rs b/src/agent/onefuzz-task/src/local/common.rs index 17940d799f..f8d7949e80 100644 --- a/src/agent/onefuzz-task/src/local/common.rs +++ b/src/agent/onefuzz-task/src/local/common.rs @@ -26,10 +26,20 @@ pub const INPUTS_DIR: &str = "inputs_dir"; pub const CRASHES_DIR: &str = "crashes_dir"; pub const CRASHDUMPS_DIR: &str = "crashdumps_dir"; pub const TARGET_WORKERS: &str = "target_workers"; +pub const REPORTS_DIR: &str = "reports_dir"; +pub const NO_REPRO_DIR: &str = "no_repro_dir"; pub const TARGET_TIMEOUT: &str = "target_timeout"; +pub const CHECK_RETRY_COUNT: &str = "check_retry_count"; +pub const DISABLE_CHECK_QUEUE: &str = "disable_check_queue"; +pub const UNIQUE_REPORTS_DIR: &str = "unique_reports_dir"; pub const COVERAGE_DIR: &str = "coverage_dir"; pub const READONLY_INPUTS: &str = "readonly_inputs_dir"; +pub const CHECK_ASAN_LOG: &str = "check_asan_log"; +pub const TOOLS_DIR: &str = "tools_dir"; +pub const RENAME_OUTPUT: &str = "rename_output"; pub const CHECK_FUZZER_HELP: &str = "check_fuzzer_help"; +pub const DISABLE_CHECK_DEBUGGER: &str = "disable_check_debugger"; +pub const REGRESSION_REPORTS_DIR: &str = "regression_reports_dir"; pub const TARGET_EXE: &str = "target_exe"; pub const TARGET_ENV: &str = "target_env"; @@ -37,6 +47,17 @@ pub const TARGET_OPTIONS: &str = "target_options"; // pub const SUPERVISOR_EXE: &str = "supervisor_exe"; // pub const SUPERVISOR_ENV: &str = "supervisor_env"; // pub const SUPERVISOR_OPTIONS: &str = "supervisor_options"; +pub const GENERATOR_EXE: &str = "generator_exe"; +pub const GENERATOR_ENV: &str = "generator_env"; +pub const GENERATOR_OPTIONS: &str = "generator_options"; + +pub const ANALYZER_EXE: &str = "analyzer_exe"; +pub const ANALYZER_OPTIONS: &str = "analyzer_options"; +pub const ANALYZER_ENV: &str = "analyzer_env"; +pub const ANALYSIS_DIR: &str = "analysis_dir"; +pub const ANALYSIS_INPUTS: &str = "analysis_inputs"; +pub const ANALYSIS_UNIQUE_INPUTS: &str = "analysis_unique_inputs"; +pub const PRESERVE_EXISTING_OUTPUTS: &str = "preserve_existing_outputs"; pub const CREATE_JOB_DIR: &str = "create_job_dir"; @@ -45,6 +66,7 @@ const WAIT_FOR_DIR_DELAY: Duration = Duration::from_secs(1); pub enum CmdType { Target, + Generator, // Supervisor, } @@ -68,6 +90,7 @@ pub fn get_cmd_exe(cmd_type: CmdType, args: &clap::ArgMatches) -> Result let name = match cmd_type { CmdType::Target => TARGET_EXE, // CmdType::Supervisor => SUPERVISOR_EXE, + CmdType::Generator => GENERATOR_EXE, }; args.get_one::(name) @@ -79,6 +102,7 @@ pub fn 
get_cmd_arg(cmd_type: CmdType, args: &clap::ArgMatches) -> Vec { let name = match cmd_type { CmdType::Target => TARGET_OPTIONS, // CmdType::Supervisor => SUPERVISOR_OPTIONS, + CmdType::Generator => GENERATOR_OPTIONS, }; args.get_many::(name) @@ -91,6 +115,7 @@ pub fn get_cmd_env(cmd_type: CmdType, args: &clap::ArgMatches) -> Result TARGET_ENV, // CmdType::Supervisor => SUPERVISOR_ENV, + CmdType::Generator => GENERATOR_ENV, }; get_hash_map(args, env_name) } @@ -240,7 +265,6 @@ pub async fn build_local_context( }, instance_telemetry_key: None, heartbeat_queue: None, - job_result_queue: None, microsoft_telemetry_key: None, logs: None, min_available_memory_mb: 0, diff --git a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml index aba02c7991..7210893809 100644 --- a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml +++ b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml @@ -5,31 +5,28 @@ # 2. Install llvm and export LLVM_SYMBOLIZER_PATH like we do in setup.sh -required_args: &required_args - target_exe: "REPLACE_ME" # The path to your target - inputs: &inputs "REPLACE_ME" # A folder containining your inputs - crashes: &crashes "REPLACE_ME" # The folder where you want the crashing inputs to be output - crashdumps: "REPLACE_ME" # The folder where you want the crash dumps to be output - coverage: "REPLACE_ME" # The folder where you want the code coverage to be output - regression_reports: "REPLACE_ME" # The folder where you want the regression reports to be output - target_args: &target_args - <<: *required_args target_env: {} + target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe" target_options: [] +inputs: &inputs "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\seeds" + tasks: - type: LibFuzzer <<: *target_args + inputs: *inputs + crashes: &crash "./crashes" readonly_inputs: [] check_fuzzer_help: true - - type: LibfuzzerRegression + - type: "Report" <<: *target_args - - - type: "LibfuzzerCrashReport" - <<: *target_args - input_queue: *crashes + input_queue: *crash + crashes: *crash + reports: "./reports" + unique_reports: "./unique_reports" + no_repro: "./no_repro" check_fuzzer_help: true - type: "Coverage" @@ -38,11 +35,4 @@ tasks: - "{input}" input_queue: *inputs readonly_inputs: [*inputs] - - # The analysis task is optional in the libfuzzer_basic template - # - type: Analysis - # <<: *target_args - # analysis: "REPLACE_ME" # The folder where you want the analysis results to be output - # analyzer_exe: "REPLACE_ME" - # analyzer_options: [] - # analyzer_env: {} + coverage: "./coverage" diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index 429e7b0e3b..3d3e2fafc8 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -3,13 +3,139 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::config::CommonConfig; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_exe, get_hash_map, get_synced_dir, CmdType, + SyncCountDirMonitor, UiEvent, ANALYSIS_DIR, ANALYZER_ENV, ANALYZER_EXE, ANALYZER_OPTIONS, + CRASHES_DIR, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TOOLS_DIR, + UNIQUE_REPORTS_DIR, + }, + tasks::{ + analysis::generic::{run as run_analysis, Config}, + config::CommonConfig, + }, +}; use anyhow::Result; use async_trait::async_trait; +use 
clap::{Arg, Command}; +use flume::Sender; use schemars::JsonSchema; +use storage_queue::QueueClient; use super::template::{RunContext, Template}; +pub fn build_analysis_config( + args: &clap::ArgMatches, + input_queue: Option, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_options = get_cmd_arg(CmdType::Target, args); + + let analyzer_exe = args + .get_one::(ANALYZER_EXE) + .cloned() + .ok_or_else(|| format_err!("expected {ANALYZER_EXE}"))?; + + let analyzer_options = args + .get_many::(ANALYZER_OPTIONS) + .unwrap_or_default() + .map(|x| x.to_string()) + .collect(); + + let analyzer_env = get_hash_map(args, ANALYZER_ENV)?; + let analysis = get_synced_dir(ANALYSIS_DIR, common.job_id, common.task_id, args)? + .monitor_count(&event_sender)?; + let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)?; + let crashes = if input_queue.is_none() { + get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)? + } else { + None + }; + let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let config = Config { + analyzer_exe, + analyzer_options, + analyzer_env, + target_exe, + target_options, + input_queue, + crashes, + analysis, + tools: Some(tools), + reports, + unique_reports, + no_repro, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_analysis_config(args, None, context.common_config.clone(), event_sender)?; + run_analysis(config).await +} + +pub fn build_shared_args(required_task: bool) -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV) + .long(TARGET_ENV) + .requires(TARGET_EXE) + .num_args(0..), + Arg::new(TARGET_OPTIONS) + .long(TARGET_OPTIONS) + .default_value("{input}") + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .long(CRASHES_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(ANALYZER_OPTIONS) + .long(ANALYZER_OPTIONS) + .requires(ANALYZER_EXE) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(ANALYZER_ENV) + .long(ANALYZER_ENV) + .requires(ANALYZER_EXE) + .num_args(0..), + Arg::new(TOOLS_DIR) + .long(TOOLS_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(ANALYZER_EXE) + .long(ANALYZER_EXE) + .requires(ANALYSIS_DIR) + .requires(CRASHES_DIR) + .required(required_task), + Arg::new(ANALYSIS_DIR) + .long(ANALYSIS_DIR) + .requires(ANALYZER_EXE) + .requires(CRASHES_DIR) + .required(required_task), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only generic analysis") + .args(&build_shared_args(true)) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Analysis { analyzer_exe: String, @@ -20,7 +146,7 @@ pub struct Analysis { input_queue: Option, crashes: Option, analysis: PathBuf, - tools: Option, + tools: PathBuf, reports: Option, unique_reports: Option, 
no_repro: Option, @@ -49,10 +175,9 @@ impl Template for Analysis { .and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()), analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?, - tools: self - .tools - .as_ref() - .and_then(|path| context.to_monitored_sync_dir("tools", path).ok()), + tools: context + .to_monitored_sync_dir("tools", self.tools.clone()) + .ok(), reports: self .reports diff --git a/src/agent/onefuzz-task/src/local/generic_crash_report.rs b/src/agent/onefuzz-task/src/local/generic_crash_report.rs index 347a8cac76..6b0e2fccad 100644 --- a/src/agent/onefuzz-task/src/local/generic_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/generic_crash_report.rs @@ -3,14 +3,150 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, + SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, CRASHES_DIR, + DISABLE_CHECK_DEBUGGER, DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, + TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, + }, + tasks::{ + config::CommonConfig, + report::generic::{Config, ReportTask}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; +use storage_queue::QueueClient; use super::template::{RunContext, Template}; +pub fn build_report_config( + args: &clap::ArgMatches, + input_queue: Option, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + + let crashes = Some(get_synced_dir( + CRASHES_DIR, + common.job_id, + common.task_id, + args, + )?) + .monitor_count(&event_sender)?; + let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let unique_reports = Some(get_synced_dir( + UNIQUE_REPORTS_DIR, + common.job_id, + common.task_id, + args, + )?) 
+ .monitor_count(&event_sender)?; + + let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); + + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default"); + + let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); + let check_asan_log = args.get_flag(CHECK_ASAN_LOG); + let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); + + let config = Config { + target_exe, + target_env, + target_options, + target_timeout, + check_asan_log, + check_debugger, + check_retry_count, + check_queue, + crashes, + minimized_stack_depth: None, + input_queue, + no_repro, + reports, + unique_reports, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; + ReportTask::new(config).managed_run().await +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .default_value("{input}") + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .long(CRASHES_DIR) + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(REPORTS_DIR) + .long(REPORTS_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(NO_REPRO_DIR) + .long(NO_REPRO_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(UNIQUE_REPORTS_DIR) + .long(UNIQUE_REPORTS_DIR) + .value_parser(value_parser!(PathBuf)) + .required(true), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)) + .default_value("30"), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + Arg::new(DISABLE_CHECK_QUEUE) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_QUEUE), + Arg::new(CHECK_ASAN_LOG) + .action(ArgAction::SetTrue) + .long(CHECK_ASAN_LOG), + Arg::new(DISABLE_CHECK_DEBUGGER) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_DEBUGGER), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only generic crash report") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct CrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/generic_generator.rs b/src/agent/onefuzz-task/src/local/generic_generator.rs index ae9f6a3cc6..823ba221d6 100644 --- a/src/agent/onefuzz-task/src/local/generic_generator.rs +++ b/src/agent/onefuzz-task/src/local/generic_generator.rs @@ -3,14 +3,154 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, + get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, + CRASHES_DIR, DISABLE_CHECK_DEBUGGER, GENERATOR_ENV, GENERATOR_EXE, GENERATOR_OPTIONS, + READONLY_INPUTS, RENAME_OUTPUT, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, + TOOLS_DIR, + }, + tasks::{ + config::CommonConfig, + fuzz::generator::{Config, GeneratorTask}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use 
flume::Sender; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; use super::template::{RunContext, Template}; +pub fn build_fuzz_config( + args: &clap::ArgMatches, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? + .monitor_count(&event_sender)?; + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_options = get_cmd_arg(CmdType::Target, args); + let target_env = get_cmd_env(CmdType::Target, args)?; + + let generator_exe = get_cmd_exe(CmdType::Generator, args)?; + let generator_options = get_cmd_arg(CmdType::Generator, args); + let generator_env = get_cmd_env(CmdType::Generator, args)?; + let readonly_inputs = get_synced_dirs(READONLY_INPUTS, common.job_id, common.task_id, args)? + .into_iter() + .map(|sd| sd.monitor_count(&event_sender)) + .collect::>>()?; + + let rename_output = args.get_flag(RENAME_OUTPUT); + let check_asan_log = args.get_flag(CHECK_ASAN_LOG); + let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); + + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default"); + + let target_timeout = Some( + args.get_one::(TARGET_TIMEOUT) + .copied() + .expect("has a default"), + ); + + let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let ensemble_sync_delay = None; + + let config = Config { + generator_exe, + generator_env, + generator_options, + readonly_inputs, + crashes, + tools, + target_exe, + target_env, + target_options, + target_timeout, + check_asan_log, + check_debugger, + check_retry_count, + rename_output, + ensemble_sync_delay, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_fuzz_config(args, context.common_config.clone(), event_sender)?; + GeneratorTask::new(config).run().await +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .default_value("{input}") + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(GENERATOR_EXE) + .long(GENERATOR_EXE) + .default_value("radamsa") + .required(true), + Arg::new(GENERATOR_ENV).long(GENERATOR_ENV).num_args(0..), + Arg::new(GENERATOR_OPTIONS) + .long(GENERATOR_OPTIONS) + .value_delimiter(' ') + .default_value("-H sha256 -o {generated_inputs}/input-%h.%s -n 100 -r {input_corpus}") + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .required(true) + .long(CRASHES_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(READONLY_INPUTS) + .required(true) + .num_args(1..) 
+ .value_parser(value_parser!(PathBuf)) + .long(READONLY_INPUTS), + Arg::new(TOOLS_DIR) + .long(TOOLS_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + Arg::new(CHECK_ASAN_LOG) + .action(ArgAction::SetTrue) + .long(CHECK_ASAN_LOG), + Arg::new(RENAME_OUTPUT) + .action(ArgAction::SetTrue) + .long(RENAME_OUTPUT), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)) + .default_value("30"), + Arg::new(DISABLE_CHECK_DEBUGGER) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_DEBUGGER), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only generator fuzzing task") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Generator { generator_exe: String, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer.rs b/src/agent/onefuzz-task/src/local/libfuzzer.rs index 433636be1c..56dff7dbe3 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer.rs @@ -1,19 +1,168 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::tasks::{ - config::CommonConfig, - fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, - utils::default_bool_true, +#[cfg(any(target_os = "linux", target_os = "windows"))] +use crate::{ + local::{common::COVERAGE_DIR, coverage, coverage::build_shared_args as build_coverage_args}, + tasks::coverage::generic::CoverageTask, +}; +use crate::{ + local::{ + common::{ + build_local_context, wait_for_dir, DirectoryMonitorQueue, UiEvent, ANALYZER_EXE, + REGRESSION_REPORTS_DIR, UNIQUE_REPORTS_DIR, + }, + generic_analysis::{build_analysis_config, build_shared_args as build_analysis_args}, + libfuzzer_crash_report::{build_report_config, build_shared_args as build_crash_args}, + libfuzzer_fuzz::{build_fuzz_config, build_shared_args as build_fuzz_args}, + libfuzzer_regression::{ + build_regression_config, build_shared_args as build_regression_args, + }, + }, + tasks::{ + analysis::generic::run as run_analysis, + config::CommonConfig, + fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, + regression::libfuzzer::LibFuzzerRegressionTask, + report::libfuzzer_report::ReportTask, + utils::default_bool_true, + }, }; use anyhow::Result; use async_trait::async_trait; -use onefuzz::syncdir::SyncedDir; +use clap::Command; +use flume::Sender; +use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles}; use schemars::JsonSchema; -use std::{collections::HashMap, path::PathBuf}; +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, +}; +use tokio::task::spawn; +use uuid::Uuid; use super::template::{RunContext, Template}; +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; + let crash_dir = fuzz_config + .crashes + .remote_url()? 
+ .as_file_path() + .expect("invalid crash dir remote location"); + + let fuzzer = LibFuzzerFuzzTask::new(fuzz_config)?; + let mut task_handles = vec![]; + + let fuzz_task = spawn(async move { fuzzer.run().await }); + + wait_for_dir(&crash_dir).await?; + + task_handles.push(fuzz_task); + + if args.contains_id(UNIQUE_REPORTS_DIR) { + let crash_report_input_monitor = + DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; + + let report_config = build_report_config( + args, + Some(crash_report_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender.clone(), + )?; + + let mut report = ReportTask::new(report_config); + let report_task = spawn(async move { report.managed_run().await }); + + task_handles.push(report_task); + task_handles.push(crash_report_input_monitor.handle); + } + + #[cfg(any(target_os = "linux", target_os = "windows"))] + if args.contains_id(COVERAGE_DIR) { + let coverage_input_monitor = + DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; + let coverage_config = coverage::build_coverage_config( + args, + true, + Some(coverage_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender.clone(), + )?; + + let mut coverage = CoverageTask::new(coverage_config); + let coverage_task = spawn(async move { coverage.run().await }); + + task_handles.push(coverage_task); + task_handles.push(coverage_input_monitor.handle); + } + + if args.contains_id(ANALYZER_EXE) { + let analysis_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?; + let analysis_config = build_analysis_config( + args, + Some(analysis_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender.clone(), + )?; + let analysis_task = spawn(async move { run_analysis(analysis_config).await }); + + task_handles.push(analysis_task); + task_handles.push(analysis_input_monitor.handle); + } + + if args.contains_id(REGRESSION_REPORTS_DIR) { + let regression_config = build_regression_config( + args, + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender, + )?; + let regression = LibFuzzerRegressionTask::new(regression_config); + let regression_task = spawn(async move { regression.run().await }); + task_handles.push(regression_task); + } + + try_wait_all_join_handles(task_handles).await?; + + Ok(()) +} + +pub fn args(name: &'static str) -> Command { + let mut app = Command::new(name).about("run a local libfuzzer & crash reporting task"); + + let mut used = HashSet::new(); + + for args in &[ + build_fuzz_args(), + build_crash_args(), + build_analysis_args(false), + #[cfg(any(target_os = "linux", target_os = "windows"))] + build_coverage_args(true), + build_regression_args(false), + ] { + for arg in args { + if used.insert(arg.get_id()) { + app = app.arg(arg); + } + } + } + + app +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibFuzzer { inputs: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs index 04ba4f9225..c1ab283575 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs @@ -3,13 +3,139 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ 
+ build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, + SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, CRASHES_DIR, + DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, + TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, + }, + tasks::{ + config::CommonConfig, + report::libfuzzer_report::{Config, ReportTask}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; +use storage_queue::QueueClient; use super::template::{RunContext, Template}; + +pub fn build_report_config( + args: &clap::ArgMatches, + input_queue: Option, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + + let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); + + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default"); + + let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); + + let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); + + let crashes = if input_queue.is_none() { crashes } else { None }; + + let config = Config { + target_exe, + target_env, + target_options, + target_timeout, + check_retry_count, + check_fuzzer_help, + minimized_stack_depth: None, + input_queue, + check_queue, + crashes, + reports, + no_repro, + unique_reports, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; + ReportTask::new(config).managed_run().await +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .long(CRASHES_DIR) + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(REPORTS_DIR) + .long(REPORTS_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(NO_REPRO_DIR) + .long(NO_REPRO_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(UNIQUE_REPORTS_DIR) + .long(UNIQUE_REPORTS_DIR) + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)) + .long(TARGET_TIMEOUT), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + Arg::new(DISABLE_CHECK_QUEUE) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_QUEUE), + Arg::new(CHECK_FUZZER_HELP) + 
.action(ArgAction::SetTrue) + .long(CHECK_FUZZER_HELP), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only libfuzzer crash report task") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerCrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs index 4b3e4ce58f..69c9df820b 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs @@ -3,15 +3,97 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, + get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, ANALYSIS_INPUTS, + ANALYSIS_UNIQUE_INPUTS, CHECK_FUZZER_HELP, INPUTS_DIR, PRESERVE_EXISTING_OUTPUTS, + TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, + }, + tasks::{ + config::CommonConfig, + merge::libfuzzer_merge::{spawn, Config}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use flume::Sender; use futures::future::OptionFuture; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; +use storage_queue::QueueClient; use super::template::{RunContext, Template}; +pub fn build_merge_config( + args: &clap::ArgMatches, + input_queue: Option, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); + let inputs = get_synced_dirs(ANALYSIS_INPUTS, common.job_id, common.task_id, args)? + .into_iter() + .map(|sd| sd.monitor_count(&event_sender)) + .collect::>>()?; + let unique_inputs = + get_synced_dir(ANALYSIS_UNIQUE_INPUTS, common.job_id, common.task_id, args)? 
+ .monitor_count(&event_sender)?; + let preserve_existing_outputs = args + .get_one::(PRESERVE_EXISTING_OUTPUTS) + .copied() + .unwrap_or_default(); + + let config = Config { + target_exe, + target_env, + target_options, + input_queue, + inputs, + unique_inputs, + preserve_existing_outputs, + check_fuzzer_help, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_merge_config(args, None, context.common_config.clone(), event_sender)?; + spawn(config).await +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CHECK_FUZZER_HELP) + .action(ArgAction::SetTrue) + .long(CHECK_FUZZER_HELP), + Arg::new(INPUTS_DIR) + .long(INPUTS_DIR) + .value_parser(value_parser!(PathBuf)) + .num_args(0..), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only libfuzzer crash report task") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerMerge { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs index 3fbb9f0bd6..501d2385e2 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs @@ -3,13 +3,145 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, + SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, COVERAGE_DIR, + CRASHES_DIR, NO_REPRO_DIR, REGRESSION_REPORTS_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, + TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, + }, + tasks::{ + config::CommonConfig, + regression::libfuzzer::{Config, LibFuzzerRegressionTask}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use flume::Sender; use schemars::JsonSchema; use super::template::{RunContext, Template}; +const REPORT_NAMES: &str = "report_names"; + +pub fn build_regression_config( + args: &clap::ArgMatches, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); + let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? + .monitor_count(&event_sender)?; + let regression_reports = + get_synced_dir(REGRESSION_REPORTS_DIR, common.job_id, common.task_id, args)? 
+ .monitor_count(&event_sender)?; + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default value"); + + let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let report_list: Option> = args + .get_many::(REPORT_NAMES) + .map(|x| x.cloned().collect()); + + let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); + + let config = Config { + target_exe, + target_env, + target_options, + target_timeout, + check_fuzzer_help, + check_retry_count, + crashes, + regression_reports, + reports, + no_repro, + unique_reports, + readonly_inputs: None, + report_list, + minimized_stack_depth: None, + common, + }; + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_regression_config(args, context.common_config.clone(), event_sender)?; + LibFuzzerRegressionTask::new(config).run().await +} + +pub fn build_shared_args(local_job: bool) -> Vec { + let mut args = vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(COVERAGE_DIR) + .required(!local_job) + .long(COVERAGE_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(CHECK_FUZZER_HELP) + .action(ArgAction::SetTrue) + .long(CHECK_FUZZER_HELP), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)), + Arg::new(CRASHES_DIR) + .long(CRASHES_DIR) + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(REGRESSION_REPORTS_DIR) + .long(REGRESSION_REPORTS_DIR) + .required(local_job) + .value_parser(value_parser!(PathBuf)), + Arg::new(REPORTS_DIR) + .long(REPORTS_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(NO_REPRO_DIR) + .long(NO_REPRO_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(UNIQUE_REPORTS_DIR) + .long(UNIQUE_REPORTS_DIR) + .value_parser(value_parser!(PathBuf)) + .required(true), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + ]; + if local_job { + args.push(Arg::new(REPORT_NAMES).long(REPORT_NAMES).num_args(0..)) + } + args +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only libfuzzer regression task") + .args(&build_shared_args(true)) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerRegression { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs index 5bef2347f7..9c6f16094e 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs @@ -1,14 +1,97 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
+use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_RETRY_COUNT, + TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, + }, + tasks::report::libfuzzer_report::{test_input, TestInputArgs}, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, Command}; +use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender).await?; + + let target_exe = args + .get_one::(TARGET_EXE) + .expect("marked as required"); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + let input = args + .get_one::("input") + .expect("marked as required"); + let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default value"); + + let extra_setup_dir = context.common_config.extra_setup_dir.as_deref(); + let extra_output_dir = context + .common_config + .extra_output + .as_ref() + .map(|x| x.local_path.as_path()); + + let config = TestInputArgs { + target_exe: target_exe.as_path(), + target_env: &target_env, + target_options: &target_options, + input_url: None, + input: input.as_path(), + job_id: context.common_config.job_id, + task_id: context.common_config.task_id, + target_timeout, + check_retry_count, + setup_dir: &context.common_config.setup_dir, + extra_setup_dir, + extra_output_dir, + minimized_stack_depth: None, + machine_identity: context.common_config.machine_identity, + }; + + let result = test_input(config).await?; + println!("{}", serde_json::to_string_pretty(&result)?); + Ok(()) +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).required(true), + Arg::new("input") + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .default_value("{input}") + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("test a libfuzzer application with a specific input") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerTestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/mod.rs b/src/agent/onefuzz-task/src/local/mod.rs index 385ff8ffcd..03d394bcdb 100644 --- a/src/agent/onefuzz-task/src/local/mod.rs +++ b/src/agent/onefuzz-task/src/local/mod.rs @@ -14,6 +14,7 @@ pub mod libfuzzer_fuzz; pub mod libfuzzer_merge; pub mod libfuzzer_regression; pub mod libfuzzer_test_input; +pub mod radamsa; pub mod template; pub mod test_input; pub mod tui; diff --git a/src/agent/onefuzz-task/src/local/radamsa.rs b/src/agent/onefuzz-task/src/local/radamsa.rs new file mode 100644 index 0000000000..4d84de027a --- /dev/null +++ b/src/agent/onefuzz-task/src/local/radamsa.rs @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use crate::{ + local::{ + common::{build_local_context, DirectoryMonitorQueue, UiEvent}, + generic_crash_report::{build_report_config, build_shared_args as build_crash_args}, + generic_generator::{build_fuzz_config, build_shared_args as build_fuzz_args}, + }, + tasks::{config::CommonConfig, fuzz::generator::GeneratorTask, report::generic::ReportTask}, +}; +use anyhow::{Context, Result}; +use clap::Command; +use flume::Sender; +use onefuzz::utils::try_wait_all_join_handles; +use std::collections::HashSet; +use tokio::task::spawn; +use uuid::Uuid; + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; + let crash_dir = fuzz_config + .crashes + .remote_url()? + .as_file_path() + .ok_or_else(|| format_err!("invalid crash directory"))?; + + tokio::fs::create_dir_all(&crash_dir) + .await + .with_context(|| { + format!( + "unable to create crashes directory: {}", + crash_dir.display() + ) + })?; + + let fuzzer = GeneratorTask::new(fuzz_config); + let fuzz_task = spawn(async move { fuzzer.run().await }); + + let crash_report_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir) + .await + .context("directory monitor failed")?; + let report_config = build_report_config( + args, + Some(crash_report_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender, + )?; + let report_task = spawn(async move { ReportTask::new(report_config).managed_run().await }); + + try_wait_all_join_handles(vec![ + fuzz_task, + report_task, + crash_report_input_monitor.handle, + ]) + .await?; + + Ok(()) +} + +pub fn args(name: &'static str) -> Command { + let mut app = Command::new(name).about("run a local generator & crash reporting job"); + + let mut used = HashSet::new(); + for args in &[build_fuzz_args(), build_crash_args()] { + for arg in args { + if used.insert(arg.get_id()) { + app = app.arg(arg); + } + } + } + + app +} diff --git a/src/agent/onefuzz-task/src/local/schema.json b/src/agent/onefuzz-task/src/local/schema.json index e5b00f6e17..0a1f128e67 100644 --- a/src/agent/onefuzz-task/src/local/schema.json +++ b/src/agent/onefuzz-task/src/local/schema.json @@ -126,6 +126,7 @@ "analyzer_options", "target_exe", "target_options", + "tools", "type" ], "properties": { @@ -181,10 +182,7 @@ } }, "tools": { - "type": [ - "string", - "null" - ] + "type": "string" }, "type": { "type": "string", @@ -895,4 +893,4 @@ ] } } -} \ No newline at end of file +} diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index 73ae6e5e48..b2e0c425ff 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -196,7 +196,6 @@ pub async fn launch( job_id: Uuid::new_v4(), instance_id: Uuid::new_v4(), heartbeat_queue: None, - job_result_queue: None, instance_telemetry_key: None, microsoft_telemetry_key: None, logs: None, @@ -242,10 +241,12 @@ mod test { .expect("Couldn't find checked-in schema.json") .replace("\r\n", "\n"); - if schema_str.replace('\n', "") != checked_in_schema.replace('\n', "") { - std::fs::write("src/local/new.schema.json", schema_str) - .expect("The schemas did not match but failed to write new schema to file."); - panic!("The checked-in local fuzzing schema did not match the generated schema. 
The generated schema can be found at src/local/new.schema.json"); - } + println!("{}", schema_str); + + assert_eq!( + schema_str.replace('\n', ""), + checked_in_schema.replace('\n', ""), + "The checked-in local fuzzing schema did not match the generated schema." + ); } } diff --git a/src/agent/onefuzz-task/src/local/test_input.rs b/src/agent/onefuzz-task/src/local/test_input.rs index b8027a7f41..4077bd08f8 100644 --- a/src/agent/onefuzz-task/src/local/test_input.rs +++ b/src/agent/onefuzz-task/src/local/test_input.rs @@ -1,8 +1,18 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_ASAN_LOG, + CHECK_RETRY_COUNT, DISABLE_CHECK_DEBUGGER, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, + TARGET_TIMEOUT, + }, + tasks::report::generic::{test_input, TestInputArgs}, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; @@ -10,6 +20,82 @@ use uuid::Uuid; use super::template::{RunContext, Template}; +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, false, event_sender).await?; + + let target_exe = args + .get_one::(TARGET_EXE) + .expect("is marked required"); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + let input = args + .get_one::("input") + .expect("is marked required"); + let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has default value"); + let check_asan_log = args.get_flag(CHECK_ASAN_LOG); + let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); + + let config = TestInputArgs { + target_exe: target_exe.as_path(), + target_env: &target_env, + target_options: &target_options, + input_url: None, + input: input.as_path(), + job_id: context.common_config.job_id, + task_id: context.common_config.task_id, + target_timeout, + check_retry_count, + setup_dir: &context.common_config.setup_dir, + extra_setup_dir: context.common_config.extra_setup_dir.as_deref(), + minimized_stack_depth: None, + check_asan_log, + check_debugger, + machine_identity: context.common_config.machine_identity.clone(), + }; + + let result = test_input(config).await?; + println!("{}", serde_json::to_string_pretty(&result)?); + Ok(()) +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).required(true), + Arg::new("input") + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .default_value("{input}") + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + Arg::new(CHECK_ASAN_LOG) + .action(ArgAction::SetTrue) + .long(CHECK_ASAN_LOG), + Arg::new(DISABLE_CHECK_DEBUGGER) + .action(ArgAction::SetTrue) + .long("disable_check_debugger"), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("test an application with a specific input") + .args(&build_shared_args()) +} + 
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct TestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs index 05c6c3d169..3ba068a614 100644 --- a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs @@ -65,8 +65,6 @@ pub async fn run(config: Config) -> Result<()> { tools.init_pull().await?; } - let job_result_client = config.common.init_job_result().await?; - // the tempdir is always created, however, the reports_path and // reports_monitor_future are only created if we have one of the three // report SyncedDir. The idea is that the option for where to write reports @@ -90,7 +88,6 @@ pub async fn run(config: Config) -> Result<()> { &config.unique_reports, &config.reports, &config.no_repro, - &job_result_client, ); ( Some(reports_dir.path().to_path_buf()), @@ -174,7 +171,7 @@ async fn poll_inputs( } message.delete().await?; } else { - debug!("no new candidate inputs found, sleeping"); + warn!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; } } diff --git a/src/agent/onefuzz-task/src/tasks/config.rs b/src/agent/onefuzz-task/src/tasks/config.rs index e29e0fd60d..0848379d73 100644 --- a/src/agent/onefuzz-task/src/tasks/config.rs +++ b/src/agent/onefuzz-task/src/tasks/config.rs @@ -14,7 +14,6 @@ use onefuzz::{ machine_id::MachineIdentity, syncdir::{SyncOperation, SyncedDir}, }; -use onefuzz_result::job_result::{init_job_result, TaskJobResultClient}; use onefuzz_telemetry::{ self as telemetry, Event::task_start, EventData, InstanceTelemetryKey, MicrosoftTelemetryKey, Role, @@ -51,8 +50,6 @@ pub struct CommonConfig { pub heartbeat_queue: Option, - pub job_result_queue: Option, - pub instance_telemetry_key: Option, pub microsoft_telemetry_key: Option, @@ -106,23 +103,6 @@ impl CommonConfig { None => Ok(None), } } - - pub async fn init_job_result(&self) -> Result> { - match &self.job_result_queue { - Some(url) => { - let result = init_job_result( - url.clone(), - self.task_id, - self.job_id, - self.machine_identity.machine_id, - self.machine_identity.machine_name.clone(), - ) - .await?; - Ok(Some(result)) - } - None => Ok(None), - } - } } #[derive(Debug, Deserialize)] diff --git a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs index 4fde9efb31..b112cfefbe 100644 --- a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs @@ -26,8 +26,6 @@ use onefuzz_file_format::coverage::{ binary::{v1::BinaryCoverageJson as BinaryCoverageJsonV1, BinaryCoverageJson}, source::{v1::SourceCoverageJson as SourceCoverageJsonV1, SourceCoverageJson}, }; -use onefuzz_result::job_result::JobResultData; -use onefuzz_result::job_result::{JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData}; use storage_queue::{Message, QueueClient}; use tokio::fs; @@ -116,7 +114,7 @@ impl CoverageTask { let allowlist = self.load_target_allowlist().await?; let heartbeat = self.config.common.init_heartbeat(None).await?; - let job_result = self.config.common.init_job_result().await?; + let mut seen_inputs = false; let target_exe_path = @@ -131,7 +129,6 @@ impl CoverageTask { coverage, allowlist, heartbeat, - job_result, target_exe.to_string(), )?; @@ -222,7 +219,6 @@ struct TaskContext<'a> { module_allowlist: AllowList, source_allowlist: 
Arc, heartbeat: Option, - job_result: Option, cache: Arc, } @@ -232,7 +228,6 @@ impl<'a> TaskContext<'a> { coverage: BinaryCoverage, allowlist: TargetAllowList, heartbeat: Option, - job_result: Option, target_exe: String, ) -> Result { let cache = DebugInfoCache::new(allowlist.source_files.clone()); @@ -252,7 +247,6 @@ impl<'a> TaskContext<'a> { module_allowlist: allowlist.modules, source_allowlist: Arc::new(allowlist.source_files), heartbeat, - job_result, cache: Arc::new(cache), }) } @@ -461,16 +455,7 @@ impl<'a> TaskContext<'a> { let s = CoverageStats::new(&coverage); event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate); metric!(coverage_data; 1.0; Covered = s.covered, Features = s.features, Rate = s.rate); - self.job_result - .send_direct( - JobResultData::CoverageData, - HashMap::from([ - ("covered".to_string(), s.covered as f64), - ("features".to_string(), s.features as f64), - ("rate".to_string(), s.rate), - ]), - ) - .await; + Ok(()) } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs index bd7511cac2..d9116a1ed2 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs @@ -73,7 +73,6 @@ impl GeneratorTask { } let hb_client = self.config.common.init_heartbeat(None).await?; - let jr_client = self.config.common.init_job_result().await?; for dir in &self.config.readonly_inputs { dir.init_pull().await?; @@ -85,10 +84,7 @@ impl GeneratorTask { self.config.ensemble_sync_delay, ); - let crash_dir_monitor = self - .config - .crashes - .monitor_results(new_result, false, &jr_client); + let crash_dir_monitor = self.config.crashes.monitor_results(new_result, false); let fuzzer = self.fuzzing_loop(hb_client); @@ -302,7 +298,6 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), - job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs index bfd9f3f5cc..4f8c67ae8e 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs @@ -1,11 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::tasks::{ - config::CommonConfig, - heartbeat::{HeartbeatSender, TaskHeartbeatClient}, - utils::default_bool_true, -}; +use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender, utils::default_bool_true}; use anyhow::{Context, Result}; use arraydeque::{ArrayDeque, Wrapping}; use async_trait::async_trait; @@ -16,7 +12,6 @@ use onefuzz::{ process::ExitStatus, syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir}, }; -use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{new_coverage, new_crashdump, new_result, runtime_stats}, EventData, @@ -131,31 +126,21 @@ where self.verify().await?; let hb_client = self.config.common.init_heartbeat(None).await?; - let jr_client = self.config.common.init_job_result().await?; // To be scheduled. 
let resync = self.continuous_sync_inputs(); - - let new_inputs = self - .config - .inputs - .monitor_results(new_coverage, true, &jr_client); - let new_crashes = self - .config - .crashes - .monitor_results(new_result, true, &jr_client); + let new_inputs = self.config.inputs.monitor_results(new_coverage, true); + let new_crashes = self.config.crashes.monitor_results(new_result, true); let new_crashdumps = async { if let Some(crashdumps) = &self.config.crashdumps { - crashdumps - .monitor_results(new_crashdump, true, &jr_client) - .await + crashdumps.monitor_results(new_crashdump, true).await } else { Ok(()) } }; let (stats_sender, stats_receiver) = mpsc::unbounded_channel(); - let report_stats = report_runtime_stats(stats_receiver, &hb_client, &jr_client); + let report_stats = report_runtime_stats(stats_receiver, hb_client); let fuzzers = self.run_fuzzers(Some(&stats_sender)); futures::try_join!( resync, @@ -198,7 +183,7 @@ where .inputs .local_path .parent() - .ok_or_else(|| anyhow!("invalid input path"))?; + .ok_or_else(|| anyhow!("Invalid input path"))?; let temp_path = task_dir.join(".temp"); tokio::fs::create_dir_all(&temp_path).await?; let temp_dir = tempdir_in(temp_path)?; @@ -516,7 +501,7 @@ impl TotalStats { self.execs_sec = self.worker_stats.values().map(|x| x.execs_sec).sum(); } - async fn report(&self, jr_client: &Option) { + fn report(&self) { event!( runtime_stats; EventData::Count = self.count, @@ -528,17 +513,6 @@ impl TotalStats { EventData::Count = self.count, EventData::ExecsSecond = self.execs_sec ); - if let Some(jr_client) = jr_client { - let _ = jr_client - .send_direct( - JobResultData::RuntimeStats, - HashMap::from([ - ("total_count".to_string(), self.count as f64), - ("execs_sec".to_string(), self.execs_sec), - ]), - ) - .await; - } } } @@ -568,8 +542,7 @@ impl Timer { // are approximating nearest-neighbor interpolation on the runtime stats time series. async fn report_runtime_stats( mut stats_channel: mpsc::UnboundedReceiver, - heartbeat_client: &Option, - jr_client: &Option, + heartbeat_client: impl HeartbeatSender, ) -> Result<()> { // Cache the last-reported stats for a given worker. 
// @@ -578,7 +551,7 @@ async fn report_runtime_stats( let mut total = TotalStats::default(); // report all zeros to start - total.report(jr_client).await; + total.report(); let timer = Timer::new(RUNTIME_STATS_PERIOD); @@ -587,10 +560,10 @@ async fn report_runtime_stats( Some(stats) = stats_channel.recv() => { heartbeat_client.alive(); total.update(stats); - total.report(jr_client).await + total.report() } _ = timer.wait() => { - total.report(jr_client).await + total.report() } } } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs index 3f00e20b8d..de1e1106ba 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs @@ -79,10 +79,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { remote_path: config.crashes.remote_path.clone(), }; crashes.init().await?; - - let jr_client = config.common.init_job_result().await?; - - let monitor_crashes = crashes.monitor_results(new_result, false, &jr_client); + let monitor_crashes = crashes.monitor_results(new_result, false); // setup crashdumps let (crashdump_dir, monitor_crashdumps) = { @@ -98,12 +95,9 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { }; let monitor_dir = crashdump_dir.clone(); - let monitor_jr_client = config.common.init_job_result().await?; let monitor_crashdumps = async move { if let Some(crashdumps) = monitor_dir { - crashdumps - .monitor_results(new_crashdump, false, &monitor_jr_client) - .await + crashdumps.monitor_results(new_crashdump, false).await } else { Ok(()) } @@ -135,13 +129,11 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { if let Some(no_repro) = &config.no_repro { no_repro.init().await?; } - let monitor_reports_future = monitor_reports( reports_dir.path(), &config.unique_reports, &config.reports, &config.no_repro, - &jr_client, ); let inputs = SyncedDir { @@ -164,7 +156,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { delay_with_jitter(delay).await; } } - let monitor_inputs = inputs.monitor_results(new_coverage, false, &jr_client); + let monitor_inputs = inputs.monitor_results(new_coverage, false); let inputs_sync_cancellation = CancellationToken::new(); // never actually cancelled let inputs_sync_task = inputs.continuous_sync(Pull, config.ensemble_sync_delay, &inputs_sync_cancellation); @@ -452,7 +444,6 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), - job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/heartbeat.rs b/src/agent/onefuzz-task/src/tasks/heartbeat.rs index e13b661909..515fa39d0c 100644 --- a/src/agent/onefuzz-task/src/tasks/heartbeat.rs +++ b/src/agent/onefuzz-task/src/tasks/heartbeat.rs @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
+use crate::onefuzz::heartbeat::HeartbeatClient; use anyhow::Result; -use onefuzz::heartbeat::HeartbeatClient; use reqwest::Url; use serde::{self, Deserialize, Serialize}; use std::time::Duration; diff --git a/src/agent/onefuzz-task/src/tasks/merge/generic.rs b/src/agent/onefuzz-task/src/tasks/merge/generic.rs index 3b6a2094d8..4f2e8234a8 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/generic.rs @@ -83,7 +83,7 @@ pub async fn spawn(config: &Config) -> Result<()> { } } } else { - debug!("no new candidate inputs found, sleeping"); + warn!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; }; } diff --git a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs index 2d53bc8c07..1c334b3f18 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs @@ -120,7 +120,7 @@ async fn process_message(config: &Config, input_queue: QueueClient) -> Result<() } Ok(()) } else { - debug!("no new candidate inputs found, sleeping"); + warn!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; Ok(()) } diff --git a/src/agent/onefuzz-task/src/tasks/regression/common.rs b/src/agent/onefuzz-task/src/tasks/regression/common.rs index b61a97df4c..60023cfa6e 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/common.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/common.rs @@ -2,14 +2,12 @@ // Licensed under the MIT License. use crate::tasks::{ - config::CommonConfig, heartbeat::{HeartbeatSender, TaskHeartbeatClient}, report::crash_report::{parse_report_file, CrashTestResult, RegressionReport}, }; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::syncdir::SyncedDir; -use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use std::path::PathBuf; @@ -26,7 +24,7 @@ pub trait RegressionHandler { /// Runs the regression task pub async fn run( - common_config: &CommonConfig, + heartbeat_client: Option, regression_reports: &SyncedDir, crashes: &SyncedDir, report_dirs: &[&SyncedDir], @@ -37,9 +35,6 @@ pub async fn run( info!("starting regression task"); regression_reports.init().await?; - let heartbeat_client = common_config.init_heartbeat(None).await?; - let job_result_client = common_config.init_job_result().await?; - handle_crash_reports( handler, crashes, @@ -47,7 +42,6 @@ pub async fn run( report_list, regression_reports, &heartbeat_client, - &job_result_client, ) .await .context("handling crash reports")?; @@ -58,7 +52,6 @@ pub async fn run( readonly_inputs, regression_reports, &heartbeat_client, - &job_result_client, ) .await .context("handling inputs")?; @@ -78,7 +71,6 @@ pub async fn handle_inputs( readonly_inputs: &SyncedDir, regression_reports: &SyncedDir, heartbeat_client: &Option, - job_result_client: &Option, ) -> Result<()> { readonly_inputs.init_pull().await?; let mut input_files = tokio::fs::read_dir(&readonly_inputs.local_path).await?; @@ -103,7 +95,7 @@ pub async fn handle_inputs( crash_test_result, original_crash_test_result: None, } - .save(None, regression_reports, job_result_client) + .save(None, regression_reports) .await? 
} @@ -117,7 +109,6 @@ pub async fn handle_crash_reports( report_list: &Option>, regression_reports: &SyncedDir, heartbeat_client: &Option, - job_result_client: &Option, ) -> Result<()> { // without crash report containers, skip this method if report_dirs.is_empty() { @@ -167,7 +158,7 @@ pub async fn handle_crash_reports( crash_test_result, original_crash_test_result: Some(original_crash_test_result), } - .save(Some(file_name), regression_reports, job_result_client) + .save(Some(file_name), regression_reports) .await? } } diff --git a/src/agent/onefuzz-task/src/tasks/regression/generic.rs b/src/agent/onefuzz-task/src/tasks/regression/generic.rs index 8570208d59..640e80db9a 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/generic.rs @@ -89,6 +89,7 @@ impl GenericRegressionTask { pub async fn run(&self) -> Result<()> { info!("Starting generic regression task"); + let heartbeat_client = self.config.common.init_heartbeat(None).await?; let mut report_dirs = vec![]; for dir in vec![ @@ -102,7 +103,7 @@ impl GenericRegressionTask { report_dirs.push(dir); } common::run( - &self.config.common, + heartbeat_client, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs index e65f46bb64..06dd7c00d9 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs @@ -103,8 +103,9 @@ impl LibFuzzerRegressionTask { report_dirs.push(dir); } + let heartbeat_client = self.config.common.init_heartbeat(None).await?; common::run( - &self.config.common, + heartbeat_client, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs index 290b98ccde..23171bc432 100644 --- a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs @@ -3,7 +3,6 @@ use anyhow::{Context, Result}; use onefuzz::{blob::BlobUrl, monitor::DirectoryMonitor, syncdir::SyncedDir}; -use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{ new_report, new_unable_to_reproduce, new_unique_report, regression_report, @@ -13,7 +12,6 @@ use onefuzz_telemetry::{ }; use serde::{Deserialize, Serialize}; use stacktrace_parser::CrashLog; -use std::collections::HashMap; use std::path::{Path, PathBuf}; use uuid::Uuid; @@ -113,7 +111,6 @@ impl RegressionReport { self, report_name: Option, regression_reports: &SyncedDir, - jr_client: &Option, ) -> Result<()> { let (event, name) = match &self.crash_test_result { CrashTestResult::CrashReport(report) => { @@ -129,15 +126,6 @@ impl RegressionReport { if upload_or_save_local(&self, &name, regression_reports).await? 
{ event!(event; EventData::Path = name.clone()); metric!(event; 1.0; EventData::Path = name.clone()); - - if let Some(jr_client) = jr_client { - let _ = jr_client - .send_direct( - JobResultData::NewRegressionReport, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } } Ok(()) } @@ -161,7 +149,6 @@ impl CrashTestResult { unique_reports: &Option, reports: &Option, no_repro: &Option, - jr_client: &Option, ) -> Result<()> { match self { Self::CrashReport(report) => { @@ -171,15 +158,6 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, unique_reports).await? { event!(new_unique_report; EventData::Path = report.unique_blob_name()); metric!(new_unique_report; 1.0; EventData::Path = report.unique_blob_name()); - - if let Some(jr_client) = jr_client { - let _ = jr_client - .send_direct( - JobResultData::NewUniqueReport, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } } } @@ -188,15 +166,6 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, reports).await? { event!(new_report; EventData::Path = report.blob_name()); metric!(new_report; 1.0; EventData::Path = report.blob_name()); - - if let Some(jr_client) = jr_client { - let _ = jr_client - .send_direct( - JobResultData::NewReport, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } } } } @@ -207,15 +176,6 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, no_repro).await? { event!(new_unable_to_reproduce; EventData::Path = report.blob_name()); metric!(new_unable_to_reproduce; 1.0; EventData::Path = report.blob_name()); - - if let Some(jr_client) = jr_client { - let _ = jr_client - .send_direct( - JobResultData::NoReproCrashingInput, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } } } } @@ -364,7 +324,6 @@ pub async fn monitor_reports( unique_reports: &Option, reports: &Option, no_crash: &Option, - jr_client: &Option, ) -> Result<()> { if unique_reports.is_none() && reports.is_none() && no_crash.is_none() { debug!("no report directories configured"); @@ -375,9 +334,7 @@ pub async fn monitor_reports( while let Some(file) = monitor.next_file().await? 
{ let result = parse_report_file(file).await?; - result - .save(unique_reports, reports, no_crash, jr_client) - .await?; + result.save(unique_reports, reports, no_crash).await?; } Ok(()) diff --git a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs index b8659845de..9b626a7d89 100644 --- a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs @@ -8,25 +8,25 @@ use std::{ sync::Arc, }; -use crate::tasks::report::crash_report::*; -use crate::tasks::report::dotnet::common::collect_exception_info; -use crate::tasks::{ - config::CommonConfig, - generic::input_poller::*, - heartbeat::{HeartbeatSender, TaskHeartbeatClient}, - utils::{default_bool_true, try_resolve_setup_relative_path}, -}; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::expand::Expand; use onefuzz::fs::set_executable; use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir}; -use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use storage_queue::{Message, QueueClient}; use tokio::fs; +use crate::tasks::report::crash_report::*; +use crate::tasks::report::dotnet::common::collect_exception_info; +use crate::tasks::{ + config::CommonConfig, + generic::input_poller::*, + heartbeat::{HeartbeatSender, TaskHeartbeatClient}, + utils::{default_bool_true, try_resolve_setup_relative_path}, +}; + const DOTNET_DUMP_TOOL_NAME: &str = "dotnet-dump"; #[derive(Debug, Deserialize)] @@ -114,18 +114,15 @@ impl DotnetCrashReportTask { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, - job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; - let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, - job_result_client, }) } @@ -263,7 +260,6 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, - &self.job_result_client, ) .await; diff --git a/src/agent/onefuzz-task/src/tasks/report/generic.rs b/src/agent/onefuzz-task/src/tasks/report/generic.rs index 8ad259f0a5..9088f98acc 100644 --- a/src/agent/onefuzz-task/src/tasks/report/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/generic.rs @@ -13,7 +13,6 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, input_tester::Tester, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; -use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -74,9 +73,7 @@ impl ReportTask { pub async fn managed_run(&mut self) -> Result<()> { info!("Starting generic crash report task"); let heartbeat_client = self.config.common.init_heartbeat(None).await?; - let job_result_client = self.config.common.init_job_result().await?; - let mut processor = - GenericReportProcessor::new(&self.config, heartbeat_client, job_result_client); + let mut processor = GenericReportProcessor::new(&self.config, heartbeat_client); #[allow(clippy::manual_flatten)] for entry in [ @@ -186,19 +183,13 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct GenericReportProcessor<'a> { config: &'a Config, heartbeat_client: Option, - job_result_client: Option, } impl<'a> GenericReportProcessor<'a> { - pub fn new( - config: &'a Config, - heartbeat_client: Option, - job_result_client: Option, - ) -> Self { + pub fn new(config: &'a Config, 
heartbeat_client: Option) -> Self { Self { config, heartbeat_client, - job_result_client, } } @@ -248,7 +239,6 @@ impl<'a> Processor for GenericReportProcessor<'a> { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, - &self.job_result_client, ) .await .context("saving report failed") diff --git a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs index 587ed2e3dc..f18f638fa3 100644 --- a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs @@ -13,7 +13,6 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, libfuzzer::LibFuzzer, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; -use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -197,18 +196,15 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, - job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; - let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, - job_result_client, }) } @@ -261,7 +257,6 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, - &self.job_result_client, ) .await } diff --git a/src/agent/onefuzz/Cargo.toml b/src/agent/onefuzz/Cargo.toml index 1f3c27985c..c096c8ddfc 100644 --- a/src/agent/onefuzz/Cargo.toml +++ b/src/agent/onefuzz/Cargo.toml @@ -44,7 +44,6 @@ tempfile = "3.7.0" process_control = "4.0" reqwest-retry = { path = "../reqwest-retry" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } -onefuzz-result = { path = "../onefuzz-result" } stacktrace-parser = { path = "../stacktrace-parser" } backoff = { version = "0.4", features = ["tokio"] } diff --git a/src/agent/onefuzz/src/blob/url.rs b/src/agent/onefuzz/src/blob/url.rs index 134b59dea0..f55ffbb23a 100644 --- a/src/agent/onefuzz/src/blob/url.rs +++ b/src/agent/onefuzz/src/blob/url.rs @@ -192,15 +192,10 @@ impl BlobContainerUrl { } pub fn as_path(&self, prefix: impl AsRef) -> Result { - match (self.account(), self.container()) { - (Some(account), Some(container)) => { - let mut path = PathBuf::new(); - path.push(account); - path.push(container); - Ok(prefix.as_ref().join(path)) - } - _ => bail!("Invalid container Url"), - } + let dir = self + .account() + .ok_or_else(|| anyhow!("Invalid container Url"))?; + Ok(prefix.as_ref().join(dir)) } } @@ -531,14 +526,4 @@ mod tests { "id:000000,sig:06,src:000000,op:havoc,rep:128" ); } - - #[test] - fn test_as_path() -> Result<()> { - let root = PathBuf::from(r"/onefuzz"); - let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?; - let path = url.as_path(root)?; - assert_eq!(PathBuf::from(r"/onefuzz/myaccount/mycontainer"), path); - - Ok(()) - } } diff --git a/src/agent/onefuzz/src/syncdir.rs b/src/agent/onefuzz/src/syncdir.rs index 2e73b7a694..0252099561 100644 --- a/src/agent/onefuzz/src/syncdir.rs +++ b/src/agent/onefuzz/src/syncdir.rs @@ -11,12 +11,10 @@ use crate::{ }; use anyhow::{Context, Result}; use dunce::canonicalize; -use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{Event, EventData}; use reqwest::{StatusCode, Url}; use reqwest_retry::{RetryCheck, SendRetry, DEFAULT_RETRY_PERIOD, 
MAX_RETRY_ATTEMPTS}; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; use std::{env::current_dir, path::PathBuf, str, time::Duration}; use tokio::{fs, select}; use tokio_util::sync::CancellationToken; @@ -243,7 +241,6 @@ impl SyncedDir { url: BlobContainerUrl, event: Event, ignore_dotfiles: bool, - jr_client: &Option, ) -> Result<()> { debug!("monitoring {}", path.display()); @@ -268,39 +265,9 @@ impl SyncedDir { if ignore_dotfiles && file_name_event_str.starts_with('.') { continue; } + event!(event.clone(); EventData::Path = file_name_event_str); metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str); - if let Some(jr_client) = jr_client { - match event { - Event::new_result => { - jr_client - .send_direct( - JobResultData::NewCrashingInput, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } - Event::new_coverage => { - jr_client - .send_direct( - JobResultData::CoverageData, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } - Event::new_crashdump => { - jr_client - .send_direct( - JobResultData::NewCrashDump, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } - _ => { - warn!("Unhandled job result!"); - } - } - } let destination = path.join(file_name); if let Err(err) = fs::copy(&item, &destination).await { let error_message = format!( @@ -338,29 +305,6 @@ impl SyncedDir { event!(event.clone(); EventData::Path = file_name_event_str); metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str); - if let Some(jr_client) = jr_client { - match event { - Event::new_result => { - jr_client - .send_direct( - JobResultData::NewCrashingInput, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } - Event::new_coverage => { - jr_client - .send_direct( - JobResultData::CoverageData, - HashMap::from([("count".to_string(), 1.0)]), - ) - .await; - } - _ => { - warn!("Unhandled job result!"); - } - } - } if let Err(err) = uploader.upload(item.clone()).await { let error_message = format!( "Couldn't upload file. path:{} dir:{} err:{:?}", @@ -392,12 +336,7 @@ impl SyncedDir { /// The intent of this is to support use cases where we usually want a directory /// to be initialized, but a user-supplied binary, (such as AFL) logically owns /// a directory, and may reset it. 
- pub async fn monitor_results( - &self, - event: Event, - ignore_dotfiles: bool, - job_result_client: &Option, - ) -> Result<()> { + pub async fn monitor_results(&self, event: Event, ignore_dotfiles: bool) -> Result<()> { if let Some(url) = self.remote_path.clone() { loop { debug!("waiting to monitor {}", self.local_path.display()); @@ -416,7 +355,6 @@ impl SyncedDir { url.clone(), event.clone(), ignore_dotfiles, - job_result_client, ) .await?; } diff --git a/src/deployment/bicep-templates/storageAccounts.bicep b/src/deployment/bicep-templates/storageAccounts.bicep index 27f2da21d8..6a96cea6a0 100644 --- a/src/deployment/bicep-templates/storageAccounts.bicep +++ b/src/deployment/bicep-templates/storageAccounts.bicep @@ -33,7 +33,7 @@ var storageAccountFuncQueuesParams = [ 'update-queue' 'webhooks' 'signalr-events' - 'job-result' + 'custom-metrics' ] var fileChangesQueueIndex = 0 diff --git a/src/integration-tests/integration-test.py b/src/integration-tests/integration-test.py index 15ffcfb9fe..057404ceff 100755 --- a/src/integration-tests/integration-test.py +++ b/src/integration-tests/integration-test.py @@ -88,7 +88,6 @@ class Integration(BaseModel): target_method: Optional[str] setup_dir: Optional[str] target_env: Optional[Dict[str, str]] - pool: PoolName TARGETS: Dict[str, Integration] = { @@ -98,7 +97,6 @@ class Integration(BaseModel): target_exe="fuzz.exe", inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, - pool="linux", ), "linux-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -126,7 +124,6 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], - pool="linux", ), "linux-libfuzzer-with-options": Integration( template=TemplateType.libfuzzer, @@ -140,7 +137,6 @@ class Integration(BaseModel): }, reboot_after_setup=True, fuzzing_target_options=["-runs=10000000"], - pool="linux", ), "linux-libfuzzer-dlopen": Integration( template=TemplateType.libfuzzer, @@ -154,7 +150,6 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, - pool="linux", ), "linux-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -168,7 +163,6 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, - pool="linux", ), "linux-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -186,7 +180,6 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, - pool="linux", ), "linux-libfuzzer-aarch64-crosscompile": Integration( template=TemplateType.libfuzzer_qemu_user, @@ -196,7 +189,6 @@ class Integration(BaseModel): use_setup=True, wait_for_files={ContainerType.inputs: 2, ContainerType.crashes: 1}, test_repro=False, - pool="linux", ), "linux-libfuzzer-rust": Integration( template=TemplateType.libfuzzer, @@ -204,7 +196,6 @@ class Integration(BaseModel): target_exe="fuzz_target_1", wait_for_files={ContainerType.unique_reports: 1, ContainerType.coverage: 1}, fuzzing_target_options=["--test:{extra_setup_dir}"], - pool="linux", ), "linux-trivial-crash": Integration( template=TemplateType.radamsa, @@ -213,7 +204,6 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, - pool="linux", ), "linux-trivial-crash-asan": Integration( template=TemplateType.radamsa, @@ -223,28 +213,6 @@ class Integration(BaseModel): wait_for_files={ContainerType.unique_reports: 1}, check_asan_log=True, disable_check_debugger=True, - pool="linux", - ), - # TODO: Don't install OMS 
extension on linux anymore - # TODO: Figure out why non mariner work is being scheduled to the mariner pool - "mariner-libfuzzer": Integration( - template=TemplateType.libfuzzer, - os=OS.linux, - target_exe="fuzz.exe", - inputs="seeds", - wait_for_files={ - ContainerType.unique_reports: 1, - ContainerType.coverage: 1, - ContainerType.inputs: 2, - ContainerType.extra_output: 1, - }, - reboot_after_setup=True, - inject_fake_regression=True, - fuzzing_target_options=[ - "--test:{extra_setup_dir}", - "--write_test_file={extra_output_dir}/test.txt", - ], - pool=PoolName("mariner") ), "windows-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -266,7 +234,6 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], - pool="windows", ), "windows-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -279,7 +246,6 @@ class Integration(BaseModel): ContainerType.coverage: 1, }, use_setup=True, - pool="windows", ), "windows-libfuzzer-load-library": Integration( template=TemplateType.libfuzzer, @@ -292,7 +258,6 @@ class Integration(BaseModel): ContainerType.coverage: 1, }, use_setup=True, - pool="windows", ), "windows-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -310,7 +275,6 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, - pool="windows", ), "windows-trivial-crash": Integration( template=TemplateType.radamsa, @@ -319,7 +283,6 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, - pool="windows", ), } @@ -388,7 +351,7 @@ def try_info_get(data: Any) -> None: self.inject_log(self.start_log_marker) for entry in os_list: - name = self.build_pool_name(entry.name) + name = PoolName(f"testpool-{entry.name}-{self.test_id}") self.logger.info("creating pool: %s:%s", entry.name, name) self.of.pools.create(name, entry) self.logger.info("creating scaleset for pool: %s", name) @@ -396,15 +359,6 @@ def try_info_get(data: Any) -> None: name, pool_size, region=region, initial_size=pool_size ) - name = self.build_pool_name("mariner") - self.logger.info("creating pool: %s:%s", "mariner", name) - self.of.pools.create(name, OS.linux) - self.logger.info("creating scaleset for pool: %s", name) - self.of.scalesets.create( - name, pool_size, region=region, initial_size=pool_size, image="MicrosoftCBLMariner:cbl-mariner:cbl-mariner-2-gen2:latest" - ) - - class UnmanagedPool: def __init__( self, @@ -606,9 +560,12 @@ def launch( ) -> List[UUID]: """Launch all of the fuzzing templates""" - pool = None + pools: Dict[OS, Pool] = {} if unmanaged_pool is not None: - pool = unmanaged_pool.pool_name + pools[unmanaged_pool.the_os] = self.of.pools.get(unmanaged_pool.pool_name) + else: + for pool in self.of.pools.list(): + pools[pool.os] = pool job_ids = [] @@ -619,8 +576,8 @@ def launch( if config.os not in os_list: continue - if pool is None: - pool = self.build_pool_name(config.pool) + if config.os not in pools.keys(): + raise Exception(f"No pool for target: {target} ,os: {config.os}") self.logger.info("launching: %s", target) @@ -644,9 +601,8 @@ def launch( setup = Directory(os.path.join(setup, config.nested_setup_dir)) job: Optional[Job] = None - job = self.build_job( - duration, pool, target, config, setup, target_exe, inputs + duration, pools, target, config, setup, target_exe, inputs ) if config.inject_fake_regression and job is not None: @@ -662,7 +618,7 @@ def launch( def build_job( self, duration: int, - 
pool: PoolName, + pools: Dict[OS, Pool], target: str, config: Integration, setup: Optional[Directory], @@ -678,7 +634,7 @@ def build_job( self.project, target, BUILD, - pool, + pools[config.os].name, target_exe=target_exe, inputs=inputs, setup_dir=setup, @@ -703,7 +659,7 @@ def build_job( self.project, target, BUILD, - pool, + pools[config.os].name, target_dll=File(config.target_exe), inputs=inputs, setup_dir=setup, @@ -719,7 +675,7 @@ def build_job( self.project, target, BUILD, - pool, + pools[config.os].name, inputs=inputs, target_exe=target_exe, duration=duration, @@ -732,7 +688,7 @@ def build_job( self.project, target, BUILD, - pool_name=pool, + pool_name=pools[config.os].name, target_exe=target_exe, inputs=inputs, setup_dir=setup, @@ -747,7 +703,7 @@ def build_job( self.project, target, BUILD, - pool_name=pool, + pool_name=pools[config.os].name, target_exe=target_exe, inputs=inputs, setup_dir=setup, @@ -1277,9 +1233,6 @@ def check_logs_for_errors(self) -> None: if seen_errors: raise Exception("logs included errors") - - def build_pool_name(self, os_type: str) -> PoolName: - return PoolName(f"testpool-{os_type}-{self.test_id}") class Run(Command): diff --git a/src/runtime-tools/linux/setup.sh b/src/runtime-tools/linux/setup.sh old mode 100644 new mode 100755 index 794e827f4d..f6859003b4 --- a/src/runtime-tools/linux/setup.sh +++ b/src/runtime-tools/linux/setup.sh @@ -18,14 +18,6 @@ export DOTNET_CLI_HOME="$DOTNET_ROOT" export ONEFUZZ_ROOT=/onefuzz export LLVM_SYMBOLIZER_PATH=/onefuzz/bin/llvm-symbolizer -# `logger` won't work on mariner unless we install this package first -if type yum > /dev/null 2> /dev/null; then - until yum install -y util-linux sudo; do - echo "yum failed. sleep 10s, then retrying" - sleep 10 - done -fi - logger "onefuzz: making directories" sudo mkdir -p /onefuzz/downloaded sudo chown -R $(whoami) /onefuzz @@ -142,53 +134,31 @@ if type apt > /dev/null 2> /dev/null; then sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH fi - # Needed to install dotnet + # Install dotnet until sudo apt install -y curl libicu-dev; do logger "apt failed, sleeping 10s then retrying" sleep 10 done -elif type yum > /dev/null 2> /dev/null; then - until yum install -y gdb gdb-gdbserver libunwind awk ca-certificates tar yum-utils shadow-utils cronie procps; do - echo "yum failed. sleep 10s, then retrying" - sleep 10 - done - - # Install updated Microsoft Open Management Infrastructure - github.com/microsoft/omi - yum-config-manager --add-repo=https://packages.microsoft.com/config/rhel/8/prod.repo 2>&1 | logger -s -i -t 'onefuzz-OMI-add-MS-repo' - yum install -y omi 2>&1 | logger -s -i -t 'onefuzz-OMI-install' + logger "downloading dotnet install" + curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' + chmod +x dotnet-install.sh - if ! [ -f ${LLVM_SYMBOLIZER_PATH} ]; then - until yum install -y llvm-12.0.1; do - echo "yum failed, sleeping 10s then retrying" - sleep 10 - done - - # If specifying symbolizer, exe name must be a "known symbolizer". - # Using `llvm-symbolizer` works for clang 8 .. 12. 
- sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH - fi + for version in "${DOTNET_VERSIONS[@]}"; do + logger "running dotnet install $version" + /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' + done + rm dotnet-install.sh + + logger "install dotnet tools" + pushd "$DOTNET_ROOT" + ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + "$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + "$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + "$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + popd fi -# Install dotnet -logger "downloading dotnet install" -curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' -chmod +x dotnet-install.sh - -for version in "${DOTNET_VERSIONS[@]}"; do - logger "running dotnet install $version" - /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' -done -rm dotnet-install.sh - -logger "install dotnet tools" -pushd "$DOTNET_ROOT" -ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -popd - if [ -v DOCKER_BUILD ]; then echo "building for docker" elif [ -d /etc/systemd/system ]; then From c8986aaa91838a8d701cf0e1099be6a103b8b736 Mon Sep 17 00:00:00 2001 From: Adam <103067949+AdamL-Microsoft@users.noreply.github.com> Date: Wed, 30 Aug 2023 13:53:49 -0700 Subject: [PATCH 20/20] Revert "Release 8.7.1 (hotfix) (#3459)" (#3468) This reverts commit c69deed50e81cc1805f6f82ebb10513a211cbbe2. 
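Note: the diff below adds the job-result reporting path (the QueueJobResult function, JobResultOperations, and the onefuzz-result crate). As a rough, hedged sketch of that path only — field names mirror the JobResult struct and the serde-tagged JobResultData enum introduced below, while the UUIDs, machine name, and counts are purely hypothetical — an entry placed on the "job-result" queue by the agent looks roughly like this (assumes serde_json as a dependency; not part of this patch):

// Illustrative sketch, not code from this patch: approximate JSON shape of a
// "job-result" queue message that QueueJobResult would consume.
fn main() {
    let example = serde_json::json!({
        "task_id": "00000000-0000-0000-0000-000000000001",  // hypothetical task id
        "job_id": "00000000-0000-0000-0000-000000000002",   // hypothetical job id
        "machine_id": "00000000-0000-0000-0000-000000000003",
        "machine_name": "example-node",                      // hypothetical node name
        "data": { "type": "NewCrashingInput" },              // JobResultData uses serde(tag = "type")
        "value": { "count": 1.0 }                            // HashMap<String, f64> of metric values
    });
    println!("{example}");
}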
--- .devcontainer/devcontainer.json | 3 +- .github/workflows/ci.yml | 2 + CHANGELOG.md | 6 - CURRENT_VERSION | 2 +- .../ApiService/Functions/QueueJobResult.cs | 60 +++++++ .../ApiService/OneFuzzTypes/Model.cs | 45 +++++ src/ApiService/ApiService/Program.cs | 1 + .../ApiService/onefuzzlib/Config.cs | 1 + .../ApiService/onefuzzlib/Extension.cs | 44 ++--- .../onefuzzlib/JobResultOperations.cs | 121 +++++++++++++ .../ApiService/onefuzzlib/OnefuzzContext.cs | 2 + .../IntegrationTests/Fakes/TestContext.cs | 3 + src/agent/Cargo.lock | 16 ++ src/agent/Cargo.toml | 1 + src/agent/onefuzz-agent/src/config.rs | 12 ++ src/agent/onefuzz-agent/src/log_uploader.rs | 29 ---- src/agent/onefuzz-agent/src/work.rs | 5 +- src/agent/onefuzz-result/Cargo.toml | 18 ++ src/agent/onefuzz-result/src/job_result.rs | 129 ++++++++++++++ src/agent/onefuzz-result/src/lib.rs | 4 + src/agent/onefuzz-task/Cargo.toml | 1 + src/agent/onefuzz-task/src/local/cmd.rs | 42 +---- src/agent/onefuzz-task/src/local/common.rs | 26 +-- .../example_templates/libfuzzer_basic.yml | 34 ++-- .../src/local/generic_analysis.rs | 137 +-------------- .../src/local/generic_crash_report.rs | 138 +-------------- .../src/local/generic_generator.rs | 142 +-------------- src/agent/onefuzz-task/src/local/libfuzzer.rs | 161 +----------------- .../src/local/libfuzzer_crash_report.rs | 128 +------------- .../onefuzz-task/src/local/libfuzzer_merge.rs | 84 +-------- .../src/local/libfuzzer_regression.rs | 134 +-------------- .../src/local/libfuzzer_test_input.rs | 83 --------- src/agent/onefuzz-task/src/local/mod.rs | 1 - src/agent/onefuzz-task/src/local/radamsa.rs | 78 --------- src/agent/onefuzz-task/src/local/schema.json | 8 +- src/agent/onefuzz-task/src/local/template.rs | 13 +- .../onefuzz-task/src/local/test_input.rs | 86 ---------- .../src/tasks/analysis/generic.rs | 5 +- src/agent/onefuzz-task/src/tasks/config.rs | 20 +++ .../src/tasks/coverage/generic.rs | 19 ++- .../onefuzz-task/src/tasks/fuzz/generator.rs | 7 +- .../src/tasks/fuzz/libfuzzer/common.rs | 49 ++++-- .../onefuzz-task/src/tasks/fuzz/supervisor.rs | 15 +- src/agent/onefuzz-task/src/tasks/heartbeat.rs | 2 +- .../onefuzz-task/src/tasks/merge/generic.rs | 2 +- .../src/tasks/merge/libfuzzer_merge.rs | 2 +- .../src/tasks/regression/common.rs | 15 +- .../src/tasks/regression/generic.rs | 3 +- .../src/tasks/regression/libfuzzer.rs | 3 +- .../src/tasks/report/crash_report.rs | 45 ++++- .../src/tasks/report/dotnet/generic.rs | 22 ++- .../onefuzz-task/src/tasks/report/generic.rs | 14 +- .../src/tasks/report/libfuzzer_report.rs | 5 + src/agent/onefuzz/Cargo.toml | 1 + src/agent/onefuzz/src/blob/url.rs | 23 ++- src/agent/onefuzz/src/syncdir.rs | 66 ++++++- .../bicep-templates/storageAccounts.bicep | 2 +- src/integration-tests/integration-test.py | 77 +++++++-- src/runtime-tools/linux/setup.sh | 64 +++++-- 59 files changed, 872 insertions(+), 1389 deletions(-) create mode 100644 src/ApiService/ApiService/Functions/QueueJobResult.cs create mode 100644 src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs create mode 100644 src/agent/onefuzz-result/Cargo.toml create mode 100644 src/agent/onefuzz-result/src/job_result.rs create mode 100644 src/agent/onefuzz-result/src/lib.rs delete mode 100644 src/agent/onefuzz-task/src/local/radamsa.rs mode change 100755 => 100644 src/runtime-tools/linux/setup.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 4059b3d7c1..d3fcf050ed 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,6 
+13,7 @@ "**/target/**": true }, "lldb.executable": "/usr/bin/lldb", + "dotnet.server.useOmnisharp": true, "omnisharp.enableEditorConfigSupport": true, "omnisharp.enableRoslynAnalyzers": true, "python.defaultInterpreterPath": "/workspaces/onefuzz/src/venv/bin/python", @@ -48,4 +49,4 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {} } -} +} \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 12824fd182..2dd85d7c92 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -542,9 +542,11 @@ jobs: mkdir -p artifacts/linux-libfuzzer mkdir -p artifacts/linux-libfuzzer-with-options + mkdir -p artifacts/mariner-libfuzzer (cd libfuzzer ; make ) cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer-with-options + cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/mariner-libfuzzer mkdir -p artifacts/linux-libfuzzer-regression (cd libfuzzer-regression ; make ) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d46ea2a0e..be4779ad77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,12 +7,6 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## 8.7.1 - -### Fixed - -* Service: Removed deprecated Azure retention policy setting that was causing scaleset deployment errors [#3452](https://github.com/microsoft/onefuzz/pull/3452) - ## 8.7.0 ### Added diff --git a/CURRENT_VERSION b/CURRENT_VERSION index efeecbe2c5..c0bcaebe8f 100644 --- a/CURRENT_VERSION +++ b/CURRENT_VERSION @@ -1 +1 @@ -8.7.1 \ No newline at end of file +8.7.0 \ No newline at end of file diff --git a/src/ApiService/ApiService/Functions/QueueJobResult.cs b/src/ApiService/ApiService/Functions/QueueJobResult.cs new file mode 100644 index 0000000000..d781a4d1e1 --- /dev/null +++ b/src/ApiService/ApiService/Functions/QueueJobResult.cs @@ -0,0 +1,60 @@ +using System.Text.Json; +using Microsoft.Azure.Functions.Worker; +using Microsoft.Extensions.Logging; +using Microsoft.OneFuzz.Service.OneFuzzLib.Orm; +namespace Microsoft.OneFuzz.Service.Functions; + + +public class QueueJobResult { + private readonly ILogger _log; + private readonly IOnefuzzContext _context; + + public QueueJobResult(ILogger logTracer, IOnefuzzContext context) { + _log = logTracer; + _context = context; + } + + [Function("QueueJobResult")] + public async Async.Task Run([QueueTrigger("job-result", Connection = "AzureWebJobsStorage")] string msg) { + + var _tasks = _context.TaskOperations; + var _jobs = _context.JobOperations; + + _log.LogInformation("job result: {msg}", msg); + var jr = JsonSerializer.Deserialize(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}"); + + var task = await _tasks.GetByTaskId(jr.TaskId); + if (task == null) { + _log.LogWarning("invalid {TaskId}", jr.TaskId); + return; + } + + var job = await _jobs.Get(task.JobId); + if (job == null) { + _log.LogWarning("invalid {JobId}", task.JobId); + return; + } + + JobResultData? 
data = jr.Data; + if (data == null) { + _log.LogWarning($"job result data is empty, throwing out: {jr}"); + return; + } + + var jobResultType = data.Type; + _log.LogInformation($"job result data type: {jobResultType}"); + + Dictionary value; + if (jr.Value.Count > 0) { + value = jr.Value; + } else { + _log.LogWarning($"job result data is empty, throwing out: {jr}"); + return; + } + + var jobResult = await _context.JobResultOperations.CreateOrUpdate(job.JobId, jobResultType, value); + if (!jobResult.IsOk) { + _log.LogError("failed to create or update with job result {JobId}", job.JobId); + } + } +} diff --git a/src/ApiService/ApiService/OneFuzzTypes/Model.cs b/src/ApiService/ApiService/OneFuzzTypes/Model.cs index e430c1448c..b839f52ddc 100644 --- a/src/ApiService/ApiService/OneFuzzTypes/Model.cs +++ b/src/ApiService/ApiService/OneFuzzTypes/Model.cs @@ -33,6 +33,19 @@ public enum HeartbeatType { TaskAlive, } +[SkipRename] +public enum JobResultType { + NewCrashingInput, + NoReproCrashingInput, + NewReport, + NewUniqueReport, + NewRegressionReport, + NewCoverage, + NewCrashDump, + CoverageData, + RuntimeStats, +} + public record HeartbeatData(HeartbeatType Type); public record TaskHeartbeatEntry( @@ -41,6 +54,16 @@ public record TaskHeartbeatEntry( Guid MachineId, HeartbeatData[] Data); +public record JobResultData(JobResultType Type); + +public record TaskJobResultEntry( + Guid TaskId, + Guid? JobId, + Guid MachineId, + JobResultData Data, + Dictionary Value + ); + public record NodeHeartbeatEntry(Guid NodeId, HeartbeatData[] Data); public record NodeCommandStopIfFree(); @@ -892,6 +915,27 @@ public record SecretAddress(Uri Url) : ISecret { public record SecretData(ISecret Secret) { } +public record JobResult( + [PartitionKey][RowKey] Guid JobId, + string Project, + string Name, + double NewCrashingInput = 0, + double NoReproCrashingInput = 0, + double NewReport = 0, + double NewUniqueReport = 0, + double NewRegressionReport = 0, + double NewCrashDump = 0, + double InstructionsCovered = 0, + double TotalInstructions = 0, + double CoverageRate = 0, + double IterationCount = 0 +) : EntityBase() { + public JobResult(Guid JobId, string Project, string Name) : this( + JobId: JobId, + Project: Project, + Name: Name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) { } +} + public record JobConfig( string Project, string Name, @@ -1056,6 +1100,7 @@ public record TaskUnitConfig( string? InstanceTelemetryKey, string? MicrosoftTelemetryKey, Uri HeartbeatQueue, + Uri JobResultQueue, Dictionary Tags ) { public Uri? inputQueue { get; set; } diff --git a/src/ApiService/ApiService/Program.cs b/src/ApiService/ApiService/Program.cs index f425c00809..d5ee30b45e 100644 --- a/src/ApiService/ApiService/Program.cs +++ b/src/ApiService/ApiService/Program.cs @@ -118,6 +118,7 @@ public static async Async.Task Main() { .AddScoped() .AddScoped() .AddScoped() + .AddScoped() .AddScoped() .AddScoped() .AddScoped() diff --git a/src/ApiService/ApiService/onefuzzlib/Config.cs b/src/ApiService/ApiService/onefuzzlib/Config.cs index 71af317348..872cedbc01 100644 --- a/src/ApiService/ApiService/onefuzzlib/Config.cs +++ b/src/ApiService/ApiService/onefuzzlib/Config.cs @@ -71,6 +71,7 @@ private static BlobContainerSasPermissions ConvertPermissions(ContainerPermissio InstanceTelemetryKey: _serviceConfig.ApplicationInsightsInstrumentationKey, MicrosoftTelemetryKey: _serviceConfig.OneFuzzTelemetry, HeartbeatQueue: await _queue.GetQueueSas("task-heartbeat", StorageType.Config, QueueSasPermissions.Add) ?? 
throw new Exception("unable to get heartbeat queue sas"), + JobResultQueue: await _queue.GetQueueSas("job-result", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get heartbeat queue sas"), Tags: task.Config.Tags ?? new Dictionary() ); diff --git a/src/ApiService/ApiService/onefuzzlib/Extension.cs b/src/ApiService/ApiService/onefuzzlib/Extension.cs index 7995026eca..fbf62dd343 100644 --- a/src/ApiService/ApiService/onefuzzlib/Extension.cs +++ b/src/ApiService/ApiService/onefuzzlib/Extension.cs @@ -36,7 +36,9 @@ public async Async.Task> GenericExtensions(AzureLocati var extensions = new List(); var instanceConfig = await _context.ConfigOperations.Fetch(); - extensions.Add(await MonitorExtension(region, vmOs)); + if (vmOs == Os.Windows) { + extensions.Add(await MonitorExtension(region)); + } var depenency = DependencyExtension(region, vmOs); if (depenency is not null) { @@ -329,37 +331,21 @@ public async Async.Task AgentConfig(AzureLocation region, Os throw new NotSupportedException($"unsupported OS: {vmOs}"); } - public async Async.Task MonitorExtension(AzureLocation region, Os vmOs) { + public async Async.Task MonitorExtension(AzureLocation region) { var settings = await _context.LogAnalytics.GetMonitorSettings(); var extensionSettings = JsonSerializer.Serialize(new { WorkspaceId = settings.Id }, _extensionSerializerOptions); var protectedExtensionSettings = JsonSerializer.Serialize(new { WorkspaceKey = settings.Key }, _extensionSerializerOptions); - if (vmOs == Os.Windows) { - return new VMExtensionWrapper { - Location = region, - Name = "OMSExtension", - TypePropertiesType = "MicrosoftMonitoringAgent", - Publisher = "Microsoft.EnterpriseCloud.Monitoring", - TypeHandlerVersion = "1.0", - AutoUpgradeMinorVersion = true, - Settings = new BinaryData(extensionSettings), - ProtectedSettings = new BinaryData(protectedExtensionSettings), - EnableAutomaticUpgrade = false - }; - } else if (vmOs == Os.Linux) { - return new VMExtensionWrapper { - Location = region, - Name = "OmsAgentForLinux", - TypePropertiesType = "OmsAgentForLinux", - Publisher = "Microsoft.EnterpriseCloud.Monitoring", - TypeHandlerVersion = "1.0", - AutoUpgradeMinorVersion = true, - Settings = new BinaryData(extensionSettings), - ProtectedSettings = new BinaryData(protectedExtensionSettings), - EnableAutomaticUpgrade = false - }; - } else { - throw new NotSupportedException($"unsupported os: {vmOs}"); - } + return new VMExtensionWrapper { + Location = region, + Name = "OMSExtension", + TypePropertiesType = "MicrosoftMonitoringAgent", + Publisher = "Microsoft.EnterpriseCloud.Monitoring", + TypeHandlerVersion = "1.0", + AutoUpgradeMinorVersion = true, + Settings = new BinaryData(extensionSettings), + ProtectedSettings = new BinaryData(protectedExtensionSettings), + EnableAutomaticUpgrade = false + }; } diff --git a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs new file mode 100644 index 0000000000..1166cf91d4 --- /dev/null +++ b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs @@ -0,0 +1,121 @@ +using ApiService.OneFuzzLib.Orm; +using Microsoft.Extensions.Logging; +using Polly; +namespace Microsoft.OneFuzz.Service; + +public interface IJobResultOperations : IOrm { + + Async.Task GetJobResult(Guid jobId); + Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue); + +} +public class JobResultOperations : Orm, IJobResultOperations { + + public JobResultOperations(ILogger log, 
IOnefuzzContext context) + : base(log, context) { + } + + public async Async.Task GetJobResult(Guid jobId) { + return await SearchByPartitionKeys(new[] { jobId.ToString() }).SingleOrDefaultAsync(); + } + + private JobResult UpdateResult(JobResult result, JobResultType type, Dictionary resultValue) { + + var newResult = result; + double newValue; + switch (type) { + case JobResultType.NewCrashingInput: + newValue = result.NewCrashingInput + resultValue["count"]; + newResult = result with { NewCrashingInput = newValue }; + break; + case JobResultType.NewReport: + newValue = result.NewReport + resultValue["count"]; + newResult = result with { NewReport = newValue }; + break; + case JobResultType.NewUniqueReport: + newValue = result.NewUniqueReport + resultValue["count"]; + newResult = result with { NewUniqueReport = newValue }; + break; + case JobResultType.NewRegressionReport: + newValue = result.NewRegressionReport + resultValue["count"]; + newResult = result with { NewRegressionReport = newValue }; + break; + case JobResultType.NewCrashDump: + newValue = result.NewCrashDump + resultValue["count"]; + newResult = result with { NewCrashDump = newValue }; + break; + case JobResultType.CoverageData: + double newCovered = resultValue["covered"]; + double newTotalCovered = resultValue["features"]; + double newCoverageRate = resultValue["rate"]; + newResult = result with { InstructionsCovered = newCovered, TotalInstructions = newTotalCovered, CoverageRate = newCoverageRate }; + break; + case JobResultType.RuntimeStats: + double newTotalIterations = resultValue["total_count"]; + newResult = result with { IterationCount = newTotalIterations }; + break; + default: + _logTracer.LogWarning($"Invalid Field {type}."); + break; + } + _logTracer.LogInformation($"Attempting to log new result: {newResult}"); + return newResult; + } + + private async Async.Task TryUpdate(Job job, JobResultType resultType, Dictionary resultValue) { + var jobId = job.JobId; + + var jobResult = await GetJobResult(jobId); + + if (jobResult == null) { + _logTracer.LogInformation("Creating new JobResult for Job {JobId}", jobId); + + var entry = new JobResult(JobId: jobId, Project: job.Config.Project, Name: job.Config.Name); + + jobResult = UpdateResult(entry, resultType, resultValue); + + var r = await Insert(jobResult); + if (!r.IsOk) { + throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); + } + _logTracer.LogInformation("created job result {JobId}", jobResult.JobId); + } else { + _logTracer.LogInformation("Updating existing JobResult entry for Job {JobId}", jobId); + + jobResult = UpdateResult(jobResult, resultType, resultValue); + + var r = await Update(jobResult); + if (!r.IsOk) { + throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); + } + _logTracer.LogInformation("updated job result {JobId}", jobResult.JobId); + } + + return true; + } + + public async Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue) { + + var job = await _context.JobOperations.Get(jobId); + if (job == null) { + return OneFuzzResultVoid.Error(ErrorCode.INVALID_REQUEST, "invalid job"); + } + + var success = false; + try { + _logTracer.LogInformation("attempt to update job result {JobId}", job.JobId); + var policy = Policy.Handle().WaitAndRetryAsync(50, _ => new TimeSpan(0, 0, 5)); + await policy.ExecuteAsync(async () => { + success = await TryUpdate(job, resultType, resultValue); + _logTracer.LogInformation("attempt {success}", success); + }); + return 
OneFuzzResultVoid.Ok; + } catch (Exception e) { + return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, new string[] { + $"Unexpected failure when attempting to update job result for {job.JobId}", + $"Exception: {e}" + }); + } + } +} + diff --git a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs index d877bfddbb..03c6322663 100644 --- a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs +++ b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs @@ -19,6 +19,7 @@ public interface IOnefuzzContext { IExtensions Extensions { get; } IIpOperations IpOperations { get; } IJobOperations JobOperations { get; } + IJobResultOperations JobResultOperations { get; } ILogAnalytics LogAnalytics { get; } INodeMessageOperations NodeMessageOperations { get; } INodeOperations NodeOperations { get; } @@ -83,6 +84,7 @@ public OnefuzzContext(IServiceProvider serviceProvider) { public IVmOperations VmOperations => _serviceProvider.GetRequiredService(); public ISecretsOperations SecretsOperations => _serviceProvider.GetRequiredService(); public IJobOperations JobOperations => _serviceProvider.GetRequiredService(); + public IJobResultOperations JobResultOperations => _serviceProvider.GetRequiredService(); public IScheduler Scheduler => _serviceProvider.GetRequiredService(); public IConfig Config => _serviceProvider.GetRequiredService(); public ILogAnalytics LogAnalytics => _serviceProvider.GetRequiredService(); diff --git a/src/ApiService/IntegrationTests/Fakes/TestContext.cs b/src/ApiService/IntegrationTests/Fakes/TestContext.cs index c46ff5fce7..66d121e746 100644 --- a/src/ApiService/IntegrationTests/Fakes/TestContext.cs +++ b/src/ApiService/IntegrationTests/Fakes/TestContext.cs @@ -32,6 +32,7 @@ public TestContext(IHttpClientFactory httpClientFactory, OneFuzzLoggerProvider p TaskOperations = new TaskOperations(provider.CreateLogger(), Cache, this); NodeOperations = new NodeOperations(provider.CreateLogger(), this); JobOperations = new JobOperations(provider.CreateLogger(), this); + JobResultOperations = new JobResultOperations(provider.CreateLogger(), this); NodeTasksOperations = new NodeTasksOperations(provider.CreateLogger(), this); TaskEventOperations = new TaskEventOperations(provider.CreateLogger(), this); NodeMessageOperations = new NodeMessageOperations(provider.CreateLogger(), this); @@ -57,6 +58,7 @@ public Async.Task InsertAll(params EntityBase[] objs) Node n => NodeOperations.Insert(n), Pool p => PoolOperations.Insert(p), Job j => JobOperations.Insert(j), + JobResult jr => JobResultOperations.Insert(jr), Repro r => ReproOperations.Insert(r), Scaleset ss => ScalesetOperations.Insert(ss), NodeTasks nt => NodeTasksOperations.Insert(nt), @@ -84,6 +86,7 @@ public Async.Task InsertAll(params EntityBase[] objs) public ITaskOperations TaskOperations { get; } public IJobOperations JobOperations { get; } + public IJobResultOperations JobResultOperations { get; } public INodeOperations NodeOperations { get; } public INodeTasksOperations NodeTasksOperations { get; } public ITaskEventOperations TaskEventOperations { get; } diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index a1d86e7d25..254684be97 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -2123,6 +2123,7 @@ dependencies = [ "log", "nix", "notify", + "onefuzz-result", "onefuzz-telemetry", "pete", "pretty_assertions", @@ -2197,6 +2198,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "onefuzz-result" +version = "0.2.0" +dependencies = [ + "anyhow", + 
"async-trait", + "log", + "onefuzz-telemetry", + "reqwest", + "serde", + "storage-queue", + "uuid", +] + [[package]] name = "onefuzz-task" version = "0.2.0" @@ -2226,6 +2241,7 @@ dependencies = [ "num_cpus", "onefuzz", "onefuzz-file-format", + "onefuzz-result", "onefuzz-telemetry", "path-absolutize", "pretty_assertions", diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index 2f4cea41a4..ce01ae880c 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,6 +10,7 @@ members = [ "onefuzz", "onefuzz-task", "onefuzz-agent", + "onefuzz-result", "onefuzz-file-format", "onefuzz-telemetry", "reqwest-retry", diff --git a/src/agent/onefuzz-agent/src/config.rs b/src/agent/onefuzz-agent/src/config.rs index 87edfb2c1b..fc623e72af 100644 --- a/src/agent/onefuzz-agent/src/config.rs +++ b/src/agent/onefuzz-agent/src/config.rs @@ -34,6 +34,8 @@ pub struct StaticConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -71,6 +73,8 @@ struct RawStaticConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -117,6 +121,7 @@ impl StaticConfig { microsoft_telemetry_key: config.microsoft_telemetry_key, instance_telemetry_key: config.instance_telemetry_key, heartbeat_queue: config.heartbeat_queue, + job_result_queue: config.job_result_queue, instance_id: config.instance_id, managed: config.managed, machine_identity, @@ -152,6 +157,12 @@ impl StaticConfig { None }; + let job_result_queue = if let Ok(key) = std::env::var("ONEFUZZ_JOB_RESULT") { + Some(Url::parse(&key)?) + } else { + None + }; + let instance_telemetry_key = if let Ok(key) = std::env::var("ONEFUZZ_INSTANCE_TELEMETRY_KEY") { Some(InstanceTelemetryKey::new(Uuid::parse_str(&key)?)) @@ -183,6 +194,7 @@ impl StaticConfig { instance_telemetry_key, microsoft_telemetry_key, heartbeat_queue, + job_result_queue, instance_id, managed: !is_unmanaged, machine_identity, diff --git a/src/agent/onefuzz-agent/src/log_uploader.rs b/src/agent/onefuzz-agent/src/log_uploader.rs index 6bccc0bef2..d424013421 100644 --- a/src/agent/onefuzz-agent/src/log_uploader.rs +++ b/src/agent/onefuzz-agent/src/log_uploader.rs @@ -210,32 +210,3 @@ async fn sync_file( blob_client.append_block(Body::from(f)).await?; Ok(len) } - -#[cfg(test)] -mod tests { - use std::io::Seek; - - use anyhow::Result; - use tokio::io::{AsyncReadExt, AsyncSeekExt}; - - #[allow(clippy::unused_io_amount)] - #[tokio::test] - #[ignore] - - async fn test_seek_behavior() -> Result<()> { - let path = "C:\\temp\\test.ps1"; - let mut std_file = std::fs::File::open(path)?; - std_file.seek(std::io::SeekFrom::Start(3))?; - - let mut tokio_file = tokio::fs::File::from_std(std_file); - - let buf = &mut [0u8; 5]; - tokio_file.read(buf).await?; - println!("******** buf {:?}", buf); - tokio_file.seek(std::io::SeekFrom::Start(0)).await?; - tokio_file.read(buf).await?; - println!("******** buf {:?}", buf); - - Ok(()) - } -} diff --git a/src/agent/onefuzz-agent/src/work.rs b/src/agent/onefuzz-agent/src/work.rs index b55d1d86a1..d0222744a7 100644 --- a/src/agent/onefuzz-agent/src/work.rs +++ b/src/agent/onefuzz-agent/src/work.rs @@ -91,7 +91,10 @@ impl WorkSet { pub fn setup_dir(&self) -> Result { let root = self.get_root_folder()?; - self.setup_url.as_path(root) + // Putting the setup container at the root for backward compatibility. 
+ // The path of setup folder can be used as part of the deduplication logic in the bug filing service + let setup_root = root.parent().ok_or_else(|| anyhow!("Invalid root"))?; + self.setup_url.as_path(setup_root) } pub fn extra_setup_dir(&self) -> Result> { diff --git a/src/agent/onefuzz-result/Cargo.toml b/src/agent/onefuzz-result/Cargo.toml new file mode 100644 index 0000000000..7c7de6615c --- /dev/null +++ b/src/agent/onefuzz-result/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "onefuzz-result" +version = "0.2.0" +authors = ["fuzzing@microsoft.com"] +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +anyhow = { version = "1.0", features = ["backtrace"] } +async-trait = "0.1" +reqwest = "0.11" +serde = "1.0" +storage-queue = { path = "../storage-queue" } +uuid = { version = "1.4", features = ["serde", "v4"] } +onefuzz-telemetry = { path = "../onefuzz-telemetry" } +log = "0.4" + diff --git a/src/agent/onefuzz-result/src/job_result.rs b/src/agent/onefuzz-result/src/job_result.rs new file mode 100644 index 0000000000..b305eca2cb --- /dev/null +++ b/src/agent/onefuzz-result/src/job_result.rs @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use anyhow::Result; +use async_trait::async_trait; +use onefuzz_telemetry::warn; +use reqwest::Url; +use serde::{self, Deserialize, Serialize}; +use std::collections::HashMap; +use std::sync::Arc; +use storage_queue::QueueClient; +use uuid::Uuid; + +#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] +#[serde(tag = "type")] +pub enum JobResultData { + NewCrashingInput, + NoReproCrashingInput, + NewReport, + NewUniqueReport, + NewRegressionReport, + NewCoverage, + NewCrashDump, + CoverageData, + RuntimeStats, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +struct JobResult { + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, + data: JobResultData, + value: HashMap, +} + +#[derive(Clone)] +pub struct TaskContext { + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, +} + +pub struct JobResultContext { + pub state: TaskContext, + pub queue_client: QueueClient, +} + +pub struct JobResultClient { + pub context: Arc>, +} + +impl JobResultClient { + pub fn init_job_result( + context: TaskContext, + queue_url: Url, + ) -> Result> + where + TaskContext: Send + Sync + 'static, + { + let context = Arc::new(JobResultContext { + state: context, + queue_client: QueueClient::new(queue_url)?, + }); + + Ok(JobResultClient { context }) + } +} + +pub type TaskJobResultClient = JobResultClient; + +pub async fn init_job_result( + queue_url: Url, + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, +) -> Result { + let hb = JobResultClient::init_job_result( + TaskContext { + task_id, + job_id, + machine_id, + machine_name, + }, + queue_url, + )?; + Ok(hb) +} + +#[async_trait] +pub trait JobResultSender { + async fn send_direct(&self, data: JobResultData, value: HashMap); +} + +#[async_trait] +impl JobResultSender for TaskJobResultClient { + async fn send_direct(&self, data: JobResultData, value: HashMap) { + let task_id = self.context.state.task_id; + let job_id = self.context.state.job_id; + let machine_id = self.context.state.machine_id; + let machine_name = self.context.state.machine_name.clone(); + + let _ = self + .context + .queue_client + .enqueue(JobResult { + task_id, + job_id, + machine_id, + machine_name, + data, + value, + }) + .await; + } +} + +#[async_trait] +impl JobResultSender for Option { + 
async fn send_direct(&self, data: JobResultData, value: HashMap) { + match self { + Some(client) => client.send_direct(data, value).await, + None => warn!("Failed to send Job Result message data from agent."), + } + } +} diff --git a/src/agent/onefuzz-result/src/lib.rs b/src/agent/onefuzz-result/src/lib.rs new file mode 100644 index 0000000000..dae666ca9a --- /dev/null +++ b/src/agent/onefuzz-result/src/lib.rs @@ -0,0 +1,4 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod job_result; diff --git a/src/agent/onefuzz-task/Cargo.toml b/src/agent/onefuzz-task/Cargo.toml index 0ad2f9aa4f..4e0bd381b0 100644 --- a/src/agent/onefuzz-task/Cargo.toml +++ b/src/agent/onefuzz-task/Cargo.toml @@ -39,6 +39,7 @@ serde_json = "1.0" serde_yaml = "0.9.21" onefuzz = { path = "../onefuzz" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } +onefuzz-result = { path = "../onefuzz-result" } path-absolutize = "3.1" reqwest-retry = { path = "../reqwest-retry" } strum = "0.25" diff --git a/src/agent/onefuzz-task/src/local/cmd.rs b/src/agent/onefuzz-task/src/local/cmd.rs index 80fd51a96b..eabefb71ee 100644 --- a/src/agent/onefuzz-task/src/local/cmd.rs +++ b/src/agent/onefuzz-task/src/local/cmd.rs @@ -3,11 +3,7 @@ #[cfg(any(target_os = "linux", target_os = "windows"))] use crate::local::coverage; -use crate::local::{ - common::add_common_config, generic_analysis, generic_crash_report, generic_generator, - libfuzzer, libfuzzer_crash_report, libfuzzer_fuzz, libfuzzer_merge, libfuzzer_regression, - libfuzzer_test_input, radamsa, test_input, tui::TerminalUi, -}; +use crate::local::{common::add_common_config, libfuzzer_fuzz, tui::TerminalUi}; use anyhow::{Context, Result}; use clap::{Arg, ArgAction, Command}; use std::time::Duration; @@ -21,19 +17,9 @@ use super::template; #[derive(Debug, PartialEq, Eq, EnumString, IntoStaticStr, EnumIter)] #[strum(serialize_all = "kebab-case")] enum Commands { - Radamsa, #[cfg(any(target_os = "linux", target_os = "windows"))] Coverage, LibfuzzerFuzz, - LibfuzzerMerge, - LibfuzzerCrashReport, - LibfuzzerTestInput, - LibfuzzerRegression, - Libfuzzer, - CrashReport, - Generator, - Analysis, - TestInput, Template, } @@ -68,23 +54,7 @@ pub async fn run(args: clap::ArgMatches) -> Result<()> { match command { #[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::run(&sub_args, event_sender).await, - Commands::Radamsa => radamsa::run(&sub_args, event_sender).await, - Commands::LibfuzzerCrashReport => { - libfuzzer_crash_report::run(&sub_args, event_sender).await - } Commands::LibfuzzerFuzz => libfuzzer_fuzz::run(&sub_args, event_sender).await, - Commands::LibfuzzerMerge => libfuzzer_merge::run(&sub_args, event_sender).await, - Commands::LibfuzzerTestInput => { - libfuzzer_test_input::run(&sub_args, event_sender).await - } - Commands::LibfuzzerRegression => { - libfuzzer_regression::run(&sub_args, event_sender).await - } - Commands::Libfuzzer => libfuzzer::run(&sub_args, event_sender).await, - Commands::CrashReport => generic_crash_report::run(&sub_args, event_sender).await, - Commands::Generator => generic_generator::run(&sub_args, event_sender).await, - Commands::Analysis => generic_analysis::run(&sub_args, event_sender).await, - Commands::TestInput => test_input::run(&sub_args, event_sender).await, Commands::Template => { let config = sub_args .get_one::("config") @@ -140,17 +110,7 @@ pub fn args(name: &'static str) -> Command { let app = match subcommand { #[cfg(any(target_os = "linux", target_os = "windows"))] 
Commands::Coverage => coverage::args(subcommand.into()), - Commands::Radamsa => radamsa::args(subcommand.into()), - Commands::LibfuzzerCrashReport => libfuzzer_crash_report::args(subcommand.into()), Commands::LibfuzzerFuzz => libfuzzer_fuzz::args(subcommand.into()), - Commands::LibfuzzerMerge => libfuzzer_merge::args(subcommand.into()), - Commands::LibfuzzerTestInput => libfuzzer_test_input::args(subcommand.into()), - Commands::LibfuzzerRegression => libfuzzer_regression::args(subcommand.into()), - Commands::Libfuzzer => libfuzzer::args(subcommand.into()), - Commands::CrashReport => generic_crash_report::args(subcommand.into()), - Commands::Generator => generic_generator::args(subcommand.into()), - Commands::Analysis => generic_analysis::args(subcommand.into()), - Commands::TestInput => test_input::args(subcommand.into()), Commands::Template => Command::new("template") .about("uses the template to generate a run") .args(vec![Arg::new("config") diff --git a/src/agent/onefuzz-task/src/local/common.rs b/src/agent/onefuzz-task/src/local/common.rs index f8d7949e80..17940d799f 100644 --- a/src/agent/onefuzz-task/src/local/common.rs +++ b/src/agent/onefuzz-task/src/local/common.rs @@ -26,20 +26,10 @@ pub const INPUTS_DIR: &str = "inputs_dir"; pub const CRASHES_DIR: &str = "crashes_dir"; pub const CRASHDUMPS_DIR: &str = "crashdumps_dir"; pub const TARGET_WORKERS: &str = "target_workers"; -pub const REPORTS_DIR: &str = "reports_dir"; -pub const NO_REPRO_DIR: &str = "no_repro_dir"; pub const TARGET_TIMEOUT: &str = "target_timeout"; -pub const CHECK_RETRY_COUNT: &str = "check_retry_count"; -pub const DISABLE_CHECK_QUEUE: &str = "disable_check_queue"; -pub const UNIQUE_REPORTS_DIR: &str = "unique_reports_dir"; pub const COVERAGE_DIR: &str = "coverage_dir"; pub const READONLY_INPUTS: &str = "readonly_inputs_dir"; -pub const CHECK_ASAN_LOG: &str = "check_asan_log"; -pub const TOOLS_DIR: &str = "tools_dir"; -pub const RENAME_OUTPUT: &str = "rename_output"; pub const CHECK_FUZZER_HELP: &str = "check_fuzzer_help"; -pub const DISABLE_CHECK_DEBUGGER: &str = "disable_check_debugger"; -pub const REGRESSION_REPORTS_DIR: &str = "regression_reports_dir"; pub const TARGET_EXE: &str = "target_exe"; pub const TARGET_ENV: &str = "target_env"; @@ -47,17 +37,6 @@ pub const TARGET_OPTIONS: &str = "target_options"; // pub const SUPERVISOR_EXE: &str = "supervisor_exe"; // pub const SUPERVISOR_ENV: &str = "supervisor_env"; // pub const SUPERVISOR_OPTIONS: &str = "supervisor_options"; -pub const GENERATOR_EXE: &str = "generator_exe"; -pub const GENERATOR_ENV: &str = "generator_env"; -pub const GENERATOR_OPTIONS: &str = "generator_options"; - -pub const ANALYZER_EXE: &str = "analyzer_exe"; -pub const ANALYZER_OPTIONS: &str = "analyzer_options"; -pub const ANALYZER_ENV: &str = "analyzer_env"; -pub const ANALYSIS_DIR: &str = "analysis_dir"; -pub const ANALYSIS_INPUTS: &str = "analysis_inputs"; -pub const ANALYSIS_UNIQUE_INPUTS: &str = "analysis_unique_inputs"; -pub const PRESERVE_EXISTING_OUTPUTS: &str = "preserve_existing_outputs"; pub const CREATE_JOB_DIR: &str = "create_job_dir"; @@ -66,7 +45,6 @@ const WAIT_FOR_DIR_DELAY: Duration = Duration::from_secs(1); pub enum CmdType { Target, - Generator, // Supervisor, } @@ -90,7 +68,6 @@ pub fn get_cmd_exe(cmd_type: CmdType, args: &clap::ArgMatches) -> Result let name = match cmd_type { CmdType::Target => TARGET_EXE, // CmdType::Supervisor => SUPERVISOR_EXE, - CmdType::Generator => GENERATOR_EXE, }; args.get_one::(name) @@ -102,7 +79,6 @@ pub fn get_cmd_arg(cmd_type: CmdType, 
args: &clap::ArgMatches) -> Vec { let name = match cmd_type { CmdType::Target => TARGET_OPTIONS, // CmdType::Supervisor => SUPERVISOR_OPTIONS, - CmdType::Generator => GENERATOR_OPTIONS, }; args.get_many::(name) @@ -115,7 +91,6 @@ pub fn get_cmd_env(cmd_type: CmdType, args: &clap::ArgMatches) -> Result TARGET_ENV, // CmdType::Supervisor => SUPERVISOR_ENV, - CmdType::Generator => GENERATOR_ENV, }; get_hash_map(args, env_name) } @@ -265,6 +240,7 @@ pub async fn build_local_context( }, instance_telemetry_key: None, heartbeat_queue: None, + job_result_queue: None, microsoft_telemetry_key: None, logs: None, min_available_memory_mb: 0, diff --git a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml index 7210893809..aba02c7991 100644 --- a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml +++ b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml @@ -5,28 +5,31 @@ # 2. Install llvm and export LLVM_SYMBOLIZER_PATH like we do in setup.sh +required_args: &required_args + target_exe: "REPLACE_ME" # The path to your target + inputs: &inputs "REPLACE_ME" # A folder containining your inputs + crashes: &crashes "REPLACE_ME" # The folder where you want the crashing inputs to be output + crashdumps: "REPLACE_ME" # The folder where you want the crash dumps to be output + coverage: "REPLACE_ME" # The folder where you want the code coverage to be output + regression_reports: "REPLACE_ME" # The folder where you want the regression reports to be output + target_args: &target_args + <<: *required_args target_env: {} - target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe" target_options: [] -inputs: &inputs "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\seeds" - tasks: - type: LibFuzzer <<: *target_args - inputs: *inputs - crashes: &crash "./crashes" readonly_inputs: [] check_fuzzer_help: true - - type: "Report" + - type: LibfuzzerRegression <<: *target_args - input_queue: *crash - crashes: *crash - reports: "./reports" - unique_reports: "./unique_reports" - no_repro: "./no_repro" + + - type: "LibfuzzerCrashReport" + <<: *target_args + input_queue: *crashes check_fuzzer_help: true - type: "Coverage" @@ -35,4 +38,11 @@ tasks: - "{input}" input_queue: *inputs readonly_inputs: [*inputs] - coverage: "./coverage" + + # The analysis task is optional in the libfuzzer_basic template + # - type: Analysis + # <<: *target_args + # analysis: "REPLACE_ME" # The folder where you want the analysis results to be output + # analyzer_exe: "REPLACE_ME" + # analyzer_options: [] + # analyzer_env: {} diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index 3d3e2fafc8..429e7b0e3b 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -3,139 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_exe, get_hash_map, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, ANALYSIS_DIR, ANALYZER_ENV, ANALYZER_EXE, ANALYZER_OPTIONS, - CRASHES_DIR, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TOOLS_DIR, - UNIQUE_REPORTS_DIR, - }, - tasks::{ - analysis::generic::{run as run_analysis, Config}, - config::CommonConfig, - }, -}; +use crate::tasks::config::CommonConfig; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, Command}; -use 
flume::Sender; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_analysis_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_options = get_cmd_arg(CmdType::Target, args); - - let analyzer_exe = args - .get_one::(ANALYZER_EXE) - .cloned() - .ok_or_else(|| format_err!("expected {ANALYZER_EXE}"))?; - - let analyzer_options = args - .get_many::(ANALYZER_OPTIONS) - .unwrap_or_default() - .map(|x| x.to_string()) - .collect(); - - let analyzer_env = get_hash_map(args, ANALYZER_ENV)?; - let analysis = get_synced_dir(ANALYSIS_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)?; - let crashes = if input_queue.is_none() { - get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)? - } else { - None - }; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let config = Config { - analyzer_exe, - analyzer_options, - analyzer_env, - target_exe, - target_options, - input_queue, - crashes, - analysis, - tools: Some(tools), - reports, - unique_reports, - no_repro, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_analysis_config(args, None, context.common_config.clone(), event_sender)?; - run_analysis(config).await -} - -pub fn build_shared_args(required_task: bool) -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV) - .long(TARGET_ENV) - .requires(TARGET_EXE) - .num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .default_value("{input}") - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(ANALYZER_OPTIONS) - .long(ANALYZER_OPTIONS) - .requires(ANALYZER_EXE) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(ANALYZER_ENV) - .long(ANALYZER_ENV) - .requires(ANALYZER_EXE) - .num_args(0..), - Arg::new(TOOLS_DIR) - .long(TOOLS_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(ANALYZER_EXE) - .long(ANALYZER_EXE) - .requires(ANALYSIS_DIR) - .requires(CRASHES_DIR) - .required(required_task), - Arg::new(ANALYSIS_DIR) - .long(ANALYSIS_DIR) - .requires(ANALYZER_EXE) - .requires(CRASHES_DIR) - .required(required_task), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generic analysis") - .args(&build_shared_args(true)) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Analysis { analyzer_exe: String, @@ -146,7 +20,7 @@ pub struct Analysis { input_queue: Option, crashes: Option, analysis: PathBuf, - tools: PathBuf, + tools: Option, reports: Option, unique_reports: Option, no_repro: Option, @@ -175,9 
+49,10 @@ impl Template for Analysis { .and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()), analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?, - tools: context - .to_monitored_sync_dir("tools", self.tools.clone()) - .ok(), + tools: self + .tools + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("tools", path).ok()), reports: self .reports diff --git a/src/agent/onefuzz-task/src/local/generic_crash_report.rs b/src/agent/onefuzz-task/src/local/generic_crash_report.rs index 6b0e2fccad..347a8cac76 100644 --- a/src/agent/onefuzz-task/src/local/generic_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/generic_crash_report.rs @@ -3,150 +3,14 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, CRASHES_DIR, - DISABLE_CHECK_DEBUGGER, DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, - TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - report::generic::{Config, ReportTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_report_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - - let crashes = Some(get_synced_dir( - CRASHES_DIR, - common.job_id, - common.task_id, - args, - )?) - .monitor_count(&event_sender)?; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let unique_reports = Some(get_synced_dir( - UNIQUE_REPORTS_DIR, - common.job_id, - common.task_id, - args, - )?) 
- .monitor_count(&event_sender)?; - - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_asan_log, - check_debugger, - check_retry_count, - check_queue, - crashes, - minimized_stack_depth: None, - input_queue, - no_repro, - reports, - unique_reports, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; - ReportTask::new(config).managed_run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .value_parser(value_parser!(PathBuf)) - .required(true), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .default_value("30"), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(DISABLE_CHECK_QUEUE) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_QUEUE), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_DEBUGGER), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generic crash report") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct CrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/generic_generator.rs b/src/agent/onefuzz-task/src/local/generic_generator.rs index 823ba221d6..ae9f6a3cc6 100644 --- a/src/agent/onefuzz-task/src/local/generic_generator.rs +++ b/src/agent/onefuzz-task/src/local/generic_generator.rs @@ -3,154 +3,14 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, - get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, - CRASHES_DIR, DISABLE_CHECK_DEBUGGER, GENERATOR_ENV, GENERATOR_EXE, GENERATOR_OPTIONS, - READONLY_INPUTS, RENAME_OUTPUT, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, - TOOLS_DIR, - }, - tasks::{ - config::CommonConfig, - fuzz::generator::{Config, GeneratorTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use 
flume::Sender; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; use super::template::{RunContext, Template}; -pub fn build_fuzz_config( - args: &clap::ArgMatches, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_options = get_cmd_arg(CmdType::Target, args); - let target_env = get_cmd_env(CmdType::Target, args)?; - - let generator_exe = get_cmd_exe(CmdType::Generator, args)?; - let generator_options = get_cmd_arg(CmdType::Generator, args); - let generator_env = get_cmd_env(CmdType::Generator, args)?; - let readonly_inputs = get_synced_dirs(READONLY_INPUTS, common.job_id, common.task_id, args)? - .into_iter() - .map(|sd| sd.monitor_count(&event_sender)) - .collect::>>()?; - - let rename_output = args.get_flag(RENAME_OUTPUT); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let target_timeout = Some( - args.get_one::(TARGET_TIMEOUT) - .copied() - .expect("has a default"), - ); - - let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let ensemble_sync_delay = None; - - let config = Config { - generator_exe, - generator_env, - generator_options, - readonly_inputs, - crashes, - tools, - target_exe, - target_env, - target_options, - target_timeout, - check_asan_log, - check_debugger, - check_retry_count, - rename_output, - ensemble_sync_delay, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_fuzz_config(args, context.common_config.clone(), event_sender)?; - GeneratorTask::new(config).run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(GENERATOR_EXE) - .long(GENERATOR_EXE) - .default_value("radamsa") - .required(true), - Arg::new(GENERATOR_ENV).long(GENERATOR_ENV).num_args(0..), - Arg::new(GENERATOR_OPTIONS) - .long(GENERATOR_OPTIONS) - .value_delimiter(' ') - .default_value("-H sha256 -o {generated_inputs}/input-%h.%s -n 100 -r {input_corpus}") - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .required(true) - .long(CRASHES_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(READONLY_INPUTS) - .required(true) - .num_args(1..) 
- .value_parser(value_parser!(PathBuf)) - .long(READONLY_INPUTS), - Arg::new(TOOLS_DIR) - .long(TOOLS_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(RENAME_OUTPUT) - .action(ArgAction::SetTrue) - .long(RENAME_OUTPUT), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .default_value("30"), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_DEBUGGER), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generator fuzzing task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Generator { generator_exe: String, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer.rs b/src/agent/onefuzz-task/src/local/libfuzzer.rs index 56dff7dbe3..433636be1c 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer.rs @@ -1,168 +1,19 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#[cfg(any(target_os = "linux", target_os = "windows"))] -use crate::{ - local::{common::COVERAGE_DIR, coverage, coverage::build_shared_args as build_coverage_args}, - tasks::coverage::generic::CoverageTask, -}; -use crate::{ - local::{ - common::{ - build_local_context, wait_for_dir, DirectoryMonitorQueue, UiEvent, ANALYZER_EXE, - REGRESSION_REPORTS_DIR, UNIQUE_REPORTS_DIR, - }, - generic_analysis::{build_analysis_config, build_shared_args as build_analysis_args}, - libfuzzer_crash_report::{build_report_config, build_shared_args as build_crash_args}, - libfuzzer_fuzz::{build_fuzz_config, build_shared_args as build_fuzz_args}, - libfuzzer_regression::{ - build_regression_config, build_shared_args as build_regression_args, - }, - }, - tasks::{ - analysis::generic::run as run_analysis, - config::CommonConfig, - fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, - regression::libfuzzer::LibFuzzerRegressionTask, - report::libfuzzer_report::ReportTask, - utils::default_bool_true, - }, +use crate::tasks::{ + config::CommonConfig, + fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, + utils::default_bool_true, }; use anyhow::Result; use async_trait::async_trait; -use clap::Command; -use flume::Sender; -use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles}; +use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; -use std::{ - collections::{HashMap, HashSet}, - path::PathBuf, -}; -use tokio::task::spawn; -use uuid::Uuid; +use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; - let crash_dir = fuzz_config - .crashes - .remote_url()? 
- .as_file_path() - .expect("invalid crash dir remote location"); - - let fuzzer = LibFuzzerFuzzTask::new(fuzz_config)?; - let mut task_handles = vec![]; - - let fuzz_task = spawn(async move { fuzzer.run().await }); - - wait_for_dir(&crash_dir).await?; - - task_handles.push(fuzz_task); - - if args.contains_id(UNIQUE_REPORTS_DIR) { - let crash_report_input_monitor = - DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; - - let report_config = build_report_config( - args, - Some(crash_report_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - - let mut report = ReportTask::new(report_config); - let report_task = spawn(async move { report.managed_run().await }); - - task_handles.push(report_task); - task_handles.push(crash_report_input_monitor.handle); - } - - #[cfg(any(target_os = "linux", target_os = "windows"))] - if args.contains_id(COVERAGE_DIR) { - let coverage_input_monitor = - DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; - let coverage_config = coverage::build_coverage_config( - args, - true, - Some(coverage_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - - let mut coverage = CoverageTask::new(coverage_config); - let coverage_task = spawn(async move { coverage.run().await }); - - task_handles.push(coverage_task); - task_handles.push(coverage_input_monitor.handle); - } - - if args.contains_id(ANALYZER_EXE) { - let analysis_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?; - let analysis_config = build_analysis_config( - args, - Some(analysis_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - let analysis_task = spawn(async move { run_analysis(analysis_config).await }); - - task_handles.push(analysis_task); - task_handles.push(analysis_input_monitor.handle); - } - - if args.contains_id(REGRESSION_REPORTS_DIR) { - let regression_config = build_regression_config( - args, - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender, - )?; - let regression = LibFuzzerRegressionTask::new(regression_config); - let regression_task = spawn(async move { regression.run().await }); - task_handles.push(regression_task); - } - - try_wait_all_join_handles(task_handles).await?; - - Ok(()) -} - -pub fn args(name: &'static str) -> Command { - let mut app = Command::new(name).about("run a local libfuzzer & crash reporting task"); - - let mut used = HashSet::new(); - - for args in &[ - build_fuzz_args(), - build_crash_args(), - build_analysis_args(false), - #[cfg(any(target_os = "linux", target_os = "windows"))] - build_coverage_args(true), - build_regression_args(false), - ] { - for arg in args { - if used.insert(arg.get_id()) { - app = app.arg(arg); - } - } - } - - app -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibFuzzer { inputs: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs index c1ab283575..04ba4f9225 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs @@ -3,139 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, 
get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, CRASHES_DIR, - DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - report::libfuzzer_report::{Config, ReportTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; - -pub fn build_report_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); - - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - - let crashes = if input_queue.is_none() { crashes } else { None }; - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_retry_count, - check_fuzzer_help, - minimized_stack_depth: None, - input_queue, - check_queue, - crashes, - reports, - no_repro, - unique_reports, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; - ReportTask::new(config).managed_run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .long(TARGET_TIMEOUT), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(DISABLE_CHECK_QUEUE) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_QUEUE), - Arg::new(CHECK_FUZZER_HELP) - 
.action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer crash report task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerCrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs index 69c9df820b..4b3e4ce58f 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs @@ -3,97 +3,15 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, - get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, ANALYSIS_INPUTS, - ANALYSIS_UNIQUE_INPUTS, CHECK_FUZZER_HELP, INPUTS_DIR, PRESERVE_EXISTING_OUTPUTS, - TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - }, - tasks::{ - config::CommonConfig, - merge::libfuzzer_merge::{spawn, Config}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_merge_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - let inputs = get_synced_dirs(ANALYSIS_INPUTS, common.job_id, common.task_id, args)? - .into_iter() - .map(|sd| sd.monitor_count(&event_sender)) - .collect::>>()?; - let unique_inputs = - get_synced_dir(ANALYSIS_UNIQUE_INPUTS, common.job_id, common.task_id, args)? 
- .monitor_count(&event_sender)?; - let preserve_existing_outputs = args - .get_one::(PRESERVE_EXISTING_OUTPUTS) - .copied() - .unwrap_or_default(); - - let config = Config { - target_exe, - target_env, - target_options, - input_queue, - inputs, - unique_inputs, - preserve_existing_outputs, - check_fuzzer_help, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_merge_config(args, None, context.common_config.clone(), event_sender)?; - spawn(config).await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CHECK_FUZZER_HELP) - .action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - Arg::new(INPUTS_DIR) - .long(INPUTS_DIR) - .value_parser(value_parser!(PathBuf)) - .num_args(0..), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer crash report task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerMerge { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs index 501d2385e2..3fbb9f0bd6 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs @@ -3,145 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, COVERAGE_DIR, - CRASHES_DIR, NO_REPRO_DIR, REGRESSION_REPORTS_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, - TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - regression::libfuzzer::{Config, LibFuzzerRegressionTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use schemars::JsonSchema; use super::template::{RunContext, Template}; -const REPORT_NAMES: &str = "report_names"; - -pub fn build_regression_config( - args: &clap::ArgMatches, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let regression_reports = - get_synced_dir(REGRESSION_REPORTS_DIR, common.job_id, common.task_id, args)? 
- .monitor_count(&event_sender)?; - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default value"); - - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let report_list: Option> = args - .get_many::(REPORT_NAMES) - .map(|x| x.cloned().collect()); - - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_fuzzer_help, - check_retry_count, - crashes, - regression_reports, - reports, - no_repro, - unique_reports, - readonly_inputs: None, - report_list, - minimized_stack_depth: None, - common, - }; - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_regression_config(args, context.common_config.clone(), event_sender)?; - LibFuzzerRegressionTask::new(config).run().await -} - -pub fn build_shared_args(local_job: bool) -> Vec { - let mut args = vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(COVERAGE_DIR) - .required(!local_job) - .long(COVERAGE_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(CHECK_FUZZER_HELP) - .action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REGRESSION_REPORTS_DIR) - .long(REGRESSION_REPORTS_DIR) - .required(local_job) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .value_parser(value_parser!(PathBuf)) - .required(true), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - ]; - if local_job { - args.push(Arg::new(REPORT_NAMES).long(REPORT_NAMES).num_args(0..)) - } - args -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer regression task") - .args(&build_shared_args(true)) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerRegression { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs index 9c6f16094e..5bef2347f7 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs @@ -1,97 +1,14 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_RETRY_COUNT, - TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, - }, - tasks::report::libfuzzer_report::{test_input, TestInputArgs}, -}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, Command}; -use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender).await?; - - let target_exe = args - .get_one::(TARGET_EXE) - .expect("marked as required"); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let input = args - .get_one::("input") - .expect("marked as required"); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default value"); - - let extra_setup_dir = context.common_config.extra_setup_dir.as_deref(); - let extra_output_dir = context - .common_config - .extra_output - .as_ref() - .map(|x| x.local_path.as_path()); - - let config = TestInputArgs { - target_exe: target_exe.as_path(), - target_env: &target_env, - target_options: &target_options, - input_url: None, - input: input.as_path(), - job_id: context.common_config.job_id, - task_id: context.common_config.task_id, - target_timeout, - check_retry_count, - setup_dir: &context.common_config.setup_dir, - extra_setup_dir, - extra_output_dir, - minimized_stack_depth: None, - machine_identity: context.common_config.machine_identity, - }; - - let result = test_input(config).await?; - println!("{}", serde_json::to_string_pretty(&result)?); - Ok(()) -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).required(true), - Arg::new("input") - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("test a libfuzzer application with a specific input") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerTestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/mod.rs b/src/agent/onefuzz-task/src/local/mod.rs index 03d394bcdb..385ff8ffcd 100644 --- a/src/agent/onefuzz-task/src/local/mod.rs +++ b/src/agent/onefuzz-task/src/local/mod.rs @@ -14,7 +14,6 @@ pub mod libfuzzer_fuzz; pub mod libfuzzer_merge; pub mod libfuzzer_regression; pub mod libfuzzer_test_input; -pub mod radamsa; pub mod template; pub mod test_input; pub mod tui; diff --git a/src/agent/onefuzz-task/src/local/radamsa.rs b/src/agent/onefuzz-task/src/local/radamsa.rs deleted file mode 100644 index 4d84de027a..0000000000 --- a/src/agent/onefuzz-task/src/local/radamsa.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -use crate::{ - local::{ - common::{build_local_context, DirectoryMonitorQueue, UiEvent}, - generic_crash_report::{build_report_config, build_shared_args as build_crash_args}, - generic_generator::{build_fuzz_config, build_shared_args as build_fuzz_args}, - }, - tasks::{config::CommonConfig, fuzz::generator::GeneratorTask, report::generic::ReportTask}, -}; -use anyhow::{Context, Result}; -use clap::Command; -use flume::Sender; -use onefuzz::utils::try_wait_all_join_handles; -use std::collections::HashSet; -use tokio::task::spawn; -use uuid::Uuid; - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; - let crash_dir = fuzz_config - .crashes - .remote_url()? - .as_file_path() - .ok_or_else(|| format_err!("invalid crash directory"))?; - - tokio::fs::create_dir_all(&crash_dir) - .await - .with_context(|| { - format!( - "unable to create crashes directory: {}", - crash_dir.display() - ) - })?; - - let fuzzer = GeneratorTask::new(fuzz_config); - let fuzz_task = spawn(async move { fuzzer.run().await }); - - let crash_report_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir) - .await - .context("directory monitor failed")?; - let report_config = build_report_config( - args, - Some(crash_report_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender, - )?; - let report_task = spawn(async move { ReportTask::new(report_config).managed_run().await }); - - try_wait_all_join_handles(vec![ - fuzz_task, - report_task, - crash_report_input_monitor.handle, - ]) - .await?; - - Ok(()) -} - -pub fn args(name: &'static str) -> Command { - let mut app = Command::new(name).about("run a local generator & crash reporting job"); - - let mut used = HashSet::new(); - for args in &[build_fuzz_args(), build_crash_args()] { - for arg in args { - if used.insert(arg.get_id()) { - app = app.arg(arg); - } - } - } - - app -} diff --git a/src/agent/onefuzz-task/src/local/schema.json b/src/agent/onefuzz-task/src/local/schema.json index 0a1f128e67..e5b00f6e17 100644 --- a/src/agent/onefuzz-task/src/local/schema.json +++ b/src/agent/onefuzz-task/src/local/schema.json @@ -126,7 +126,6 @@ "analyzer_options", "target_exe", "target_options", - "tools", "type" ], "properties": { @@ -182,7 +181,10 @@ } }, "tools": { - "type": "string" + "type": [ + "string", + "null" + ] }, "type": { "type": "string", @@ -893,4 +895,4 @@ ] } } -} +} \ No newline at end of file diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index b2e0c425ff..73ae6e5e48 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -196,6 +196,7 @@ pub async fn launch( job_id: Uuid::new_v4(), instance_id: Uuid::new_v4(), heartbeat_queue: None, + job_result_queue: None, instance_telemetry_key: None, microsoft_telemetry_key: None, logs: None, @@ -241,12 +242,10 @@ mod test { .expect("Couldn't find checked-in schema.json") .replace("\r\n", "\n"); - println!("{}", schema_str); - - assert_eq!( - schema_str.replace('\n', ""), - checked_in_schema.replace('\n', ""), - "The checked-in local fuzzing schema did not match the generated schema." 
- ); + if schema_str.replace('\n', "") != checked_in_schema.replace('\n', "") { + std::fs::write("src/local/new.schema.json", schema_str) + .expect("The schemas did not match but failed to write new schema to file."); + panic!("The checked-in local fuzzing schema did not match the generated schema. The generated schema can be found at src/local/new.schema.json"); + } } } diff --git a/src/agent/onefuzz-task/src/local/test_input.rs b/src/agent/onefuzz-task/src/local/test_input.rs index 4077bd08f8..b8027a7f41 100644 --- a/src/agent/onefuzz-task/src/local/test_input.rs +++ b/src/agent/onefuzz-task/src/local/test_input.rs @@ -1,18 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_ASAN_LOG, - CHECK_RETRY_COUNT, DISABLE_CHECK_DEBUGGER, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - TARGET_TIMEOUT, - }, - tasks::report::generic::{test_input, TestInputArgs}, -}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; @@ -20,82 +10,6 @@ use uuid::Uuid; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, false, event_sender).await?; - - let target_exe = args - .get_one::(TARGET_EXE) - .expect("is marked required"); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let input = args - .get_one::("input") - .expect("is marked required"); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has default value"); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let config = TestInputArgs { - target_exe: target_exe.as_path(), - target_env: &target_env, - target_options: &target_options, - input_url: None, - input: input.as_path(), - job_id: context.common_config.job_id, - task_id: context.common_config.task_id, - target_timeout, - check_retry_count, - setup_dir: &context.common_config.setup_dir, - extra_setup_dir: context.common_config.extra_setup_dir.as_deref(), - minimized_stack_depth: None, - check_asan_log, - check_debugger, - machine_identity: context.common_config.machine_identity.clone(), - }; - - let result = test_input(config).await?; - println!("{}", serde_json::to_string_pretty(&result)?); - Ok(()) -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).required(true), - Arg::new("input") - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long("disable_check_debugger"), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - 
.about("test an application with a specific input") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct TestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs index 3ba068a614..05c6c3d169 100644 --- a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs @@ -65,6 +65,8 @@ pub async fn run(config: Config) -> Result<()> { tools.init_pull().await?; } + let job_result_client = config.common.init_job_result().await?; + // the tempdir is always created, however, the reports_path and // reports_monitor_future are only created if we have one of the three // report SyncedDir. The idea is that the option for where to write reports @@ -88,6 +90,7 @@ pub async fn run(config: Config) -> Result<()> { &config.unique_reports, &config.reports, &config.no_repro, + &job_result_client, ); ( Some(reports_dir.path().to_path_buf()), @@ -171,7 +174,7 @@ async fn poll_inputs( } message.delete().await?; } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; } } diff --git a/src/agent/onefuzz-task/src/tasks/config.rs b/src/agent/onefuzz-task/src/tasks/config.rs index 0848379d73..e29e0fd60d 100644 --- a/src/agent/onefuzz-task/src/tasks/config.rs +++ b/src/agent/onefuzz-task/src/tasks/config.rs @@ -14,6 +14,7 @@ use onefuzz::{ machine_id::MachineIdentity, syncdir::{SyncOperation, SyncedDir}, }; +use onefuzz_result::job_result::{init_job_result, TaskJobResultClient}; use onefuzz_telemetry::{ self as telemetry, Event::task_start, EventData, InstanceTelemetryKey, MicrosoftTelemetryKey, Role, @@ -50,6 +51,8 @@ pub struct CommonConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_telemetry_key: Option, pub microsoft_telemetry_key: Option, @@ -103,6 +106,23 @@ impl CommonConfig { None => Ok(None), } } + + pub async fn init_job_result(&self) -> Result> { + match &self.job_result_queue { + Some(url) => { + let result = init_job_result( + url.clone(), + self.task_id, + self.job_id, + self.machine_identity.machine_id, + self.machine_identity.machine_name.clone(), + ) + .await?; + Ok(Some(result)) + } + None => Ok(None), + } + } } #[derive(Debug, Deserialize)] diff --git a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs index b112cfefbe..4fde9efb31 100644 --- a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs @@ -26,6 +26,8 @@ use onefuzz_file_format::coverage::{ binary::{v1::BinaryCoverageJson as BinaryCoverageJsonV1, BinaryCoverageJson}, source::{v1::SourceCoverageJson as SourceCoverageJsonV1, SourceCoverageJson}, }; +use onefuzz_result::job_result::JobResultData; +use onefuzz_result::job_result::{JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData}; use storage_queue::{Message, QueueClient}; use tokio::fs; @@ -114,7 +116,7 @@ impl CoverageTask { let allowlist = self.load_target_allowlist().await?; let heartbeat = self.config.common.init_heartbeat(None).await?; - + let job_result = self.config.common.init_job_result().await?; let mut seen_inputs = false; let target_exe_path = @@ -129,6 +131,7 @@ impl CoverageTask { coverage, allowlist, heartbeat, + job_result, target_exe.to_string(), )?; @@ 
-219,6 +222,7 @@ struct TaskContext<'a> {
     module_allowlist: AllowList,
     source_allowlist: Arc<AllowList>,
     heartbeat: Option<TaskHeartbeatClient>,
+    job_result: Option<TaskJobResultClient>,
     cache: Arc<DebugInfoCache>,
 }
 
@@ -228,6 +232,7 @@ impl<'a> TaskContext<'a> {
         coverage: BinaryCoverage,
         allowlist: TargetAllowList,
         heartbeat: Option<TaskHeartbeatClient>,
+        job_result: Option<TaskJobResultClient>,
         target_exe: String,
     ) -> Result<Self> {
         let cache = DebugInfoCache::new(allowlist.source_files.clone());
@@ -247,6 +252,7 @@
             module_allowlist: allowlist.modules,
             source_allowlist: Arc::new(allowlist.source_files),
             heartbeat,
+            job_result,
             cache: Arc::new(cache),
         })
     }
@@ -455,7 +461,16 @@ impl<'a> TaskContext<'a> {
         let s = CoverageStats::new(&coverage);
         event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate);
         metric!(coverage_data; 1.0; Covered = s.covered, Features = s.features, Rate = s.rate);
-
+        self.job_result
+            .send_direct(
+                JobResultData::CoverageData,
+                HashMap::from([
+                    ("covered".to_string(), s.covered as f64),
+                    ("features".to_string(), s.features as f64),
+                    ("rate".to_string(), s.rate),
+                ]),
+            )
+            .await;
         Ok(())
     }
 
diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs
index d9116a1ed2..bd7511cac2 100644
--- a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs
+++ b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs
@@ -73,6 +73,7 @@ impl GeneratorTask {
         }
 
         let hb_client = self.config.common.init_heartbeat(None).await?;
+        let jr_client = self.config.common.init_job_result().await?;
 
         for dir in &self.config.readonly_inputs {
             dir.init_pull().await?;
@@ -84,7 +85,10 @@
             self.config.ensemble_sync_delay,
         );
 
-        let crash_dir_monitor = self.config.crashes.monitor_results(new_result, false);
+        let crash_dir_monitor = self
+            .config
+            .crashes
+            .monitor_results(new_result, false, &jr_client);
 
         let fuzzer = self.fuzzing_loop(hb_client);
 
@@ -298,6 +302,7 @@ mod tests {
             task_id: Default::default(),
             instance_id: Default::default(),
             heartbeat_queue: Default::default(),
+            job_result_queue: Default::default(),
             instance_telemetry_key: Default::default(),
             microsoft_telemetry_key: Default::default(),
             logs: Default::default(),
diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs
index 4f8c67ae8e..bfd9f3f5cc 100644
--- a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs
+++ b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs
@@ -1,7 +1,11 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
-use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender, utils::default_bool_true};
+use crate::tasks::{
+    config::CommonConfig,
+    heartbeat::{HeartbeatSender, TaskHeartbeatClient},
+    utils::default_bool_true,
+};
 use anyhow::{Context, Result};
 use arraydeque::{ArrayDeque, Wrapping};
 use async_trait::async_trait;
@@ -12,6 +16,7 @@ use onefuzz::{
     process::ExitStatus,
     syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir},
 };
+use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
 use onefuzz_telemetry::{
     Event::{new_coverage, new_crashdump, new_result, runtime_stats},
     EventData,
@@ -126,21 +131,31 @@ where
         self.verify().await?;
 
         let hb_client = self.config.common.init_heartbeat(None).await?;
+        let jr_client = self.config.common.init_job_result().await?;
 
         // To be scheduled.
let resync = self.continuous_sync_inputs(); - let new_inputs = self.config.inputs.monitor_results(new_coverage, true); - let new_crashes = self.config.crashes.monitor_results(new_result, true); + + let new_inputs = self + .config + .inputs + .monitor_results(new_coverage, true, &jr_client); + let new_crashes = self + .config + .crashes + .monitor_results(new_result, true, &jr_client); let new_crashdumps = async { if let Some(crashdumps) = &self.config.crashdumps { - crashdumps.monitor_results(new_crashdump, true).await + crashdumps + .monitor_results(new_crashdump, true, &jr_client) + .await } else { Ok(()) } }; let (stats_sender, stats_receiver) = mpsc::unbounded_channel(); - let report_stats = report_runtime_stats(stats_receiver, hb_client); + let report_stats = report_runtime_stats(stats_receiver, &hb_client, &jr_client); let fuzzers = self.run_fuzzers(Some(&stats_sender)); futures::try_join!( resync, @@ -183,7 +198,7 @@ where .inputs .local_path .parent() - .ok_or_else(|| anyhow!("Invalid input path"))?; + .ok_or_else(|| anyhow!("invalid input path"))?; let temp_path = task_dir.join(".temp"); tokio::fs::create_dir_all(&temp_path).await?; let temp_dir = tempdir_in(temp_path)?; @@ -501,7 +516,7 @@ impl TotalStats { self.execs_sec = self.worker_stats.values().map(|x| x.execs_sec).sum(); } - fn report(&self) { + async fn report(&self, jr_client: &Option) { event!( runtime_stats; EventData::Count = self.count, @@ -513,6 +528,17 @@ impl TotalStats { EventData::Count = self.count, EventData::ExecsSecond = self.execs_sec ); + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::RuntimeStats, + HashMap::from([ + ("total_count".to_string(), self.count as f64), + ("execs_sec".to_string(), self.execs_sec), + ]), + ) + .await; + } } } @@ -542,7 +568,8 @@ impl Timer { // are approximating nearest-neighbor interpolation on the runtime stats time series. async fn report_runtime_stats( mut stats_channel: mpsc::UnboundedReceiver, - heartbeat_client: impl HeartbeatSender, + heartbeat_client: &Option, + jr_client: &Option, ) -> Result<()> { // Cache the last-reported stats for a given worker. 
// @@ -551,7 +578,7 @@ async fn report_runtime_stats( let mut total = TotalStats::default(); // report all zeros to start - total.report(); + total.report(jr_client).await; let timer = Timer::new(RUNTIME_STATS_PERIOD); @@ -560,10 +587,10 @@ async fn report_runtime_stats( Some(stats) = stats_channel.recv() => { heartbeat_client.alive(); total.update(stats); - total.report() + total.report(jr_client).await } _ = timer.wait() => { - total.report() + total.report(jr_client).await } } } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs index de1e1106ba..3f00e20b8d 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs @@ -79,7 +79,10 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { remote_path: config.crashes.remote_path.clone(), }; crashes.init().await?; - let monitor_crashes = crashes.monitor_results(new_result, false); + + let jr_client = config.common.init_job_result().await?; + + let monitor_crashes = crashes.monitor_results(new_result, false, &jr_client); // setup crashdumps let (crashdump_dir, monitor_crashdumps) = { @@ -95,9 +98,12 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { }; let monitor_dir = crashdump_dir.clone(); + let monitor_jr_client = config.common.init_job_result().await?; let monitor_crashdumps = async move { if let Some(crashdumps) = monitor_dir { - crashdumps.monitor_results(new_crashdump, false).await + crashdumps + .monitor_results(new_crashdump, false, &monitor_jr_client) + .await } else { Ok(()) } @@ -129,11 +135,13 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { if let Some(no_repro) = &config.no_repro { no_repro.init().await?; } + let monitor_reports_future = monitor_reports( reports_dir.path(), &config.unique_reports, &config.reports, &config.no_repro, + &jr_client, ); let inputs = SyncedDir { @@ -156,7 +164,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { delay_with_jitter(delay).await; } } - let monitor_inputs = inputs.monitor_results(new_coverage, false); + let monitor_inputs = inputs.monitor_results(new_coverage, false, &jr_client); let inputs_sync_cancellation = CancellationToken::new(); // never actually cancelled let inputs_sync_task = inputs.continuous_sync(Pull, config.ensemble_sync_delay, &inputs_sync_cancellation); @@ -444,6 +452,7 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), + job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/heartbeat.rs b/src/agent/onefuzz-task/src/tasks/heartbeat.rs index 515fa39d0c..e13b661909 100644 --- a/src/agent/onefuzz-task/src/tasks/heartbeat.rs +++ b/src/agent/onefuzz-task/src/tasks/heartbeat.rs @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::onefuzz::heartbeat::HeartbeatClient; use anyhow::Result; +use onefuzz::heartbeat::HeartbeatClient; use reqwest::Url; use serde::{self, Deserialize, Serialize}; use std::time::Duration; diff --git a/src/agent/onefuzz-task/src/tasks/merge/generic.rs b/src/agent/onefuzz-task/src/tasks/merge/generic.rs index 4f2e8234a8..3b6a2094d8 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/generic.rs @@ -83,7 +83,7 @@ pub async fn spawn(config: &Config) -> Result<()> { } } } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; }; } diff --git a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs index 1c334b3f18..2d53bc8c07 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs @@ -120,7 +120,7 @@ async fn process_message(config: &Config, input_queue: QueueClient) -> Result<() } Ok(()) } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; Ok(()) } diff --git a/src/agent/onefuzz-task/src/tasks/regression/common.rs b/src/agent/onefuzz-task/src/tasks/regression/common.rs index 60023cfa6e..b61a97df4c 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/common.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/common.rs @@ -2,12 +2,14 @@ // Licensed under the MIT License. use crate::tasks::{ + config::CommonConfig, heartbeat::{HeartbeatSender, TaskHeartbeatClient}, report::crash_report::{parse_report_file, CrashTestResult, RegressionReport}, }; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::syncdir::SyncedDir; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use std::path::PathBuf; @@ -24,7 +26,7 @@ pub trait RegressionHandler { /// Runs the regression task pub async fn run( - heartbeat_client: Option, + common_config: &CommonConfig, regression_reports: &SyncedDir, crashes: &SyncedDir, report_dirs: &[&SyncedDir], @@ -35,6 +37,9 @@ pub async fn run( info!("starting regression task"); regression_reports.init().await?; + let heartbeat_client = common_config.init_heartbeat(None).await?; + let job_result_client = common_config.init_job_result().await?; + handle_crash_reports( handler, crashes, @@ -42,6 +47,7 @@ pub async fn run( report_list, regression_reports, &heartbeat_client, + &job_result_client, ) .await .context("handling crash reports")?; @@ -52,6 +58,7 @@ pub async fn run( readonly_inputs, regression_reports, &heartbeat_client, + &job_result_client, ) .await .context("handling inputs")?; @@ -71,6 +78,7 @@ pub async fn handle_inputs( readonly_inputs: &SyncedDir, regression_reports: &SyncedDir, heartbeat_client: &Option, + job_result_client: &Option, ) -> Result<()> { readonly_inputs.init_pull().await?; let mut input_files = tokio::fs::read_dir(&readonly_inputs.local_path).await?; @@ -95,7 +103,7 @@ pub async fn handle_inputs( crash_test_result, original_crash_test_result: None, } - .save(None, regression_reports) + .save(None, regression_reports, job_result_client) .await? 
} @@ -109,6 +117,7 @@ pub async fn handle_crash_reports( report_list: &Option>, regression_reports: &SyncedDir, heartbeat_client: &Option, + job_result_client: &Option, ) -> Result<()> { // without crash report containers, skip this method if report_dirs.is_empty() { @@ -158,7 +167,7 @@ pub async fn handle_crash_reports( crash_test_result, original_crash_test_result: Some(original_crash_test_result), } - .save(Some(file_name), regression_reports) + .save(Some(file_name), regression_reports, job_result_client) .await? } } diff --git a/src/agent/onefuzz-task/src/tasks/regression/generic.rs b/src/agent/onefuzz-task/src/tasks/regression/generic.rs index 640e80db9a..8570208d59 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/generic.rs @@ -89,7 +89,6 @@ impl GenericRegressionTask { pub async fn run(&self) -> Result<()> { info!("Starting generic regression task"); - let heartbeat_client = self.config.common.init_heartbeat(None).await?; let mut report_dirs = vec![]; for dir in vec![ @@ -103,7 +102,7 @@ impl GenericRegressionTask { report_dirs.push(dir); } common::run( - heartbeat_client, + &self.config.common, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs index 06dd7c00d9..e65f46bb64 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs @@ -103,9 +103,8 @@ impl LibFuzzerRegressionTask { report_dirs.push(dir); } - let heartbeat_client = self.config.common.init_heartbeat(None).await?; common::run( - heartbeat_client, + &self.config.common, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs index 23171bc432..290b98ccde 100644 --- a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs @@ -3,6 +3,7 @@ use anyhow::{Context, Result}; use onefuzz::{blob::BlobUrl, monitor::DirectoryMonitor, syncdir::SyncedDir}; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{ new_report, new_unable_to_reproduce, new_unique_report, regression_report, @@ -12,6 +13,7 @@ use onefuzz_telemetry::{ }; use serde::{Deserialize, Serialize}; use stacktrace_parser::CrashLog; +use std::collections::HashMap; use std::path::{Path, PathBuf}; use uuid::Uuid; @@ -111,6 +113,7 @@ impl RegressionReport { self, report_name: Option, regression_reports: &SyncedDir, + jr_client: &Option, ) -> Result<()> { let (event, name) = match &self.crash_test_result { CrashTestResult::CrashReport(report) => { @@ -126,6 +129,15 @@ impl RegressionReport { if upload_or_save_local(&self, &name, regression_reports).await? 
{
             event!(event; EventData::Path = name.clone());
             metric!(event; 1.0; EventData::Path = name.clone());
+
+            if let Some(jr_client) = jr_client {
+                let _ = jr_client
+                    .send_direct(
+                        JobResultData::NewRegressionReport,
+                        HashMap::from([("count".to_string(), 1.0)]),
+                    )
+                    .await;
+            }
         }
         Ok(())
     }
@@ -149,6 +161,7 @@ impl CrashTestResult {
         unique_reports: &Option<SyncedDir>,
         reports: &Option<SyncedDir>,
         no_repro: &Option<SyncedDir>,
+        jr_client: &Option<TaskJobResultClient>,
     ) -> Result<()> {
         match self {
             Self::CrashReport(report) => {
@@ -158,6 +171,15 @@
                     if upload_or_save_local(&report, &name, unique_reports).await? {
                         event!(new_unique_report; EventData::Path = report.unique_blob_name());
                         metric!(new_unique_report; 1.0; EventData::Path = report.unique_blob_name());
+
+                        if let Some(jr_client) = jr_client {
+                            let _ = jr_client
+                                .send_direct(
+                                    JobResultData::NewUniqueReport,
+                                    HashMap::from([("count".to_string(), 1.0)]),
+                                )
+                                .await;
+                        }
                     }
                 }
 
@@ -166,6 +188,15 @@
                     if upload_or_save_local(&report, &name, reports).await? {
                         event!(new_report; EventData::Path = report.blob_name());
                         metric!(new_report; 1.0; EventData::Path = report.blob_name());
+
+                        if let Some(jr_client) = jr_client {
+                            let _ = jr_client
+                                .send_direct(
+                                    JobResultData::NewReport,
+                                    HashMap::from([("count".to_string(), 1.0)]),
+                                )
+                                .await;
+                        }
                     }
                 }
             }
@@ -176,6 +207,15 @@
                     if upload_or_save_local(&report, &name, no_repro).await? {
                         event!(new_unable_to_reproduce; EventData::Path = report.blob_name());
                         metric!(new_unable_to_reproduce; 1.0; EventData::Path = report.blob_name());
+
+                        if let Some(jr_client) = jr_client {
+                            let _ = jr_client
+                                .send_direct(
+                                    JobResultData::NoReproCrashingInput,
+                                    HashMap::from([("count".to_string(), 1.0)]),
+                                )
+                                .await;
+                        }
                     }
                 }
             }
@@ -324,6 +364,7 @@ pub async fn monitor_reports(
     unique_reports: &Option<SyncedDir>,
     reports: &Option<SyncedDir>,
     no_crash: &Option<SyncedDir>,
+    jr_client: &Option<TaskJobResultClient>,
 ) -> Result<()> {
     if unique_reports.is_none() && reports.is_none() && no_crash.is_none() {
         debug!("no report directories configured");
@@ -334,7 +375,9 @@
     while let Some(file) = monitor.next_file().await?
{ let result = parse_report_file(file).await?; - result.save(unique_reports, reports, no_crash).await?; + result + .save(unique_reports, reports, no_crash, jr_client) + .await?; } Ok(()) diff --git a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs index 9b626a7d89..b8659845de 100644 --- a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs @@ -8,25 +8,25 @@ use std::{ sync::Arc, }; +use crate::tasks::report::crash_report::*; +use crate::tasks::report::dotnet::common::collect_exception_info; +use crate::tasks::{ + config::CommonConfig, + generic::input_poller::*, + heartbeat::{HeartbeatSender, TaskHeartbeatClient}, + utils::{default_bool_true, try_resolve_setup_relative_path}, +}; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::expand::Expand; use onefuzz::fs::set_executable; use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir}; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use storage_queue::{Message, QueueClient}; use tokio::fs; -use crate::tasks::report::crash_report::*; -use crate::tasks::report::dotnet::common::collect_exception_info; -use crate::tasks::{ - config::CommonConfig, - generic::input_poller::*, - heartbeat::{HeartbeatSender, TaskHeartbeatClient}, - utils::{default_bool_true, try_resolve_setup_relative_path}, -}; - const DOTNET_DUMP_TOOL_NAME: &str = "dotnet-dump"; #[derive(Debug, Deserialize)] @@ -114,15 +114,18 @@ impl DotnetCrashReportTask { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, + job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; + let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, + job_result_client, }) } @@ -260,6 +263,7 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await; diff --git a/src/agent/onefuzz-task/src/tasks/report/generic.rs b/src/agent/onefuzz-task/src/tasks/report/generic.rs index 9088f98acc..8ad259f0a5 100644 --- a/src/agent/onefuzz-task/src/tasks/report/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/generic.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, input_tester::Tester, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -73,7 +74,9 @@ impl ReportTask { pub async fn managed_run(&mut self) -> Result<()> { info!("Starting generic crash report task"); let heartbeat_client = self.config.common.init_heartbeat(None).await?; - let mut processor = GenericReportProcessor::new(&self.config, heartbeat_client); + let job_result_client = self.config.common.init_job_result().await?; + let mut processor = + GenericReportProcessor::new(&self.config, heartbeat_client, job_result_client); #[allow(clippy::manual_flatten)] for entry in [ @@ -183,13 +186,19 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct GenericReportProcessor<'a> { config: &'a Config, heartbeat_client: Option, + job_result_client: Option, } impl<'a> GenericReportProcessor<'a> { - pub fn new(config: &'a Config, heartbeat_client: Option) -> Self { + pub fn new( + config: &'a Config, + heartbeat_client: Option, + 
job_result_client: Option, + ) -> Self { Self { config, heartbeat_client, + job_result_client, } } @@ -239,6 +248,7 @@ impl<'a> Processor for GenericReportProcessor<'a> { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await .context("saving report failed") diff --git a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs index f18f638fa3..587ed2e3dc 100644 --- a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, libfuzzer::LibFuzzer, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -196,15 +197,18 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, + job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; + let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, + job_result_client, }) } @@ -257,6 +261,7 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await } diff --git a/src/agent/onefuzz/Cargo.toml b/src/agent/onefuzz/Cargo.toml index c096c8ddfc..1f3c27985c 100644 --- a/src/agent/onefuzz/Cargo.toml +++ b/src/agent/onefuzz/Cargo.toml @@ -44,6 +44,7 @@ tempfile = "3.7.0" process_control = "4.0" reqwest-retry = { path = "../reqwest-retry" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } +onefuzz-result = { path = "../onefuzz-result" } stacktrace-parser = { path = "../stacktrace-parser" } backoff = { version = "0.4", features = ["tokio"] } diff --git a/src/agent/onefuzz/src/blob/url.rs b/src/agent/onefuzz/src/blob/url.rs index f55ffbb23a..134b59dea0 100644 --- a/src/agent/onefuzz/src/blob/url.rs +++ b/src/agent/onefuzz/src/blob/url.rs @@ -192,10 +192,15 @@ impl BlobContainerUrl { } pub fn as_path(&self, prefix: impl AsRef) -> Result { - let dir = self - .account() - .ok_or_else(|| anyhow!("Invalid container Url"))?; - Ok(prefix.as_ref().join(dir)) + match (self.account(), self.container()) { + (Some(account), Some(container)) => { + let mut path = PathBuf::new(); + path.push(account); + path.push(container); + Ok(prefix.as_ref().join(path)) + } + _ => bail!("Invalid container Url"), + } } } @@ -526,4 +531,14 @@ mod tests { "id:000000,sig:06,src:000000,op:havoc,rep:128" ); } + + #[test] + fn test_as_path() -> Result<()> { + let root = PathBuf::from(r"/onefuzz"); + let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?; + let path = url.as_path(root)?; + assert_eq!(PathBuf::from(r"/onefuzz/myaccount/mycontainer"), path); + + Ok(()) + } } diff --git a/src/agent/onefuzz/src/syncdir.rs b/src/agent/onefuzz/src/syncdir.rs index 0252099561..2e73b7a694 100644 --- a/src/agent/onefuzz/src/syncdir.rs +++ b/src/agent/onefuzz/src/syncdir.rs @@ -11,10 +11,12 @@ use crate::{ }; use anyhow::{Context, Result}; use dunce::canonicalize; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{Event, EventData}; use reqwest::{StatusCode, Url}; use reqwest_retry::{RetryCheck, SendRetry, DEFAULT_RETRY_PERIOD, 
MAX_RETRY_ATTEMPTS}; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; use std::{env::current_dir, path::PathBuf, str, time::Duration}; use tokio::{fs, select}; use tokio_util::sync::CancellationToken; @@ -241,6 +243,7 @@ impl SyncedDir { url: BlobContainerUrl, event: Event, ignore_dotfiles: bool, + jr_client: &Option, ) -> Result<()> { debug!("monitoring {}", path.display()); @@ -265,9 +268,39 @@ impl SyncedDir { if ignore_dotfiles && file_name_event_str.starts_with('.') { continue; } - event!(event.clone(); EventData::Path = file_name_event_str); metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str); + if let Some(jr_client) = jr_client { + match event { + Event::new_result => { + jr_client + .send_direct( + JobResultData::NewCrashingInput, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + Event::new_coverage => { + jr_client + .send_direct( + JobResultData::CoverageData, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + Event::new_crashdump => { + jr_client + .send_direct( + JobResultData::NewCrashDump, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + _ => { + warn!("Unhandled job result!"); + } + } + } let destination = path.join(file_name); if let Err(err) = fs::copy(&item, &destination).await { let error_message = format!( @@ -305,6 +338,29 @@ impl SyncedDir { event!(event.clone(); EventData::Path = file_name_event_str); metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str); + if let Some(jr_client) = jr_client { + match event { + Event::new_result => { + jr_client + .send_direct( + JobResultData::NewCrashingInput, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + Event::new_coverage => { + jr_client + .send_direct( + JobResultData::CoverageData, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } + _ => { + warn!("Unhandled job result!"); + } + } + } if let Err(err) = uploader.upload(item.clone()).await { let error_message = format!( "Couldn't upload file. path:{} dir:{} err:{:?}", @@ -336,7 +392,12 @@ impl SyncedDir { /// The intent of this is to support use cases where we usually want a directory /// to be initialized, but a user-supplied binary, (such as AFL) logically owns /// a directory, and may reset it. 
- pub async fn monitor_results(&self, event: Event, ignore_dotfiles: bool) -> Result<()> { + pub async fn monitor_results( + &self, + event: Event, + ignore_dotfiles: bool, + job_result_client: &Option, + ) -> Result<()> { if let Some(url) = self.remote_path.clone() { loop { debug!("waiting to monitor {}", self.local_path.display()); @@ -355,6 +416,7 @@ impl SyncedDir { url.clone(), event.clone(), ignore_dotfiles, + job_result_client, ) .await?; } diff --git a/src/deployment/bicep-templates/storageAccounts.bicep b/src/deployment/bicep-templates/storageAccounts.bicep index 6a96cea6a0..27f2da21d8 100644 --- a/src/deployment/bicep-templates/storageAccounts.bicep +++ b/src/deployment/bicep-templates/storageAccounts.bicep @@ -33,7 +33,7 @@ var storageAccountFuncQueuesParams = [ 'update-queue' 'webhooks' 'signalr-events' - 'custom-metrics' + 'job-result' ] var fileChangesQueueIndex = 0 diff --git a/src/integration-tests/integration-test.py b/src/integration-tests/integration-test.py index 057404ceff..15ffcfb9fe 100755 --- a/src/integration-tests/integration-test.py +++ b/src/integration-tests/integration-test.py @@ -88,6 +88,7 @@ class Integration(BaseModel): target_method: Optional[str] setup_dir: Optional[str] target_env: Optional[Dict[str, str]] + pool: PoolName TARGETS: Dict[str, Integration] = { @@ -97,6 +98,7 @@ class Integration(BaseModel): target_exe="fuzz.exe", inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, + pool="linux", ), "linux-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -124,6 +126,7 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], + pool="linux", ), "linux-libfuzzer-with-options": Integration( template=TemplateType.libfuzzer, @@ -137,6 +140,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, fuzzing_target_options=["-runs=10000000"], + pool="linux", ), "linux-libfuzzer-dlopen": Integration( template=TemplateType.libfuzzer, @@ -150,6 +154,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, + pool="linux", ), "linux-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -163,6 +168,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, + pool="linux", ), "linux-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -180,6 +186,7 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, + pool="linux", ), "linux-libfuzzer-aarch64-crosscompile": Integration( template=TemplateType.libfuzzer_qemu_user, @@ -189,6 +196,7 @@ class Integration(BaseModel): use_setup=True, wait_for_files={ContainerType.inputs: 2, ContainerType.crashes: 1}, test_repro=False, + pool="linux", ), "linux-libfuzzer-rust": Integration( template=TemplateType.libfuzzer, @@ -196,6 +204,7 @@ class Integration(BaseModel): target_exe="fuzz_target_1", wait_for_files={ContainerType.unique_reports: 1, ContainerType.coverage: 1}, fuzzing_target_options=["--test:{extra_setup_dir}"], + pool="linux", ), "linux-trivial-crash": Integration( template=TemplateType.radamsa, @@ -204,6 +213,7 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, + pool="linux", ), "linux-trivial-crash-asan": Integration( template=TemplateType.radamsa, @@ -213,6 +223,28 @@ class Integration(BaseModel): wait_for_files={ContainerType.unique_reports: 1}, check_asan_log=True, disable_check_debugger=True, + pool="linux", + ), + # TODO: Don't install OMS 
extension on linux anymore + # TODO: Figure out why non mariner work is being scheduled to the mariner pool + "mariner-libfuzzer": Integration( + template=TemplateType.libfuzzer, + os=OS.linux, + target_exe="fuzz.exe", + inputs="seeds", + wait_for_files={ + ContainerType.unique_reports: 1, + ContainerType.coverage: 1, + ContainerType.inputs: 2, + ContainerType.extra_output: 1, + }, + reboot_after_setup=True, + inject_fake_regression=True, + fuzzing_target_options=[ + "--test:{extra_setup_dir}", + "--write_test_file={extra_output_dir}/test.txt", + ], + pool=PoolName("mariner") ), "windows-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -234,6 +266,7 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], + pool="windows", ), "windows-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -246,6 +279,7 @@ class Integration(BaseModel): ContainerType.coverage: 1, }, use_setup=True, + pool="windows", ), "windows-libfuzzer-load-library": Integration( template=TemplateType.libfuzzer, @@ -258,6 +292,7 @@ class Integration(BaseModel): ContainerType.coverage: 1, }, use_setup=True, + pool="windows", ), "windows-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -275,6 +310,7 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, + pool="windows", ), "windows-trivial-crash": Integration( template=TemplateType.radamsa, @@ -283,6 +319,7 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, + pool="windows", ), } @@ -351,7 +388,7 @@ def try_info_get(data: Any) -> None: self.inject_log(self.start_log_marker) for entry in os_list: - name = PoolName(f"testpool-{entry.name}-{self.test_id}") + name = self.build_pool_name(entry.name) self.logger.info("creating pool: %s:%s", entry.name, name) self.of.pools.create(name, entry) self.logger.info("creating scaleset for pool: %s", name) @@ -359,6 +396,15 @@ def try_info_get(data: Any) -> None: name, pool_size, region=region, initial_size=pool_size ) + name = self.build_pool_name("mariner") + self.logger.info("creating pool: %s:%s", "mariner", name) + self.of.pools.create(name, OS.linux) + self.logger.info("creating scaleset for pool: %s", name) + self.of.scalesets.create( + name, pool_size, region=region, initial_size=pool_size, image="MicrosoftCBLMariner:cbl-mariner:cbl-mariner-2-gen2:latest" + ) + + class UnmanagedPool: def __init__( self, @@ -560,12 +606,9 @@ def launch( ) -> List[UUID]: """Launch all of the fuzzing templates""" - pools: Dict[OS, Pool] = {} + pool = None if unmanaged_pool is not None: - pools[unmanaged_pool.the_os] = self.of.pools.get(unmanaged_pool.pool_name) - else: - for pool in self.of.pools.list(): - pools[pool.os] = pool + pool = unmanaged_pool.pool_name job_ids = [] @@ -576,8 +619,8 @@ def launch( if config.os not in os_list: continue - if config.os not in pools.keys(): - raise Exception(f"No pool for target: {target} ,os: {config.os}") + if pool is None: + pool = self.build_pool_name(config.pool) self.logger.info("launching: %s", target) @@ -601,8 +644,9 @@ def launch( setup = Directory(os.path.join(setup, config.nested_setup_dir)) job: Optional[Job] = None + job = self.build_job( - duration, pools, target, config, setup, target_exe, inputs + duration, pool, target, config, setup, target_exe, inputs ) if config.inject_fake_regression and job is not None: @@ -618,7 +662,7 @@ def launch( def build_job( self, duration: int, - 
diff --git a/src/runtime-tools/linux/setup.sh b/src/runtime-tools/linux/setup.sh
old mode 100755
new mode 100644
index f6859003b4..794e827f4d
--- a/src/runtime-tools/linux/setup.sh
+++ b/src/runtime-tools/linux/setup.sh
@@ -18,6 +18,14 @@ export DOTNET_CLI_HOME="$DOTNET_ROOT"
 export ONEFUZZ_ROOT=/onefuzz
 export LLVM_SYMBOLIZER_PATH=/onefuzz/bin/llvm-symbolizer

+# `logger` won't work on mariner unless we install this package first
+if type yum > /dev/null 2> /dev/null; then
+    until yum install -y util-linux sudo; do
+        echo "yum failed. sleep 10s, then retrying"
+        sleep 10
+    done
+fi
+
 logger "onefuzz: making directories"
 sudo mkdir -p /onefuzz/downloaded
 sudo chown -R $(whoami) /onefuzz
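The new yum branches in setup.sh reuse the retry shape already used for apt: attempt the install, and on failure sleep 10 seconds and try again, presumably to ride out transient package-mirror or network failures on a freshly provisioned VM. The same idea as a standalone Python helper, shown only for clarity (the helper and its names are illustrative, not part of the shipped agent):

    # Illustrative retry-until-success install helper mirroring the shell loops above.
    import subprocess
    import time

    def install_with_retry(packages, delay_seconds=10.0):
        # Keep retrying until the package manager reports success.
        while True:
            result = subprocess.run(["yum", "install", "-y", *packages])
            if result.returncode == 0:
                return
            print(f"yum failed. sleeping {delay_seconds}s, then retrying")
            time.sleep(delay_seconds)

    # Example: install_with_retry(["util-linux", "sudo"])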
@@ -134,31 +142,53 @@ if type apt > /dev/null 2> /dev/null; then
        sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH
    fi

-    # Install dotnet
+    # Needed to install dotnet
     until sudo apt install -y curl libicu-dev; do
         logger "apt failed, sleeping 10s then retrying"
         sleep 10
     done
+elif type yum > /dev/null 2> /dev/null; then
+    until yum install -y gdb gdb-gdbserver libunwind awk ca-certificates tar yum-utils shadow-utils cronie procps; do
+        echo "yum failed. sleep 10s, then retrying"
+        sleep 10
+    done
+
+    # Install updated Microsoft Open Management Infrastructure - github.com/microsoft/omi
+    yum-config-manager --add-repo=https://packages.microsoft.com/config/rhel/8/prod.repo 2>&1 | logger -s -i -t 'onefuzz-OMI-add-MS-repo'
+    yum install -y omi 2>&1 | logger -s -i -t 'onefuzz-OMI-install'

-    logger "downloading dotnet install"
-    curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install'
-    chmod +x dotnet-install.sh
-    for version in "${DOTNET_VERSIONS[@]}"; do
-        logger "running dotnet install $version"
-        /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup'
-    done
-    rm dotnet-install.sh
-
-    logger "install dotnet tools"
-    pushd "$DOTNET_ROOT"
-    ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
-    "$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
-    "$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
-    "$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
-    popd
+    if ! [ -f ${LLVM_SYMBOLIZER_PATH} ]; then
+        until yum install -y llvm-12.0.1; do
+            echo "yum failed, sleeping 10s then retrying"
+            sleep 10
+        done
+
+        # If specifying symbolizer, exe name must be a "known symbolizer".
+        # Using `llvm-symbolizer` works for clang 8 .. 12.
+        sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH
+    fi
 fi

+# Install dotnet
+logger "downloading dotnet install"
+curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install'
+chmod +x dotnet-install.sh
+
+for version in "${DOTNET_VERSIONS[@]}"; do
+    logger "running dotnet install $version"
+    /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup'
+done
+rm dotnet-install.sh
+
+logger "install dotnet tools"
+pushd "$DOTNET_ROOT"
+ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
+"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
+"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
+"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
+popd
+
 if [ -v DOCKER_BUILD ]; then
     echo "building for docker"
 elif [ -d /etc/systemd/system ]; then