diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bfb80321..0cd3fdfe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,9 +31,16 @@ jobs: - name: Install memtrack run: | cargo install --path crates/memtrack --locked - echo "CODSPEED_MEMTRACK_BINARY=$(which codspeed-memtrack)" >> $GITHUB_ENV - - run: cargo test --all --exclude memtrack + - run: cargo test --all --exclude memtrack --exclude exec-harness + + exec-harness-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: moonrepo/setup-rust@v1 + - name: Run tests + run: cargo test -p exec-harness bpf-tests: runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index f686a4b8..4f6354b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -429,6 +429,7 @@ dependencies = [ "clap", "console", "debugid", + "exec-harness", "futures", "gimli", "git2", @@ -450,6 +451,7 @@ dependencies = [ "open", "procfs", "rand", + "rayon", "regex", "reqwest", "reqwest-middleware", @@ -756,8 +758,13 @@ dependencies = [ "anyhow", "clap", "codspeed", + "env_logger", + "humantime", + "log", + "runner-shared", "serde", "serde_json", + "tempfile", ] [[package]] @@ -792,9 +799,9 @@ checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" [[package]] name = "flate2" -version = "1.1.7" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2152dbcb980c05735e2a651d96011320a949eb31a0c8b38b72645ce97dec676" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "miniz_oxide", @@ -1080,6 +1087,12 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + [[package]] name = "hyper" version = "0.14.32" @@ -1533,9 +1546,8 @@ checksum = "bfae20f6b19ad527b550c223fddc3077a547fc70cda94b9b566575423fd303ee" [[package]] name = "linux-perf-data" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee44f48bb8d8ac1f6e55e294b211960d118c069863e7206913d138d2ddd6175" +version = "0.12.0" +source = "git+https://github.com/CodSpeedHQ/linux-perf-data.git?rev=e8c0ed51c2762b660708f8086e76a28449ef976c#e8c0ed51c2762b660708f8086e76a28449ef976c" dependencies = [ "byteorder", "linear-map", @@ -1544,6 +1556,7 @@ dependencies = [ "prost", "prost-derive", "thiserror 2.0.17", + "zstd-safe", ] [[package]] @@ -2185,21 +2198,21 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", ] [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.111", diff --git a/Cargo.toml b/Cargo.toml index b83ea7b3..049b3781 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ 
itertools = { workspace = true } lazy_static = "1.4.0" log = { workspace = true } rand = "0.8.5" +rayon = "1.10" regex = "1.10.2" semver = "1.0" reqwest = { version = "0.11.22", features = [ @@ -39,7 +40,7 @@ async-compression = { version = "0.4.5", features = ["tokio", "gzip"] } simplelog = { version = "0.12.1", default-features = false, features = [ "termcolor", ] } -tempfile = "3.10.0" +tempfile = { workspace = true } git2 = "0.20.2" nestify = "0.3.3" gql_client = { git = "https://github.com/CodSpeedHQ/gql-client-rs" } @@ -51,13 +52,16 @@ async-trait = "0.1.82" libc = { workspace = true } bincode = "1.3.3" object = "0.36.7" -linux-perf-data = "0.11.0" +linux-perf-data = { git = "https://github.com/CodSpeedHQ/linux-perf-data.git", rev = "e8c0ed51c2762b660708f8086e76a28449ef976c", features = [ + "zstd", +] } # feat: support zstd compressed records debugid = "0.8.0" memmap2 = "0.9.5" nix = { version = "0.29.0", features = ["fs", "time", "user"] } futures = "0.3.31" runner-shared = { path = "crates/runner-shared" } memtrack = { path = "crates/memtrack", default-features = false } +exec-harness = { path = "crates/exec-harness" } ipc-channel = "0.18" shellexpand = { version = "3.1.1", features = ["tilde"] } addr2line = "0.25" @@ -84,7 +88,7 @@ members = ["crates/runner-shared", "crates/memtrack", "crates/exec-harness"] [workspace.dependencies] anyhow = "1.0" -clap = { version = "4.5", features = ["derive"] } +clap = { version = "4.5", features = ["derive", "env"] } libc = "0.2" log = "0.4.28" serde_json = "1.0" @@ -92,6 +96,7 @@ serde = { version = "1.0.228", features = ["derive"] } ipc-channel = "0.18" itertools = "0.14.0" env_logger = "0.11.8" +tempfile = "3.10.0" [workspace.metadata.release] sign-tag = true diff --git a/crates/exec-harness/Cargo.toml b/crates/exec-harness/Cargo.toml index d36ad4da..8087cc50 100644 --- a/crates/exec-harness/Cargo.toml +++ b/crates/exec-harness/Cargo.toml @@ -5,12 +5,27 @@ edition = "2024" repository = "https://github.com/CodSpeedHQ/runner" publish = false +[lib] +name = "exec_harness" +path = "src/lib.rs" + +[[bin]] +name = "exec-harness" +path = "src/main.rs" + [dependencies] anyhow = { workspace = true } codspeed = "4.1.0" +log = { workspace = true } +env_logger = { workspace = true } clap = { workspace = true } serde_json = { workspace = true } serde = { workspace = true } +humantime = "2.1" +runner-shared = { path = "../runner-shared" } + +[dev-dependencies] +tempfile = { workspace = true } [package.metadata.dist] targets = ["aarch64-unknown-linux-musl", "x86_64-unknown-linux-musl"] diff --git a/crates/exec-harness/src/lib.rs b/crates/exec-harness/src/lib.rs new file mode 100644 index 00000000..845065b7 --- /dev/null +++ b/crates/exec-harness/src/lib.rs @@ -0,0 +1,7 @@ +//! CodSpeed exec-harness library +//! +//! This library provides the core functionality for wrapping commands +//! with CodSpeed performance instrumentation. 
+ +mod prelude; +pub mod walltime; diff --git a/crates/exec-harness/src/main.rs b/crates/exec-harness/src/main.rs index f637729b..05d31d80 100644 --- a/crates/exec-harness/src/main.rs +++ b/crates/exec-harness/src/main.rs @@ -1,13 +1,11 @@ +use crate::prelude::*; use crate::walltime::WalltimeResults; -use anyhow::Context; -use anyhow::Result; -use anyhow::bail; use clap::Parser; use codspeed::instrument_hooks::InstrumentHooks; -use codspeed::walltime_results::WalltimeBenchmark; +use runner_shared::walltime_results::WalltimeBenchmark; use std::path::PathBuf; -use std::process; +mod prelude; mod walltime; #[derive(Parser, Debug)] @@ -21,11 +19,19 @@ struct Args { #[arg(long)] name: Option, + #[command(flatten)] + execution_args: walltime::WalltimeExecutionArgs, + /// The command and arguments to execute command: Vec, } fn main() -> Result<()> { + env_logger::builder() + .parse_env(env_logger::Env::new().filter_or("CODSPEED_LOG", "info")) + .format_timestamp(None) + .init(); + let args = Args::parse(); if args.command.is_empty() { @@ -49,46 +55,18 @@ fn main() -> Result<()> { .set_integration("codspeed-rust", env!("CARGO_PKG_VERSION")) .unwrap(); - const NUM_ITERATIONS: usize = 1; - let mut times_per_round_ns = Vec::with_capacity(NUM_ITERATIONS); - - hooks.start_benchmark().unwrap(); - for _ in 0..NUM_ITERATIONS { - // Spawn the command - let mut child = process::Command::new(&args.command[0]) - .args(&args.command[1..]) - .spawn() - .context("Failed to spawn command")?; - - // Start monotonic timer for this iteration - let bench_start = InstrumentHooks::current_timestamp(); - - // Wait for the process to complete - let status = child.wait().context("Failed to wait for command")?; - - // Measure elapsed time - let bench_end = InstrumentHooks::current_timestamp(); - hooks.add_benchmark_timestamps(bench_start, bench_end); - - // Exit immediately if any iteration fails - if !status.success() { - bail!("Command failed with exit code: {:?}", status.code()); - } - - // Calculate and store the elapsed time in nanoseconds - let elapsed_ns = (bench_end - bench_start) as u128; - times_per_round_ns.push(elapsed_ns); - } + // Build execution options from CLI args + let execution_options: walltime::ExecutionOptions = args.execution_args.try_into()?; - hooks.stop_benchmark().unwrap(); - hooks.set_executed_benchmark(&bench_uri).unwrap(); + let times_per_round_ns = + walltime::perform(bench_uri.clone(), args.command, &execution_options)?; // Collect walltime results let max_time_ns = times_per_round_ns.iter().copied().max(); let walltime_benchmark = WalltimeBenchmark::from_runtime_data( bench_name.clone(), bench_uri.clone(), - vec![1; NUM_ITERATIONS], + vec![1; times_per_round_ns.len()], times_per_round_ns, max_time_ns, ); diff --git a/crates/exec-harness/src/prelude.rs b/crates/exec-harness/src/prelude.rs new file mode 100644 index 00000000..d72bacc4 --- /dev/null +++ b/crates/exec-harness/src/prelude.rs @@ -0,0 +1,6 @@ +pub use anyhow::Context; +pub use anyhow::Result; +pub use anyhow::bail; + +#[allow(unused_imports)] +pub use log::{debug, error, info, trace, warn}; diff --git a/crates/exec-harness/src/walltime.rs b/crates/exec-harness/src/walltime.rs deleted file mode 100644 index 6c373745..00000000 --- a/crates/exec-harness/src/walltime.rs +++ /dev/null @@ -1,63 +0,0 @@ -use anyhow::Context; -use anyhow::Result; -use codspeed::walltime_results::WalltimeBenchmark; -use serde::Deserialize; -use serde::Serialize; -use std::path::Path; - -#[derive(Debug, Serialize, Deserialize)] -struct Instrument { - 
#[serde(rename = "type")] - type_: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct Creator { - name: String, - version: String, - pid: u32, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct WalltimeResults { - creator: Creator, - instrument: Instrument, - benchmarks: Vec, -} - -impl WalltimeResults { - pub fn from_benchmarks(benchmarks: Vec) -> Result { - Ok(WalltimeResults { - instrument: Instrument { - type_: "walltime".to_string(), - }, - creator: Creator { - // TODO: Stop impersonating codspeed-rust 🥸 - name: "codspeed-rust".to_string(), - version: env!("CARGO_PKG_VERSION").to_string(), - pid: std::process::id(), - }, - benchmarks, - }) - } - - pub fn save_to_file>(&self, profile_folder: P) -> Result<()> { - let results_path = { - let results_dir = profile_folder.as_ref().join("results"); - std::fs::create_dir_all(&results_dir).with_context(|| { - format!( - "Failed to create results directory: {}", - results_dir.display() - ) - })?; - - results_dir.join(format!("{}.json", self.creator.pid)) - }; - - let file = std::fs::File::create(&results_path) - .with_context(|| format!("Failed to create file: {}", results_path.display()))?; - serde_json::to_writer_pretty(file, &self) - .with_context(|| format!("Failed to write JSON to file: {}", results_path.display()))?; - Ok(()) - } -} diff --git a/crates/exec-harness/src/walltime/config.rs b/crates/exec-harness/src/walltime/config.rs new file mode 100644 index 00000000..fe12f057 --- /dev/null +++ b/crates/exec-harness/src/walltime/config.rs @@ -0,0 +1,496 @@ +use crate::prelude::*; +use std::time::Duration; + +const DEFAULT_WARMUP_TIME_NS: u64 = 1_000_000_000; // 1 second +/// Default maximum time if no constraints are provided +const DEFAULT_MAX_TIME_NS: u64 = 3_000_000_000; // 3 seconds + +/// Parse a duration string into nanoseconds +/// Supports humantime format: "1s", "500ms", "1.5s", "2m", "1h", etc. +/// Also supports pure numbers interpreted as seconds (e.g., "2" = 2s, "1.5" = 1.5s) +fn parse_duration_to_ns(s: &str) -> Result { + let s = s.trim(); + + // Try parsing as pure number first (interpret as seconds) + if let Ok(seconds) = s.parse::() { + return Ok((seconds * 1_000_000_000.0) as u64); + } + + // Try parsing with humantime + let duration: Duration = humantime::parse_duration(s) + .with_context(|| format!("Invalid duration format: '{s}'. Expected format like '1s', '500ms', '2m', '1h' or a number in seconds"))?; + + Ok(duration.as_nanos() as u64) +} + +/// Arguments for walltime execution configuration +/// +/// ⚠️ Make sure to update WalltimeExecutionArgs::to_cli_args() when fields change, else the runner +/// will not properly forward arguments +#[derive(Debug, Clone, Default, clap::Args)] +pub struct WalltimeExecutionArgs { + /// Duration of the warmup phase before measurement starts. + /// During warmup, the benchmark runs to stabilize performance (e.g., JIT compilation, cache warming). + /// Set to "0s" or "0" to disable warmup. + /// + /// Format: duration string (e.g., "1s", "500ms", "1.5s", "2m") or number in seconds (e.g., "1", "0.5") + /// Default: 1s + #[arg(long, value_name = "DURATION")] + pub warmup_time: Option, + + /// Maximum total time to spend running benchmarks (includes warmup). + /// Execution stops when this time is reached, even if min_rounds is not satisfied. + /// Cannot be used together with --min-rounds. 
+ /// + /// Format: duration string (e.g., "3s", "15s", "1m") or number in seconds (e.g., "3", "15") + /// Default: 3s if no other constraints are set, 0 (unlimited) if one of min_time, max_rounds, + /// or min_rounds is set + #[arg(long, value_name = "DURATION", conflicts_with = "min_rounds")] + pub max_time: Option, + + /// Minimum total time to spend running benchmarks (excludes warmup). + /// Ensures benchmarks run for at least this duration for statistical accuracy. + /// Cannot be used together with --max-rounds. + /// + /// Format: duration string (e.g., "1s", "500ms") or number in seconds (e.g., "1", "0.5") + /// Default: undefined (no minimum) + #[arg(long, value_name = "DURATION", conflicts_with = "max_rounds")] + pub min_time: Option, + + /// Maximum number of benchmark iterations (rounds) to perform. + /// Execution stops after this many rounds, even if max_time is not reached. + /// Cannot be used together with --min-time. + /// + /// Format: positive integer + /// Default: undefined (determined by timing constraints) + #[arg(long, value_name = "COUNT", conflicts_with = "min_time")] + pub max_rounds: Option, + + /// Minimum number of benchmark iterations (rounds) to perform. + /// Ensures at least this many rounds are executed for statistical accuracy. + /// Cannot be used together with --max-time. + /// + /// Format: positive integer + /// Default: undefined (determined by timing constraints) + #[arg(long, value_name = "COUNT", conflicts_with = "max_time")] + pub min_rounds: Option, +} + +impl WalltimeExecutionArgs { + /// Convert WalltimeExecutionArgs back to CLI argument strings + /// + /// Unfortunately, clap does not provide a built-in way to serialize args back to CLI format, + // Clippy is very confused since this is used in the runner, but not in the binary of exec-harness + #[allow(dead_code)] + pub fn to_cli_args(&self) -> Vec { + let mut args = Vec::new(); + + if let Some(warmup) = &self.warmup_time { + args.push("--warmup-time".to_string()); + args.push(warmup.clone()); + } + + if let Some(max_time) = &self.max_time { + args.push("--max-time".to_string()); + args.push(max_time.clone()); + } + + if let Some(min_time) = &self.min_time { + args.push("--min-time".to_string()); + args.push(min_time.clone()); + } + + if let Some(max_rounds) = &self.max_rounds { + args.push("--max-rounds".to_string()); + args.push(max_rounds.to_string()); + } + + if let Some(min_rounds) = &self.min_rounds { + args.push("--min-rounds".to_string()); + args.push(min_rounds.to_string()); + } + + args + } +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum RoundOrTime { + /// Explicit number of rounds + Rounds(u64), + /// Explicit time in nanoseconds + TimeNs(u64), + /// Both rounds and time specified. The most restrictive will be used, e.g the smaller number + /// of rounds or shorter time for min boundaries. 
+ Both { rounds: u64, time_ns: u64 }, +} + +#[derive(Debug)] +pub struct ExecutionOptions { + pub(crate) warmup_time_ns: u64, + pub(crate) min: Option, + pub(crate) max: Option, +} + +impl TryFrom for ExecutionOptions { + type Error = anyhow::Error; + + /// Convert WalltimeExecutionArgs to ExecutionOptions with validation + /// + /// Check that the input is coherent with rules + /// - 1: Cannot mix time-based and round-based constraints for the opposite bounds + /// - 2: min_xxx cannot be greater than max_xxx + /// - 3: If warmup is disabled, must specify at least one rounds bound explicitly + fn try_from(args: WalltimeExecutionArgs) -> Result { + // Parse duration strings + let warmup_time_ns = args + .warmup_time + .as_ref() + .map(|s| parse_duration_to_ns(s)) + .transpose() + .context("Invalid warmup_time")?; + + let max_time_ns = args + .max_time + .as_ref() + .map(|s| parse_duration_to_ns(s)) + .transpose() + .context("Invalid max_time")? + .unwrap_or_else(|| { + // No max_time provided, use default only if no round-based constraints are set + if args.max_rounds.is_some() || args.min_rounds.is_some() || args.min_time.is_some() + { + 0 + } else { + DEFAULT_MAX_TIME_NS + } + }); + + let min_time_ns = args + .min_time + .as_ref() + .map(|s| parse_duration_to_ns(s)) + .transpose() + .context("Invalid min_time")?; + + // Rule 1: Cannot mix time-based and round-based constraints for the opposite bounds + if min_time_ns.is_some() && args.max_rounds.is_some() { + bail!("Cannot use both min_time and max_rounds. Choose one minimum constraint."); + } + + if max_time_ns > 0 && args.min_rounds.is_some() { + bail!("Cannot use both max_time and min_rounds. Choose one maximum constraint."); + } + + // Rule 2: min_xxx cannot be greater than max_xxx + if max_time_ns > 0 { + if let Some(min) = min_time_ns { + if min > max_time_ns { + bail!( + "min_time ({:.2}s) cannot be greater than max_time ({:.2}s)", + min as f64 / 1_000_000_000.0, + max_time_ns as f64 / 1_000_000_000.0 + ); + } + } + } + + if let (Some(min), Some(max)) = (args.min_rounds, args.max_rounds) { + if min > max { + bail!("min_rounds ({min}) cannot be greater than max_rounds ({max})"); + } + } + + // Rule 3: If warmup is disabled, must specify at least one rounds bound explicitly + if warmup_time_ns == Some(0) && args.min_rounds.is_none() && args.max_rounds.is_none() { + bail!( + "When warmup_time is 0, you must specify either min_rounds or max_rounds. \ + Without warmup, the number of iterations cannot be determined automatically." 
+ ); + } + + // Build min/max using RoundOrTime enum + let min = match (args.min_rounds, min_time_ns) { + (Some(rounds), None) => Some(RoundOrTime::Rounds(rounds)), + (None, Some(time_ns)) => Some(RoundOrTime::TimeNs(time_ns)), + (Some(rounds), Some(time_ns)) => Some(RoundOrTime::Both { rounds, time_ns }), + (None, None) => None, + }; + + let max = match (args.max_rounds, max_time_ns) { + (Some(rounds), 0) => Some(RoundOrTime::Rounds(rounds)), + (Some(rounds), time_ns) => Some(RoundOrTime::Both { rounds, time_ns }), + (None, 0) => None, + (None, time_ns) => Some(RoundOrTime::TimeNs(time_ns)), + }; + + Ok(Self { + warmup_time_ns: warmup_time_ns.unwrap_or(DEFAULT_WARMUP_TIME_NS), + min, + max, + }) + } +} + +impl Default for ExecutionOptions { + fn default() -> Self { + ExecutionOptions { + warmup_time_ns: DEFAULT_WARMUP_TIME_NS, + min: None, + max: Some(RoundOrTime::TimeNs(DEFAULT_MAX_TIME_NS)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_duration_to_ns_pure_numbers() { + // Pure numbers should be interpreted as seconds + assert_eq!(parse_duration_to_ns("1").unwrap(), 1_000_000_000); // 1 second + assert_eq!(parse_duration_to_ns("2").unwrap(), 2_000_000_000); // 2 seconds + assert_eq!(parse_duration_to_ns("0.5").unwrap(), 500_000_000); // 0.5 seconds + assert_eq!(parse_duration_to_ns("0").unwrap(), 0); + assert_eq!(parse_duration_to_ns("1.5").unwrap(), 1_500_000_000); // 1.5 seconds + } + + #[test] + fn test_parse_duration_to_ns_humantime_formats() { + // Humantime format durations + assert_eq!(parse_duration_to_ns("1s").unwrap(), 1_000_000_000); + assert_eq!(parse_duration_to_ns("500ms").unwrap(), 500_000_000); + assert_eq!(parse_duration_to_ns("2m").unwrap(), 120_000_000_000); + assert_eq!(parse_duration_to_ns("1h").unwrap(), 3_600_000_000_000); + + // Fractional values + assert_eq!(parse_duration_to_ns("1.5s").unwrap(), 1_500_000_000); + assert_eq!(parse_duration_to_ns("0.5s").unwrap(), 500_000_000); + } + + #[test] + fn test_parse_duration_to_ns_whitespace() { + // Should handle whitespace + assert_eq!(parse_duration_to_ns(" 1s ").unwrap(), 1_000_000_000); + assert_eq!(parse_duration_to_ns(" 500ms ").unwrap(), 500_000_000); + } + + #[test] + fn test_parse_duration_to_ns_invalid() { + // Invalid formats should error + assert!(parse_duration_to_ns("invalid").is_err()); + assert!(parse_duration_to_ns("1x").is_err()); + assert!(parse_duration_to_ns("").is_err()); + } + + #[test] + fn test_execution_options_from_args() { + // Test creating ExecutionOptions from CLI args + let opts: ExecutionOptions = WalltimeExecutionArgs { + warmup_time: Some("2s".to_string()), + max_time: Some("10s".to_string()), + min_time: None, + max_rounds: Some(10), + min_rounds: None, + } + .try_into() + .unwrap(); + + assert_eq!(opts.warmup_time_ns, 2_000_000_000); + assert!(matches!( + opts.max, + Some(RoundOrTime::Both { + rounds: 10, + time_ns: 10_000_000_000 + }) + )); + assert!(opts.min.is_none()); + } + + #[test] + fn test_execution_options_from_args_none() { + // Test with all None values (should use defaults) + let opts: ExecutionOptions = WalltimeExecutionArgs::default().try_into().unwrap(); + + assert_eq!(opts.warmup_time_ns, DEFAULT_WARMUP_TIME_NS); + assert_eq!(opts.max, Some(RoundOrTime::TimeNs(DEFAULT_MAX_TIME_NS))); + assert!(opts.min.is_none()); + } + + #[test] + fn test_execution_options_from_args_invalid_duration() { + // Test with invalid duration string + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("invalid".to_string()), + max_time: 
None, + min_time: None, + max_rounds: None, + min_rounds: None, + } + .try_into(); + + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("Invalid warmup_time") + ); + } + + // Business rule validation tests + + #[test] + fn test_validation_cannot_mix_min_time_and_max_rounds() { + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: Some("10s".to_string()), + min_time: Some("2s".to_string()), + max_rounds: None, + min_rounds: Some(5), + } + .try_into(); + + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("Cannot use both max_time and min_rounds"), + "Expected error about mixing min_time and min_rounds, got: {err}" + ); + } + + #[test] + fn test_validation_cannot_mix_max_time_and_min_rounds() { + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: Some("10s".to_string()), + min_time: None, + max_rounds: None, + min_rounds: Some(5), + } + .try_into(); + + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("Cannot use both max_time and min_rounds"), + "Expected error about mixing max_time and max_rounds, got: {err}" + ); + } + + #[test] + fn test_validation_min_time_greater_than_max_time() { + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: Some("5s".to_string()), + min_time: Some("10s".to_string()), // min > max! + max_rounds: None, + min_rounds: None, + } + .try_into(); + + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("min_time") && err.contains("cannot be greater than max_time"), + "Expected error about min_time > max_time, got: {err}" + ); + } + + #[test] + fn test_validation_min_rounds_greater_than_max_rounds() { + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: None, + min_time: None, + max_rounds: Some(10), + min_rounds: Some(50), // min > max! 
+ } + .try_into(); + + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("min_rounds") && err.contains("cannot be greater than max_rounds"), + "Expected error about min_rounds > max_rounds, got: {err}" + ); + } + + #[test] + fn test_validation_no_warmup_requires_rounds() { + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("0".to_string()), // No warmup + max_time: Some("10s".to_string()), + min_time: None, + max_rounds: None, // No rounds specified + min_rounds: None, + } + .try_into(); + + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("warmup_time is 0") && err.contains("min_rounds or max_rounds"), + "Expected error about needing rounds when warmup is 0, got: {err}" + ); + } + + #[test] + fn test_validation_valid_combinations() { + // Valid: max_time + max_rounds + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: Some("10s".to_string()), + min_time: None, + max_rounds: Some(5), + min_rounds: None, + } + .try_into(); + assert!(result.is_ok()); + + // Valid: min_time + min_rounds + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: None, + min_time: Some("2s".to_string()), + max_rounds: None, + min_rounds: Some(100), + } + .try_into(); + assert!(result.is_ok()); + + // Valid: max_time + min_time (with min < max) + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: Some("10s".to_string()), + min_time: Some("2s".to_string()), + max_rounds: None, + min_rounds: None, + } + .try_into(); + assert!(result.is_ok()); + + // Valid: max_rounds + min_rounds (with min < max) + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("1s".to_string()), + max_time: None, + min_time: None, + max_rounds: Some(100), + min_rounds: Some(10), + } + .try_into(); + assert!(result.is_ok()); + + // Valid: no warmup with rounds specified + let result: Result = WalltimeExecutionArgs { + warmup_time: Some("0".to_string()), + max_time: None, + min_time: None, + max_rounds: Some(50), + min_rounds: None, + } + .try_into(); + assert!(result.is_ok()); + } +} diff --git a/crates/exec-harness/src/walltime/mod.rs b/crates/exec-harness/src/walltime/mod.rs new file mode 100644 index 00000000..0fb9bcc4 --- /dev/null +++ b/crates/exec-harness/src/walltime/mod.rs @@ -0,0 +1,162 @@ +mod config; + +pub use config::ExecutionOptions; +use config::RoundOrTime; +pub use config::WalltimeExecutionArgs; +pub use runner_shared::walltime_results::WalltimeResults; + +use crate::prelude::*; +use codspeed::instrument_hooks::InstrumentHooks; +use std::process::Command; + +pub fn perform( + bench_uri: String, + command: Vec, + config: &ExecutionOptions, +) -> Result> { + let warmup_time_ns = config.warmup_time_ns; + let hooks = InstrumentHooks::instance(); + + let do_one_round = |times_per_round_ns: &mut Vec| { + let mut child = Command::new(&command[0]) + .args(&command[1..]) + .spawn() + .context("Failed to execute command")?; + let bench_round_start_ts_ns = InstrumentHooks::current_timestamp(); + let status = child + .wait() + .context("Failed to wait for command to finish")?; + + let bench_round_end_ts_ns = InstrumentHooks::current_timestamp(); + hooks.add_benchmark_timestamps(bench_round_start_ts_ns, bench_round_end_ts_ns); + + if !status.success() { + bail!("Command exited with non-zero status: {status}"); + } + + times_per_round_ns.push((bench_round_end_ts_ns - 
bench_round_start_ts_ns) as u128); + + Ok(()) + }; + + // Compute the number of rounds to perform, either from warmup or directly from config + let rounds_to_perform = if warmup_time_ns > 0 { + let mut warmup_times_ns = Vec::new(); + let warmup_start_ts_ns = InstrumentHooks::current_timestamp(); + + hooks.start_benchmark().unwrap(); + while InstrumentHooks::current_timestamp() < warmup_start_ts_ns + warmup_time_ns { + do_one_round(&mut warmup_times_ns)?; + } + hooks.stop_benchmark().unwrap(); + let warmup_end_ts_ns = InstrumentHooks::current_timestamp(); + + if let [single_warmup_round_duration_ns] = warmup_times_ns.as_slice() { + match config.max { + Some(RoundOrTime::TimeNs(time_ns)) | Some(RoundOrTime::Both { time_ns, .. }) => { + if time_ns <= *single_warmup_round_duration_ns as u64 { + info!( + "Warmup duration ({single_warmup_round_duration_ns} ns) exceeded or met max_time ({time_ns} ns). No more rounds will be performed." + ); + // Mark benchmark as executed for the runner to register + hooks.set_executed_benchmark(&bench_uri).unwrap(); + return Ok(warmup_times_ns); + } + } + _ => { /* No max time constraint */ } + } + } + + info!("Completed {} warmup rounds", warmup_times_ns.len()); + + let average_time_per_round_ns = + (warmup_end_ts_ns - warmup_start_ts_ns) / warmup_times_ns.len() as u64; + + // Extract min rounds from config + let actual_min_rounds = match &config.min { + Some(RoundOrTime::Rounds(rounds)) => Some(*rounds), + Some(RoundOrTime::TimeNs(time_ns)) => { + Some(((time_ns + average_time_per_round_ns) / average_time_per_round_ns) + 1) + } + Some(RoundOrTime::Both { rounds, time_ns }) => { + let rounds_from_time = + ((time_ns + average_time_per_round_ns) / average_time_per_round_ns) + 1; + Some((*rounds).max(rounds_from_time)) + } + None => None, + }; + + // Extract max rounds from config + let actual_max_rounds = match &config.max { + Some(RoundOrTime::Rounds(rounds)) => Some(*rounds), + Some(RoundOrTime::TimeNs(time_ns)) => { + Some((time_ns + average_time_per_round_ns) / average_time_per_round_ns) + } + Some(RoundOrTime::Both { rounds, time_ns }) => { + let rounds_from_time = + (time_ns + average_time_per_round_ns) / average_time_per_round_ns; + Some((*rounds).min(rounds_from_time)) + } + None => None, + }; + + match (actual_min_rounds, actual_max_rounds) { + (Some(min), Some(max)) if min > max => { + warn!( + "Computed min rounds ({min}) is greater than max rounds ({max}). Using max rounds.", + ); + max + } + (Some(min), Some(max)) => (min + max) / 2, + (None, Some(max)) => max, + (Some(min), None) => min, + (None, None) => { + bail!("Unable to determine number of rounds to perform"); + } + } + } else { + // No warmup, extract rounds directly from config + match (&config.max, &config.min) { + (Some(RoundOrTime::Rounds(rounds)), _) | (_, Some(RoundOrTime::Rounds(rounds))) => { + *rounds + } + (Some(RoundOrTime::Both { rounds, .. }), _) + | (_, Some(RoundOrTime::Both { rounds, .. })) => *rounds, + _ => bail!("Either max_rounds or min_rounds must be specified when warmup is disabled"), + } + }; + + info!("Performing {rounds_to_perform} rounds"); + + let round_start_ts_ns = InstrumentHooks::current_timestamp(); + let mut times_per_round_ns = Vec::with_capacity(rounds_to_perform.try_into().unwrap()); + + hooks.start_benchmark().unwrap(); + for round in 0..rounds_to_perform { + do_one_round(&mut times_per_round_ns)?; + + // Check if we've exceeded max time + let max_time_ns = match &config.max { + Some(RoundOrTime::TimeNs(time_ns)) | Some(RoundOrTime::Both { time_ns, .. 
}) => { + Some(*time_ns) + } + _ => None, + }; + + if let Some(max_time_ns) = max_time_ns { + let current_round = round + 1; + if current_round < rounds_to_perform + && InstrumentHooks::current_timestamp() - round_start_ts_ns > max_time_ns + { + info!( + "Prematurely reached maximum time limit after {current_round}/{rounds_to_perform} rounds, stopping here" + ); + break; + } + } + } + hooks.stop_benchmark().unwrap(); + hooks.set_executed_benchmark(&bench_uri).unwrap(); + + Ok(times_per_round_ns) +} diff --git a/crates/exec-harness/tests/integration_test.rs b/crates/exec-harness/tests/integration_test.rs new file mode 100644 index 00000000..0d2e541a --- /dev/null +++ b/crates/exec-harness/tests/integration_test.rs @@ -0,0 +1,323 @@ +use anyhow::Result; +use tempfile::TempDir; + +// Helper to create a simple sleep 100ms command +fn sleep_cmd() -> Vec<String> { + vec!["sleep".to_string(), "0.1".to_string()] +} + +/// Test that a command runs exactly the specified number of max_rounds +#[test] +fn test_max_rounds_without_warmup() -> Result<()> { + // Create execution options with no warmup and fixed rounds + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("0s".to_string()), // No warmup + max_time: None, + min_time: None, + max_rounds: Some(10), // Exactly 10 rounds + min_rounds: None, + }, + )?; + + let times = exec_harness::walltime::perform( + "test::max_rounds_no_warmup".to_string(), + sleep_cmd(), + &exec_opts, + )?; + + // Should run exactly 10 times + assert_eq!(times.len(), 10, "Expected exactly 10 iterations"); + + Ok(()) +} + +/// Test that a command runs between min and max rounds +#[test] +fn test_min_max_rounds_with_warmup() -> Result<()> { + // Create execution options with warmup and min/max rounds + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("100ms".to_string()), // Short warmup + max_time: None, + min_time: None, + max_rounds: Some(50), // Max 50 rounds + min_rounds: Some(5), // Min 5 rounds + }, + )?; + + let times = exec_harness::walltime::perform( + "test::min_max_rounds_warmup".to_string(), + sleep_cmd(), + &exec_opts, + )?; + + // Should run between 5 and 50 times + assert!( + times.len() >= 5, + "Expected at least 5 iterations, got {}", + times.len() + ); + assert!( + times.len() <= 50, + "Expected at most 50 iterations, got {}", + times.len() + ); + + Ok(()) +} + +/// Test that max_time constraint is respected +#[test] +fn test_max_time_constraint() -> Result<()> { + // Use a very short max_time to ensure we don't run too many iterations + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("50ms".to_string()), // Short warmup + max_time: Some("500ms".to_string()), // Very short max time + min_time: None, + max_rounds: None, + min_rounds: None, + }, + )?; + + let times = + exec_harness::walltime::perform("test::max_time".to_string(), sleep_cmd(), &exec_opts)?; + + // Should have run at least 1 time, but not an excessive amount + assert!(!times.is_empty(), "Expected at least 1 iteration"); + assert!( + times.len() < 6, + "Expected fewer than 6 iterations due to max_time constraint, got {}", + times.len() + ); + + Ok(()) +} + +/// Test that min_rounds is satisfied even with short min_time +#[test] +fn test_min_rounds_and_min_time() -> Result<()> { + // Set min_rounds and min_time + let exec_opts =
exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("10ms".to_string()), // Very short warmup + max_time: None, + min_time: Some("1ms".to_string()), + max_rounds: None, + min_rounds: Some(15), + }, + )?; + + let times = exec_harness::walltime::perform( + "test::min_rounds_priority".to_string(), + sleep_cmd(), + &exec_opts, + )?; + + // Should satisfy min_rounds requirement + assert!( + times.len() >= 15, + "Expected at least 15 iterations (min_rounds), got {}", + times.len() + ); + + Ok(()) +} + +/// Test that warmup is actually performed (results in non-zero warmup phase) +#[test] +fn test_warmup_is_performed() -> Result<()> { + // With warmup enabled + let exec_opts_with_warmup = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("200ms".to_string()), // Significant warmup time + max_time: Some("500ms".to_string()), + min_time: None, + max_rounds: None, + min_rounds: None, + }, + )?; + + let times_with_warmup = exec_harness::walltime::perform( + "test::with_warmup".to_string(), + sleep_cmd(), + &exec_opts_with_warmup, + )?; + + // With warmup disabled + let exec_opts_no_warmup = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("0s".to_string()), // No warmup + max_time: None, + min_time: None, + max_rounds: Some(5), // Fixed 5 rounds + min_rounds: None, + }, + )?; + + let times_no_warmup = exec_harness::walltime::perform( + "test::no_warmup".to_string(), + sleep_cmd(), + &exec_opts_no_warmup, + )?; + + // Both should complete successfully + assert!(!times_with_warmup.is_empty()); + assert_eq!(times_no_warmup.len(), 5); + + Ok(()) +} + +/// Test with a slower command to verify timing works correctly +#[test] +fn test_with_sleep_command() -> Result<()> { + // Use a command that takes a measurable amount of time + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("0s".to_string()), // No warmup for faster test + max_time: None, + min_time: None, + max_rounds: Some(3), // Just 3 rounds + min_rounds: None, + }, + )?; + + let times = exec_harness::walltime::perform( + "test::sleep_command".to_string(), + vec!["sleep".to_string(), "0.01".to_string()], // 10ms sleep + &exec_opts, + )?; + + // Should run exactly 3 times + assert_eq!(times.len(), 3, "Expected exactly 3 iterations"); + + // Each iteration should take at least 10ms (10_000_000 ns) + for (i, &time_ns) in times.iter().enumerate() { + assert!( + time_ns >= 10_000_000, + "Iteration {i} took only {time_ns}ns, expected at least 10ms" + ); + } + + Ok(()) +} + +/// Test that invalid command exits early +#[test] +fn test_invalid_command_fails() { + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("0s".to_string()), + max_time: None, + min_time: None, + max_rounds: Some(5), + min_rounds: None, + }, + ) + .unwrap(); + + // Try to run a command that doesn't exist + let result = exec_harness::walltime::perform( + "test::invalid_command".to_string(), + vec!["this_command_definitely_does_not_exist_12345".to_string()], + &exec_opts, + ); + + // Should fail + assert!(result.is_err(), "Expected error for invalid command"); +} + +/// Test that pure numbers are interpreted as seconds +#[test] +fn test_pure_numbers_as_seconds() -> Result<()> { + // Use pure numbers 
which should be interpreted as seconds + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("0.1".to_string()), // 0.1 seconds warmup + max_time: Some("1".to_string()), // 1 second max time + min_time: None, + max_rounds: None, + min_rounds: None, + }, + )?; + + let times = exec_harness::walltime::perform( + "test::pure_numbers_seconds".to_string(), + sleep_cmd(), + &exec_opts, + )?; + + // Should have run at least once + assert!(!times.is_empty(), "Expected at least one iteration"); + + // Test fractional seconds too + let exec_opts_fractional = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("0.1".to_string()), // 0.1 seconds warmup + max_time: Some("0.5".to_string()), // 0.5 seconds max time + min_time: None, + max_rounds: None, + min_rounds: None, + }, + )?; + + let times_fractional = exec_harness::walltime::perform( + "test::fractional_seconds".to_string(), + sleep_cmd(), + &exec_opts_fractional, + )?; + + assert!( + !times_fractional.is_empty(), + "Expected at least one iteration with fractional seconds" + ); + + Ok(()) +} + +/// Test that when a warmup run exceeds max_time, the command is only run once +#[test] +fn test_single_long_execution() -> Result<()> { + // Set max_time very low and warmup time high to force single execution + let exec_opts = exec_harness::walltime::ExecutionOptions::try_from( + exec_harness::walltime::WalltimeExecutionArgs { + warmup_time: Some("100ms".to_string()), + max_time: Some("100ms".to_string()), // Low max time, shorter than command duration + min_time: None, + max_rounds: None, + min_rounds: None, + }, + )?; + + // Create a temporary directory for the test + let tmpdir = TempDir::new()?; + + // Create a command that sleeps and creates a directory that must not exist + // This will fail if executed twice because the directory will already exist + let test_dir = tmpdir.path().join("lock_file"); + let cmd = format!("sleep 1 && mkdir {}", test_dir.display()); + + let times = exec_harness::walltime::perform( + "test::single_long_execution".to_string(), + vec!["sh".to_string(), "-c".to_string(), cmd.clone()], + &exec_opts, + )?; + + // Should have run exactly once + assert_eq!(times.len(), 1, "Expected exactly one iteration"); + + // Sanity check: any subsequent run should fail due to directory existing, to avoid false + // positives + assert!( + exec_harness::walltime::perform( + "test::single_long_execution".to_string(), + vec!["sh".to_string(), "-c".to_string(), cmd], + &exec_opts, + ) + .is_err(), + "Expected failure on second execution due to existing directory" + ); + + Ok(()) +} diff --git a/crates/memtrack/Cargo.toml b/crates/memtrack/Cargo.toml index 66ce9487..2d739890 100644 --- a/crates/memtrack/Cargo.toml +++ b/crates/memtrack/Cargo.toml @@ -40,7 +40,7 @@ vmlinux = { git = "https://github.com/libbpf/vmlinux.h.git", rev = "991dd4b8dfd8 bindgen = "0.71" [dev-dependencies] -tempfile = "3.8" +tempfile = { workspace = true } rstest = "0.21" test-log = "0.2" diff --git a/crates/runner-shared/src/walltime_results.rs b/crates/runner-shared/src/walltime_results.rs index ad7a6e73..d630bfa5 100644 --- a/crates/runner-shared/src/walltime_results.rs +++ b/crates/runner-shared/src/walltime_results.rs @@ -44,16 +44,123 @@ pub struct WalltimeBenchmark { pub stats: BenchmarkStats, } +impl WalltimeBenchmark { + pub fn from_runtime_data( + name: String, + uri: String, + iters_per_round: Vec, + 
times_per_round_ns: Vec, + _max_time_ns: Option, + ) -> Self { + // Calculate statistics + let mut times_sorted: Vec = times_per_round_ns.iter().map(|&t| t as f64).collect(); + times_sorted.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + let rounds = times_sorted.len() as u64; + let total_time: f64 = times_sorted.iter().sum(); + let mean_ns = if rounds > 0 { + total_time / rounds as f64 + } else { + 0.0 + }; + + let min_ns = times_sorted.first().copied().unwrap_or(0.0); + let max_ns = times_sorted.last().copied().unwrap_or(0.0); + + // Calculate percentiles + let median_ns = if rounds > 0 { + let mid = rounds as usize / 2; + if rounds % 2 == 0 { + (times_sorted[mid - 1] + times_sorted[mid]) / 2.0 + } else { + times_sorted[mid] + } + } else { + 0.0 + }; + + let q1_ns = if rounds > 0 { + let q1_idx = (rounds as usize / 4).max(0); + times_sorted[q1_idx] + } else { + 0.0 + }; + + let q3_ns = if rounds > 0 { + let q3_idx = (3 * rounds as usize / 4).min(times_sorted.len() - 1); + times_sorted[q3_idx] + } else { + 0.0 + }; + + // Calculate standard deviation + let stdev_ns = if rounds > 1 { + let variance: f64 = times_sorted + .iter() + .map(|&t| { + let diff = t - mean_ns; + diff * diff + }) + .sum::() + / (rounds - 1) as f64; + variance.sqrt() + } else { + 0.0 + }; + + // Calculate outliers (simplified - using IQR method) + let iqr = q3_ns - q1_ns; + let lower_bound = q1_ns - 1.5 * iqr; + let upper_bound = q3_ns + 1.5 * iqr; + let iqr_outlier_rounds = times_sorted + .iter() + .filter(|&&t| t < lower_bound || t > upper_bound) + .count() as u64; + + // Standard deviation outliers (2 sigma) + let stdev_outlier_rounds = times_sorted + .iter() + .filter(|&&t| (t - mean_ns).abs() > 2.0 * stdev_ns) + .count() as u64; + + let iter_per_round = if !iters_per_round.is_empty() { + iters_per_round[0] + } else { + 1 + }; + + WalltimeBenchmark { + metadata: BenchmarkMetadata { name, uri }, + config: BenchmarkConfig::default(), + stats: BenchmarkStats { + min_ns, + max_ns, + mean_ns, + stdev_ns, + q1_ns, + median_ns, + q3_ns, + rounds, + total_time, + iqr_outlier_rounds, + stdev_outlier_rounds, + iter_per_round, + warmup_iters: 0, + }, + } + } +} + #[derive(Debug, Serialize, Deserialize)] pub struct Instrument { #[serde(rename = "type")] - type_: String, + pub type_: String, } #[derive(Debug, Serialize, Deserialize)] pub struct Creator { - name: String, - version: String, + pub name: String, + pub version: String, pub pid: u32, } @@ -63,3 +170,47 @@ pub struct WalltimeResults { pub instrument: Instrument, pub benchmarks: Vec, } + +impl WalltimeResults { + pub fn from_benchmarks(benchmarks: Vec) -> anyhow::Result { + Ok(WalltimeResults { + instrument: Instrument { + type_: "walltime".to_string(), + }, + creator: Creator { + // TODO: Stop impersonating codspeed-rust 🥸 + name: "codspeed-rust".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + pid: std::process::id(), + }, + benchmarks, + }) + } + + pub fn save_to_file>(&self, profile_folder: P) -> anyhow::Result<()> { + let results_path = { + let results_dir = profile_folder.as_ref().join("results"); + std::fs::create_dir_all(&results_dir).map_err(|e| { + anyhow::anyhow!( + "Failed to create results directory: {}: {}", + results_dir.display(), + e + ) + })?; + + results_dir.join(format!("{}.json", self.creator.pid)) + }; + + let file = std::fs::File::create(&results_path).map_err(|e| { + anyhow::anyhow!("Failed to create file: {}: {}", results_path.display(), e) + })?; + serde_json::to_writer_pretty(file, &self).map_err(|e| { + anyhow::anyhow!( + 
"Failed to write JSON to file: {}: {}", + results_path.display(), + e + ) + })?; + Ok(()) + } +} diff --git a/src/exec/mod.rs b/src/exec/mod.rs index 48e9e06e..96c41833 100644 --- a/src/exec/mod.rs +++ b/src/exec/mod.rs @@ -20,6 +20,9 @@ pub struct ExecArgs { #[command(flatten)] pub shared: crate::run::ExecAndRunSharedArgs, + #[command(flatten)] + pub walltime_args: exec_harness::walltime::WalltimeExecutionArgs, + /// Optional benchmark name (defaults to command filename) #[arg(long)] pub name: Option, diff --git a/src/executor/config.rs b/src/executor/config.rs index 2ec272b7..1eee92a0 100644 --- a/src/executor/config.rs +++ b/src/executor/config.rs @@ -136,6 +136,8 @@ impl TryFrom for Config { .map_err(|e| anyhow!("Invalid upload URL: {raw_upload_url}, {e}"))?; let wrapped_command = std::iter::once(EXEC_HARNESS_COMMAND.to_owned()) + // Forward exec-harness arguments + .chain(args.walltime_args.to_cli_args()) .chain(args.command) .collect::>() .join(" "); @@ -310,6 +312,7 @@ mod tests { perf_unwinding_mode: None, }, }, + walltime_args: Default::default(), name: None, command: vec!["my-binary".into(), "arg1".into(), "arg2".into()], }; diff --git a/src/executor/mod.rs b/src/executor/mod.rs index 3c812153..1204bc9c 100644 --- a/src/executor/mod.rs +++ b/src/executor/mod.rs @@ -139,6 +139,8 @@ where if let Some(mut mongo_tracer) = mongo_tracer { mongo_tracer.stop().await?; } + end_group!(); + start_opened_group!("Tearing down environment"); executor.teardown(execution_context).await?; execution_context diff --git a/src/executor/wall_time/perf/mod.rs b/src/executor/wall_time/perf/mod.rs index ef31a3ba..ebab89ad 100644 --- a/src/executor/wall_time/perf/mod.rs +++ b/src/executor/wall_time/perf/mod.rs @@ -21,6 +21,7 @@ use libc::pid_t; use perf_executable::get_compression_flags; use perf_executable::get_event_flags; use perf_map::ProcessSymbols; +use rayon::prelude::*; use runner_shared::artifacts::ArtifactExt; use runner_shared::artifacts::ExecutionTimestamps; use runner_shared::debug_info::ModuleDebugInfo; @@ -373,25 +374,26 @@ impl BenchmarkData { (&self.symbols_by_pid, &self.unwind_data_by_pid) }; - for proc_sym in symbols_by_pid.values() { - proc_sym.save_to(&path).unwrap(); - } + let path_ref = path.as_ref(); + info!("Saving symbols addresses"); + symbols_by_pid.par_iter().for_each(|(_, proc_sym)| { + proc_sym.save_to(path_ref).unwrap(); + }); // Collect debug info for each process by looking up file/line for symbols - let mut debug_info_by_pid = HashMap::>::new(); - for (pid, proc_sym) in symbols_by_pid { - debug_info_by_pid - .entry(*pid) - .or_default() - .extend(ProcessDebugInfo::new(proc_sym).modules()); - } - - for (pid, modules) in unwind_data_by_pid { - for module in modules { - module.save_to(&path, *pid).unwrap(); - } - } + info!("Saving debug_info"); + let debug_info_by_pid: HashMap> = symbols_by_pid + .par_iter() + .map(|(pid, proc_sym)| (*pid, ProcessDebugInfo::new(proc_sym).modules())) + .collect(); + + unwind_data_by_pid.par_iter().for_each(|(pid, modules)| { + modules.iter().for_each(|module| { + module.save_to(path_ref, *pid).unwrap(); + }); + }); + info!("Saving metadata"); #[allow(deprecated)] let metadata = PerfMetadata { version: PERF_METADATA_CURRENT_VERSION,