diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ea27031..055934f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -38,6 +38,7 @@ jobs: cargo_flags: - "" - "--no-default-features" + - "--features tracing" - "--all-features" include: # Integration tests are disabled on Windows as they take *way* too diff --git a/CHANGELOG.md b/CHANGELOG.md index b58d0be..0be09bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,103 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +* Sandbox containers are now reused across commands within a single build, + avoiding per-command `docker create`/`docker rm` overhead. Every `Command` + spawned inside a `BuildBuilder::run` closure runs in the same container, + and the container is recreated transparently if a previous command's OOM + kill brought it down or if the container was otherwise stopped. + +* Timed-out sandboxed commands now tear down the reused container before the + next command runs. This prevents abandoned processes left behind by a killed + host-side `docker exec` from racing later commands or leaking filesystem + changes into them. + +* Added the public `Sandbox`, `SandboxStatistics`, and `BuildResult` types, + plus `SandboxBuilder::start` for direct sandbox construction. The + underlying container is created and started before `start` returns, so + docker errors surface there rather than on the first command. + +* Added `Build::statistics()` for taking a snapshot of sandbox statistics + while a build is still running. This can be used for per-step reporting + from inside the build closure, including from `process_lines` callbacks. + +* Added `Command::arg` and relaxed `Command::args`, + `Command::env`, and `Command::current_directory` to accept owned values + directly (`Into<OsString>` / `Into<PathBuf>`) instead of requiring borrowed + slices or references.
+ +* **BREAKING**: `BuildBuilder::run` now returns `BuildResult<R>` instead of + `R`. The result wraps the closure's return value together with + `SandboxStatistics` gathered over the whole build: + + ```rust + let result = build_dir.build(&toolchain, &krate, sandbox).run(|build| { + build.cargo().args(&["test"]).run()?; + Ok(()) + })?; + let peak = result.statistics().memory_peak_bytes(); + let value = result.into_inner(); + ``` + +* **BREAKING**: `Command::run` returns `()` instead of `ProcessStatistics`. + Peak memory is no longer tracked per command; the cumulative maximum + across all commands in the build is exposed via the `BuildResult` + returned from `BuildBuilder::run`: + + ```rust + let result = build_dir.build(&toolchain, &krate, sandbox).run(|build| { + build.cargo().args(&["test"]).run()?; + build.cargo().args(&["doc"]).run()?; + Ok(()) + })?; + let peak = result.statistics().memory_peak_bytes(); + ``` + +* **BREAKING**: `ProcessOutput::memory_peak_bytes` and the + `ProcessStatistics` type were removed. Use + `BuildResult::statistics().memory_peak_bytes()` (or + `Sandbox::statistics()` when using the sandbox API directly). + +* **BREAKING**: `Command::source_dir_mount_kind` moved to + `SandboxBuilder::source_dir_mount_kind`. The mount kind now applies to + every command spawned in the build, since they share one container: + + ```rust + let sandbox = SandboxBuilder::new() + .source_dir_mount_kind(MountKind::ReadWrite); + build_dir.build(&toolchain, &krate, sandbox).run(|build| { + build.cargo().args(&["test"]).run()?; + Ok(()) + })?; + ``` + + With a writable source mount, mutations from an earlier command + persist into all later commands in the same build (and across reuse + of the source directory by later builds) — only opt in if you trust + every step to leave the source in a sensible state.
+ +* **BREAKING**: `Command::new_sandboxed` was renamed to + `Command::new_in_sandbox` and now takes an `Rc<RefCell<Sandbox>>` + produced by `SandboxBuilder::start`, instead of a `SandboxBuilder`. + Most callers should use `BuildBuilder::run` instead; the lower-level + form is: + + ```rust + use std::{cell::RefCell, rc::Rc}; + + let sandbox = Rc::new(RefCell::new( + SandboxBuilder::new().start(&workspace, source_dir, target_dir)?, + )); + Command::new_in_sandbox(&workspace, sandbox, "cargo") + .args(&["test"]) + .run()?; + ``` + + By default the command's working directory is the sandbox's source + directory; an explicit `current_directory(...)` path must point inside + the source directory or it will panic at runtime. + + ## [0.24.0] - 2026-05-12 * make alternate registry support optional to reduce dependencies diff --git a/Cargo.toml b/Cargo.toml index d9d9687..671e548 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,6 @@ tokio = { version = "1.0", features = ["process", "time", "io-util", "rt", "rt-m tokio-stream = { version = "0.1", features = ["io-util"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -scopeguard = "1.0.0" tempfile = "3.0.0" attohttpc = "0.30.1" flate2 = "1" diff --git a/examples/docs-builder.rs b/examples/docs-builder.rs index c928829..373ae32 100644 --- a/examples/docs-builder.rs +++ b/examples/docs-builder.rs @@ -29,7 +29,7 @@ fn main() -> Result<(), Box> { let mut build_dir = workspace.build_dir("docs"); build_dir.build(&toolchain, &krate, sandbox).run(|build| { - build.cargo().args(&["doc", "--no-deps"]).run()?; + build.cargo().arg("doc").arg("--no-deps").run()?; Ok(()) })?; diff --git a/src/build.rs b/src/build.rs index 9c3bfbc..b205a82 100644 --- a/src/build.rs +++ b/src/build.rs @@ -1,8 +1,14 @@ -use crate::cmd::{Command, MountKind, Runnable, SandboxBuilder}; -use crate::prepare::Prepare; -use crate::{Crate, PrepareError, Toolchain, Workspace}; +use crate::{ + Crate, PrepareError, Toolchain, Workspace, + cmd::{ + Command,
Runnable, Sandbox, SandboxBuilder, SandboxStatistics, SandboxStatisticsState, + container_dirs, + }, + prepare::Prepare, +}; use std::path::PathBuf; use std::vec::Vec; +use std::{cell::RefCell, rc::Rc}; #[derive(Clone)] pub(crate) enum CratePatch { @@ -42,6 +48,24 @@ pub struct BuildBuilder<'a> { patches: Vec, } +/// Output of a completed build together with build-level statistics. +pub struct BuildResult { + output: T, + statistics: SandboxStatistics, +} + +impl BuildResult { + /// Return the wrapped build output. + pub fn into_inner(self) -> T { + self.output + } + + /// Borrow the build-level statistics. + pub fn statistics(&self) -> &SandboxStatistics { + &self.statistics + } +} + impl BuildBuilder<'_> { /// Add a git-based patch to this build. /// Patches get added to the crate's Cargo.toml in the `patch.crates-io` table. @@ -111,6 +135,9 @@ impl BuildBuilder<'_> { /// be provided an instance of [`Build`](struct.Build.html) that allows spawning new processes /// inside the sandbox. /// + /// Returns a [`BuildResult`] containing both the closure's return value and build-level + /// statistics gathered across the sandbox lifetime. + /// /// All the state will be kept on disk as long as the closure doesn't exit: after that things /// might be removed. 
/// # Example @@ -124,13 +151,17 @@ impl BuildBuilder<'_> { /// # let krate = Crate::local("".as_ref()); /// # let sandbox = SandboxBuilder::new(); /// let mut build_dir = workspace.build_dir("foo"); - /// build_dir.build(&toolchain, &krate, sandbox).run(|build| { + /// let result = build_dir.build(&toolchain, &krate, sandbox).run(|build| { /// build.cargo().args(&["test", "--all"]).run()?; /// Ok(()) /// })?; + /// let _peak = result.statistics().memory_peak_bytes(); /// # Ok(()) /// # } - pub fn run anyhow::Result>(self, f: F) -> anyhow::Result { + pub fn run anyhow::Result>( + self, + f: F, + ) -> anyhow::Result> { self.build_dir .run(self.toolchain, self.krate, self.sandbox, self.patches, f) } @@ -199,7 +230,7 @@ impl BuildDirectory { sandbox: SandboxBuilder, patches: Vec, f: F, - ) -> anyhow::Result { + ) -> anyhow::Result> { let source_dir = self.source_dir(); if source_dir.exists() { crate::utils::remove_dir_all(&source_dir)?; @@ -215,18 +246,38 @@ impl BuildDirectory { })?; std::fs::create_dir_all(self.target_dir())?; + let statistics = Rc::new(SandboxStatisticsState::default()); + let sandbox = Rc::new(RefCell::new(sandbox.start_with_statistics( + &self.workspace, + source_dir.clone(), + self.target_dir(), + statistics.clone(), + )?)); + let res = { #[cfg(feature = "tracing")] - let _entered = tracing::info_span!("build.user_callback").entered(); + let _entered = tracing::info_span!( + "build.user_callback", + build_dir = %self.name, + krate = %krate, + toolchain = %toolchain, + ) + .entered(); + f(&Build { dir: self, toolchain, - sandbox, + sandbox: sandbox.clone(), + statistics, }) }?; + let statistics = sandbox.borrow_mut().cleanup()?; crate::utils::remove_dir_all(&source_dir)?; - Ok(res) + Ok(BuildResult { + output: res, + statistics, + }) } /// Remove all the contents of the build directory, freeing disk space. 
@@ -257,7 +308,8 @@ impl BuildDirectory { pub struct Build<'ws> { dir: &'ws BuildDirectory, toolchain: &'ws Toolchain, - sandbox: SandboxBuilder, + sandbox: Rc>>, + statistics: Rc, } impl<'ws> Build<'ws> { @@ -267,6 +319,11 @@ impl<'ws> Build<'ws> { /// outside the sandbox. The crate's source directory will be the working directory for the /// command. /// + /// All commands spawned through the same [`Build`] share a single underlying container, so + /// running a sandboxed command from inside another sandboxed command's + /// [`process_lines`](struct.Command.html#method.process_lines) callback is not supported and + /// will return [`CommandError::ReentrantSandbox`](../cmd/enum.CommandError.html#variant.ReentrantSandbox). + /// /// # Example /// /// ```no_run @@ -286,17 +343,10 @@ impl<'ws> Build<'ws> { /// # } /// ``` pub fn cmd<'pl, R: Runnable>(&self, bin: R) -> Command<'ws, 'pl> { - let container_dir = &*crate::cmd::container_dirs::TARGET_DIR; + let container_dir = &*container_dirs::TARGET_DIR; - Command::new_sandboxed( - &self.dir.workspace, - self.sandbox - .clone() - .mount(&self.dir.target_dir(), container_dir, MountKind::ReadWrite), - bin, - ) - .current_directory(self.dir.source_dir()) - .env("CARGO_TARGET_DIR", container_dir) + Command::new_in_sandbox(&self.dir.workspace, self.sandbox.clone(), bin) + .env("CARGO_TARGET_DIR", container_dir) } /// Run `cargo` inside the sandbox, using the toolchain chosen for the build. @@ -326,6 +376,14 @@ impl<'ws> Build<'ws> { self.cmd(self.toolchain.cargo()) } + /// Snapshot the sandbox statistics (e.g. peak memory) gathered so far in + /// this build. The same data is available on the [`BuildResult`] returned + /// from [`BuildBuilder::run`]; this method exposes it mid-build, e.g. for + /// per-step reporting from inside the closure. + pub fn statistics(&self) -> SandboxStatistics { + self.statistics.snapshot() + } + /// Get the path to the source code on the host machine (outside the sandbox). 
pub fn host_source_dir(&self) -> PathBuf { self.dir.source_dir() diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index 86fd416..890bcd5 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -14,13 +14,12 @@ use futures_util::{ }; use log::{error, info}; use process_lines_actions::InnerState; -use std::env::consts::EXE_SUFFIX; use std::ffi::{OsStr, OsString}; use std::fmt; -use std::mem; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::process::{ExitStatus, Stdio}; use std::time::{Duration, Instant}; +use std::{cell::RefCell, env::consts::EXE_SUFFIX, rc::Rc}; use std::{convert::AsRef, sync::LazyLock}; use tokio::{ io::{AsyncBufReadExt, BufReader}, @@ -87,6 +86,14 @@ pub enum CommandError { #[error("container ran out of memory")] SandboxOOM, + /// A sandboxed command was spawned while another sandboxed command on the + /// same sandbox was still running (typically from inside a + /// [`process_lines`](struct.Command.html#method.process_lines) callback). + /// The reused-container model serializes commands through a single + /// `&mut Sandbox`, so this nesting is not supported. 
+ #[error("re-entrant sandboxed commands are not supported")] + ReentrantSandbox, + /// Pulling a sandbox image from the registry failed #[error("failed to pull the sandbox image from the registry: {0}")] SandboxImagePullFailed(#[source] Box), @@ -202,7 +209,7 @@ impl Runnable for &B { #[allow(clippy::type_complexity)] pub struct Command<'w, 'pl> { workspace: Option<&'w Workspace>, - sandbox: Option, + sandbox: Option>>>, binary: Binary, args: Vec, env: Vec<(OsString, OsString)>, @@ -212,7 +219,6 @@ pub struct Command<'w, 'pl> { no_output_timeout: Option, log_command: bool, log_output: bool, - source_dir_mount_kind: MountKind, } // Custom Debug keeps command output focused: environment variables are shown as keys only, @@ -231,7 +237,6 @@ impl fmt::Debug for Command<'_, '_> { .field("no_output_timeout", &self.no_output_timeout) .field("log_command", &self.log_command) .field("log_output", &self.log_output) - .field("source_dir_mount_kind", &self.source_dir_mount_kind) .finish() } } @@ -242,10 +247,15 @@ impl<'w> Command<'w, '_> { binary.prepare_command(Self::new_inner(binary.name(), Some(workspace), None)) } - /// Create a new, sandboxed command. - pub fn new_sandboxed( + /// Create a new command that runs inside an existing sandbox. + /// + /// By default the command's working directory is the sandbox's source directory; call + /// [`current_directory`](#method.current_directory) to override it. Any explicit path + /// must point inside the sandbox source directory — paths outside it will panic at + /// runtime. 
+ pub fn new_in_sandbox( workspace: &'w Workspace, - sandbox: SandboxBuilder, + sandbox: Rc>>, binary: R, ) -> Self { binary.prepare_command(Self::new_inner( @@ -262,7 +272,7 @@ impl<'w> Command<'w, '_> { fn new_inner( binary: Binary, workspace: Option<&'w Workspace>, - sandbox: Option, + sandbox: Option>>>, ) -> Self { let (timeout, no_output_timeout) = if let Some(workspace) = workspace { ( @@ -284,30 +294,34 @@ impl<'w> Command<'w, '_> { no_output_timeout, log_output: true, log_command: true, - source_dir_mount_kind: MountKind::ReadOnly, } } + /// Add a command-line argument to the command. This method can be called multiple times to add + /// additional args. + pub fn arg(mut self, arg: impl Into) -> Self { + self.args.push(arg.into()); + self + } + /// Add command-line arguments to the command. This method can be called multiple times to add /// additional args. - pub fn args>(mut self, args: &[S]) -> Self { + pub fn args>(mut self, args: impl IntoIterator) -> Self { for arg in args { - self.args.push(arg.as_ref().to_os_string()); + self = self.arg(arg); } - self } /// Add an environment variable to the command. - pub fn env, S2: AsRef>(mut self, key: S1, value: S2) -> Self { - self.env - .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); + pub fn env, S2: Into>(mut self, key: S1, value: S2) -> Self { + self.env.push((key.into(), value.into())); self } /// Change the directory where the command will be executed in. - pub fn current_directory>(mut self, path: P) -> Self { - self.current_directory = Some(path.as_ref().to_path_buf()); + pub fn current_directory>(mut self, path: P) -> Self { + self.current_directory = Some(path.into()); self } @@ -333,6 +347,11 @@ impl<'w> Command<'w, '_> { /// Set the function that will be called each time a line is outputted to either the standard /// output or the standard error. Only one function can be set at any time for a command. 
/// + /// For sandboxed commands, the callback runs while the underlying [`Sandbox`] is mutably + /// borrowed. Spawning another sandboxed command (e.g. via [`Build::cmd`](../build/struct.Build.html#method.cmd)) + /// from inside the callback is not supported with the reused-container model and will return + /// [`CommandError::ReentrantSandbox`](enum.CommandError.html#variant.ReentrantSandbox). + /// /// The method is useful to analyze the command's output without storing all of it in memory. /// This example builds a crate and detects compiler errors (ICEs): /// @@ -381,27 +400,11 @@ impl<'w> Command<'w, '_> { self } - /// Sets how the source directory is mounted. - /// - /// The default mount kind is read-only. - /// - /// ## Security - /// - /// Be sure you understand the implications of setting this. If you set - /// this to read-write, and the source directory may potentially be - /// reused, then subsequent invocations may see those changes. Beware of - /// trusting those previous invocations or the contents of the source - /// directory. - pub fn source_dir_mount_kind(mut self, mount_kind: MountKind) -> Self { - self.source_dir_mount_kind = mount_kind; - self - } - /// Run the prepared command and return an error if it fails (for example with a non-zero exit /// code or a timeout). - pub fn run(self) -> Result { - let output = self.run_inner(false)?; - Ok(output.statistics) + pub fn run(self) -> Result<(), CommandError> { + self.run_inner(false)?; + Ok(()) } /// Run the prepared command and return its output if it succeedes. 
If it fails (for example @@ -419,10 +422,7 @@ impl<'w> Command<'w, '_> { tracing::instrument(skip_all, fields(self = ?self, capture)) )] fn run_inner(self, capture: bool) -> Result { - if let Some(mut builder) = self.sandbox { - let workspace = self - .workspace - .expect("sandboxed builds without a workspace are not supported"); + if let Some(sandbox) = self.sandbox { let binary = match self.binary { Binary::Global(path) => path, Binary::ManagedByRustwide(path) => { @@ -430,61 +430,36 @@ impl<'w> Command<'w, '_> { } }; - let mut cmd = vec![binary.to_string_lossy().as_ref().to_string()]; + let mut command = SandboxCommand::new(binary) + .args(self.args) + .env("SOURCE_DIR", &*container_dirs::WORK_DIR) + .env("CARGO_HOME", &*container_dirs::CARGO_HOME) + .env("RUSTUP_HOME", &*container_dirs::RUSTUP_HOME); - for arg in self.args { - cmd.push(arg.to_string_lossy().to_string()); + for (key, value) in self.env { + command = command.env(key, value); } - let source_dir = match self.current_directory { - Some(path) => path, - None => PathBuf::from("."), - }; - - builder = builder - .mount( - &source_dir, - &container_dirs::WORK_DIR, - self.source_dir_mount_kind, - ) - .env("SOURCE_DIR", container_dirs::WORK_DIR.to_str().unwrap()) - .workdir(container_dirs::WORK_DIR.to_str().unwrap()) - .cmd(cmd); - - if let Some(user) = native::current_user() { - builder = builder.user(user.user_id, user.group_id); + if let Some(workdir) = self.current_directory { + command = command.workdir(workdir); } - for (key, value) in self.env { - builder = builder.env( - key.to_string_lossy().as_ref(), - value.to_string_lossy().as_ref(), - ); + if let Some(user) = native::current_user() { + command = command.user(user.user_id, user.group_id); } - builder = builder - .mount( - &workspace.cargo_home(), - &container_dirs::CARGO_HOME, - MountKind::ReadOnly, - ) - .mount( - &workspace.rustup_home(), - &container_dirs::RUSTUP_HOME, - MountKind::ReadOnly, + sandbox + .try_borrow_mut() + .map_err(|_| 
CommandError::ReentrantSandbox)? + .run( + command, + self.timeout, + self.no_output_timeout, + self.process_lines, + self.log_output, + self.log_command, + capture, ) - .env("CARGO_HOME", container_dirs::CARGO_HOME.to_str().unwrap()) - .env("RUSTUP_HOME", container_dirs::RUSTUP_HOME.to_str().unwrap()); - - builder.run( - workspace, - self.timeout, - self.no_output_timeout, - self.process_lines, - self.log_output, - self.log_command, - capture, - ) } else { let (binary, managed_by_rustwide) = match self.binary { // global paths should never be normalized @@ -579,42 +554,8 @@ impl From for ProcessOutput { ProcessOutput { stdout: orig.stdout, stderr: orig.stderr, - statistics: ProcessStatistics::default(), - } - } -} - -/// collected statistics about the process execution. -#[derive(Debug, Default, Clone)] -#[cfg_attr(test, derive(PartialEq, Eq))] -pub struct ProcessStatistics { - /// peak memory usage in bytes. - /// This is populated for sandboxed commands on systems - /// with cgroups v1/v2. - pub memory_peak: Option, -} - -impl ProcessStatistics { - /// Merge two `ProcessStatistics` into one, following a fixed set of aggregation rules: - /// - /// - `memory_peak`: the maximum of the two values is kept, since a merged peak - /// should reflect the highest peak observed across all runs. If only one side - /// has a value and the other is `None`, that value is used as-is. - pub fn merge(self, other: Self) -> Self { - Self { - memory_peak: match (self.memory_peak, other.memory_peak) { - (Some(a), Some(b)) => Some(a.max(b)), - (a, b) => a.or(b), - }, } } - - /// Merge another `ProcessStatistics` into `self` in place. - /// - /// See [`merge`](Self::merge) for the aggregation rules. 
- pub fn merge_mut(&mut self, other: Self) { - *self = mem::take(self).merge(other); - } } /// Output of a [`Command`](struct.Command.html) when it was executed with the @@ -622,7 +563,6 @@ impl ProcessStatistics { pub struct ProcessOutput { stdout: Vec, stderr: Vec, - statistics: ProcessStatistics, } impl ProcessOutput { @@ -635,14 +575,6 @@ impl ProcessOutput { pub fn stderr_lines(&self) -> &[String] { &self.stderr } - - /// Return the peak memory usage in bytes of the sandbox container, if available. - /// - /// This is populated for sandboxed commands on systems with cgroups v2. Returns `None` for - /// non-sandboxed commands or when the metric could not be read. - pub fn memory_peak_bytes(&self) -> Option { - self.statistics.memory_peak - } } enum OutputKind { @@ -784,43 +716,3 @@ fn exe_suffix(file: &OsStr) -> OsString { path.push(EXE_SUFFIX); path } - -#[cfg(test)] -mod tests { - use super::ProcessStatistics; - use test_case::test_case; - - const fn stats(peak: Option) -> ProcessStatistics { - ProcessStatistics { memory_peak: peak } - } - - #[test_case(stats(None), stats(None), stats(None))] - #[test_case(stats(Some(100)), stats(None), stats(Some(100)))] - #[test_case(stats(None), stats(Some(100)), stats(Some(100)))] - #[test_case(stats(Some(300)), stats(Some(100)), stats(Some(300)))] - #[test_case(stats(Some(100)), stats(Some(300)), stats(Some(300)))] - #[test_case(stats(Some(42)), stats(Some(42)), stats(Some(42)))] - fn test_merge(lhs: ProcessStatistics, rhs: ProcessStatistics, expected: ProcessStatistics) { - { - let lhs = lhs.clone(); - let rhs = rhs.clone(); - assert_eq!(lhs.merge(rhs), expected); - } - - { - let mut lhs = lhs.clone(); - lhs.merge_mut(rhs); - assert_eq!(lhs, expected); - } - } - - #[test] - fn merge_mut_accumulate_over_multiple() { - let mut s = stats(None); - s.merge_mut(stats(Some(50))); - s.merge_mut(stats(Some(200))); - s.merge_mut(stats(None)); - s.merge_mut(stats(Some(150))); - assert_eq!(s.memory_peak, Some(200)); - } -} diff 
--git a/src/cmd/sandbox.rs b/src/cmd/sandbox.rs index a33be38..edc3b4a 100644 --- a/src/cmd/sandbox.rs +++ b/src/cmd/sandbox.rs @@ -1,12 +1,16 @@ -use crate::Workspace; -use crate::cmd::{Command, CommandError, ProcessLinesActions, ProcessOutput, ProcessStatistics}; +use crate::{ + Workspace, + cmd::{Command, CommandError, ProcessLinesActions, ProcessOutput, container_dirs}, +}; use log::{error, info}; use serde::Deserialize; use std::{ - error::Error, - fmt, + cell::RefCell, + ffi::OsString, + fmt, mem, ops::RangeInclusive, path::{Path, PathBuf}, + rc::Rc, time::Duration, }; @@ -35,7 +39,7 @@ impl SandboxImage { let mut image = SandboxImage { name: name.into() }; info!("pulling image {name} from Docker Hub"); Command::new_workspaceless("docker") - .args(&["pull", name]) + .args(["pull", name]) .run() .map_err(|e| CommandError::SandboxImagePullFailed(Box::new(e)))?; if let Some(name_with_hash) = image.get_name_with_hash() { @@ -49,7 +53,7 @@ impl SandboxImage { fn ensure_exists_locally(&self) -> Result<(), CommandError> { info!("checking the image {} is available locally", self.name); Command::new_workspaceless("docker") - .args(&["image", "inspect", &self.name]) + .args(["image", "inspect", &self.name]) .log_output(false) .run() .map_err(|e| CommandError::SandboxImageMissing(Box::new(e)))?; @@ -61,7 +65,7 @@ impl SandboxImage { /// ghcr.io/rust-lang/crates-build-env/linux@sha256:61361fe0a... pub fn get_name_with_hash(&self) -> Option { Command::new_workspaceless("docker") - .args(&[ + .args([ "inspect", &self.name, "--format", @@ -141,33 +145,136 @@ impl MountConfig { } } -/// The sandbox builder allows to configure a sandbox, used later in a -/// [`Command`](struct.Command.html). +/// The sandbox builder allows configuring a [`Sandbox`]. +/// +/// Call [`SandboxBuilder::start`] to create a live sandbox, then run commands +/// inside it with [`Command::new_in_sandbox`](struct.Command.html#method.new_in_sandbox). 
#[derive(Clone)] pub struct SandboxBuilder { mounts: Vec, - env: Vec<(String, String)>, + source_dir_mount_kind: MountKind, memory_limit: Option, cpu_limit: Option, cpuset_cpus: Option>, - workdir: Option, - user: Option, - cmd: Vec, enable_networking: bool, } +/// Statistics collected for a sandbox. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct SandboxStatistics { + memory_peak: Option, +} + +impl SandboxStatistics { + /// Return the peak memory usage in bytes observed across the whole sandbox, if available. + pub fn memory_peak_bytes(&self) -> Option { + self.memory_peak + } + + /// Combine two `SandboxStatistics` into one, keeping the highest observed peak memory. + pub fn combine(self, other: Self) -> Self { + Self { + memory_peak: match (self.memory_peak, other.memory_peak) { + (Some(a), Some(b)) => Some(a.max(b)), + (a, b) => a.or(b), + }, + } + } + + /// Merge another `SandboxStatistics` into `self` in place. + pub fn merge(&mut self, other: Self) { + *self = mem::take(self).combine(other); + } +} + +#[derive(Debug, Default)] +pub(crate) struct SandboxStatisticsState { + statistics: RefCell, +} + +impl SandboxStatisticsState { + pub(crate) fn snapshot(&self) -> SandboxStatistics { + self.statistics.borrow().clone() + } + + fn merge(&self, statistics: SandboxStatistics) { + self.statistics.borrow_mut().merge(statistics); + } +} + +/// A live sandbox that can execute one or more commands. +/// +/// Sandboxes are returned already started by [`SandboxBuilder::start`] and +/// can be reused across multiple commands. If a command exhausts the +/// container's memory limit and kills the container, the next command +/// transparently recreates it. 
+pub struct Sandbox<'w> { + workspace: &'w Workspace, + builder: SandboxBuilder, + source_dir: PathBuf, + target_dir: PathBuf, + container: Option>, + statistics: Rc, +} + +pub(crate) struct SandboxCommand { + pub(crate) cmd: Vec, + pub(crate) env: Vec<(OsString, OsString)>, + pub(crate) workdir: Option, + pub(crate) user: Option, +} + +impl SandboxCommand { + pub(crate) fn new(program: impl Into) -> SandboxCommand { + Self { + cmd: vec![program.into()], + env: Vec::new(), + workdir: None, + user: None, + } + } + + pub(crate) fn user(mut self, user: u32, group: u32) -> Self { + self.user = Some(format!("{user}:{group}")); + self + } + + pub(crate) fn workdir(mut self, workdir: impl AsRef) -> Self { + self.workdir = Some(crate::utils::normalize_path(workdir.as_ref())); + self + } + + pub(crate) fn env(mut self, k: impl Into, v: impl Into) -> Self { + self.env.push((k.into(), v.into())); + self + } + + pub(crate) fn arg(mut self, arg: impl Into) -> Self { + self.cmd.push(arg.into()); + self + } + + pub(crate) fn args(mut self, args: I) -> Self + where + I: IntoIterator, + S: Into, + { + for arg in args { + self = self.arg(arg); + } + self + } +} + impl SandboxBuilder { /// Create a new sandbox builder. pub fn new() -> Self { Self { mounts: Vec::new(), - env: Vec::new(), - workdir: None, + source_dir_mount_kind: MountKind::ReadOnly, memory_limit: None, cpu_limit: None, cpuset_cpus: None, - user: None, - cmd: Vec::new(), enable_networking: true, } } @@ -183,6 +290,24 @@ impl SandboxBuilder { self } + /// Sets how the source directory is mounted for reusable sandbox commands. + /// + /// The default mount kind is read-only. + /// + /// ## Security + /// + /// Be sure you understand the implications of setting this. 
The same container + /// backs every command spawned inside a single + /// [`BuildBuilder::run`](../build/struct.BuildBuilder.html#method.run) closure, so with + /// `MountKind::ReadWrite` any mutation made by an earlier command persists into all + /// later commands in that build — and across reuse of the same source directory by + /// later builds. Do not trust the source directory's contents to be untouched if you + /// opt in to a writable mount. + pub fn source_dir_mount_kind(mut self, mount_kind: MountKind) -> Self { + self.source_dir_mount_kind = mount_kind; + self + } + /// Enable or disable the sandbox's memory limit. When the processes inside the sandbox use /// more memory than the limit the sandbox will be killed. /// @@ -221,24 +346,51 @@ impl SandboxBuilder { self } - pub(super) fn env, S2: Into>(mut self, key: S1, value: S2) -> Self { - self.env.push((key.into(), value.into())); - self - } - - pub(super) fn cmd(mut self, cmd: Vec) -> Self { - self.cmd = cmd; - self + /// Start a live sandbox from this configuration. + /// + /// The returned sandbox can be used to run one or more commands against a + /// fixed source directory and target directory. The underlying container is + /// created and started before this returns, so any docker errors surface here + /// rather than on the first command. 
+ pub fn start<'w>( + self, + workspace: &'w Workspace, + source_dir: impl AsRef, + target_dir: impl AsRef, + ) -> Result, CommandError> { + self.start_with_statistics( + workspace, + source_dir, + target_dir, + Rc::new(SandboxStatisticsState::default()), + ) } - pub(super) fn workdir>(mut self, workdir: S) -> Self { - self.workdir = Some(workdir.into()); - self + pub(crate) fn start_with_statistics<'w>( + self, + workspace: &'w Workspace, + source_dir: impl AsRef, + target_dir: impl AsRef, + statistics: Rc, + ) -> Result, CommandError> { + let source_dir = crate::utils::normalize_path(source_dir.as_ref()); + let target_dir = crate::utils::normalize_path(target_dir.as_ref()); + let container = Sandbox::create_container(&self, workspace, &source_dir, &target_dir)?; + Ok(Sandbox { + workspace, + builder: self, + source_dir, + target_dir, + container: Some(container), + statistics, + }) } - pub(super) fn user(mut self, user: u32, group: u32) -> Self { - self.user = Some(format!("{user}:{group}")); - self + fn create_started(self, workspace: &Workspace) -> Result, CommandError> { + let mut container = self.create(workspace)?; + container.start()?; + container.record_oom_kill_count(); + Ok(container) } #[cfg_attr( @@ -248,12 +400,10 @@ impl SandboxBuilder { fields( image = %workspace.sandbox_image().name, mounts = self.mounts.len(), - env = self.env.len(), memory_limit = ?self.memory_limit, cpu_limit = ?self.cpu_limit, cpuset_cpus = ?self.cpuset_cpus, enable_networking = self.enable_networking, - command = ?self.cmd, ) ) )] @@ -312,70 +462,13 @@ impl SandboxBuilder { .run_capture() .map_err(|err| CommandError::SandboxContainerCreate(Box::new(err)))?; Ok(Container { - id: out.stdout_lines()[0].clone(), + id: Some(out.stdout_lines()[0].clone()), workspace, - cmd: self.cmd, - env: self.env, - workdir: self.workdir, - user: self.user, + running: true, + oom_killed: false, + oom_kill_count: None, }) } - - #[allow(clippy::too_many_arguments)] - 
#[allow(clippy::type_complexity)] - #[cfg_attr( - feature = "tracing", - tracing::instrument( - skip_all, - fields( - image = %workspace.sandbox_image().name, - mounts = self.mounts.len(), - env = self.env.len(), - memory_limit = ?self.memory_limit, - cpu_limit = ?self.cpu_limit, - cpuset_cpus = ?self.cpuset_cpus, - enable_networking = self.enable_networking, - command = ?self.cmd, - capture, - timeout_secs = ?timeout.map(|timeout| timeout.as_secs()), - no_output_timeout_secs = ?no_output_timeout.map(|timeout| timeout.as_secs()), - ) - ) - )] - pub(super) fn run( - self, - workspace: &Workspace, - timeout: Option, - no_output_timeout: Option, - process_lines: Option<&mut dyn FnMut(&str, &mut ProcessLinesActions)>, - log_output: bool, - log_command: bool, - capture: bool, - ) -> Result { - let container = self.create(workspace)?; - - // Ensure the container is properly deleted even if something panics - scopeguard::defer! {{ - if let Err(err) = container.delete() { - error!("failed to delete container {}", container.id); - error!("caused by: {err}"); - let mut err: &dyn Error = &err; - while let Some(cause) = err.source() { - error!("caused by: {cause}"); - err = cause; - } - } - }} - - container.run( - timeout, - no_output_timeout, - process_lines, - log_output, - log_command, - capture, - ) - } } #[derive(Deserialize)] @@ -388,23 +481,32 @@ struct InspectContainer { struct InspectState { #[serde(rename = "OOMKilled")] oom_killed: bool, + #[serde(rename = "Running")] + running: bool, } -#[derive(Clone)] struct Container<'w> { - // Docker container ID - id: String, + /// Docker container ID. `Some` while the container is live; `take`n by a + /// successful [`Container::delete`] so that [`Drop`] knows there's + /// nothing left to remove. 
+ id: Option, workspace: &'w Workspace, - // Command-level config for `docker exec` (not baked into `docker create`) - cmd: Vec, - env: Vec<(String, String)>, - workdir: Option, - user: Option, + running: bool, + oom_killed: bool, + oom_kill_count: Option, +} + +impl Container<'_> { + fn id(&self) -> &str { + self.id + .as_deref() + .expect("container has already been deleted") + } } impl fmt::Display for Container<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.id.fmt(f) + self.id().fmt(f) } } @@ -412,7 +514,7 @@ impl Container<'_> { #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))] fn inspect(&self) -> Result { let output = Command::new(self.workspace, "docker") - .args(&["inspect", &self.id]) + .args(["inspect", self.id()]) .log_output(false) .run_capture()?; @@ -427,17 +529,7 @@ impl Container<'_> { #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))] fn start(&self) -> Result<(), CommandError> { Command::new(self.workspace, "docker") - .args(&["start", &self.id]) - .log_output(false) - .run() - .map(|_| ()) - } - - /// Stop a running container. Uses `-t 1` to give `sleep infinity` a short grace period. 
- #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))] - fn stop(&self) -> Result<(), CommandError> { - Command::new(self.workspace, "docker") - .args(&["stop", "-t", "1", &self.id]) + .args(["start", self.id()]) .log_output(false) .run() .map(|_| ()) @@ -447,7 +539,7 @@ impl Container<'_> { #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))] fn exec_cat_file(&self, path: &str) -> Option> { Command::new(self.workspace, "docker") - .args(&["exec", &self.id, "cat", path]) + .args(["exec", self.id(), "cat", path]) .log_output(false) .log_command(false) .run_capture() @@ -455,6 +547,10 @@ impl Container<'_> { .map(|o| o.stdout_lines().to_vec()) } + fn record_oom_kill_count(&mut self) { + self.oom_kill_count = self.read_oom_kill_count(); + } + /// Best-effort read of peak memory usage from the still-running container. /// Tries cgroups v2 first, then falls back to cgroups v1. #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))] @@ -481,7 +577,7 @@ impl Container<'_> { /// report `OOMKilled`, so we check the cgroup events directly. /// Tries cgroups v2 first, then falls back to cgroups v1. #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))] - fn check_cgroup_oom(&self) -> bool { + fn read_oom_kill_count(&self) -> Option { // Both v1 and v2 expose `oom_kill ` — just in different files. 
         let paths = [
             "/sys/fs/cgroup/memory.events", // v2
         ];
         for path in paths {
             if let Some(lines) = self.exec_cat_file(path) {
-                let found = lines.iter().any(|line| {
-                    line.strip_prefix("oom_kill ")
+                for line in &lines {
+                    if let Some(count) = line
+                        .strip_prefix("oom_kill ")
                         .and_then(|rest| rest.trim().parse::<u64>().ok())
-                        .is_some_and(|count| count > 0)
-                });
-                if found {
-                    return true;
+                    {
+                        return Some(count);
+                    }
                 }
-                // File existed but no OOM — don't try the other version
-                return false;
+                return Some(0);
             }
         }
-        false
+        None
     }
 
-    #[allow(clippy::type_complexity)]
+    fn check_cgroup_oom(&mut self) -> bool {
+        let current = self.read_oom_kill_count();
+        let previous = self.oom_kill_count;
+        self.oom_kill_count = current;
+
+        current.unwrap_or_default() > previous.unwrap_or_default()
+    }
+
+    fn check_container_oom(&mut self, details: &InspectContainer) -> bool {
+        self.running = details.state.running;
+        // `OOMKilled` can stay true after the first failure. Treat it as an
+        // edge-triggered signal so later commands in the same container don't
+        // keep being reported as fresh OOMs.
+        let previous = self.oom_killed;
+        self.oom_killed = details.state.oom_killed;
+        details.state.oom_killed && !previous
+    }
+
+    fn is_running(&self) -> bool {
+        self.running
+    }
+
+    #[allow(clippy::too_many_arguments, clippy::type_complexity)]
     #[cfg_attr(
         feature = "tracing",
-        tracing::instrument(skip_all, fields(container_id = %self.id, capture))
+        tracing::instrument(skip_all, fields(container_id = %self.id(), capture))
     )]
-    fn run(
-        &self,
+    fn run_command(
+        &mut self,
+        command: SandboxCommand,
         timeout: Option<Duration>,
         no_output_timeout: Option<Duration>,
         process_lines: Option<&mut dyn FnMut(&str, &mut ProcessLinesActions)>,
         log_output: bool,
         log_command: bool,
         capture: bool,
-    ) -> Result<ProcessOutput, CommandError> {
-        // Start the container in detached mode (runs `sleep infinity`)
-        self.start()?;
-
+    ) -> (SandboxStatistics, Result<ProcessOutput, CommandError>) {
         // Build the `docker exec` command with env/workdir/user from the sandbox config
-        let mut args: Vec<String> = vec!["exec".into()];
+        let mut cmd = Command::new(self.workspace, "docker").arg("exec");
 
-        for (var, value) in &self.env {
-            args.push("-e".into());
-            args.push(format!("{var}={value}"));
+        for (var, value) in command.env {
+            cmd = cmd
+                .arg("-e")
+                .arg(format!("{}={}", var.display(), value.display()));
         }
-        if let Some(ref workdir) = self.workdir {
-            args.push("-w".into());
-            args.push(workdir.clone());
+        if let Some(workdir) = command.workdir {
+            cmd = cmd.arg("-w").arg(workdir);
         }
-        if let Some(ref user) = self.user {
-            args.push("--user".into());
-            args.push(user.clone());
+        if let Some(user) = command.user {
+            cmd = cmd.arg("--user").arg(user);
         }
 
-        args.push(self.id.clone());
-        args.extend(self.cmd.iter().cloned());
-
-        let mut cmd = Command::new(self.workspace, "docker")
-            .args(&args)
+        cmd = cmd
+            .arg(self.id())
+            .args(command.cmd)
             .timeout(timeout)
             .log_output(log_output)
             .log_command(log_command)
@@ -556,38 +668,213 @@ impl Container<'_> {
         let res = cmd.run_inner(capture);
 
         // Read peak memory usage while the container is still running (best-effort)
-        let
memory_peak = self.read_memory_peak(); + let statistics = SandboxStatistics { + memory_peak: self.read_memory_peak(), + }; // Check OOM via cgroup events (catches cases where only the exec'd process // was killed, leaving the container's init process alive) let cgroup_oom = self.check_cgroup_oom(); - // Explicitly stop the container now that we're done reading metrics. - // The scopeguard will still call `docker rm -f` for final cleanup. - let _ = self.stop(); - - let details = self.inspect()?; + let details = match self.inspect() { + Ok(details) => details, + Err(err) => return (statistics, Err(err)), + }; + let container_oom = self.check_container_oom(&details); // Return a different error if the container was killed due to an OOM - if details.state.oom_killed || cgroup_oom { + let res = if container_oom || cgroup_oom { Err(match res { Ok(_) | Err(CommandError::ExecutionFailed { .. }) => CommandError::SandboxOOM, Err(err) => err, }) } else { - res.map(|mut output| { - output.statistics = ProcessStatistics { memory_peak }; - output - }) - } + res + }; + (statistics, res) } + /// Run `docker rm -f` for this container, idempotently. On success the + /// stored id is taken (so subsequent calls — including the one in + /// [`Drop`] — are no-ops). On failure the id is restored so [`Drop`] + /// (or a later call) can retry. 
     #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
-    fn delete(&self) -> Result<(), CommandError> {
-        Command::new(self.workspace, "docker")
-            .args(&["rm", "-f", &self.id])
+    fn delete(&mut self) -> Result<(), CommandError> {
+        let Some(id) = self.id.take() else {
+            return Ok(());
+        };
+        if let Err(err) = Command::new(self.workspace, "docker")
+            .args(["rm", "-f", &id])
             .run()
-            .map(|_| ())
+        {
+            self.id = Some(id);
+            return Err(err);
+        }
+        Ok(())
+    }
+}
+
+impl Drop for Container<'_> {
+    fn drop(&mut self) {
+        if let Err(err) = self.delete() {
+            error!(
+                "docker rm failed, leaked sandbox container {}:\n{:?}",
+                self.id.as_deref().unwrap_or_default(),
+                err
+            );
+        }
+    }
+}
+
+impl<'w> Sandbox<'w> {
+    fn command_timed_out(res: &Result<ProcessOutput, CommandError>) -> bool {
+        matches!(
+            res,
+            Err(CommandError::NoOutputFor(_))
+                | Err(CommandError::Timeout(_))
+                | Err(CommandError::KillAfterTimeoutFailed(_))
+        )
+    }
+
+    /// Return the statistics gathered across the sandbox lifetime so far.
+    pub fn statistics(&self) -> SandboxStatistics {
+        self.statistics.snapshot()
+    }
+
+    pub(crate) fn container_workdir(&self, path: &Path) -> Option<PathBuf> {
+        let relative = path.strip_prefix(&self.source_dir).ok()?;
+        Some(container_dirs::WORK_DIR.join(relative))
+    }
+
+    fn create_container(
+        builder: &SandboxBuilder,
+        workspace: &'w Workspace,
+        source_dir: &Path,
+        target_dir: &Path,
+    ) -> Result<Container<'w>, CommandError> {
+        builder
+            .clone()
+            .mount(
+                source_dir,
+                &container_dirs::WORK_DIR,
+                builder.source_dir_mount_kind,
+            )
+            .mount(
+                target_dir,
+                &container_dirs::TARGET_DIR,
+                MountKind::ReadWrite,
+            )
+            .mount(
+                &workspace.cargo_home(),
+                &container_dirs::CARGO_HOME,
+                MountKind::ReadOnly,
+            )
+            .mount(
+                &workspace.rustup_home(),
+                &container_dirs::RUSTUP_HOME,
+                MountKind::ReadOnly,
+            )
+            .create_started(workspace)
+    }
+
+    fn ensure_reusable_container(&mut self) -> Result<(), CommandError> {
+        // The container can be stopped if a previous command got OOM-killed
+        // at the container
level, or missing after an explicit `cleanup()`.
+        // Either way, recreate before attempting another `docker exec`.
+        // Assigning the new value drops the old `Container`, whose `Drop`
+        // impl runs `docker rm`.
+        let needs_recreate = self
+            .container
+            .as_ref()
+            .is_none_or(|container| !container.is_running());
+        if needs_recreate {
+            self.container = Some(Self::create_container(
+                &self.builder,
+                self.workspace,
+                &self.source_dir,
+                &self.target_dir,
+            )?);
+        }
+
+        Ok(())
+    }
+
+    #[allow(clippy::too_many_arguments, clippy::type_complexity)]
+    #[cfg_attr(
+        feature = "tracing",
+        tracing::instrument(
+            skip_all,
+            fields(
+                image = %self.workspace.sandbox_image().name,
+                mounts = self.builder.mounts.len(),
+                memory_limit = ?self.builder.memory_limit,
+                cpu_limit = ?self.builder.cpu_limit,
+                cpuset_cpus = ?self.builder.cpuset_cpus,
+                enable_networking = self.builder.enable_networking,
+                capture,
+                timeout_secs = ?timeout.map(|timeout| timeout.as_secs()),
+                no_output_timeout_secs = ?no_output_timeout.map(|timeout| timeout.as_secs()),
+            )
+        )
+    )]
+    pub(crate) fn run(
+        &mut self,
+        command: SandboxCommand,
+        timeout: Option<Duration>,
+        no_output_timeout: Option<Duration>,
+        process_lines: Option<&mut dyn FnMut(&str, &mut ProcessLinesActions)>,
+        log_output: bool,
+        log_command: bool,
+        capture: bool,
+    ) -> Result<ProcessOutput, CommandError> {
+        let container_workdir = match command.workdir {
+            Some(workdir) => self
+                .container_workdir(&workdir)
+                .expect("explicit workdir must be inside the sandbox source directory"),
+            None => container_dirs::WORK_DIR.clone(),
+        };
+        let command = SandboxCommand {
+            workdir: Some(container_workdir),
+            ..command
+        };
+        self.ensure_reusable_container()?;
+        let (statistics, res) = self.container.as_mut().unwrap().run_command(
+            command,
+            timeout,
+            no_output_timeout,
+            process_lines,
+            log_output,
+            log_command,
+            capture,
+        );
+        self.statistics.merge(statistics);
+
+        // On timeout we kill the host-side `docker exec` process, but the
+        // command inside the container
keeps running on the container's
+        // `sleep infinity` init. Reusing the container would let the
+        // abandoned process race the next command (sharing files, target
+        // dir, CPU/memory budget). Tear the container down so the next
+        // command in this build gets a clean one via
+        // `ensure_reusable_container`.
+        if Self::command_timed_out(&res)
+            && let Some(mut container) = self.container.take()
+        {
+            container.delete()?;
+        }
+
+        res
+    }
+
+    /// Remove the live container owned by this sandbox and return the final
+    /// statistics. Returns an error if `docker rm` fails; the container is
+    /// also torn down by [`Drop`] as a fallback if this method is not called
+    /// (or if its `docker rm` failed and a retry on drop succeeds).
+    pub fn cleanup(&mut self) -> Result<SandboxStatistics, CommandError> {
+        if let Some(container) = self.container.as_mut() {
+            container.delete()?;
+        }
+        self.container = None;
+        Ok(self.statistics())
+    }
 }
 
@@ -599,7 +886,7 @@ impl Container<'_> {
 pub fn docker_running(workspace: &Workspace) -> bool {
     info!("checking if the docker daemon is running");
     Command::new(workspace, "docker")
-        .args(&["info"])
+        .args(["info"])
         .log_output(false)
         .run()
         .is_ok()
}
 
@@ -612,9 +899,44 @@ fn format_cpuset_cpus(cpus: &RangeInclusive<u16>) -> String {
 
 #[cfg(test)]
 mod tests {
     use super::*;
+    use test_case::test_case;
 
     #[test]
     fn formats_cpuset_cpus() {
         assert_eq!(format_cpuset_cpus(&(2..=4)), "2-4");
     }
+
+    const fn stats(peak: Option<u64>) -> SandboxStatistics {
+        SandboxStatistics { memory_peak: peak }
+    }
+
+    #[test_case(stats(None), stats(None), stats(None))]
+    #[test_case(stats(Some(100)), stats(None), stats(Some(100)))]
+    #[test_case(stats(None), stats(Some(100)), stats(Some(100)))]
+    #[test_case(stats(Some(300)), stats(Some(100)), stats(Some(300)))]
+    #[test_case(stats(Some(100)), stats(Some(300)), stats(Some(300)))]
+    #[test_case(stats(Some(42)), stats(Some(42)), stats(Some(42)))]
+    fn test_combine(lhs: SandboxStatistics, rhs: SandboxStatistics, expected: SandboxStatistics) {
+        {
+            let lhs =
lhs.clone();
+            let rhs = rhs.clone();
+            assert_eq!(lhs.combine(rhs), expected);
+        }
+
+        {
+            let mut lhs = lhs.clone();
+            lhs.merge(rhs);
+            assert_eq!(lhs, expected);
+        }
+    }
+
+    #[test]
+    fn merge_accumulate_over_multiple() {
+        let mut s = stats(None);
+        s.merge(stats(Some(50)));
+        s.merge(stats(Some(200)));
+        s.merge(stats(None));
+        s.merge(stats(Some(150)));
+        assert_eq!(s.memory_peak, Some(200));
+    }
 }
diff --git a/src/crates/git.rs b/src/crates/git.rs
index 9d99c37..c966362 100644
--- a/src/crates/git.rs
+++ b/src/crates/git.rs
@@ -17,7 +17,7 @@ impl GitRepo {
     pub(super) fn git_commit(&self, workspace: &Workspace) -> Option<String> {
         let res = Command::new(workspace, "git")
-            .args(&["rev-parse", "HEAD"])
+            .args(["rev-parse", "HEAD"])
             .current_directory(self.cached_path(workspace))
             .run_capture();
 
@@ -94,9 +94,9 @@ impl CrateTrait for GitRepo {
         let res = if cache_hit {
             info!("updating cached repository {}", self.url);
             Command::new(workspace, "git")
-                .args(&self.suppress_password_prompt_args(workspace))
-                .args(&["-c", "remote.origin.fetch=refs/heads/*:refs/heads/*"])
-                .args(&["fetch", "origin", "--force", "--prune"])
+                .args(self.suppress_password_prompt_args(workspace))
+                .args(["-c", "remote.origin.fetch=refs/heads/*:refs/heads/*"])
+                .args(["fetch", "origin", "--force", "--prune"])
                 .current_directory(&path)
                 .process_lines(&mut detect_private_repositories)
                 .run()
@@ -104,9 +104,9 @@
         } else {
             info!("cloning repository {}", self.url);
             Command::new(workspace, "git")
-                .args(&self.suppress_password_prompt_args(workspace))
-                .args(&["clone", "--bare", &self.url])
-                .args(&[&path])
+                .args(self.suppress_password_prompt_args(workspace))
+                .args(["clone", "--bare", &self.url])
+                .args([&path])
                 .process_lines(&mut detect_private_repositories)
                 .run()
                 .with_context(|| format!("failed to clone {}", self.url))
@@ -133,8 +133,8 @@
     )]
     fn copy_source_to(&self, workspace: &Workspace, dest: &Path) -> anyhow::Result<()> {
         Command::new(workspace, "git")
-            .args(&["clone"])
-            .args(&[self.cached_path(workspace).as_path(), dest])
+            .args(["clone"])
+            .args([self.cached_path(workspace).as_path(), dest])
             .run()
             .with_context(|| format!("failed to checkout {}", self.url))?;
         Ok(())
     }
diff --git a/src/inside_docker.rs b/src/inside_docker.rs
index 09f1e0f..bbded31 100644
--- a/src/inside_docker.rs
+++ b/src/inside_docker.rs
@@ -14,7 +14,7 @@ impl CurrentContainer {
         if let Some(id) = probe_container_id(workspace)? {
             info!("inspecting the current container");
             let inspect = Command::new(workspace, "docker")
-                .args(&["inspect", &id])
+                .args(["inspect", &id])
                 .log_output(false)
                 .log_command(false)
                 .run_capture()?;
@@ -56,7 +56,7 @@ pub(crate) fn probe_container_id(workspace: &Workspace) -> anyhow::Result<Option<String>>