7 changes: 1 addition & 6 deletions testcontainers/src/core/containers/async_container.rs
@@ -421,12 +421,7 @@ where
 mod tests {
     use tokio::io::AsyncBufReadExt;
 
-    use crate::{
-        core::{ContainerPort, ContainerState, ExecCommand, WaitFor},
-        images::generic::GenericImage,
-        runners::AsyncRunner,
-        Image,
-    };
+    use crate::{images::generic::GenericImage, runners::AsyncRunner};
 
     #[tokio::test]
     async fn async_logs_are_accessible() -> anyhow::Result<()> {
114 changes: 112 additions & 2 deletions testcontainers/src/core/containers/request.rs
@@ -41,6 +41,64 @@ pub struct ContainerRequest<I: Image> {
pub(crate) startup_timeout: Option<Duration>,
pub(crate) working_dir: Option<String>,
pub(crate) log_consumers: Vec<Box<dyn LogConsumer + 'static>>,

    /// The length of a CPU period in microseconds. The default is 100000. This controls how
    /// CFS schedules this container's threads; normally you leave it alone and set only the
    /// CPU quota or nano CPUs. Adjust it if you want to increase or reduce the amount of
    /// context switching the container is subjected to.
pub(crate) cpu_period: Option<i64>,

/// Microseconds of CPU time that the container can get in a CPU period.
/// Most users will want to set CPU quota to their desired CPU count * 100000.
/// For example, to limit a container to 2 CPUs, set CPU quota to 200000.
/// This is based on the default CPU period of 100000.
/// If CPU quota is set to 0, the container will not be limited.
pub(crate) cpu_quota: Option<i64>,

    /// The length of a CPU real-time period in microseconds. Set to 0 to allocate no time to real-time tasks.
pub(crate) cpu_realtime_period: Option<i64>,

    /// The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time to real-time tasks.
pub(crate) cpu_realtime_runtime: Option<i64>,

/// CPUs in which to allow execution (e.g., `0-3`, `0,1`).
/// Core pinning should help with performance consistency and context switching in some cases.
pub(crate) cpuset_cpus: Option<String>,

    /// CPU quota in units of 10<sup>-9</sup> (billionths of a) CPU. This is what the `--cpus`
    /// flag is translated into; `cpu_period` and `cpu_quota` give you more control over the scheduler.
    pub(crate) nano_cpus: Option<i64>,

    /// Memory limit for the container; the _minimum_ is 6 MiB.
/// This is the same as `HostConfig::memory`.
pub(crate) memory: Option<i64>,

    /// Memory reservation (soft limit). Analogous to the JVM's `-Xms` option.
/// The _minimum_ is 6 MiB.
/// This is the same as `HostConfig::memory_reservation`.
pub(crate) memory_reservation: Option<i64>,

    /// Total memory limit (memory + swap). Set to `-1` to enable unlimited swap.
    /// Same 6 MiB minimum as `memory`.
    pub(crate) memory_swap: Option<i64>,

    /// Tune the container's memory swappiness behavior. Accepts an integer between 0 and 100.
    pub(crate) memory_swappiness: Option<i64>,

    /// Disable the OOM killer for the container. This has no effect unless a memory limit
    /// (`memory` on this struct, i.e. the `-m` flag) is also set.
    /// Under cgroup v1, the OOM killer can be disabled by writing `1` to the
    /// `memory.oom_control` file:
    /// ```ignore
    /// echo 1 > memory.oom_control
    /// ```
    /// This operation is only allowed for the top cgroup of a sub-hierarchy.
    /// If the OOM killer is disabled, tasks under the cgroup will hang/sleep
    /// in the memory cgroup's OOM wait queue when they request accountable memory.
    /// See <https://lwn.net/Articles/432224/>.
    pub(crate) oom_kill_disable: Option<bool>,

    /// Tune the container's PIDs limit. Set `0` or `-1` for unlimited; leave as `None` to not change it.
    pub(crate) pids_limit: Option<i64>,

#[cfg(feature = "reusable-containers")]
pub(crate) reuse: crate::ReuseDirective,
}
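Since the quota and nano-CPU fields encode the same limit in different units, a short sketch of the conversions described in the doc comments above may help. This is illustrative only; the constants and the `main` wrapper are not part of this PR:

```rust
// Sanity-check the arithmetic from the doc comments above.
fn main() {
    // Default CFS period: 100_000 microseconds.
    const CPU_PERIOD_US: i64 = 100_000;

    // Limit a container to 2 CPUs: quota = desired CPU count * period.
    let cpu_quota = 2 * CPU_PERIOD_US;
    assert_eq!(cpu_quota, 200_000);

    // The same kind of limit as nano CPUs (what `--cpus` is translated into):
    // 1 CPU = 10^9 billionths, so `--cpus=1.5` becomes 1_500_000_000.
    let nano_cpus = (1.5 * 1e9) as i64;
    assert_eq!(nano_cpus, 1_500_000_000);

    // The memory fields are plain byte counts, e.g. 512 MiB:
    let memory: i64 = 512 * 1024 * 1024;
    assert_eq!(memory, 536_870_912);
}
```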
@@ -187,6 +245,42 @@ impl<I: Image> ContainerRequest<I> {
self.working_dir.as_deref()
}

pub fn cpu_period(&self) -> Option<i64> {
self.cpu_period
}
pub fn cpu_quota(&self) -> Option<i64> {
self.cpu_quota
}
pub fn cpu_realtime_period(&self) -> Option<i64> {
self.cpu_realtime_period
}
pub fn cpu_realtime_runtime(&self) -> Option<i64> {
self.cpu_realtime_runtime
}
pub fn cpuset_cpus(&self) -> Option<&str> {
self.cpuset_cpus.as_deref()
}
pub fn nano_cpus(&self) -> Option<i64> {
self.nano_cpus
}
pub fn memory(&self) -> Option<i64> {
self.memory
}
pub fn memory_reservation(&self) -> Option<i64> {
self.memory_reservation
}
pub fn memory_swap(&self) -> Option<i64> {
self.memory_swap
}
pub fn memory_swappiness(&self) -> Option<i64> {
self.memory_swappiness
}
pub fn oom_kill_disable(&self) -> Option<bool> {
self.oom_kill_disable
}
pub fn pids_limit(&self) -> Option<i64> {
self.pids_limit
}
/// Indicates that the container will not be stopped when it is dropped
#[cfg(feature = "reusable-containers")]
pub fn reuse(&self) -> crate::ReuseDirective {
@@ -219,6 +313,18 @@ impl<I: Image> From<I> for ContainerRequest<I> {
startup_timeout: None,
working_dir: None,
log_consumers: vec![],
cpu_period: None,
cpu_quota: None,
cpu_realtime_period: None,
cpu_realtime_runtime: None,
cpuset_cpus: None,
nano_cpus: None,
memory: None,
memory_reservation: None,
memory_swap: None,
memory_swappiness: None,
oom_kill_disable: None,
pids_limit: None,
#[cfg(feature = "reusable-containers")]
reuse: crate::ReuseDirective::Never,
}
@@ -265,8 +371,12 @@ impl<I: Image + Debug> Debug for ContainerRequest<I> {
.field("cgroupns_mode", &self.cgroupns_mode)
.field("userns_mode", &self.userns_mode)
.field("startup_timeout", &self.startup_timeout)
.field("working_dir", &self.working_dir);

.field("working_dir", &self.working_dir)
.field("cpu_period", &self.cpu_period)
.field("cpu_quota", &self.cpu_quota)
.field("cpu_realtime_period", &self.cpu_realtime_period)
.field("cpu_realtime_runtime", &self.cpu_realtime_runtime)
.field("cpuset_cpus", &self.cpuset_cpus);
#[cfg(feature = "reusable-containers")]
repr.field("reusable", &self.reuse);

151 changes: 151 additions & 0 deletions testcontainers/src/core/image/image_ext.rs
@@ -157,6 +157,66 @@ pub trait ImageExt<I: Image> {
/// Allows to follow the container logs for the whole lifecycle of the container, starting from the creation.
fn with_log_consumer(self, log_consumer: impl LogConsumer + 'static) -> ContainerRequest<I>;

    /// Sets the CPU period for the container: the length of a CPU period in microseconds.
    /// If unset, the daemon default applies.
    /// See <https://docs.docker.com/engine/reference/commandline/run/#cpu-period>.
fn with_cpu_period(self, cpu_period: impl Into<i64>) -> ContainerRequest<I>;

    /// Sets the CPU quota for the container: microseconds of CPU time that the container
    /// can get in each CPU period.
    /// Most users will want to set the CPU quota to their desired CPU count * 100000,
    /// based on the default CPU period of 100000. For example, to limit a container to
    /// 2 CPUs, set the CPU quota to 200000. If the CPU quota is set to 0, the container
    /// will not be limited. If unset, the daemon default applies.
    /// See <https://docs.docker.com/engine/reference/commandline/run/#cpu-quota>.
    fn with_cpu_quota(self, cpu_quota: impl Into<i64>) -> ContainerRequest<I>;

    /// Sets the CPU real-time period for the container: the length of a CPU real-time
    /// period in microseconds. If unset, the daemon default applies.
    fn with_cpu_realtime_period(self, cpu_realtime_period: impl Into<i64>) -> ContainerRequest<I>;

    /// Sets the CPU real-time runtime for the container, in microseconds.
fn with_cpu_realtime_runtime(self, cpu_realtime_runtime: impl Into<i64>)
-> ContainerRequest<I>;

/// Sets the CPUs in which to allow execution (e.g., `0-3`, `0,1`).
/// Core pinning should help with performance consistency and context switching in some cases.
/// The default is defined by the underlying image.
fn with_cpuset_cpus(self, cpuset_cpus: impl Into<String>) -> ContainerRequest<I>;

    /// Memory limit for the container; the _minimum_ is 6 MiB.
/// This is the same as `HostConfig::memory`.
fn with_memory(self, bytes: i64) -> ContainerRequest<I>;

    /// Memory reservation (soft limit). Analogous to the JVM's `-Xms` option.
/// The _minimum_ is 6 MiB.
/// This is the same as `HostConfig::memory_reservation`.
fn with_memory_reservation(self, bytes: i64) -> ContainerRequest<I>;

    /// Total memory limit (memory + swap). Set to `-1` to enable unlimited swap.
    /// Same 6 MiB minimum as `memory`.
fn with_memory_swap(self, bytes: i64) -> ContainerRequest<I>;

/// Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
fn with_memory_swappiness(self, swappiness: i64) -> ContainerRequest<I>;

    /// Disable the OOM killer for the container. This has no effect unless a memory limit
    /// (`-m`, cf. `with_memory`) is also set.
    /// Under cgroup v1, the OOM killer can be disabled by writing `1` to the
    /// `memory.oom_control` file:
    /// ```ignore
    /// echo 1 > memory.oom_control
    /// ```
    /// This operation is only allowed for the top cgroup of a sub-hierarchy.
    /// If the OOM killer is disabled, tasks under the cgroup will hang/sleep
    /// in the memory cgroup's OOM wait queue when they request accountable memory.
    /// See <https://lwn.net/Articles/432224/>.
fn with_oom_kill_disable(self, disable: bool) -> ContainerRequest<I>;

    /// Tune the container's PIDs limit. Set `0` or `-1` for unlimited; leave as `None` to not change it.
fn with_pids_limit(self, limit: i64) -> ContainerRequest<I>;

/// Flag the container as being exempt from the default `testcontainers` remove-on-drop lifecycle,
/// indicating that the container should be kept running, and that executions with the same configuration
/// reuse it instead of starting a "fresh" container instance.
@@ -373,6 +433,97 @@ impl<RI: Into<ContainerRequest<I>>, I: Image> ImageExt<I> for RI {
container_req
}

fn with_cpu_period(self, cpu_period: impl Into<i64>) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
cpu_period: Some(cpu_period.into()),
..container_req
}
}

fn with_cpu_quota(self, cpu_quota: impl Into<i64>) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
cpu_quota: Some(cpu_quota.into()),
..container_req
}
}

fn with_cpu_realtime_period(self, cpu_realtime_period: impl Into<i64>) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
cpu_realtime_period: Some(cpu_realtime_period.into()),
..container_req
}
}

fn with_cpu_realtime_runtime(
self,
cpu_realtime_runtime: impl Into<i64>,
) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
cpu_realtime_runtime: Some(cpu_realtime_runtime.into()),
..container_req
}
}

fn with_cpuset_cpus(self, cpuset_cpus: impl Into<String>) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
cpuset_cpus: Some(cpuset_cpus.into()),
..container_req
}
}

fn with_memory(self, bytes: i64) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
memory: Some(bytes),
..container_req
}
}

fn with_memory_reservation(self, bytes: i64) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
memory_reservation: Some(bytes),
..container_req
}
}

fn with_memory_swap(self, bytes: i64) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
memory_swap: Some(bytes),
..container_req
}
}

fn with_memory_swappiness(self, swappiness: i64) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
memory_swappiness: Some(swappiness),
..container_req
}
}

fn with_oom_kill_disable(self, disable: bool) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
oom_kill_disable: Some(disable),
..container_req
}
}

fn with_pids_limit(self, limit: i64) -> ContainerRequest<I> {
let container_req = self.into();
ContainerRequest {
pids_limit: Some(limit),
..container_req
}
}

#[cfg(feature = "reusable-containers")]
fn with_reuse(self, reuse: ReuseDirective) -> ContainerRequest<I> {
ContainerRequest {
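Taken together, the new `ImageExt` methods compose like the rest of the builder API. A hypothetical async test sketch follows; it assumes the crate-root re-exports (`GenericImage`, `ImageExt`) and the existing `with_cmd` builder, and the image tag, test name, and limit values are illustrative assumptions, not part of this diff:

```rust
use testcontainers::{runners::AsyncRunner, GenericImage, ImageExt};

#[tokio::test]
async fn starts_with_resource_limits() -> anyhow::Result<()> {
    // Illustrative limits: 2 CPUs (quota = 200_000 µs per default 100_000 µs period),
    // 512 MiB of memory with swap capped at the same value, and at most 100 PIDs.
    let container = GenericImage::new("alpine", "3.20")
        .with_cmd(["sleep", "30"])
        .with_cpu_quota(200_000)
        .with_memory(512 * 1024 * 1024)
        .with_memory_swap(512 * 1024 * 1024)
        .with_pids_limit(100)
        .start()
        .await?;

    // The default remove-on-drop lifecycle still applies.
    drop(container);
    Ok(())
}
```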