From 50d19110b3fa3fd06d17092813a6af4a4a13f643 Mon Sep 17 00:00:00 2001 From: Jacek N Date: Thu, 4 Dec 2025 15:06:30 +0000 Subject: [PATCH] Bump parallel catchup RAM requests This PR bumps parallel catchup worker pod RAM requests from 2GB to 8GB. I noticed that the pods burst quite a lot and having requests so low when compared to the 16GB limit means k8s may need to kill pods sometimes due to memory contention. --- src/FSLibrary/StellarKubeSpecs.fs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/FSLibrary/StellarKubeSpecs.fs b/src/FSLibrary/StellarKubeSpecs.fs index 37a06c42..7079e674 100644 --- a/src/FSLibrary/StellarKubeSpecs.fs +++ b/src/FSLibrary/StellarKubeSpecs.fs @@ -126,8 +126,8 @@ let SimulatePubnetTier1PerfCoreResourceRequirements : V1ResourceRequirements = let ParallelCatchupCoreResourceRequirements : V1ResourceRequirements = // When doing parallel catchup, we give each container - // 0.25 vCPUs, 2GB RAM and 35 GB of disk bursting to 2vCPU, 16GB and 40 GB - makeResourceRequirementsWithStorageLimit 250 2048 35 2000 16384 40 + // 0.25 vCPUs, 8GB RAM and 35 GB of disk bursting to 2vCPU, 16GB and 40 GB + makeResourceRequirementsWithStorageLimit 250 8192 35 2000 16384 40 let NonParallelCatchupCoreResourceRequirements : V1ResourceRequirements = // When doing non-parallel catchup, we give each container