[ML] Report the "actual" memory usage of the autodetect process #2846

Open: wants to merge 12 commits into main
7 changes: 6 additions & 1 deletion bin/autodetect/Main.cc
@@ -24,6 +24,7 @@
#include <core/CJsonOutputStreamWrapper.h>
#include <core/CLogger.h>
#include <core/CProcessPriority.h>
#include <core/CProcessStats.h>
#include <core/CProgramCounters.h>
#include <core/CStringUtils.h>
#include <core/CoreTypes.h>
@@ -83,7 +84,9 @@ int main(int argc, char** argv) {
ml::counter_t::E_TSADNumberMemoryLimitModelCreationFailures,
ml::counter_t::E_TSADNumberPrunedItems,
ml::counter_t::E_TSADAssignmentMemoryBasis,
ml::counter_t::E_TSADOutputMemoryAllocatorUsage};
ml::counter_t::E_TSADOutputMemoryAllocatorUsage,
ml::counter_t::E_TSADSystemMemoryUsage,
ml::counter_t::E_TSADMaxSystemMemoryUsage};

ml::core::CProgramCounters::registerProgramCounterTypes(counters);

@@ -151,6 +154,8 @@ int main(int argc, char** argv) {
}
cancellerThread.stop();

LOG_DEBUG(<< "Max Resident Set Size: " << ml::core::CProcessStats::maxResidentSetSize());
LOG_DEBUG(<< "Resident Set Size: " << ml::core::CProcessStats::residentSetSize());
// Log the program version immediately after reconfiguring the logger. This
// must be done from the program, and NOT a shared library, as each program
// statically links its own version library.
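For context, the two new counters only show up in autodetect's reported statistics because they are added to the registration set above. A condensed sketch of that pattern (the `TCounterTypeSet` alias and the idea that only registered counters are emitted are taken on trust from the surrounding code; the real set lists every TSAD counter):

```cpp
#include <core/CProgramCounters.h>

// Condensed sketch of the registration performed in Main.cc above.
// Only the two counters added by this PR are shown here.
void registerNewMemoryCounters() {
    const ml::counter_t::TCounterTypeSet counters{
        ml::counter_t::E_TSADSystemMemoryUsage,
        ml::counter_t::E_TSADMaxSystemMemoryUsage};
    ml::core::CProgramCounters::registerProgramCounterTypes(counters);
}
```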
1 change: 1 addition & 0 deletions docs/CHANGELOG.asciidoc
@@ -33,6 +33,7 @@
=== Enhancements

* Track memory used in the hierarchical results normalizer. (See {ml-pull}2831[#2831].)
* Report the actual memory usage of the autodetect process. (See {ml-pull}2846[#2846].)

=== Bug Fixes

12 changes: 11 additions & 1 deletion include/core/CProgramCounters.h
@@ -112,6 +112,12 @@ enum ECounterTypes {
//! The memory currently used by the allocators to output JSON documents, in bytes.
E_TSADOutputMemoryAllocatorUsage = 30,

//! The resident set size of the process, in bytes.
E_TSADSystemMemoryUsage = 31,

//! The maximum resident set size of the process, in bytes.
E_TSADMaxSystemMemoryUsage = 32,

// Data Frame Outlier Detection

//! The estimated peak memory usage for outlier detection in bytes
@@ -146,7 +152,7 @@ enum ECounterTypes {
// Add any new values here

//! This MUST be last, increment the value for every new enum added
E_LastEnumCounter = 31
E_LastEnumCounter = 33
};

static constexpr std::size_t NUM_COUNTERS = static_cast<std::size_t>(E_LastEnumCounter);
@@ -355,6 +361,10 @@ class CORE_EXPORT CProgramCounters {
"Which option is being used to get model memory for node assignment?"},
{counter_t::E_TSADOutputMemoryAllocatorUsage, "E_TSADOutputMemoryAllocatorUsage",
"The amount of memory used to output JSON documents, in bytes."},
{counter_t::E_TSADSystemMemoryUsage, "E_TSADSystemMemoryUsage",
"The resident set size of the process, in bytes."},
{counter_t::E_TSADMaxSystemMemoryUsage, "E_TSADMaxSystemMemoryUsage",
"The maximum resident set size of the process, in bytes."},
{counter_t::E_DFOEstimatedPeakMemoryUsage, "E_DFOEstimatedPeakMemoryUsage",
"The upfront estimate of the peak memory outlier detection would use"},
{counter_t::E_DFOPeakMemoryUsage, "E_DFOPeakMemoryUsage", "The peak memory outlier detection used"},
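These counters are process-global and written by plain assignment; the write side for the two new values appears in CProcessStats_Linux.cc further down. A minimal sketch of the access pattern (reading the counter back through its unsigned integer conversion is an assumption, not shown in this diff):

```cpp
#include <core/CLogger.h>
#include <core/CProgramCounters.h>

#include <cstddef>
#include <cstdint>

// Sketch: assignment stores a value; the read-back is assumed to
// mirror what was written via the counter's integer conversion.
void recordSystemMemory(std::size_t rss) {
    ml::core::CProgramCounters::counter(ml::counter_t::E_TSADSystemMemoryUsage) = rss;
    std::uint64_t stored{
        ml::core::CProgramCounters::counter(ml::counter_t::E_TSADSystemMemoryUsage)};
    LOG_DEBUG(<< "System memory counter now holds: " << stored);
}
```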
8 changes: 8 additions & 0 deletions include/model/CResourceMonitor.h
@@ -54,6 +54,8 @@ class MODEL_EXPORT CResourceMonitor {
std::size_t s_AdjustedUsage{0};
std::size_t s_PeakUsage{0};
std::size_t s_AdjustedPeakUsage{0};
std::size_t s_SystemMemoryUsage{0};
std::size_t s_MaxSystemMemoryUsage{0};
std::size_t s_ByFields{0};
std::size_t s_PartitionFields{0};
std::size_t s_OverFields{0};
@@ -180,6 +182,12 @@ class MODEL_EXPORT CResourceMonitor {
//! Returns the sum of used memory plus any extra memory
std::size_t totalMemory() const;

//! Returns the current physical memory (RSS) of the process, in bytes, as reported by the system
std::size_t systemMemory() const;

//! Returns the maximum physical memory (max RSS) of the process, in bytes, as reported by the system
std::size_t maxSystemMemory() const;

private:
using TMonitoredResourcePtrSizeUMap =
boost::unordered_map<CMonitoredResource*, std::size_t>;
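The implementations of these accessors are not part of the visible diff. A plausible sketch, assuming the monitor simply forwards to `core::CProcessStats` (the actual lib/model/CResourceMonitor.cc change may differ, e.g. it could read the cached program counters instead):

```cpp
#include <core/CProcessStats.h>
#include <model/CResourceMonitor.h>

namespace ml {
namespace model {

// Hypothetical forwarding implementations; both return byte counts.
std::size_t CResourceMonitor::systemMemory() const {
    // Current RSS as reported by the operating system.
    return core::CProcessStats::residentSetSize();
}

std::size_t CResourceMonitor::maxSystemMemory() const {
    // Lifetime high-water RSS as reported by the operating system.
    return core::CProcessStats::maxResidentSetSize();
}
}
}
```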
2 changes: 1 addition & 1 deletion include/model/ModelTypes.h
@@ -719,7 +719,7 @@ enum EAssignmentMemoryBasis {
E_AssignmentBasisUnknown = 0, //!< Decision made in Java code
E_AssignmentBasisModelMemoryLimit = 1, //!< Use model memory limit
E_AssignmentBasisCurrentModelBytes = 2, //!< Use current actual model size
E_AssignmentBasisPeakModelBytes = 3 //!< Use highest ever actual model size
E_AssignmentBasisPeakModelBytes = 3
};

//! Get a string description of \p assignmentMemoryBasis.
8 changes: 8 additions & 0 deletions lib/api/CModelSizeStatsJsonWriter.cc
@@ -25,6 +25,8 @@ const std::string JOB_ID{"job_id"};
const std::string MODEL_SIZE_STATS{"model_size_stats"};
const std::string MODEL_BYTES{"model_bytes"};
const std::string PEAK_MODEL_BYTES{"peak_model_bytes"};
const std::string SYSTEM_MEMORY_BYTES{"system_memory_bytes"};
const std::string MAX_SYSTEM_MEMORY_BYTES{"max_system_memory_bytes"};
const std::string MODEL_BYTES_EXCEEDED{"model_bytes_exceeded"};
const std::string MODEL_BYTES_MEMORY_LIMIT{"model_bytes_memory_limit"};
const std::string TOTAL_BY_FIELD_COUNT{"total_by_field_count"};
@@ -60,6 +62,12 @@ void CModelSizeStatsJsonWriter::write(const std::string& jobId,
writer.onKey(PEAK_MODEL_BYTES);
writer.onUint64(results.s_AdjustedPeakUsage);

writer.onKey(SYSTEM_MEMORY_BYTES);
writer.onUint64(results.s_SystemMemoryUsage);

writer.onKey(MAX_SYSTEM_MEMORY_BYTES);
writer.onUint64(results.s_MaxSystemMemoryUsage);

writer.onKey(MODEL_BYTES_EXCEEDED);
writer.onUint64(results.s_BytesExceeded);

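With these two keys added, a model_size_stats document carries the OS-reported figures alongside the accounted model bytes. An illustrative, hand-written example of the resulting shape (all values invented, unrelated fields omitted):

```cpp
// Abridged example of a model_size_stats document after this change;
// the numbers are made up for illustration only.
const char* const EXAMPLE_MODEL_SIZE_STATS = R"({
    "job_id": "my_job",
    "model_size_stats": {
        "model_bytes": 1230000,
        "peak_model_bytes": 1250000,
        "system_memory_bytes": 52428800,
        "max_system_memory_bytes": 60817408,
        "model_bytes_exceeded": 0,
        "model_bytes_memory_limit": 16777216
    }
})";
```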
22 changes: 20 additions & 2 deletions lib/api/unittest/CAnomalyJobLimitTest.cc
@@ -9,6 +9,7 @@
* limitation.
*/
#include <core/CJsonOutputStreamWrapper.h>
#include <core/CProcessStats.h>
#include <core/CoreTypes.h>

#include <maths/common/CIntegerTools.h>
@@ -92,6 +93,8 @@ BOOST_AUTO_TEST_CASE(testAccuracy) {
std::size_t nonLimitedUsage{0};
std::size_t limitedUsage{0};

std::size_t nonLimitedMaxSystemUsage{0};
std::size_t limitedMaxSystemUsage{0};
{
// Without limits, this data set should make the models around
// 1230000 bytes
@@ -105,8 +108,6 @@
core::CJsonOutputStreamWrapper wrappedOutputStream(outputStrm);

model::CLimits limits;
//limits.resourceMonitor().m_ByteLimitHigh = 100000;
//limits.resourceMonitor().m_ByteLimitLow = 90000;

{
LOG_TRACE(<< "Setting up job");
@@ -127,8 +128,12 @@
BOOST_REQUIRE_EQUAL(uint64_t(18630), job.numRecordsHandled());

nonLimitedUsage = limits.resourceMonitor().totalMemory();
nonLimitedMaxSystemUsage = limits.resourceMonitor().maxSystemMemory();
}
}
LOG_DEBUG(<< "nonLimitedUsage: " << nonLimitedUsage);
LOG_DEBUG(<< "nonLimitedMaxSystemUsage: " << nonLimitedMaxSystemUsage);
BOOST_TEST_REQUIRE(nonLimitedMaxSystemUsage >= nonLimitedUsage);
{
// Now run the data with limiting
ml::api::CAnomalyJobConfig jobConfig = CTestAnomalyJob::makeSimpleJobConfig(
@@ -166,11 +171,15 @@
// TODO this limit must be tightened once there is more granular
// control over the model memory creation
limitedUsage = limits.resourceMonitor().totalMemory();
limitedMaxSystemUsage = limits.resourceMonitor().maxSystemMemory();
}
LOG_TRACE(<< outputStrm.str());

LOG_DEBUG(<< "Non-limited usage: " << nonLimitedUsage << "; limited: " << limitedUsage);
LOG_DEBUG(<< "Non-limited System Usage: " << nonLimitedMaxSystemUsage);
LOG_DEBUG(<< "Limited System Usage: " << limitedMaxSystemUsage);
BOOST_TEST_REQUIRE(limitedUsage < nonLimitedUsage);
BOOST_TEST_REQUIRE(limitedMaxSystemUsage >= limitedUsage);
}
}

@@ -375,6 +384,8 @@ BOOST_AUTO_TEST_CASE(testModelledEntityCountForFixedMemoryLimit) {
LOG_DEBUG(<< "# partition = " << used.s_PartitionFields);
LOG_DEBUG(<< "Memory status = " << used.s_MemoryStatus);
LOG_DEBUG(<< "Memory usage bytes = " << used.s_Usage);
LOG_DEBUG(<< "System memory usage bytes = " << used.s_SystemMemoryUsage);
LOG_DEBUG(<< "Max system memory usage bytes = " << used.s_MaxSystemMemoryUsage);
LOG_DEBUG(<< "Memory limit bytes = "
<< memoryLimit * core::constants::BYTES_IN_MEGABYTES);
BOOST_TEST_REQUIRE(used.s_ByFields > testParam.s_ExpectedByFields);
@@ -384,6 +395,7 @@
memoryLimit * core::constants::BYTES_IN_MEGABYTES / 2, used.s_Usage,
memoryLimit * core::constants::BYTES_IN_MEGABYTES /
testParam.s_ExpectedByMemoryUsageRelativeErrorDivisor);
BOOST_TEST_REQUIRE(used.s_Usage <= used.s_MaxSystemMemoryUsage);
}

LOG_DEBUG(<< "**** Test partition with bucketLength = " << testParam.s_BucketLength
@@ -428,6 +440,8 @@ BOOST_AUTO_TEST_CASE(testModelledEntityCountForFixedMemoryLimit) {
LOG_DEBUG(<< "# partition = " << used.s_PartitionFields);
LOG_DEBUG(<< "Memory status = " << used.s_MemoryStatus);
LOG_DEBUG(<< "Memory usage = " << used.s_Usage);
LOG_DEBUG(<< "System memory usage = " << used.s_SystemMemoryUsage);
LOG_DEBUG(<< "Max system memory usage = " << used.s_MaxSystemMemoryUsage);
LOG_DEBUG(<< "Memory limit bytes = " << memoryLimit * 1024 * 1024);
BOOST_TEST_REQUIRE(used.s_PartitionFields >= testParam.s_ExpectedPartitionFields);
BOOST_TEST_REQUIRE(used.s_PartitionFields < 450);
@@ -437,6 +451,7 @@
memoryLimit * core::constants::BYTES_IN_MEGABYTES / 2, used.s_Usage,
memoryLimit * core::constants::BYTES_IN_MEGABYTES /
testParam.s_ExpectedPartitionUsageRelativeErrorDivisor);
BOOST_TEST_REQUIRE(used.s_Usage <= used.s_MaxSystemMemoryUsage);
}

LOG_DEBUG(<< "**** Test over with bucketLength = " << testParam.s_BucketLength
@@ -479,13 +494,16 @@
LOG_DEBUG(<< "# over = " << used.s_OverFields);
LOG_DEBUG(<< "Memory status = " << used.s_MemoryStatus);
LOG_DEBUG(<< "Memory usage = " << used.s_Usage);
LOG_DEBUG(<< "System memory usage = " << used.s_SystemMemoryUsage);
LOG_DEBUG(<< "Max system memory usage = " << used.s_MaxSystemMemoryUsage);
LOG_DEBUG(<< "Memory limit bytes = " << memoryLimit * 1024 * 1024);
BOOST_TEST_REQUIRE(used.s_OverFields > testParam.s_ExpectedOverFields);
BOOST_TEST_REQUIRE(used.s_OverFields <= 9000);
BOOST_REQUIRE_CLOSE_ABSOLUTE(
memoryLimit * core::constants::BYTES_IN_MEGABYTES / 2, used.s_Usage,
memoryLimit * core::constants::BYTES_IN_MEGABYTES /
testParam.s_ExpectedOverUsageRelativeErrorDivisor);
BOOST_TEST_REQUIRE(used.s_Usage <= used.s_MaxSystemMemoryUsage);
}
}
}
63 changes: 35 additions & 28 deletions lib/api/unittest/CJsonOutputWriterTest.cc
@@ -1728,21 +1728,23 @@ BOOST_AUTO_TEST_CASE(testReportMemoryUsage) {
resourceUsage.s_AdjustedUsage = 2;
resourceUsage.s_PeakUsage = 3;
resourceUsage.s_AdjustedPeakUsage = 4;
resourceUsage.s_ByFields = 5;
resourceUsage.s_PartitionFields = 6;
resourceUsage.s_OverFields = 7;
resourceUsage.s_AllocationFailures = 8;
resourceUsage.s_SystemMemoryUsage = 5;
resourceUsage.s_MaxSystemMemoryUsage = 6;
resourceUsage.s_ByFields = 7;
resourceUsage.s_PartitionFields = 8;
resourceUsage.s_OverFields = 9;
resourceUsage.s_AllocationFailures = 10;
resourceUsage.s_MemoryStatus = ml::model_t::E_MemoryStatusHardLimit;
resourceUsage.s_AssignmentMemoryBasis = ml::model_t::E_AssignmentBasisCurrentModelBytes;
resourceUsage.s_BucketStartTime = 9;
resourceUsage.s_BytesExceeded = 10;
resourceUsage.s_BytesMemoryLimit = 11;
resourceUsage.s_OverallCategorizerStats.s_CategorizedMessages = 12;
resourceUsage.s_OverallCategorizerStats.s_TotalCategories = 13;
resourceUsage.s_OverallCategorizerStats.s_FrequentCategories = 14;
resourceUsage.s_OverallCategorizerStats.s_RareCategories = 15;
resourceUsage.s_OverallCategorizerStats.s_DeadCategories = 16;
resourceUsage.s_OverallCategorizerStats.s_MemoryCategorizationFailures = 17;
resourceUsage.s_AssignmentMemoryBasis = ml::model_t::E_AssignmentBasisPeakModelBytes;
resourceUsage.s_BucketStartTime = 11;
resourceUsage.s_BytesExceeded = 12;
resourceUsage.s_BytesMemoryLimit = 13;
resourceUsage.s_OverallCategorizerStats.s_CategorizedMessages = 14;
resourceUsage.s_OverallCategorizerStats.s_TotalCategories = 15;
resourceUsage.s_OverallCategorizerStats.s_FrequentCategories = 16;
resourceUsage.s_OverallCategorizerStats.s_RareCategories = 17;
resourceUsage.s_OverallCategorizerStats.s_DeadCategories = 18;
resourceUsage.s_OverallCategorizerStats.s_MemoryCategorizationFailures = 19;
resourceUsage.s_OverallCategorizerStats.s_CategorizationStatus =
ml::model_t::E_CategorizationStatusWarn;

@@ -1770,44 +1772,49 @@
BOOST_REQUIRE_EQUAL(2, sizeStats.at("model_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("peak_model_bytes"));
BOOST_REQUIRE_EQUAL(4, sizeStats.at("peak_model_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("system_memory_bytes"));
BOOST_REQUIRE_EQUAL(5, sizeStats.at("system_memory_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("max_system_memory_bytes"));
BOOST_REQUIRE_EQUAL(
6, sizeStats.at("max_system_memory_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_by_field_count"));
BOOST_REQUIRE_EQUAL(5, sizeStats.at("total_by_field_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(7, sizeStats.at("total_by_field_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_partition_field_count"));
BOOST_REQUIRE_EQUAL(
6, sizeStats.at("total_partition_field_count").to_number<std::int64_t>());
8, sizeStats.at("total_partition_field_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_over_field_count"));
BOOST_REQUIRE_EQUAL(7, sizeStats.at("total_over_field_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(9, sizeStats.at("total_over_field_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("bucket_allocation_failures_count"));
BOOST_REQUIRE_EQUAL(
8, sizeStats.at("bucket_allocation_failures_count").to_number<std::int64_t>());
10, sizeStats.at("bucket_allocation_failures_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("timestamp"));
BOOST_REQUIRE_EQUAL(9000, sizeStats.at("timestamp").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(11000, sizeStats.at("timestamp").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("memory_status"));
BOOST_REQUIRE_EQUAL("hard_limit", sizeStats.at("memory_status").as_string());
BOOST_TEST_REQUIRE(sizeStats.contains("assignment_memory_basis"));
BOOST_REQUIRE_EQUAL("current_model_bytes",
BOOST_REQUIRE_EQUAL("peak_model_bytes",
sizeStats.at("assignment_memory_basis").as_string());
BOOST_TEST_REQUIRE(sizeStats.contains("log_time"));
std::int64_t nowMs{ml::core::CTimeUtils::nowMs()};
BOOST_TEST_REQUIRE(nowMs >= sizeStats.at("log_time").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("model_bytes_exceeded"));
BOOST_REQUIRE_EQUAL(10, sizeStats.at("model_bytes_exceeded").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(12, sizeStats.at("model_bytes_exceeded").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("model_bytes_memory_limit"));
BOOST_REQUIRE_EQUAL(
11, sizeStats.at("model_bytes_memory_limit").to_number<std::int64_t>());
13, sizeStats.at("model_bytes_memory_limit").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("categorized_doc_count"));
BOOST_REQUIRE_EQUAL(12, sizeStats.at("categorized_doc_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(14, sizeStats.at("categorized_doc_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("total_category_count"));
BOOST_REQUIRE_EQUAL(13, sizeStats.at("total_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(15, sizeStats.at("total_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("frequent_category_count"));
BOOST_REQUIRE_EQUAL(
14, sizeStats.at("frequent_category_count").to_number<std::int64_t>());
16, sizeStats.at("frequent_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("rare_category_count"));
BOOST_REQUIRE_EQUAL(15, sizeStats.at("rare_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(17, sizeStats.at("rare_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("dead_category_count"));
BOOST_REQUIRE_EQUAL(16, sizeStats.at("dead_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(18, sizeStats.at("dead_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("failed_category_count"));
BOOST_REQUIRE_EQUAL(17, sizeStats.at("failed_category_count").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(19, sizeStats.at("failed_category_count").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(sizeStats.contains("categorization_status"));
BOOST_REQUIRE_EQUAL("warn", sizeStats.at("categorization_status").as_string());
}
6 changes: 6 additions & 0 deletions lib/api/unittest/CModelSnapshotJsonWriterTest.cc
@@ -36,6 +36,8 @@ BOOST_AUTO_TEST_CASE(testWrite) {
20000, // bytes used (adjusted)
30000, // peak bytes used
60000, // peak bytes used (adjusted)
409600, // System memory used (rss)
413696, // Max system memory used (max rss)
3, // # by fields
1, // # partition fields
150, // # over fields
@@ -116,6 +118,10 @@
BOOST_TEST_REQUIRE(modelSizeStats.contains("peak_model_bytes"));
BOOST_REQUIRE_EQUAL(
60000, modelSizeStats.at("peak_model_bytes").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(
409600, modelSizeStats.at("system_memory_bytes").to_number<std::int64_t>());
BOOST_REQUIRE_EQUAL(
413696, modelSizeStats.at("max_system_memory_bytes").to_number<std::int64_t>());
BOOST_TEST_REQUIRE(modelSizeStats.contains("total_by_field_count"));
BOOST_REQUIRE_EQUAL(
3, modelSizeStats.at("total_by_field_count").to_number<std::int64_t>());
9 changes: 8 additions & 1 deletion lib/core/CProcessStats_Linux.cc
@@ -11,6 +11,7 @@
#include <core/CProcessStats.h>

#include <core/CLogger.h>
#include <core/CProgramCounters.h>
#include <core/CStringUtils.h>

#include <errno.h>
@@ -75,6 +76,8 @@ std::size_t CProcessStats::residentSetSize() {
}
}

CProgramCounters::counter(counter_t::E_TSADSystemMemoryUsage) = rss;

return rss;
}

@@ -87,7 +90,11 @@
}

// ru_maxrss is in kilobytes
return static_cast<std::size_t>(rusage.ru_maxrss * 1024L);
auto maxRSS = static_cast<std::size_t>(rusage.ru_maxrss * 1024L);

CProgramCounters::counter(counter_t::E_TSADMaxSystemMemoryUsage) = maxRSS;

return maxRSS;
}
}
}
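On Linux, `ru_maxrss` from getrusage(2) is reported in kilobytes, which is why the code above multiplies by 1024 (macOS reports the same field in bytes, so that platform would need a separate implementation). A self-contained sketch of the underlying call:

```cpp
#include <sys/resource.h>

#include <cstddef>

// Standalone sketch of the Linux max-RSS query wrapped by
// CProcessStats::maxResidentSetSize(); returns 0 on failure.
std::size_t queryMaxRssBytes() {
    struct rusage usage;
    if (::getrusage(RUSAGE_SELF, &usage) != 0) {
        return 0;
    }
    // ru_maxrss is in kilobytes on Linux; convert to bytes.
    return static_cast<std::size_t>(usage.ru_maxrss) * 1024;
}
```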