diff --git a/include/fastdds/dds/core/policy/QosPolicies.hpp b/include/fastdds/dds/core/policy/QosPolicies.hpp
index 13b995edf59..a831311e8e9 100644
--- a/include/fastdds/dds/core/policy/QosPolicies.hpp
+++ b/include/fastdds/dds/core/policy/QosPolicies.hpp
@@ -1729,27 +1729,27 @@ class ResourceLimitsQosPolicy : public Parameter_t, public QosPolicy
* @brief Specifies the maximum number of data-samples the DataWriter (or DataReader) can manage across all the
* instances associated with it. Represents the maximum samples the middleware can store for any one DataWriter
* (or DataReader).
- * Value less or equal to 0 means infinite resources. By default, 5000.
+ * Value less or equal to 0 means infinite resources. By default, LENGTH_UNLIMITED.
*
* @warning It is inconsistent if `max_samples < (max_instances * max_samples_per_instance)`.
*/
int32_t max_samples;
/**
* @brief Represents the maximum number of instances DataWriter (or DataReader) can manage.
- * Value less or equal to 0 means infinite resources. By default, 10.
+ * Value less or equal to 0 means infinite resources. By default, LENGTH_UNLIMITED.
*
* @warning It is inconsistent if `(max_instances * max_samples_per_instance) > max_samples`.
*/
int32_t max_instances;
/**
* @brief Represents the maximum number of samples of any one instance a DataWriter(or DataReader) can manage.
- * Value less or equal to 0 means infinite resources. By default, 400.
+ * Value less or equal to 0 means infinite resources. By default, LENGTH_UNLIMITED.
*
* @warning It is inconsistent if `(max_instances * max_samples_per_instance) > max_samples`.
*/
int32_t max_samples_per_instance;
/**
- * @brief Number of samples currently allocated.
+ * @brief Number of samples initially allocated.
* By default, 100.
*/
int32_t allocated_samples;
@@ -1765,9 +1765,9 @@ class ResourceLimitsQosPolicy : public Parameter_t, public QosPolicy
FASTDDS_EXPORTED_API ResourceLimitsQosPolicy()
: Parameter_t(PID_RESOURCE_LIMITS, 4 * 5)
, QosPolicy(false)
- , max_samples(5000)
- , max_instances(10)
- , max_samples_per_instance(400)
+ , max_samples(LENGTH_UNLIMITED)
+ , max_instances(LENGTH_UNLIMITED)
+ , max_samples_per_instance(LENGTH_UNLIMITED)
, allocated_samples(100)
, extra_samples(1)
{
diff --git a/src/cpp/fastdds/publisher/DataWriterHistory.cpp b/src/cpp/fastdds/publisher/DataWriterHistory.cpp
index 844f63c8cfb..b3fb2180a2d 100644
--- a/src/cpp/fastdds/publisher/DataWriterHistory.cpp
+++ b/src/cpp/fastdds/publisher/DataWriterHistory.cpp
@@ -27,6 +27,7 @@
#include
#include
+#include <rtps/history/HistoryAttributesExtension.hpp>
#include
namespace eprosima {
@@ -51,10 +52,17 @@ HistoryAttributes DataWriterHistory::to_history_attributes(
max_samples = history_qos.depth;
if (topic_kind != NO_KEY)
{
- max_samples *= resource_limits_qos.max_instances;
+ if (0 < resource_limits_qos.max_instances)
+ {
+ max_samples *= resource_limits_qos.max_instances;
+ }
+ else
+ {
+ max_samples = LENGTH_UNLIMITED;
+ }
}
- initial_samples = std::min(initial_samples, max_samples);
+ initial_samples = get_min_max_samples(initial_samples, max_samples);
}
return HistoryAttributes(mempolicy, payloadMaxSize, initial_samples, max_samples, extra_samples);
diff --git a/src/cpp/fastdds/publisher/DataWriterImpl.cpp b/src/cpp/fastdds/publisher/DataWriterImpl.cpp
index 74542c560dd..6efbf5f28fd 100644
--- a/src/cpp/fastdds/publisher/DataWriterImpl.cpp
+++ b/src/cpp/fastdds/publisher/DataWriterImpl.cpp
@@ -338,6 +338,20 @@ ReturnCode_t DataWriterImpl::enable()
datasharing.add_domain_id(utils::default_domain_id());
}
w_att.endpoint.set_data_sharing_configuration(datasharing);
+
+ // Update pool config for KEEP_ALL when max_samples is infinite
+ if ((0 == pool_config_.maximum_size) && (KEEP_ALL_HISTORY_QOS == qos_.history().kind))
+ {
+ // Override infinite with old default value for max_samples + extra samples
+ pool_config_.maximum_size = 5000;
+ if (0 < qos_.resource_limits().extra_samples)
+ {
+ pool_config_.maximum_size += static_cast<uint32_t>(qos_.resource_limits().extra_samples);
+ }
+ EPROSIMA_LOG_ERROR(DATA_WRITER,
+ "DataWriter with KEEP_ALL history and infinite max_samples is not compatible with DataSharing. "
+ "Setting max_samples to " << pool_config_.maximum_size);
+ }
}
else
{
@@ -2124,11 +2138,10 @@ ReturnCode_t DataWriterImpl::check_qos(
qos.resource_limits().max_samples_per_instance > 0 &&
qos.history().depth > qos.resource_limits().max_samples_per_instance)
{
- EPROSIMA_LOG_WARNING(RTPS_QOS_CHECK,
- "HISTORY DEPTH '" << qos.history().depth <<
- "' is inconsistent with max_samples_per_instance: '" << qos.resource_limits().max_samples_per_instance <<
- "'. Consistency rule: depth <= max_samples_per_instance." <<
- " Effectively using max_samples_per_instance as depth.");
+ EPROSIMA_LOG_ERROR(RTPS_QOS_CHECK,
+ "HISTORY DEPTH '" << qos.history().depth << "' is higher than max_samples_per_instance " <<
+ "'" << qos.resource_limits().max_samples_per_instance << "'.");
+ return RETCODE_INCONSISTENT_POLICY;
}
return RETCODE_OK;
}
@@ -2136,19 +2149,19 @@ ReturnCode_t DataWriterImpl::check_qos(
ReturnCode_t DataWriterImpl::check_allocation_consistency(
const DataWriterQos& qos)
{
- if ((qos.resource_limits().max_samples > 0) &&
- (qos.resource_limits().max_samples <
- (qos.resource_limits().max_instances * qos.resource_limits().max_samples_per_instance)))
+ if ((qos.resource_limits().max_instances <= 0 || qos.resource_limits().max_samples_per_instance <= 0) &&
+ (qos.resource_limits().max_samples > 0))
{
EPROSIMA_LOG_ERROR(DDS_QOS_CHECK,
- "max_samples should be greater than max_instances * max_samples_per_instance");
+ "max_samples should be infinite when max_instances or max_samples_per_instance are infinite");
return RETCODE_INCONSISTENT_POLICY;
}
- if ((qos.resource_limits().max_instances <= 0 || qos.resource_limits().max_samples_per_instance <= 0) &&
- (qos.resource_limits().max_samples > 0))
+ if ((qos.resource_limits().max_samples > 0) &&
+ (qos.resource_limits().max_samples <
+ (qos.resource_limits().max_instances * qos.resource_limits().max_samples_per_instance)))
{
EPROSIMA_LOG_ERROR(DDS_QOS_CHECK,
- "max_samples should be infinite when max_instances or max_samples_per_instance are infinite");
+ "max_samples should be greater than max_instances * max_samples_per_instance");
return RETCODE_INCONSISTENT_POLICY;
}
return RETCODE_OK;
diff --git a/src/cpp/fastdds/subscriber/DataReaderImpl.cpp b/src/cpp/fastdds/subscriber/DataReaderImpl.cpp
index bae7e2e9fc2..da5f685a2f1 100644
--- a/src/cpp/fastdds/subscriber/DataReaderImpl.cpp
+++ b/src/cpp/fastdds/subscriber/DataReaderImpl.cpp
@@ -1620,11 +1620,10 @@ ReturnCode_t DataReaderImpl::check_qos(
qos.resource_limits().max_samples_per_instance > 0 &&
qos.history().depth > qos.resource_limits().max_samples_per_instance)
{
- EPROSIMA_LOG_WARNING(RTPS_QOS_CHECK,
- "HISTORY DEPTH '" << qos.history().depth <<
- "' is inconsistent with max_samples_per_instance: '" << qos.resource_limits().max_samples_per_instance <<
- "'. Consistency rule: depth <= max_samples_per_instance." <<
- " Effectively using max_samples_per_instance as depth.");
+ EPROSIMA_LOG_ERROR(RTPS_QOS_CHECK,
+ "HISTORY DEPTH '" << qos.history().depth << "' is higher than max_samples_per_instance " <<
+ "'" << qos.resource_limits().max_samples_per_instance << "'.");
+ return RETCODE_INCONSISTENT_POLICY;
}
return RETCODE_OK;
}
@@ -1632,19 +1631,19 @@ ReturnCode_t DataReaderImpl::check_qos(
ReturnCode_t DataReaderImpl::check_allocation_consistency(
const DataReaderQos& qos)
{
- if ((qos.resource_limits().max_samples > 0) &&
- (qos.resource_limits().max_samples <
- (qos.resource_limits().max_instances * qos.resource_limits().max_samples_per_instance)))
+ if ((qos.resource_limits().max_instances <= 0 || qos.resource_limits().max_samples_per_instance <= 0) &&
+ (qos.resource_limits().max_samples > 0))
{
EPROSIMA_LOG_ERROR(DDS_QOS_CHECK,
- "max_samples should be greater than max_instances * max_samples_per_instance");
+ "max_samples should be infinite when max_instances or max_samples_per_instance are infinite");
return RETCODE_INCONSISTENT_POLICY;
}
- if ((qos.resource_limits().max_instances <= 0 || qos.resource_limits().max_samples_per_instance <= 0) &&
- (qos.resource_limits().max_samples > 0))
+ if ((qos.resource_limits().max_samples > 0) &&
+ (qos.resource_limits().max_samples <
+ (qos.resource_limits().max_instances * qos.resource_limits().max_samples_per_instance)))
{
EPROSIMA_LOG_ERROR(DDS_QOS_CHECK,
- "max_samples should be infinite when max_instances or max_samples_per_instance are infinite");
+ "max_samples should be greater than max_instances * max_samples_per_instance");
return RETCODE_INCONSISTENT_POLICY;
}
return RETCODE_OK;
diff --git a/src/cpp/fastdds/subscriber/history/DataReaderHistory.cpp b/src/cpp/fastdds/subscriber/history/DataReaderHistory.cpp
index a8f1a6f3a4f..e86be622d52 100644
--- a/src/cpp/fastdds/subscriber/history/DataReaderHistory.cpp
+++ b/src/cpp/fastdds/subscriber/history/DataReaderHistory.cpp
@@ -96,9 +96,16 @@ DataReaderHistory::DataReaderHistory(
{
resource_limited_qos_.max_instances = 1;
resource_limited_qos_.max_samples_per_instance = resource_limited_qos_.max_samples;
- key_changes_allocation_.initial = resource_limited_qos_.allocated_samples;
- key_changes_allocation_.maximum = resource_limited_qos_.max_samples;
+ if (0 < resource_limited_qos_.allocated_samples)
+ {
+ key_changes_allocation_.initial = resource_limited_qos_.allocated_samples;
+ }
+
+ if (resource_limited_qos_.max_samples_per_instance < std::numeric_limits<int32_t>::max())
+ {
+ key_changes_allocation_.maximum = resource_limited_qos_.max_samples_per_instance;
+ }
instances_.emplace(c_InstanceHandle_Unknown,
std::make_shared(key_changes_allocation_, key_writers_allocation_));
data_available_instances_[c_InstanceHandle_Unknown] = instances_[c_InstanceHandle_Unknown];
diff --git a/src/cpp/rtps/history/HistoryAttributesExtension.hpp b/src/cpp/rtps/history/HistoryAttributesExtension.hpp
index 44443207b6c..f0b0409222b 100644
--- a/src/cpp/rtps/history/HistoryAttributesExtension.hpp
+++ b/src/cpp/rtps/history/HistoryAttributesExtension.hpp
@@ -49,6 +49,21 @@ static inline ResourceLimitedContainerConfig resource_limits_from_history(
};
}
+/**
+ * Get the minimum value between two sample counts, considering that <= 0 means unlimited.
+ *
+ * @param a First sample count.
+ * @param b Second sample count.
+ *
+ * @return Minimum sample count.
+ */
+static constexpr int32_t get_min_max_samples(
+ int32_t a,
+ int32_t b)
+{
+ return (0 < a && 0 < b) ? (a < b ? a : b) : (0 < a ? a : b);
+}
+
} // namespace rtps
} // namespace fastdds
} // namespace eprosima
diff --git a/test/blackbox/common/BlackboxTestsReliability.cpp b/test/blackbox/common/BlackboxTestsReliability.cpp
index 1269a5d2186..66159134720 100644
--- a/test/blackbox/common/BlackboxTestsReliability.cpp
+++ b/test/blackbox/common/BlackboxTestsReliability.cpp
@@ -58,6 +58,9 @@ void reliability_disable_heartbeat_piggyback(
writer.reliability(eprosima::fastdds::dds::RELIABLE_RELIABILITY_QOS)
.history_kind(eprosima::fastdds::dds::KEEP_LAST_HISTORY_QOS)
.history_depth(1)
+ .resource_limits_max_samples_per_instance(1)
+ .resource_limits_max_instances(10)
+ .resource_limits_max_samples(10)
.heartbeat_period_seconds(180000)
.disable_heartbeat_piggyback(disable_heartbeat_piggyback)
.disable_builtin_transport()
diff --git a/test/mock/dds/DataWriterHistory/fastdds/publisher/DataWriterHistory.hpp b/test/mock/dds/DataWriterHistory/fastdds/publisher/DataWriterHistory.hpp
index f04652bcf75..d7f1579d09a 100644
--- a/test/mock/dds/DataWriterHistory/fastdds/publisher/DataWriterHistory.hpp
+++ b/test/mock/dds/DataWriterHistory/fastdds/publisher/DataWriterHistory.hpp
@@ -60,7 +60,14 @@ class DataWriterHistory : public WriterHistory
max_samples = history_qos.depth;
if (topic_kind != NO_KEY)
{
- max_samples *= resource_limits_qos.max_instances;
+ if (0 < resource_limits_qos.max_instances)
+ {
+ max_samples *= resource_limits_qos.max_instances;
+ }
+ else
+ {
+ max_samples = std::numeric_limits<int32_t>::max();
+ }
}
initial_samples = std::min(initial_samples, max_samples);
diff --git a/test/mock/dds/QosPolicies/fastdds/dds/core/policy/QosPolicies.hpp b/test/mock/dds/QosPolicies/fastdds/dds/core/policy/QosPolicies.hpp
index 5d8f965ce79..24a7e4d992f 100644
--- a/test/mock/dds/QosPolicies/fastdds/dds/core/policy/QosPolicies.hpp
+++ b/test/mock/dds/QosPolicies/fastdds/dds/core/policy/QosPolicies.hpp
@@ -669,61 +669,61 @@ class GenericDataQosPolicy : public Parameter_t, public QosPolicy,
* Class TClassName, base template for user data qos policies.
*/
#define TEMPLATE_DATA_QOS_POLICY(TClassName, TPid) \
- class TClassName : public GenericDataQosPolicy \
- { \
- public: \
+ class TClassName : public GenericDataQosPolicy \
+ { \
+ public: \
\
- FASTDDS_EXPORTED_API TClassName() \
- : GenericDataQosPolicy(TPid) \
- { \
- } \
+ FASTDDS_EXPORTED_API TClassName() \
+ : GenericDataQosPolicy(TPid) \
+ { \
+ } \
\
- FASTDDS_EXPORTED_API TClassName( \
- uint16_t in_length) \
- : GenericDataQosPolicy(TPid, in_length) \
- { \
- } \
+ FASTDDS_EXPORTED_API TClassName( \
+ uint16_t in_length) \
+ : GenericDataQosPolicy(TPid, in_length) \
+ { \
+ } \
\
- /** \
- * Construct from another TClassName. \
- * \
- * The resulting TClassName will have the same size limits \
- * as the input attribute \
- * \
- * @param data data to copy in the newly created object \
- */ \
- FASTDDS_EXPORTED_API TClassName( \
- const TClassName& data) = default; \
+ /** \
+ * Construct from another TClassName. \
+ * \
+ * The resulting TClassName will have the same size limits \
+ * as the input attribute \
+ * \
+ * @param data data to copy in the newly created object \
+ */ \
+ FASTDDS_EXPORTED_API TClassName( \
+ const TClassName& data) = default; \
\
- /** \
- * Construct from underlying collection type. \
- * \
- * Useful to easy integration on old APIs where a traditional container was used. \
- * The resulting TClassName will always be unlimited in size \
- * \
- * @param data data to copy in the newly created object \
- */ \
- FASTDDS_EXPORTED_API TClassName( \
- const collection_type& data) \
- : GenericDataQosPolicy(TPid, data) \
- { \
- } \
+ /** \
+ * Construct from underlying collection type. \
+ * \
+ * Useful to easy integration on old APIs where a traditional container was used. \
+ * The resulting TClassName will always be unlimited in size \
+ * \
+ * @param data data to copy in the newly created object \
+ */ \
+ FASTDDS_EXPORTED_API TClassName( \
+ const collection_type& data) \
+ : GenericDataQosPolicy(TPid, data) \
+ { \
+ } \
\
- virtual FASTDDS_EXPORTED_API ~TClassName() = default; \
+ virtual FASTDDS_EXPORTED_API ~TClassName() = default; \
\
- /** \
- * Copies another TClassName. \
- * \
- * The resulting TClassName will have the same size limit \
- * as the input parameter, so all data in the input will be copied. \
- * \
- * @param b object to be copied \
- * @return reference to the current object. \
- */ \
- TClassName& operator =( \
- const TClassName& b) = default; \
+ /** \
+ * Copies another TClassName. \
+ * \
+ * The resulting TClassName will have the same size limit \
+ * as the input parameter, so all data in the input will be copied. \
+ * \
+ * @param b object to be copied \
+ * @return reference to the current object. \
+ */ \
+ TClassName& operator =( \
+ const TClassName& b) = default; \
\
- };
+ };
TEMPLATE_DATA_QOS_POLICY(UserDataQosPolicy, PID_USER_DATA)
TEMPLATE_DATA_QOS_POLICY(TopicDataQosPolicy, PID_TOPIC_DATA)
@@ -955,9 +955,9 @@ class HistoryQosPolicy : public Parameter_t, public QosPolicy
/**
* Class ResourceLimitsQosPolicy, defines the ResourceLimits for the Writer or the Reader.
- * max_samples: Default value 5000.
- * max_instances: Default value 10.
- * max_samples_per_instance: Default value 400.
+ * max_samples: Default value LENGTH_UNLIMITED.
+ * max_instances: Default value LENGTH_UNLIMITED.
+ * max_samples_per_instance: Default value LENGTH_UNLIMITED.
* allocated_samples: Default value 100.
*/
class ResourceLimitsQosPolicy : public Parameter_t, public QosPolicy
@@ -973,9 +973,9 @@ class ResourceLimitsQosPolicy : public Parameter_t, public QosPolicy
FASTDDS_EXPORTED_API ResourceLimitsQosPolicy()
: Parameter_t(PID_RESOURCE_LIMITS, 4 + 4 + 4)
, QosPolicy(false)
- , max_samples(5000)
- , max_instances(10)
- , max_samples_per_instance(400)
+ , max_samples(LENGTH_UNLIMITED)
+ , max_instances(LENGTH_UNLIMITED)
+ , max_samples_per_instance(LENGTH_UNLIMITED)
, allocated_samples(100)
{
}
diff --git a/test/unittest/dds/publisher/DataWriterTests.cpp b/test/unittest/dds/publisher/DataWriterTests.cpp
index 752876dcd93..9d58ea6d666 100644
--- a/test/unittest/dds/publisher/DataWriterTests.cpp
+++ b/test/unittest/dds/publisher/DataWriterTests.cpp
@@ -862,10 +862,12 @@ TEST(DataWriterTests, InvalidQos)
EXPECT_EQ(inconsistent_code, datawriter->set_qos(qos)); // KEEP LAST 0 is inconsistent
qos.history().depth = 2;
EXPECT_EQ(RETCODE_OK, datawriter->set_qos(qos)); // KEEP LAST 2 is OK
- // KEEP LAST 2000 but max_samples_per_instance default (400) is inconsistent but right now it only shows a warning
- // This test will fail whenever we enforce the consistency between depth and max_samples_per_instance.
+ // KEEP LAST 2000 and max_samples_per_instance default (UNLIMITED) is consistent.
qos.history().depth = 2000;
EXPECT_EQ(RETCODE_OK, datawriter->set_qos(qos));
+ qos.resource_limits().max_samples_per_instance = 1000;
+ // KEEP LAST 2000 and max_samples_per_instance 1000 is inconsistent
+ EXPECT_EQ(inconsistent_code, datawriter->set_qos(qos));
ASSERT_TRUE(publisher->delete_datawriter(datawriter) == RETCODE_OK);
ASSERT_TRUE(participant->delete_topic(topic) == RETCODE_OK);
@@ -2659,10 +2661,10 @@ TEST(DataWriterTests, CustomPoolCreation)
DomainParticipantFactory::get_instance()->delete_participant(participant);
}
-TEST(DataWriterTests, history_depth_max_samples_per_instance_warning)
+TEST(DataWriterTests, history_depth_max_samples_per_instance_error)
{
- /* Setup log so it may catch the expected warning */
+ /* Setup log so it may catch the expected error */
Log::ClearConsumers();
MockConsumer* mockConsumer = new MockConsumer("RTPS_QOS_CHECK");
Log::RegisterConsumer(std::unique_ptr<LogConsumer>(mockConsumer));
@@ -2682,14 +2684,14 @@ TEST(DataWriterTests, history_depth_max_samples_per_instance_warning)
Publisher* publisher = participant->create_publisher(PUBLISHER_QOS_DEFAULT);
ASSERT_NE(publisher, nullptr);
- /* Create a datawriter with the QoS that should generate a warning */
+ /* Create a datawriter with the QoS that should generate an error */
DataWriterQos qos;
qos.history().depth = 10;
qos.resource_limits().max_samples_per_instance = 5;
DataWriter* datawriter_1 = publisher->create_datawriter(topic, qos);
- ASSERT_NE(datawriter_1, nullptr);
+ ASSERT_EQ(datawriter_1, nullptr);
- /* Check that the config generated a warning */
+ /* Check that the config generated an error */
auto wait_for_log_entries =
[&mockConsumer](const uint32_t amount, const uint32_t retries, const uint32_t wait_ms) -> size_t
{
@@ -2711,11 +2713,7 @@ TEST(DataWriterTests, history_depth_max_samples_per_instance_warning)
const uint32_t wait_ms = 25;
ASSERT_EQ(wait_for_log_entries(expected_entries, retries, wait_ms), expected_entries);
- /* Check that the datawriter can send data */
- FooType data;
- ASSERT_EQ(RETCODE_OK, datawriter_1->write(&data, HANDLE_NIL));
-
- /* Check that a correctly initialized writer does not produce any warning */
+ /* Check that a correctly initialized writer does not produce any error or warning */
qos.history().depth = 10;
qos.resource_limits().max_samples_per_instance = 10;
DataWriter* datawriter_2 = publisher->create_datawriter(topic, qos);
diff --git a/test/unittest/dds/subscriber/DataReaderTests.cpp b/test/unittest/dds/subscriber/DataReaderTests.cpp
index 13f7bb4bf5c..9dea269e8e2 100644
--- a/test/unittest/dds/subscriber/DataReaderTests.cpp
+++ b/test/unittest/dds/subscriber/DataReaderTests.cpp
@@ -714,17 +714,17 @@ TEST_F(DataReaderTests, InvalidQos)
qos.history().kind = KEEP_LAST_HISTORY_QOS;
qos.history().depth = 0;
EXPECT_EQ(inconsistent_code, data_reader_->set_qos(qos)); // KEEP LAST 0 is inconsistent
- // KEEP LAST 2000 but max_samples_per_instance default (400) is inconsistent but right now it only shows a warning
- // In the reader, this returns RETCODE_INMUTABLE_POLICY, because the depth cannot be changed on run time.
- // Because of the implementation, we know de consistency is checked before the inmutability, so by checking the
- // return against RETCODE_INMUTABLE_POLICY we are testing that the setting are not considered inconsistent yet.
- // This test will fail whenever we enforce the consistency between depth and max_samples_per_instance.
qos.history().depth = 2000;
- EXPECT_EQ(RETCODE_IMMUTABLE_POLICY, data_reader_->set_qos(qos));
+ qos.resource_limits().max_samples = 1000;
+ EXPECT_EQ(inconsistent_code, data_reader_->set_qos(qos)); // KEEP LAST 2000 with max_samples 1000 is inconsistent
/* Inmutable QoS */
const ReturnCode_t inmutable_code = RETCODE_IMMUTABLE_POLICY;
+ qos = DATAREADER_QOS_DEFAULT;
+ qos.history().depth = 2000;
+ EXPECT_EQ(inmutable_code, data_reader_->set_qos(qos));
+
qos = DATAREADER_QOS_DEFAULT;
qos.durability().kind = PERSISTENT_DURABILITY_QOS;
EXPECT_EQ(inmutable_code, data_reader_->set_qos(qos));
@@ -3681,10 +3681,10 @@ TEST_F(DataReaderTests, UpdateInmutableQos)
DomainParticipantFactory::get_instance()->delete_participant(participant);
}
-TEST_F(DataReaderTests, history_depth_max_samples_per_instance_warning)
+TEST_F(DataReaderTests, history_depth_max_samples_per_instance_error)
{
- /* Setup log so it may catch the expected warning */
+ /* Setup log so it may catch the expected error */
Log::ClearConsumers();
MockConsumer* mockConsumer = new MockConsumer("RTPS_QOS_CHECK");
Log::RegisterConsumer(std::unique_ptr<LogConsumer>(mockConsumer));
@@ -3704,14 +3704,14 @@ TEST_F(DataReaderTests, history_depth_max_samples_per_instance_warning)
Subscriber* subscriber = participant->create_subscriber(SUBSCRIBER_QOS_DEFAULT);
ASSERT_NE(subscriber, nullptr);
- /* Create a datareader with the QoS that should generate a warning */
+ /* Create a datareader with the QoS that should generate an error */
DataReaderQos qos;
qos.history().depth = 10;
qos.resource_limits().max_samples_per_instance = 5;
DataReader* datareader_1 = subscriber->create_datareader(topic, qos);
- ASSERT_NE(datareader_1, nullptr);
+ ASSERT_EQ(datareader_1, nullptr);
- /* Check that the config generated a warning */
+ /* Check that the config generated an error */
auto wait_for_log_entries =
[&mockConsumer](const uint32_t amount, const uint32_t retries, const uint32_t wait_ms) -> size_t
{
diff --git a/versions.md b/versions.md
index 0b65d34d078..4c9a52b1acb 100644
--- a/versions.md
+++ b/versions.md
@@ -1,6 +1,8 @@
Forthcoming
-----------
+* ResourceLimitsQosPolicy maximum limits default values changed to LENGTH_UNLIMITED (i.e. infinite).
+
Version v3.4.0
--------------