
Commit 8e91b1b

Committed Mar 4, 2025
fix http-push-connection-dispatcher config overrides
The properties were not in the right place, so the configuration was falling back to the Pekko default at https://github.com/apache/pekko/blob/2469f729f7503acf814bcbd042b4bb0863103c9d/actor/src/main/resources/reference.conf#L506.

Signed-off-by: Aleksandar Stanchev <aleksandar.stanchev@bosch.com>
1 parent: 8d7f126 · commit: 8e91b1b
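For context, a Pekko dispatcher that uses a thread-pool executor reads its sizing keys only from a nested thread-pool-executor block; keys placed directly under the dispatcher are ignored, which is why the old values silently fell back to the reference.conf default linked above. A minimal before/after sketch of the two shapes (the type and executor lines are assumed from the usual Pekko dispatcher layout and are not part of this diff; values are illustrative):

    # Broken: sizing keys at the dispatcher level are not picked up
    http-push-connection-dispatcher {
      type = Dispatcher                  # assumed, not shown in this diff
      executor = "thread-pool-executor"  # assumed, not shown in this diff
      core-pool-size-max = 64            # silently ignored, the reference.conf default wins
    }

    # Fixed: Pekko reads the sizing from the nested thread-pool-executor block
    http-push-connection-dispatcher {
      type = Dispatcher
      executor = "thread-pool-executor"
      thread-pool-executor {
        core-pool-size-max = 64
      }
    }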

File tree

1 file changed: +11 −4 lines changed
 

connectivity/service/src/main/resources/connectivity.conf (+11 −4)
@@ -1329,11 +1329,18 @@ http-push-connection-dispatcher {
   # This executor is meant to be allowed to grow quite big, as it is limited by the max parallelism of each HTTP connection client.
   # Limiting this parallelism here additionally could lead to confusing results regarding throughput of some HTTP connections.
   # The core-pool-size-min remains unchanged, so it is quite small (8). Unused threads will expire after 60s.
-  core-pool-size-factor = 2147483647
-  core-pool-size-factor = ${?HTTP_PUSH_CORE_POOL_SIZE_FACTOR}
 
-  core-pool-size-max = 2147483647
-  core-pool-size-max = ${?HTTP_PUSH_CORE_POOL_SIZE_MAX}
+  thread-pool-executor {
+    core-pool-size-factor = 4
+    core-pool-size-factor = ${?HTTP_PUSH_CORE_POOL_SIZE_FACTOR}
+    core-pool-size-max = 64
+    core-pool-size-max = ${?HTTP_PUSH_CORE_POOL_SIZE_MAX}
+    max-pool-size-min = 64
+    max-pool-size-min = ${?HTTP_PUSH_CORE_POOL_SIZE_MAX} # Overridden with the value of HTTP_PUSH_CORE_POOL_SIZE_MAX: if threads are the cause of HTTP problems,
+    # the pool normally does not grow on its own, so it is better to set the core upper limit manually; this forces the core pool size to that value.
+    max-pool-size-max = 2147483647
+    allow-core-timeout = on # Enables timeout of core threads
+  }
 }
 
 kafka-consumer-dispatcher {
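The repeated keys rely on HOCON's optional substitution: an assignment of ${?VAR} only takes effect when the variable is actually defined (environment variables included); otherwise the literal default above it stands. A small sketch of how the override resolves, using the variable name from the diff (values illustrative):

    core-pool-size-max = 64                                 # default
    core-pool-size-max = ${?HTTP_PUSH_CORE_POOL_SIZE_MAX}   # wins only if the env var is set

    # HTTP_PUSH_CORE_POOL_SIZE_MAX unset -> core-pool-size-max resolves to 64
    # HTTP_PUSH_CORE_POOL_SIZE_MAX=128   -> core-pool-size-max resolves to 128

With these values, Pekko sizes the core pool as roughly clamp(core-pool-size-min, core-pool-size-factor × available processors, core-pool-size-max), so a factor of 4 with the unchanged minimum of 8 and a maximum of 64 keeps the pool small when idle and bounded under load.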
