@@ -714,7 +714,7 @@ sentry:
714714 # noStrictOffsetReset: false
715715
716716 billingMetricsConsumer :
717- enabled : true
717+ enabled : false
718718 replicas : 1
719719 env : []
720720 resources : {}
@@ -1253,7 +1253,7 @@ snuba:
12531253 # medium: Memory
12541254
12551255 outcomesBillingConsumer :
1256- enabled : true
1256+ enabled : false
12571257 replicas : 1
12581258 env : []
12591259 resources : {}
@@ -2064,7 +2064,7 @@ filestore:
20642064 # # GKE, AWS & OpenStack)
20652065 # #
20662066 # storageClass: "-"
2067- accessMode : ReadWriteOnce # Set ReadWriteMany for work Replays
2067+ accessMode : ReadWriteMany
20682068 size : 10Gi
20692069
20702070 # # Whether to mount the persistent volume to the Sentry worker and
@@ -2075,7 +2075,7 @@ filestore:
20752075 # # Please note that you may need to change your accessMode to ReadWriteMany
20762076 # # if you plan on having the web, worker and cron deployments run on
20772077 # # different nodes.
2078- persistentWorkers : false
2078+ persistentWorkers : true
20792079
20802080 # # If existingClaim is specified, no PVC will be created and this claim will
20812081 # # be used
@@ -2119,49 +2119,50 @@ config:
21192119 maxWorkerLifetime : 86400
21202120
21212121clickhouse :
2122+ image :
2123+ tag : 23.8.16-debian-12-r0
21222124 enabled : true
2123- nodeSelector : {}
2124- # tolerations: []
2125- clickhouse :
2126- replicas : " 1"
2127- configmap :
2128- remote_servers :
2129- internal_replication : true
2130- replica :
2131- backup :
2132- enabled : false
2133- zookeeper_servers :
2134- enabled : true
2135- config :
2136- - index : " clickhouse"
2137- hostTemplate : " {{ .Release.Name }}-zookeeper-clickhouse"
2138- port : " 2181"
2139- users :
2140- enabled : false
2141- user :
2142- # the first user will be used if enabled
2143- - name : default
2144- config :
2145- password : " "
2146- networks :
2147- - ::/0
2148- profile : default
2149- quota : default
2150-
2151- persistentVolumeClaim :
2152- enabled : true
2153- dataPersistentVolume :
2154- enabled : true
2155- accessModes :
2156- - " ReadWriteOnce"
2157- storage : " 30Gi"
2158-
2159- # # Use this to enable an extra service account
2160- # serviceAccount:
2161- # annotations: {}
2162- # enabled: false
2163- # name: "sentry-clickhouse"
2164- # automountServiceAccountToken: true
2125+ containerPorts :
2126+ http : 8123
2127+ https : 8443
2128+ tcp : 9000
2129+ replicaCount : 1
2130+ shards : 1
2131+ keeper :
2132+ replicaCount : 1
2133+ auth :
2134+ username : default
2135+ password : " thePassword"
2136+ configdFiles :
2137+ config.xml : |
2138+ <yandex>
2139+ <logger>
2140+ <level>warning</level>
2141+ <console>true</console>
2142+ </logger>
2143+ <query_thread_log remove="remove"/>
2144+ <query_log remove="remove"/>
2145+ <text_log remove="remove"/>
2146+ <trace_log remove="remove"/>
2147+ <metric_log remove="remove"/>
2148+ <asynchronous_metric_log remove="remove"/>
2149+
2150+ <session_log remove="remove"/>
2151+ <part_log remove="remove"/>
2152+
2153+ <allow_nullable_key>1</allow_nullable_key>
2154+
2155+ <profiles>
2156+ <default>
2157+ <log_queries>0</log_queries>
2158+ <log_query_threads>0</log_query_threads>
2159+ </default>
2160+ </profiles>
2161+ <merge_tree>
2162+ <enable_mixed_granularity_parts>1</enable_mixed_granularity_parts>
2163+ <max_suspicious_broken_parts>10</max_suspicious_broken_parts>
2164+ </merge_tree>
2165+ </yandex>
21652166
21662167# # This value is only used when clickhouse.enabled is set to false
21672168# #
@@ -2184,27 +2185,16 @@ externalClickhouse:
21842185 # #
21852186 # clusterName: test_shard_localhost
21862187
2187- # Settings for Zookeeper.
2188- # See https://github.com/bitnami/charts/tree/master/bitnami/zookeeper
2189- zookeeper :
2190- enabled : true
2191- nameOverride : zookeeper-clickhouse
2192- replicaCount : 1
2193- nodeSelector : {}
2194- # tolerations: []
2195- # # When increasing the number of exceptions, you need to increase persistence.size
2196- # persistence:
2197- # size: 8Gi
2198-
21992188# Settings for Kafka.
22002189# See https://github.com/bitnami/charts/tree/master/bitnami/kafka
22012190kafka :
22022191 enabled : true
22032192 provisioning :
22042193 # # Increasing the replicationFactor enhances data reliability during Kafka pod failures by replicating data across multiple brokers.
22052194 # Note that existing topics will remain with replicationFactor: 1 when updated.
2206- replicationFactor : 3
2207- enabled : true
2195+ replicationFactor : 1
2196+ parallel : 2
2197+ enabled : false
22082198 # Topic list is based on files below.
22092199 # - https://github.com/getsentry/snuba/blob/master/snuba/utils/streams/topics.py
22102200 # - https://github.com/getsentry/sentry/blob/master/src/sentry/conf/types/kafka_definition.py
@@ -2352,7 +2342,7 @@ kafka:
23522342 kraft :
23532343 enabled : true
23542344 controller :
2355- replicaCount : 3
2345+ replicaCount : 1
23562346 nodeSelector : {}
23572347 # tolerations: []
23582348 # # if the load on the kafka controller increases, resourcesPreset must be increased
@@ -2421,7 +2411,7 @@ externalKafka:
24212411 ms : 1000
24222412
24232413sourcemaps :
2424- enabled : false
2414+ enabled : true
24252415
24262416redis :
24272417 enabled : true