kafka: apply zone id to client.rack #427

Draft · wants to merge 2 commits into master
driver-kafka/README.md (35 additions, 0 deletions)
@@ -8,3 +8,38 @@ NOTE: This is a slightly modified version with two key differences:
- there is a new argument that converts all output result json files into a single csv.

TODO: Document these changes.

## Features

### Zone-aware workers

To pass a zone/rack ID (e.g. a cloud availability zone name) to the Kafka producer and consumer clients, set the system property `zone.id` and reference the `{zone.id}` template in the `client.id` or `client.rack` configs, in any of the `commonConfig`, `producerConfig`, or `consumerConfig` driver values.

When running workers, pass the `zone.id`:

```bash
export JVM_OPTS=-Dzone.id=az0
/opt/benchmark/bin/benchmark-worker
```

Then use the `{zone.id}` template in the client configs, for example in `commonConfig`:
```yaml
commonConfig: |
bootstrap.servers=localhost:9092
client.id=omb-client_az={zone.id}
client.rack=omb-client_az={zone.id}
```

With `zone.id=az0` set on the worker, this generates producer and consumer `client.id=omb-client_az=az0` and `client.rack=omb-client_az=az0`.
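
For illustration, the fully resolved client properties would look roughly like this (a sketch assuming the `zone.id=az0` worker setting above; serializer settings added by the driver are omitted):

```properties
# Producer and consumer properties after {zone.id} substitution (zone.id=az0)
bootstrap.servers=localhost:9092
client.id=omb-client_az=az0
client.rack=omb-client_az=az0
```

The templates can also be set per client type via `producerConfig` and `consumerConfig`: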

```yaml
producerConfig: |
client.id=omb-producer_az={zone.id}
client.rack=omb-producer_az={zone.id}
consumerConfig: |
auto.offset.reset=earliest
client.id=omb-consumer_az={zone.id}
client.rack=omb-consumer_az={zone.id}
```

This generates producer `client.id=omb-producer_az=az0` and `client.rack=omb-producer_az=az0`, and consumer `client.id=omb-consumer_az=az0` and `client.rack=omb-consumer_az=az0`.
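
The resolved per-client properties would then look roughly as follows (again a sketch assuming `zone.id=az0` and the `bootstrap.servers` inherited from `commonConfig`; serializer/deserializer settings added by the driver are omitted):

```properties
# Producer properties after {zone.id} substitution
bootstrap.servers=localhost:9092
client.id=omb-producer_az=az0
client.rack=omb-producer_az=az0

# Consumer properties after {zone.id} substitution
bootstrap.servers=localhost:9092
auto.offset.reset=earliest
client.id=omb-consumer_az=az0
client.rack=omb-consumer_az=az0
```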
KafkaBenchmarkDriver.java
@@ -48,14 +48,16 @@ public class KafkaBenchmarkDriver implements BenchmarkDriver {
private static final String ZONE_ID_CONFIG = "zone.id";
private static final String ZONE_ID_TEMPLATE = "{zone.id}";
private static final String KAFKA_CLIENT_ID = "client.id";
private static final String KAFKA_CLIENT_RACK = "client.rack";
private Config config;

private List<BenchmarkProducer> producers = Collections.synchronizedList(new ArrayList<>());
private List<BenchmarkConsumer> consumers = Collections.synchronizedList(new ArrayList<>());

- private Properties topicProperties;
- private Properties producerProperties;
- private Properties consumerProperties;
+ // Visible for testing
+ Properties topicProperties;
+ Properties producerProperties;
+ Properties consumerProperties;

private AdminClient admin;

@@ -66,16 +68,16 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
Properties commonProperties = new Properties();
commonProperties.load(new StringReader(config.commonConfig));

- if (commonProperties.containsKey(KAFKA_CLIENT_ID)) {
-   commonProperties.put(
-       KAFKA_CLIENT_ID,
-       applyZoneId(
-           commonProperties.getProperty(KAFKA_CLIENT_ID), System.getProperty(ZONE_ID_CONFIG)));
- }
+ applyZoneIdIfNeeded(commonProperties, KAFKA_CLIENT_ID);
+ applyZoneIdIfNeeded(commonProperties, KAFKA_CLIENT_RACK);

producerProperties = new Properties();
commonProperties.forEach((key, value) -> producerProperties.put(key, value));
producerProperties.load(new StringReader(config.producerConfig));

applyZoneIdIfNeeded(producerProperties, KAFKA_CLIENT_ID);
applyZoneIdIfNeeded(producerProperties, KAFKA_CLIENT_RACK);

producerProperties.put(
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
producerProperties.put(
@@ -84,6 +86,10 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
consumerProperties = new Properties();
commonProperties.forEach((key, value) -> consumerProperties.put(key, value));
consumerProperties.load(new StringReader(config.consumerConfig));

applyZoneIdIfNeeded(consumerProperties, KAFKA_CLIENT_ID);
applyZoneIdIfNeeded(consumerProperties, KAFKA_CLIENT_RACK);

consumerProperties.put(
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
consumerProperties.put(
@@ -95,6 +101,15 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
admin = AdminClient.create(commonProperties);
}

private static void applyZoneIdIfNeeded(Properties props, String propKey) {
if (props.containsKey(propKey)) {
props.put(
propKey,
applyZoneId(
props.getProperty(propKey), System.getProperty(ZONE_ID_CONFIG)));
}
}

@Override
public String getTopicNamePrefix() {
return "test-topic";
@@ -161,11 +176,12 @@ public void close() throws Exception {
admin.close();
}

- private static String applyZoneId(String clientId, String zoneId) {
-   return clientId.replace(ZONE_ID_TEMPLATE, zoneId);
+ private static String applyZoneId(String propValue, String zoneId) {
+   return propValue.replace(ZONE_ID_TEMPLATE, zoneId);
  }

- private static final ObjectMapper mapper =
+ // Visible for testing
+ static final ObjectMapper mapper =
new ObjectMapper(new YAMLFactory())
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
KafkaBenchmarkDriverTest.java (new file)
@@ -0,0 +1,118 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.openmessaging.benchmark.driver.kafka;

import static org.assertj.core.api.Assertions.assertThat;

import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

class KafkaBenchmarkDriverTest {
@TempDir Path tempDir;

@ParameterizedTest
@CsvSource({
"client.id=test_az={zone.id},\"\",\"\",test_az=az0,test_az=az0",
"client.id=test_az={zone.id},client.id=prod_az={zone.id},client.id=cons_az={zone.id},prod_az=az0,cons_az=az0",
"\"\",client.id=prod_az={zone.id},client.id=cons_az={zone.id},prod_az=az0,cons_az=az0",
"\"\",client.id=prod_az={zone.id},\"\",prod_az=az0,",
"\"\",\"\",client.id=cons_az={zone.id},,cons_az=az0"
})
void testInitClientIdWithZoneId(
String commonConfig,
String producerConfig,
String consumerConfig,
String producerClientId,
String consumerClientId)
throws Exception {
// Given these configs
final Path configPath = tempDir.resolve("config");
Config config = new Config();
config.replicationFactor = 1;
config.commonConfig = "bootstrap.servers=localhost:9092\n" + commonConfig;
config.producerConfig = producerConfig;
config.consumerConfig = consumerConfig;
config.topicConfig = "";

// and the system property set for zone id
System.setProperty("zone.id", "az0");

try (KafkaBenchmarkDriver driver = new KafkaBenchmarkDriver()) {
// When initializing kafka driver
Files.write(configPath, KafkaBenchmarkDriver.mapper.writeValueAsBytes(config));
driver.initialize(configPath.toFile(), null);

// Then
if (producerClientId != null) {
assertThat(driver.producerProperties).containsEntry("client.id", producerClientId);
} else {
assertThat(driver.producerProperties).doesNotContainKey("client.id");
}
if (consumerClientId != null) {
assertThat(driver.consumerProperties).containsEntry("client.id", consumerClientId);
} else {
assertThat(driver.consumerProperties).doesNotContainKey("client.id");
}
}
}

@ParameterizedTest
@CsvSource({
"client.rack=test_az={zone.id},\"\",\"\",test_az=az0,test_az=az0",
"client.rack=test_az={zone.id},client.rack=prod_az={zone.id},client.rack=cons_az={zone.id},prod_az=az0,cons_az=az0",
"\"\",client.rack=prod_az={zone.id},client.rack=cons_az={zone.id},prod_az=az0,cons_az=az0",
"\"\",client.rack=prod_az={zone.id},\"\",prod_az=az0,",
"\"\",\"\",client.rack=cons_az={zone.id},,cons_az=az0"
})
void testInitClientRackWithZoneId(
String commonConfig,
String producerConfig,
String consumerConfig,
String producerClientRack,
String consumerClientRack)
throws Exception {
// Given these configs
final Path configPath = tempDir.resolve("config");
Config config = new Config();
config.replicationFactor = 1;
config.commonConfig = "bootstrap.servers=localhost:9092\n" + commonConfig;
config.producerConfig = producerConfig;
config.consumerConfig = consumerConfig;
config.topicConfig = "";

// and the system property set for zone id
System.setProperty("zone.id", "az0");

try (KafkaBenchmarkDriver driver = new KafkaBenchmarkDriver()) {
// When initializing kafka driver
Files.write(configPath, KafkaBenchmarkDriver.mapper.writeValueAsBytes(config));
driver.initialize(configPath.toFile(), null);

// Then
if (producerClientRack != null) {
assertThat(driver.producerProperties).containsEntry("client.rack", producerClientRack);
} else {
assertThat(driver.producerProperties).doesNotContainKey("client.rack");
}
if (consumerClientRack != null) {
assertThat(driver.consumerProperties).containsEntry("client.rack", consumerClientRack);
} else {
assertThat(driver.consumerProperties).doesNotContainKey("client.rack");
}
}
}
}