@@ -36,6 +36,11 @@
 import org.apache.kafka.common.TopicPartition;
 
 import io.aiven.kafka.connect.common.config.CompressionType;
+import io.aiven.kafka.connect.common.config.FileNameFragment;
+import io.aiven.kafka.connect.common.config.FormatType;
+import io.aiven.kafka.connect.common.config.OutputFieldEncodingType;
+import io.aiven.kafka.connect.common.config.OutputFieldType;
+import io.aiven.kafka.connect.common.config.OutputFormatFragment;
 
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -63,8 +68,8 @@ void setUp() throws ExecutionException, InterruptedException {
     @ValueSource(strings = { "none", "gzip", "snappy", "zstd" })
     void basicTest(final String compression) throws ExecutionException, InterruptedException {
         final Map<String, String> connectorConfig = basicConnectorConfig();
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_CONFIG, "key,value");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_COMPRESSION_TYPE_CONFIG, compression);
+        OutputFormatFragment.setter(connectorConfig).withOutputFields(OutputFieldType.KEY, OutputFieldType.VALUE);
+        FileNameFragment.setter(connectorConfig).fileCompression(CompressionType.forName(compression));
         createConnector(connectorConfig);
 
         final List<Future<RecordMetadata>> sendFutures = new ArrayList<>();
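
Note on the pattern above: judging by the chained calls with no reassignment, `OutputFormatFragment.setter(map)` and `FileNameFragment.setter(map)` mutate the map they are handed, writing the same string properties the removed `AzureBlobSinkConfig` puts set directly. A minimal sketch of the assumed equivalence, using only the imports added in this commit (the literal keys in the comments are assumptions, not taken from this diff):

    final Map<String, String> config = new HashMap<>();
    // Assumed to write the property the removed FORMAT_OUTPUT_FIELDS_CONFIG put set,
    // i.e. something like format.output.fields=key,value
    OutputFormatFragment.setter(config)
            .withOutputFields(OutputFieldType.KEY, OutputFieldType.VALUE);
    // Assumed to write the property the removed FILE_COMPRESSION_TYPE_CONFIG put set,
    // i.e. something like file.compression.type=gzip
    FileNameFragment.setter(config)
            .fileCompression(CompressionType.forName("gzip"));
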
@@ -118,10 +123,11 @@ void basicTest(final String compression) throws ExecutionException, InterruptedException {
     @ValueSource(strings = { "none", "gzip", "snappy", "zstd" })
     void groupByTimestampVariable(final String compression) throws ExecutionException, InterruptedException {
         final Map<String, String> connectorConfig = basicConnectorConfig();
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_CONFIG, "key,value");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_COMPRESSION_TYPE_CONFIG, compression);
-        connectorConfig.put(AzureBlobSinkConfig.FILE_NAME_TEMPLATE_CONFIG, "{{topic}}-{{partition}}-{{start_offset}}-"
-                + "{{timestamp:unit=yyyy}}-{{timestamp:unit=MM}}-{{timestamp:unit=dd}}");
+        OutputFormatFragment.setter(connectorConfig).withOutputFields(OutputFieldType.KEY, OutputFieldType.VALUE);
+        FileNameFragment.setter(connectorConfig)
+                .fileCompression(CompressionType.forName(compression))
+                .template("{{topic}}-{{partition}}-{{start_offset}}-"
+                        + "{{timestamp:unit=yyyy}}-{{timestamp:unit=MM}}-{{timestamp:unit=dd}}");
         createConnector(connectorConfig);
 
         final List<Future<RecordMetadata>> sendFutures = new ArrayList<>();
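
For reference, the string handed to `.template(...)` above is the connector's blob-name pattern; substitution is mechanical. A worked example with hypothetical values (and assuming no offset padding is configured):

    // topic "test-topic", partition 0, start offset 0, written 2024-03-15:
    // {{topic}}-{{partition}}-{{start_offset}}-{{timestamp:unit=yyyy}}-{{timestamp:unit=MM}}-{{timestamp:unit=dd}}
    // -> "test-topic-0-0-2024-03-15"
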
@@ -176,10 +182,12 @@ private String getTimestampBlobName(final int partition, final int startOffset)
     @ValueSource(strings = { "none", "gzip", "snappy", "zstd" })
     void oneFilePerRecordWithPlainValues(final String compression) throws ExecutionException, InterruptedException {
         final Map<String, String> connectorConfig = basicConnectorConfig();
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_CONFIG, "value");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_COMPRESSION_TYPE_CONFIG, compression);
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_VALUE_ENCODING_CONFIG, "none");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_MAX_RECORDS, "1");
+        FileNameFragment.setter(connectorConfig)
+                .maxRecordsPerFile(1)
+                .fileCompression(CompressionType.forName(compression));
+        OutputFormatFragment.setter(connectorConfig)
+                .withOutputFields(OutputFieldType.VALUE)
+                .withOutputFieldEncodingType(OutputFieldEncodingType.NONE);
         createConnector(connectorConfig);
 
         final List<Future<RecordMetadata>> sendFutures = new ArrayList<>();
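
`maxRecordsPerFile(1)` rolls a new blob for every record, so each record's own offset becomes the start offset in the blob name. Assuming the connector's default `{{topic}}-{{partition}}-{{start_offset}}` template (an assumption, not shown in this diff) and no compression:

    // hypothetical blob names for three records on partition 0, offsets 0..2:
    // test-topic-0-0, test-topic-0-1, test-topic-0-2
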
@@ -226,9 +234,10 @@ void groupByKey(final String compression) throws ExecutionException, InterruptedException {
         final Map<String, String> connectorConfig = basicConnectorConfig();
         final CompressionType compressionType = CompressionType.forName(compression);
         connectorConfig.put("key.converter", "org.apache.kafka.connect.storage.StringConverter");
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_CONFIG, "key,value");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_COMPRESSION_TYPE_CONFIG, compression);
-        connectorConfig.put(AzureBlobSinkConfig.FILE_NAME_TEMPLATE_CONFIG, "{{key}}" + compressionType.extension());
+        OutputFormatFragment.setter(connectorConfig).withOutputFields(OutputFieldType.KEY, OutputFieldType.VALUE);
+        FileNameFragment.setter(connectorConfig)
+                .fileCompression(CompressionType.forName(compression))
+                .template("{{key}}" + compressionType.extension());
         createConnector(connectorConfig);
 
         final Map<TopicPartition, List<String>> keysPerTopicPartition = new HashMap<>();
@@ -292,14 +301,14 @@ void groupByKey(final String compression) throws ExecutionException, InterruptedException {
     void jsonlOutput() throws ExecutionException, InterruptedException {
         final Map<String, String> connectorConfig = basicConnectorConfig();
         final String compression = "none";
-        final String contentType = "jsonl";
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_CONFIG, "key,value");
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_VALUE_ENCODING_CONFIG, "none");
         connectorConfig.put("key.converter", "org.apache.kafka.connect.storage.StringConverter");
         connectorConfig.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
         connectorConfig.put("value.converter.schemas.enable", "false");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_COMPRESSION_TYPE_CONFIG, compression);
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_TYPE_CONFIG, contentType);
+        FileNameFragment.setter(connectorConfig).fileCompression(CompressionType.NONE);
+        OutputFormatFragment.setter(connectorConfig)
+                .withFormatType(FormatType.JSONL)
+                .withOutputFields(OutputFieldType.KEY, OutputFieldType.VALUE)
+                .withOutputFieldEncodingType(OutputFieldEncodingType.NONE);
         createConnector(connectorConfig);
 
         final List<Future<RecordMetadata>> sendFutures = new ArrayList<>();
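
For context on `FormatType.JSONL` here versus `FormatType.JSON` in the next hunk: jsonl emits one JSON object per record per line, while json wraps a blob's records in a single array. With key and value output fields and no value encoding, a hypothetical two-record blob would look something like (exact field order is an assumption):

    // jsonl: one object per line
    {"value": {"name": "user-0"}, "key": "key-0"}
    {"value": {"name": "user-1"}, "key": "key-1"}

    // json: one array per blob
    [
    {"value": {"name": "user-0"}, "key": "key-0"},
    {"value": {"name": "user-1"}, "key": "key-1"}
    ]
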
@@ -353,16 +362,16 @@ void jsonlOutput() throws ExecutionException, InterruptedException {
     void jsonOutput() throws ExecutionException, InterruptedException {
         final Map<String, String> connectorConfig = basicConnectorConfig();
         final String compression = "none";
-        final String contentType = "json";
         connectorConfig.put("azure.storage.connection.string",
                 azureEndpoint != null ? azureEndpoint : azureConnectionString); // NOPMD
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_CONFIG, "key,value");
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_FIELDS_VALUE_ENCODING_CONFIG, "none");
         connectorConfig.put("key.converter", "org.apache.kafka.connect.storage.StringConverter");
         connectorConfig.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
         connectorConfig.put("value.converter.schemas.enable", "false");
-        connectorConfig.put(AzureBlobSinkConfig.FILE_COMPRESSION_TYPE_CONFIG, compression);
-        connectorConfig.put(AzureBlobSinkConfig.FORMAT_OUTPUT_TYPE_CONFIG, contentType);
+        FileNameFragment.setter(connectorConfig).fileCompression(CompressionType.NONE);
+        OutputFormatFragment.setter(connectorConfig)
+                .withFormatType(FormatType.JSON)
+                .withOutputFields(OutputFieldType.KEY, OutputFieldType.VALUE)
+                .withOutputFieldEncodingType(OutputFieldEncodingType.NONE);
         createConnector(connectorConfig);
 
         final int numEpochs = 10;