Commit ccf9778
Authored Jan 14, 2021
Added more documentation to HeaderToField. Fixes #66. (#67)
1 parent: 1f794b1
File tree: 6 files changed, +264 -2 lines changed

 

pom.xml

+1 -1

@@ -23,7 +23,7 @@
   <parent>
     <groupId>com.github.jcustenborder.kafka.connect</groupId>
     <artifactId>kafka-connect-parent</artifactId>
-    <version>2.2.1-cp1</version>
+    <version>2.6.0</version>
   </parent>
   <artifactId>kafka-connect-transform-common</artifactId>
   <version>0.1.0-SNAPSHOT</version>
src/main/java/com/github/jcustenborder/kafka/connect/transform/common/Debug.java (new file, +96 lines)

/**
 * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.github.jcustenborder.kafka.connect.transform.common;

import com.fasterxml.jackson.databind.SerializationFeature;
import com.github.jcustenborder.kafka.connect.utils.jackson.ObjectMapperFactory;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.Transformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Transformation that logs the contents of each record as indented JSON at
 * INFO level and returns the record unchanged. Useful as a debugging aid in
 * a transformation chain.
 */
public class Debug<R extends ConnectRecord<R>> implements Transformation<R> {
  private static final Logger log = LoggerFactory.getLogger(Debug.class);

  void addConnectRecord(Map<String, Object> debugContent, ConnectRecord r) {
    debugContent.put("topic", r.topic());
    debugContent.put("kafkaPartition", r.kafkaPartition());
    debugContent.put("timestamp", r.timestamp());
  }

  void addSinkRecord(Map<String, Object> debugContent, SinkRecord r) {
    debugContent.put("timestampType", r.timestampType());
    debugContent.put("kafkaOffset", r.kafkaOffset());
  }

  void addSourceRecord(Map<String, Object> debugContent, SourceRecord r) {
    debugContent.put("sourcePartition", r.sourcePartition());
    debugContent.put("sourceOffset", r.sourceOffset());
  }

  void addKey(Map<String, Object> debugContent, R record) {
    Object result = record.key();
    debugContent.put("key", result);
  }

  void addValue(Map<String, Object> debugContent, R record) {
    Object result = record.value();
    debugContent.put("value", result);
  }

  @Override
  public R apply(R r) {
    try {
      Map<String, Object> debugContent = new LinkedHashMap<>();
      addConnectRecord(debugContent, r);
      if (r instanceof SinkRecord) {
        SinkRecord sinkRecord = (SinkRecord) r;
        addSinkRecord(debugContent, sinkRecord);
      } else if (r instanceof SourceRecord) {
        SourceRecord sourceRecord = (SourceRecord) r;
        addSourceRecord(debugContent, sourceRecord);
      }
      addKey(debugContent, r);
      addValue(debugContent, r);
      log.info("\n{}", ObjectMapperFactory.INSTANCE.writeValueAsString(debugContent));
    } catch (Exception ex) {
      log.error("Exception while generating debug content.", ex);
    }

    return r;
  }

  @Override
  public ConfigDef config() {
    return new ConfigDef();
  }

  @Override
  public void close() {
  }

  @Override
  public void configure(Map<String, ?> settings) {
    ObjectMapperFactory.INSTANCE.configure(SerializationFeature.INDENT_OUTPUT, true);
  }
}
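Using the new Debug transformation only requires adding it to a connector's transforms chain. A minimal sketch of a connector config fragment (the alias "debug" is illustrative; the fully qualified class name comes from the file above):

{
  "transforms" : "debug",
  "transforms.debug.type" : "com.github.jcustenborder.kafka.connect.transform.common.Debug"
}

Since config() returns an empty ConfigDef, the transformation accepts no further settings; configure() only switches the shared ObjectMapper to indented output.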

src/main/java/com/github/jcustenborder/kafka/connect/transform/common/HeaderToField.java

+5 lines

@@ -15,6 +15,8 @@
  */
 package com.github.jcustenborder.kafka.connect.transform.common;
 
+import com.github.jcustenborder.kafka.connect.utils.config.Description;
+import com.github.jcustenborder.kafka.connect.utils.config.Title;
 import com.github.jcustenborder.kafka.connect.utils.data.SchemaBuilders;
 import com.github.jcustenborder.kafka.connect.utils.transformation.BaseKeyValueTransformation;
 import org.apache.kafka.common.config.ConfigDef;
@@ -32,6 +34,9 @@
 import java.util.List;
 import java.util.Map;
 
+@Title("HeaderToField")
+@Description("This transformation is used to copy the value of a header to a field in the key or " +
+    "value of the record.")
 public class HeaderToField<R extends ConnectRecord<R>> extends BaseKeyValueTransformation<R> {
   private static final Logger log = LoggerFactory.getLogger(HeaderToField.class);

src/main/java/com/github/jcustenborder/kafka/connect/transform/common/HeaderToFieldConfig.java

+21 -1

@@ -45,8 +45,28 @@
 class HeaderToFieldConfig extends AbstractConfig {
   private static final Logger log = LoggerFactory.getLogger(HeaderToFieldConfig.class);
 
+  static String supportedHeaderTypes() {
+    StringBuilder builder = new StringBuilder();
+    HeaderToFieldMapping.SCHEMA_TYPE_LOOKUP.keySet()
+        .stream()
+        .sorted()
+        .forEach(key -> {
+          builder.append("* ");
+          builder.append(key);
+          builder.append("\n");
+        });
+
+    return builder.toString();
+  }
+
+
   public static final String HEADER_MAPPINGS_CONF = "header.mappings";
-  static final String HEADER_MAPPINGS_DOC = "The mapping of the header to the field in the message.";
+  static final String HEADER_MAPPINGS_DOC = "The mapping of the header to the field in the message. " +
+      "More than one mapping can be specified separated by a comma. " +
+      "The format is `<header name>:<header type>[:field name]`. Supported header types are:\n\n" +
+      supportedHeaderTypes();
 
 
   public final List<HeaderToFieldMapping> mappings;
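The documented header.mappings format is easiest to read with a concrete value. A hedged sketch of a connector config fragment, reusing mappings from the example file at the end of this commit (the alias is illustrative, and the HeaderToField$Value child class is implied by that example's "childClass" : "Value"):

{
  "transforms" : "headerToField",
  "transforms.headerToField.type" : "com.github.jcustenborder.kafka.connect.transform.common.HeaderToField$Value",
  "transforms.headerToField.header.mappings" : "file.name:STRING:file_name,file.last.modified:INT64(Timestamp):file_last_modified"
}

Each comma-separated mapping is <header name>:<header type>[:field name]; here the header file.name of type STRING is copied to the field file_name. The field name component is optional per the documented format.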
src/test/java/com/github/jcustenborder/kafka/connect/transform/common/DebugTest.java (new file, +51 lines)

/**
 * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.github.jcustenborder.kafka.connect.transform.common;

import com.github.jcustenborder.kafka.connect.utils.SinkRecordHelper;
import com.google.common.collect.ImmutableMap;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class DebugTest {
  Debug<SinkRecord> transform;

  @BeforeEach
  public void before() {
    this.transform = new Debug<>();
    this.transform.configure(ImmutableMap.of());
  }

  @Test
  public void apply() {
    Schema valueSchema = SchemaBuilder.struct()
        .name("foo")
        .field("firstName", Schema.STRING_SCHEMA)
        .field("lastName", Schema.STRING_SCHEMA)
        .build();

    // Debug passes records through unchanged; its observable output is the
    // JSON it logs, so this test simply exercises apply() end to end.
    SinkRecord input = SinkRecordHelper.write("test", Schema.STRING_SCHEMA, "1234", valueSchema, new Struct(valueSchema).put("firstName", "adfs").put("lastName", "asdfas"));
    SinkRecord output = this.transform.apply(input);
  }
}
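Running this test makes Debug.apply log the collected map as indented JSON. A hypothetical sketch of the shape of that log line follows; the concrete partition, offset, timestamp, and timestamp type come from SinkRecordHelper defaults, and the Struct value is rendered by the connect-utils Jackson module, so all values below are illustrative only:

{
  "topic" : "test",
  "kafkaPartition" : 1,
  "timestamp" : 123412351,
  "timestampType" : "CREATE_TIME",
  "kafkaOffset" : 12345,
  "key" : "1234",
  "value" : { ... }
}

The key order mirrors the LinkedHashMap insertion order in Debug.apply: the common ConnectRecord fields first, the SinkRecord-specific fields next, then key and value.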
New file (+90 lines): documentation example for HeaderToField

{
  "input" : {
    "topic" : "testing",
    "kafkaPartition" : 1,
    "valueSchema" : {
      "type" : "STRUCT",
      "isOptional" : false,
      "fieldSchemas" : {
        "firstName" : {
          "type" : "STRING",
          "isOptional" : true
        },
        "lastName" : {
          "type" : "STRING",
          "isOptional" : true
        }
      }
    },
    "value" : {
      "schema" : {
        "type" : "STRUCT",
        "isOptional" : false,
        "fieldSchemas" : {
          "firstName" : {
            "type" : "STRING",
            "isOptional" : true
          },
          "lastName" : {
            "type" : "STRING",
            "isOptional" : true
          }
        }
      },
      "fieldValues" : [
        {
          "name" : "firstName",
          "schema" : {
            "type" : "STRING",
            "isOptional" : true
          },
          "storage" : "example"
        },
        {
          "name" : "lastName",
          "schema" : {
            "type" : "STRING",
            "isOptional" : true
          },
          "storage" : "user"
        }
      ]
    },
    "timestamp" : 123412351,
    "timestampType" : "NO_TIMESTAMP_TYPE",
    "offset" : 12345,
    "headers" : [
      {
        "name" : "file.path",
        "schema" : {
          "type" : "STRING",
          "isOptional" : false
        },
        "storage" : "/tmp/input/test1.csv"
      },
      {
        "name" : "file.name",
        "schema" : {
          "type" : "STRING",
          "isOptional" : false
        },
        "storage" : "test1.csv"
      },
      {
        "name" : "file.last.modified",
        "schema" : {
          "type" : "INT64",
          "isOptional" : false,
          "name" : "org.apache.kafka.connect.data.Timestamp"
        },
        "storage" : 1610656447123
      }
    ]
  },
  "description" : "The following example takes the output from the Spooldir connector and copies the file metadata headers to fields in the value.",
  "name" : "Spooldir metadata",
  "config" : {
    "header.mappings" : "file.path:STRING:file_path,file.name:STRING:file_name,file.last.modified:INT64(Timestamp):file_last_modified"
  },
  "childClass" : "Value"
}
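Given that config, HeaderToField should add one field to the value per mapping. A hedged sketch of the resulting value, flattened for readability (the actual output is a Struct whose schema gains the three mapped fields, with file_last_modified carrying the Timestamp logical type rather than a raw long):

{
  "firstName" : "example",
  "lastName" : "user",
  "file_path" : "/tmp/input/test1.csv",
  "file_name" : "test1.csv",
  "file_last_modified" : 1610656447123
}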
