13
13
import io .opentelemetry .context .propagation .TextMapPropagator ;
14
14
import io .opentelemetry .context .propagation .TextMapSetter ;
15
15
import io .opentelemetry .instrumentation .api .instrumenter .Instrumenter ;
16
+ import io .opentelemetry .instrumentation .api .internal .InstrumenterUtil ;
17
+ import io .opentelemetry .instrumentation .api .internal .Timer ;
18
+ import io .opentelemetry .instrumentation .kafka .internal .KafkaConsumerContext ;
19
+ import io .opentelemetry .instrumentation .kafka .internal .KafkaConsumerContextUtil ;
16
20
import io .opentelemetry .instrumentation .kafka .internal .KafkaHeadersSetter ;
17
21
import io .opentelemetry .instrumentation .kafka .internal .KafkaProcessRequest ;
18
22
import io .opentelemetry .instrumentation .kafka .internal .KafkaProducerRequest ;
23
+ import io .opentelemetry .instrumentation .kafka .internal .KafkaReceiveRequest ;
19
24
import io .opentelemetry .instrumentation .kafka .internal .KafkaUtil ;
20
25
import io .opentelemetry .instrumentation .kafka .internal .OpenTelemetryMetricsReporter ;
21
26
import io .opentelemetry .instrumentation .kafka .internal .OpenTelemetrySupplier ;
27
+ import io .opentelemetry .instrumentation .kafka .internal .TracingList ;
22
28
import java .lang .reflect .InvocationTargetException ;
23
29
import java .lang .reflect .Proxy ;
24
30
import java .util .Collections ;
25
31
import java .util .HashMap ;
32
+ import java .util .LinkedHashMap ;
33
+ import java .util .List ;
26
34
import java .util .Map ;
27
35
import java .util .concurrent .Future ;
28
36
import java .util .function .BiFunction ;
37
45
import org .apache .kafka .clients .producer .Producer ;
38
46
import org .apache .kafka .clients .producer .ProducerRecord ;
39
47
import org .apache .kafka .clients .producer .RecordMetadata ;
48
+ import org .apache .kafka .common .TopicPartition ;
40
49
import org .apache .kafka .common .header .Headers ;
41
50
import org .apache .kafka .common .metrics .MetricsReporter ;
42
51
@@ -47,16 +56,19 @@ public final class KafkaTelemetry {
47
56
  // OpenTelemetry instance used to obtain propagators and build metric/config helpers.
  private final OpenTelemetry openTelemetry;
  // Creates PRODUCER spans around ProducerRecord sends, ended with RecordMetadata.
  private final Instrumenter<KafkaProducerRequest, RecordMetadata> producerInstrumenter;
  // Creates the batch "receive" span around a poll() returning records.
  private final Instrumenter<KafkaReceiveRequest, Void> consumerReceiveInstrumenter;
  // Creates per-record "process" spans as consumed records are iterated.
  private final Instrumenter<KafkaProcessRequest, Void> consumerProcessInstrumenter;
  // When true, trace context is injected into outgoing record headers.
  private final boolean producerPropagationEnabled;

  /**
   * Wires the telemetry facade to its instrumenters.
   *
   * @param openTelemetry the {@link OpenTelemetry} instance backing this telemetry
   * @param producerInstrumenter instrumenter for producer send spans
   * @param consumerReceiveInstrumenter instrumenter for the batch receive span
   * @param consumerProcessInstrumenter instrumenter for per-record process spans
   * @param producerPropagationEnabled whether to inject context into producer record headers
   */
  KafkaTelemetry(
      OpenTelemetry openTelemetry,
      Instrumenter<KafkaProducerRequest, RecordMetadata> producerInstrumenter,
      Instrumenter<KafkaReceiveRequest, Void> consumerReceiveInstrumenter,
      Instrumenter<KafkaProcessRequest, Void> consumerProcessInstrumenter,
      boolean producerPropagationEnabled) {
    this.openTelemetry = openTelemetry;
    this.producerInstrumenter = producerInstrumenter;
    this.consumerReceiveInstrumenter = consumerReceiveInstrumenter;
    this.consumerProcessInstrumenter = consumerProcessInstrumenter;
    this.producerPropagationEnabled = producerPropagationEnabled;
  }
@@ -115,6 +127,7 @@ public <K, V> Consumer<K, V> wrap(Consumer<K, V> consumer) {
115
127
new Class <?>[] {Consumer .class },
116
128
(proxy , method , args ) -> {
117
129
Object result ;
130
+ Timer timer = "poll" .equals (method .getName ()) ? Timer .start () : null ;
118
131
try {
119
132
result = method .invoke (consumer , args );
120
133
} catch (InvocationTargetException exception ) {
@@ -123,12 +136,36 @@ public <K, V> Consumer<K, V> wrap(Consumer<K, V> consumer) {
123
136
// ConsumerRecords<K, V> poll(long timeout)
124
137
// ConsumerRecords<K, V> poll(Duration duration)
125
138
if ("poll" .equals (method .getName ()) && result instanceof ConsumerRecords ) {
126
- buildAndFinishSpan ((ConsumerRecords ) result , consumer );
139
+ ConsumerRecords <K , V > consumerRecords = (ConsumerRecords <K , V >) result ;
140
+ Context receiveContext = buildAndFinishSpan (consumerRecords , consumer , timer );
141
+ if (receiveContext == null ) {
142
+ receiveContext = Context .current ();
143
+ }
144
+ KafkaConsumerContext consumerContext =
145
+ KafkaConsumerContextUtil .create (receiveContext , consumer );
146
+ result = addTracing (consumerRecords , consumerContext );
127
147
}
128
148
return result ;
129
149
});
130
150
}
131
151
152
+ <K , V > ConsumerRecords <K , V > addTracing (
153
+ ConsumerRecords <K , V > consumerRecords , KafkaConsumerContext consumerContext ) {
154
+ if (consumerRecords .isEmpty ()) {
155
+ return consumerRecords ;
156
+ }
157
+
158
+ Map <TopicPartition , List <ConsumerRecord <K , V >>> records = new LinkedHashMap <>();
159
+ for (TopicPartition partition : consumerRecords .partitions ()) {
160
+ List <ConsumerRecord <K , V >> list = consumerRecords .records (partition );
161
+ if (list != null && !list .isEmpty ()) {
162
+ list = TracingList .wrap (list , consumerProcessInstrumenter , () -> true , consumerContext );
163
+ }
164
+ records .put (partition , list );
165
+ }
166
+ return new ConsumerRecords <>(records );
167
+ }
168
+
132
169
/**
133
170
* Produces a set of kafka client config properties (consumer or producer) to register a {@link
134
171
* MetricsReporter} that records metrics to an {@code openTelemetry} instance. Add these resulting
@@ -221,23 +258,37 @@ <K, V> Future<RecordMetadata> buildAndInjectSpan(
221
258
}
222
259
}
223
260
224
- private <K , V > void buildAndFinishSpan (ConsumerRecords <K , V > records , Consumer <K , V > consumer ) {
225
- buildAndFinishSpan (
226
- records , KafkaUtil .getConsumerGroup (consumer ), KafkaUtil .getClientId (consumer ));
261
+ private <K , V > Context buildAndFinishSpan (
262
+ ConsumerRecords <K , V > records , Consumer <K , V > consumer , Timer timer ) {
263
+ return buildAndFinishSpan (
264
+ records , KafkaUtil .getConsumerGroup (consumer ), KafkaUtil .getClientId (consumer ), timer );
227
265
}
228
266
229
- <K , V > void buildAndFinishSpan (
230
- ConsumerRecords <K , V > records , String consumerGroup , String clientId ) {
267
+ <K , V > Context buildAndFinishSpan (
268
+ ConsumerRecords <K , V > records , String consumerGroup , String clientId , Timer timer ) {
269
+ if (records .isEmpty ()) {
270
+ return null ;
271
+ }
231
272
Context parentContext = Context .current ();
232
- for (ConsumerRecord <K , V > record : records ) {
233
- KafkaProcessRequest request = KafkaProcessRequest .create (record , consumerGroup , clientId );
234
- if (!consumerProcessInstrumenter .shouldStart (parentContext , request )) {
235
- continue ;
236
- }
237
-
238
- Context context = consumerProcessInstrumenter .start (parentContext , request );
239
- consumerProcessInstrumenter .end (context , request , null , null );
273
+ KafkaReceiveRequest request = KafkaReceiveRequest .create (records , consumerGroup , clientId );
274
+ Context context = null ;
275
+ if (consumerReceiveInstrumenter .shouldStart (parentContext , request )) {
276
+ context =
277
+ InstrumenterUtil .startAndEnd (
278
+ consumerReceiveInstrumenter ,
279
+ parentContext ,
280
+ request ,
281
+ null ,
282
+ null ,
283
+ timer .startTime (),
284
+ timer .now ());
240
285
}
286
+
287
+ // we're returning the context of the receive span so that process spans can use it as
288
+ // parent context even though the span has ended
289
+ // this is the suggested behavior according to the spec batch receive scenario:
290
+ // https://github.com/open-telemetry/semantic-conventions/blob/main/docs/messaging/messaging-spans.md#batch-receiving
291
+ return context ;
241
292
}
242
293
243
294
private class ProducerCallback implements Callback {
0 commit comments