
Commit f9a379b

fix(groq): exception when metrics are turned off (#2778)
1 parent b1fde54 commit f9a379b

File tree

1 file changed: +27 −16 lines changed
  • packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq


packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/__init__.py

+27 −16
@@ -196,24 +196,35 @@ def _set_response_attributes(span, response, token_histogram):
         span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
     )
     set_span_attribute(
-        span,
-        SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
-    )
-    set_span_attribute(
-        span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens
+        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
     )
+    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)

-    if isinstance(prompt_tokens, int) and prompt_tokens >= 0 and token_histogram is not None:
-        token_histogram.record(prompt_tokens, attributes={
-            SpanAttributes.LLM_TOKEN_TYPE: "input",
-            SpanAttributes.LLM_RESPONSE_MODEL: response.get("model")
-        })
+    if (
+        isinstance(prompt_tokens, int)
+        and prompt_tokens >= 0
+        and token_histogram is not None
+    ):
+        token_histogram.record(
+            prompt_tokens,
+            attributes={
+                SpanAttributes.LLM_TOKEN_TYPE: "input",
+                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
+            },
+        )

-    if isinstance(completion_tokens, int) and completion_tokens >= 0 and token_histogram is not None:
-        token_histogram.record(completion_tokens, attributes={
-            SpanAttributes.LLM_TOKEN_TYPE: "output",
-            SpanAttributes.LLM_RESPONSE_MODEL: response.get("model")
-        })
+    if (
+        isinstance(completion_tokens, int)
+        and completion_tokens >= 0
+        and token_histogram is not None
+    ):
+        token_histogram.record(
+            completion_tokens,
+            attributes={
+                SpanAttributes.LLM_TOKEN_TYPE: "output",
+                SpanAttributes.LLM_RESPONSE_MODEL: response.get("model"),
+            },
+        )

     choices = response.get("choices")
     if should_send_prompts() and choices:

@@ -575,7 +586,7 @@ def _instrument(self, **kwargs):
                 token_histogram,
                 choice_counter,
                 duration_histogram,
-            ) = (None, None, None, None)
+            ) = (None, None, None)

         for wrapped_method in WRAPPED_METHODS:
             wrap_package = wrapped_method.get("package")
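
The substantive fix is the second hunk: when metrics are turned off, the fallback branch assigned a 4-element tuple to three names, and any such arity mismatch raises ValueError at runtime. A minimal standalone sketch of the failure and the corrected assignment (illustrative only, reusing the diff's variable names outside their real context):

    # Before the fix: three targets, four values -> ValueError.
    try:
        (
            token_histogram,
            choice_counter,
            duration_histogram,
        ) = (None, None, None, None)
    except ValueError as err:
        print(err)  # too many values to unpack (expected 3)

    # After the fix: the value count matches the target count, so the
    # metrics-disabled path sets each instrument to None without raising.
    (
        token_histogram,
        choice_counter,
        duration_histogram,
    ) = (None, None, None)

The first hunk is a formatting cleanup of the same function: the guards already checked token_histogram is not None before recording, so with metrics off the histogram calls were skipped correctly; that change only reflows the conditions and record() calls across multiple lines without altering behavior.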
