diff --git a/libs/partners/perplexity/langchain_perplexity/chat_models.py b/libs/partners/perplexity/langchain_perplexity/chat_models.py
index 7566792fe9eb6..15d250cabd8d2 100644
--- a/libs/partners/perplexity/langchain_perplexity/chat_models.py
+++ b/libs/partners/perplexity/langchain_perplexity/chat_models.py
@@ -28,7 +28,11 @@
     SystemMessageChunk,
     ToolMessageChunk,
 )
-from langchain_core.messages.ai import UsageMetadata, subtract_usage
+from langchain_core.messages.ai import (
+    OutputTokenDetails,
+    UsageMetadata,
+    subtract_usage,
+)
 from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
@@ -52,10 +56,19 @@ def _create_usage_metadata(token_usage: dict) -> UsageMetadata:
     input_tokens = token_usage.get("prompt_tokens", 0)
     output_tokens = token_usage.get("completion_tokens", 0)
     total_tokens = token_usage.get("total_tokens", input_tokens + output_tokens)
+    citation_tokens = token_usage.get("citation_tokens", 0)
+    num_search_queries = token_usage.get("num_search_queries", 0)
+    reasoning_tokens = token_usage.get("reasoning_tokens", 0)
+
     return UsageMetadata(
         input_tokens=input_tokens,
         output_tokens=output_tokens,
         total_tokens=total_tokens,
+        output_token_details=OutputTokenDetails(  # type: ignore[typeddict-unknown-key]
+            citation_tokens=citation_tokens,
+            num_search_queries=num_search_queries,
+            reasoning=reasoning_tokens,
+        ),
     )
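
The sketch below (not part of the patch) illustrates the effect of the change: given a Perplexity-style `token_usage` payload, the updated `_create_usage_metadata` would surface the search/reasoning counters under `output_token_details`. The sample numbers are made up for illustration; the keys match those read in the diff.

```python
# Illustrative only: assumes the patched langchain_perplexity is installed.
from langchain_perplexity.chat_models import _create_usage_metadata

# Hypothetical usage payload shaped like what the Perplexity API returns.
token_usage = {
    "prompt_tokens": 12,
    "completion_tokens": 40,
    "total_tokens": 52,
    "citation_tokens": 8,
    "num_search_queries": 2,
    "reasoning_tokens": 15,
}

usage = _create_usage_metadata(token_usage)
# Standard counts are unchanged; the new fields ride along in
# usage["output_token_details"] as citation_tokens, num_search_queries,
# and reasoning.
print(usage["output_token_details"])
```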