Add Langfuse observability to Unified API #457
base: main
```diff
@@ -1,10 +1,12 @@
 import uuid
 import logging
-from typing import Any, Dict, Optional
+from typing import Any, Callable, Dict, Optional
+from functools import wraps
 
 from asgi_correlation_id import correlation_id
 from langfuse import Langfuse
 from langfuse.client import StatefulGenerationClient, StatefulTraceClient
 from app.models.llm import CompletionConfig, QueryParams, LLMCallResponse
 
 logger = logging.getLogger(__name__)
```
@@ -107,3 +109,112 @@ def log_error(self, error_message: str, response_id: Optional[str] = None): | |
|
|
||
| def flush(self): | ||
| self.langfuse.flush() | ||
|
|
||
|
|
||
| def observe_llm_execution( | ||
| session_id: str | None = None, | ||
| credentials: dict | None = None, | ||
| ): | ||
| """Decorator to add Langfuse observability to LLM provider execute methods. | ||
|
|
||
| Args: | ||
| credentials: Langfuse credentials with public_key, secret_key, and host | ||
| session_id: Session ID for grouping traces (conversation_id) | ||
|
|
||
| Usage: | ||
| decorated_execute = observe_llm_execution( | ||
| credentials=langfuse_creds, | ||
| session_id=conversation_id | ||
| )(provider_instance.execute) | ||
| """ | ||
|
|
||
| def decorator(func: Callable) -> Callable: | ||
| @wraps(func) | ||
| def wrapper(completion_config: CompletionConfig, query: QueryParams, **kwargs): | ||
| # Skip observability if no credentials provided | ||
| if not credentials or not all( | ||
| key in credentials for key in ["public_key", "secret_key", "host"] | ||
| ): | ||
| logger.info("[Langfuse] No credentials - skipping observability") | ||
| return func(completion_config, query, **kwargs) | ||
|
|
||
| try: | ||
| langfuse = Langfuse( | ||
| public_key=credentials.get("public_key"), | ||
| secret_key=credentials.get("secret_key"), | ||
| host=credentials.get("host"), | ||
| ) | ||
| except Exception as e: | ||
| logger.warning(f"[Langfuse] Failed to initialize client: {e}") | ||
| return func(completion_config, query, **kwargs) | ||
|
|
||
| trace_metadata = { | ||
| "provider": completion_config.provider, | ||
| } | ||
|
|
||
| if query.conversation and query.conversation.id: | ||
| trace_metadata["conversation_id"] = query.conversation.id | ||
|
|
||
| trace = langfuse.trace( | ||
| name="unified-llm-call", | ||
| input=query.input, | ||
| metadata=trace_metadata, | ||
| tags=[completion_config.provider], | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. why is provider detail being repeated both in metadata and tags |
||
| ) | ||
avirajsingh7 marked this conversation as resolved.
Show resolved
Hide resolved
|
```diff
+
+            generation = trace.generation(
+                name=f"{completion_config.provider}-completion",
+                input=query.input,
+                model=completion_config.params.get("model"),
+            )
+
+            try:
+                # Execute the actual LLM call
+                response: LLMCallResponse | None
+                error: str | None
+                response, error = func(completion_config, query, **kwargs)
+
+                if response:
+                    generation.end(
+                        output={
+                            "status": "success",
+                            "output": response.response.output.text,
+                        },
+                        usage_details={
+                            "input": response.usage.input_tokens,
+                            "output": response.usage.output_tokens,
+                        },
+                        model=response.response.model,
+                    )
+
+                    trace.update(
+                        output={
+                            "status": "success",
+                            "output": response.response.output.text,
+                        },
+                        session_id=session_id or response.response.conversation_id,
+                    )
+                else:
+                    error_msg = error or "Unknown error"
+                    generation.end(output={"error": error_msg})
+                    trace.update(
+                        output={"status": "failure", "error": error_msg},
+                        session_id=session_id,
+                    )
+
+                langfuse.flush()
```
**Collaborator:** maybe you can add a function for marking the status and error, and then use that function here

**Author:** right now,
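The suggested helper might look something like this — a minimal sketch; the name `_record_failure` is hypothetical, and the types come from the `langfuse.client` imports already in this diff:

```python
# Hypothetical helper per the review suggestion above; it centralizes
# the two failure paths (error tuple and raised exception).
def _record_failure(
    generation: StatefulGenerationClient,
    trace: StatefulTraceClient,
    error_msg: str,
    session_id: str | None,
) -> None:
    # End the generation and mark the trace as failed in one place.
    generation.end(output={"error": error_msg})
    trace.update(
        output={"status": "failure", "error": error_msg},
        session_id=session_id,
    )
```

Both the `else` branch and the `except` handler below could then call `_record_failure(generation, trace, error_msg, session_id)`.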
```diff
+
+                return response, error
+
+            except Exception as e:
+                error_msg = str(e)
+                generation.end(output={"error": error_msg})
+                trace.update(
+                    output={"status": "failure", "error": error_msg},
+                    session_id=session_id,
+                )
+                langfuse.flush()
+                raise
+
+        return wrapper
+
+    return decorator
```
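For reference, a minimal sketch of wiring the decorator up at a call site, following the docstring's Usage note — `provider_instance`, `langfuse_creds`, `completion_config`, `query`, and `conversation_id` are illustrative names, not part of this diff:

```python
# Illustrative only: assumes a provider object whose
# execute(completion_config, query) returns the (response, error)
# tuple the wrapper expects.
langfuse_creds = {
    "public_key": "pk-lf-...",
    "secret_key": "sk-lf-...",
    "host": "https://cloud.langfuse.com",
}

decorated_execute = observe_llm_execution(
    credentials=langfuse_creds,
    session_id=conversation_id,
)(provider_instance.execute)

# Same call signature and return shape as the undecorated method.
response, error = decorated_execute(completion_config, query)
```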