litellm/proxy/spend_tracking/spend_tracking_utils.py: 15 changes (14 additions & 1 deletion)
@@ -651,7 +651,20 @@ def _get_response_for_spend_logs_payload(
     if payload is None:
         return "{}"
     if _should_store_prompts_and_responses_in_spend_logs():
-        return json.dumps(payload.get("response", {}))
+        response_obj: Any = payload.get("response")
+        if response_obj is None:
+            return "{}"
+
+        sanitized_wrapper = _sanitize_request_body_for_spend_logs_payload(
+            {"response": response_obj}
+        )
+        sanitized_response = sanitized_wrapper.get("response", response_obj)
+
+        if sanitized_response is None:
+            return "{}"
+        if isinstance(sanitized_response, str):
+            return sanitized_response
+        return safe_dumps(sanitized_response)
     return "{}"


@@ -4,7 +4,7 @@
 import os
 import sys
 from datetime import timezone
-from typing import Any
+from typing import Any, cast

 import pytest
 from fastapi.testclient import TestClient
@@ -19,9 +19,11 @@
 from litellm.constants import LITELLM_TRUNCATED_PAYLOAD_FIELD, REDACTED_BY_LITELM_STRING
 from litellm.litellm_core_utils.safe_json_dumps import safe_dumps
 from litellm.proxy.spend_tracking.spend_tracking_utils import (
+    _get_response_for_spend_logs_payload,
     _get_vector_store_request_for_spend_logs_payload,
     _sanitize_request_body_for_spend_logs_payload,
 )
+from litellm.types.utils import StandardLoggingPayload


 def test_sanitize_request_body_for_spend_logs_payload_basic():
@@ -234,6 +236,71 @@ def test_get_vector_store_request_for_spend_logs_payload_null_input(mock_should_
     assert result is None


+@patch(
+    "litellm.proxy.spend_tracking.spend_tracking_utils._should_store_prompts_and_responses_in_spend_logs"
+)
+def test_get_response_for_spend_logs_payload_truncates_large_base64(mock_should_store):
+    from litellm.constants import MAX_STRING_LENGTH_PROMPT_IN_DB
+
+    mock_should_store.return_value = True
+    large_text = "A" * (MAX_STRING_LENGTH_PROMPT_IN_DB + 500)
+    payload = cast(
+        StandardLoggingPayload,
+        {
+            "response": {
+                "data": [
+                    {
+                        "b64_json": large_text,
+                        "other_field": "value",
+                    }
+                ]
+            }
+        },
+    )
+
+    response_json = _get_response_for_spend_logs_payload(payload)
+    parsed = json.loads(response_json)
+    truncated_value = parsed["data"][0]["b64_json"]
+    assert len(truncated_value) < len(large_text)
+    assert LITELLM_TRUNCATED_PAYLOAD_FIELD in truncated_value
+    assert parsed["data"][0]["other_field"] == "value"
+
+
+@patch(
+    "litellm.proxy.spend_tracking.spend_tracking_utils._should_store_prompts_and_responses_in_spend_logs"
+)
+def test_get_response_for_spend_logs_payload_truncates_large_embedding(mock_should_store):
+    from litellm.constants import MAX_STRING_LENGTH_PROMPT_IN_DB
+
+    mock_should_store.return_value = True
+    embedding_values = [
+        round(i * 0.0001, 6) for i in range(MAX_STRING_LENGTH_PROMPT_IN_DB + 500)
+    ]
+    large_embedding = json.dumps(embedding_values)
+    payload = cast(
+        StandardLoggingPayload,
+        {
+            "response": {
+                "data": [
+                    {
+                        "embedding": large_embedding,
+                        "other_field": "value",
+                    }
+                ]
+            }
+        },
+    )
+
+    response_json = _get_response_for_spend_logs_payload(payload)
+    parsed = json.loads(response_json)
+    truncated_value = parsed["data"][0]["embedding"]
+
+    assert isinstance(truncated_value, str)
+    assert len(truncated_value) < len(large_embedding)
+    assert LITELLM_TRUNCATED_PAYLOAD_FIELD in truncated_value
+    assert parsed["data"][0]["other_field"] == "value"
+
+
 def test_safe_dumps_handles_circular_references():
     """Test that safe_dumps can handle circular references without raising exceptions"""
