
Commit 24da5c8

feat(api): update via SDK Studio
1 parent d8a7553 commit 24da5c8

File tree: 7 files changed (+243 -5 lines)


.stats.yml

Lines changed: 2 additions & 2 deletions

@@ -1,4 +1,4 @@
-configured_endpoints: 105
+configured_endpoints: 106
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-ab9c9bf2527d3b4179e2bc3e6495c64d43c42b2ea8dc1a55d472986e1a1430a0.yml
 openapi_spec_hash: b93c85fb747e3c29134451d2f364ce8b
-config_hash: 0394c2b14022becb0352c36afcdfbafe
+config_hash: b0cd3ed9be70b0310bc685a4014eb0a5

api.md

Lines changed: 1 addition & 0 deletions

@@ -463,6 +463,7 @@ from llama_stack_client.types import OpenAIModerationsResponse, RunShieldRespons

 Methods:

+- <code title="post /v1/openai/v1/moderations">client.safety.<a href="./src/llama_stack_client/resources/safety.py">openai_moderations</a>(\*\*<a href="src/llama_stack_client/types/safety_openai_moderations_params.py">params</a>) -> <a href="./src/llama_stack_client/types/openai_moderations_response.py">OpenAIModerationsResponse</a></code>
 - <code title="post /v1/safety/run-shield">client.safety.<a href="./src/llama_stack_client/resources/safety.py">run_shield</a>(\*\*<a href="src/llama_stack_client/types/safety_run_shield_params.py">params</a>) -> <a href="./src/llama_stack_client/types/run_shield_response.py">RunShieldResponse</a></code>

 # Shields
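
As a quick illustration of the method documented above, here is a hypothetical usage sketch (not part of this commit); the base URL, model name, and input strings are placeholders:

from llama_stack_client import LlamaStackClient

# Placeholder server address; any running Llama Stack endpoint would do.
client = LlamaStackClient(base_url="http://localhost:8321")

# Classify one or more inputs; `model` is optional per the signature added below.
response = client.safety.openai_moderations(
    input=["first text to check", "second text to check"],
    model="example-moderation-model",  # placeholder identifier
)

for result in response.results:
    print(result.flagged, result.categories)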

src/llama_stack_client/resources/safety.py

Lines changed: 103 additions & 2 deletions

@@ -2,11 +2,11 @@

 from __future__ import annotations

-from typing import Dict, Union, Iterable
+from typing import Dict, List, Union, Iterable

 import httpx

-from ..types import safety_run_shield_params
+from ..types import safety_run_shield_params, safety_openai_moderations_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
@@ -20,6 +20,7 @@
 from .._base_client import make_request_options
 from ..types.run_shield_response import RunShieldResponse
 from ..types.shared_params.message import Message
+from ..types.openai_moderations_response import OpenAIModerationsResponse

 __all__ = ["SafetyResource", "AsyncSafetyResource"]

@@ -44,6 +45,50 @@ def with_streaming_response(self) -> SafetyResourceWithStreamingResponse:
         """
         return SafetyResourceWithStreamingResponse(self)

+    def openai_moderations(
+        self,
+        *,
+        input: Union[str, List[str]],
+        model: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> OpenAIModerationsResponse:
+        """
+        Classifies if text and/or image inputs are potentially harmful.
+
+        Args:
+          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
+              an array of multi-modal input objects similar to other models.
+
+          model: The content moderation model you would like to use.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v1/openai/v1/moderations",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                },
+                safety_openai_moderations_params.SafetyOpenAIModerationsParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=OpenAIModerationsResponse,
+        )
+
     def run_shield(
         self,
         *,
@@ -112,6 +157,50 @@ def with_streaming_response(self) -> AsyncSafetyResourceWithStreamingResponse:
         """
         return AsyncSafetyResourceWithStreamingResponse(self)

+    async def openai_moderations(
+        self,
+        *,
+        input: Union[str, List[str]],
+        model: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> OpenAIModerationsResponse:
+        """
+        Classifies if text and/or image inputs are potentially harmful.
+
+        Args:
+          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
+              an array of multi-modal input objects similar to other models.
+
+          model: The content moderation model you would like to use.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v1/openai/v1/moderations",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                },
+                safety_openai_moderations_params.SafetyOpenAIModerationsParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=OpenAIModerationsResponse,
+        )
+
     async def run_shield(
         self,
         *,
@@ -164,6 +253,9 @@ class SafetyResourceWithRawResponse:
     def __init__(self, safety: SafetyResource) -> None:
         self._safety = safety

+        self.openai_moderations = to_raw_response_wrapper(
+            safety.openai_moderations,
+        )
         self.run_shield = to_raw_response_wrapper(
             safety.run_shield,
         )
@@ -173,6 +265,9 @@ class AsyncSafetyResourceWithRawResponse:
    def __init__(self, safety: AsyncSafetyResource) -> None:
         self._safety = safety

+        self.openai_moderations = async_to_raw_response_wrapper(
+            safety.openai_moderations,
+        )
         self.run_shield = async_to_raw_response_wrapper(
             safety.run_shield,
         )
@@ -182,6 +277,9 @@ class SafetyResourceWithStreamingResponse:
     def __init__(self, safety: SafetyResource) -> None:
         self._safety = safety

+        self.openai_moderations = to_streamed_response_wrapper(
+            safety.openai_moderations,
+        )
         self.run_shield = to_streamed_response_wrapper(
             safety.run_shield,
         )
@@ -191,6 +289,9 @@ class AsyncSafetyResourceWithStreamingResponse:
     def __init__(self, safety: AsyncSafetyResource) -> None:
         self._safety = safety

+        self.openai_moderations = async_to_streamed_response_wrapper(
+            safety.openai_moderations,
+        )
         self.run_shield = async_to_streamed_response_wrapper(
             safety.run_shield,
         )
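
For completeness, a hedged sketch of how the async variant added above might be invoked (illustrative only; the URL and input text are placeholders):

import asyncio

from llama_stack_client import AsyncLlamaStackClient


async def main() -> None:
    client = AsyncLlamaStackClient(base_url="http://localhost:8321")  # placeholder URL
    response = await client.safety.openai_moderations(input="some text to classify")
    # Inspect the top-level fields defined by OpenAIModerationsResponse.
    print(response.id, response.model)
    for result in response.results:
        print(result.flagged, result.category_scores)


asyncio.run(main())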

src/llama_stack_client/types/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -133,6 +133,7 @@
 from .inference_completion_params import InferenceCompletionParams as InferenceCompletionParams
 from .inference_embeddings_params import InferenceEmbeddingsParams as InferenceEmbeddingsParams
 from .list_vector_stores_response import ListVectorStoresResponse as ListVectorStoresResponse
+from .openai_moderations_response import OpenAIModerationsResponse as OpenAIModerationsResponse
 from .telemetry_get_span_response import TelemetryGetSpanResponse as TelemetryGetSpanResponse
 from .vector_db_register_response import VectorDBRegisterResponse as VectorDBRegisterResponse
 from .vector_db_retrieve_response import VectorDBRetrieveResponse as VectorDBRetrieveResponse
@@ -151,6 +152,7 @@
 from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams
 from .inference_chat_completion_params import InferenceChatCompletionParams as InferenceChatCompletionParams
 from .list_post_training_jobs_response import ListPostTrainingJobsResponse as ListPostTrainingJobsResponse
+from .safety_openai_moderations_params import SafetyOpenAIModerationsParams as SafetyOpenAIModerationsParams
 from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
 from .telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse
 from .tool_runtime_list_tools_response import ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse
src/llama_stack_client/types/openai_moderations_response.py

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List
+
+from .._models import BaseModel
+
+__all__ = ["OpenAIModerationsResponse", "Result"]
+
+
+class Result(BaseModel):
+    categories: Dict[str, bool]
+    """A list of the categories, and whether they are flagged or not."""
+
+    category_applied_input_types: Dict[str, List[str]]
+
+    category_messages: Dict[str, str]
+
+    category_scores: Dict[str, float]
+    """A list of the categories along with their scores as predicted by model."""
+
+    flagged: bool
+    """Whether any of the below categories are flagged."""
+
+
+class OpenAIModerationsResponse(BaseModel):
+    id: str
+    """The unique identifier for the moderation request."""
+
+    model: str
+    """The model used to generate the moderation results."""
+
+    results: List[Result]
+    """A list of moderation objects"""
src/llama_stack_client/types/safety_openai_moderations_params.py

Lines changed: 20 additions & 0 deletions

@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Required, TypedDict
+
+__all__ = ["SafetyOpenAIModerationsParams"]
+
+
+class SafetyOpenAIModerationsParams(TypedDict, total=False):
+    input: Required[Union[str, List[str]]]
+    """Input (or inputs) to classify.
+
+    Can be a single string, an array of strings, or an array of multi-modal input
+    objects similar to other models.
+    """
+
+    model: str
+    """The content moderation model you would like to use."""

tests/api_resources/test_safety.py

Lines changed: 82 additions & 1 deletion

@@ -9,14 +9,56 @@

 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import RunShieldResponse
+from llama_stack_client.types import (
+    RunShieldResponse,
+    OpenAIModerationsResponse,
+)

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


 class TestSafety:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

+    @parametrize
+    def test_method_openai_moderations(self, client: LlamaStackClient) -> None:
+        safety = client.safety.openai_moderations(
+            input="string",
+        )
+        assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+    @parametrize
+    def test_method_openai_moderations_with_all_params(self, client: LlamaStackClient) -> None:
+        safety = client.safety.openai_moderations(
+            input="string",
+            model="model",
+        )
+        assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+    @parametrize
+    def test_raw_response_openai_moderations(self, client: LlamaStackClient) -> None:
+        response = client.safety.with_raw_response.openai_moderations(
+            input="string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        safety = response.parse()
+        assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+    @parametrize
+    def test_streaming_response_openai_moderations(self, client: LlamaStackClient) -> None:
+        with client.safety.with_streaming_response.openai_moderations(
+            input="string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            safety = response.parse()
+            assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
     @parametrize
     def test_method_run_shield(self, client: LlamaStackClient) -> None:
         safety = client.safety.run_shield(
@@ -75,6 +117,45 @@ class TestAsyncSafety:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

+    @parametrize
+    async def test_method_openai_moderations(self, async_client: AsyncLlamaStackClient) -> None:
+        safety = await async_client.safety.openai_moderations(
+            input="string",
+        )
+        assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+    @parametrize
+    async def test_method_openai_moderations_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+        safety = await async_client.safety.openai_moderations(
+            input="string",
+            model="model",
+        )
+        assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+    @parametrize
+    async def test_raw_response_openai_moderations(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.safety.with_raw_response.openai_moderations(
+            input="string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        safety = await response.parse()
+        assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_openai_moderations(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.safety.with_streaming_response.openai_moderations(
+            input="string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            safety = await response.parse()
+            assert_matches_type(OpenAIModerationsResponse, safety, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
     @parametrize
     async def test_method_run_shield(self, async_client: AsyncLlamaStackClient) -> None:
         safety = await async_client.safety.run_shield(