Skip to content
This repository was archived by the owner on Aug 14, 2025. It is now read-only.

Commit 9c69353

Browse files
feat(api): update via SDK Studio
1 parent 7a6b5de commit 9c69353

File tree

10 files changed

+324
-178
lines changed

10 files changed

+324
-178
lines changed

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 106
22
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f59f1c7d33001d60b5190f68aa49eacec90f05dbe694620b8916152c3922051d.yml
33
openapi_spec_hash: 804edd2e834493906dc430145402be3b
4-
config_hash: e6c3e48e220b264936ee6df8b996ab12
4+
config_hash: de16e52db65de71ac35adcdb665a74f5

api.md

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -453,17 +453,28 @@ Methods:
453453

454454
- <code title="get /v1/inspect/routes">client.routes.<a href="./src/llama_stack_client/resources/routes.py">list</a>() -> <a href="./src/llama_stack_client/types/route_list_response.py">RouteListResponse</a></code>
455455

456+
# Moderations
457+
458+
Types:
459+
460+
```python
461+
from llama_stack_client.types import CreateResponse
462+
```
463+
464+
Methods:
465+
466+
- <code title="post /v1/openai/v1/moderations">client.moderations.<a href="./src/llama_stack_client/resources/moderations.py">create</a>(\*\*<a href="src/llama_stack_client/types/moderation_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/create_response.py">CreateResponse</a></code>
467+
456468
# Safety
457469

458470
Types:
459471

460472
```python
461-
from llama_stack_client.types import CreateResponse, RunShieldResponse
473+
from llama_stack_client.types import RunShieldResponse
462474
```
463475

464476
Methods:
465477

466-
- <code title="post /v1/openai/v1/moderations">client.safety.<a href="./src/llama_stack_client/resources/safety.py">create</a>(\*\*<a href="src/llama_stack_client/types/safety_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/create_response.py">CreateResponse</a></code>
467478
- <code title="post /v1/safety/run-shield">client.safety.<a href="./src/llama_stack_client/resources/safety.py">run_shield</a>(\*\*<a href="src/llama_stack_client/types/safety_run_shield_params.py">params</a>) -> <a href="./src/llama_stack_client/types/run_shield_response.py">RunShieldResponse</a></code>
468479

469480
# Shields

src/llama_stack_client/_client.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@
4141
toolgroups,
4242
vector_dbs,
4343
completions,
44+
moderations,
4445
scoring_functions,
4546
synthetic_data_generation,
4647
)
@@ -91,6 +92,7 @@ class LlamaStackClient(SyncAPIClient):
9192
post_training: post_training.PostTrainingResource
9293
providers: providers.ProvidersResource
9394
routes: routes.RoutesResource
95+
moderations: moderations.ModerationsResource
9496
safety: safety.SafetyResource
9597
shields: shields.ShieldsResource
9698
synthetic_data_generation: synthetic_data_generation.SyntheticDataGenerationResource
@@ -177,6 +179,7 @@ def __init__(
177179
self.post_training = post_training.PostTrainingResource(self)
178180
self.providers = providers.ProvidersResource(self)
179181
self.routes = routes.RoutesResource(self)
182+
self.moderations = moderations.ModerationsResource(self)
180183
self.safety = safety.SafetyResource(self)
181184
self.shields = shields.ShieldsResource(self)
182185
self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResource(self)
@@ -315,6 +318,7 @@ class AsyncLlamaStackClient(AsyncAPIClient):
315318
post_training: post_training.AsyncPostTrainingResource
316319
providers: providers.AsyncProvidersResource
317320
routes: routes.AsyncRoutesResource
321+
moderations: moderations.AsyncModerationsResource
318322
safety: safety.AsyncSafetyResource
319323
shields: shields.AsyncShieldsResource
320324
synthetic_data_generation: synthetic_data_generation.AsyncSyntheticDataGenerationResource
@@ -401,6 +405,7 @@ def __init__(
401405
self.post_training = post_training.AsyncPostTrainingResource(self)
402406
self.providers = providers.AsyncProvidersResource(self)
403407
self.routes = routes.AsyncRoutesResource(self)
408+
self.moderations = moderations.AsyncModerationsResource(self)
404409
self.safety = safety.AsyncSafetyResource(self)
405410
self.shields = shields.AsyncShieldsResource(self)
406411
self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResource(self)
@@ -540,6 +545,7 @@ def __init__(self, client: LlamaStackClient) -> None:
540545
self.post_training = post_training.PostTrainingResourceWithRawResponse(client.post_training)
541546
self.providers = providers.ProvidersResourceWithRawResponse(client.providers)
542547
self.routes = routes.RoutesResourceWithRawResponse(client.routes)
548+
self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations)
543549
self.safety = safety.SafetyResourceWithRawResponse(client.safety)
544550
self.shields = shields.ShieldsResourceWithRawResponse(client.shields)
545551
self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse(
@@ -573,6 +579,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
573579
self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse(client.post_training)
574580
self.providers = providers.AsyncProvidersResourceWithRawResponse(client.providers)
575581
self.routes = routes.AsyncRoutesResourceWithRawResponse(client.routes)
582+
self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations)
576583
self.safety = safety.AsyncSafetyResourceWithRawResponse(client.safety)
577584
self.shields = shields.AsyncShieldsResourceWithRawResponse(client.shields)
578585
self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithRawResponse(
@@ -608,6 +615,7 @@ def __init__(self, client: LlamaStackClient) -> None:
608615
self.post_training = post_training.PostTrainingResourceWithStreamingResponse(client.post_training)
609616
self.providers = providers.ProvidersResourceWithStreamingResponse(client.providers)
610617
self.routes = routes.RoutesResourceWithStreamingResponse(client.routes)
618+
self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations)
611619
self.safety = safety.SafetyResourceWithStreamingResponse(client.safety)
612620
self.shields = shields.ShieldsResourceWithStreamingResponse(client.shields)
613621
self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithStreamingResponse(
@@ -643,6 +651,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
643651
self.post_training = post_training.AsyncPostTrainingResourceWithStreamingResponse(client.post_training)
644652
self.providers = providers.AsyncProvidersResourceWithStreamingResponse(client.providers)
645653
self.routes = routes.AsyncRoutesResourceWithStreamingResponse(client.routes)
654+
self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations)
646655
self.safety = safety.AsyncSafetyResourceWithStreamingResponse(client.safety)
647656
self.shields = shields.AsyncShieldsResourceWithStreamingResponse(client.shields)
648657
self.synthetic_data_generation = (

src/llama_stack_client/resources/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,14 @@
176176
CompletionsResourceWithStreamingResponse,
177177
AsyncCompletionsResourceWithStreamingResponse,
178178
)
179+
from .moderations import (
180+
ModerationsResource,
181+
AsyncModerationsResource,
182+
ModerationsResourceWithRawResponse,
183+
AsyncModerationsResourceWithRawResponse,
184+
ModerationsResourceWithStreamingResponse,
185+
AsyncModerationsResourceWithStreamingResponse,
186+
)
179187
from .tool_runtime import (
180188
ToolRuntimeResource,
181189
AsyncToolRuntimeResource,
@@ -332,6 +340,12 @@
332340
"AsyncRoutesResourceWithRawResponse",
333341
"RoutesResourceWithStreamingResponse",
334342
"AsyncRoutesResourceWithStreamingResponse",
343+
"ModerationsResource",
344+
"AsyncModerationsResource",
345+
"ModerationsResourceWithRawResponse",
346+
"AsyncModerationsResourceWithRawResponse",
347+
"ModerationsResourceWithStreamingResponse",
348+
"AsyncModerationsResourceWithStreamingResponse",
335349
"SafetyResource",
336350
"AsyncSafetyResource",
337351
"SafetyResourceWithRawResponse",
Lines changed: 189 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,189 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from typing import List, Union
6+
7+
import httpx
8+
9+
from ..types import moderation_create_params
10+
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
11+
from .._utils import maybe_transform, async_maybe_transform
12+
from .._compat import cached_property
13+
from .._resource import SyncAPIResource, AsyncAPIResource
14+
from .._response import (
15+
to_raw_response_wrapper,
16+
to_streamed_response_wrapper,
17+
async_to_raw_response_wrapper,
18+
async_to_streamed_response_wrapper,
19+
)
20+
from .._base_client import make_request_options
21+
from ..types.create_response import CreateResponse
22+
23+
__all__ = ["ModerationsResource", "AsyncModerationsResource"]
24+
25+
26+
class ModerationsResource(SyncAPIResource):
    """Synchronous access to the `/v1/openai/v1/moderations` endpoint."""

    @cached_property
    def with_raw_response(self) -> ModerationsResourceWithRawResponse:
        """
        Prefix any method call with this property to get back the raw HTTP
        response object rather than the parsed content.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
        """
        return ModerationsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
        """
        return ModerationsResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        input: Union[str, List[str]],
        model: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> CreateResponse:
        """
        Classifies if text and/or image inputs are potentially harmful.

        Args:
          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
              an array of multi-modal input objects similar to other models.

          model: The content moderation model you would like to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Validate/normalize the request body against the generated params schema.
        payload = maybe_transform(
            {"input": input, "model": model},
            moderation_create_params.ModerationCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/v1/openai/v1/moderations",
            body=payload,
            options=request_options,
            cast_to=CreateResponse,
        )
89+
90+
91+
class AsyncModerationsResource(AsyncAPIResource):
    """Asynchronous access to the `/v1/openai/v1/moderations` endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse:
        """
        Prefix any method call with this property to get back the raw HTTP
        response object rather than the parsed content.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
        """
        return AsyncModerationsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
        """
        return AsyncModerationsResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        input: Union[str, List[str]],
        model: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> CreateResponse:
        """
        Classifies if text and/or image inputs are potentially harmful.

        Args:
          input: Input (or inputs) to classify. Can be a single string, an array of strings, or
              an array of multi-modal input objects similar to other models.

          model: The content moderation model you would like to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Validate/normalize the request body against the generated params schema.
        payload = await async_maybe_transform(
            {"input": input, "model": model},
            moderation_create_params.ModerationCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._post(
            "/v1/openai/v1/moderations",
            body=payload,
            options=request_options,
            cast_to=CreateResponse,
        )
154+
155+
156+
class ModerationsResourceWithRawResponse:
    """Wraps each `ModerationsResource` method so calls return the raw HTTP response."""

    def __init__(self, moderations: ModerationsResource) -> None:
        self._moderations = moderations
        self.create = to_raw_response_wrapper(moderations.create)
163+
164+
165+
class AsyncModerationsResourceWithRawResponse:
    """Wraps each `AsyncModerationsResource` method so calls return the raw HTTP response."""

    def __init__(self, moderations: AsyncModerationsResource) -> None:
        self._moderations = moderations
        self.create = async_to_raw_response_wrapper(moderations.create)
172+
173+
174+
class ModerationsResourceWithStreamingResponse:
    """Wraps each `ModerationsResource` method so the response body is streamed rather than eagerly read."""

    def __init__(self, moderations: ModerationsResource) -> None:
        self._moderations = moderations
        self.create = to_streamed_response_wrapper(moderations.create)
181+
182+
183+
class AsyncModerationsResourceWithStreamingResponse:
    """Wraps each `AsyncModerationsResource` method so the response body is streamed rather than eagerly read."""

    def __init__(self, moderations: AsyncModerationsResource) -> None:
        self._moderations = moderations
        self.create = async_to_streamed_response_wrapper(moderations.create)

0 commit comments

Comments
 (0)