Commit 8d7d36d

fix integ tests
1 parent c5ce36f commit 8d7d36d

File tree

3 files changed: +21 -41 lines changed

tests_integ/models/providers.py

Lines changed: 4 additions & 30 deletions

@@ -67,25 +67,12 @@ def __init__(self):
             "api_key": os.getenv("ANTHROPIC_API_KEY"),
         },
         model_id="claude-3-7-sonnet-20250219",
-        max_tokens=2048,
-        params={
-            "thinking": {
-                "type": "enabled",
-                "budget_tokens": 1024,
-            },
-        },
+        max_tokens=512,
     ),
 )
 bedrock = ProviderInfo(
     id="bedrock",
-    factory=lambda: BedrockModel(
-        additional_request_fields={
-            "thinking": {
-                "type": "enabled",
-                "budget_tokens": 1024,
-            },
-        },
-    ),
+    factory=lambda: BedrockModel(),
 )
 cohere = ProviderInfo(
     id="cohere",
@@ -101,15 +88,7 @@ def __init__(self):
 )
 litellm = ProviderInfo(
     id="litellm",
-    factory=lambda: LiteLLMModel(
-        model_id="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
-        params={
-            "thinking": {
-                "budget_tokens": 1024,
-                "type": "enabled",
-            },
-        },
-    ),
+    factory=lambda: LiteLLMModel(model_id="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0"),
 )
 llama = ProviderInfo(
     id="llama",
@@ -158,12 +137,7 @@ def __init__(self):
     factory=lambda: GeminiModel(
         client_args={"api_key": os.getenv("GOOGLE_API_KEY")},
         model_id="gemini-2.5-flash",
-        params={
-            "temperature": 0.7,
-            "thinking_config": {
-                "include_thoughts": True,
-            },
-        },
+        params={"temperature": 0.7},
     ),
 )
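Taken together, the providers.py changes strip the hard-coded extended-thinking parameters out of the shared factories, so every conformance test now runs against a plain model config. A test that still wants reasoning can turn it on for its own model instance; the sketch below assumes the update_config call shape used by the new LiteLLM test further down, and the LiteLLMModel import path is also an assumption:

    # Sketch: opt a single test into extended thinking instead of baking it
    # into the shared factory. The "thinking" payload mirrors the params this
    # commit removes above.
    from strands import Agent
    from strands.models.litellm import LiteLLMModel  # import path assumed

    model = LiteLLMModel(model_id="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0")
    model.update_config(
        params={
            "thinking": {
                "type": "enabled",
                "budget_tokens": 1024,  # cap on reasoning tokens
            },
        },
    )
    agent = Agent(model)  # only this agent's requests carry the thinking params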

tests_integ/models/test_conformance.py

Lines changed: 1 addition & 11 deletions

@@ -5,7 +5,7 @@
 
 from strands import Agent
 from strands.models import Model
-from tests_integ.models.providers import ProviderInfo, all_providers, cohere, gemini, llama, mistral, openai, writer
+from tests_integ.models.providers import ProviderInfo, all_providers, cohere, llama, mistral
 
 
 def get_models():
@@ -61,13 +61,3 @@ class Weather(BaseModel):
     assert len(result.time) > 0
     assert len(result.weather) > 0
     assert isinstance(result, Weather)
-
-
-def test_stream_reasoning(skip_for, model):
-    skip_for([cohere, gemini, llama, mistral, openai, writer], "reasoning is not supported")
-
-    agent = Agent(model)
-    result = agent("Please reason about the equation 2+2.")
-
-    assert "reasoningContent" in result.message["content"][0]
-    assert result.message["content"][0]["reasoningContent"]["reasoningText"]["text"]
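The deleted test_stream_reasoning relied on the suite's skip_for fixture to exclude providers without reasoning support; with the test gone, the now-unused provider imports (gemini, openai, writer) are dropped as well. For reference, a skip_for-style fixture is typically a thin wrapper over pytest.skip; the sketch below is an assumption about its shape, including the hypothetical provider_info fixture it depends on:

    import pytest

    @pytest.fixture
    def skip_for(provider_info):
        # provider_info (hypothetical here) identifies the provider under test.
        def _skip(providers, reason):
            # Skip the current test when its provider is in the excluded list.
            if provider_info in providers:
                pytest.skip(reason)

        return _skip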

tests_integ/models/test_model_litellm.py

Lines changed: 16 additions & 0 deletions

@@ -121,6 +121,22 @@ async def test_agent_stream_async(agent):
     assert all(string in text for string in ["12:00", "sunny"])
 
 
+def test_agent_invoke_reasoning(agent, model):
+    model.update_config(
+        params={
+            "thinking": {
+                "budget_tokens": 1024,
+                "type": "enabled",
+            },
+        },
+    )
+
+    result = agent("Please reason about the equation 2+2.")
+
+    assert "reasoningContent" in result.message["content"][0]
+    assert result.message["content"][0]["reasoningContent"]["reasoningText"]["text"]
+
+
 def test_structured_output(agent, weather):
     tru_weather = agent.structured_output(type(weather), "The time is 12:00 and the weather is sunny")
     exp_weather = weather
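The two assertions in test_agent_invoke_reasoning walk a nested message structure. Below is a sketch of the minimal shape they imply, runnable on its own; only the asserted keys are grounded in the diff, while the role and trailing text block are illustrative assumptions:

    # Minimal message shape satisfying the new test's assertions.
    message = {
        "role": "assistant",  # assumed
        "content": [
            {
                "reasoningContent": {
                    "reasoningText": {"text": "2 + 2 combines two pairs..."},  # must be non-empty
                }
            },
            {"text": "2 + 2 = 4"},  # hypothetical final answer block
        ],
    }

    assert "reasoningContent" in message["content"][0]
    assert message["content"][0]["reasoningContent"]["reasoningText"]["text"]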
