Commit f45358c

updated
1 parent 8d34dcd commit f45358c

1 file changed: +8 −6 lines changed

llm_api_utils.py (+8 −6)
@@ -32,15 +32,17 @@ def get_llm_response(model_name: str, params_: dict, messages: list[dict]) -> str:
 
 
 @retry(wait=wait_fixed(90), stop=stop_after_attempt(10))
-async def get_llm_response_async(model_name: str, params: dict, messages: list[dict]) -> str:
+async def get_llm_response_async(model_name: str, params_: dict, messages: list[dict]) -> str:
+    params = params_.copy()
     if model_name in OPENAI_MODEL_NAMES:
         if 'max_tokens' in params:
-            params_copy = params.copy()
-            params_copy['max_completion_tokens'] = params_copy['max_tokens']
-            del params_copy['max_tokens']
-            return get_gpt_respnose(model_name, params_copy, messages)
-        return get_gpt_respnose(model_name, params, messages)
+            params['max_completion_tokens'] = params['max_tokens']
+            del params['max_tokens']
+            return await get_gpt_respnose_async(model_name, params, messages)
+        return await get_gpt_respnose_async(model_name, params, messages)
     elif model_name in ANTHROPIC_MODEL_NAMES:
+        if 'max_tokens' not in params:
+            params['max_tokens'] = 8192
         return await get_claude_response_async(model_name, params, messages)
     elif model_name in GEMINI_MODEL_NAMES:
         return await get_gemini_response_async(model_name, params, messages)
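For context, a minimal sketch of how the updated helper might be called after this change. The model name ("gpt-4o" standing in for an entry of OPENAI_MODEL_NAMES), the params values, and the asyncio setup are illustrative assumptions, not part of the commit:

import asyncio

from llm_api_utils import get_llm_response_async

async def main():
    # Illustrative request; 'max_tokens' is remapped to 'max_completion_tokens'
    # inside get_llm_response_async for OpenAI models, and the caller's dict is
    # no longer mutated because the function now copies params_ first.
    params = {"temperature": 0.2, "max_tokens": 1024}
    messages = [{"role": "user", "content": "Hello"}]

    # "gpt-4o" is assumed to be listed in OPENAI_MODEL_NAMES for this example.
    reply = await get_llm_response_async("gpt-4o", params, messages)
    print(reply)

asyncio.run(main())

The copy-then-rename approach keeps the caller's params dict unchanged across retries, and the Anthropic branch now falls back to max_tokens = 8192 when the caller omits it.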
