
Commit dd92c90

fix: Record all tool calls from LLM response (camel-ai#1928)
1 parent 69127ed commit dd92c90

File tree

11 files changed (+99 -58 lines)
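
Before this change, `ChatAgent` kept only the first tool call from each LLM response, and external calls surfaced as a single request under `response.info["external_tool_call_request"]`. This commit widens the plumbing to lists so that every call in a multi-tool-call response is recorded. A minimal consumption sketch (the agent setup and tool are illustrative, not from this diff; only the renamed info key and the `ToolCallRequest` fields come from the changes below):

```python
from camel.agents import ChatAgent

def sub(a: int, b: int) -> int:
    r"""Subtract b from a."""
    return a - b

# Hypothetical setup: tools passed as `external_tools` are not executed by
# the agent; their call requests are returned to the caller instead.
agent = ChatAgent("You are a calculator.", external_tools=[sub])

response = agent.step("What is 5 - 3? Also compute 9 - 4.")

# The renamed info key now holds a list of requests (or None).
for req in response.info["external_tool_call_requests"] or []:
    print(req.tool_name, req.args, req.tool_call_id)
```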

.github/ISSUE_TEMPLATE/bug_report.yml (+1 -1)

```diff
@@ -26,7 +26,7 @@ body:
     attributes:
       label: What version of camel are you using?
       description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
-      placeholder: E.g., 0.2.34
+      placeholder: E.g., 0.2.35
     validations:
       required: true
```

camel/__init__.py (+1 -1)

```diff
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.34'
+__version__ = '0.2.35'
 
 __all__ = [
     '__version__',
```

camel/agents/_types.py (+1 -1)

```diff
@@ -34,7 +34,7 @@ class ModelResponse(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     response: Union[ChatCompletion, Stream, AsyncStream]
-    tool_call_request: Optional[ToolCallRequest]
+    tool_call_requests: Optional[List[ToolCallRequest]]
     output_messages: List[BaseMessage]
     finish_reasons: List[str]
     usage_dict: Dict[str, Any]
```
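
With the field widened to a list, one `ModelResponse` can carry several parallel calls. A minimal sketch of the new shape, assuming `ToolCallRequest` lives alongside `ModelResponse` in `_types` (field names come from the diff; the values are made up):

```python
from camel.agents._types import ToolCallRequest

# Two calls from a single LLM response; the old singular field kept only one.
tool_call_requests = [
    ToolCallRequest(tool_name="sub", args={"a": 5, "b": 3}, tool_call_id="call_1"),
    ToolCallRequest(tool_name="add", args={"a": 2, "b": 2}, tool_call_id="call_2"),
]
```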

camel/agents/_utils.py (+4 -4)

```diff
@@ -136,7 +136,7 @@ def get_info_dict(
     termination_reasons: List[str],
     num_tokens: int,
     tool_calls: List[ToolCallingRecord],
-    external_tool_call_request: Optional[ToolCallRequest] = None,
+    external_tool_call_requests: Optional[List[ToolCallRequest]] = None,
 ) -> Dict[str, Any]:
     r"""Returns a dictionary containing information about the chat session.
 
@@ -149,8 +149,8 @@
         num_tokens (int): The number of tokens used in the chat session.
         tool_calls (List[ToolCallingRecord]): The list of function
             calling records, containing the information of called tools.
-        external_tool_call_request (Optional[ToolCallRequest]): The
-            request for external tool call.
+        external_tool_call_requests (Optional[List[ToolCallRequest]]): The
+            requests for external tool calls.
 
     Returns:
@@ -162,7 +162,7 @@
         "termination_reasons": termination_reasons,
         "num_tokens": num_tokens,
         "tool_calls": tool_calls,
-        "external_tool_call_request": external_tool_call_request,
+        "external_tool_call_requests": external_tool_call_requests,
     }
```
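
The renamed key propagates into the info dict handed back to callers. A sketch of the returned mapping, restricted to the keys visible in the hunks above (other keys elided; values are illustrative placeholders):

```python
info = {
    "termination_reasons": ["tool_calls"],
    "num_tokens": 49,
    "tool_calls": [],                     # ToolCallingRecord entries
    "external_tool_call_requests": None,  # was "external_tool_call_request"
}
```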

camel/agents/chat_agent.py (+64 -27)

```diff
@@ -582,7 +582,7 @@ def step(
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
         tool_call_records: List[ToolCallingRecord] = []
-        external_tool_call_request: Optional[ToolCallRequest] = None
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None
 
         while True:
             try:
@@ -602,12 +602,26 @@
             if self.single_iteration:
                 break
 
-            if tool_call_request := response.tool_call_request:
-                if tool_call_request.tool_name in self._external_tool_schemas:
-                    external_tool_call_request = tool_call_request
+            if tool_call_requests := response.tool_call_requests:
+                # Process all tool calls
+                for tool_call_request in tool_call_requests:
+                    if (
+                        tool_call_request.tool_name
+                        in self._external_tool_schemas
+                    ):
+                        if external_tool_call_requests is None:
+                            external_tool_call_requests = []
+                        external_tool_call_requests.append(tool_call_request)
+                    else:
+                        tool_call_records.append(
+                            self._execute_tool(tool_call_request)
+                        )
+
+                # If we found external tool calls, break the loop
+                if external_tool_call_requests:
                     break
 
-                tool_call_records.append(self._execute_tool(tool_call_request))
+                # If we're still here, continue the loop
                 continue
 
             break
@@ -616,7 +630,10 @@
         self._record_final_output(response.output_messages)
 
         return self._convert_to_chatagent_response(
-            response, tool_call_records, num_tokens, external_tool_call_request
+            response,
+            tool_call_records,
+            num_tokens,
+            external_tool_call_requests,
         )
 
     @property
@@ -658,7 +675,7 @@ async def astep(
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
         tool_call_records: List[ToolCallingRecord] = []
-        external_tool_call_request: Optional[ToolCallRequest] = None
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None
         while True:
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -677,13 +694,27 @@
             if self.single_iteration:
                 break
 
-            if tool_call_request := response.tool_call_request:
-                if tool_call_request.tool_name in self._external_tool_schemas:
-                    external_tool_call_request = tool_call_request
+            if tool_call_requests := response.tool_call_requests:
+                # Process all tool calls
+                for tool_call_request in tool_call_requests:
+                    if (
+                        tool_call_request.tool_name
+                        in self._external_tool_schemas
+                    ):
+                        if external_tool_call_requests is None:
+                            external_tool_call_requests = []
+                        external_tool_call_requests.append(tool_call_request)
+
+                    tool_call_record = await self._aexecute_tool(
+                        tool_call_request
+                    )
+                    tool_call_records.append(tool_call_record)
+
+                # If we found an external tool call, break the loop
+                if external_tool_call_requests:
                     break
 
-                tool_call_record = await self._aexecute_tool(tool_call_request)
-                tool_call_records.append(tool_call_record)
+                # If we're still here, continue the loop
                 continue
 
             break
@@ -692,15 +723,18 @@
         self._record_final_output(response.output_messages)
 
         return self._convert_to_chatagent_response(
-            response, tool_call_records, num_tokens, external_tool_call_request
+            response,
+            tool_call_records,
+            num_tokens,
+            external_tool_call_requests,
         )
 
     def _convert_to_chatagent_response(
         self,
         response: ModelResponse,
         tool_call_records: List[ToolCallingRecord],
         num_tokens: int,
-        external_tool_call_request: Optional[ToolCallRequest],
+        external_tool_call_requests: Optional[List[ToolCallRequest]],
     ) -> ChatAgentResponse:
         r"""Parse the final model response into the chat agent response."""
         info = self._step_get_info(
@@ -710,7 +744,7 @@ def _convert_to_chatagent_response(
             response.response_id,
             tool_call_records,
             num_tokens,
-            external_tool_call_request,
+            external_tool_call_requests,
         )
 
         return ChatAgentResponse(
@@ -961,7 +995,7 @@ def _step_get_info(
         response_id: str,
         tool_calls: List[ToolCallingRecord],
         num_tokens: int,
-        external_tool_call_request: Optional[ToolCallRequest] = None,
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None,
     ) -> Dict[str, Any]:
         r"""Process the output of a chat step and gather information about the
         step.
@@ -1018,7 +1052,7 @@
             finish_reasons,
             num_tokens,
             tool_calls,
-            external_tool_call_request,
+            external_tool_call_requests,
         )
 
     def _handle_batch_response(
@@ -1057,18 +1091,21 @@ def _handle_batch_response(
         if response.usage is not None:
             usage = safe_model_dump(response.usage)
 
-        tool_call_request: Optional[ToolCallRequest] = None
+        tool_call_requests: Optional[List[ToolCallRequest]] = None
         if tool_calls := response.choices[0].message.tool_calls:
-            tool_name = tool_calls[0].function.name
-            tool_call_id = tool_calls[0].id
-            args = json.loads(tool_calls[0].function.arguments)
-            tool_call_request = ToolCallRequest(
-                tool_name=tool_name, args=args, tool_call_id=tool_call_id
-            )
+            tool_call_requests = []
+            for tool_call in tool_calls:
+                tool_name = tool_call.function.name
+                tool_call_id = tool_call.id
+                args = json.loads(tool_call.function.arguments)
+                tool_call_request = ToolCallRequest(
+                    tool_name=tool_name, args=args, tool_call_id=tool_call_id
+                )
+                tool_call_requests.append(tool_call_request)
 
         return ModelResponse(
             response=response,
-            tool_call_request=tool_call_request,
+            tool_call_requests=tool_call_requests,
             output_messages=output_messages,
             finish_reasons=finish_reasons,
             usage_dict=usage,
@@ -1108,7 +1145,7 @@ def _handle_stream_response(
         # TODO: Handle tool calls
         return ModelResponse(
             response=response,
-            tool_call_request=None,
+            tool_call_requests=None,
             output_messages=output_messages,
             finish_reasons=finish_reasons,
             usage_dict=usage_dict,
@@ -1148,7 +1185,7 @@ async def _ahandle_stream_response(
         # TODO: Handle tool calls
         return ModelResponse(
             response=response,
-            tool_call_request=None,
+            tool_call_requests=None,
             output_messages=output_messages,
             finish_reasons=finish_reasons,
             usage_dict=usage_dict,
```
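
The heart of the change is the loop body: instead of inspecting a single `tool_call_request`, both `step()` and `astep()` now iterate over every request, deferring external tools to the caller and executing the rest. A standalone sketch of the synchronous partitioning, with stand-in types rather than the actual `ChatAgent` internals:

```python
from typing import Callable, List, Optional, Set, Tuple

def partition_tool_calls(
    requests: List["ToolCallRequest"],
    external_tool_names: Set[str],
    execute: Callable[["ToolCallRequest"], "ToolCallingRecord"],
) -> Tuple[Optional[List["ToolCallRequest"]], List["ToolCallingRecord"]]:
    # Mirrors the new step() body: external calls are collected for the
    # caller (and end the agent loop); internal calls run immediately.
    external: Optional[List["ToolCallRequest"]] = None
    records: List["ToolCallingRecord"] = []
    for req in requests:
        if req.tool_name in external_tool_names:
            if external is None:
                external = []
            external.append(req)
        else:
            records.append(execute(req))
    return external, records
```

Note that the async variant in the diff executes every call via `_aexecute_tool`, including external ones, while the sync variant executes only the non-external calls.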

camel/configs/openai_config.py (+20 -16)

```diff
@@ -15,7 +15,7 @@
 
 from typing import Dict, Optional, Sequence, Type, Union
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
 
 from camel.configs.base_config import BaseConfig
 
@@ -28,14 +28,14 @@ class ChatGPTConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -51,7 +51,7 @@ class ChatGPTConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -62,12 +62,12 @@ class ChatGPTConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         logit_bias (dict, optional): Modify the likelihood of specified tokens
             appearing in the completion. Accepts a json object that maps tokens
             (specified by their token ID in the tokenizer) to an associated
@@ -76,7 +76,7 @@ class ChatGPTConfig(BaseConfig):
             The exact effect will vary per model, but values between :obj:`-1`
             and :obj:`1` should decrease or increase likelihood of selection;
             values like :obj:`-100` or :obj:`100` should result in a ban or
-            exclusive selection of the relevant token. (default: :obj:`{}`)
+            exclusive selection of the relevant token. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
             (default: :obj:`""`)
@@ -101,21 +101,25 @@ class ChatGPTConfig(BaseConfig):
             :obj:`o1mini`, :obj:`o1preview`, :obj:`o3mini`). If not provided
             or if the model type does not support it, this parameter is
             ignored. (default: :obj:`None`)
+        parallel_tool_calls (bool, optional): A parameter specifying whether
+            the model should call tools in parallel or not. (default:
+            :obj:`None`)
     """
 
-    temperature: float = 0.2  # openai default: 1.0
-    top_p: float = 1.0
-    n: int = 1
-    stream: bool = False
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
     stop: Optional[Union[str, Sequence[str]]] = None
     max_tokens: Optional[int] = None
-    presence_penalty: float = 0.0
+    presence_penalty: Optional[float] = None
     response_format: Optional[Union[Type[BaseModel], Dict]] = None
-    frequency_penalty: float = 0.0
-    logit_bias: Dict = Field(default_factory=dict)
-    user: str = ""
+    frequency_penalty: Optional[float] = None
+    logit_bias: Optional[Dict] = None
+    user: Optional[str] = None
     tool_choice: Optional[Union[Dict[str, str], str]] = None
     reasoning_effort: Optional[str] = None
+    parallel_tool_calls: Optional[bool] = None
 
 
 OPENAI_API_PARAMS = {param for param in ChatGPTConfig.model_fields.keys()}
```
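
Since every sampling field now defaults to `None`, unset parameters can be dropped from the request so that OpenAI's server-side defaults apply, and the new `parallel_tool_calls` flag controls whether the model may emit several tool calls at once. A usage sketch (the `exclude_none` dump is our illustration of the effect, not necessarily how camel serializes configs internally):

```python
from camel.configs import ChatGPTConfig

config = ChatGPTConfig(temperature=0.7, parallel_tool_calls=True)

# Fields left at None (top_p, n, stream, ...) can simply be omitted,
# deferring to the provider's defaults instead of camel-side ones.
print(config.model_dump(exclude_none=True))
# e.g. {'temperature': 0.7, 'parallel_tool_calls': True}
```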

docs/conf.py (+1 -1)

```diff
@@ -27,7 +27,7 @@
 project = 'CAMEL'
 copyright = '2024, CAMEL-AI.org'
 author = 'CAMEL-AI.org'
-release = '0.2.34'
+release = '0.2.35'
 
 html_favicon = (
     'https://raw.githubusercontent.com/camel-ai/camel/master/misc/favicon.png'
```

docs/key_modules/loaders.md (+2 -2)

````diff
@@ -340,14 +340,14 @@ response = jina_reader.read_content("https://docs.camel-ai.org/")
 print(response)
 ```
 ```markdown
->>>Welcome to CAMEL’s documentation! — CAMEL 0.2.34 documentation
+>>>Welcome to CAMEL’s documentation! — CAMEL 0.2.35 documentation
 ===============
 
 [Skip to main content](https://docs.camel-ai.org/#main-content)
 
 Back to top Ctrl+K
 
-[![Image 1](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png) ![Image 2](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png)CAMEL 0.2.34](https://docs.camel-ai.org/#)
+[![Image 1](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png) ![Image 2](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png)CAMEL 0.2.35](https://docs.camel-ai.org/#)
 
 Search Ctrl+K
````

pyproject.toml (+1 -1)

```diff
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "camel-ai"
-version = "0.2.34"
+version = "0.2.35"
 description = "Communicative Agents for AI Society Study"
 authors = [{ name = "CAMEL-AI.org" }]
 requires-python = ">=3.10,<3.13"
```

test/agents/test_chat_agent.py (+3 -3)

```diff
@@ -361,11 +361,11 @@ def test_chat_agent_step_with_external_tools(step_call_count=3):
         response = external_tool_agent.step(usr_msg)
         assert not response.msg.content
 
-        external_tool_call_request = response.info[
-            "external_tool_call_request"
+        external_tool_call_requests = response.info[
+            "external_tool_call_requests"
         ]
         assert (
-            external_tool_call_request.tool_name == "sub"
+            external_tool_call_requests[0].tool_name == "sub"
         ), f"Error in calling round {i+1}"
```
uv.lock (+1 -1)

Some generated files are not rendered by default.
