@@ -582,7 +582,7 @@ def step(
        self.update_memory(input_message, OpenAIBackendRole.USER)

        tool_call_records: List[ToolCallingRecord] = []
-        external_tool_call_request: Optional[ToolCallRequest] = None
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None

        while True:
            try:
@@ -602,12 +602,26 @@ def step(
            if self.single_iteration:
                break

-            if tool_call_request := response.tool_call_request:
-                if tool_call_request.tool_name in self._external_tool_schemas:
-                    external_tool_call_request = tool_call_request
+            if tool_call_requests := response.tool_call_requests:
+                # Process all tool calls
+                for tool_call_request in tool_call_requests:
+                    if (
+                        tool_call_request.tool_name
+                        in self._external_tool_schemas
+                    ):
+                        if external_tool_call_requests is None:
+                            external_tool_call_requests = []
+                        external_tool_call_requests.append(tool_call_request)
+                    else:
+                        tool_call_records.append(
+                            self._execute_tool(tool_call_request)
+                        )
+
+                # If we found external tool calls, break the loop
+                if external_tool_call_requests:
                    break

-                tool_call_records.append(self._execute_tool(tool_call_request))
+                # If we're still here, continue the loop
                continue

            break
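The behavioural change in `step()` is that one model response may now carry several tool call requests: names found in `self._external_tool_schemas` are collected for the caller to execute, while everything else runs immediately through `self._execute_tool`. Below is a minimal standalone sketch of that partition-and-dispatch pattern; `ToolCall`, `EXTERNAL_TOOLS`, and `dispatch` are illustrative stand-ins, not CAMEL APIs.

```python
from typing import Dict, List, NamedTuple, Optional, Tuple


class ToolCall(NamedTuple):
    # Illustrative stand-in for camel's ToolCallRequest.
    tool_name: str
    args: Dict[str, object]


EXTERNAL_TOOLS = {"send_email"}  # assumed names of externally executed tools


def dispatch(
    calls: List[ToolCall],
) -> Tuple[List[str], Optional[List[ToolCall]]]:
    """Execute internal calls, collect external ones for the caller."""
    records: List[str] = []
    external: Optional[List[ToolCall]] = None
    for call in calls:
        if call.tool_name in EXTERNAL_TOOLS:
            if external is None:
                external = []
            external.append(call)
        else:
            # A real agent would run the tool here; we only record the name.
            records.append(f"executed {call.tool_name}")
    return records, external


records, external = dispatch(
    [ToolCall("search", {"q": "camel"}), ToolCall("send_email", {"to": "a@b"})]
)
print(records)   # ['executed search']
print(external)  # [ToolCall(tool_name='send_email', args={'to': 'a@b'})]
```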
@@ -616,7 +630,10 @@ def step(
        self._record_final_output(response.output_messages)

        return self._convert_to_chatagent_response(
-            response, tool_call_records, num_tokens, external_tool_call_request
+            response,
+            tool_call_records,
+            num_tokens,
+            external_tool_call_requests,
        )

    @property
@@ -658,7 +675,7 @@ async def astep(
        self.update_memory(input_message, OpenAIBackendRole.USER)

        tool_call_records: List[ToolCallingRecord] = []
-        external_tool_call_request: Optional[ToolCallRequest] = None
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None
        while True:
            try:
                openai_messages, num_tokens = self.memory.get_context()
@@ -677,13 +694,27 @@ async def astep(
            if self.single_iteration:
                break

-            if tool_call_request := response.tool_call_request:
-                if tool_call_request.tool_name in self._external_tool_schemas:
-                    external_tool_call_request = tool_call_request
+            if tool_call_requests := response.tool_call_requests:
+                # Process all tool calls
+                for tool_call_request in tool_call_requests:
+                    if (
+                        tool_call_request.tool_name
+                        in self._external_tool_schemas
+                    ):
+                        if external_tool_call_requests is None:
+                            external_tool_call_requests = []
+                        external_tool_call_requests.append(tool_call_request)
+
+                    tool_call_record = await self._aexecute_tool(
+                        tool_call_request
+                    )
+                    tool_call_records.append(tool_call_record)
+
+                # If we found an external tool call, break the loop
+                if external_tool_call_requests:
                    break

-                tool_call_record = await self._aexecute_tool(tool_call_request)
-                tool_call_records.append(tool_call_record)
+                # If we're still here, continue the loop
                continue

            break
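The async variant awaits `self._aexecute_tool` once per request inside the loop. A small sketch of that sequential-await pattern under simplified assumptions (the coroutine below is a dummy executor, not the library's):

```python
import asyncio
from typing import List, Tuple


async def aexecute_tool(tool_name: str, args: dict) -> str:
    # Dummy stand-in for an async tool executor.
    await asyncio.sleep(0)
    return f"{tool_name}({args}) -> done"


async def run_all(requests: List[Tuple[str, dict]]) -> List[str]:
    records: List[str] = []
    # Sequential awaiting, mirroring the loop above; independent calls could
    # instead be scheduled concurrently, e.g. with asyncio.gather.
    for name, args in requests:
        records.append(await aexecute_tool(name, args))
    return records


print(asyncio.run(run_all([("search", {"q": "camel"}), ("calc", {"x": 1})])))
```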
@@ -692,15 +723,18 @@ async def astep(
        self._record_final_output(response.output_messages)

        return self._convert_to_chatagent_response(
-            response, tool_call_records, num_tokens, external_tool_call_request
+            response,
+            tool_call_records,
+            num_tokens,
+            external_tool_call_requests,
        )

    def _convert_to_chatagent_response(
        self,
        response: ModelResponse,
        tool_call_records: List[ToolCallingRecord],
        num_tokens: int,
-        external_tool_call_request: Optional[ToolCallRequest],
+        external_tool_call_requests: Optional[List[ToolCallRequest]],
    ) -> ChatAgentResponse:
        r"""Parse the final model response into the chat agent response."""
        info = self._step_get_info(
@@ -710,7 +744,7 @@ def _convert_to_chatagent_response(
            response.response_id,
            tool_call_records,
            num_tokens,
-            external_tool_call_request,
+            external_tool_call_requests,
        )

        return ChatAgentResponse(
@@ -961,7 +995,7 @@ def _step_get_info(
        response_id: str,
        tool_calls: List[ToolCallingRecord],
        num_tokens: int,
-        external_tool_call_request: Optional[ToolCallRequest] = None,
+        external_tool_call_requests: Optional[List[ToolCallRequest]] = None,
    ) -> Dict[str, Any]:
        r"""Process the output of a chat step and gather information about the
        step.
@@ -1018,7 +1052,7 @@ def _step_get_info(
            finish_reasons,
            num_tokens,
            tool_calls,
-            external_tool_call_request,
+            external_tool_call_requests,
        )

    def _handle_batch_response(
@@ -1057,18 +1091,21 @@ def _handle_batch_response(
        if response.usage is not None:
            usage = safe_model_dump(response.usage)

-        tool_call_request: Optional[ToolCallRequest] = None
+        tool_call_requests: Optional[List[ToolCallRequest]] = None
        if tool_calls := response.choices[0].message.tool_calls:
-            tool_name = tool_calls[0].function.name
-            tool_call_id = tool_calls[0].id
-            args = json.loads(tool_calls[0].function.arguments)
-            tool_call_request = ToolCallRequest(
-                tool_name=tool_name, args=args, tool_call_id=tool_call_id
-            )
+            tool_call_requests = []
+            for tool_call in tool_calls:
+                tool_name = tool_call.function.name
+                tool_call_id = tool_call.id
+                args = json.loads(tool_call.function.arguments)
+                tool_call_request = ToolCallRequest(
+                    tool_name=tool_name, args=args, tool_call_id=tool_call_id
+                )
+                tool_call_requests.append(tool_call_request)

        return ModelResponse(
            response=response,
-            tool_call_request=tool_call_request,
+            tool_call_requests=tool_call_requests,
            output_messages=output_messages,
            finish_reasons=finish_reasons,
            usage_dict=usage,
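`_handle_batch_response` now converts every entry of `message.tool_calls`, not just the first, and each `function.arguments` field is a JSON-encoded string that must be decoded per call. A self-contained sketch of that parsing step, using a small dataclass and plain dicts as stand-ins for the camel and OpenAI types:

```python
import json
from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class ToolCallRequest:  # simplified stand-in for the camel type
    tool_name: str
    args: Dict[str, Any]
    tool_call_id: str


def parse_tool_calls(
    tool_calls: Optional[List[dict]],
) -> Optional[List[ToolCallRequest]]:
    """Turn raw tool-call payloads into request objects, one per call."""
    if not tool_calls:
        return None
    requests: List[ToolCallRequest] = []
    for call in tool_calls:
        requests.append(
            ToolCallRequest(
                tool_name=call["function"]["name"],
                # arguments arrive as a JSON-encoded string
                args=json.loads(call["function"]["arguments"]),
                tool_call_id=call["id"],
            )
        )
    return requests


raw = [
    {"id": "call_1", "function": {"name": "search", "arguments": '{"q": "camel"}'}},
    {"id": "call_2", "function": {"name": "calc", "arguments": '{"x": 2}'}},
]
print(parse_tool_calls(raw))
```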
@@ -1108,7 +1145,7 @@ def _handle_stream_response(
        # TODO: Handle tool calls
        return ModelResponse(
            response=response,
-            tool_call_request=None,
+            tool_call_requests=None,
            output_messages=output_messages,
            finish_reasons=finish_reasons,
            usage_dict=usage_dict,
@@ -1148,7 +1185,7 @@ async def _ahandle_stream_response(
        # TODO: Handle tool calls
        return ModelResponse(
            response=response,
-            tool_call_request=None,
+            tool_call_requests=None,
            output_messages=output_messages,
            finish_reasons=finish_reasons,
            usage_dict=usage_dict,
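For consumers, the single `external_tool_call_request` entry in the step info becomes a list. The sketch below assumes the info dict exposes it under the pluralised key `external_tool_call_requests` (check the release you are on) and uses a plain dict in place of a real `ChatAgentResponse.info`:

```python
from typing import Any, Dict, List, Optional

# Illustrative stand-in for ChatAgentResponse.info after a step() call.
info: Dict[str, Any] = {
    "external_tool_call_requests": [
        {"tool_name": "send_email", "args": {"to": "a@b"}, "tool_call_id": "call_9"},
    ],
}

requests: Optional[List[dict]] = info.get("external_tool_call_requests")
for request in requests or []:
    # The caller executes these externally and feeds results back to the agent.
    print(request["tool_name"], request["args"], request["tool_call_id"])
```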