@@ -1446,6 +1446,32 @@ def tool_messages(tool_requests_ids, tool_call_result, expect_content):
    def choice_content(response):
        return response["content"]

+    def get_stream_body_content(body):
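+        # Collect the streamed content blocks from an Anthropic Claude
+        # invoke_model_with_response_stream body.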
+        content = []
+        content_block = {}
+        input_json_buf = ""
+        for event in body:
+            json_bytes = event["chunk"].get("bytes", b"")
+            decoded = json_bytes.decode("utf-8")
+            chunk = json.loads(decoded)
+
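+            # Claude streams typed events: "content_block_start" opens a block,
+            # "content_block_delta" carries text or partial tool-input JSON,
+            # and "content_block_stop" closes the block.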
+            if (message_type := chunk.get("type")) is not None:
+                if message_type == "content_block_start":
+                    content_block = chunk["content_block"]
+                elif message_type == "content_block_delta":
+                    if chunk["delta"]["type"] == "text_delta":
+                        content_block["text"] += chunk["delta"]["text"]
+                    elif chunk["delta"]["type"] == "input_json_delta":
+                        input_json_buf += chunk["delta"]["partial_json"]
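+                # When a block closes, parse any buffered tool-input JSON
+                # before appending the finished block.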
+                elif message_type == "content_block_stop":
+                    if input_json_buf:
+                        content_block["input"] = json.loads(input_json_buf)
+                    content.append(content_block)
+                    content_block = None
+                    input_json_buf = ""
+
+        return content
+

class AmazonNovaModel:
    @staticmethod
@@ -1524,6 +1550,42 @@ def tool_messages(tool_requests_ids, tool_call_result, expect_content):
    def choice_content(response):
        return response["output"]["message"]["content"]

+    def get_stream_body_content(body):
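+        # Collect the streamed content blocks from an Amazon Nova
+        # invoke_model_with_response_stream body.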
+        content = []
+        content_block = {}
+        tool_use = {}
+        for event in body:
+            json_bytes = event["chunk"].get("bytes", b"")
+            decoded = json_bytes.decode("utf-8")
+            chunk = json.loads(decoded)
+
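+            # Nova events are keyed by name; a "contentBlockDelta" carries
+            # either text or the tool-use input as a serialized JSON string.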
+ if "contentBlockDelta" in chunk :
1563
+ delta = chunk ["contentBlockDelta" ]["delta" ]
1564
+ if "text" in delta :
1565
+ content_block .setdefault ("text" , "" )
1566
+ content_block ["text" ] += delta ["text" ]
1567
+ elif "toolUse" in delta :
1568
+ tool_use ["toolUse" ]["input" ] = json .loads (
1569
+ delta ["toolUse" ]["input" ]
1570
+ )
1571
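+            # A new content block starts: flush any accumulated text block
+            # and pick up the toolUse block, if present.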
+ elif "contentBlockStart" in chunk :
1572
+ if content_block :
1573
+ content .append (content_block )
1574
+ content_block = {}
1575
+ start = chunk ["contentBlockStart" ]["start" ]
1576
+ if "toolUse" in start :
1577
+ tool_use = start
1578
+ elif "contentBlockStop" in chunk :
1579
+ if tool_use :
1580
+ content .append (tool_use )
1581
+ tool_use = {}
1582
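+            # The message is complete: flush any trailing text block.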
+ elif "messageStop" in chunk :
1583
+ if content_block :
1584
+ content .append (content_block )
1585
+ content_block = {}
1586
+
1587
+ return content
1588
+
1527
1589
1528
1590
def invoke_model_tool_call (
1529
1591
span_exporter ,
@@ -2110,54 +2172,7 @@ def invoke_model_with_response_stream_tool_call(
        modelId=llm_model_value,
    )

-    content = []
-    content_block = {}
-    # used only by anthropic claude
-    input_json_buf = ""
-    # used only by amazon nova
-    tool_use = {}
-    for event in response_0["body"]:
-        json_bytes = event["chunk"].get("bytes", b"")
-        decoded = json_bytes.decode("utf-8")
-        chunk = json.loads(decoded)
-
-        # anthropic claude
-        if (message_type := chunk.get("type")) is not None:
-            if message_type == "content_block_start":
-                content_block = chunk["content_block"]
-            elif message_type == "content_block_delta":
-                if chunk["delta"]["type"] == "text_delta":
-                    content_block["text"] += chunk["delta"]["text"]
-                elif chunk["delta"]["type"] == "input_json_delta":
-                    input_json_buf += chunk["delta"]["partial_json"]
-            elif message_type == "content_block_stop":
-                if input_json_buf:
-                    content_block["input"] = json.loads(input_json_buf)
-                content.append(content_block)
-                content_block = None
-                input_json_buf = ""
-        else:
-            if "contentBlockDelta" in chunk:
-                delta = chunk["contentBlockDelta"]["delta"]
-                if "text" in delta:
-                    content_block.setdefault("text", "")
-                    content_block["text"] += delta["text"]
-                elif "toolUse" in delta:
-                    tool_use["toolUse"]["input"] = json.loads(
-                        delta["toolUse"]["input"]
-                    )
-            elif "contentBlockStart" in chunk:
-                if content_block:
-                    content.append(content_block)
-                    content_block = {}
-                start = chunk["contentBlockStart"]["start"]
-                if "toolUse" in start:
-                    tool_use = start
-            elif "contentBlockStop" in chunk:
-                if tool_use:
-                    content.append(tool_use)
-                    tool_use = {}
-
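+    # Parse the streamed response body with the model-specific helper.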
+    content = llm_model_config.get_stream_body_content(response_0["body"])
    assert content

    tool_requests_ids = llm_model_config.tool_requests_ids_from_stream(content)
@@ -2179,34 +2194,9 @@ def invoke_model_with_response_stream_tool_call(
        modelId=llm_model_value,
    )

-    content_block = {}
-    response_1_content = []
-    for event in response_1["body"]:
-        json_bytes = event["chunk"].get("bytes", b"")
-        decoded = json_bytes.decode("utf-8")
-        chunk = json.loads(decoded)
-
-        # anthropic claude
-        if (message_type := chunk.get("type")) is not None:
-            if message_type == "content_block_start":
-                content_block = chunk["content_block"]
-            elif message_type == "content_block_delta":
-                if chunk["delta"]["type"] == "text_delta":
-                    content_block["text"] += chunk["delta"]["text"]
-            elif message_type == "content_block_stop":
-                response_1_content.append(content_block)
-                content_block = None
-        else:
-            if "contentBlockDelta" in chunk:
-                delta = chunk["contentBlockDelta"]["delta"]
-                if "text" in delta:
-                    content_block.setdefault("text", "")
-                    content_block["text"] += delta["text"]
-            elif "messageStop" in chunk:
-                if content_block:
-                    response_1_content.append(content_block)
-                    content_block = {}
-
+    response_1_content = llm_model_config.get_stream_body_content(
+        response_1["body"]
+    )
    assert response_1_content

    (span_0, span_1) = span_exporter.get_finished_spans()