@@ -2,33 +2,128 @@ import {
   CoreAssistantMessage,
   CoreMessage,
   CoreSystemMessage,
-  CoreTool,
   CoreUserMessage,
   generateObject,
   generateText,
   ImagePart,
   LanguageModel,
+  NoObjectGeneratedError,
   TextPart,
+  ToolSet,
 } from "ai";
 import {
   CreateChatCompletionOptions,
   LLMClient,
   AvailableModel,
 } from "@browserbasehq/stagehand";
 import { ChatCompletion } from "openai/resources";
+import { LogLine } from "@/types/log";
+import { LLMCache } from "@/lib/cache/LLMCache";

 export class AISdkClient extends LLMClient {
   public type = "aisdk" as const;
   private model: LanguageModel;
+  private logger?: (message: LogLine) => void;
+  private cache: LLMCache | undefined;
+  private enableCaching: boolean;

-  constructor({ model }: { model: LanguageModel }) {
+  constructor({
+    model,
+    logger,
+    enableCaching = false,
+    cache,
+  }: {
+    model: LanguageModel;
+    logger?: (message: LogLine) => void;
+    enableCaching?: boolean;
+    cache?: LLMCache;
+  }) {
     super(model.modelId as AvailableModel);
     this.model = model;
+    this.logger = logger;
+    this.cache = cache;
+    this.enableCaching = enableCaching;
+  }
+
+  public getLanguageModel(): LanguageModel {
+    return this.model;
   }

   async createChatCompletion<T = ChatCompletion>({
     options,
   }: CreateChatCompletionOptions): Promise<T> {
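+    // Log the outgoing request, redacting inline image URLs from the log payload.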
+    this.logger?.({
+      category: "aisdk",
+      message: "creating chat completion",
+      level: 2,
+      auxiliary: {
+        options: {
+          value: JSON.stringify({
+            ...options,
+            image: undefined,
+            messages: options.messages.map((msg) => ({
+              ...msg,
+              content: Array.isArray(msg.content)
+                ? msg.content.map((c) =>
+                    "image_url" in c
+                      ? { ...c, image_url: { url: "[IMAGE_REDACTED]" } }
+                      : c,
+                  )
+                : msg.content,
+            })),
+          }),
+          type: "object",
+        },
+        modelName: {
+          value: this.model.modelId,
+          type: "string",
+        },
+      },
+    });
+
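+    // Cache key: the model id plus the full message list and response schema.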
+    const cacheOptions = {
+      model: this.model.modelId,
+      messages: options.messages,
+      response_model: options.response_model,
+    };
+
+    if (this.enableCaching && this.cache) {
+      const cachedResponse = await this.cache.get<T>(
+        cacheOptions,
+        options.requestId,
+      );
+      if (cachedResponse) {
+        this.logger?.({
+          category: "llm_cache",
+          message: "LLM cache hit - returning cached response",
+          level: 1,
+          auxiliary: {
+            requestId: {
+              value: options.requestId,
+              type: "string",
+            },
+            cachedResponse: {
+              value: JSON.stringify(cachedResponse),
+              type: "object",
+            },
+          },
+        });
+        return cachedResponse;
+      } else {
+        this.logger?.({
+          category: "llm_cache",
+          message: "LLM cache miss - no cached response found",
+          level: 1,
+          auxiliary: {
+            requestId: {
+              value: options.requestId,
+              type: "string",
+            },
+          },
+        });
+      }
+    }
+
     const formattedMessages: CoreMessage[] = options.messages.map((message) => {
       if (Array.isArray(message.content)) {
         if (message.role === "system") {
@@ -82,45 +177,215 @@ export class AISdkClient extends LLMClient {
       };
     });

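+    // Structured-output path: a response_model routes the call through generateObject.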
+    let objectResponse: Awaited<ReturnType<typeof generateObject>>;
+    const isGPT5 = this.model.modelId.includes("gpt-5");
     if (options.response_model) {
-      const response = await generateObject({
-        model: this.model,
-        messages: formattedMessages,
-        schema: options.response_model.schema,
-      });
+      try {
+        objectResponse = await generateObject({
+          model: this.model,
+          messages: formattedMessages,
+          schema: options.response_model.schema,
+          temperature: options.temperature,
+          providerOptions: isGPT5
+            ? {
+                openai: {
+                  textVerbosity: "low", // Making these the default for gpt-5 for now
+                  reasoningEffort: "minimal",
+                },
+              }
+            : undefined,
+        });
+      } catch (err) {
+        if (NoObjectGeneratedError.isInstance(err)) {
+          this.logger?.({
+            category: "AISDK error",
+            message: err.message,
+            level: 0,
+            auxiliary: {
+              cause: {
+                value: JSON.stringify(err.cause ?? {}),
+                type: "object",
+              },
+              text: {
+                value: err.text ?? "",
+                type: "string",
+              },
+              response: {
+                value: JSON.stringify(err.response ?? {}),
+                type: "object",
+              },
+              usage: {
+                value: JSON.stringify(err.usage ?? {}),
+                type: "object",
+              },
+              finishReason: {
+                value: err.finishReason ?? "unknown",
+                type: "string",
+              },
+              requestId: {
+                value: options.requestId,
+                type: "string",
+              },
+            },
+          });

-      return {
-        data: response.object,
+          throw err;
+        }
+        throw err;
+      }
+
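+      // Normalize the AI SDK object result into Stagehand's { data, usage } shape.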
+      const result = {
+        data: objectResponse.object,
         usage: {
-          prompt_tokens: response.usage.promptTokens ?? 0,
-          completion_tokens: response.usage.completionTokens ?? 0,
-          total_tokens: response.usage.totalTokens ?? 0,
+          prompt_tokens: objectResponse.usage.promptTokens ?? 0,
+          completion_tokens: objectResponse.usage.completionTokens ?? 0,
+          total_tokens: objectResponse.usage.totalTokens ?? 0,
         },
       } as T;
-    }

-    const tools: Record<string, CoreTool> = {};
+      if (this.enableCaching) {
+        this.logger?.({
+          category: "llm_cache",
+          message: "caching response",
+          level: 1,
+          auxiliary: {
+            requestId: {
+              value: options.requestId,
+              type: "string",
+            },
+            cacheOptions: {
+              value: JSON.stringify({
+                ...cacheOptions,
+                messages: cacheOptions.messages.map((msg) => ({
+                  ...msg,
+                  content: Array.isArray(msg.content)
+                    ? msg.content.map((c) =>
+                        "image_url" in c
+                          ? { ...c, image_url: { url: "[IMAGE_REDACTED]" } }
+                          : c,
+                      )
+                    : msg.content,
+                })),
+              }),
+              type: "object",
+            },
+            response: {
+              value: JSON.stringify(result),
+              type: "object",
+            },
+          },
+        });
+        this.cache.set(cacheOptions, result, options.requestId);
+      }

-    for (const rawTool of options.tools) {
-      tools[rawTool.name] = {
-        description: rawTool.description,
-        parameters: rawTool.parameters,
-      };
+      this.logger?.({
+        category: "aisdk",
+        message: "response",
+        level: 1,
+        auxiliary: {
+          response: {
+            value: JSON.stringify({
+              object: objectResponse.object,
+              usage: objectResponse.usage,
+              finishReason: objectResponse.finishReason,
+              // Omit request and response properties that might contain images
+            }),
+            type: "object",
+          },
+          requestId: {
+            value: options.requestId,
+            type: "string",
+          },
+        },
+      });
+
+      return result;
     }

-    const response = await generateText({
+    const tools: ToolSet = {};
+    if (options.tools && options.tools.length > 0) {
+      for (const tool of options.tools) {
+        tools[tool.name] = {
+          description: tool.description,
+          parameters: tool.parameters,
+        };
+      }
+    }
+
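+    // Plain-text/tool-call path: without a response_model, fall back to generateText.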
+    const textResponse = await generateText({
       model: this.model,
       messages: formattedMessages,
-      tools,
+      tools: Object.keys(tools).length > 0 ? tools : undefined,
+      toolChoice:
+        Object.keys(tools).length > 0
+          ? options.tool_choice === "required"
+            ? "required"
+            : options.tool_choice === "none"
+              ? "none"
+              : "auto"
+          : undefined,
+      temperature: options.temperature,
     });

-    return {
-      data: response.text,
+    // Transform AI SDK response to match LLMResponse format expected by operator handler
+    const transformedToolCalls = (textResponse.toolCalls || []).map(
+      (toolCall) => ({
+        id:
+          toolCall.toolCallId ||
+          `call_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
+        type: "function",
+        function: {
+          name: toolCall.toolName,
+          arguments: JSON.stringify(toolCall.args),
+        },
+      }),
+    );
+
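+    // Wrap the text result in an OpenAI-style chat.completion envelope.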
+    const result = {
+      id: `chatcmpl_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
+      object: "chat.completion",
+      created: Math.floor(Date.now() / 1000),
+      model: this.model.modelId,
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: textResponse.text || null,
+            tool_calls: transformedToolCalls,
+          },
+          finish_reason: textResponse.finishReason || "stop",
+        },
+      ],
       usage: {
-        prompt_tokens: response.usage.promptTokens ?? 0,
-        completion_tokens: response.usage.completionTokens ?? 0,
-        total_tokens: response.usage.totalTokens ?? 0,
+        prompt_tokens: textResponse.usage.promptTokens ?? 0,
+        completion_tokens: textResponse.usage.completionTokens ?? 0,
+        total_tokens: textResponse.usage.totalTokens ?? 0,
       },
     } as T;
+
+    this.logger?.({
+      category: "aisdk",
+      message: "response",
+      level: 2,
+      auxiliary: {
+        response: {
+          value: JSON.stringify({
+            text: textResponse.text,
+            usage: textResponse.usage,
+            finishReason: textResponse.finishReason,
+            // Omit request and response properties that might contain images
+          }),
+          type: "object",
+        },
+        requestId: {
+          value: options.requestId,
+          type: "string",
+        },
+      },
+    });
+
+    return result;
   }
 }
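
For reference, a minimal wiring sketch for the updated client. This is illustrative only: the "./aisdk" import path and the model choice are assumptions, while openai() comes from the @ai-sdk/openai package and Stagehand accepts a custom llmClient in its constructor.

  import { openai } from "@ai-sdk/openai";
  import { Stagehand } from "@browserbasehq/stagehand";
  import { AISdkClient } from "./aisdk"; // hypothetical path to this file

  const stagehand = new Stagehand({
    env: "LOCAL",
    llmClient: new AISdkClient({
      model: openai("gpt-4o"), // any AI SDK LanguageModel works here
      logger: (line) => console.log(`[${line.category}] ${line.message}`),
      enableCaching: false, // set true and pass an LLMCache instance to reuse responses
    }),
  });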