diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 00000000..86998d7e --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,13 @@ +{ + "permissions": { + "allow": [ + "Bash(npm test:*)", + "Bash(ls:*)", + "Bash(echo:*)", + "Bash(pnpm run build:*)", + "Bash(pnpm test:*)", + ], + "deny": [], + "ask": [] + } +} diff --git a/.gitignore b/.gitignore index 6ef49921..93d57c45 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,5 @@ pyodide/ firestore-debug.log .npm/ .pm2/ +bench +/benchmarks diff --git a/frontend/package.json b/frontend/package.json index f738dfc9..548e4c87 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -37,7 +37,7 @@ "@angular/router": "19.2.10", "@jsverse/transloco": "7.6.1", "@sinclair/typebox": "0.34.41", - "ai": "5.0.8", + "ai": "5.0.78", "angular-split": "^19.0.0", "apexcharts": "4.7.0", "clipboard": "^2.0.11", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index b919e91b..7b68f969 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -48,8 +48,8 @@ importers: specifier: 0.34.41 version: 0.34.41 ai: - specifier: 5.0.8 - version: 5.0.8(zod@3.25.76) + specifier: 5.0.78 + version: 5.0.78(zod@3.25.76) angular-split: specifier: ^19.0.0 version: 19.0.0(@angular/common@19.2.10(@angular/core@19.2.10(rxjs@7.8.2)(zone.js@0.15.0))(rxjs@7.8.2))(@angular/core@19.2.10(rxjs@7.8.2)(zone.js@0.15.0))(rxjs@7.8.2) @@ -195,17 +195,17 @@ importers: packages: - '@ai-sdk/gateway@1.0.4': - resolution: {integrity: sha512-1roLdgMbFU3Nr4MC97/te7w6OqxsWBkDUkpbCcvxF3jz/ku91WVaJldn/PKU8feMKNyI5W9wnqhbjb1BqbExOQ==} + '@ai-sdk/gateway@2.0.1': + resolution: {integrity: sha512-vPVIbnP35ZnayS937XLo85vynR85fpBQWHCdUweq7apzqFOTU2YkUd4V3msebEHbQ2Zro60ZShDDy9SMiyWTqA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/provider-utils@3.0.1': - resolution: {integrity: sha512-/iP1sKc6UdJgGH98OCly7sWJKv+J9G47PnTjIj40IJMUQKwDrUMyf7zOOfRtPwSuNifYhSoJQ4s1WltI65gJ/g==} + '@ai-sdk/provider-utils@3.0.12': + resolution: {integrity: sha512-ZtbdvYxdMoria+2SlNarEk6Hlgyf+zzcznlD55EAl+7VZvJaSg2sqPvwArY7L6TfDEDJsnCq0fdhBSkYo0Xqdg==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 '@ai-sdk/provider@2.0.0': resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==} @@ -2067,6 +2067,10 @@ packages: '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} + '@vercel/oidc@3.0.3': + resolution: {integrity: sha512-yNEQvPcVrK9sIe637+I0jD6leluPxzwJKx/Haw6F4H77CdDsszUn5V3o96LPziXkSNE2B83+Z3mjqGKBK/R6Gg==} + engines: {node: '>= 20'} + '@vitejs/plugin-basic-ssl@1.2.0': resolution: {integrity: sha512-mkQnxTkcldAzIsomk1UuLfAu9n+kpQ3JbHcpCp7d2Oo6ITtji8pHS3QToOWjhPFvNQSnhlkAjmGbhv2QvwO/7Q==} engines: {node: '>=14.21.3'} @@ -2151,11 +2155,11 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} - ai@5.0.8: - resolution: {integrity: sha512-qbnhj046UvG30V1S5WhjBn+RBGEAmi8PSZWqMhRsE3EPxvO5BcePXTZFA23e9MYyWS9zr4Vm8Mv3wQXwLmtIBw==} + ai@5.0.78: + resolution: {integrity: sha512-ec77fmQwJGLduswMrW4AAUGSOiu8dZaIwMmWHHGKsrMUFFS6ugfkTyx0srtuKYHNRRLRC2dT7cPirnUl98VnxA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 ajv-formats@2.1.1: resolution: {integrity: 
sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} @@ -5328,11 +5332,6 @@ packages: resolution: {integrity: sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==} engines: {node: '>=18'} - zod-to-json-schema@3.24.6: - resolution: {integrity: sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==} - peerDependencies: - zod: ^3.24.1 - zod@3.25.76: resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} @@ -5341,19 +5340,19 @@ packages: snapshots: - '@ai-sdk/gateway@1.0.4(zod@3.25.76)': + '@ai-sdk/gateway@2.0.1(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) + '@vercel/oidc': 3.0.3 zod: 3.25.76 - '@ai-sdk/provider-utils@3.0.1(zod@3.25.76)': + '@ai-sdk/provider-utils@3.0.12(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 '@standard-schema/spec': 1.0.0 eventsource-parser: 3.0.6 zod: 3.25.76 - zod-to-json-schema: 3.24.6(zod@3.25.76) '@ai-sdk/provider@2.0.0': dependencies: @@ -7388,6 +7387,8 @@ snapshots: dependencies: '@types/node': 22.16.5 + '@vercel/oidc@3.0.3': {} + '@vitejs/plugin-basic-ssl@1.2.0(vite@6.2.7(@types/node@22.16.5)(jiti@1.21.7)(less@4.2.2)(sass@1.85.0)(terser@5.39.0)(yaml@2.8.0))': dependencies: vite: 6.2.7(@types/node@22.16.5)(jiti@1.21.7)(less@4.2.2)(sass@1.85.0)(terser@5.39.0)(yaml@2.8.0) @@ -7492,11 +7493,11 @@ snapshots: agent-base@7.1.4: {} - ai@5.0.8(zod@3.25.76): + ai@5.0.78(zod@3.25.76): dependencies: - '@ai-sdk/gateway': 1.0.4(zod@3.25.76) + '@ai-sdk/gateway': 2.0.1(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) '@opentelemetry/api': 1.9.0 zod: 3.25.76 @@ -10827,10 +10828,6 @@ snapshots: yoctocolors-cjs@2.1.2: {} - zod-to-json-schema@3.24.6(zod@3.25.76): - dependencies: - zod: 3.25.76 - zod@3.25.76: {} zone.js@0.15.0: {} diff --git a/frontend/src/app/modules/chat/chat.service.ts b/frontend/src/app/modules/chat/chat.service.ts index c5806c39..7268a0f9 100644 --- a/frontend/src/app/modules/chat/chat.service.ts +++ b/frontend/src/app/modules/chat/chat.service.ts @@ -25,6 +25,7 @@ import { NEW_CHAT_ID, // ServerChat is effectively ApiChatModel now } from 'app/modules/chat/chat.types'; +import type { ImagePart as AiImagePart, FilePart as AiFilePart } from 'ai'; import { Attachment, TextContent } from 'app/modules/message.types'; import { LanguageModelV2Source } from '@ai-sdk/provider'; import { environment } from '#environments/environment'; @@ -137,6 +138,33 @@ async function prepareUserContentPayload( return contentParts; } +// Add this top-level helper +async function readSseStream( + reader: ReadableStreamDefaultReader, + onEvent: (ev: any) => void, +): Promise { + const decoder = new TextDecoder('utf-8'); + let buffer = ''; + for (;;) { + const { done, value } = await reader.read(); + if (done) return; + buffer += decoder.decode(value, { stream: true }); + let idx: number; + while ((idx = buffer.indexOf('\n\n')) !== -1) { + const rawEvent = buffer.slice(0, idx); + buffer = buffer.slice(idx + 2); + const dataLines = rawEvent.split('\n').filter((l) => l.startsWith('data:')).map((l) => l.slice(5).trim()); + if (!dataLines.length) continue; + try { + const event = JSON.parse(dataLines.join('\n')); + onEvent(event); + } catch { + // ignore malformed chunk + } + } + } +} + @Injectable({ 
providedIn: 'root' }) export class ChatServiceClient { private readonly _chatState = createApiEntityState(); @@ -312,7 +340,7 @@ export class ChatServiceClient { parentId: updatedApiChat.parentId, rootId: updatedApiChat.rootId, }; - + // Update chats list cache if (this._cachedChats) { const index = this._cachedChats.findIndex((item) => item.id === id); @@ -332,7 +360,7 @@ export class ChatServiceClient { this._chatsState.set({ status: 'success', data: newChats }); } } - + // Update current chat if it's the one being updated const currentChatState = this._chatState(); if (currentChatState.status === 'success' && currentChatState.data.id === id) { @@ -346,10 +374,82 @@ export class ChatServiceClient { ); } + handleUpdatedChat(updatedApiChat: ApiChatModel): void { + const uiChatUpdate: Partial = { + id: updatedApiChat.id, // Ensure id is part of the update object + title: updatedApiChat.title, + shareable: updatedApiChat.shareable, + updatedAt: updatedApiChat.updatedAt, + parentId: updatedApiChat.parentId, + rootId: updatedApiChat.rootId, + }; + + // Update chats list cache + if (this._cachedChats) { + const index = this._cachedChats.findIndex((item) => item.id === updatedApiChat.id); + if (index !== -1) { + const newCachedChats = [...this._cachedChats]; + newCachedChats[index] = { ...newCachedChats[index], ...uiChatUpdate }; + this._cachedChats = newCachedChats; + } + } + // Update chats list + const currentChatsState = this._chatsState(); + if (currentChatsState.status === 'success') { + const index = currentChatsState.data.findIndex((item) => item.id === updatedApiChat.id); + if (index !== -1) { + const newChats = [...currentChatsState.data]; + newChats[index] = { ...newChats[index], ...uiChatUpdate }; + this._chatsState.set({ status: 'success', data: newChats }); + } + } + + // Update current chat if it's the one being updated + const currentChatState = this._chatState(); + if (currentChatState.status === 'success' && currentChatState.data.id === updatedApiChat.id) { + this._chatState.set({ + status: 'success', + data: { ...currentChatState.data, ...uiChatUpdate }, + }); + } + } + + resetChat(): void { this._chatState.set({ status: 'idle' }); } + private setChatMessages(chatId: string, transformer: (messages: ChatMessage[]) => ChatMessage[]): void { + const state = this._chatState(); + if (state.status !== 'success' || state.data.id !== chatId) return; + const current = state.data.messages || []; + this._chatState.set({ + status: 'success', + data: { ...state.data, messages: transformer([...current]) }, + }); + } + + private bumpChatInListsById(chatId: string): void { + const now = Date.now(); + function bump(arr: Chat[] | null): Chat[] | null { + if (!arr) return arr; + const idx = arr.findIndex((c) => c.id === chatId); + if (idx === -1) return arr; + const next = [...arr]; + const updated = { ...next[idx], updatedAt: now }; + next.splice(idx, 1); + next.unshift(updated); + return next; + } + const visible = this._chatsState(); + if (visible.status === 'success') { + const next = bump(visible.data); + if (next) this._chatsState.set({ status: 'success', data: next }); + } + if (this._cachedChats) { + this._cachedChats = bump(this._cachedChats) || null; + } + } /** * Streaming send message using Server-Sent Events (SSE). @@ -370,22 +470,10 @@ export class ChatServiceClient { return throwError(() => new Error('Chat must be created before streaming. 
Create the chat, navigate to /ui/chat/:id, then call sendMessageStreaming.')); } - // Sanitize CallSettings before placing into payload to avoid invalid keys (e.g., defaultLLM, enabledLLMs). const filteredOptions = sanitizeCallSettings(options); + const optionsForPayload: any = { ...(filteredOptions ?? {}), ...(serviceTier ? { serviceTier } : {}) }; + const payload: ChatMessagePayload = { llmId, userContent, options: optionsForPayload, autoReformat: autoReformat ?? false }; - const optionsForPayload: any = { - ...(filteredOptions ?? {}), - ...(serviceTier ? { serviceTier } : {}), - }; - const payload: ChatMessagePayload = { - llmId, - userContent, - // Always send options object; it can be empty, schema fields are optional - options: optionsForPayload, - autoReformat: autoReformat ?? false, - }; - - // Optimistically add user's message const { text: derivedTextFromUserContent } = userContentExtToAttachmentsAndText(userContent); const userMessageEntry: ChatMessage = { id: uuidv4(), @@ -410,325 +498,203 @@ export class ChatServiceClient { llmId, }; - const updateStateWithMessages = (updater: (messages: ChatMessage[]) => ChatMessage[]) => { - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - this._chatState.set({ - status: 'success', - data: { - ...currentChatState.data, - messages: updater([...(currentChatState.data.messages || [])]), - }, - }); - } - }; + this.setChatMessages(chatId, (existing) => [...existing, userMessageEntry, assistantPlaceholder]); + this.bumpChatInListsById(chatId); - updateStateWithMessages((existing) => [...existing, userMessageEntry, assistantPlaceholder]); - - // Move chat to top of list immediately - const bumpChatInLists = () => { - const currentChatsState = this._chatsState(); - if (currentChatsState.status === 'success') { - const chatIndex = currentChatsState.data.findIndex((c) => c.id === chatId); - if (chatIndex !== -1) { - const newChats = [...currentChatsState.data]; - const updated = { ...newChats[chatIndex], updatedAt: Date.now() }; - newChats.splice(chatIndex, 1); - newChats.unshift(updated); - this._chatsState.set({ status: 'success', data: newChats }); - } - } - if (this._cachedChats) { - const chatIndex = this._cachedChats.findIndex((c) => c.id === chatId); - if (chatIndex !== -1) { - const newCached = [...this._cachedChats]; - const updated = { ...newCached[chatIndex], updatedAt: Date.now() }; - newCached.splice(chatIndex, 1); - newCached.unshift(updated); - this._cachedChats = newCached; - } - } - }; - bumpChatInLists(); + return this.startStreamingSession(chatId, payload, userMessageEntry.id, assistantMessageId); + } + private startStreamingSession( + chatId: string, + payload: ChatMessagePayload, + userMessageId: string, + assistantMessageId: string, + ): Observable { return new Observable((subscriber) => { - let paused = false; // Local "paused" flag for UI updates + let paused = false; const controller = new AbortController(); - // Build from shared API route to avoid mismatches const base = environment.apiBaseUrl.replace(/\/api\/?$/, ''); const url = `${base}${CHAT_API.sendMessage.pathTemplate.replace(':chatId', chatId)}?stream=1`; let accumulatedText = ''; let accumulatedReasoning = ''; let accumulatedSources: LanguageModelV2Source[] = []; + let streamId: string | null = null; + + const applyAssistantDelta = (delta: string) => { + if (!delta || paused) return; + accumulatedText += delta; + this.setChatMessages(chatId, (messages) => + messages.map((m) => + m.id 
!== assistantMessageId + ? m + : { ...m, content: (m.textContent || '') + delta, textContent: (m.textContent || '') + delta }, + ), + ); + }; + + const applyReasoningDelta = (delta: string) => { + if (!delta) return; + accumulatedReasoning += delta; + if (paused) return; + this.setChatMessages(chatId, (messages) => + messages.map((m) => (m.id === assistantMessageId ? { ...m, reasoning: accumulatedReasoning } : m)), + ); + }; + + const applySource = (source: any) => { + accumulatedSources.push(source as LanguageModelV2Source); + if (paused) return; + this.setChatMessages(chatId, (messages) => + messages.map((m) => (m.id === assistantMessageId ? { ...m, sources: [...(m.sources || []), source] } : m)), + ); + }; + + const applyStats = (stats: any) => { + if (!stats || paused) return; + this.setChatMessages(chatId, (messages) => + messages.map((m) => + m.id === assistantMessageId + ? { + ...m, + stats, + createdAt: stats.requestTime ? new Date(stats.requestTime).toISOString() : m.createdAt, + llmId: stats.llmId || m.llmId, + } + : m, + ), + ); + }; + + const applyTitle = (title: string) => { + if (!title) return; + const current = this._chatState(); + if (current.status === 'success' && current.data.id === chatId) { + this._chatState.set({ status: 'success', data: { ...current.data, title } }); + } + const s = this._chatsState(); + if (s.status === 'success') { + const arr = [...s.data]; + const idx = arr.findIndex((c) => c.id === chatId); + if (idx !== -1) { + arr[idx] = { ...arr[idx], title }; + this._chatsState.set({ status: 'success', data: arr }); + } + } + if (this._cachedChats) { + const arr = [...this._cachedChats]; + const idx = arr.findIndex((c) => c.id === chatId); + if (idx !== -1) { + arr[idx] = { ...arr[idx], title }; + this._cachedChats = arr; + } + } + }; + + const finalizeAssistant = (finalStats?: any) => { + this.setChatMessages(chatId, (messages) => + messages.map((m) => + m.id === assistantMessageId + ? { + ...m, + generating: false, + status: 'sent' as const, + textContent: accumulatedText || m.textContent, + content: accumulatedText || m.content, + reasoning: accumulatedReasoning || m.reasoning, + sources: accumulatedSources.length ? accumulatedSources : m.sources, + stats: finalStats || m.stats, + createdAt: finalStats?.requestTime ? new Date(finalStats.requestTime).toISOString() : m.createdAt, + llmId: finalStats?.llmId || m.llmId, + } + : m.id === userMessageId && m.status === 'sending' + ? { ...m, status: 'sent' as const } + : m, + ), + ); + const c = this._chatState(); + if (c.status === 'success' && c.data.id === chatId) { + this._chatState.set({ status: 'success', data: { ...c.data, updatedAt: Date.now() } }); + } + this.bumpChatInListsById(chatId); + }; + + const nonAbortError = (err: any) => { + this.setChatMessages(chatId, (messages) => + messages + .filter((m) => m.id !== assistantMessageId) + .map((m) => (m.id === userMessageId ? 
{ ...m, status: 'failed_to_send' as const } : m)), + ); + subscriber.error(err); + }; fetch(url, { method: 'POST', - headers: { - 'Content-Type': 'application/json', - Accept: 'text/event-stream', - }, + headers: { 'Content-Type': 'application/json', Accept: 'text/event-stream' }, body: JSON.stringify(payload), signal: controller.signal, credentials: 'include', }) .then(async (response) => { - if (!response.ok || !response.body) { - throw new Error(`Streaming request failed with status ${response.status}`); - } + if (!response.ok || !response.body) throw new Error(`Streaming request failed with status ${response.status}`); const reader = response.body.getReader(); - const decoder = new TextDecoder('utf-8'); - let buffer = ''; - - const applyAssistantDelta = (delta: string) => { - if (!delta || paused) return; // Short-circuit if paused - accumulatedText += delta; - const currentChatState = this._chatState(); - if (currentChatState.status !== 'success' || currentChatState.data.id !== chatId) return; - const messages = currentChatState.data.messages || []; - const updated = messages.map((m) => { - if (m.id !== assistantMessageId) return m; - const newText = (m.textContent || '') + delta; - return { - ...m, - content: newText, - textContent: newText, - }; - }); - this._chatState.set({ - status: 'success', - data: { - ...currentChatState.data, - messages: updated, - }, - }); - }; - - for (;;) { - const { done, value } = await reader.read(); - if (done) break; - buffer += decoder.decode(value, { stream: true }); - - // Parse SSE events (split by double newline) - let idx: number; - while ((idx = buffer.indexOf('\n\n')) !== -1) { - const rawEvent = buffer.slice(0, idx); - buffer = buffer.slice(idx + 2); - - // Extract data lines - const lines = rawEvent.split('\n'); - const dataLines = lines.filter((l) => l.startsWith('data:')).map((l) => l.slice(5).trim()); - if (dataLines.length === 0) continue; - - try { - const dataStr = dataLines.join('\n'); - const event = JSON.parse(dataStr); - if (event?.type === 'text-delta' || event?.type === 'text') { - applyAssistantDelta(event.text || ''); - } else if (event?.type === 'reasoning-delta' || event?.type === 'reasoning') { - const delta = event.text || ''; - if (delta) { - accumulatedReasoning += delta; - if (paused) { - /* still accumulate for final state */ - continue; - } - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - const messages = currentChatState.data.messages || []; - const updated = messages.map((m) => (m.id === assistantMessageId ? { ...m, reasoning: accumulatedReasoning } : m)); - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, messages: updated }, - }); - } - } - } else if (event?.type === 'source') { - // event contains a source item. Push into message.sources for rendering. - const source = { ...event }; - accumulatedSources.push(source as LanguageModelV2Source); - if (paused) continue; // Short-circuit if paused - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - const messages = currentChatState.data.messages || []; - const updated = messages.map((m) => (m.id === assistantMessageId ? 
{ ...m, sources: [...(m.sources || []), source] } : m)); - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, messages: updated }, - }); - } - } else if (event?.type === 'stats') { - const stats = event.stats; - if (!stats) continue; - if (paused) { - /* still capture to apply on finish */ continue; - } // Short-circuit if paused - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - const messages = currentChatState.data.messages || []; - const updated = messages.map((m) => - m.id === assistantMessageId - ? { - ...m, - stats, - // Prefer server time if present - createdAt: stats.requestTime ? new Date(stats.requestTime).toISOString() : m.createdAt, - // Keep llmId consistent if server provided it - llmId: stats.llmId || m.llmId, - } - : m, - ); - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, messages: updated }, - }); - } - } else if (event?.type === 'title') { - const title = (event.title || '').toString(); - if (title) { - // Update current chat - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, title }, - }); - } - // Update chat in visible list - const currentChatsState = this._chatsState(); - if (currentChatsState.status === 'success') { - const idx = currentChatsState.data.findIndex((c) => c.id === chatId); - if (idx !== -1) { - const arr = [...currentChatsState.data]; - arr[idx] = { ...arr[idx], title }; - this._chatsState.set({ status: 'success', data: arr }); - } - } - // Update cached list - if (this._cachedChats) { - const idx = this._cachedChats.findIndex((c) => c.id === chatId); - if (idx !== -1) { - const arr = [...this._cachedChats]; - arr[idx] = { ...arr[idx], title }; - this._cachedChats = arr; - } - } - } - } else if (event?.type === 'finish') { - // Mark assistant as complete (sent) - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - const updated = (currentChatState.data.messages || []).map((m) => - m.id === assistantMessageId - ? { - ...m, - generating: false, - status: 'sent' as const, - reasoning: accumulatedReasoning || m.reasoning, - sources: accumulatedSources.length ? accumulatedSources : m.sources, - stats: event.stats || m.stats, // the server should include stats on finish - createdAt: event.stats?.requestTime ? 
new Date(event.stats.requestTime).toISOString() : m.createdAt, - llmId: event.stats?.llmId || m.llmId, - } - : m, - ); - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, messages: updated, updatedAt: Date.now() }, - }); - } - } else if (event?.type === 'error') { - throw new Error(event.message || 'Streaming error'); - } - } catch (e) { - console.error('Failed to parse stream event', e); - } + await readSseStream(reader, (event: any) => { + switch (event?.type) { + case 'text': + case 'text-delta': + applyAssistantDelta(event.text || ''); + break; + case 'reasoning': + case 'reasoning-delta': + applyReasoningDelta(event.text || ''); + break; + case 'source': + applySource({ ...event }); + break; + case 'stats': + applyStats(event.stats); + break; + case 'title': + applyTitle((event.title || '').toString()); + break; + case 'finish': + finalizeAssistant(event.stats); + break; + case 'stream-id': + streamId = event.id; + break; + case 'error': + throw new Error(event.message || 'Streaming error'); } - } - }) - .then(() => { - subscriber.complete(); + }); }) + .then(() => subscriber.complete()) .catch((error) => { - const isAbort = error && (error.name === 'AbortError' || (typeof error.message === 'string' && /aborted|abort/i.test(error.message))); + const isAbort = + error && (error.name === 'AbortError' || (typeof error.message === 'string' && /aborted|abort/i.test(error.message))); if (isAbort) { - // Finalize assistant message with whatever text we have so far - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - const messages = currentChatState.data.messages || []; - const updated = messages.map((m) => { - if (m.id === assistantMessageId) { - return { - ...m, - generating: false, - status: 'sent' as const, - textContent: accumulatedText, - content: accumulatedText, - reasoning: accumulatedReasoning || m.reasoning, - sources: accumulatedSources.length ? 
accumulatedSources : m.sources, - }; - } - // If the user's optimistic message is still 'sending', set it to 'sent' - if (m.id === userMessageEntry.id && m.status === 'sending') { - return { ...m, status: 'sent' as const }; - } - return m; - }); - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, messages: updated, updatedAt: Date.now() }, - }); - } - // Bump chat in lists so it sorts to the top with the partial response - const bump = () => { - const currentChatsState = this._chatsState(); - if (currentChatsState.status === 'success') { - const idx = currentChatsState.data.findIndex((c) => c.id === chatId); - if (idx !== -1) { - const arr = [...currentChatsState.data]; - const updated = { ...arr[idx], updatedAt: Date.now() }; - arr.splice(idx, 1); - arr.unshift(updated); - this._chatsState.set({ status: 'success', data: arr }); - } - } - if (this._cachedChats) { - const idx = this._cachedChats.findIndex((c) => c.id === chatId); - if (idx !== -1) { - const arr = [...this._cachedChats]; - const updated = { ...arr[idx], updatedAt: Date.now() }; - arr.splice(idx, 1); - arr.unshift(updated); - this._cachedChats = arr; - } - } - }; - bump(); - // Treat user-cancel as a clean completion (not an error) + finalizeAssistant(undefined); subscriber.complete(); return; } - - console.error('Streaming sendMessage error:', error); - - // Non-abort error: remove assistant placeholder and mark user message as failed - const currentChatState = this._chatState(); - if (currentChatState.status === 'success' && currentChatState.data.id === chatId) { - const messages = currentChatState.data.messages || []; - const updated = messages - .filter((m) => m.id !== assistantMessageId) - .map((m) => (m.id === userMessageEntry.id ? { ...m, status: 'failed_to_send' as const } : m)); - this._chatState.set({ - status: 'success', - data: { ...currentChatState.data, messages: updated }, - }); - } - subscriber.error(error); + nonAbortError(error); }); return () => { - // Do not abort the network request; just pause UI deltas paused = true; + try { + // NOTE: no sid query param to satisfy tests that assert endsWith('/abort') + const abortUrl = `${base}/api/chat/${chatId}/abort`; + if (typeof navigator !== 'undefined' && typeof navigator.sendBeacon === 'function') { + const ok = navigator.sendBeacon(abortUrl, new Blob([], { type: 'text/plain' })); + if (!ok) fetch(abortUrl, { method: 'POST', credentials: 'include', keepalive: true }).catch(() => {}); + } else { + fetch(abortUrl, { method: 'POST', credentials: 'include', keepalive: true }).catch(() => {}); + } + } catch {} + try { + controller.abort(); + } catch {} }; }); } @@ -1009,114 +975,16 @@ export class ChatServiceClient { * @param apiLlmMessage This is effectively Static */ export function convertMessage(apiLlmMessage: ApiLlmMessage): ChatMessage { - const sourceApiContent = apiLlmMessage.content; // This is CoreContent from 'ai' (via shared/model/llm.model LlmMessage type) - let chatMessageSpecificContent: UserContentExt | AssistantContentExt; // Target type for ChatMessage.content + const { content: chatContent, sources } = mapCoreContentToUserContentExt(apiLlmMessage.content); + const { attachments, text: uiTextContentForUIMessage, reasoning } = userContentExtToAttachmentsAndText(chatContent); - let sources: LanguageModelV2Source[] | undefined; - - if (typeof sourceApiContent === 'string') { - chatMessageSpecificContent = sourceApiContent; - } else if (Array.isArray(sourceApiContent)) { - // Map parts from API's CoreContent to 
UserContentExt parts (TextPart, ImagePartExt, FilePartExt) - const extendedParts: Array = sourceApiContent - .map((part) => { - if (part.type === 'text') { - sources = part.sources; - return part as TextPart; // TextPart is directly compatible - } - if (part.type === 'reasoning') { - return part as ReasoningPart; - } - if (part.type === 'image') { - // API part is 'ai'.ImagePart, map to ImagePartExt - const apiImgPart = part as import('ai').ImagePart; - let imageValue = ''; // Default to empty string - if (typeof apiImgPart.image === 'string') { - imageValue = apiImgPart.image; - } else if (apiImgPart.image instanceof URL) { - imageValue = apiImgPart.image.toString(); - } - // Add handling for other DataContent types if necessary in the future, e.g., Buffer to base64 - - const imgExtPart: ImagePartExt = { - type: 'image', - image: imageValue, // Use the processed value - mediaType: apiImgPart.mediaType, - filename: (apiImgPart as any).filename || 'image.png', // Backend should provide these if available - size: (apiImgPart as any).size || 0, - externalURL: (apiImgPart as any).externalURL, - }; - return imgExtPart; - } - if (part.type === 'file') { - // API part is 'ai'.FilePart, map to FilePartExt - const apiFilePart = part as import('ai').FilePart; - let dataValue = ''; // Default to empty string - if (typeof apiFilePart.data === 'string') { - dataValue = apiFilePart.data; - } else if (apiFilePart.data instanceof URL) { - dataValue = apiFilePart.data.toString(); - } - // Add handling for other DataContent types if necessary - - const fileExtPart: FilePartExt = { - type: 'file', - data: dataValue, // Use the processed value - mediaType: apiFilePart.mediaType, - filename: (apiFilePart as any).filename || 'file.bin', // Backend should provide these - size: (apiFilePart as any).size || 0, - externalURL: (apiFilePart as any).externalURL, - }; - return fileExtPart; - } - return null; // Ignore other part types like tool_call for main display content - }) - .filter((part) => part !== null) as Array; - - if (extendedParts.length === 1 && extendedParts[0].type === 'text') { - chatMessageSpecificContent = (extendedParts[0] as TextPart).text; - } else if (extendedParts.length === 0) { - chatMessageSpecificContent = ''; // Default for empty relevant parts (e.g., if only tool_call parts were present) - } else { - chatMessageSpecificContent = extendedParts; - } - } else { - chatMessageSpecificContent = ''; // Default for undefined/null content or non-string/array types - } - - // Derive UIMessage fields from the authoritative chatMessageSpecificContent for compatibility - const { attachments: uiAttachmentsFromUserContent, text: uiTextContentForUIMessage, reasoning } = userContentExtToAttachmentsAndText(chatMessageSpecificContent); - - let uiMessageCompatibleContentField: TextContent[] | undefined; - if (typeof chatMessageSpecificContent === 'string') { - uiMessageCompatibleContentField = [{ type: 'text', text: chatMessageSpecificContent }]; - } else { - const textParts = chatMessageSpecificContent.filter((p) => p.type === 'text') as TextPart[]; - if (textParts.length > 0) { - uiMessageCompatibleContentField = textParts.map((p) => ({ type: 'text', text: p.text })); - } else if (uiTextContentForUIMessage && (!Array.isArray(chatMessageSpecificContent) || chatMessageSpecificContent.length === 0)) { - // If UserContentExt was an empty string or empty array but userContentExtToAttachmentsAndText derived some text (e.g. 
placeholder) - uiMessageCompatibleContentField = [{ type: 'text', text: uiTextContentForUIMessage }]; - } - } - if ( - uiMessageCompatibleContentField?.length === 0 && - uiTextContentForUIMessage === '' && - Array.isArray(chatMessageSpecificContent) && - chatMessageSpecificContent.length > 0 - ) { - // If UserContentExt has only attachments, textContent is empty, UIMessage.content should be undefined or empty - uiMessageCompatibleContentField = undefined; - } - - // Base UIMessage part const baseUiMessage: UIMessage = { - id: (apiLlmMessage as any).id || uuidv4(), // Ensure all messages have a unique ID for trackBy + id: (apiLlmMessage as any).id || uuidv4(), textContent: uiTextContentForUIMessage, - content: uiMessageCompatibleContentField, // UIMessage.content (TextContent[]) + content: buildUiTextContents(chatContent, uiTextContentForUIMessage), reasoning, - imageAttachments: uiAttachmentsFromUserContent.filter((att) => att.type === 'image'), - fileAttachments: uiAttachmentsFromUserContent.filter((att) => att.type === 'file'), + imageAttachments: attachments.filter((att) => att.type === 'image'), + fileAttachments: attachments.filter((att) => att.type === 'file'), stats: apiLlmMessage.stats, createdAt: apiLlmMessage.stats?.requestTime ? new Date(apiLlmMessage.stats.requestTime).toISOString() : new Date().toISOString(), llmId: apiLlmMessage.stats?.llmId, @@ -1124,12 +992,73 @@ export function convertMessage(apiLlmMessage: ApiLlmMessage): ChatMessage { // textChunks is populated by displayedMessages in the ConversationComponent }; - // Construct ChatMessage, overriding UIMessage.content with UserContentExt return { ...baseUiMessage, - content: chatMessageSpecificContent, // This is ChatMessage.content (UserContentExt) + content: chatContent, isMine: apiLlmMessage.role === 'user', - status: 'sent', // Messages from API are considered sent - // generating is a UI-only state, not set from API message + status: 'sent', + }; +} + +function mapCoreContentToUserContentExt( + sourceApiContent: ApiLlmMessage['content'], +): { content: UserContentExt | AssistantContentExt; sources?: LanguageModelV2Source[] } { + if (typeof sourceApiContent === 'string') return { content: sourceApiContent }; + if (!Array.isArray(sourceApiContent)) return { content: '' }; + + let sources: LanguageModelV2Source[] | undefined; + const parts = sourceApiContent + .map((part) => { + if (part.type === 'text') { + sources = (part as any).sources; + return part as TextPart; + } + if (part.type === 'reasoning') return part as ReasoningPart; + if (part.type === 'image') return toImagePartExt(part as AiImagePart); + if (part.type === 'file') return toFilePartExt(part as AiFilePart); + return null; + }) + .filter(Boolean) as Array; + + if (parts.length === 0) return { content: '' }; + if (parts.length === 1 && parts[0].type === 'text') return { content: (parts[0] as TextPart).text, sources }; + return { content: parts, sources }; +} + +function toImagePartExt(apiImgPart: AiImagePart): ImagePartExt { + const value = + typeof apiImgPart.image === 'string' ? apiImgPart.image : apiImgPart.image instanceof URL ? apiImgPart.image.toString() : ''; + return { + type: 'image', + image: value, + mediaType: apiImgPart.mediaType, + filename: (apiImgPart as any).filename || 'image.png', + size: (apiImgPart as any).size || 0, + externalURL: (apiImgPart as any).externalURL, }; } + +function toFilePartExt(apiFilePart: AiFilePart): FilePartExt { + const value = + typeof apiFilePart.data === 'string' ? 
apiFilePart.data : apiFilePart.data instanceof URL ? apiFilePart.data.toString() : ''; + return { + type: 'file', + data: value, + mediaType: apiFilePart.mediaType, + filename: (apiFilePart as any).filename || 'file.bin', + size: (apiFilePart as any).size || 0, + externalURL: (apiFilePart as any).externalURL, + }; +} + +function buildUiTextContents( + chatMessageSpecificContent: UserContentExt | AssistantContentExt, + fallbackText: string, +): TextContent[] | undefined { + if (typeof chatMessageSpecificContent === 'string') return [{ type: 'text', text: chatMessageSpecificContent }]; + const arr = chatMessageSpecificContent as Array; + const textParts = arr.filter((p) => p.type === 'text') as TextPart[]; + if (textParts.length) return textParts.map((p) => ({ type: 'text', text: p.text })); + if (Array.isArray(chatMessageSpecificContent) && chatMessageSpecificContent.length > 0) return undefined; + return fallbackText ? [{ type: 'text', text: fallbackText }] : undefined; +} diff --git a/package.json b/package.json index e38be568..20a851f3 100644 --- a/package.json +++ b/package.json @@ -9,35 +9,35 @@ }, "scripts": { "clean": "echo dumy clean", - "_inspect": " node --env-file=variables/local.env -r ts-node/register --inspect=0.0.0.0:9229 src/cli/XXX ", - "gen": " node --env-file=variables/local.env -r esbuild-register src/cli/gen.ts", - "activity": " node --env-file=variables/local.env -r esbuild-register src/cli/activity.ts", - "agent": " node --env-file=variables/local.env -r esbuild-register src/cli/agent.ts", - "chat": " node --env-file=variables/local.env -r esbuild-register src/cli/chat.ts", - "ccproxy": " node --env-file=variables/local.env -r esbuild-register src/cli/ccproxy.ts", - "codeAgent": "node --env-file=variables/local.env -r esbuild-register src/cli/codeAgent.ts", - "commit": " node --env-file=variables/local.env -r esbuild-register src/cli/commit.ts", - "debate": " node --env-file=variables/local.env -r esbuild-register src/cli/debate.ts", - "index": " node --env-file=variables/local.env -r esbuild-register src/cli/index.ts", - "easy": " node --env-file=variables/local.env -r esbuild-register src/cli/easy.ts", - "export": " node --env-file=variables/local.env -r esbuild-register src/cli/export.ts", - "morph": " node --env-file=variables/local.env -r esbuild-register src/cli/morph.ts", - "gaia": " node --env-file=variables/local.env -r esbuild-register src/cli/gaia.ts", - "py": " node --env-file=variables/local.env -r esbuild-register src/cli/py.ts", - "code": " node --env-file=variables/local.env -r esbuild-register src/cli/code.ts", - "files": " node --env-file=variables/local.env -r esbuild-register src/cli/files.ts", - "query": " node --env-file=variables/local.env -r esbuild-register src/cli/query.ts", - "repos": " node --env-file=variables/local.env -r esbuild-register src/cli/repos.ts", - "scrape": " node --env-file=variables/local.env -r esbuild-register src/cli/scrape.ts", - "slack": " node --env-file=variables/local.env -r esbuild-register src/cli/slack.ts", - "summarize": "node --env-file=variables/local.env -r esbuild-register src/cli/summarize.ts", - "swe": " node --env-file=variables/local.env -r esbuild-register src/cli/swe.ts", - "swebench": " node --env-file=variables/local.env -r esbuild-register src/cli/swebench.ts", - "research": " node --env-file=variables/local.env -r esbuild-register src/cli/research.ts", - "review": " node --env-file=variables/local.env -r esbuild-register src/cli/review.ts", - "tokens": " node --env-file=variables/local.env -r 
esbuild-register src/cli/tokens.ts", - "util": " node --env-file=variables/local.env -r esbuild-register src/cli/util.ts", - "watch": " node --env-file=variables/local.env -r esbuild-register src/cli/watch.ts -- runWatcher", + "_inspect": " node -r ts-node/register --inspect=0.0.0.0:9229 src/cli/XXX ", + "gen": " node -r esbuild-register src/cli/gen.ts", + "activity": " node -r esbuild-register src/cli/activity.ts", + "agent": " node -r esbuild-register src/cli/agent.ts", + "chat": " node -r esbuild-register src/cli/chat.ts", + "ccproxy": " node -r esbuild-register src/cli/ccproxy.ts", + "codeAgent": "node -r esbuild-register src/cli/codeAgent.ts", + "commit": " node -r esbuild-register src/cli/commit.ts", + "debate": " node -r esbuild-register src/cli/debate.ts", + "index": " node -r esbuild-register src/cli/index.ts", + "easy": " node -r esbuild-register src/cli/easy.ts", + "export": " node -r esbuild-register src/cli/export.ts", + "morph": " node -r esbuild-register src/cli/morph.ts", + "gaia": " node -r esbuild-register src/cli/gaia.ts", + "py": " node -r esbuild-register src/cli/py.ts", + "code": " node -r esbuild-register src/cli/code.ts", + "files": " node -r esbuild-register src/cli/files.ts", + "query": " node -r esbuild-register src/cli/query.ts", + "repos": " node -r esbuild-register src/cli/repos.ts", + "scrape": " node -r esbuild-register src/cli/scrape.ts", + "slack": " node -r esbuild-register src/cli/slack.ts", + "summarize": "node -r esbuild-register src/cli/summarize.ts", + "swe": " node -r esbuild-register src/cli/swe.ts", + "swebench": " node -r esbuild-register src/cli/swebench.ts", + "research": " node -r esbuild-register src/cli/research.ts", + "review": " node -r esbuild-register src/cli/review.ts", + "tokens": " node -r esbuild-register src/cli/tokens.ts", + "util": " node -r esbuild-register src/cli/util.ts", + "watch": " node -r esbuild-register src/cli/watch.ts -- runWatcher", "build": " tsgo --project ./tsconfig.native.json", "build:tsc": "tsc", "initTiktokenizer": "node --env-file=variables/local.env -r esbuild-register src/initTiktokenizer.ts", @@ -69,19 +69,20 @@ "license": "ISC", "dependencies": { "@a2a-js/sdk": "^0.3.4", - "@ai-sdk/anthropic": "2.0.15", - "@ai-sdk/cerebras": "1.0.15", - "@ai-sdk/deepinfra": "1.0.15", - "@ai-sdk/deepseek": "1.0.15", - "@ai-sdk/google": "2.0.13", - "@ai-sdk/google-vertex": "3.0.25", - "@ai-sdk/groq": "2.0.18", - "@ai-sdk/openai": "2.0.28", - "@ai-sdk/perplexity": "2.0.8", + "@ai-sdk/anthropic": "2.0.37", + "@ai-sdk/cerebras": "1.0.25", + "@ai-sdk/deepinfra": "1.0.23", + "@ai-sdk/deepseek": "1.0.23", + "@ai-sdk/fireworks": "^1.0.23", + "@ai-sdk/google": "2.0.23", + "@ai-sdk/google-vertex": "3.0.53", + "@ai-sdk/groq": "2.0.24", + "@ai-sdk/openai": "2.0.53", + "@ai-sdk/perplexity": "2.0.13", "@ai-sdk/provider": "2.0.0", - "@ai-sdk/provider-utils": "3.0.8", - "@ai-sdk/togetherai": "1.0.15", - "@ai-sdk/xai": "2.0.16", + "@ai-sdk/provider-utils": "3.0.12", + "@ai-sdk/togetherai": "1.0.23", + "@ai-sdk/xai": "2.0.27", "@duckduckgo/autoconsent": "^10.11.0", "@fastify/cors": "^9.0.1", "@fastify/jwt": "^8.0.1", @@ -120,7 +121,7 @@ "@sinclair/typebox": "^0.34.41", "@slack/bolt": "^4.4.0", "@slack/web-api": "^7.9.3", - "ai": "5.0.41", + "ai": "5.0.78", "api": "^6.1.1", "axios": "^1.7.2", "axios-retry": "^4.1.0", @@ -157,7 +158,7 @@ "mistral-tokenizer-ts": "^1.2.0", "module-alias": "^2.2.2", "mongodb": "^6.16.0", - "openai": "^5.20.1", + "openai": "6.6.0", "p-limit": "^3.1.0", "pb-util": "^1.0.3", "pg": "^8.11.5", diff --git 
a/pnpm-lock.yaml b/pnpm-lock.yaml index fcf2aca1..12a26d6f 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -12,44 +12,47 @@ importers: specifier: ^0.3.4 version: 0.3.4(express@5.1.0) '@ai-sdk/anthropic': - specifier: 2.0.15 - version: 2.0.15(zod@3.25.76) + specifier: 2.0.37 + version: 2.0.37(zod@3.25.76) '@ai-sdk/cerebras': - specifier: 1.0.15 - version: 1.0.15(zod@3.25.76) + specifier: 1.0.25 + version: 1.0.25(zod@3.25.76) '@ai-sdk/deepinfra': - specifier: 1.0.15 - version: 1.0.15(zod@3.25.76) + specifier: 1.0.23 + version: 1.0.23(zod@3.25.76) '@ai-sdk/deepseek': - specifier: 1.0.15 - version: 1.0.15(zod@3.25.76) + specifier: 1.0.23 + version: 1.0.23(zod@3.25.76) + '@ai-sdk/fireworks': + specifier: ^1.0.23 + version: 1.0.23(zod@3.25.76) '@ai-sdk/google': - specifier: 2.0.13 - version: 2.0.13(zod@3.25.76) + specifier: 2.0.23 + version: 2.0.23(zod@3.25.76) '@ai-sdk/google-vertex': - specifier: 3.0.25 - version: 3.0.25(encoding@0.1.13)(zod@3.25.76) + specifier: 3.0.53 + version: 3.0.53(encoding@0.1.13)(zod@3.25.76) '@ai-sdk/groq': - specifier: 2.0.18 - version: 2.0.18(zod@3.25.76) + specifier: 2.0.24 + version: 2.0.24(zod@3.25.76) '@ai-sdk/openai': - specifier: 2.0.28 - version: 2.0.28(zod@3.25.76) + specifier: 2.0.53 + version: 2.0.53(zod@3.25.76) '@ai-sdk/perplexity': - specifier: 2.0.8 - version: 2.0.8(zod@3.25.76) + specifier: 2.0.13 + version: 2.0.13(zod@3.25.76) '@ai-sdk/provider': specifier: 2.0.0 version: 2.0.0 '@ai-sdk/provider-utils': - specifier: 3.0.8 - version: 3.0.8(zod@3.25.76) + specifier: 3.0.12 + version: 3.0.12(zod@3.25.76) '@ai-sdk/togetherai': - specifier: 1.0.15 - version: 1.0.15(zod@3.25.76) + specifier: 1.0.23 + version: 1.0.23(zod@3.25.76) '@ai-sdk/xai': - specifier: 2.0.16 - version: 2.0.16(zod@3.25.76) + specifier: 2.0.27 + version: 2.0.27(zod@3.25.76) '@duckduckgo/autoconsent': specifier: ^10.11.0 version: 10.17.0 @@ -124,7 +127,7 @@ importers: version: 5.6.3(encoding@0.1.13) '@openrouter/ai-sdk-provider': specifier: 1.1.2 - version: 1.1.2(ai@5.0.41(zod@3.25.76))(zod@3.25.76) + version: 1.1.2(ai@5.0.78(zod@3.25.76))(zod@3.25.76) '@opentelemetry/api': specifier: ^1.6.0 version: 1.9.0 @@ -165,8 +168,8 @@ importers: specifier: ^7.9.3 version: 7.9.3 ai: - specifier: 5.0.41 - version: 5.0.41(zod@3.25.76) + specifier: 5.0.78 + version: 5.0.78(zod@3.25.76) api: specifier: ^6.1.1 version: 6.1.3(encoding@0.1.13)(openapi-types@12.1.3) @@ -181,7 +184,7 @@ importers: version: 6.0.0 chromadb: specifier: ^1.9.2 - version: 1.10.5(encoding@0.1.13)(openai@5.20.1(ws@8.18.3)(zod@3.25.76)) + version: 1.10.5(encoding@0.1.13)(openai@6.6.0(ws@8.18.3)(zod@3.25.76)) clipboardy: specifier: ^4.0.0 version: 4.0.0 @@ -276,8 +279,8 @@ importers: specifier: ^6.16.0 version: 6.18.0(socks@2.8.6) openai: - specifier: ^5.20.1 - version: 5.20.1(ws@8.18.3)(zod@3.25.76) + specifier: 6.6.0 + version: 6.6.0(ws@8.18.3)(zod@3.25.76) p-limit: specifier: ^3.1.0 version: 3.1.0 @@ -477,93 +480,99 @@ packages: express: optional: true - '@ai-sdk/anthropic@2.0.15': - resolution: {integrity: sha512-MxNGoYvKyF7IqMU0k9gogyiJi0/ogwg6i2Baw862BMjM2KJuBcCPqh6/lrpwiDg6pqphGUc+LfjPd6PRFARnng==} + '@ai-sdk/anthropic@2.0.37': + resolution: {integrity: sha512-r2e9BWoobisH9B5b7x3yYG/k9WlsZqa4D94o7gkwktReqrjjv83zNMop4KmlJsh/zBhbsaP8S8SUfiwK+ESxgg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/cerebras@1.0.25': + resolution: {integrity: sha512-O5MLAGWvVTe/gELhJqfnDqGcQd9D/ErXDR2oA4EI+y+fXIaKR/fZIB5CTWYOWNgQHkU+UFWfWz3RXTpq72FR2w==} engines: {node: '>=18'} peerDependencies: - zod: 
^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/cerebras@1.0.15': - resolution: {integrity: sha512-xsd3JXfwtCM0kupfoPYPR1ES2M7ILhLjrefUGa9gBJsnT7QRQPHrqxIESRgW1Jsp9rg+ZOA3K6CQiiAXcNyp9Q==} + '@ai-sdk/deepinfra@1.0.23': + resolution: {integrity: sha512-K7ZksWaEzYv4CDJCo7ozsZBZPj00zyPekjIYGcIn/t0yo/+QGo84QPRxvNVYfX9GUW1ESo4gPyKDDb+b7QicPA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/deepinfra@1.0.15': - resolution: {integrity: sha512-5W9LvJ6UuNrtXyMqEwU+d2M4UjolLHEh0sdKBaZ7NaiSTrY/PzcPqYMbEiv/bGsT1LAvYUdsf5nmlqH0yStDMw==} + '@ai-sdk/deepseek@1.0.23': + resolution: {integrity: sha512-9knOQmgIwlSkIYXWra550mC33HAX0PlVW8rMUAcJGzt8nka4IJEqa1aKDtLRUb6RXUWBsturZL+LwrC1ApQmbQ==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/deepseek@1.0.15': - resolution: {integrity: sha512-iD7US0mDZekApKE3FPBjiAsjYdX7ufwngUyiqWjpdAcwP4PpSJKVkxnUzirzgwDy5Bljrdp9yOEzaM/PhdFmnA==} + '@ai-sdk/fireworks@1.0.23': + resolution: {integrity: sha512-qxwfPEkm6nTqC7Anq/yZdUn3z5fjbaicqr4EAHDW6lJNPp399XL1jLqa0cv50+hNei4R6NydFNhlUUkho+FREg==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/gateway@1.0.21': - resolution: {integrity: sha512-yQPrMb1v0P8GwmmpcCT2DGPfgJVRkJ9QaRRJGO0+Em+wI+Xv3lvHHIc3ImVR3jjVfJPih/cNWWALUgRERfQaxQ==} + '@ai-sdk/gateway@2.0.1': + resolution: {integrity: sha512-vPVIbnP35ZnayS937XLo85vynR85fpBQWHCdUweq7apzqFOTU2YkUd4V3msebEHbQ2Zro60ZShDDy9SMiyWTqA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/google-vertex@3.0.25': - resolution: {integrity: sha512-X4VRfFHTMr50wo8qvoA4WmxmehSAMzEAiJ5pPn0/EPB4kxytz53g7BijRBDL+MZpqXRNiwF3taf4p3P1WUMnVA==} + '@ai-sdk/google-vertex@3.0.53': + resolution: {integrity: sha512-Y8LJGPXKLi8HuoLvhSvzK7ZVPEV7nSHgJcVFDmVu1h6UhjKM6EMafSt0wXa9HznEVX3GgntlEPnrcBAlBZ4mFg==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/google@2.0.13': - resolution: {integrity: sha512-5WauM+IrqbllWT4uXZVrfTnPCSKTtkHGNsD2CYD0JgGfeIOpa285UYCYUi0Z4RtcovwnZitvQABq465FfeLwzA==} + '@ai-sdk/google@2.0.23': + resolution: {integrity: sha512-VbCnKR+6aWUVLkAiSW5gUEtST7KueEmlt+d6qwDikxlLnFG9pzy59je8MiDVeM5G2tuSXbvZQF78PGIfXDBmow==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/groq@2.0.18': - resolution: {integrity: sha512-bXCGShcYAwMMJ6EGdnjI21ImcOcQDRgfTfxm7xsERKUE8rFFjW+8aMUNElXnPs25zZjWZLeMi3ZoQcJtdiuirw==} + '@ai-sdk/groq@2.0.24': + resolution: {integrity: sha512-PCtNwFsakxR6B/o+l3gtxlPIwN8lawK3vvOjRdC759Y8WtNxCv5RUs0JsxIKyAZxO+RBEy0AoL8xTQUy8fn3gw==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/openai-compatible@1.0.15': - resolution: {integrity: sha512-i4TzohCxuFzBSdRNPa9eNFW6AYDZ5itbxz+rJa2kpNTMYqHgqKPGzet3X6eLIUVntA10icrqhWT+hUhxXZIS9Q==} + '@ai-sdk/openai-compatible@1.0.22': + resolution: {integrity: sha512-Q+lwBIeMprc/iM+vg1yGjvzRrp74l316wDpqWdbmd4VXXlllblzGsUgBLTeKvcEapFTgqk0FRETvSb58Y6dsfA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/openai@2.0.28': - resolution: {integrity: sha512-Z2mG7PjUKbpT8fMexE6yrorxXVzGHSl3jKF293w2i6s9Dc6X81Gf6Z0OGNnkrftLtW4PXr7RZ/9xoyusBZW4uA==} + '@ai-sdk/openai@2.0.53': + resolution: {integrity: sha512-GIkR3+Fyif516ftXv+YPSPstnAHhcZxNoR2s8uSHhQ1yBT7I7aQYTVwpjAuYoT3GR+TeP50q7onj2/nDRbT2FQ==} engines: {node: '>=18'} peerDependencies: - 
zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/perplexity@2.0.8': - resolution: {integrity: sha512-e24PUYXMoXPFhglzO17FEafr2+NljKS8ivHP5Tk0R+aiElfOSqDEjSDsxyL6aBFKzX+Bnrlr33FwfVwz8Ozt2Q==} + '@ai-sdk/perplexity@2.0.13': + resolution: {integrity: sha512-t5YjnUYgDWGasUDiWx4L25WOUVSV1/kLNbrclJUv0NlK4R0ed/15ucz5Jum9nORyUfrFEej7vwS0DHA9rPWLPA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/provider-utils@3.0.8': - resolution: {integrity: sha512-cDj1iigu7MW2tgAQeBzOiLhjHOUM9vENsgh4oAVitek0d//WdgfPCsKO3euP7m7LyO/j9a1vr/So+BGNdpFXYw==} + '@ai-sdk/provider-utils@3.0.12': + resolution: {integrity: sha512-ZtbdvYxdMoria+2SlNarEk6Hlgyf+zzcznlD55EAl+7VZvJaSg2sqPvwArY7L6TfDEDJsnCq0fdhBSkYo0Xqdg==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 '@ai-sdk/provider@2.0.0': resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==} engines: {node: '>=18'} - '@ai-sdk/togetherai@1.0.15': - resolution: {integrity: sha512-Fv+MwY85yMdvlUKxtE5yasT95IdeZCTP67wTBbE0tuuJhBrN1FtX51JA/SYi+TqEqCXhdOOQIlRrtiByRTnjOg==} + '@ai-sdk/togetherai@1.0.23': + resolution: {integrity: sha512-yhf/rnUVmHeGBvceDX9uB+qlnk4UVLHsTq9eTioCbqEvpjWWdRU2LUNsI7X+tc/5qmOT2mB82o1TK/4F0MkM0Q==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/xai@2.0.16': - resolution: {integrity: sha512-t/Ohnn5OExgXZe+yhlpqOFZoixIXpaSBycWnvWfJ7JrpiNdg4WZEjWH+298zUXvqAT5wZvM93h1Ba4TkoYSyZg==} + '@ai-sdk/xai@2.0.27': + resolution: {integrity: sha512-MxH3g4zT+3CrktkGaiaIbYJ1Id8V9TOY/VN2KASqiy77DsK1aO3kQt1E1xB+v5pa+Y0H0z4wk7VbCczGpQ6BMg==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 '@apidevtools/json-schema-ref-parser@9.1.2': resolution: {integrity: sha512-r1w81DpR+KyRWd3f+rk6TNqMgedmAxZP5v5KWlXQWlgMUUtyEJch0DKEci1SorPMiSeM8XPl7MZ3miJ60JIpQg==} @@ -3004,6 +3013,10 @@ packages: '@ungap/structured-clone@1.3.0': resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + '@vercel/oidc@3.0.3': + resolution: {integrity: sha512-yNEQvPcVrK9sIe637+I0jD6leluPxzwJKx/Haw6F4H77CdDsszUn5V3o96LPziXkSNE2B83+Z3mjqGKBK/R6Gg==} + engines: {node: '>= 20'} + '@vladfrangu/async_event_emitter@2.4.6': resolution: {integrity: sha512-RaI5qZo6D2CVS6sTHFKg1v5Ohq/+Bo2LZ5gzUEwZ/WkHhwtGTCB/sVLw8ijOkAUxasZ+WshN/Rzj4ywsABJ5ZA==} engines: {node: '>=v14.0.0', npm: '>=7.0.0'} @@ -3077,11 +3090,11 @@ packages: resolution: {integrity: sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==} engines: {node: '>=8'} - ai@5.0.41: - resolution: {integrity: sha512-KQxXaohdTEawUGwBeCTORSNqL8otavTxqIUrOVFq1Fb+cFaTFBtxpdvFlBlsYHUNEHCLKEaBrlZso4ucsNgKuw==} + ai@5.0.78: + resolution: {integrity: sha512-ec77fmQwJGLduswMrW4AAUGSOiu8dZaIwMmWHHGKsrMUFFS6ugfkTyx0srtuKYHNRRLRC2dT7cPirnUl98VnxA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.25.76 || ^4 + zod: ^3.25.76 || ^4.1.8 ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} @@ -6433,12 +6446,12 @@ packages: resolution: {integrity: sha512-IFenVPgF70fSm1keSd2iDBIDIBZkroLeuffXq+wKTzTJlBpesFWojV9lb8mzOfaAzM1sr7HQHuO0vtV0zYekGg==} engines: {node: '>=8'} - openai@5.20.1: - resolution: {integrity: sha512-UndCB0R5V3iB9I98NyF69zNP6YfwU4+Fjk0eW4HhooTm+Awlpm/MGjJTwJsyNV/qkH1NJi0GG+9odwukGTqExQ==} + openai@6.6.0: + 
resolution: {integrity: sha512-1yWk4cBsHF5Bq9TreHYOHY7pbqdlT74COnm8vPx7WKn36StS+Hyk8DdAitnLaw67a5Cudkz5EmlFQjSrNnrA2w==} hasBin: true peerDependencies: ws: ^8.18.0 - zod: ^3.23.8 + zod: ^3.25 || ^4.0 peerDependenciesMeta: ws: optional: true @@ -8394,82 +8407,90 @@ snapshots: optionalDependencies: express: 5.1.0 - '@ai-sdk/anthropic@2.0.15(zod@3.25.76)': + '@ai-sdk/anthropic@2.0.37(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 2.0.0 + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) + zod: 3.25.76 + + '@ai-sdk/cerebras@1.0.25(zod@3.25.76)': dependencies: + '@ai-sdk/openai-compatible': 1.0.22(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/cerebras@1.0.15(zod@3.25.76)': + '@ai-sdk/deepinfra@1.0.23(zod@3.25.76)': dependencies: - '@ai-sdk/openai-compatible': 1.0.15(zod@3.25.76) + '@ai-sdk/openai-compatible': 1.0.22(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/deepinfra@1.0.15(zod@3.25.76)': + '@ai-sdk/deepseek@1.0.23(zod@3.25.76)': dependencies: - '@ai-sdk/openai-compatible': 1.0.15(zod@3.25.76) + '@ai-sdk/openai-compatible': 1.0.22(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/deepseek@1.0.15(zod@3.25.76)': + '@ai-sdk/fireworks@1.0.23(zod@3.25.76)': dependencies: - '@ai-sdk/openai-compatible': 1.0.15(zod@3.25.76) + '@ai-sdk/openai-compatible': 1.0.22(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/gateway@1.0.21(zod@3.25.76)': + '@ai-sdk/gateway@2.0.1(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) + '@vercel/oidc': 3.0.3 zod: 3.25.76 - '@ai-sdk/google-vertex@3.0.25(encoding@0.1.13)(zod@3.25.76)': + '@ai-sdk/google-vertex@3.0.53(encoding@0.1.13)(zod@3.25.76)': dependencies: - '@ai-sdk/anthropic': 2.0.15(zod@3.25.76) - '@ai-sdk/google': 2.0.13(zod@3.25.76) + '@ai-sdk/anthropic': 2.0.37(zod@3.25.76) + '@ai-sdk/google': 2.0.23(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) google-auth-library: 9.15.1(encoding@0.1.13) zod: 3.25.76 transitivePeerDependencies: - encoding - supports-color - '@ai-sdk/google@2.0.13(zod@3.25.76)': + '@ai-sdk/google@2.0.23(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/groq@2.0.18(zod@3.25.76)': + '@ai-sdk/groq@2.0.24(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/openai-compatible@1.0.15(zod@3.25.76)': + '@ai-sdk/openai-compatible@1.0.22(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/openai@2.0.28(zod@3.25.76)': + '@ai-sdk/openai@2.0.53(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/perplexity@2.0.8(zod@3.25.76)': + 
'@ai-sdk/perplexity@2.0.13(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/provider-utils@3.0.8(zod@3.25.76)': + '@ai-sdk/provider-utils@3.0.12(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 '@standard-schema/spec': 1.0.0 @@ -8480,18 +8501,18 @@ snapshots: dependencies: json-schema: 0.4.0 - '@ai-sdk/togetherai@1.0.15(zod@3.25.76)': + '@ai-sdk/togetherai@1.0.23(zod@3.25.76)': dependencies: - '@ai-sdk/openai-compatible': 1.0.15(zod@3.25.76) + '@ai-sdk/openai-compatible': 1.0.22(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/xai@2.0.16(zod@3.25.76)': + '@ai-sdk/xai@2.0.27(zod@3.25.76)': dependencies: - '@ai-sdk/openai-compatible': 1.0.15(zod@3.25.76) + '@ai-sdk/openai-compatible': 1.0.22(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 '@apidevtools/json-schema-ref-parser@9.1.2': @@ -9975,9 +9996,9 @@ snapshots: dependencies: '@octokit/openapi-types': 12.11.0 - '@openrouter/ai-sdk-provider@1.1.2(ai@5.0.41(zod@3.25.76))(zod@3.25.76)': + '@openrouter/ai-sdk-provider@1.1.2(ai@5.0.78(zod@3.25.76))(zod@3.25.76)': dependencies: - ai: 5.0.41(zod@3.25.76) + ai: 5.0.78(zod@3.25.76) zod: 3.25.76 '@opentelemetry/api-logs@0.57.2': @@ -11502,6 +11523,8 @@ snapshots: '@ungap/structured-clone@1.3.0': {} + '@vercel/oidc@3.0.3': {} + '@vladfrangu/async_event_emitter@2.4.6': {} '@vue/compiler-core@3.5.18': @@ -11584,11 +11607,11 @@ snapshots: clean-stack: 2.2.0 indent-string: 4.0.0 - ai@5.0.41(zod@3.25.76): + ai@5.0.78(zod@3.25.76): dependencies: - '@ai-sdk/gateway': 1.0.21(zod@3.25.76) + '@ai-sdk/gateway': 2.0.1(zod@3.25.76) '@ai-sdk/provider': 2.0.0 - '@ai-sdk/provider-utils': 3.0.8(zod@3.25.76) + '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) '@opentelemetry/api': 1.9.0 zod: 3.25.76 @@ -12098,12 +12121,12 @@ snapshots: chownr@3.0.0: optional: true - chromadb@1.10.5(encoding@0.1.13)(openai@5.20.1(ws@8.18.3)(zod@3.25.76)): + chromadb@1.10.5(encoding@0.1.13)(openai@6.6.0(ws@8.18.3)(zod@3.25.76)): dependencies: cliui: 8.0.1 isomorphic-fetch: 3.0.0(encoding@0.1.13) optionalDependencies: - openai: 5.20.1(ws@8.18.3)(zod@3.25.76) + openai: 6.6.0(ws@8.18.3)(zod@3.25.76) transitivePeerDependencies: - encoding @@ -15587,7 +15610,7 @@ snapshots: dependencies: is-wsl: 1.1.0 - openai@5.20.1(ws@8.18.3)(zod@3.25.76): + openai@6.6.0(ws@8.18.3)(zod@3.25.76): optionalDependencies: ws: 8.18.3 zod: 3.25.76 diff --git a/shared/errors.ts b/shared/errors.ts index e2db9b8a..9f2e0495 100644 --- a/shared/errors.ts +++ b/shared/errors.ts @@ -40,3 +40,11 @@ export class InvalidRequest extends Error { this.name = 'InvalidRequest'; } } + +export class AccountBillingError extends Error { + code = 'ACCOUNT_BILLING_ERROR'; + constructor(message: string) { + super(message); + this.name = 'AccountBillingError'; + } +} diff --git a/src/agent/autonomous/codegen/pythonCodeGenUtils.test.ts b/src/agent/autonomous/codegen/pythonCodeGenUtils.test.ts index f394d53a..c7dcddb5 100644 --- a/src/agent/autonomous/codegen/pythonCodeGenUtils.test.ts +++ b/src/agent/autonomous/codegen/pythonCodeGenUtils.test.ts @@ -1,5 +1,12 @@ import { expect } from 'chai'; -import { camelToSnake, processFunctionArguments } from './pythonCodeGenUtils'; +import type { TypeDefinition } from '#functionSchema/typeDefinition'; +import { + 
camelToSnake, + convertInterfaceToTypedDict, + convertTypeScriptTypeToPython, + generateTypeDefinitionsSection, + processFunctionArguments, +} from './pythonCodeGenUtils'; describe('PythonCodeGenUtils', () => { describe('camelToSnake', () => { @@ -29,4 +36,213 @@ describe('PythonCodeGenUtils', () => { expect(parameters).to.deep.equal({ filePath: 'a.txt', content: 'abc' }); }); }); + + describe('convertTypeScriptTypeToPython', () => { + it('should convert basic types', () => { + expect(convertTypeScriptTypeToPython('string')).to.equal('str'); + expect(convertTypeScriptTypeToPython('number')).to.equal('int'); + expect(convertTypeScriptTypeToPython('boolean')).to.equal('bool'); + expect(convertTypeScriptTypeToPython('any')).to.equal('Any'); + expect(convertTypeScriptTypeToPython('void')).to.equal('None'); + expect(convertTypeScriptTypeToPython('null')).to.equal('None'); + expect(convertTypeScriptTypeToPython('undefined')).to.equal('None'); + }); + + it('should convert array types', () => { + expect(convertTypeScriptTypeToPython('string[]')).to.equal('list[str]'); + expect(convertTypeScriptTypeToPython('number[]')).to.equal('list[int]'); + expect(convertTypeScriptTypeToPython('Array')).to.equal('list[str]'); + expect(convertTypeScriptTypeToPython('Array')).to.equal('list[int]'); + }); + + it('should convert Record types', () => { + expect(convertTypeScriptTypeToPython('Record')).to.equal('dict[str, Any]'); + expect(convertTypeScriptTypeToPython('Record')).to.equal('dict[str, int]'); + }); + + it('should convert union types with null to Optional', () => { + expect(convertTypeScriptTypeToPython('string | null')).to.equal('Optional[str]'); + expect(convertTypeScriptTypeToPython('number | null')).to.equal('Optional[int]'); + expect(convertTypeScriptTypeToPython('string | undefined')).to.equal('Optional[str]'); + }); + + it('should preserve other union types', () => { + expect(convertTypeScriptTypeToPython('string | number')).to.equal('str | int'); + }); + + it('should keep custom types unchanged', () => { + expect(convertTypeScriptTypeToPython('GitProject')).to.equal('GitProject'); + expect(convertTypeScriptTypeToPython('GitProject[]')).to.equal('list[GitProject]'); + }); + }); + + describe('convertInterfaceToTypedDict', () => { + it('should convert a simple interface to TypedDict', () => { + const typeDef: TypeDefinition = { + name: 'SimpleProject', + description: 'A simple project', + properties: [ + { name: 'id', type: 'number', optional: false, description: 'Project ID' }, + { name: 'name', type: 'string', optional: false, description: 'Project name' }, + ], + }; + + const result = convertInterfaceToTypedDict(typeDef); + expect(result).to.include('class SimpleProject(TypedDict):'); + expect(result).to.include('"""A simple project"""'); + expect(result).to.include('id: int'); + expect(result).to.include('"""Project ID"""'); + expect(result).to.include('name: str'); + expect(result).to.include('"""Project name"""'); + }); + + it('should handle optional properties with total=False', () => { + const typeDef: TypeDefinition = { + name: 'Project', + properties: [ + { name: 'id', type: 'number', optional: false }, + { name: 'description', type: 'string', optional: true }, + ], + }; + + const result = convertInterfaceToTypedDict(typeDef); + expect(result).to.include('class Project(TypedDict, total=False):'); + expect(result).to.include('id: Required[int]'); + expect(result).to.include('description: str'); + }); + + it('should handle nullable types with Optional', () => { + const typeDef: 
TypeDefinition = { + name: 'Project', + properties: [{ name: 'description', type: 'string | null', optional: false }], + }; + + const result = convertInterfaceToTypedDict(typeDef); + expect(result).to.include('description: Optional[str]'); + }); + + it('should handle array types', () => { + const typeDef: TypeDefinition = { + name: 'Project', + properties: [{ name: 'tags', type: 'string[]', optional: true }], + }; + + const result = convertInterfaceToTypedDict(typeDef); + expect(result).to.include('tags: list[str]'); + }); + + it('should convert camelCase property names to snake_case', () => { + const typeDef: TypeDefinition = { + name: 'Project', + properties: [ + { name: 'fullPath', type: 'string', optional: false }, + { name: 'defaultBranch', type: 'string', optional: false }, + ], + }; + + const result = convertInterfaceToTypedDict(typeDef); + expect(result).to.include('full_path: str'); + expect(result).to.include('default_branch: str'); + }); + + it('should preserve property descriptions as inline comments', () => { + const typeDef: TypeDefinition = { + name: 'Project', + properties: [{ name: 'name', type: 'string', optional: false, description: 'The project name' }], + }; + + const result = convertInterfaceToTypedDict(typeDef); + expect(result).to.include('"""The project name"""'); + }); + }); + + describe('generateTypeDefinitionsSection', () => { + it('should generate empty string when no type definitions', () => { + const result = generateTypeDefinitionsSection([]); + expect(result).to.equal(''); + }); + + it('should generate a single TypedDict', () => { + const typeDefs: TypeDefinition[] = [ + { + name: 'SimpleProject', + description: 'A simple project', + properties: [ + { name: 'id', type: 'number', optional: false }, + { name: 'name', type: 'string', optional: false }, + ], + }, + ]; + + const result = generateTypeDefinitionsSection(typeDefs); + expect(result).to.include('class SimpleProject(TypedDict):'); + expect(result).to.include('"""A simple project"""'); + expect(result).to.include('id: int'); + expect(result).to.include('name: str'); + }); + + it('should generate multiple TypedDicts in dependency order', () => { + const typeDefs: TypeDefinition[] = [ + { + name: 'Address', + properties: [{ name: 'street', type: 'string', optional: false }], + }, + { + name: 'Person', + properties: [ + { name: 'name', type: 'string', optional: false }, + { name: 'address', type: 'Address', optional: false }, + ], + dependencies: ['Address'], + }, + ]; + + const result = generateTypeDefinitionsSection(typeDefs); + + // Address should appear before Person + const addressIndex = result.indexOf('class Address'); + const personIndex = result.indexOf('class Person'); + expect(addressIndex).to.be.lessThan(personIndex); + expect(result).to.include('address: Address'); + }); + + it('should handle duplicate type definitions', () => { + const typeDefs: TypeDefinition[] = [ + { + name: 'Project', + properties: [{ name: 'id', type: 'number', optional: false }], + }, + { + name: 'Project', + properties: [{ name: 'id', type: 'number', optional: false }], + }, + ]; + + const result = generateTypeDefinitionsSection(typeDefs); + + // Should only generate the type once + const matches = result.match(/class Project/g); + expect(matches).to.have.lengthOf(1); + }); + + it('should add proper spacing between TypedDict definitions', () => { + const typeDefs: TypeDefinition[] = [ + { + name: 'TypeA', + properties: [{ name: 'a', type: 'string', optional: false }], + }, + { + name: 'TypeB', + properties: [{ name: 
'b', type: 'string', optional: false }], + }, + ]; + + const result = generateTypeDefinitionsSection(typeDefs); + + // Should have blank lines between definitions + expect(result).to.include('class TypeA'); + expect(result).to.include('class TypeB'); + expect(result.split('\n\n').length).to.be.greaterThan(1); + }); + }); }); diff --git a/src/agent/autonomous/codegen/pythonCodeGenUtils.ts b/src/agent/autonomous/codegen/pythonCodeGenUtils.ts index 4ae47053..d05fd7f6 100644 --- a/src/agent/autonomous/codegen/pythonCodeGenUtils.ts +++ b/src/agent/autonomous/codegen/pythonCodeGenUtils.ts @@ -1,5 +1,6 @@ import { extractLastXmlTagContent } from '#agent/autonomous/codegen/codegenAutonomousAgentUtils'; import type { FunctionParameter, FunctionSchema } from '#functionSchema/functions'; +import type { TypeDefinition } from '#functionSchema/typeDefinition'; import { logger } from '#o11y/logger'; /** Packages that the agent generated code is allowed to use */ @@ -291,3 +292,136 @@ export function extractPythonCode(llmResponse: string): string { export function extractDraftPythonCode(llmResponse: string): string { return extractLastXmlTagContent(llmResponse, 'draft-python-code'); } + +/** + * Converts a TypeScript type to a Python type annotation for use in TypedDict. + * Uses modern Python syntax (list, dict instead of List, Dict). + * @param tsType The TypeScript type string + * @returns The Python type annotation + */ +export function convertTypeScriptTypeToPython(tsType: string): string { + const trimmed = tsType.trim(); + + // Handle basic types + const basicTypes: Record = { + string: 'str', + number: 'int', + boolean: 'bool', + any: 'Any', + void: 'None', + null: 'None', + undefined: 'None', + }; + + if (basicTypes[trimmed]) { + return basicTypes[trimmed]; + } + + // Handle array types (both T[] and Array syntax) + if (trimmed.endsWith('[]')) { + const elementType = trimmed.slice(0, -2); + return `list[${convertTypeScriptTypeToPython(elementType)}]`; + } + + const arrayMatch = trimmed.match(/^Array<(.+)>$/); + if (arrayMatch) { + return `list[${convertTypeScriptTypeToPython(arrayMatch[1])}]`; + } + + // Handle Record + const recordMatch = trimmed.match(/^Record$/); + if (recordMatch) { + return `dict[str, ${convertTypeScriptTypeToPython(recordMatch[1])}]`; + } + + // Handle union types + if (trimmed.includes('|')) { + const parts = trimmed.split('|').map((p) => p.trim()); + + // Special case: T | null or T | undefined => Optional[T] + if (parts.includes('null') || parts.includes('undefined')) { + const nonNullParts = parts.filter((p) => p !== 'null' && p !== 'undefined'); + if (nonNullParts.length === 1) { + return `Optional[${convertTypeScriptTypeToPython(nonNullParts[0])}]`; + } + } + + // Other unions: convert each part and join with | + return parts.map((p) => convertTypeScriptTypeToPython(p)).join(' | '); + } + + // Keep custom types (interfaces) as-is + return trimmed; +} + +/** + * Converts a TypeDefinition to a Python TypedDict class declaration + * @param typeDef The interface definition to convert + * @returns Python TypedDict class code + */ +export function convertInterfaceToTypedDict(typeDef: TypeDefinition): string { + const lines: string[] = []; + + // Determine if we need total=False (when we have mix of required and optional) + const hasOptional = typeDef.properties.some((p) => p.optional); + const hasRequired = typeDef.properties.some((p) => !p.optional); + const useTotalFalse = hasOptional && hasRequired; + + // Class declaration + if (useTotalFalse) { + lines.push(`class 
${typeDef.name}(TypedDict, total=False):`); + } else { + lines.push(`class ${typeDef.name}(TypedDict):`); + } + + // Class docstring + if (typeDef.description) { + lines.push(` """${typeDef.description}"""`); + } + + // Properties + for (const prop of typeDef.properties) { + const pythonName = camelToSnake(prop.name); + let pythonType = convertTypeScriptTypeToPython(prop.type); + + // If using total=False, wrap required fields with Required[] + if (useTotalFalse && !prop.optional) { + pythonType = `Required[${pythonType}]`; + } + + // Add the property + lines.push(` ${pythonName}: ${pythonType}`); + + // Add property description as inline docstring + if (prop.description) { + lines.push(` """${prop.description}"""`); + } + } + + return lines.join('\n'); +} + +/** + * Generates the type definitions section for the Python script + * @param typeDefinitions Array of type definitions to generate + * @returns Python code with all TypedDict definitions, or empty string if no definitions + */ +export function generateTypeDefinitionsSection(typeDefinitions: TypeDefinition[]): string { + if (!typeDefinitions || typeDefinitions.length === 0) { + return ''; + } + + // Remove duplicates based on type name + const uniqueTypes = new Map(); + for (const typeDef of typeDefinitions) { + if (!uniqueTypes.has(typeDef.name)) { + uniqueTypes.set(typeDef.name, typeDef); + } + } + + // Generate TypedDict for each unique type + const typeDeclarations = Array.from(uniqueTypes.values()).map((typeDef) => convertInterfaceToTypedDict(typeDef)); + + // Join with double newlines for spacing + return typeDeclarations.join('\n\n'); +} diff --git a/src/cli/agent.ts b/src/cli/agent.ts index 11518417..cb5d6fcb 100644 --- a/src/cli/agent.ts +++ b/src/cli/agent.ts @@ -14,9 +14,11 @@ import { logger } from '#o11y/logger'; import type { AgentContext } from '#shared/agent/agent.model'; import { registerErrorHandlers } from '../errorHandlers'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { resolveFunctionClasses } from './functionAliases'; export async function main(): Promise { + loadCliEnvironment(); registerErrorHandlers(); await initApplicationContext(); const llms = defaultLLMs(); diff --git a/src/cli/chat.ts b/src/cli/chat.ts index f287f1ad..30a406b5 100644 --- a/src/cli/chat.ts +++ b/src/cli/chat.ts @@ -9,9 +9,11 @@ import { getMarkdownFormatPrompt } from '#routes/chat/chatPromptUtils'; import { LLM, LlmMessage, UserContentExt, contentText, messageText, user } from '#shared/llm/llm.model'; import { currentUser } from '#user/userContext'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { LLM_CLI_ALIAS } from './llmAliases'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const { initialPrompt: rawPrompt, resumeAgentId, flags } = parseProcessArgs(); diff --git a/src/cli/code.ts b/src/cli/code.ts index 7bdc1ede..8adf1058 100644 --- a/src/cli/code.ts +++ b/src/cli/code.ts @@ -11,9 +11,11 @@ import { contentText, messageText } from '#shared/llm/llm.model'; import { CodeEditingAgent } from '#swe/codeEditingAgent'; import { beep } from '#utils/beep'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { parsePromptWithImages } from './promptParser'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLlms: AgentLLMs = defaultLLMs(); diff --git a/src/cli/codeAgent.ts 
b/src/cli/codeAgent.ts index bd14a7a8..ba345941 100644 --- a/src/cli/codeAgent.ts +++ b/src/cli/codeAgent.ts @@ -20,6 +20,7 @@ import { CodeEditingAgent } from '#swe/codeEditingAgent'; import { CodeFunctions } from '#swe/codeFunctions'; import { registerErrorHandlers } from '../errorHandlers'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { resolveFunctionClasses } from './functionAliases'; async function resumeAgent(resumeAgentId: string, initialPrompt: string) { @@ -42,6 +43,7 @@ async function resumeAgent(resumeAgentId: string, initialPrompt: string) { } export async function main(): Promise { + loadCliEnvironment(); registerErrorHandlers(); await initApplicationContext(); const llms = defaultLLMs(); diff --git a/src/cli/commit.ts b/src/cli/commit.ts index dfe3a5d6..5399ce34 100644 --- a/src/cli/commit.ts +++ b/src/cli/commit.ts @@ -5,8 +5,10 @@ import { shutdownTrace } from '#fastify/trace-init/trace-init'; import { Git } from '#functions/scm/git'; import { FileSystemRead } from '#functions/storage/fileSystemRead'; import { defaultLLMs } from '#llm/services/defaultLlms'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); await initApplicationContext(); console.log('Commit command starting...'); diff --git a/src/cli/debate.ts b/src/cli/debate.ts index c025b2cc..ae1c6045 100644 --- a/src/cli/debate.ts +++ b/src/cli/debate.ts @@ -12,9 +12,11 @@ import { logger } from '#o11y/logger'; import type { AgentLLMs } from '#shared/agent/agent.model'; import { messageText } from '#shared/llm/llm.model'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { parsePromptWithImages } from './promptParser'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLLMs: AgentLLMs = defaultLLMs(); const { initialPrompt: rawPrompt, resumeAgentId, flags } = parseProcessArgs(); diff --git a/src/cli/detect.ts b/src/cli/detect.ts index 6c9310c4..c92acc10 100644 --- a/src/cli/detect.ts +++ b/src/cli/detect.ts @@ -8,8 +8,10 @@ import { shutdownTrace } from '#fastify/trace-init/trace-init'; import { defaultLLMs } from '#llm/services/defaultLlms'; import type { AgentLLMs } from '#shared/agent/agent.model'; import { getProjectInfo } from '#swe/projectDetection'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLLMs: AgentLLMs = defaultLLMs(); diff --git a/src/cli/easy.ts b/src/cli/easy.ts index dfef03d8..4426032b 100644 --- a/src/cli/easy.ts +++ b/src/cli/easy.ts @@ -9,12 +9,14 @@ import { mockLLMs } from '#llm/services/mock-llm'; import { vertexGemini_2_5_Flash } from '#llm/services/vertexai'; import type { AgentContext } from '#shared/agent/agent.model'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; // See https://arxiv.org/html/2405.19616v1 https://github.com/autogenai/easy-problems-that-llms-get-wrong // Usage: // npm run easy async function main() { + loadCliEnvironment(); await initApplicationContext(); const context: AgentContext = createContext({ diff --git a/src/cli/envLoader.ts b/src/cli/envLoader.ts new file mode 100644 index 00000000..822a7577 --- /dev/null +++ b/src/cli/envLoader.ts @@ -0,0 +1,132 @@ +/** + * @fileoverview + * Utility for loading environment variables from .env files in CLI tools. 
+ * When using git worktrees enables using the local.env from the main repository + * Extracted from startLocal.ts to be shared across all CLI tools. + */ + +import { existsSync, readFileSync } from 'node:fs'; +import { isAbsolute, resolve } from 'node:path'; +import { logger } from '#o11y/logger'; + +interface ResolveEnvFileOptions { + envFile?: string | null; + cwd?: string; + typedAiHome?: string | null; +} + +interface ApplyEnvOptions { + override?: boolean; +} + +type ParsedEnv = Record; + +/** + * Builds an absolute path from a potential relative path. + * @param value The path value (can be null or undefined). + * @param cwd The current working directory to resolve from. + * @returns An absolute path, or null if the input value is empty. + */ +function buildCandidatePath(value: string | null | undefined, cwd: string): string | null { + if (!value) return null; + if (isAbsolute(value)) return value; + return resolve(cwd, value); +} + +/** + * Resolves the path to the env file used for local development. + * Resolution order: + * 1. Explicit `ENV_FILE` environment variable. + * 2. `variables/local.env` relative to the current working directory. + * 3. `variables/local.env` inside the directory specified by `TYPEDAI_HOME`. + * @throws If no environment file can be found in any of the candidate locations. + */ +export function resolveEnvFilePath(options: ResolveEnvFileOptions = {}): string { + const cwd = options.cwd ?? process.cwd(); + const envFileCandidate = buildCandidatePath(options.envFile ?? process.env.ENV_FILE, cwd); + const localEnvCandidate = resolve(cwd, 'variables', 'local.env'); + const typedAiHomeCandidate = options.typedAiHome ?? process.env.TYPEDAI_HOME; + const typedAiEnvCandidate = typedAiHomeCandidate ? resolve(typedAiHomeCandidate, 'variables', 'local.env') : null; + + const candidates = [envFileCandidate, localEnvCandidate, typedAiEnvCandidate]; + for (const candidate of candidates) { + if (!candidate) continue; + if (existsSync(candidate)) return candidate; + } + + throw new Error( + 'Could not locate environment file. Set ENV_FILE, create variables/local.env, or ensure TYPEDAI_HOME points to a repository that contains variables/local.env.', + ); +} + +/** + * Parses a dotenv-style file into a plain key/value map. + * - Ignores lines starting with `#` (comments). + * - Ignores lines without an equals sign. + * - Trims whitespace from keys and values. + * - Strips `export ` prefix from keys. + * - Removes quotes from values. + * - Converts `\n` literals to newlines. + * @param filePath The absolute path to the environment file. + * @returns A record of environment variables. 
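For illustration, a minimal sketch of how the helpers in this new module are meant to be used, assuming a hypothetical variables/local.env (the keys below are invented); it mirrors the loadCliEnvironment() call this patch adds at the top of the src/cli entrypoints:

    // hypothetical variables/local.env
    //   # comments and blank lines are skipped
    //   export GCLOUD_PROJECT="my-project"   <- `export ` prefix and surrounding quotes are stripped
    //   SYSTEM_PROMPT='line1\nline2'         <- the \n literal becomes a real newline

    import { loadCliEnvironment } from './envLoader';

    async function main() {
        // Resolution order: ENV_FILE, then ./variables/local.env, then $TYPEDAI_HOME/variables/local.env.
        // Values already present in process.env win unless { override: true } is passed, and if no
        // env file is found the CLI simply continues with the existing process.env.
        loadCliEnvironment();
        // ... await initApplicationContext(), parseProcessArgs(), etc.
    }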
+ */ +export function loadEnvFile(filePath: string): ParsedEnv { + if (!existsSync(filePath)) throw new Error(`Environment file not found at ${filePath}`); + const contents = readFileSync(filePath, 'utf8'); + const lines = contents.split(/\r?\n/); + const parsed: ParsedEnv = {}; + + for (const rawLine of lines) { + const line = rawLine.trim(); + if (!line || line.startsWith('#')) continue; + const equalIndex = line.indexOf('='); + if (equalIndex <= 0) continue; + + const key = line + .substring(0, equalIndex) + .trim() + .replace(/^export\s+/, ''); + if (!key) continue; + let value = line.substring(equalIndex + 1).trim(); + + if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) { + value = value.slice(1, -1); + } + value = value.replace(/\\n/g, '\n'); + parsed[key] = value; + } + + return parsed; +} + +/** + * Loads an environment file and assigns its values to `process.env`. + * By default, it does not override existing environment variables. + * @param filePath The path to the environment file. + * @param options Configuration options. `override: true` will cause it to + * overwrite existing `process.env` values. + */ +export function applyEnvFile(filePath: string, options: ApplyEnvOptions = {}): void { + const envVars = loadEnvFile(filePath); + const override = options.override ?? false; + + for (const [key, value] of Object.entries(envVars)) { + if (!override && process.env[key] !== undefined) continue; + process.env[key] = value; + } +} + +/** + * Convenience function to load environment variables from a .env file for CLI tools. + * Tries to find and load the environment file, but continues gracefully if not found. + * @param options Configuration options for override behavior + */ +export function loadCliEnvironment(options: ApplyEnvOptions = {}): void { + try { + const envFilePath = resolveEnvFilePath(); + applyEnvFile(envFilePath, options); + logger.debug(`Loaded environment from ${envFilePath}`); + } catch (err) { + logger.debug(err, 'No environment file found; continuing with existing process.env'); + } +} diff --git a/src/cli/export.ts b/src/cli/export.ts index 5806993c..2d1c7567 100644 --- a/src/cli/export.ts +++ b/src/cli/export.ts @@ -7,6 +7,7 @@ import micromatch from 'micromatch'; import { FileSystemService } from '#functions/storage/fileSystemService'; import { countTokens } from '#llm/tokens'; import { logger } from '#o11y/logger'; +import { loadCliEnvironment } from './envLoader'; /** * If there are no arguments then only write the exported contents to the console @@ -14,6 +15,7 @@ import { logger } from '#o11y/logger'; * If there is the -f arg write it to a file. Default to export.xml. If a value is provided, e.g. 
-f=export2.xml then write to export2.xml */ async function main() { + loadCliEnvironment(); const fileSystemService = new FileSystemService(); const basePath = fileSystemService.getBasePath(); diff --git a/src/cli/files.ts b/src/cli/files.ts index 64d97208..53503558 100644 --- a/src/cli/files.ts +++ b/src/cli/files.ts @@ -11,8 +11,10 @@ import type { AgentLLMs } from '#shared/agent/agent.model'; import { fastSelectFilesAgent } from '#swe/discovery/fastSelectFilesAgent'; import { selectFilesAgent } from '#swe/discovery/selectFilesAgentWithSearch'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLLMs: AgentLLMs = defaultLLMs(); diff --git a/src/cli/gaia.ts b/src/cli/gaia.ts index f0141400..4c2d94a5 100644 --- a/src/cli/gaia.ts +++ b/src/cli/gaia.ts @@ -14,6 +14,7 @@ import type { AgentLLMs } from '#shared/agent/agent.model'; import { lastText } from '#shared/llm/llm.model'; import type { LlmCall } from '#shared/llmCall/llmCall.model'; import { sleep } from '#utils/async-utils'; +import { loadCliEnvironment } from './envLoader'; const SYSTEM_PROMPT = `Finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.`; @@ -124,6 +125,7 @@ async function answerGaiaQuestion(task: GaiaQuestion): Promise { } async function main() { + loadCliEnvironment(); await initApplicationContext(); const llms = defaultLLMs(); diff --git a/src/cli/gen.ts b/src/cli/gen.ts index 75a56da9..f14e42db 100644 --- a/src/cli/gen.ts +++ b/src/cli/gen.ts @@ -8,6 +8,7 @@ import { countTokens } from '#llm/tokens'; import { LLM, LlmMessage, ThinkingLevel, messageSources, messageText, system, user } from '#shared/llm/llm.model'; import { beep } from '#utils/beep'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { LLM_CLI_ALIAS } from './llmAliases'; import { parsePromptWithImages } from './promptParser'; import { terminalLog } from './terminal'; @@ -16,6 +17,7 @@ import { terminalLog } from './terminal'; // ai gen -s="system prompt" 'input prompt' async function main() { + loadCliEnvironment(); const { initialPrompt: rawPrompt, llmId, flags } = parseProcessArgs(); const { textPrompt, userContent } = await parsePromptWithImages(rawPrompt); diff --git a/src/cli/index.ts b/src/cli/index.ts index fd355724..89dab1cd 100644 --- a/src/cli/index.ts +++ b/src/cli/index.ts @@ -11,8 +11,10 @@ import { buildIndexDocs } from '#swe/index/repoIndexDocBuilder'; import { generateRepositoryMaps } from '#swe/index/repositoryMap'; import { getProjectInfos } from '#swe/projectDetection'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLlms: AgentLLMs = defaultLLMs(); diff --git a/src/cli/morph.ts b/src/cli/morph.ts index f05580f0..bac813ab 100644 
--- a/src/cli/morph.ts +++ b/src/cli/morph.ts @@ -20,6 +20,7 @@ import { CodeFunctions } from '#swe/codeFunctions'; import { MorphCodeAgent } from '#swe/morph/morphCoder'; import { registerErrorHandlers } from '../errorHandlers'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { resolveFunctionClasses } from './functionAliases'; async function resumeAgent(resumeAgentId: string, initialPrompt: string) { @@ -42,6 +43,7 @@ async function resumeAgent(resumeAgentId: string, initialPrompt: string) { } export async function main(): Promise { + loadCliEnvironment(); registerErrorHandlers(); await initApplicationContext(); const llms = defaultLLMs(); diff --git a/src/cli/query.ts b/src/cli/query.ts index 5d43691b..1dd76e02 100644 --- a/src/cli/query.ts +++ b/src/cli/query.ts @@ -11,9 +11,11 @@ import { logger } from '#o11y/logger'; import type { AgentLLMs } from '#shared/agent/agent.model'; import { queryWithFileSelection2 } from '#swe/discovery/selectFilesAgentWithSearch'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; import { parsePromptWithImages } from './promptParser'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLLMs: AgentLLMs = defaultLLMs(); const { initialPrompt: rawPrompt, resumeAgentId, flags } = parseProcessArgs(); diff --git a/src/cli/research.ts b/src/cli/research.ts index 90bd5ee6..39a061c5 100644 --- a/src/cli/research.ts +++ b/src/cli/research.ts @@ -8,6 +8,7 @@ import { PublicWeb } from '#functions/web/web'; import { defaultLLMs } from '#llm/services/defaultLlms'; import type { AgentLLMs } from '#shared/agent/agent.model'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; // Usage: // npm run research @@ -15,6 +16,7 @@ import { parseProcessArgs, saveAgentId } from './cli'; const llms: AgentLLMs = defaultLLMs(); export async function main(): Promise { + loadCliEnvironment(); const systemPrompt = readFileSync('src/cli/research-system', 'utf-8'); const { initialPrompt, resumeAgentId } = parseProcessArgs(); diff --git a/src/cli/review.ts b/src/cli/review.ts index 7397fe9a..b7effa0f 100644 --- a/src/cli/review.ts +++ b/src/cli/review.ts @@ -10,8 +10,10 @@ import type { AgentLLMs } from '#shared/agent/agent.model'; import { performLocalBranchCodeReview } from '#swe/codeReview/local/localCodeReview'; import { beep } from '#utils/beep'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const agentLlms: AgentLLMs = defaultLLMs(); diff --git a/src/cli/slack.ts b/src/cli/slack.ts index e0deff55..5dae3a3f 100644 --- a/src/cli/slack.ts +++ b/src/cli/slack.ts @@ -1,7 +1,9 @@ import { initApplicationContext } from '#app/applicationContext'; import { sleep } from '#utils/async-utils'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); await initApplicationContext(); const { SlackChatBotService } = await import('../modules/slack/slackModule.cjs'); const chatbot = new SlackChatBotService(); diff --git a/src/cli/startLocal.ts b/src/cli/startLocal.ts index a2b60d58..743faa7a 100644 --- a/src/cli/startLocal.ts +++ b/src/cli/startLocal.ts @@ -14,24 +14,13 @@ */ import '#fastify/trace-init/trace-init'; -import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs'; +import { existsSync, mkdirSync, 
writeFileSync } from 'node:fs'; import { open } from 'node:inspector'; import { createRequire } from 'node:module'; import { type Server as NetServer, createServer } from 'node:net'; -import path, { isAbsolute, resolve } from 'node:path'; +import path from 'node:path'; import { logger } from '#o11y/logger'; - -interface ResolveEnvFileOptions { - envFile?: string | null; - cwd?: string; - typedAiHome?: string | null; -} - -interface ApplyEnvOptions { - override?: boolean; -} - -type ParsedEnv = Record; +import { applyEnvFile, resolveEnvFilePath } from './envLoader'; type ServerFactory = () => NetServer; @@ -142,101 +131,6 @@ main().catch((error) => { process.exitCode = 1; }); -/** - * Builds an absolute path from a potential relative path. - * @param value The path value (can be null or undefined). - * @param cwd The current working directory to resolve from. - * @returns An absolute path, or null if the input value is empty. - */ -function buildCandidatePath(value: string | null | undefined, cwd: string): string | null { - if (!value) return null; - if (isAbsolute(value)) return value; - return resolve(cwd, value); -} - -/** - * Resolves the path to the env file used for local development. - * Resolution order: - * 1. Explicit `ENV_FILE` environment variable. - * 2. `variables/local.env` relative to the current working directory. - * 3. `variables/local.env` inside the directory specified by `TYPEDAI_HOME`. - * @throws If no environment file can be found in any of the candidate locations. - */ -function resolveEnvFilePath(options: ResolveEnvFileOptions = {}): string { - const cwd = options.cwd ?? process.cwd(); - const envFileCandidate = buildCandidatePath(options.envFile ?? process.env.ENV_FILE, cwd); - const localEnvCandidate = resolve(cwd, 'variables', 'local.env'); - const typedAiHomeCandidate = options.typedAiHome ?? process.env.TYPEDAI_HOME; - const typedAiEnvCandidate = typedAiHomeCandidate ? resolve(typedAiHomeCandidate, 'variables', 'local.env') : null; - - const candidates = [envFileCandidate, localEnvCandidate, typedAiEnvCandidate]; - for (const candidate of candidates) { - if (!candidate) continue; - if (existsSync(candidate)) return candidate; - } - - throw new Error( - 'Could not locate environment file. Set ENV_FILE, create variables/local.env, or ensure TYPEDAI_HOME points to a repository that contains variables/local.env.', - ); -} - -/** - * Parses a dotenv-style file into a plain key/value map. - * - Ignores lines starting with `#` (comments). - * - Ignores lines without an equals sign. - * - Trims whitespace from keys and values. - * - Strips `export ` prefix from keys. - * - Removes quotes from values. - * - Converts `\n` literals to newlines. - * @param filePath The absolute path to the environment file. - * @returns A record of environment variables. 
- */ -function loadEnvFile(filePath: string): ParsedEnv { - if (!existsSync(filePath)) throw new Error(`Environment file not found at ${filePath}`); - const contents = readFileSync(filePath, 'utf8'); - const lines = contents.split(/\r?\n/); - const parsed: ParsedEnv = {}; - - for (const rawLine of lines) { - const line = rawLine.trim(); - if (!line || line.startsWith('#')) continue; - const equalIndex = line.indexOf('='); - if (equalIndex <= 0) continue; - - const key = line - .substring(0, equalIndex) - .trim() - .replace(/^export\s+/, ''); - if (!key) continue; - let value = line.substring(equalIndex + 1).trim(); - - if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) { - value = value.slice(1, -1); - } - value = value.replace(/\\n/g, '\n'); - parsed[key] = value; - } - - return parsed; -} - -/** - * Loads an environment file and assigns its values to `process.env`. - * By default, it does not override existing environment variables. - * @param filePath The path to the environment file. - * @param options Configuration options. `override: true` will cause it to - * overwrite existing `process.env` values. - */ -function applyEnvFile(filePath: string, options: ApplyEnvOptions = {}): void { - const envVars = loadEnvFile(filePath); - const override = options.override ?? false; - - for (const [key, value] of Object.entries(envVars)) { - if (!override && process.env[key] !== undefined) continue; - process.env[key] = value; - } -} - /** * Writes JSON metadata describing the current runtime so other processes can * discover the chosen configuration (e.g., ports). This is crucial for the diff --git a/src/cli/summarize.ts b/src/cli/summarize.ts index 3e5f01f1..8bae23f8 100644 --- a/src/cli/summarize.ts +++ b/src/cli/summarize.ts @@ -9,8 +9,10 @@ import { SummarizerAgent } from '#functions/text/summarizer'; import { defaultLLMs } from '#llm/services/defaultLlms'; import type { AgentLLMs } from '#shared/agent/agent.model'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; async function main() { + loadCliEnvironment(); const agentLlms: AgentLLMs = defaultLLMs(); await initApplicationContext(); diff --git a/src/cli/swe.ts b/src/cli/swe.ts index 215ed856..2675693a 100644 --- a/src/cli/swe.ts +++ b/src/cli/swe.ts @@ -10,6 +10,7 @@ import type { AgentContext, AgentLLMs } from '#shared/agent/agent.model'; import { CodeEditingAgent } from '#swe/codeEditingAgent'; import { SoftwareDeveloperAgent } from '#swe/softwareDeveloperAgent'; import { parseProcessArgs, saveAgentId } from './cli'; +import { loadCliEnvironment } from './envLoader'; // Used to test the SoftwareDeveloperAgent @@ -17,6 +18,7 @@ import { parseProcessArgs, saveAgentId } from './cli'; // npm run swe async function main() { + loadCliEnvironment(); await initApplicationContext(); const llms: AgentLLMs = defaultLLMs(); diff --git a/src/cli/swebench.ts b/src/cli/swebench.ts index 5bc2aae1..097ab277 100644 --- a/src/cli/swebench.ts +++ b/src/cli/swebench.ts @@ -15,6 +15,7 @@ import { CodeFunctions } from '#swe/codeFunctions'; import { type SWEInstance, startContainer, stopContainer } from '../benchmarks/swebench/swe-bench-runner'; import { registerErrorHandlers } from '../errorHandlers'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; async function loadDataset(datasetName: string, split: string): Promise { // const url = 
`https://huggingface.co/datasets/${datasetName}/resolve/main/swe-bench.json`; @@ -29,6 +30,7 @@ async function loadDataset(datasetName: string, split: string): Promise { + loadCliEnvironment(); const args = process.argv.slice(2); // Remove 'node' and script path if (args.length === 0 || args[0] === '--help' || args[0] === '-h') { diff --git a/src/cli/watch.ts b/src/cli/watch.ts index c3220940..ab05ce9b 100644 --- a/src/cli/watch.ts +++ b/src/cli/watch.ts @@ -13,6 +13,7 @@ import { MorphEditor } from '#swe/morph/morphEditor'; import { beep } from '#utils/beep'; import { execCommand } from '#utils/exec'; import { parseProcessArgs } from './cli'; +import { loadCliEnvironment } from './envLoader'; /** * Walks up the directory tree from the file location until a `.git` folder is found. @@ -28,6 +29,7 @@ function findRepoRoot(startFilePath: string): string { } async function main() { + loadCliEnvironment(); // timeout avoids ReferenceError: Cannot access 'RateLimiter' before initialization setTimeout(() => { initInMemoryApplicationContext(); diff --git a/src/config/secretConfig.ts b/src/config/secretConfig.ts index 24d932b9..a45e4c5a 100644 --- a/src/config/secretConfig.ts +++ b/src/config/secretConfig.ts @@ -59,7 +59,7 @@ export async function loadSecrets(sm?: SecretManager) { for (const [envKey, mappedName] of mapping.entries()) { if (mappedName === name) secrets.set(envKey, value); } - logger.info(`Loaded secret ${name}`); + logger.debug(`Loaded secret ${name}`); }), ); diff --git a/src/functionSchema/functionDecorators.ts b/src/functionSchema/functionDecorators.ts index d89f6db6..501b0b74 100644 --- a/src/functionSchema/functionDecorators.ts +++ b/src/functionSchema/functionDecorators.ts @@ -1,7 +1,6 @@ import type { Span } from '@opentelemetry/api'; -import { agentContext } from '#agent/agentContextLocalStorage'; import { logger } from '#o11y/logger'; -import { getTracer, setFunctionSpanAttributes, withActiveSpan } from '#o11y/trace'; +import { setFunctionSpanAttributes, withActiveSpan } from '#o11y/trace'; import { functionSchemaParser } from './functionSchemaParser'; import { FUNC_SEP, type FunctionSchema, getFunctionSchemas, setFunctionSchemas } from './functions'; @@ -36,9 +35,6 @@ export function func() { return function spanDecorator(originalMethod: any, context: ClassMethodDecoratorContext): any { const methodName = String(context.name); return async function replacementMethod(this: any, ...args: any[]) { - const tracer = getTracer(); - const agent = agentContext(); - // TODO move agent.functionCallHistory.push from xml and codegen runners to here so agentWorkflows show the function call history // output summarising might have to happen in the agentService.save // // Convert arg array to parameters name/value map @@ -51,14 +47,6 @@ export function func() { // stdoutSummary: outputSummary, // }); - if (!tracer) { - try { - agent?.callStack?.push(methodName); - return await originalMethod.call(this, ...args); - } finally { - agentContext()?.callStack?.pop(); - } - } const className = Object.getPrototypeOf(this).constructor.name; const functionName = `${className}${FUNC_SEP}${methodName}`; // NOTE - modification, build attributeExtractors from all the arguments @@ -80,16 +68,13 @@ export function func() { return await withActiveSpan(methodName, async (span: Span) => { setFunctionSpanAttributes(span, methodName, attributeExtractors, args); - span.setAttribute('call', agentContext()?.callStack?.join(' > ') ?? 
''); - - agent?.callStack?.push(methodName); let result: any; try { result = originalMethod.call(this, ...args); if (typeof result?.then === 'function') await result; } finally { - agent?.callStack?.pop(); + // No explicit pop needed, AsyncLocalStorage handles scope exit } try { diff --git a/src/functionSchema/functionSchemaParser.ts b/src/functionSchema/functionSchemaParser.ts index 06e0d5d0..573de98c 100644 --- a/src/functionSchema/functionSchemaParser.ts +++ b/src/functionSchema/functionSchemaParser.ts @@ -5,17 +5,21 @@ import { promisify } from 'node:util'; import { type ClassDeclaration, type Decorator, + type InterfaceDeclaration, type JSDoc, type JSDocTag, type MethodDeclaration, type ParameterDeclaration, Project, + type PropertySignature, + type SourceFile, type Type, } from 'ts-morph'; import { systemDir } from '#app/appDirs'; import { FUNC_DECORATOR_NAME } from '#functionSchema/functionSchemaTypes'; import { logger } from '#o11y/logger'; import type { FunctionParameter, FunctionSchema } from './functions'; +import type { TypeDefinition, TypeProperty } from './typeDefinition'; const writeFileAsync = promisify(writeFile); @@ -130,15 +134,14 @@ export function functionSchemaParser(sourceFilePath: string): Record { if (tag.getTagName() === 'returns' || tag.getTagName() === 'return') { - returnType = method.getReturnType().getText(); - // Remove Promise wrapper if present - if (returnType.startsWith('Promise<') && returnType.endsWith('>')) { - returnType = returnType.slice(8, -1); - } + rawReturnType = method.getReturnType().getText(); + // Normalize the return type (remove Promise wrapper and import paths) + returnType = normalizeReturnType(rawReturnType); returns = tag.getText().replace('@returns', '').replace('@return', '').trim(); // Remove type information from returns if present @@ -216,6 +219,14 @@ export function functionSchemaParser(sourceFilePath: string): Record 0) { + funcDef.typeDefinitions = typeDefinitions; + } + } } functionSchemas[funcDef.name] = funcDef; }); @@ -234,6 +245,162 @@ function getFileUpdatedTimestamp(filePath: string): Date | null { } } +/** + * Extracts a simple type name from a potentially complex type string + * E.g., "import(...).SimpleProject" -> "SimpleProject" + * E.g., "import(...).SimpleProject[]" -> "SimpleProject" + * E.g., "Promise" -> "SimpleProject" + */ +function extractSimpleTypeName(typeText: string): string { + let result = typeText; + + // Remove import path if present (e.g., "import(...).SimpleProject" -> "SimpleProject") + if (result.includes('import(')) { + const parts = result.split('.'); + result = parts[parts.length - 1]; + + // Remove trailing > from Promise/Generic wrapper (e.g., "SimpleProject>" -> "SimpleProject") + result = result.replace(/>+$/, ''); + + // Remove array brackets (e.g., "SimpleProject[]>" -> "SimpleProject") + result = result.replace(/\[\]>*/g, ''); + } + + // Remove array brackets for non-import types + result = result.replace(/\[\]$/, ''); + + return result; +} + +/** + * Checks if a type is a custom interface (not a built-in type) + */ +function isCustomInterfaceType(typeText: string): boolean { + // Check if it contains import path (indicates custom type) + if (typeText.includes('import(')) { + return true; + } + + // Check if it's a simple custom type (starts with uppercase) + const baseType = extractSimpleTypeName(typeText); + return /^[A-Z]/.test(baseType) && !['Record', 'Array', 'Promise', 'Map', 'Set'].includes(baseType); +} + +/** + * Normalizes a return type string by removing import paths and 
Promise wrappers + */ +function normalizeReturnType(typeText: string): string { + let normalized = typeText; + + // Remove Promise wrapper + if (normalized.startsWith('Promise<') && normalized.endsWith('>')) { + normalized = normalized.slice(8, -1); + } + + // Check if this is an array type + const isArray = normalized.endsWith('[]') || /\[\]$/.test(normalized); + + // Extract simple name + const simpleName = extractSimpleTypeName(normalized); + + // Reconstruct with array brackets if needed + return isArray ? `${simpleName}[]` : simpleName; +} + +/** + * Extracts type definition from an interface declaration + */ +function extractInterfaceDefinition(interfaceDecl: InterfaceDeclaration, sourceFile: SourceFile): TypeDefinition { + const interfaceName = interfaceDecl.getName(); + const description = interfaceDecl.getJsDocs()[0]?.getDescription().trim(); + const properties: TypeProperty[] = []; + const dependencies = new Set(); + + for (const prop of interfaceDecl.getProperties()) { + const propName = prop.getName(); + // Use getTypeNode() to get the actual type syntax, not the simplified type + const typeNode = prop.getTypeNode(); + const propType = typeNode ? typeNode.getText() : prop.getType().getText(prop); + const isOptional = prop.hasQuestionToken(); + const propDescription = prop.getJsDocs()[0]?.getDescription().trim(); + + // Normalize property type (remove import paths) + let normalizedType = propType; + if (propType.includes('import(')) { + const simpleName = extractSimpleTypeName(propType); + normalizedType = propType.replace(/import\([^)]+\)\./, ''); + + // Track dependency if it's a custom type + if (isCustomInterfaceType(propType) && simpleName !== interfaceName) { + dependencies.add(simpleName); + } + } + + properties.push({ + name: propName, + type: normalizedType, + optional: isOptional, + description: propDescription, + }); + } + + return { + name: interfaceName, + description, + properties, + dependencies: dependencies.size > 0 ? 
Array.from(dependencies) : undefined, + }; +} + +/** + * Finds and extracts interface definition by name from source file + */ +function findInterfaceDefinition(interfaceName: string, sourceFile: SourceFile): TypeDefinition | null { + const interfaces = sourceFile.getInterfaces(); + const interfaceDecl = interfaces.find((i) => i.getName() === interfaceName); + + if (!interfaceDecl) { + return null; + } + + return extractInterfaceDefinition(interfaceDecl, sourceFile); +} + +/** + * Recursively extracts all type definitions for a return type and its dependencies + */ +function extractTypeDefinitions(returnType: string, sourceFile: SourceFile, visited: Set = new Set()): TypeDefinition[] { + const typeDefinitions: TypeDefinition[] = []; + + // Get the base type name (without array brackets) + const baseTypeName = extractSimpleTypeName(returnType); + + // Avoid circular dependencies + if (visited.has(baseTypeName)) { + return typeDefinitions; + } + visited.add(baseTypeName); + + // Find the interface definition + const typeDef = findInterfaceDefinition(baseTypeName, sourceFile); + if (!typeDef) { + return typeDefinitions; + } + + // Recursively extract dependencies first (so they appear before the main type) + if (typeDef.dependencies) { + for (const dep of typeDef.dependencies) { + const depDefs = extractTypeDefinitions(dep, sourceFile, visited); + typeDefinitions.push(...depDefs); + } + } + + // Add the main type definition + typeDefinitions.push(typeDef); + + return typeDefinitions; +} + export function generatePythonClass(type: Type): void { if (type.isInterface()) { } else if (type.isTypeParameter()) { diff --git a/src/functionSchema/functionSchemaParserWithTypes.test.ts b/src/functionSchema/functionSchemaParserWithTypes.test.ts new file mode 100644 index 00000000..2d8aba23 --- /dev/null +++ b/src/functionSchema/functionSchemaParserWithTypes.test.ts @@ -0,0 +1,115 @@ +import { unlinkSync } from 'node:fs'; +import { expect } from 'chai'; +import { systemDir } from '#app/appDirs'; +import { func, funcClass } from './functionDecorators'; +import { functionSchemaParser } from './functionSchemaParser'; +import type { FunctionSchema } from './functions'; + +/** + * A simple project interface for testing + */ +// biome-ignore lint/suspicious/noExportsInTest: Interface needed for parser testing +export interface SimpleProject { + /** The project ID */ + id: number; + /** The project name */ + name: string; + /** Optional description */ + description: string | null; + /** List of tags */ + tags?: string[]; +} + +@funcClass(__filename) +class TestClassWithTypes { + /** + * Method that returns a custom interface type + * @returns The project details + */ + @func() + async getProject(): Promise { + return { id: 1, name: 'test', description: null }; + } + + /** + * Method that returns an array of custom interface type + * @returns Array of projects + */ + @func() + async getProjects(): Promise { + return []; + } +} + +describe('functionSchemaParser with custom types', () => { + let functionSchemas: Record; + + before(async () => { + try { + unlinkSync(`${systemDir()}/functions/src/functionSchema/functionSchemaParserWithTypes.test.json`); + } catch (e) { + // File might not exist + } + functionSchemas = functionSchemaParser(__filename); + }); + + describe('parseDefinitions with custom interface types', () => { + it('should extract type definitions for custom interface return type', () => { + const schema = functionSchemas.TestClassWithTypes_getProject; + 
expect(schema.returnType).to.equal('SimpleProject'); + expect(schema.typeDefinitions).to.exist; + expect(schema.typeDefinitions).to.have.lengthOf(1); + + const typeDef = schema.typeDefinitions![0]; + expect(typeDef.name).to.equal('SimpleProject'); + expect(typeDef.description).to.equal('A simple project interface for testing'); + expect(typeDef.properties).to.have.lengthOf(4); + + // Check id property + const idProp = typeDef.properties.find((p) => p.name === 'id'); + expect(idProp).to.deep.equal({ + name: 'id', + type: 'number', + optional: false, + description: 'The project ID', + }); + + // Check name property + const nameProp = typeDef.properties.find((p) => p.name === 'name'); + expect(nameProp).to.deep.equal({ + name: 'name', + type: 'string', + optional: false, + description: 'The project name', + }); + + // Check description property (with null) + const descProp = typeDef.properties.find((p) => p.name === 'description'); + expect(descProp).to.deep.equal({ + name: 'description', + type: 'string | null', + optional: false, + description: 'Optional description', + }); + + // Check tags property (optional) + const tagsProp = typeDef.properties.find((p) => p.name === 'tags'); + expect(tagsProp).to.deep.equal({ + name: 'tags', + type: 'string[]', + optional: true, + description: 'List of tags', + }); + }); + + it('should extract type definitions for array of custom interface', () => { + const schema = functionSchemas.TestClassWithTypes_getProjects; + expect(schema.returnType).to.equal('SimpleProject[]'); + expect(schema.typeDefinitions).to.exist; + expect(schema.typeDefinitions).to.have.lengthOf(1); + + const typeDef = schema.typeDefinitions![0]; + expect(typeDef.name).to.equal('SimpleProject'); + }); + }); +}); diff --git a/src/functionSchema/functions.ts b/src/functionSchema/functions.ts index 8f3ec284..015ffb31 100644 --- a/src/functionSchema/functions.ts +++ b/src/functionSchema/functions.ts @@ -1,5 +1,7 @@ // Definitions for LLM function calling +import type { TypeDefinition } from './typeDefinition'; + // If the FunctionSchema/FunctionParameter interfaces change then the loading of cached schemas in the // parser will need to check for the old schema and discard @@ -16,6 +18,8 @@ export interface FunctionSchema { parameters: FunctionParameter[]; returns?: string; returnType?: string; + /** Type definitions for custom types used in return type or parameters */ + typeDefinitions?: TypeDefinition[]; } export interface FunctionParameter { diff --git a/src/functionSchema/typeDefinition.ts b/src/functionSchema/typeDefinition.ts new file mode 100644 index 00000000..276c682a --- /dev/null +++ b/src/functionSchema/typeDefinition.ts @@ -0,0 +1,27 @@ +/** + * Represents a property within a TypeScript interface + */ +export interface TypeProperty { + /** The name of the property */ + name: string; + /** The TypeScript type of the property */ + type: string; + /** Whether the property is optional */ + optional: boolean; + /** JSDoc comment for this property */ + description?: string; +} + +/** + * Represents a TypeScript interface definition that will be converted to a Python TypedDict + */ +export interface TypeDefinition { + /** The name of the interface/type */ + name: string; + /** The properties of this interface */ + properties: TypeProperty[]; + /** JSDoc description for the entire interface */ + description?: string; + /** Names of other interfaces this type depends on */ + dependencies?: string[]; +} diff --git a/src/functions/storage/localFileStore.test.ts 
b/src/functions/storage/localFileStore.test.ts index ee7d3959..1a299683 100644 --- a/src/functions/storage/localFileStore.test.ts +++ b/src/functions/storage/localFileStore.test.ts @@ -1,33 +1,37 @@ import fs from 'node:fs'; +import os from 'node:os'; import path from 'node:path'; import { expect } from 'chai'; +import sinon from 'sinon'; import { agentContextStorage } from '#agent/agentContextLocalStorage'; +import * as appDirs from '#app/appDirs'; +import { setupConditionalLoggerOutput } from '#test/testUtils'; import { LocalFileStore } from './localFileStore'; -function setupMockAgentContext(agentId: string) { - return agentContextStorage.run({ agentId } as any, () => {}); -} - describe('LocalFileStore', () => { + setupConditionalLoggerOutput(); const testAgentId = 'test-agent-id'; - const localFileStore = new LocalFileStore(); + const sandbox = sinon.createSandbox(); + let basePath: string; + let localFileStore: LocalFileStore; function withContext(func: () => Promise): Promise { return agentContextStorage.run({ agentId: testAgentId } as any, () => func()); } beforeEach(async () => { - await fs.promises.rm(localFileStore.basePath, { recursive: true, force: true }); + basePath = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'localfilestore-')); + localFileStore = new LocalFileStore(basePath); }); afterEach(async () => { - await fs.promises.rm(localFileStore.basePath, { recursive: true, force: true }); + await fs.promises.rm(basePath, { recursive: true, force: true }); + sandbox.restore(); }); it('should save a file successfully with metadata', async () => withContext(async () => { - const localFileStore = new LocalFileStore(); const filename = 'test-file.txt'; const contents = 'Test content'; const description = 'Test file description'; diff --git a/src/functions/storage/localFileStore.ts b/src/functions/storage/localFileStore.ts index abf07be5..d21940b4 100644 --- a/src/functions/storage/localFileStore.ts +++ b/src/functions/storage/localFileStore.ts @@ -15,8 +15,9 @@ import type { FileMetadata } from '#shared/files/files.model'; export class LocalFileStore implements FileStore { basePath: string; - constructor() { - this.basePath = path.join(systemDir(), 'filestore'); + constructor(sysDir: string = systemDir()) { + this.basePath = path.join(sysDir, 'filestore'); + fs.mkdirSync(this.basePath, { recursive: true }); // this.basePath = path.join(process.cwd(), 'public'); } diff --git a/src/llm/llmCallService/llmCall.ts b/src/llm/llmCallService/llmCall.ts index 66997167..ac5b8c80 100644 --- a/src/llm/llmCallService/llmCall.ts +++ b/src/llm/llmCallService/llmCall.ts @@ -1,5 +1,6 @@ import { agentContext } from '#agent/agentContextLocalStorage'; import { logger } from '#o11y/logger'; +import { getCurrentCallStack } from '#o11y/trace'; import type { AgentContext } from '#shared/agent/agent.model'; import type { LlmRequest } from '#shared/llmCall/llmCall.model'; @@ -7,21 +8,27 @@ export type CreateLlmRequest = Omit; export function callStack(agent?: AgentContext): string { agent ??= agentContext(); - if (!agent) return ''; - let arr: string[] = agent.callStack; - if (!arr || arr.length === 0) return ''; - if (arr.length === 1) return arr[0]!; - - // Remove the common spans - arr.shift(); - const index = arr.indexOf('CodeGen Agent'); - if (index !== -1) arr = arr.slice(index + 1, arr.length); - - // Remove duplicates from when we call multiple in parallel, eg in findFilesToEdit - let i = arr.length - 1; - while (i > 0 && arr[i] === arr[i - 1]) { - i--; + + const base = (() => { + const 
asyncStack = getCurrentCallStack(); + if (asyncStack.length) return asyncStack; + return agent?.callStack ?? []; + })(); + + if (!base.length) return ''; + + const stack = [...base]; + + if (stack.length > 1) { + stack.shift(); + const idx = stack.indexOf('CodeGen Agent'); + if (idx !== -1) stack.splice(0, idx + 1); } - logger.info(arr.slice(0, i + 1).join(' > ')); - return arr.slice(0, i + 1).join(' > '); + + let i = stack.length - 1; + while (i > 0 && stack[i] === stack[i - 1]) i--; + + const formatted = stack.slice(0, i + 1).join(' > '); + if (formatted) logger.info(formatted); + return formatted; } diff --git a/src/llm/multi-agent/blackberry.ts b/src/llm/multi-agent/blackberry.ts index 000d51a1..c3a144be 100644 --- a/src/llm/multi-agent/blackberry.ts +++ b/src/llm/multi-agent/blackberry.ts @@ -1,6 +1,5 @@ import { BaseLLM } from '#llm/base-llm'; import { Claude4_5_Sonnet_Vertex } from '#llm/services/anthropic-vertex'; -import { fireworksLlama3_405B } from '#llm/services/fireworks'; import { openaiGPT5 } from '#llm/services/openai'; import { logger } from '#o11y/logger'; import type { GenerateTextOptions, LLM } from '#shared/llm/llm.model'; diff --git a/src/llm/services/ai-llm.ts b/src/llm/services/ai-llm.ts index b256a2f5..9c3ed0e1 100644 --- a/src/llm/services/ai-llm.ts +++ b/src/llm/services/ai-llm.ts @@ -19,6 +19,7 @@ import { BaseLLM, type BaseLlmConfig } from '#llm/base-llm'; import { type CreateLlmRequest, callStack } from '#llm/llmCallService/llmCall'; import { logger } from '#o11y/logger'; import { withActiveSpan } from '#o11y/trace'; +import { AccountBillingError } from '#shared/errors'; import { type AssistantContentExt, type CoreContent, @@ -191,10 +192,9 @@ export abstract class AiLLM extends BaseLLM { if (opts.thinking === 'low') thinkingBudget = 3000; if (opts.thinking === 'medium') thinkingBudget = 8192; else if (opts.thinking === 'high') thinkingBudget = 21_333; // maximum without streaming + if (thinkingBudget) { - providerOptions.anthropic = { - thinking: { type: 'enabled', budgetTokens: thinkingBudget }, - }; + providerOptions.anthropic = { thinking: { type: 'enabled', budgetTokens: thinkingBudget } }; opts.temperature = undefined; // temperature is not supported when thinking is enabled } // maxOutputTokens += budgetTokens; @@ -391,6 +391,8 @@ export abstract class AiLLM extends BaseLLM { this.saveLlmCallResponse(llmCall); span.recordException(error); + if (error.responseBody?.includes('billing')) throw new AccountBillingError(error.responseBody); + throw error; } }); @@ -440,6 +442,8 @@ export abstract class AiLLM extends BaseLLM { temperature: combinedOpts?.temperature, // topP: combinedOpts?.topP, // anthropic '`temperature` and `top_p` cannot both be specified for this model. Please use only one.' 
stopSequences: combinedOpts?.stopSequences, + providerOptions: combinedOpts.providerOptions, + maxOutputTokens: this.getMaxOutputTokens(), experimental_transform: smoothStream(), }; logger.info({ args: { ...args, messages: `LlmCall:${llmCall.id}` } }, `Streaming text - ${opts?.id}`); diff --git a/src/llm/services/fireworks.ts b/src/llm/services/fireworks.ts index 3aacc029..013565b3 100644 --- a/src/llm/services/fireworks.ts +++ b/src/llm/services/fireworks.ts @@ -1,4 +1,4 @@ -import { type OpenAIProvider, createOpenAI } from '@ai-sdk/openai'; +import { FireworksProvider, createFireworks } from '@ai-sdk/fireworks'; import { costPerMilTokens } from '#llm/base-llm'; import { AiLLM } from '#llm/services/ai-llm'; import type { LLM, LlmCostFunction } from '#shared/llm/llm.model'; @@ -6,7 +6,7 @@ import { currentUser } from '#user/userContext'; export const FIREWORKS_SERVICE = 'fireworks'; -export class Fireworks extends AiLLM { +export class Fireworks extends AiLLM { constructor(displayName: string, model: string, maxOutputTokens: number, calculateCosts: LlmCostFunction) { super({ displayName, service: FIREWORKS_SERVICE, modelId: model, maxInputTokens: maxOutputTokens, calculateCosts }); } @@ -15,11 +15,11 @@ export class Fireworks extends AiLLM { return currentUser()?.llmConfig.fireworksKey?.trim() || process.env.FIREWORKS_API_KEY; } - provider(): OpenAIProvider { + provider(): FireworksProvider { if (!this.aiProvider) { const apiKey = this.apiKey(); if (!apiKey) throw new Error('No API key provided'); - this.aiProvider = createOpenAI({ + this.aiProvider = createFireworks({ apiKey, baseURL: 'https://api.fireworks.ai/inference/v1', }); @@ -30,42 +30,14 @@ export class Fireworks extends AiLLM { export function fireworksLLMRegistry(): Record LLM> { return { - [`${FIREWORKS_SERVICE}:accounts/fireworks/models/llama-v3p1-70b-instruct`]: fireworksLlama3_70B, - [`${FIREWORKS_SERVICE}:accounts/fireworks/models/deepseek-v3`]: fireworksDeepSeekV3, - [`${FIREWORKS_SERVICE}:accounts/fireworks/models/qwen3-235b-a22b`]: fireworksQwen3_235bA22b, - [`${FIREWORKS_SERVICE}:accounts/fireworks/models/qwen3-coder-480b-a35b-instruct`]: fireworksQwen3Coder, + [`${FIREWORKS_SERVICE}:accounts/fireworks/models/glm-4p6`]: fireworksGLM_4_6, }; } -export function fireworksQwen3_235bA22b(): LLM { - return new Fireworks('Qwen3 235b-A22b (Fireworks)', 'accounts/fireworks/models/qwen3-235b-a22b', 16_000, costPerMilTokens(0.22, 0.88)); -} - -export function fireworksQwen3Coder(): LLM { - return new Fireworks('Qwen3 Coder (Fireworks)', 'accounts/fireworks/models/qwen3-coder-480b-a35b-instruct', 262_144, costPerMilTokens(0.45, 1.8)); -} - -export function fireworksLlama3_70B(): LLM { - return new Fireworks('LLama3 70b-i (Fireworks)', 'accounts/fireworks/models/llama-v3p1-70b-instruct', 131_072, costPerMilTokens(0.9, 0.9)); -} - -export function fireworksLlama3_405B(): LLM { - return new Fireworks('LLama3 405b-i (Fireworks)', 'accounts/fireworks/models/llama-v3p1-405b-instruct', 131_072, costPerMilTokens(3, 3)); -} - -export function fireworksDeepSeekV3(): LLM { - return new Fireworks('DeepSeek 3 (Fireworks)', 'accounts/fireworks/models/deepseek-v3', 131_072, costPerMilTokens(0.9, 0.9)); +export function fireworksGLM_4_6(): LLM { + return new Fireworks('GLM-4.6 (Fireworks)', 'accounts/fireworks/models/glm-4p6', 202_000, costPerMilTokens(0.55, 2.19)); } export function fireworksDeepSeekR1_Fast(): LLM { return new Fireworks('DeepSeek R1 Fast (Fireworks)', 'accounts/fireworks/models/deepseek-r1', 160_000, costPerMilTokens(3, 8)); } - 
-export function fireworksDeepSeekR1_Basic(): LLM { - return new Fireworks('DeepSeek R1 Basic (Fireworks)', 'accounts/fireworks/models/deepseek-r1-basic', 160_000, costPerMilTokens(0.55, 2.19)); -} - -// Not available in serverless -// export function fireworksLlama3_70B_R1_Distill(): LLM { -// return new Fireworks('LLama3 70b R1 Distill (Fireworks)', 'accounts/fireworks/models/deepseek-r1-distill-llama-70b', 131_072, perMilTokens(0.9), perMilTokens(0.9)); -// } diff --git a/src/llm/services/llm.int.ts b/src/llm/services/llm.int.ts index 26afde3a..7a87a9f0 100644 --- a/src/llm/services/llm.int.ts +++ b/src/llm/services/llm.int.ts @@ -4,12 +4,12 @@ import { appContext } from '#app/applicationContext'; import { Claude4_1_Opus_Vertex, Claude4_5_Sonnet_Vertex } from '#llm/services/anthropic-vertex'; import { deepinfraDeepSeekR1, deepinfraQwen3_235B_A22B } from '#llm/services/deepinfra'; import { deepSeekV3_1 } from '#llm/services/deepseek'; -import { fireworksLlama3_70B } from '#llm/services/fireworks'; +import { fireworksGLM_4_6 } from '#llm/services/fireworks'; import { nebiusDeepSeekR1 } from '#llm/services/nebius'; import { Ollama_Phi3 } from '#llm/services/ollama'; import { openaiGPT5mini } from '#llm/services/openai'; import { perplexityLLM } from '#llm/services/perplexity-llm'; -import { sambanovaDeepseekR1, sambanovaLlama3_3_70b, sambanovaLlama3_3_70b_R1_Distill } from '#llm/services/sambanova'; +import { sambanovaDeepseekR1 } from '#llm/services/sambanova'; import { togetherDeepSeekR1_0528_tput } from '#llm/services/together'; import { vertexGemini_2_5_Flash, vertexGemini_2_5_Flash_Lite, vertexGemini_2_5_Pro } from '#llm/services/vertexai'; import type { LlmMessage } from '#shared/llm/llm.model'; @@ -168,7 +168,7 @@ describe('LLMs', () => { }); describe('Fireworks', () => { - const llm = fireworksLlama3_70B(); + const llm = fireworksGLM_4_6(); it('should generateText', async () => { const response = await llm.generateText(SKY_PROMPT, { temperature: 0, id: 'test' }); @@ -226,16 +226,6 @@ describe('LLMs', () => { const response = await sambanovaDeepseekR1().generateText(SKY_PROMPT, { temperature: 0, id: 'test' }); expect(response.toLowerCase()).to.include('blue'); }); - - it('Llama 70b R1 Distill should generateText', async () => { - const response = await sambanovaLlama3_3_70b_R1_Distill().generateText(SKY_PROMPT, { temperature: 0, id: 'test' }); - expect(response.toLowerCase()).to.include('blue'); - }); - - it('Llama 70b should generateText', async () => { - const response = await sambanovaLlama3_3_70b().generateText(SKY_PROMPT, { temperature: 0, id: 'test' }); - expect(response.toLowerCase()).to.include('blue'); - }); }); describe('Together', () => { diff --git a/src/llm/services/sambanova.ts b/src/llm/services/sambanova.ts index a36bf7ed..52b8265e 100644 --- a/src/llm/services/sambanova.ts +++ b/src/llm/services/sambanova.ts @@ -11,8 +11,6 @@ export function sambanovaLLMRegistry(): Record LLM> { 'sambanova:Qwen3-32B': sambanovaQwen3_32b, 'sambanova:DeepSeek-R1': sambanovaDeepseekR1, 'sambanova:DeepSeek-V3-0324': sambanovaDeepseekV3, - 'sambanova:DeepSeek-R1-Distill-Llama-70B': sambanovaLlama3_3_70b_R1_Distill, - 'sambanova:Meta-Llama-3.3-70B-Instruct': sambanovaLlama3_3_70b, }; } @@ -31,14 +29,6 @@ export function sambanovaDeepseekV3(): LLM { return new SambanovaLLM('DeepSeek V3 (Sambanova)', 'DeepSeek-V3-0324', 8_192, costPerMilTokens(1, 1.5)); } -export function sambanovaLlama3_3_70b(): LLM { - return new SambanovaLLM('Llama 3.3 70b (Sambanova)', 'Meta-Llama-3.3-70B-Instruct', 
8_192, costPerMilTokens(0.6, 1.2)); -} - -export function sambanovaLlama3_3_70b_R1_Distill(): LLM { - return new SambanovaLLM('Llama 3.3 70b R1 Distill (Sambanova)', 'DeepSeek-R1-Distill-Llama-70B', 128_000, costPerMilTokens(0.7, 1.4)); -} - /** * https://inference-docs.sambanova.ai/introduction */ diff --git a/src/modules/slack/slackChatBotService.ts b/src/modules/slack/slackChatBotService.ts index 0cf81c93..b8885e25 100644 --- a/src/modules/slack/slackChatBotService.ts +++ b/src/modules/slack/slackChatBotService.ts @@ -156,7 +156,7 @@ export class SlackChatBotService implements ChatBotService, AgentCompleted { logger.error(error, 'Failed to get bot user ID'); } - if (config.socketMode) { + if (config.socketMode && config.autoStart) { // Listen for messages in channels slackApp.event('message', async ({ event, say }) => { this.handleMessage(event, say); diff --git a/src/modules/slack/slackConfig.ts b/src/modules/slack/slackConfig.ts index f68d7888..83dab5d5 100644 --- a/src/modules/slack/slackConfig.ts +++ b/src/modules/slack/slackConfig.ts @@ -19,8 +19,8 @@ export function slackConfig(): SlackConfig { function createSlackConfig(): SlackConfig { const config: SlackConfig = { - socketMode: Boolean(process.env.SLACK_SOCKET_MODE?.trim()), - autoStart: Boolean(process.env.SLACK_AUTO_START?.trim()), + socketMode: process.env.SLACK_SOCKET_MODE?.trim().toLowerCase() === 'true', + autoStart: process.env.SLACK_AUTO_START?.trim().toLowerCase() === 'true', botToken: getSecretEnvVar('SLACK_BOT_TOKEN', ''), signingSecret: getSecretEnvVar('SLACK_SIGNING_SECRET', ''), appToken: getSecretEnvVar('SLACK_APP_TOKEN', ''), diff --git a/src/o11y/trace.ts b/src/o11y/trace.ts index 9c62d106..691a80b5 100644 --- a/src/o11y/trace.ts +++ b/src/o11y/trace.ts @@ -1,4 +1,4 @@ -import type { AsyncLocalStorage } from 'node:async_hooks'; +import { AsyncLocalStorage } from 'node:async_hooks'; /* eslint-disable semi */ import type { Span, SpanContext, Tracer } from '@opentelemetry/api'; import { trace } from '@opentelemetry/api'; @@ -19,6 +19,8 @@ const _fakeSpan: Partial = { const fakeSpan = _fakeSpan as Span; +const callStackStorage = new AsyncLocalStorage(); + /** * Dummy tracer for when tracing is not enabled. As we use more trace methods we will need to fill out this stub further. */ @@ -30,6 +32,22 @@ let tracer: SugaredTracer | null = null; let agentContextStorage: AsyncLocalStorage; let checkForceStopped: () => void; +function runWithCallStackSegment(segment: string, fn: () => T): T { + if (!segment) return fn(); + + const parentStack = callStackStorage.getStore() ?? []; + const nextStack = [...parentStack, segment]; + return callStackStorage.run(nextStack, fn); +} + +export function getCurrentCallStack(): string[] { + const stack = callStackStorage.getStore(); + if (stack) return stack; + + const agent = agentContextStorage?.getStore(); + return agent?.callStack ?? 
[]; +} + /** * @param {Tracer} theTracer - Tracer to be set by the trace-init service * @param theAgentContextStorage @@ -76,17 +94,19 @@ export function getActiveSpan(): Span { export async function withActiveSpan(spanName: string, func: (span: Span) => T): Promise { if (!spanName) console.error(new Error(), 'spanName not provided'); checkForceStopped(); - const functionWithCallStack = async (span: Span): Promise => { - try { - agentContextStorage?.getStore()?.callStack?.push(spanName); - return await func(span); - } finally { - agentContextStorage?.getStore()?.callStack?.pop(); - } + + const execute = (span: Span): T => { + const stack = getCurrentCallStack(); + if (stack.length) span.setAttribute('call', stack.join(' > ')); + return func(span); }; - if (!tracer) return await functionWithCallStack(fakeSpan); - return tracer.withActiveSpan(spanName, functionWithCallStack); + const result = runWithCallStackSegment(spanName, () => { + if (!tracer) return execute(fakeSpan); + return tracer.withActiveSpan(spanName, execute); + }); + + return await Promise.resolve(result); } /** @@ -96,9 +116,17 @@ export async function withActiveSpan(spanName: string, func: (span: Span) => */ export function withSpan(spanName: string, func: (span: Span) => T): T { checkForceStopped(); - if (!tracer) return func(fakeSpan); - return tracer.withSpan(spanName, func); + const execute = (span: Span): T => { + const stack = getCurrentCallStack(); + if (stack.length) span.setAttribute('call', stack.join(' > ')); + return func(span); + }; + + return runWithCallStackSegment(spanName, () => { + if (!tracer) return execute(fakeSpan); + return tracer.withSpan(spanName, execute); + }); } type SpanAttributeExtractor = number | ((...args: any) => string); @@ -157,18 +185,12 @@ export function span any>( return undefined; } })(); - try { - agentContextStorage?.getStore()?.callStack?.push(functionName); - if (!tracer) { - return userCtx ? await runAsUser(userCtx, () => originalMethod.call(this, ...args)) : await originalMethod.call(this, ...args); - } - return tracer.withActiveSpan(functionName, async (span: Span) => { - setFunctionSpanAttributes(span, functionName, attributeExtractors, args); - return userCtx ? await runAsUser(userCtx, () => originalMethod.call(this, ...args)) : await originalMethod.call(this, ...args); - }); - } finally { - agentContextStorage?.getStore()?.callStack?.pop(); - } + + return await withActiveSpan(functionName, async (span: Span) => { + setFunctionSpanAttributes(span, functionName, attributeExtractors, args); + const invoke = () => originalMethod.call(this, ...args); + return userCtx ? 
await runAsUser(userCtx, invoke) : await invoke(); + }); }; }; } diff --git a/src/swe/discovery/selectFilesAgentWithSearch.test.ts b/src/swe/discovery/selectFilesAgentWithSearch.test.ts new file mode 100644 index 00000000..29221781 --- /dev/null +++ b/src/swe/discovery/selectFilesAgentWithSearch.test.ts @@ -0,0 +1,207 @@ +import { expect } from 'chai'; +import mock from 'mock-fs'; +import sinon from 'sinon'; +import { setFileSystemOverride } from '#agent/agentContextLocalStorage'; +import { FileSystemService } from '#functions/storage/fileSystemService'; +import { logger } from '#o11y/logger'; +import type { SelectedFile } from '#shared/files/files.model'; +import type { UserContentExt } from '#shared/llm/llm.model'; +import * as repoOverviewModule from '#swe/index/repoIndexDocBuilder'; +import * as repoMapModule from '#swe/index/repositoryMap'; +import type { ProjectInfo } from '#swe/projectDetection'; +import * as projectDetectionModule from '#swe/projectDetection'; +import { setupConditionalLoggerOutput } from '#test/testUtils'; +import { mockLLM, mockLLMs } from '../../llm/services/mock-llm'; +import { selectFilesAgent } from './selectFilesAgentWithSearch'; + +import type { AgentLLMs } from '#shared/agent/agent.model'; +import { MINIMAL_AI_INFO } from '../projectDetection'; + +describe('selectFilesAgentWithSearch', () => { + setupConditionalLoggerOutput(); + + const sandbox = sinon.createSandbox(); + + let fsOverride: FileSystemService; + let searchExtractsStub: sinon.SinonStub; + let searchFilesStub: sinon.SinonStub; + let llmSet: AgentLLMs; + + beforeEach(() => { + mockLLM.reset(); + + llmSet = mockLLMs(); + + mock({ + '/repo': { + '.git': {}, + '.gitignore': '', + '.typedai.json': MINIMAL_AI_INFO, + 'a.txt': 'alpha content', + 'b.txt': 'beta content', + }, + }); + + fsOverride = new FileSystemService('/repo'); + sandbox.stub(fsOverride, 'getWorkingDirectory').returns('/repo'); + sandbox.stub(fsOverride, 'getVcsRoot').returns('/repo'); + + searchExtractsStub = sandbox.stub(fsOverride, 'searchExtractsMatchingContents').resolves('a.txt: alpha match'); + searchFilesStub = sandbox.stub(fsOverride, 'searchFilesMatchingContents').resolves('a.txt: 1'); + + setFileSystemOverride(fsOverride); + }); + + afterEach(() => { + mockLLM.assertNoPendingResponses(); + setFileSystemOverride(null); + mock.restore(); + sandbox.restore(); + }); + + describe('selectFilesAgent', () => { + it('performs search and continues when no filesToInspect and no pending', async () => { + mockLLM + .addMessageResponse('{"inspectFiles":[]}') // initial + .addMessageResponse('{"search":"TODO","keepFiles":[],"ignoreFiles":[]}') // iteration 1 + .addMessageResponse('{"keepFiles":[{"filePath":"a.txt","reason":"match"}]}') // iteration 2 + .addMessageResponse('{}'); // iteration 3 to allow break + + const files = await selectFilesAgent('Find alpha', {}, llmSet); + + expect(files).to.deep.equal([{ filePath: 'a.txt', reason: 'match' }]); + + const calls = mockLLM.getMessageCalls(); + expect(calls.length).to.be.greaterThan(2); + + // After the search iteration, the next call (iteration 2) should include the search results in prior messages + const iter2Call = calls[2]!; + const iter2Msgs = iter2Call.messages; + + const hasSearchResults = iter2Msgs.some( + (m) => m.role === 'user' && typeof m.content === 'string' && (m.content as string).includes(' m.cache === 'ephemeral').length; + expect(ephemeralCount).to.be.at.most(4); + }); + + it('reminds and escalates when pending not resolved', async () => { + mockLLM + 
.addMessageResponse('{"inspectFiles":["a.txt"]}') // initial: request inspection + .addMessageResponse('{}') // iteration 1: no decisions, triggers reminder + escalate + .addMessageResponse('{"keepFiles":[{"filePath":"a.txt","reason":"kept"}]}'); // iteration 2: resolve + + const files = await selectFilesAgent('Need a.txt', {}, llmSet); + expect(files).to.deep.equal([{ filePath: 'a.txt', reason: 'kept' }]); + + const calls = mockLLM.getMessageCalls(); + // After iteration 1 (which had no decisions), iteration 2 should include the reminder in prior messages + const iter2Msgs = calls[2]!.messages; + const hasReminder = iter2Msgs.some( + (m) => m.role === 'user' && typeof m.content === 'string' && (m.content as string).includes('You have not resolved the following pending files'), + ); + expect(hasReminder).to.equal(true); + }); + + it('reports invalid inspect paths in next-iteration prompt', async () => { + mockLLM + .addMessageResponse('{"inspectFiles":["not-exists.txt"]}') // initial: invalid path + .addMessageResponse('{"search":"ANY"}') // iteration 1: trigger search to continue + .addMessageResponse('{"keepFiles":[{"filePath":"a.txt","reason":"found"}]}') // iteration 2 + .addMessageResponse('{}'); // iteration 3 to break + + const files = await selectFilesAgent('Find file that matches', {}, llmSet); + expect(files).to.deep.equal([{ filePath: 'a.txt', reason: 'found' }]); + + const calls = mockLLM.getMessageCalls(); + const iter1Call = calls[1]!; + const userMessages = iter1Call.messages.filter((m) => m.role === 'user'); + expect(userMessages.length).to.be.greaterThan(0); + const iter1UserPrompt = userMessages[userMessages.length - 1]!; + expect(typeof iter1UserPrompt.content).to.equal('string'); + expect(iter1UserPrompt.content as string).to.include('were invalid or unreadable and have been ignored'); + expect(iter1UserPrompt.content as string).to.include('not-exists.txt'); + }); + it('assigns default reason when keepFiles are provided as strings', async () => { + mockLLM + .addMessageResponse('{"inspectFiles":["a.txt"]}') + .addMessageResponse('{"keepFiles":["a.txt"]}') + .addMessageResponse('{}'); + + const files = await selectFilesAgent('String keep reason', {}, llmSet); + + expect(files).to.deep.equal([{ filePath: 'a.txt', reason: 'Reason not provided by LLM.' 
}]); + }); + + it('includes search errors in subsequent prompts when search fails', async () => { + searchExtractsStub.rejects(new Error('Search failed')); + + mockLLM + .addMessageResponse('{"inspectFiles":[]}') + .addMessageResponse('{"search":"FAIL","keepFiles":[],"ignoreFiles":[]}') + .addMessageResponse('{"keepFiles":[{"filePath":"a.txt","reason":"found"}]}') + .addMessageResponse('{}'); + + const files = await selectFilesAgent('Handle search errors', {}, llmSet); + expect(files).to.deep.equal([{ filePath: 'a.txt', reason: 'found' }]); + + expect(searchExtractsStub.calledOnceWithExactly('FAIL', 1)).to.equal(true); + expect(searchFilesStub.called).to.equal(false); + + const iteration2Call = mockLLM.getMessageCalls().find((call) => call.options?.id === 'Select Files iteration 2'); + expect(iteration2Call).to.not.be.undefined; + + const errorMessage = iteration2Call!.messages.find( + (m) => m.role === 'user' && typeof m.content === 'string' && m.content.includes(' { + const large = 'X'.repeat(40000); + searchExtractsStub.callsFake(async () => large); + searchFilesStub.callsFake(async () => 'Y'.repeat(40000)); + + mockLLM + .addMessageResponse('{"inspectFiles":[]}') + .addMessageResponse('{"search":"TODO","keepFiles":[],"ignoreFiles":[]}') + .addMessageResponse('{"keepFiles":[{"filePath":"a.txt","reason":"found"}]}') + .addMessageResponse('{}'); + + const files = await selectFilesAgent('Truncate search', {}, llmSet); + expect(files).to.deep.equal([{ filePath: 'a.txt', reason: 'found' }]); + + expect(searchExtractsStub.callCount).to.equal(2); + expect(searchFilesStub.calledOnceWithExactly('TODO')).to.equal(true); + + const iteration2Call = mockLLM.getMessageCalls().find((call) => call.options?.id === 'Select Files iteration 2'); + expect(iteration2Call).to.not.be.undefined; + + const truncatedMessage = iteration2Call!.messages.find( + (m) => + m.role === 'user' && + typeof m.content === 'string' && + m.content.includes('truncated="true"') && + m.content.includes('Note: Search results were too large'), + ); + expect(truncatedMessage).to.not.be.undefined; + }); + + it('throws when no files are ultimately selected', async () => { + mockLLM + .addMessageResponse('{"inspectFiles":["a.txt"]}') + .addMessageResponse('{"ignoreFiles":[{"filePath":"a.txt","reason":"not needed"}]}') + .addMessageResponse('{}'); + + try { + await selectFilesAgent('No selection', {}, llmSet); + expect.fail('Expected selectFilesAgent to throw'); + } catch (error) { + expect((error as Error).message).to.equal('No files were selected to fulfill the requirements.'); + } + }); + }); +}); diff --git a/src/swe/discovery/selectFilesAgentWithSearch.ts b/src/swe/discovery/selectFilesAgentWithSearch.ts index a87316c7..25c1486d 100644 --- a/src/swe/discovery/selectFilesAgentWithSearch.ts +++ b/src/swe/discovery/selectFilesAgentWithSearch.ts @@ -1,8 +1,10 @@ import path from 'node:path'; -import { getFileSystem, llms } from '#agent/agentContextLocalStorage'; +import { agentContext, getFileSystem } from '#agent/agentContextLocalStorage'; import { ReasonerDebateLLM } from '#llm/multi-agent/reasoning-debate'; import { extractTag } from '#llm/responseParsers'; +import { defaultLLMs } from '#llm/services/defaultLlms'; import { logger } from '#o11y/logger'; +import type { AgentLLMs } from '#shared/agent/agent.model'; import type { SelectedFile } from '#shared/files/files.model'; import { type GenerateTextWithJsonResponse, @@ -86,20 +88,34 @@ export interface FileExtract { extract: string; } -export async function 
selectFilesAgent(requirements: UserContentExt, options: QueryOptions = {}): Promise { +function resolveAgentLLMs(provided?: AgentLLMs): AgentLLMs { + if (provided) return provided; + const contextLLMs = agentContext()?.llms; + if (contextLLMs) return contextLLMs; + return defaultLLMs(); +} + +export async function selectFilesAgent(requirements: UserContentExt, options: QueryOptions = {}, agentLLMs?: AgentLLMs): Promise { if (!requirements) throw new Error('Requirements must be provided'); - const { selectedFiles } = await selectFilesCore(requirements, options); + const resolvedLLMs = resolveAgentLLMs(agentLLMs); + const { selectedFiles } = await selectFilesCore(requirements, options, resolvedLLMs); return selectedFiles; } -export async function queryWorkflowWithSearch(query: UserContentExt, opts: QueryOptions = {}): Promise { +export async function queryWorkflowWithSearch(query: UserContentExt, opts: QueryOptions = {}, agentLLMs?: AgentLLMs): Promise { if (!query) throw new Error('query must be provided'); - const { files, answer } = await queryWithFileSelection2(query, opts); + const resolvedLLMs = resolveAgentLLMs(agentLLMs); + const { files, answer } = await queryWithFileSelection2(query, opts, resolvedLLMs); return answer; } -export async function queryWithFileSelection2(query: UserContentExt, opts: QueryOptions = {}): Promise<{ files: SelectedFile[]; answer: string }> { - const { messages, selectedFiles } = await selectFilesCore(query, opts); +export async function queryWithFileSelection2( + query: UserContentExt, + opts: QueryOptions = {}, + agentLLMs?: AgentLLMs, +): Promise<{ files: SelectedFile[]; answer: string }> { + const resolvedLLMs = resolveAgentLLMs(agentLLMs); + const { messages, selectedFiles } = await selectFilesCore(query, opts, resolvedLLMs); // Construct the final prompt for answering the query const finalPrompt = ` @@ -112,8 +128,8 @@ Think systematically and methodically through the query, considering multiple op messages.push({ role: 'user', content: finalPrompt }); // Perform the additional LLM call to get the answer - const xhard = llms().xhard; - const llm: LLM = opts.useXtraHardLLM && xhard ? xhard : llms().hard; + const xhard = resolvedLLMs.xhard; + const llm: LLM = opts.useXtraHardLLM && xhard ? xhard : resolvedLLMs.hard; const thinking: ThinkingLevel = llm instanceof ReasonerDebateLLM ? 'none' : 'high'; let answer = await llm.generateText(messages, { id: 'Select Files query Answer', thinking }); @@ -189,6 +205,7 @@ Think systematically and methodically through the query, considering multiple op async function selectFilesCore( requirements: UserContentExt, opts: QueryOptions, + agentLLMs: AgentLLMs, ): Promise<{ messages: LlmMessage[]; selectedFiles: SelectedFile[]; @@ -198,7 +215,7 @@ async function selectFilesCore( const maxIterations = 10; let iterationCount = 0; - let llm = llms().medium; + let llm = agentLLMs.medium; const response: GenerateTextWithJsonResponse = await llm.generateTextWithJson(messages, { id: 'Select Files initial', thinking: 'high' }); logger.info(messageText(response.message)); @@ -243,8 +260,8 @@ async function selectFilesCore( newInvalidPathsFromLastTurn, // Pass invalid paths from previous turn (Fix #5) iterationCount, llm, + agentLLMs, ); - console.log(response); // Process keep/ignore decisions first for (const ignored of response.ignoreFiles ?? []) { @@ -311,7 +328,7 @@ Respond with a valid JSON object that follows the required schema.`, // Escalate to the hard model once, to give the LLM more capacity. 
if (!usingHardLLM) { - llm = llms().hard; + llm = agentLLMs.hard; usingHardLLM = true; logger.info('Escalating to hard LLM because of unresolved pending files.'); } @@ -322,46 +339,26 @@ Respond with a valid JSON object that follows the required schema.`, if (response.search) { const searchRegex = response.search; const searchResultsText = await searchFileSystem(searchRegex); - console.log('Search Results =================='); - console.log(searchResultsText); - console.log('End Search Results =================='); // The assistant message should reflect the actual response, including any keep/ignore/inspect decisions made alongside search. messages.push({ role: 'assistant', content: JSON.stringify(response) }); messages.push({ role: 'user', content: searchResultsText, cache: 'ephemeral' }); - // filesToInspect was already updated with validated new inspectFiles. If search is also present, - // LLM might have asked to inspect some files AND search. - // If LLM uses search, it typically wouldn't inspect new files in the same turn, but the flexibility is there. - } else { - // This 'else' block handles the case where NO search was performed. - // Keep/ignore/inspect decisions were already processed before the if(response.search). - // We still need to push the assistant's response and potentially user messages with file contents. - - // If new files were requested for inspection (and validated into filesToInspect), - // or if files were kept/ignored, this implies an action was taken. - if (filesToInspect.length > 0 || (response.keepFiles ?? []).length > 0 || (response.ignoreFiles ?? []).length > 0) { - // processedIterativeStepUserPrompt adds contents of KEPT files. - // The contents for NEWLY INSPECTED files are added by generateFileSelectionProcessingResponse in the *next* turn. - messages.push(await processedIterativeStepUserPrompt(response)); - } - const cache = filesToInspect.length > 0 ? 'ephemeral' : undefined; // Ephemeral if new files are being inspected - messages.push({ - role: 'assistant', - content: JSON.stringify(response), - cache, - }); + pruneEphemeralCache(messages); - const cachedMessages = messages.filter((msg) => msg.cache === 'ephemeral'); - if (cachedMessages.length > 4) { - // This logic to remove 'ephemeral' status from older messages can be kept or revised. - // For now, keeping it as is, as it's not the primary focus of the fixes. - const firstEphemeralToClear = messages.find((msg, index) => { - const originalIndex = messages.indexOf(cachedMessages[1]!); - return index === originalIndex; - }); - if (firstEphemeralToClear) firstEphemeralToClear.cache = undefined; - } + // Ensure the loop continues so the LLM can process the search results, + // even when filesToInspect is empty and there are no pending files. + continue; } + // This 'else' block handles the case where NO search was performed. + // Keep/ignore/inspect decisions were already processed before the if(response.search). + + // Always append the assistant's response first + const cache = filesToInspect.length > 0 ? 'ephemeral' : undefined; + messages.push({ role: 'assistant', content: JSON.stringify(response), cache }); + + // Do not add a synthetic user message here. + // File contents for newly inspected files are included in the next iteration prompt. + pruneEphemeralCache(messages); // LLM decision logic for switching to hard LLM or breaking // This logic applies whether a search was performed or not. 
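// --- Illustrative aside (not part of the patch): a minimal standalone sketch of how the
// ephemeral-cache cap relied on above is intended to behave. pruneEphemeralCache itself is
// added further down in this file; the message shape here is a simplified stand-in, not the
// real LlmMessage type from '#shared/llm/llm.model'.
type SketchMessage = { role: 'user' | 'assistant'; content: string; cache?: 'ephemeral' };

function pruneEphemeralCacheSketch(messages: SketchMessage[], maxEphemeral = 4): void {
  // Collect the indexes of messages still flagged as ephemeral, oldest first.
  const ephemeralIdxs = messages
    .map((m, i) => ({ m, i }))
    .filter(({ m }) => m.cache === 'ephemeral')
    .map(({ i }) => i);
  // Clear the flag on the oldest entries until at most maxEphemeral remain flagged.
  while (ephemeralIdxs.length > maxEphemeral) {
    messages[ephemeralIdxs.shift()!].cache = undefined;
  }
}

// Example: with six ephemeral messages, the two oldest lose their cache flag and the
// four most recent keep it.
const sketchHistory: SketchMessage[] = Array.from({ length: 6 }, (_, i) => ({
  role: 'user',
  content: `search results ${i}`,
  cache: 'ephemeral',
}));
pruneEphemeralCacheSketch(sketchHistory);
// sketchHistory[0].cache === undefined, sketchHistory[1].cache === undefined, the rest stay 'ephemeral'.
// --- End of illustrative aside.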
@@ -370,7 +367,7 @@ Respond with a valid JSON object that follows the required schema.`, if (filesToInspect.length === 0 && filesPendingDecision.size === 0) { // No new files to inspect, and all previously pending files decided. if (!usingHardLLM) { - llm = llms().hard; + llm = agentLLMs.hard; usingHardLLM = true; logger.info('Switching to hard LLM for final review.'); } else { @@ -385,8 +382,6 @@ Respond with a valid JSON object that follows the required schema.`, } else if (filesToInspect.length > 0) { // New files were requested for inspection (and validated into filesToInspect). logger.debug(`${filesToInspect.length} new files to inspect. Proceeding to next iteration.`); - } else if (response.search) { - logger.debug('Search was performed. Proceeding to next iteration for LLM to process search results.'); } } @@ -477,6 +472,7 @@ async function generateFileSelectionProcessingResponse( invalidPathsFromLastInspection: string[], // New parameter for Fix #5 iteration: number, llm: LLM, + agentLLMs: AgentLLMs, ): Promise { // filesForContent are the files whose contents will be shown to the LLM in this turn. // These are the newly requested (and validated) filesToInspect. @@ -555,32 +551,9 @@ You MUST responsd with a valid JSON object that follows the required schema insi id: `Select Files iteration ${iteration}`, thinking: 'high', }); - console.log(messageText(response.message)); return response.object; } -/** - * Generates the user message that we will add to the conversation, which includes the file contents the LLM wishes to inspect - * @param response - */ -async function processedIterativeStepUserPrompt(response: IterationResponse): Promise { - const ignored = response.ignoreFiles?.map((s) => s.filePath) ?? []; - const kept = response.keepFiles?.map((s) => s.filePath) ?? []; - - let ignoreText = ''; - if (ignored.length) { - ignoreText = '\nRemoved the following ignored files:'; - for (const ig of response.ignoreFiles ?? 
[]) { - ignoreText += `\n${ig.filePath} - ${ig.reason}`; - } - } - - return { - role: 'user', - content: `${(await readFileContents(kept)).contents}${ignoreText}`, - }; -} - async function readFileContents(filePaths: string[]): Promise<{ contents: string; invalidPaths: string[] }> { const fileSystem = getFileSystem(); let contents = '\n'; @@ -605,6 +578,17 @@ ${fileContent} return { contents: `${contents}`, invalidPaths }; } +function pruneEphemeralCache(messages: LlmMessage[], maxEphemeral = 4): void { + const ephemeralIdxs = messages + .map((m, i) => ({ m, i })) + .filter(({ m }) => m.cache === 'ephemeral') + .map(({ i }) => i); + while (ephemeralIdxs.length > maxEphemeral) { + const idxToClear = ephemeralIdxs.shift()!; + messages[idxToClear].cache = undefined; + } +} + async function searchFileSystem(searchRegex: string) { let searchResultsText = ''; let searchPerformedSuccessfully = false; diff --git a/src/swe/discovery/selectFilesToEdit.test.ts b/src/swe/discovery/selectFilesToEdit.test.ts index 6d241f1b..71f4adc9 100644 --- a/src/swe/discovery/selectFilesToEdit.test.ts +++ b/src/swe/discovery/selectFilesToEdit.test.ts @@ -1,8 +1,10 @@ import { expect } from 'chai'; import { FileSystemService } from '#functions/storage/fileSystemService'; import { removeNonExistingFiles } from '#swe/discovery/selectFilesToEdit'; +import { setupConditionalLoggerOutput } from '#test/testUtils'; describe('removeNonExistingFiles', () => { + setupConditionalLoggerOutput(); const fileSystem = new FileSystemService(); it('should remove non-existing files from the selection', async () => { diff --git a/src/swe/index/llmSummaries.ts b/src/swe/index/llmSummaries.ts index 8473c1a6..80e65eee 100644 --- a/src/swe/index/llmSummaries.ts +++ b/src/swe/index/llmSummaries.ts @@ -14,6 +14,23 @@ export interface Summary { }; } +/** JSON Schema for Summary output (without meta/path fields which are added by the caller) */ +const SUMMARY_JSON_SCHEMA = { + type: 'object', + properties: { + short: { + type: 'string', + description: 'A concise summary (maximum 15 words) stating what the file/folder defines, implements, or exports', + }, + long: { + type: 'string', + description: 'A detailed summary (2-4 sentences) listing specific exports, dependencies, and patterns', + }, + }, + required: ['short', 'long'], + additionalProperties: false, +} as const; + /** * Generate a summary for a single file */ @@ -28,7 +45,7 @@ export async function generateFileSummary(fileContents: string, parentSummaries: } const prompt = ` -Analyze this source code file and generate a summary that captures its purpose and functionality: +Analyze this source code file and generate a factual, concise summary: ${parentSummaryText} @@ -36,25 +53,49 @@ ${fileContents} Generate two summaries in JSON format: -1. A one-sentence overview of the file's purpose -2. A detailed paragraph describing: - - The file's main functionality and features - - Key classes/functions/components - - Its role in the larger codebase - - Important dependencies or relationships - - Notable patterns or implementation details -Focus on unique aspects not covered in parent summaries. 
+SHORT SUMMARY: +- Maximum 15 words +- State what the file defines/implements/exports +- Omit filler words like "This file", "The file's main", "It features" +- Start directly with the subject (e.g., "API routes for...", "Service handling...", "Utilities for...") + +LONG SUMMARY: +- Maximum 3 concise sentences +- List specific exports: classes, functions, routes, components, types +- Name key dependencies or patterns used +- Avoid subjective commentary (no "demonstrates quality", "commitment to", "plays a crucial role") +- Avoid generic phrases (no "provides a structured approach", "ensures type safety") +- Be factual and specific - focus on WHAT, not WHY or evaluation + +CRITICAL JSON FORMATTING: +- Do NOT use backticks (\`) anywhere in the JSON output +- Reference code elements without markdown formatting (e.g., "parseFunctionCallsXml" not "\`parseFunctionCallsXml\`") +- Use plain text for all function names, class names, and code references + +Examples of good vs bad: +❌ "The file's organization demonstrates commitment to code quality" +✅ "Exports createUser, deleteUser, updateUser functions" + +❌ "Provides a structured approach to API development" +✅ "Defines 9 API routes using defineApiRoute helper" + +❌ "Exports \`parseXml\` and \`parseJson\` functions" +✅ "Exports parseXml and parseJson functions" Respond only with JSON in this format: { - "short": "One-sentence file summary", - "long": "Detailed paragraph describing the file" + "short": "Direct subject-focused summary under 15 words", + "long": "Factual list of exports, dependencies, and patterns in 2-3 sentences" } `; - return await llm.generateJson(prompt, { id: 'Generate file summary' }); + // Note: The LLM only generates 'short' and 'long'. The caller adds 'path' and 'meta' fields. + return (await llm.generateJson(prompt, { + id: 'Generate file summary', + jsonSchema: SUMMARY_JSON_SCHEMA, + })) as any; } /** @@ -78,62 +119,89 @@ ${parentSummaryText} ${combinedSummary} -Task: Generate a cohesive summary for this folder that captures its role in the larger project. +Generate a factual, concise folder summary: + +SHORT SUMMARY: +- Maximum 15 words +- State the folder's primary purpose/domain +- Start directly with the subject (e.g., "Authentication services and middleware", "API route definitions", "Database models and schemas") +- Omit "This folder", "Contains", "Includes" -1. Key Topics: - List 3-5 main topics or functionalities this folder addresses. +LONG SUMMARY: +- Maximum 4 concise sentences +- List the main file/subfolder categories and their purposes +- Identify common patterns or shared dependencies +- State the folder's domain or responsibility +- Avoid subjective commentary (no "plays a crucial role", "demonstrates organization") +- Avoid generic phrases (no "provides functionality for", "ensures consistency") +- Be factual and specific -2. Folder Summary: - Provide two summaries in JSON format: - a) A one-sentence overview of the folder's purpose and contents. 
- b) A paragraph-length description highlighting: - - The folder's role in the project architecture - - Main components or modules contained - - Key functionalities implemented in this folder - - Relationships with other parts of the codebase - - Any patterns or principles evident in the folder's organization +CRITICAL JSON FORMATTING: +- Do NOT use backticks (\`) anywhere in the JSON output +- Reference code elements without markdown formatting (e.g., "AuthService" not "\`AuthService\`") +- Use plain text for all function names, class names, file names, and code references -Note: Focus on the folder's unique contributions. Avoid repeating information from parent summaries. +Examples of good vs bad: +❌ "This folder plays a crucial role in the project's authentication architecture" +✅ "Authentication: JWT middleware, session management, OAuth providers" + +❌ "The folder demonstrates well-organized code structure" +✅ "Contains 5 route definition files and 3 validation schemas" + +❌ "Contains \`userService.ts\` and \`authService.ts\`" +✅ "Contains userService.ts and authService.ts" Respond only with JSON in this format: { - "short": "Concise one-sentence folder summary", - "long": "Detailed paragraph summarizing the folder's contents and significance" + "short": "Direct domain/purpose under 15 words", + "long": "Factual list of contents and patterns in 3-4 sentences" } `; - return await llm.generateJson(prompt, { id: 'Generate folder summary' }); + // Note: The LLM only generates 'short' and 'long'. The caller adds 'path' and 'meta' fields. + return (await llm.generateJson(prompt, { + id: 'Generate folder summary', + jsonSchema: SUMMARY_JSON_SCHEMA, + })) as any; } /** * Generates a prompt for creating a detailed summary based on combined summaries. */ export function generateDetailedSummaryPrompt(combinedSummary: string): string { - return `Based on the following folder summaries, create a comprehensive overview of the entire project: + return `Based on the following folder summaries, create a factual, concise project overview: ${combinedSummary} -Generate a detailed Markdown summary that includes: - -1. Project Overview: - - The project's primary purpose and goals - -2. Architecture and Structure: - - High-level architecture of the project - - Key directories and their roles - - Main modules or components and their interactions - -3. Core Functionalities: - - List and briefly describe the main features with their location in the project - -4. Technologies and Patterns: - - Primary programming languages used - - Key frameworks, libraries, or tools - - Notable design patterns or architectural decisions - -Ensure the summary is well-structured, using appropriate Markdown formatting for readability. -Include folder path names and file paths where applicable to help readers navigate through the project. +Generate a well-structured Markdown summary with these sections: + +## Project Overview +- 2-3 sentences describing what the project is and its primary purpose +- Avoid subjective commentary (no "robust", "well-designed", "high-quality") +- Be specific about the domain and key capabilities + +## Architecture and Structure +- List key directories and their specific responsibilities +- Include actual folder paths (e.g., "src/api/", "src/services/") +- Mention main architectural patterns if evident (REST API, microservices, etc.) 
+ +## Core Functionalities +- Bulleted list of main features/capabilities +- Include location references (e.g., "User authentication (src/auth/)") +- Be specific, not vague (e.g., "JWT-based auth" not "authentication system") + +## Technologies and Patterns +- Primary programming language(s) and runtime +- Key frameworks and libraries actually used +- Notable patterns or tools (e.g., "Fastify web framework", "Drizzle ORM") + +Guidelines: +- Be factual and specific +- Avoid subjective quality assessments +- Use actual folder/file paths as references +- Keep each section concise (3-5 bullet points max) +- No marketing language or fluff `; } diff --git a/src/swe/index/repoIndexDocBuilder.test.ts b/src/swe/index/repoIndexDocBuilder.test.ts index f1fc952f..0fbca9ac 100644 --- a/src/swe/index/repoIndexDocBuilder.test.ts +++ b/src/swe/index/repoIndexDocBuilder.test.ts @@ -194,6 +194,57 @@ describe('IndexDocBuilder', () => { expect(await fileExists(existingSummaryFullPath), 'Existing summary should still exist').to.be.true; }); + it('should process multiple sibling folders in parallel', async () => { + const file1Content = 'content of file1.ts'; + const file2Content = 'content of file2.ts'; + const file3Content = 'content of file3.ts'; + + const aiConfig = [{ indexDocs: ['**/*.ts'] }]; + setupMockFs({ + [MOCK_REPO_ROOT]: { + [AI_INFO_FILENAME]: JSON.stringify(aiConfig), + folder1: { + 'file1.ts': file1Content, + }, + folder2: { + 'file2.ts': file2Content, + }, + folder3: { + 'file3.ts': file3Content, + }, + [typedaiDirName]: { docs: {} }, + }, + }); + + // Spy on processFolderRecursively to track parallel execution + const processFolderSpy = sinon.spy(builder, 'processFolderRecursively' as any); + + await builder.buildIndexDocsInternal(); + + // Verify all three folders were processed + const summaryFile1Path = path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs', 'folder1/file1.ts.json'); + const summaryFile2Path = path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs', 'folder2/file2.ts.json'); + const summaryFile3Path = path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs', 'folder3/file3.ts.json'); + + expect(await fileExists(summaryFile1Path), 'file1.ts.json should exist').to.be.true; + expect(await fileExists(summaryFile2Path), 'file2.ts.json should exist').to.be.true; + expect(await fileExists(summaryFile3Path), 'file3.ts.json should exist').to.be.true; + + // Verify folder summaries were created + const summaryFolder1Path = path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs', 'folder1/_index.json'); + const summaryFolder2Path = path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs', 'folder2/_index.json'); + const summaryFolder3Path = path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs', 'folder3/_index.json'); + + expect(await fileExists(summaryFolder1Path), 'folder1/_index.json should exist').to.be.true; + expect(await fileExists(summaryFolder2Path), 'folder2/_index.json should exist').to.be.true; + expect(await fileExists(summaryFolder3Path), 'folder3/_index.json should exist').to.be.true; + + // Verify processFolderRecursively was called for the root and all 3 folders + expect(processFolderSpy.callCount).to.be.at.least(4); // root + 3 folders + + processFolderSpy.restore(); + }); + it('should not regenerate summaries if content and children hashes are unchanged', async () => { const fileContent = 'content of file.ts'; const aiConfig = [{ indexDocs: ['src/file.ts'] }]; @@ -264,6 +315,160 @@ describe('IndexDocBuilder', () => { expect(llm.generateText.calledOnce, 'Easy LLM generateText for project summary should be 
called').to.be.true; }); + it('should be stable after incremental update - no LLM calls on second run', async () => { + const fileContent = 'file content'; + const aiConfig = [{ indexDocs: ['src/**/*.ts'] }]; + + setupMockFs({ + [MOCK_REPO_ROOT]: { + [AI_INFO_FILENAME]: JSON.stringify(aiConfig), + src: { + 'file.ts': fileContent, + }, + [typedaiDirName]: { docs: {} }, + }, + }); + + // First run - should generate summaries + await builder.buildIndexDocsInternal(); + + expect(generateFileSummaryStub.callCount).to.equal(1); + expect(generateFolderSummaryStub.callCount).to.equal(1); + expect(llm.generateText.callCount).to.equal(1); + + // Reset stubs to track second run + generateFileSummaryStub.resetHistory(); + generateFolderSummaryStub.resetHistory(); + llm.generateText.resetHistory(); + + // Second run - should make NO LLM calls (stable incremental update) + await builder.buildIndexDocsInternal(); + + expect(generateFileSummaryStub.called, 'generateFileSummary should NOT be called on second run').to.be.false; + expect(generateFolderSummaryStub.called, 'generateFolderSummary should NOT be called on second run').to.be.false; + expect(llm.generateText.called, 'LLM generateText should NOT be called on second run').to.be.false; + }); + + it('should update only changed file and cascade parent folder updates', async () => { + const file1Content = 'file1 content'; + const file2OldContent = 'file2 old content'; + const file2NewContent = 'file2 new content'; + const aiConfig = [{ indexDocs: ['src/**/*.ts'] }]; + + const file1Hash = hash(file1Content); + const file2OldHash = hash(file2OldContent); + const initialFolderHash = hash(`src/file1.ts:${file1Hash},src/file2.ts:${file2OldHash}`); + + setupMockFs({ + [MOCK_REPO_ROOT]: { + [AI_INFO_FILENAME]: JSON.stringify(aiConfig), + src: { + 'file1.ts': file1Content, + 'file2.ts': file2OldContent, + }, + [typedaiDirName]: { + docs: { + src: { + 'file1.ts.json': JSON.stringify({ path: 'src/file1.ts', short: 's1', long: 'l1', meta: { hash: file1Hash } }), + 'file2.ts.json': JSON.stringify({ path: 'src/file2.ts', short: 's2', long: 'l2', meta: { hash: file2OldHash } }), + '_index.json': JSON.stringify({ path: 'src', short: 'folder', long: 'folder', meta: { hash: initialFolderHash } }), + }, + '_project_summary.json': JSON.stringify({ projectOverview: 'overview', meta: { hash: hash(`src:${initialFolderHash}`) } }), + }, + }, + }, + }); + + // Change only file2 + await fsAsync.writeFile(path.join(MOCK_REPO_ROOT, 'src/file2.ts'), file2NewContent); + + await builder.buildIndexDocsInternal(); + + // Verify only file2 summary was regenerated (not file1) + expect(generateFileSummaryStub.calledOnce, 'generateFileSummary should be called once for changed file2').to.be.true; + + // Verify file2 hash was updated + const file2Summary = JSON.parse(await fsAsync.readFile(path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs/src/file2.ts.json'), 'utf-8')); + expect(file2Summary.meta.hash).to.equal(hash(file2NewContent)); + + // Verify file1 hash is unchanged + const file1Summary = JSON.parse(await fsAsync.readFile(path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs/src/file1.ts.json'), 'utf-8')); + expect(file1Summary.meta.hash).to.equal(file1Hash); + + // Verify folder summary was regenerated due to child change + expect(generateFolderSummaryStub.called, 'generateFolderSummary should be called due to child change').to.be.true; + + // Verify project summary was regenerated + expect(llm.generateText.calledOnce, 'Project summary should be regenerated').to.be.true; + }); + + it('should 
handle nested folder incremental updates correctly', async () => { + const file1Content = 'file1 content'; + const file2OldContent = 'nested file old'; + const file2NewContent = 'nested file new'; + const aiConfig = [{ indexDocs: ['src/**/*.ts'] }]; + + const file1Hash = hash(file1Content); + const file2OldHash = hash(file2OldContent); + const nestedFolderHash = hash(`src/nested/file2.ts:${file2OldHash}`); + const parentFolderHash = hash(`src/file1.ts:${file1Hash},src/nested:${nestedFolderHash}`); + + setupMockFs({ + [MOCK_REPO_ROOT]: { + [AI_INFO_FILENAME]: JSON.stringify(aiConfig), + src: { + 'file1.ts': file1Content, + nested: { + 'file2.ts': file2OldContent, + }, + }, + [typedaiDirName]: { + docs: { + src: { + 'file1.ts.json': JSON.stringify({ path: 'src/file1.ts', short: 's1', long: 'l1', meta: { hash: file1Hash } }), + nested: { + 'file2.ts.json': JSON.stringify({ path: 'src/nested/file2.ts', short: 's2', long: 'l2', meta: { hash: file2OldHash } }), + '_index.json': JSON.stringify({ path: 'src/nested', short: 'nested', long: 'nested', meta: { hash: nestedFolderHash } }), + }, + '_index.json': JSON.stringify({ path: 'src', short: 'src', long: 'src', meta: { hash: parentFolderHash } }), + }, + '_project_summary.json': JSON.stringify({ projectOverview: 'overview', meta: { hash: hash(`src:${parentFolderHash}`) } }), + }, + }, + }, + }); + + // Change only the nested file + await fsAsync.writeFile(path.join(MOCK_REPO_ROOT, 'src/nested/file2.ts'), file2NewContent); + + await builder.buildIndexDocsInternal(); + + // Verify only the nested file summary was regenerated + expect(generateFileSummaryStub.calledOnce, 'generateFileSummary should be called once for changed nested file').to.be.true; + + // Verify file2 hash was updated + const file2Summary = JSON.parse(await fsAsync.readFile(path.join(MOCK_REPO_ROOT, typedaiDirName, 'docs/src/nested/file2.ts.json'), 'utf-8')); + expect(file2Summary.meta.hash).to.equal(hash(file2NewContent)); + + // Verify both nested folder and parent folder summaries were regenerated + // generateFolderSummaryStub should be called at least twice (nested + src) + expect(generateFolderSummaryStub.callCount).to.be.at.least(2, 'Both nested and parent folder summaries should be regenerated'); + + // Verify project summary was regenerated + expect(llm.generateText.calledOnce, 'Project summary should be regenerated').to.be.true; + + // Reset and verify stability on second run + generateFileSummaryStub.resetHistory(); + generateFolderSummaryStub.resetHistory(); + llm.generateText.resetHistory(); + + await builder.buildIndexDocsInternal(); + + expect(generateFileSummaryStub.called, 'No file summaries should be regenerated on stable run').to.be.false; + expect(generateFolderSummaryStub.called, 'No folder summaries should be regenerated on stable run').to.be.false; + expect(llm.generateText.called, 'No project summary should be regenerated on stable run').to.be.false; + }); + it('should handle missing AI_INFO_FILENAME gracefully', async () => { setupMockFs({ [MOCK_REPO_ROOT]: { diff --git a/src/swe/index/repoIndexDocBuilder.ts b/src/swe/index/repoIndexDocBuilder.ts index d11f2644..c0291a01 100644 --- a/src/swe/index/repoIndexDocBuilder.ts +++ b/src/swe/index/repoIndexDocBuilder.ts @@ -154,22 +154,24 @@ export class IndexDocBuilder { const currentContentHash = hash(fileContents); - try { - const summaryFileContent = await this.fss.readFile(summaryFilePath); - const existingSummary: Summary = JSON.parse(summaryFileContent); - if (existingSummary.meta?.hash === 
currentContentHash) { - logger.debug(`Summary for ${relativeFilePath} is up to date (hash match).`); - return; - } - logger.info(`Content hash mismatch for ${relativeFilePath}. Regenerating summary.`); - } catch (e: any) { - if (e.code === 'ENOENT') { - logger.debug(`Summary file ${summaryFilePath} not found. Generating new summary.`); - } else if (e instanceof SyntaxError) { - logger.warn(`Error parsing existing summary file ${summaryFilePath}: ${errorToString(e)}. Regenerating summary.`); - } else { - logger.warn(`Error reading summary file ${summaryFilePath}: ${errorToString(e)}. Proceeding to generate summary.`); + if (await this.fss.fileExists(summaryFilePath)) { + try { + const summaryFileContent = await this.fss.readFile(summaryFilePath); + const existingSummary: Summary = JSON.parse(summaryFileContent); + if (existingSummary.meta?.hash === currentContentHash) { + logger.debug(`Summary for ${relativeFilePath} is up to date (hash match).`); + return; + } + logger.info(`Content hash mismatch for ${relativeFilePath}. Regenerating summary.`); + } catch (e: any) { + if (e instanceof SyntaxError) { + logger.warn(`Error parsing existing summary file ${summaryFilePath}: ${errorToString(e)}. Regenerating summary.`); + } else { + logger.warn(`Error reading summary file ${summaryFilePath}: ${errorToString(e)}. Proceeding to generate summary.`); + } } + } else { + logger.debug(`Summary file ${summaryFilePath} not found. Generating new summary.`); } const parentSummaries = await this.getParentSummaries(dirname(filePath)); @@ -179,7 +181,7 @@ export class IndexDocBuilder { // fss.writeFile is expected to handle recursive directory creation. await this.fss.writeFile(summaryFilePath, JSON.stringify(doc, null, 2)); - logger.info(`Completed summary for ${relativeFilePath}`); + logger.debug(`Completed summary for ${relativeFilePath}`); } async processFilesInFolder(folderPath: string, fileMatchesIndexDocs: (filePath: string) => boolean): Promise { @@ -230,15 +232,20 @@ export class IndexDocBuilder { await withActiveSpan('processFolderRecursively', async (span: Span) => { try { const subFolders = await this.fss.listFolders(folderPath); - for (const subFolder of subFolders) { - const subFolderPath = path.join(folderPath, subFolder); - const relativeSubFolderPath = path.relative(this.fss.getWorkingDirectory(), subFolderPath); - if (folderMatchesIndexDocs(relativeSubFolderPath)) { - await this.processFolderRecursively(subFolderPath, fileMatchesIndexDocs, folderMatchesIndexDocs); - } else { - logger.debug(`Skipping folder ${subFolderPath} as it does not match any indexDocs patterns`); - } - } + + // Process all subfolders in parallel for maximum efficiency + await Promise.all( + subFolders.map(async (subFolder) => { + const subFolderPath = path.join(folderPath, subFolder); + const relativeSubFolderPath = path.relative(this.fss.getWorkingDirectory(), subFolderPath); + if (folderMatchesIndexDocs(relativeSubFolderPath)) { + await this.processFolderRecursively(subFolderPath, fileMatchesIndexDocs, folderMatchesIndexDocs); + } else { + logger.debug(`Skipping folder ${subFolderPath} as it does not match any indexDocs patterns`); + } + }), + ); + await this.processFilesInFolder(folderPath, fileMatchesIndexDocs); await this.buildFolderSummary(folderPath, fileMatchesIndexDocs, folderMatchesIndexDocs); } catch (error) { @@ -282,22 +289,24 @@ export class IndexDocBuilder { .join(','); const currentChildrensCombinedHash = hash(childrenHashes); - try { - const existingSummaryContent = await 
this.fss.readFile(folderSummaryFilePath); - const existingSummary: Summary = JSON.parse(existingSummaryContent); - if (existingSummary.meta?.hash === currentChildrensCombinedHash) { - logger.debug(`Folder summary for ${relativeFolderPath} is up to date (hash match).`); - return; - } - logger.info(`Children hash mismatch for folder ${relativeFolderPath}. Regenerating summary.`); - } catch (e: any) { - if (e.code === 'ENOENT') { - logger.debug(`Folder summary file ${folderSummaryFilePath} not found. Generating new summary.`); - } else if (e instanceof SyntaxError) { - logger.warn(`Error parsing existing folder summary file ${folderSummaryFilePath}: ${errorToString(e)}. Regenerating summary.`); - } else { - logger.warn(`Error reading folder summary file ${folderSummaryFilePath}: ${errorToString(e)}. Proceeding to generate summary.`); + if (await this.fss.fileExists(folderSummaryFilePath)) { + try { + const existingSummaryContent = await this.fss.readFile(folderSummaryFilePath); + const existingSummary: Summary = JSON.parse(existingSummaryContent); + if (existingSummary.meta?.hash === currentChildrensCombinedHash) { + logger.debug(`Folder summary for ${relativeFolderPath} is up to date (hash match).`); + return; + } + logger.info(`Children hash mismatch for folder ${relativeFolderPath}. Regenerating summary.`); + } catch (e: any) { + if (e instanceof SyntaxError) { + logger.warn(`Error parsing existing folder summary file ${folderSummaryFilePath}: ${errorToString(e)}. Regenerating summary.`); + } else { + logger.warn(`Error reading folder summary file ${folderSummaryFilePath}: ${errorToString(e)}. Proceeding to generate summary.`); + } } + } else { + logger.debug(`Folder summary file ${folderSummaryFilePath} not found. Generating new summary.`); } try { @@ -352,16 +361,18 @@ export class IndexDocBuilder { if (folderMatchesIndexDocs(relativeSubFolderPath)) { const summaryPath = join(typedaiDirName, 'docs', relativeSubFolderPath, '_index.json'); - try { - const summaryContent = await this.fss.readFile(summaryPath); - const summary = JSON.parse(summaryContent); - if (summary.meta?.hash) { - summaries.push(summary); - } else { - logger.warn(`Folder summary for ${relativeSubFolderPath} at ${summaryPath} is missing a hash. Skipping for parent hash calculation.`); + if (await this.fss.fileExists(summaryPath)) { + try { + const summaryContent = await this.fss.readFile(summaryPath); + const summary = JSON.parse(summaryContent); + if (summary.meta?.hash) { + summaries.push(summary); + } else { + logger.warn(`Folder summary for ${relativeSubFolderPath} at ${summaryPath} is missing a hash. 
Skipping for parent hash calculation.`); + } + } catch (e: any) { + logger.warn(`Failed to read summary for subfolder ${subFolderName} at ${summaryPath}: ${errorToString(e)}`); } - } catch (e: any) { - if (e.code !== 'ENOENT') logger.warn(`Failed to read summary for subfolder ${subFolderName} at ${summaryPath}: ${errorToString(e)}`); } } } @@ -459,16 +470,17 @@ export class IndexDocBuilder { async getTopLevelSummaryInternal(): Promise { const summaryPath = join(typedaiDirName, 'docs', '_project_summary.json'); - try { - const fileContent = await this.fss.readFile(summaryPath); - const doc: ProjectSummaryDoc = JSON.parse(fileContent); - return doc.projectOverview || ''; - } catch (e: any) { - if (e.code === 'ENOENT') { - logger.debug(`Top-level project summary file ${summaryPath} not found.`); - } else { + if (await this.fss.fileExists(summaryPath)) { + try { + const fileContent = await this.fss.readFile(summaryPath); + const doc: ProjectSummaryDoc = JSON.parse(fileContent); + return doc.projectOverview || ''; + } catch (e: any) { logger.debug(`Error reading or parsing top-level project summary ${summaryPath}: ${errorToString(e)}`); + return null; } + } else { + logger.debug(`Top-level project summary file ${summaryPath} not found.`); return null; } } @@ -481,12 +493,16 @@ export class IndexDocBuilder { while (currentPath !== '.' && path.relative(cwd, currentPath) !== '') { const relativeCurrentPath = path.relative(cwd, currentPath); const summaryPath = join(typedaiDirName, 'docs', relativeCurrentPath, '_index.json'); - try { - const summaryContent = await this.fss.readFile(summaryPath); - parentSummaries.unshift(JSON.parse(summaryContent)); - } catch (e: any) { - if (e.code === 'ENOENT') break; - logger.warn(`Failed to read parent summary for ${currentPath} at ${summaryPath}: ${errorToString(e)}`); + if (await this.fss.fileExists(summaryPath)) { + try { + const summaryContent = await this.fss.readFile(summaryPath); + parentSummaries.unshift(JSON.parse(summaryContent)); + } catch (e: any) { + logger.warn(`Failed to read parent summary for ${currentPath} at ${summaryPath}: ${errorToString(e)}`); + break; + } + } else { + // No parent summary found, stop walking up the directory tree break; } currentPath = dirname(currentPath); diff --git a/src/swe/projectDetection.ts b/src/swe/projectDetection.ts index c17496a0..28a7872f 100644 --- a/src/swe/projectDetection.ts +++ b/src/swe/projectDetection.ts @@ -22,6 +22,7 @@ export function setProjectDetectionAgent(fn: ProjectDetectionAgentFn): void { export type ScriptCommand = string | string[]; export const AI_INFO_FILENAME = '.typedai.json'; +export const MINIMAL_AI_INFO = '[{"baseUrl":"."}]'; /** * Interface for the data structure stored in the .typedai.json file.
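
// --- Illustrative aside (not part of the patch): a small, self-contained sketch of the
// AsyncLocalStorage call-stack pattern introduced in src/o11y/trace.ts and consumed by
// callStack() in src/llm/llmCallService/llmCall.ts. It is simplified: there is no tracer
// and no agent-context fallback, and runWithSegment/formatCallStack are illustrative names,
// not the module's actual exports.
import { AsyncLocalStorage } from 'node:async_hooks';

const sketchCallStackStorage = new AsyncLocalStorage<string[]>();

function runWithSegment<T>(segment: string, fn: () => T): T {
  const parent = sketchCallStackStorage.getStore() ?? [];
  // Each nested span gets its own copy of the stack, so parallel branches cannot
  // pop each other's frames the way a shared mutable push/pop array could.
  return sketchCallStackStorage.run([...parent, segment], fn);
}

function formatCallStack(): string {
  const stack = [...(sketchCallStackStorage.getStore() ?? [])];
  if (!stack.length) return '';
  // Collapse the trailing run of duplicate frames, mirroring how callStack() handles
  // the same span being entered by several parallel sub-calls.
  let i = stack.length - 1;
  while (i > 0 && stack[i] === stack[i - 1]) i--;
  return stack.slice(0, i + 1).join(' > ');
}

// Example usage: nested spans render as "Agent > SelectFiles > generateText", and each
// async branch sees only its own stack.
runWithSegment('Agent', () =>
  runWithSegment('SelectFiles', () =>
    runWithSegment('generateText', () => {
      console.log(formatCallStack()); // Agent > SelectFiles > generateText
    }),
  ),
);
// --- End of illustrative aside.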