diff --git a/docs/adapters/anthropic.md b/docs/adapters/anthropic.md
index ec844602..15fd179f 100644
--- a/docs/adapters/anthropic.md
+++ b/docs/adapters/anthropic.md
@@ -56,7 +56,7 @@ const adapter = createAnthropicChat(process.env.ANTHROPIC_API_KEY!, config);
 ## Example: Chat Completion
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { anthropicText } from "@tanstack/ai-anthropic";
 
 export async function POST(request: Request) {
@@ -67,7 +67,7 @@ export async function POST(request: Request) {
     messages,
   });
 
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/adapters/gemini.md b/docs/adapters/gemini.md
index 24e390e3..53cb479d 100644
--- a/docs/adapters/gemini.md
+++ b/docs/adapters/gemini.md
@@ -56,7 +56,7 @@ const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, config);
 ## Example: Chat Completion
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { geminiText } from "@tanstack/ai-gemini";
 
 export async function POST(request: Request) {
@@ -67,7 +67,7 @@ export async function POST(request: Request) {
     messages,
   });
 
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/adapters/ollama.md b/docs/adapters/ollama.md
index d17a9985..59091be0 100644
--- a/docs/adapters/ollama.md
+++ b/docs/adapters/ollama.md
@@ -72,7 +72,7 @@ ollama list
 ## Example: Chat Completion
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { ollamaText } from "@tanstack/ai-ollama";
 
 export async function POST(request: Request) {
@@ -83,7 +83,7 @@ export async function POST(request: Request) {
     messages,
   });
 
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/adapters/openai.md b/docs/adapters/openai.md
index f4c999b2..80aa9105 100644
--- a/docs/adapters/openai.md
+++ b/docs/adapters/openai.md
@@ -56,7 +56,7 @@ const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, config);
 ## Example: Chat Completion
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";
 
 export async function POST(request: Request) {
@@ -67,7 +67,7 @@ export async function POST(request: Request) {
     messages,
   });
 
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/api/ai.md b/docs/api/ai.md
index 626fa047..aec45afa 100644
--- a/docs/api/ai.md
+++ b/docs/api/ai.md
@@ -158,19 +158,19 @@ A `ReadableStream` in Server-Sent Events format. Each chunk is:
 - Followed by `"\n\n"`
 - Stream ends with `"data: [DONE]\n\n"`
 
-## `toStreamResponse(stream, init?)`
+## `toServerSentEventsResponse(stream, init?)`
 
 Converts a stream to an HTTP Response with proper SSE headers.
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";
 
 const stream = chat({
   adapter: openaiText("gpt-4o"),
   messages: [...],
 });
 
-return toStreamResponse(stream);
+return toServerSentEventsResponse(stream);
 ```
 
 ### Parameters
diff --git a/docs/config.json b/docs/config.json
index 375489c0..749434d5 100644
--- a/docs/config.json
+++ b/docs/config.json
@@ -255,8 +255,8 @@
           "to": "reference/functions/toServerSentEventsStream"
         },
         {
-          "label": "toStreamResponse",
-          "to": "reference/functions/toStreamResponse"
+          "label": "toServerSentEventsResponse",
+          "to": "reference/functions/toServerSentEventsResponse"
         },
         {
           "label": "toolDefinition",
diff --git a/docs/getting-started/quick-start.md b/docs/getting-started/quick-start.md
index 00ee9ead..c50bbde3 100644
--- a/docs/getting-started/quick-start.md
+++ b/docs/getting-started/quick-start.md
@@ -23,7 +23,7 @@ First, create an API route that handles chat requests. Here's a simplified examp
 ### TanStack Start
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openai } from "@tanstack/ai-openai";
 import { createFileRoute } from "@tanstack/react-router";
@@ -56,7 +56,7 @@ export const Route = createFileRoute("/api/chat")({
         });
 
         // Convert stream to HTTP response
-        return toStreamResponse(stream);
+        return toServerSentEventsResponse(stream);
       } catch (error) {
         return new Response(
           JSON.stringify({
@@ -78,7 +78,7 @@
 ### Next.js
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";
 
 export async function POST(request: Request) {
@@ -106,7 +106,7 @@
     });
 
     // Convert stream to HTTP response
-    return toStreamResponse(stream);
+    return toServerSentEventsResponse(stream);
   } catch (error) {
     return new Response(
       JSON.stringify({
diff --git a/docs/guides/agentic-cycle.md b/docs/guides/agentic-cycle.md
index 20aa2e08..5465d295 100644
--- a/docs/guides/agentic-cycle.md
+++ b/docs/guides/agentic-cycle.md
@@ -128,7 +128,7 @@ export async function POST(request: Request) {
     tools: [getWeather, getClothingAdvice],
   });
 
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/guides/connection-adapters.md b/docs/guides/connection-adapters.md
index 7f312463..0d0e763a 100644
--- a/docs/guides/connection-adapters.md
+++ b/docs/guides/connection-adapters.md
@@ -11,7 +11,7 @@ Connection adapters handle the communication between your client and server for
 ### Server-Sent Events (SSE)
 
-SSE is the recommended adapter for most use cases. It provides reliable streaming with automatic reconnection. On the server side, use [`toServerSentEventsStream()`](../api/ai#toserversenteventsstreamstream-abortcontroller) or [`toStreamResponse()`](../api/ai#tostreamresponsestream-init) to convert your chat stream to SSE format.
+SSE is the recommended adapter for most use cases. It provides reliable streaming with automatic reconnection. On the server side, use [`toServerSentEventsStream()`](../api/ai#toserversenteventsstreamstream-abortcontroller) or [`toServerSentEventsResponse()`](../api/ai#toserversenteventsresponsestream-init) to convert your chat stream to SSE format.
 
 ```typescript
 import { useChat, fetchServerSentEvents } from "@tanstack/ai-react";
diff --git a/docs/guides/runtime-adapter-switching.md b/docs/guides/runtime-adapter-switching.md
index 7efdbec6..e8a1b409 100644
--- a/docs/guides/runtime-adapter-switching.md
+++ b/docs/guides/runtime-adapter-switching.md
@@ -13,7 +13,7 @@ Learn how to build interfaces where users can switch between LLM providers at ru
 With TanStack AI, the model is passed directly to the adapter factory function. This gives you full type safety and autocomplete at the point of definition:
 
 ```typescript
-import { chat, toStreamResponse } from '@tanstack/ai'
+import { chat, toServerSentEventsResponse } from '@tanstack/ai'
 import { anthropicText } from '@tanstack/ai-anthropic'
 import { openaiText } from '@tanstack/ai-openai'
@@ -59,7 +59,7 @@ Here's a complete example showing a multi-provider chat API:
 ```typescript
 import { createFileRoute } from '@tanstack/react-router'
-import { chat, maxIterations, toStreamResponse } from '@tanstack/ai'
+import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 import { anthropicText } from '@tanstack/ai-anthropic'
 import { geminiText } from '@tanstack/ai-gemini'
@@ -93,7 +93,7 @@ export const Route = createFileRoute('/api/chat')({
           abortController,
         })
 
-        return toStreamResponse(stream, { abortController })
+        return toServerSentEventsResponse(stream, { abortController })
       },
     },
   },
diff --git a/docs/guides/server-tools.md b/docs/guides/server-tools.md
index aabbd833..75f4d7cc 100644
--- a/docs/guides/server-tools.md
+++ b/docs/guides/server-tools.md
@@ -141,7 +141,7 @@ const searchProducts = searchProductsDef.server(async ({ query, limit = 10 }) =>
 Pass tools to the `chat` function:
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";
 import { getUserData, searchProducts } from "./tools";
@@ -154,7 +154,7 @@ export async function POST(request: Request) {
     tools: [getUserData, searchProducts],
   });
 
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/guides/streaming.md b/docs/guides/streaming.md
index 47581e7a..0c4e4eb1 100644
--- a/docs/guides/streaming.md
+++ b/docs/guides/streaming.md
@@ -27,10 +27,10 @@ for await (const chunk of stream) {
 ## Server-Side Streaming
 
-Convert the stream to an HTTP response using `toStreamResponse`:
+Convert the stream to an HTTP response using `toServerSentEventsResponse`:
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";
 
 export async function POST(request: Request) {
@@ -42,7 +42,7 @@ export async function POST(request: Request) {
   });
 
   // Convert to HTTP response with proper headers
-  return toStreamResponse(stream);
+  return toServerSentEventsResponse(stream);
 }
 ```
diff --git a/docs/guides/tool-approval.md b/docs/guides/tool-approval.md
index bcdff34c..6a194d1a 100644
--- a/docs/guides/tool-approval.md
+++ b/docs/guides/tool-approval.md
@@ -57,7 +57,7 @@ const sendEmail = sendEmailDef.server(async ({ to, subject, body }) => {
 On the server, tools with `needsApproval: true` will pause execution and wait for approval:
 
 ```typescript
-import { chat, toStreamResponse } from "@tanstack/ai";
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";
"@tanstack/ai-openai"; import { sendEmail } from "./tools"; @@ -70,7 +70,7 @@ export async function POST(request: Request) { tools: [sendEmail], }); - return toStreamResponse(stream); + return toServerSentEventsResponse(stream); } ``` diff --git a/docs/guides/tool-architecture.md b/docs/guides/tool-architecture.md index f018d776..f8fe01da 100644 --- a/docs/guides/tool-architecture.md +++ b/docs/guides/tool-architecture.md @@ -69,7 +69,7 @@ sequenceDiagram **Server (API Route):** ```typescript -import { chat, toStreamResponse } from "@tanstack/ai"; +import { chat, toServerSentEventsResponse } from "@tanstack/ai"; import { openaiText } from "@tanstack/ai-openai"; import { getWeather, sendEmail } from "./tools"; @@ -83,7 +83,7 @@ export async function POST(request: Request) { tools: [getWeather, sendEmail], // Tool definitions passed here }); - return toStreamResponse(stream); + return toServerSentEventsResponse(stream); } ``` diff --git a/docs/guides/tools.md b/docs/guides/tools.md index cd09d7d5..5474486a 100644 --- a/docs/guides/tools.md +++ b/docs/guides/tools.md @@ -174,7 +174,7 @@ const getWeatherServer = getWeatherDef.server(async (args) => { ### Server-Side ```typescript -import { chat, toStreamResponse } from "@tanstack/ai"; +import { chat, toServerSentEventsResponse } from "@tanstack/ai"; import { openaiText } from "@tanstack/ai-openai"; import { getWeatherDef } from "./tools"; @@ -193,7 +193,7 @@ export async function POST(request: Request) { tools: [getWeather], // Pass server tools }); - return toStreamResponse(stream); + return toServerSentEventsResponse(stream); } ``` diff --git a/docs/protocol/sse-protocol.md b/docs/protocol/sse-protocol.md index f8709d4c..b7d1f3c7 100644 --- a/docs/protocol/sse-protocol.md +++ b/docs/protocol/sse-protocol.md @@ -164,10 +164,10 @@ SSE provides automatic reconnection: ### Server-Side (Node.js/TypeScript) -TanStack AI provides `toServerSentEventsStream()` and `toStreamResponse()` utilities: +TanStack AI provides `toServerSentEventsStream()` and `toServerSentEventsResponse()` utilities: ```typescript -import { chat, toStreamResponse } from '@tanstack/ai'; +import { chat, toServerSentEventsResponse } from '@tanstack/ai'; import { openaiText } from '@tanstack/ai-openai'; export async function POST(request: Request) { @@ -179,11 +179,11 @@ export async function POST(request: Request) { }); // Automatically converts StreamChunks to SSE format - return toStreamResponse(stream); + return toServerSentEventsResponse(stream); } ``` -**What `toStreamResponse()` does:** +**What `toServerSentEventsResponse()` does:** 1. Creates a `ReadableStream` from the async iterable 2. Wraps each chunk as `data: {JSON}\n\n` 3. 
 3. Sends `data: [DONE]\n\n` at the end
diff --git a/docs/reference/functions/toServerSentEventsResponse.md b/docs/reference/functions/toServerSentEventsResponse.md
new file mode 100644
index 00000000..2094a791
--- /dev/null
+++ b/docs/reference/functions/toServerSentEventsResponse.md
@@ -0,0 +1,46 @@
+---
+id: toServerSentEventsResponse
+title: toServerSentEventsResponse
+---
+
+# Function: toServerSentEventsResponse()
+
+```ts
+function toServerSentEventsResponse(stream, init?): Response;
+```
+
+Defined in: [stream-to-response.ts:123](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream-to-response.ts#L123)
+
+Convert a StreamChunk async iterable to a Response in Server-Sent Events format
+
+This creates a Response that emits chunks in SSE format:
+- Each chunk is prefixed with "data: "
+- Each chunk is followed by "\n\n"
+- Stream ends with "data: [DONE]\n\n"
+
+## Parameters
+
+### stream
+
+`AsyncIterable`\<[`StreamChunk`](../type-aliases/StreamChunk.md)\>
+
+AsyncIterable of StreamChunks from chat()
+
+### init?
+
+`ResponseInit` & `object`
+
+Optional Response initialization options (including `abortController`)
+
+## Returns
+
+`Response`
+
+Response in Server-Sent Events format
+
+## Example
+
+```typescript
+const stream = chat({ adapter: openaiText(), model: "gpt-4o", messages: [...] });
+return toServerSentEventsResponse(stream, { abortController });
+```
diff --git a/docs/reference/functions/toStreamResponse.md b/docs/reference/functions/toStreamResponse.md
deleted file mode 100644
index 6e8c1cc7..00000000
--- a/docs/reference/functions/toStreamResponse.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-id: toStreamResponse
-title: toStreamResponse
----
-
-# ~~Function: toStreamResponse()~~
-
-```ts
-function toStreamResponse(stream, init?): Response;
-```
-
-Defined in: [stream-to-response.ts:213](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream-to-response.ts#L213)
-
-Create a streaming HTTP response from a StreamChunk async iterable
-Includes proper headers for Server-Sent Events
-
-## Parameters
-
-### stream
-
-`AsyncIterable`\<[`StreamChunk`](../type-aliases/StreamChunk.md)\>
-
-AsyncIterable of StreamChunks from chat()
-
-### init?
-
-`ResponseInit` & `object`
-
-Optional Response initialization options
-
-## Returns
-
-`Response`
-
-Response object with SSE headers and streaming body
-
-## Deprecated
-
-Use `toServerSentEventsStream` instead. This function will be removed in a future version.
-
-## Example
-
-```typescript
-export async function POST(request: Request) {
-  const { messages } = await request.json();
-  const abortController = new AbortController();
-  const stream = chat({
-    adapter: openaiText(),
-    model: "gpt-4o",
-    messages,
-    options: { abortSignal: abortController.signal }
-  });
-  return toStreamResponse(stream, undefined, abortController);
-}
-```
diff --git a/docs/reference/index.md b/docs/reference/index.md
index 106f3eaa..0666644a 100644
--- a/docs/reference/index.md
+++ b/docs/reference/index.md
@@ -146,6 +146,6 @@ title: "@tanstack/ai"
 - [toHttpStream](functions/toHttpStream.md)
 - [toolDefinition](functions/toolDefinition.md)
 - [toServerSentEventsStream](functions/toServerSentEventsStream.md)
-- [~~toStreamResponse~~](functions/toStreamResponse.md)
+- [toServerSentEventsResponse](functions/toServerSentEventsResponse.md)
 - [uiMessageToModelMessages](functions/uiMessageToModelMessages.md)
 - [untilFinishReason](functions/untilFinishReason.md)
diff --git a/examples/README.md b/examples/README.md
index 1cdc89aa..822d8ecb 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -308,7 +308,7 @@ All examples use SSE for real-time streaming:
 **Backend (TypeScript):**
 
 ```typescript
-import { chat, toStreamResponse } from '@tanstack/ai'
+import { chat, toServerSentEventsResponse } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 
 const stream = chat({
@@ -317,7 +317,7 @@ const stream = chat({
   messages,
 })
 
-return toStreamResponse(stream)
+return toServerSentEventsResponse(stream)
 ```
 
 **Backend (Python):**
diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts
index 550e373c..da5c9c26 100644
--- a/examples/ts-react-chat/src/routes/api.tanchat.ts
+++ b/examples/ts-react-chat/src/routes/api.tanchat.ts
@@ -3,7 +3,7 @@ import {
   chat,
   createChatOptions,
   maxIterations,
-  toServerSentEventsStream,
+  toServerSentEventsResponse,
 } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 import { ollamaText } from '@tanstack/ai-ollama'
@@ -122,17 +122,7 @@ export const Route = createFileRoute('/api/tanchat')({
           abortController,
           conversationId,
         })
-        const readableStream = toServerSentEventsStream(
-          stream,
-          abortController,
-        )
-        return new Response(readableStream, {
-          headers: {
-            'Content-Type': 'text/event-stream',
-            'Cache-Control': 'no-cache',
-            Connection: 'keep-alive',
-          },
-        })
+        return toServerSentEventsResponse(stream, { abortController })
       } catch (error: any) {
        console.error('[API Route] Error in chat request:', {
          message: error?.message,
diff --git a/examples/ts-solid-chat/src/routes/api.chat.ts b/examples/ts-solid-chat/src/routes/api.chat.ts
index 99500a21..0b73e29b 100644
--- a/examples/ts-solid-chat/src/routes/api.chat.ts
+++ b/examples/ts-solid-chat/src/routes/api.chat.ts
@@ -1,5 +1,5 @@
 import { createFileRoute } from '@tanstack/solid-router'
-import { chat, maxIterations, toServerSentEventsStream } from '@tanstack/ai'
+import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai'
 import { anthropicText } from '@tanstack/ai-anthropic'
 import { serverTools } from '@/lib/guitar-tools'
@@ -71,17 +71,7 @@ export const Route = createFileRoute('/api/chat')({
           abortController,
         })
 
-        const readableStream = toServerSentEventsStream(
-          stream,
-          abortController,
-        )
-        return new Response(readableStream, {
-          headers: {
-            'Content-Type': 'text/event-stream',
-            'Cache-Control': 'no-cache',
-            Connection: 'keep-alive',
-          },
-        })
+        return toServerSentEventsResponse(stream, { abortController })
       } catch (error: any) {
         // If request was aborted, return early (don't send error response)
         if (error.name === 'AbortError' || abortController.signal.aborted) {
diff --git a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts
index a6128809..9cd6eb88 100644
--- a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts
+++ b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts
@@ -2,7 +2,7 @@ import {
   chat,
   createChatOptions,
   maxIterations,
-  toServerSentEventsStream,
+  toServerSentEventsResponse,
 } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 import { ollamaText } from '@tanstack/ai-ollama'
@@ -116,14 +116,7 @@ export const POST: RequestHandler = async ({ request }) => {
       abortController,
     })
 
-    const readableStream = toServerSentEventsStream(stream, abortController)
-    return new Response(readableStream, {
-      headers: {
-        'Content-Type': 'text/event-stream',
-        'Cache-Control': 'no-cache',
-        Connection: 'keep-alive',
-      },
-    })
+    return toServerSentEventsResponse(stream, { abortController })
   } catch (error: any) {
     console.error('[API Route] Error in chat request:', {
       message: error?.message,
diff --git a/packages/typescript/ai-solid/README.md b/packages/typescript/ai-solid/README.md
index 2aaecb84..1cbdcf4e 100644
--- a/packages/typescript/ai-solid/README.md
+++ b/packages/typescript/ai-solid/README.md
@@ -157,7 +157,7 @@ Your backend should use the `chat()` method which **automatically handles tool e
 2. Use `chat()` to stream responses (with automatic tool execution):
 
 ```typescript
-import { chat, toStreamResponse } from '@tanstack/ai'
+import { chat, toServerSentEventsResponse } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 
 export async function POST(request: Request) {
@@ -172,7 +172,7 @@ export async function POST(request: Request) {
   })
 
   // Convert to HTTP streaming response with SSE headers
-  return toStreamResponse(stream)
+  return toServerSentEventsResponse(stream)
 }
 ```
@@ -311,7 +311,7 @@ function App() {
 ```typescript
 import express from 'express'
-import { chat, toStreamResponse } from '@tanstack/ai'
+import { chat, toServerSentEventsResponse } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 
 const app = express()
@@ -327,7 +327,7 @@ app.post('/api/chat', async (req, res) => {
     messages,
   })
 
-  const response = toStreamResponse(stream)
+  const response = toServerSentEventsResponse(stream)
 
   // Copy headers and stream to Express response
   response.headers.forEach((value, key) => {
@@ -352,7 +352,7 @@ app.listen(3000)
 ```typescript
 // app/api/chat/route.ts
-import { chat, toStreamResponse } from '@tanstack/ai'
+import { chat, toServerSentEventsResponse } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 
 export const runtime = 'edge'
@@ -361,7 +361,7 @@ export async function POST(req: Request) {
   const { messages } = await req.json()
 
   // One line!
-  return toStreamResponse(
+  return toServerSentEventsResponse(
     chat({
       adapter: openaiText(),
       model: 'gpt-4o',
@@ -375,7 +375,7 @@
 ```typescript
 import { createFileRoute } from '@tanstack/react-router'
-import { chat, toStreamResponse } from '@tanstack/ai'
+import { chat, toServerSentEventsResponse } from '@tanstack/ai'
 import { anthropicText } from '@tanstack/ai-anthropic'
 
 export const Route = createFileRoute('/api/chat')({
@@ -385,7 +385,7 @@ export const Route = createFileRoute('/api/chat')({
         const { messages } = await request.json()
 
         // One line with automatic tool execution!
-        return toStreamResponse(
+        return toServerSentEventsResponse(
           chat({
             adapter: anthropicText(),
             model: 'claude-sonnet-4-20250514',
diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts
index 9a4de386..0476457d 100644
--- a/packages/typescript/ai/src/index.ts
+++ b/packages/typescript/ai/src/index.ts
@@ -55,8 +55,9 @@ export { convertSchemaToJsonSchema } from './activities/chat/tools/schema-conver
 export {
   streamToText,
   toServerSentEventsStream,
-  toStreamResponse,
+  toServerSentEventsResponse,
   toHttpStream,
+  toHttpResponse,
 } from './stream-to-response'
 
 // Tool call management
diff --git a/packages/typescript/ai/src/stream-to-response.ts b/packages/typescript/ai/src/stream-to-response.ts
index fe158e6f..5771eed7 100644
--- a/packages/typescript/ai/src/stream-to-response.ts
+++ b/packages/typescript/ai/src/stream-to-response.ts
@@ -45,13 +45,6 @@ export async function streamToText(
 * @param stream - AsyncIterable of StreamChunks from chat()
 * @param abortController - Optional AbortController to abort when stream is cancelled
 * @returns ReadableStream in Server-Sent Events format
- *
- * @example
- * ```typescript
- * const stream = chat({ adapter: openaiText(), model: "gpt-4o", messages: [...] });
- * const readableStream = toServerSentEventsStream(stream);
- * // Use with Response, or any API that accepts ReadableStream
- * ```
 */
 export function toServerSentEventsStream(
   stream: AsyncIterable<StreamChunk>,
@@ -109,6 +102,52 @@ export function toServerSentEventsStream(
   })
 }
 
+/**
+ * Convert a StreamChunk async iterable to a Response in Server-Sent Events format
+ *
+ * This creates a Response that emits chunks in SSE format:
+ * - Each chunk is prefixed with "data: "
+ * - Each chunk is followed by "\n\n"
+ * - Stream ends with "data: [DONE]\n\n"
+ *
+ * @param stream - AsyncIterable of StreamChunks from chat()
+ * @param init - Optional Response initialization options (including `abortController`)
+ * @returns Response in Server-Sent Events format
+ *
+ * @example
+ * ```typescript
+ * const stream = chat({ adapter: openaiText(), model: "gpt-4o", messages: [...] });
+ * return toServerSentEventsResponse(stream, { abortController });
+ * ```
+ */
+export function toServerSentEventsResponse(
+  stream: AsyncIterable<StreamChunk>,
+  init?: ResponseInit & { abortController?: AbortController },
+): Response {
+  const { headers, abortController, ...responseInit } = init ?? {}
+
+  // Start with default SSE headers
+  const mergedHeaders = new Headers({
+    'Content-Type': 'text/event-stream',
+    'Cache-Control': 'no-cache',
+    Connection: 'keep-alive',
+  })
+
+  // Override with user headers if provided, handling all HeadersInit forms:
+  // Headers instance, string[][], or plain object
+  if (headers) {
+    const userHeaders = new Headers(headers)
+    userHeaders.forEach((value, key) => {
+      mergedHeaders.set(key, value)
+    })
+  }
+
+  return new Response(toServerSentEventsStream(stream, abortController), {
+    ...responseInit,
+    headers: mergedHeaders,
+  })
+}
+
 /**
 * Convert a StreamChunk async iterable to a ReadableStream in HTTP stream format (newline-delimited JSON)
 *
@@ -185,53 +224,29 @@ export function toHttpStream(
 }
 
 /**
- * Create a streaming HTTP response from a StreamChunk async iterable
- * Includes proper headers for Server-Sent Events
+ * Convert a StreamChunk async iterable to a Response in HTTP stream format (newline-delimited JSON)
 *
- * @deprecated Use `toServerSentEventsStream` instead. This function will be removed in a future version.
+ * This creates a Response that emits chunks in HTTP stream format:
+ * - Each chunk is JSON.stringify'd and followed by "\n"
+ * - No SSE formatting (no "data: " prefix)
+ *
+ * This format is compatible with `fetchHttpStream` connection adapter.
 *
 * @param stream - AsyncIterable of StreamChunks from chat()
- * @param init - Optional Response initialization options
- * @param abortController - Optional AbortController to abort when client disconnects
- * @returns Response object with SSE headers and streaming body
+ * @param init - Optional Response initialization options (including `abortController`)
+ * @returns Response in HTTP stream format (newline-delimited JSON)
 *
 * @example
 * ```typescript
- * export async function POST(request: Request) {
- *   const { messages } = await request.json();
- *   const abortController = new AbortController();
- *   const stream = chat({
- *     adapter: openaiText(),
- *     model: "gpt-4o",
- *     messages,
- *     options: { abortSignal: abortController.signal }
- *   });
- *   return toStreamResponse(stream, undefined, abortController);
- * }
+ * const stream = chat({ adapter: openaiText(), model: "gpt-4o", messages: [...] });
+ * return toHttpResponse(stream, { abortController });
 * ```
 */
-export function toStreamResponse(
+export function toHttpResponse(
   stream: AsyncIterable<StreamChunk>,
   init?: ResponseInit & { abortController?: AbortController },
 ): Response {
-  if (typeof console !== 'undefined') {
-    console.warn(
-      '`toStreamResponse` is deprecated. Use `toServerSentEventsStream` instead. Example:\n' +
-        '  const readableStream = toServerSentEventsStream(stream, abortController);\n' +
-        '  return new Response(readableStream, {\n' +
-        "    headers: { 'Content-Type': 'text/event-stream', 'Cache-Control': 'no-cache', Connection: 'keep-alive' }\n" +
-        '  });',
-    )
-  }
-
-  const { headers, abortController, ...responseInit } = init ?? {}
-  return new Response(toServerSentEventsStream(stream, abortController), {
-    ...responseInit,
-    headers: {
-      'Content-Type': 'text/event-stream',
-      'Cache-Control': 'no-cache',
-      Connection: 'keep-alive',
-      ...(headers || {}),
-    },
+  return new Response(toHttpStream(stream, init?.abortController), {
+    ...init,
   })
 }
diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts
index dd4f6e4e..98b0db43 100644
--- a/packages/typescript/ai/tests/stream-to-response.test.ts
+++ b/packages/typescript/ai/tests/stream-to-response.test.ts
@@ -1,7 +1,7 @@
 import { describe, it, expect, vi } from 'vitest'
 import {
   toServerSentEventsStream,
-  toStreamResponse,
+  toServerSentEventsResponse,
 } from '../src/stream-to-response'
 import type { StreamChunk } from '../src/types'
@@ -315,7 +315,7 @@ describe('toServerSentEventsStream', () => {
  })
 })
 
-describe('toStreamResponse', () => {
+describe('toServerSentEventsResponse', () => {
  it('should create Response with SSE headers', async () => {
    const chunks: Array<StreamChunk> = [
      {
@@ -330,7 +330,7 @@
    ]
    const stream = createMockStream(chunks)
 
-    const response = toStreamResponse(stream)
+    const response = toServerSentEventsResponse(stream)
 
    expect(response).toBeInstanceOf(Response)
    expect(response.headers.get('Content-Type')).toBe('text/event-stream')
@@ -341,7 +341,7 @@
  it('should allow custom headers', async () => {
    const chunks: Array<StreamChunk> = []
    const stream = createMockStream(chunks)
-    const response = toStreamResponse(stream, {
+    const response = toServerSentEventsResponse(stream, {
      headers: {
        'X-Custom-Header': 'custom-value',
      },
@@ -354,7 +354,7 @@
  it('should merge custom headers with SSE headers', async () => {
    const chunks: Array<StreamChunk> = []
    const stream = createMockStream(chunks)
-    const response = toStreamResponse(stream, {
+    const response = toServerSentEventsResponse(stream, {
      headers: {
        'X-Custom-Header': 'custom-value',
        'Cache-Control': 'custom-cache',
@@ -381,7 +381,7 @@
    ]
    const stream = createMockStream(chunks)
 
-    const response = toStreamResponse(stream, {
+    const response = toServerSentEventsResponse(stream, {
      abortController,
    })
@@ -399,7 +399,7 @@
  it('should handle status and statusText', async () => {
    const chunks: Array<StreamChunk> = []
    const stream = createMockStream(chunks)
-    const response = toStreamResponse(stream, {
+    const response = toServerSentEventsResponse(stream, {
      status: 201,
      statusText: 'Created',
    })
@@ -431,7 +431,7 @@
    ]
    const stream = createMockStream(chunks)
 
-    const response = toStreamResponse(stream)
+    const response = toServerSentEventsResponse(stream)
 
    if (!response.body) {
      throw new Error('Response body is null')
    }
@@ -449,7 +449,7 @@
  it('should handle undefined init parameter', async () => {
    const chunks: Array<StreamChunk> = []
    const stream = createMockStream(chunks)
-    const response = toStreamResponse(stream, undefined)
+    const response = toServerSentEventsResponse(stream, undefined)
 
    expect(response).toBeInstanceOf(Response)
    expect(response.headers.get('Content-Type')).toBe('text/event-stream')
@@ -458,7 +458,7 @@
  it('should handle empty init object', async () => {
    const chunks: Array<StreamChunk> = []
    const stream = createMockStream(chunks)
-    const response = toStreamResponse(stream, {})
+    const response = toServerSentEventsResponse(stream, {})
 
    expect(response).toBeInstanceOf(Response)
    expect(response.headers.get('Content-Type')).toBe('text/event-stream')
diff --git a/testing/panel/src/routes/api.addon-chat.ts b/testing/panel/src/routes/api.addon-chat.ts
index 1df624dc..e162fe22 100644
--- a/testing/panel/src/routes/api.addon-chat.ts
+++ b/testing/panel/src/routes/api.addon-chat.ts
@@ -1,5 +1,5 @@
 import { createFileRoute } from '@tanstack/react-router'
-import { chat, maxIterations, toServerSentEventsStream } from '@tanstack/ai'
+import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai'
 import { openaiText } from '@tanstack/ai-openai'
 import {
   getAvailableAddOnsToolDef,
@@ -67,17 +67,7 @@ export const Route = createFileRoute('/api/addon-chat')({
           abortController,
         })
-        const readableStream = toServerSentEventsStream(
-          stream,
-          abortController,
-        )
-        return new Response(readableStream, {
-          headers: {
-            'Content-Type': 'text/event-stream',
-            'Cache-Control': 'no-cache',
-            Connection: 'keep-alive',
-          },
-        })
+        return toServerSentEventsResponse(stream, { abortController })
       } catch (error: any) {
        console.error('[API Route] Error in addon-chat request:', error)
 
diff --git a/testing/panel/src/routes/api.chat.ts b/testing/panel/src/routes/api.chat.ts
index 6063c574..948d048b 100644
--- a/testing/panel/src/routes/api.chat.ts
+++ b/testing/panel/src/routes/api.chat.ts
@@ -5,7 +5,7 @@ import {
   chat,
   createChatOptions,
   maxIterations,
-  toServerSentEventsStream,
+  toServerSentEventsResponse,
 } from '@tanstack/ai'
 import { anthropicText } from '@tanstack/ai-anthropic'
 import { geminiText } from '@tanstack/ai-gemini'
@@ -231,17 +231,7 @@ export const Route = createFileRoute('/api/chat')({
           abortController,
         })
-        const readableStream = toServerSentEventsStream(
-          stream,
-          abortController,
-        )
-        return new Response(readableStream, {
-          headers: {
-            'Content-Type': 'text/event-stream',
-            'Cache-Control': 'no-cache',
-            Connection: 'keep-alive',
-          },
-        })
+        return toServerSentEventsResponse(stream, { abortController })
       } catch (error: any) {
        console.error('[API Route] Error in chat request:', {
          message: error?.message,