
Commit f880f44

prettier: enforce semi colons, add trailingComma (vercel#529)
1 parent 2470658 commit f880f44

128 files changed: +2828 −2822 lines changed

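The Prettier config change that drives this reformat is one of the 128 files and is not shown in this excerpt. As a rough sketch, a config producing exactly these diffs (semicolons added, trailing commas in multi-line literals and argument lists) might look like the following; the file name and option values are assumptions inferred from the commit title and the diffs below, not taken from the commit itself:

```js
// Hypothetical .prettierrc.js — the actual config file is hidden in this view.
module.exports = {
  semi: true, // terminate statements with semicolons
  trailingComma: 'all', // trailing commas wherever valid, including function argument lists
};
```

Running `npx prettier --write .` with such a config would produce the kind of mechanical, behavior-neutral churn seen here: +2828/−2822 lines across 128 files.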

.changeset/popular-badgers-applaud.md (+1 −1)

````diff
@@ -2,4 +2,4 @@
 'ai': patch
 ---

-ai/react: fix: handle partial chunks in react getStreamedResponse
+ai/react: fix: handle partial chunks in react getStreamedResponse when using experimental_StreamData
````

.eslintrc.js (+4 −4)

````diff
@@ -4,7 +4,7 @@ module.exports = {
   extends: ['vercel-ai'],
   settings: {
     next: {
-      rootDir: ['apps/*/']
-    }
-  }
-}
+      rootDir: ['apps/*/'],
+    },
+  },
+};
````

docs/pages/docs/api-reference/ai-stream.mdx (+7 −7)

````diff
@@ -2,7 +2,7 @@
 title: AIStream
 ---

-import { OptionTable } from '@/components/table'
+import { OptionTable } from '@/components/table';

 # AIStream

@@ -32,7 +32,7 @@ This is an optional parameter which is an object that contains callback function

 ```tsx
 interface AIStreamParser {
-  (data: string): string | void
+  (data: string): string | void;
 }
 ```

@@ -45,23 +45,23 @@ This is an object that contains the following properties:
     [
       'onStart',
       '() => Promise<void>',
-      'An optional function that is called at the start of the stream processing.'
+      'An optional function that is called at the start of the stream processing.',
     ],
     [
       'onCompletion',
       '(completion: string) => Promise<void>',
-      "An optional function that is called for every completion. It's passed the completion as a string."
+      "An optional function that is called for every completion. It's passed the completion as a string.",
     ],
     [
       'onFinal',
       '(completion: string) => Promise<void>',
-      "An optional function that is called once for every request. It's passed the completion as a string. Differs from onCompletion when function calls are present."
+      "An optional function that is called once for every request. It's passed the completion as a string. Differs from onCompletion when function calls are present.",
     ],
     [
       'onToken',
       '(token: string) => Promise<void>',
-      "An optional function that is called for each token in the stream. It's passed the token as a string."
-    ]
+      "An optional function that is called for each token in the stream. It's passed the token as a string.",
+    ],
   ]}
 />

````

docs/pages/docs/api-reference/anthropic-stream.mdx (+9 −9)

````diff
@@ -23,33 +23,33 @@ The `AnthropicStream` function can be coupled with a `fetch` call to the Anthrop
 Here's a step-by-step example of how to implement this in Next.js:

 ```js filename="app/api/completion/route.ts"
-import { AnthropicStream, StreamingTextResponse } from 'ai'
+import { AnthropicStream, StreamingTextResponse } from 'ai';

-export const runtime = 'edge'
+export const runtime = 'edge';

 export async function POST(req: Request) {
-  const { prompt } = await req.json()
+  const { prompt } = await req.json();

   const response = await fetch('https://api.anthropic.com/v1/complete', {
     method: 'POST',
     headers: {
       'Content-Type': 'application/json',
-      'x-api-key': process.env.ANTHROPIC_API_KEY
+      'x-api-key': process.env.ANTHROPIC_API_KEY,
     },
     body: JSON.stringify({
       prompt,
       model: 'claude-v1',
       max_tokens_to_sample: 300,
       temperature: 0.9,
-      stream: true
-    })
-  })
+      stream: true,
+    }),
+  });

   // Convert the response into a friendly text-stream
-  const stream = AnthropicStream(response)
+  const stream = AnthropicStream(response);

   // Respond with the stream
-  return new StreamingTextResponse(stream)
+  return new StreamingTextResponse(stream);
 }
 ```

````

docs/pages/docs/api-reference/huggingface-stream.mdx (+10 −10)

````diff
@@ -37,15 +37,15 @@ The `HuggingFaceStream` function can be coupled with the Hugging Face Inference
 Here's a step-by-step example of how to implement `HuggingFaceStream`:

 ```tsx filename="app/api/completion/route.ts"
-import { HfInference } from '@huggingface/inference'
-import { HuggingFaceStream, StreamingTextResponse } from 'ai'
+import { HfInference } from '@huggingface/inference';
+import { HuggingFaceStream, StreamingTextResponse } from 'ai';

-export const runtime = 'edge'
+export const runtime = 'edge';

-const Hf = new HfInference(process.env.HUGGINGFACE_API_KEY)
+const Hf = new HfInference(process.env.HUGGINGFACE_API_KEY);

 export async function POST(req: Request) {
-  const { prompt } = await req.json()
+  const { prompt } = await req.json();

   // Initialize a text generation stream using Hugging Face Inference SDK
   const iter = await Hf.textGenerationStream({
@@ -55,15 +55,15 @@ export async function POST(req: Request) {
       max_new_tokens: 200,
       temperature: 0.5,
       repetition_penalty: 1,
-      return_full_text: false
-    }
-  })
+      return_full_text: false,
+    },
+  });

   // Convert the async generator into a readable stream
-  const stream = HuggingFaceStream(iter)
+  const stream = HuggingFaceStream(iter);

   // Return a StreamingTextResponse, enabling the client to consume the response
-  return new StreamingTextResponse(stream)
+  return new StreamingTextResponse(stream);
 }
 ```

````

docs/pages/docs/api-reference/langchain-stream.mdx (+12 −12)

````diff
@@ -29,33 +29,33 @@ The `LangChainCallbacks` object has properties which are compatible with [LangCh
 ## Example

 ```tsx filename="app/api/chat/route.ts"
-import { StreamingTextResponse, LangChainStream } from 'ai'
-import { ChatOpenAI } from 'langchain/chat_models/openai'
-import { AIMessage, HumanMessage } from 'langchain/schema'
+import { StreamingTextResponse, LangChainStream } from 'ai';
+import { ChatOpenAI } from 'langchain/chat_models/openai';
+import { AIMessage, HumanMessage } from 'langchain/schema';

-export const runtime = 'edge'
+export const runtime = 'edge';

 export async function POST(req: Request) {
-  const { messages } = await req.json()
-  const { stream, handlers, writer } = LangChainStream()
+  const { messages } = await req.json();
+  const { stream, handlers, writer } = LangChainStream();

   const llm = new ChatOpenAI({
-    streaming: true
-  })
+    streaming: true,
+  });

   llm
     .call(
       messages.map(m =>
         m.role == 'user'
           ? new HumanMessage(m.content)
-          : new AIMessage(m.content)
+          : new AIMessage(m.content),
       ),
       {},
-      [handlers]
+      [handlers],
     )
-    .catch(console.error)
+    .catch(console.error);

-  return new StreamingTextResponse(stream)
+  return new StreamingTextResponse(stream);
 }
 ```

````

docs/pages/docs/api-reference/openai-stream.mdx (+20 −20)

````diff
@@ -30,58 +30,58 @@ Below are some examples of how to use `OpenAIStream` with chat and completion mo
 ### Chat Model Example

 ```tsx
-import OpenAI from 'openai'
-import { OpenAIStream, StreamingTextResponse } from 'ai'
+import OpenAI from 'openai';
+import { OpenAIStream, StreamingTextResponse } from 'ai';

 const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY
-})
+  apiKey: process.env.OPENAI_API_KEY,
+});

-export const runtime = 'edge'
+export const runtime = 'edge';

 export async function POST(req: Request) {
-  const { messages } = await req.json()
+  const { messages } = await req.json();
   // Create a chat completion using OpenAI
   const response = await openai.chat.completions.create({
     model: 'gpt-4',
     stream: true,
-    messages
-  })
+    messages,
+  });

   // Transform the response into a readable stream
-  const stream = OpenAIStream(response)
+  const stream = OpenAIStream(response);

   // Return a StreamingTextResponse, which can be consumed by the client
-  return new StreamingTextResponse(stream)
+  return new StreamingTextResponse(stream);
 }
 ```

 ### Completion Model Example

 ```tsx
-import OpenAI from 'openai'
-import { OpenAIStream, StreamingTextResponse } from 'ai'
+import OpenAI from 'openai';
+import { OpenAIStream, StreamingTextResponse } from 'ai';

 const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY
-})
+  apiKey: process.env.OPENAI_API_KEY,
+});

-export const runtime = 'edge'
+export const runtime = 'edge';

 export async function POST(req: Request) {
-  const { prompt } = await req.json()
+  const { prompt } = await req.json();
   // Create a completion using OpenAI
   const response = await openai.completions.create({
     model: 'text-davinci-003',
     stream: true,
-    prompt
-  })
+    prompt,
+  });

   // Transform the response into a readable stream
-  const stream = OpenAIStream(response)
+  const stream = OpenAIStream(response);

   // Return a StreamingTextResponse, which can be consumed by the client
-  return new StreamingTextResponse(stream)
+  return new StreamingTextResponse(stream);
 }
 ```

````

docs/pages/docs/api-reference/prompts.mdx (+13 −13)

````diff
@@ -2,7 +2,7 @@
 title: ai/prompts
 ---

-import { Callout } from 'nextra-theme-docs'
+import { Callout } from 'nextra-theme-docs';

 # `ai/prompts`

@@ -19,39 +19,39 @@ The `ai/prompts` module contains functions to assist with converting `Message`'s
 Uses `<|prompter|>`, `<|endoftext|>`, and `<|assistant>` tokens. If a `Message` with an unsupported `role` is passed, an error will be thrown.

 ```ts filename="route.ts" {6}
-import { experimental_buildOpenAssistantPrompt } from 'ai/prompts'
+import { experimental_buildOpenAssistantPrompt } from 'ai/prompts';

-const { messages } = await req.json()
+const { messages } = await req.json();
 const response = Hf.textGenerationStream({
   model: 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
-  inputs: experimental_buildOpenAssistantPrompt(messages)
-})
+  inputs: experimental_buildOpenAssistantPrompt(messages),
+});
 ```

 ## `experimental_buildStarChatBetaPrompt`

 Uses `<|user|>`, `<|end|>`, `<|system|>`, and `<|assistant>` tokens. If a `Message` with an unsupported `role` is passed, an error will be thrown.

 ```ts filename="route.ts" {6}
-import { experimental_buildStarChatBetaPrompt } from 'ai/prompts'
+import { experimental_buildStarChatBetaPrompt } from 'ai/prompts';

-const { messages } = await req.json()
+const { messages } = await req.json();
 const response = Hf.textGenerationStream({
   model: 'HuggingFaceH4/starchat-beta',
-  inputs: experimental_buildStarChatBetaPrompt(messages)
-})
+  inputs: experimental_buildStarChatBetaPrompt(messages),
+});
 ```

 ## `experimental_buildLlama2Prompt`

 Uses LLama 2 chat tokens (`[INST]`) to create a prompt, learn more in the [Hugging Face Blog on how to prompt Llama 2](https://huggingface.co/blog/llama2#how-to-prompt-llama-2). If a `Message` with an unsupported `role` is passed, an error will be thrown.

 ```ts filename="route.ts" {6}
-import { experimental_buildLlama2Prompt } from 'ai/prompts'
+import { experimental_buildLlama2Prompt } from 'ai/prompts';

-const { messages } = await req.json()
+const { messages } = await req.json();
 const response = Hf.textGenerationStream({
   model: 'meta-llama/Llama-2-7b-chat-hf',
-  inputs: experimental_buildLlama2Prompt(messages)
-})
+  inputs: experimental_buildLlama2Prompt(messages),
+});
 ```
````
