Skip to content

Commit f4ec4df

Browse files
committed
Initial commit
0 parents  commit f4ec4df

File tree

271 files changed

+34589
-0
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

271 files changed

+34589
-0
lines changed

.env.local.example

+9
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
# Supabase Public
2+
NEXT_PUBLIC_SUPABASE_URL=
3+
NEXT_PUBLIC_SUPABASE_ANON_KEY=
4+
5+
# Supabase Private
6+
SUPABASE_SERVICE_ROLE_KEY=
7+
8+
# Ollama
9+
NEXT_PUBLIC_OLLAMA_URL=http://localhost:11434 # Default

.eslintrc.json

+25
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
{
2+
"$schema": "https://json.schemastore.org/eslintrc",
3+
"root": true,
4+
"extends": [
5+
"next/core-web-vitals",
6+
"prettier",
7+
"plugin:tailwindcss/recommended"
8+
],
9+
"plugins": ["tailwindcss"],
10+
"rules": {
11+
"tailwindcss/no-custom-classname": "off"
12+
},
13+
"settings": {
14+
"tailwindcss": {
15+
"callees": ["cn", "cva"],
16+
"config": "tailwind.config.js"
17+
}
18+
},
19+
"overrides": [
20+
{
21+
"files": ["*.ts", "*.tsx"],
22+
"parser": "@typescript-eslint/parser"
23+
}
24+
]
25+
}

.github/funding.yaml

+3
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
# If you find my open-source work helpful, please consider sponsoring me!
2+
3+
github: mckaywrigley

.gitignore

+40
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2+
3+
# dependencies
4+
/node_modules
5+
/.pnp
6+
.pnp.js
7+
.yarn/install-state.gz
8+
9+
# testing
10+
/coverage
11+
12+
# next.js
13+
/.next/
14+
/out/
15+
16+
# production
17+
/build
18+
19+
# misc
20+
.DS_Store
21+
*.pem
22+
23+
# debug
24+
npm-debug.log*
25+
yarn-debug.log*
26+
yarn-error.log*
27+
28+
# local env files
29+
.env
30+
.env*.local
31+
32+
# vercel
33+
.vercel
34+
35+
# typescript
36+
*.tsbuildinfo
37+
next-env.d.ts
38+
39+
seed.sql
40+
.VSCodeCounter

.husky/pre-commit

+4
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
#!/usr/bin/env sh
2+
. "$(dirname -- "$0")/_/husky.sh"
3+
4+
npm run lint:fix && npm run format:write && git add .

README.md

+82
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
# Chatbot UI
2+
3+
The open-source AI chat app for everyone.
4+
5+
![Chatbot UI](./public/readme/screenshot.png)
6+
7+
## Demo
8+
9+
View the latest demo [here](https://twitter.com/mckaywrigley).
10+
11+
## Support
12+
13+
If you find Chatbot UI useful, please consider [sponsoring](https://github.com/mckaywrigley?tab=sponsoring) me :)
14+
15+
## Quickstart
16+
17+
### 1. Clone the repo
18+
19+
```bash
20+
git clone https://github.com/mckaywrigley/chatbot-ui.git
21+
```
22+
23+
### 2. Install dependencies
24+
25+
```bash
26+
npm install
27+
```
28+
29+
### 3. Install Supabase & run locally
30+
31+
1. Install Supabase CLI
32+
33+
```bash
34+
brew install supabase/tap/supabase
35+
```
36+
37+
2. Start Supabase
38+
39+
```bash
40+
supabase start
41+
```
42+
43+
### 4. Install Ollama (for local models)
44+
45+
Follow the instructions [here](https://github.com/jmorganca/ollama#macos)
46+
47+
### 5. Fill in secrets
48+
49+
1. .env
50+
51+
```bash
52+
cp .env.local.example .env.local
53+
```
54+
55+
Get the required values by running:
56+
57+
```bash
58+
supabase status
59+
```
60+
61+
2. sql
62+
63+
In the 1st migration file `20240108234540_setup.sql` you will need to replace 2 values:
64+
65+
- `project_url` (line 53): This can remain unchanged if you don't change your `config.toml` file.
66+
- `service_role_key` (line 54): You got this value from running `supabase status` in step 5.1.
67+
68+
You will also need to fill in the value for `project_url`.
69+
70+
### 6. Run app locally
71+
72+
```bash
73+
npm run chat
74+
```
75+
76+
## Contributing
77+
78+
We are working on a guide for contributing.
79+
80+
## Contact
81+
82+
Message Mckay on [Twitter/X](https://twitter.com/mckaywrigley)

app/api/chat/anthropic/route.ts

+47
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
2+
import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
3+
import { ChatSettings } from "@/types"
4+
import Anthropic from "@anthropic-ai/sdk"
5+
import { AnthropicStream, StreamingTextResponse } from "ai"
6+
7+
export const runtime = "edge"
8+
9+
export async function POST(request: Request) {
10+
const json = await request.json()
11+
const { chatSettings, messages } = json as {
12+
chatSettings: ChatSettings
13+
messages: any[]
14+
}
15+
16+
try {
17+
const profile = await getServerProfile()
18+
19+
checkApiKey(profile.anthropic_api_key, "Anthropic")
20+
21+
let ANTHROPIC_FORMATTED_MESSAGES: any = messages.slice(1)
22+
23+
const anthropic = new Anthropic({
24+
apiKey: profile.anthropic_api_key || ""
25+
})
26+
27+
const response = await anthropic.beta.messages.create({
28+
model: chatSettings.model,
29+
messages: ANTHROPIC_FORMATTED_MESSAGES,
30+
temperature: chatSettings.temperature,
31+
system: messages[0].content,
32+
max_tokens:
33+
CHAT_SETTING_LIMITS[chatSettings.model].MAX_TOKEN_OUTPUT_LENGTH,
34+
stream: true
35+
})
36+
37+
const stream = AnthropicStream(response)
38+
39+
return new StreamingTextResponse(stream)
40+
} catch (error: any) {
41+
const errorMessage = error.message || "An unexpected error occurred"
42+
const errorCode = error.status || 500
43+
return new Response(JSON.stringify({ message: errorMessage }), {
44+
status: errorCode
45+
})
46+
}
47+
}

app/api/chat/azure/route.ts

+74
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
2+
import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
3+
import { ChatAPIPayload } from "@/types"
4+
import { OpenAIStream, StreamingTextResponse } from "ai"
5+
import OpenAI from "openai"
6+
import { ChatCompletionCreateParamsBase } from "openai/resources/chat/completions.mjs"
7+
8+
export const runtime = "edge"
9+
10+
export async function POST(request: Request) {
11+
const json = await request.json()
12+
const { chatSettings, messages } = json as ChatAPIPayload
13+
14+
try {
15+
const profile = await getServerProfile()
16+
17+
checkApiKey(profile.azure_openai_api_key, "Azure")
18+
19+
const ENDPOINT = profile.azure_openai_endpoint
20+
const KEY = profile.azure_openai_api_key
21+
22+
let DEPLOYMENT_ID = ""
23+
switch (chatSettings.model) {
24+
case "gpt-3.5-turbo-1106":
25+
DEPLOYMENT_ID = profile.azure_openai_35_turbo_id || ""
26+
break
27+
case "gpt-4-1106-preview":
28+
DEPLOYMENT_ID = profile.azure_openai_45_turbo_id || ""
29+
break
30+
case "gpt-4-vision-preview":
31+
DEPLOYMENT_ID = profile.azure_openai_45_vision_id || ""
32+
break
33+
default:
34+
return new Response(JSON.stringify({ message: "Model not found" }), {
35+
status: 400
36+
})
37+
}
38+
39+
if (!ENDPOINT || !KEY || !DEPLOYMENT_ID) {
40+
return new Response(
41+
JSON.stringify({ message: "Azure resources not found" }),
42+
{
43+
status: 400
44+
}
45+
)
46+
}
47+
48+
const azureOpenai = new OpenAI({
49+
apiKey: KEY,
50+
baseURL: `${ENDPOINT}/openai/deployments/${DEPLOYMENT_ID}`,
51+
defaultQuery: { "api-version": "2023-07-01-preview" },
52+
defaultHeaders: { "api-key": KEY }
53+
})
54+
55+
const response = await azureOpenai.chat.completions.create({
56+
model: DEPLOYMENT_ID as ChatCompletionCreateParamsBase["model"],
57+
messages: messages as ChatCompletionCreateParamsBase["messages"],
58+
temperature: chatSettings.temperature,
59+
max_tokens:
60+
CHAT_SETTING_LIMITS[chatSettings.model].MAX_TOKEN_OUTPUT_LENGTH,
61+
stream: true
62+
})
63+
64+
const stream = OpenAIStream(response)
65+
66+
return new StreamingTextResponse(stream)
67+
} catch (error: any) {
68+
const errorMessage = error.error?.message || "An unexpected error occurred"
69+
const errorCode = error.status || 500
70+
return new Response(JSON.stringify({ message: errorMessage }), {
71+
status: errorCode
72+
})
73+
}
74+
}

app/api/chat/google/route.ts

+72
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
2+
import { ChatSettings } from "@/types"
3+
import { GoogleGenerativeAI } from "@google/generative-ai"
4+
5+
export const runtime = "edge"
6+
7+
export async function POST(request: Request) {
8+
const json = await request.json()
9+
const { chatSettings, messages } = json as {
10+
chatSettings: ChatSettings
11+
messages: any[]
12+
}
13+
14+
try {
15+
const profile = await getServerProfile()
16+
17+
checkApiKey(profile.google_gemini_api_key, "Google")
18+
19+
const genAI = new GoogleGenerativeAI(profile.google_gemini_api_key || "")
20+
const googleModel = genAI.getGenerativeModel({ model: chatSettings.model })
21+
22+
if (chatSettings.model === "gemini-pro") {
23+
const lastMessage = messages.pop()
24+
25+
const chat = googleModel.startChat({
26+
history: messages,
27+
generationConfig: {
28+
temperature: chatSettings.temperature
29+
}
30+
})
31+
32+
const response = await chat.sendMessageStream(lastMessage.parts)
33+
34+
const encoder = new TextEncoder()
35+
const readableStream = new ReadableStream({
36+
async start(controller) {
37+
for await (const chunk of response.stream) {
38+
const chunkText = chunk.text()
39+
controller.enqueue(encoder.encode(chunkText))
40+
}
41+
controller.close()
42+
}
43+
})
44+
45+
return new Response(readableStream, {
46+
headers: { "Content-Type": "text/plain" }
47+
})
48+
} else if (chatSettings.model === "gemini-pro-vision") {
49+
// FIX: Hacky until chat messages are supported
50+
const HACKY_MESSAGE = messages[messages.length - 1]
51+
52+
const result = await googleModel.generateContent([
53+
HACKY_MESSAGE.prompt,
54+
HACKY_MESSAGE.imageParts
55+
])
56+
57+
const response = result.response
58+
59+
const text = response.text()
60+
61+
return new Response(text, {
62+
headers: { "Content-Type": "text/plain" }
63+
})
64+
}
65+
} catch (error: any) {
66+
const errorMessage = error.error?.message || "An unexpected error occurred"
67+
const errorCode = error.status || 500
68+
return new Response(JSON.stringify({ message: errorMessage }), {
69+
status: errorCode
70+
})
71+
}
72+
}

0 commit comments

Comments
 (0)