Skip to content

Commit ea14480

Browse files
committed
Add nodriver to Gemini provider,
Add slim docker image with google-chrome usage, Add the new docker images to publish worklow, Update requirements.txt and pip requirements
1 parent 6ce493d commit ea14480

21 files changed

+240
-145
lines changed

.github/workflows/publish-workflow.yaml

+12
Original file line numberDiff line numberDiff line change
@@ -48,3 +48,15 @@ jobs:
4848
labels: ${{ steps.metadata.outputs.labels }}
4949
build-args: |
5050
G4F_VERSION=${{ github.ref_name }}
51+
- name: Build and push slim image
52+
uses: docker/build-push-action@v5
53+
with:
54+
context: .
55+
file: docker/Dockerfile-slim
56+
push: true
57+
tags: |
58+
hlohaus789/g4f:slim
59+
hlohaus789/g4f:${{ github.ref_name }}-slim
60+
labels: ${{ steps.metadata.outputs.labels }}
61+
build-args: |
62+
G4F_VERSION=${{ github.ref_name }}

docker-compose-slim.yml

+25
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
version: '3'
2+
3+
services:
4+
g4f-gui:
5+
container_name: g4f-gui
6+
image: hlohaus789/g4f:slim
7+
build:
8+
context: .
9+
dockerfile: docker/Dockerfile-slim
10+
command: python -m g4f.cli gui -debug
11+
volumes:
12+
- .:/app
13+
ports:
14+
- '8080:8080'
15+
g4f-api:
16+
container_name: g4f-api
17+
image: hlohaus789/g4f:slim
18+
build:
19+
context: .
20+
dockerfile: docker/Dockerfile-slim
21+
command: python -m g4f.cli api
22+
volumes:
23+
- .:/app
24+
ports:
25+
- '1337:1337'

docker/Dockerfile

+1
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ RUN apt-get -qqy update \
4040

4141
# Update entrypoint
4242
COPY docker/supervisor.conf /etc/supervisor/conf.d/selenium.conf
43+
COPY docker/supervisor-api.conf /etc/supervisor/conf.d/api.conf
4344
COPY docker/supervisor-gui.conf /etc/supervisor/conf.d/gui.conf
4445

4546
# If no gui

docker/Dockerfile-slim

+68
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
FROM python:bookworm
2+
3+
ARG G4F_VERSION
4+
ARG G4F_USER=g4f
5+
ARG G4F_USER_ID=1000
6+
ARG PYDANTIC_VERSION=1.8.1
7+
8+
ENV G4F_VERSION $G4F_VERSION
9+
ENV G4F_USER $G4F_USER
10+
ENV G4F_USER_ID $G4F_USER_ID
11+
ENV G4F_DIR /app
12+
13+
RUN apt-get update && apt-get upgrade -y \
14+
&& apt-get install -y git \
15+
&& apt-get install --quiet --yes --no-install-recommends \
16+
build-essential \
17+
# Add user and user group
18+
&& groupadd -g $G4F_USER_ID $G4F_USER \
19+
&& useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
20+
&& mkdir -p /var/log/supervisor \
21+
&& chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \
22+
&& echo "${G4F_USER}:${G4F_USER}" | chpasswd
23+
24+
USER $G4F_USER_ID
25+
WORKDIR $G4F_DIR
26+
27+
ENV HOME /home/$G4F_USER
28+
ENV PATH "${HOME}/.local/bin:${HOME}/.cargo/bin:${PATH}"
29+
30+
# Create app dir and copy the project's requirements file into it
31+
RUN mkdir -p $G4F_DIR
32+
COPY requirements-slim.txt $G4F_DIR
33+
34+
# Install rust toolchain
35+
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
36+
37+
# Upgrade pip for the latest features and install the project's Python dependencies.
38+
RUN python -m pip install --upgrade pip \
39+
&& pip install --no-cache-dir \
40+
Cython==0.29.22 \
41+
setuptools \
42+
# Install PyDantic
43+
&& pip install \
44+
-vvv \
45+
--no-cache-dir \
46+
--no-binary pydantic \
47+
--global-option=build_ext \
48+
--global-option=-j8 \
49+
pydantic==${PYDANTIC_VERSION} \
50+
&& pip install --no-cache-dir -r requirements-slim.txt \
51+
# Remove build packages
52+
&& pip uninstall --yes \
53+
Cython \
54+
setuptools
55+
56+
USER root
57+
58+
# Clean up build deps
59+
RUN rustup self uninstall -y \
60+
&& apt-get purge --auto-remove --yes \
61+
build-essential \
62+
&& apt-get clean \
63+
&& rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
64+
65+
USER $G4F_USER_ID
66+
67+
# Copy the entire package into the container.
68+
ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f

docker/supervisor-api.conf

+12
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
[program:g4f-api]
2+
priority=15
3+
command=python -m g4f.cli api
4+
directory=/app
5+
stopasgroup=true
6+
autostart=true
7+
autorestart=true
8+
9+
;Logs (all Hub activity redirected to stdout so it can be seen through "docker logs")
10+
redirect_stderr=true
11+
stdout_logfile=/dev/stdout
12+
stdout_logfile_maxbytes=0

docker/supervisor-gui.conf

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[program:g4f-gui]
22
priority=15
3-
command=python -m g4f.cli gui
3+
command=python -m g4f.cli gui -debug
44
directory=/app
55
stopasgroup=true
66
autostart=true

docker/supervisor.conf

+1-14
Original file line numberDiff line numberDiff line change
@@ -47,17 +47,4 @@ stderr_logfile_maxbytes=50MB
4747
stdout_logfile_backups=5
4848
stderr_logfile_backups=5
4949
stdout_capture_maxbytes=50MB
50-
stderr_capture_maxbytes=50MB
51-
52-
[program:g4f-api]
53-
priority=15
54-
command=python -m g4f.cli api
55-
directory=/app
56-
stopasgroup=true
57-
autostart=true
58-
autorestart=true
59-
60-
;Logs (all Hub activity redirected to stdout so it can be seen through "docker logs"
61-
redirect_stderr=true
62-
stdout_logfile=/dev/stdout
63-
stdout_logfile_maxbytes=0
50+
stderr_capture_maxbytes=50MB

docs/docker.md

+19-5
Original file line numberDiff line numberDiff line change
@@ -28,12 +28,22 @@
2828
```
2929

3030
2. **Build and Run with Docker Compose**
31+
32+
Pull the latest image and run a container with Google Chrome support:
33+
```bash
34+
docker pull hlohaus789/g4f
35+
docker-compose up -d
36+
```
37+
Or run the smaller docker image without Google Chrome:
3138
```bash
32-
docker-compose up --build
39+
docker-compose -f docker-compose-slim.yml up -d
3340
```
3441

35-
3. **Access the API**
36-
The server will be accessible at `http://localhost:1337`
42+
3. **Access the API or the GUI**
43+
44+
The api server will be accessible at `http://localhost:1337`
45+
46+
And the gui at this url: `http://localhost:8080`
3747

3848
### Non-Docker Method
3949
If you encounter issues with Docker, you can run the project directly using Python:
@@ -54,8 +64,12 @@ If you encounter issues with Docker, you can run the project directly using Pyth
5464
python -m g4f.api.run
5565
```
5666

57-
4. **Access the API**
58-
The server will be accessible at `http://localhost:1337`
67+
4. **Access the API or the GUI**
68+
69+
The api server will be accessible at `http://localhost:1337`
70+
71+
And the gui at this url: `http://localhost:8080`
72+
5973

6074
## Testing the API
6175
**You can test the API using curl or by creating a simple Python script:**

g4f/Provider/Cloudflare.py

+12-3
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from ..typing import AsyncResult, Messages, Cookies
88
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
99
from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
10+
from ..errors import ResponseStatusError
1011

1112
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
1213
label = "Cloudflare AI"
@@ -42,10 +43,14 @@ def get_models(cls) -> str:
4243
cls._args = asyncio.run(args)
4344
with Session(**cls._args) as session:
4445
response = session.get(cls.models_url)
45-
raise_for_status(response)
46+
cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
47+
try:
48+
raise_for_status(response)
49+
except ResponseStatusError as e:
50+
cls._args = None
51+
raise e
4652
json_data = response.json()
4753
cls.models = [model.get("name") for model in json_data.get("models")]
48-
cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
4954
return cls.models
5055

5156
@classmethod
@@ -74,8 +79,12 @@ async def create_async_generator(
7479
cls.api_endpoint,
7580
json=data,
7681
) as response:
77-
await raise_for_status(response)
7882
cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
83+
try:
84+
await raise_for_status(response)
85+
except ResponseStatusError as e:
86+
cls._args = None
87+
raise e
7988
async for line in response.iter_lines():
8089
if line.startswith(b'data: '):
8190
if line == b'data: [DONE]':

g4f/Provider/HuggingChat.py

+10-24
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,13 @@
44
import requests
55

66
try:
7-
from curl_cffi import requests as cf_reqs
7+
from curl_cffi import Session
88
has_curl_cffi = True
99
except ImportError:
1010
has_curl_cffi = False
1111
from ..typing import CreateResult, Messages
1212
from ..errors import MissingRequirementsError
13+
from ..requests.raise_for_status import raise_for_status
1314
from .base_provider import ProviderModelMixin, AbstractProvider
1415
from .helper import format_prompt
1516

@@ -18,7 +19,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
1819
working = True
1920
supports_stream = True
2021
default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
21-
22+
2223
models = [
2324
'meta-llama/Meta-Llama-3.1-70B-Instruct',
2425
'CohereForAI/c4ai-command-r-plus-08-2024',
@@ -30,7 +31,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
3031
'mistralai/Mistral-Nemo-Instruct-2407',
3132
'microsoft/Phi-3.5-mini-instruct',
3233
]
33-
34+
3435
model_aliases = {
3536
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
3637
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
@@ -43,15 +44,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
4344
"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
4445
}
4546

46-
@classmethod
47-
def get_model(cls, model: str) -> str:
48-
if model in cls.models:
49-
return model
50-
elif model in cls.model_aliases:
51-
return cls.model_aliases[model]
52-
else:
53-
return cls.default_model
54-
5547
@classmethod
5648
def create_completion(
5749
cls,
@@ -65,7 +57,7 @@ def create_completion(
6557
model = cls.get_model(model)
6658

6759
if model in cls.models:
68-
session = cf_reqs.Session()
60+
session = Session()
6961
session.headers = {
7062
'accept': '*/*',
7163
'accept-language': 'en',
@@ -82,20 +74,18 @@ def create_completion(
8274
'sec-fetch-site': 'same-origin',
8375
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
8476
}
85-
8677
json_data = {
8778
'model': model,
8879
}
89-
9080
response = session.post('https://huggingface.co/chat/conversation', json=json_data)
91-
if response.status_code != 200:
92-
raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}")
81+
raise_for_status(response)
9382

9483
conversationId = response.json().get('conversationId')
9584

9685
# Get the data response and parse it properly
9786
response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11')
98-
87+
raise_for_status(response)
88+
9989
# Split the response content by newlines and parse each line as JSON
10090
try:
10191
json_data = None
@@ -156,6 +146,7 @@ def create_completion(
156146
headers=headers,
157147
files=files,
158148
)
149+
raise_for_status(response)
159150

160151
full_response = ""
161152
for line in response.iter_lines():
@@ -182,9 +173,4 @@ def create_completion(
182173
full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip()
183174

184175
if not stream:
185-
yield full_response
186-
187-
@classmethod
188-
def supports_model(cls, model: str) -> bool:
189-
"""Check if the model is supported by the provider."""
190-
return model in cls.models or model in cls.model_aliases
176+
yield full_response

0 commit comments

Comments
 (0)