Skip to content

Commit 07cdfc3

Browse files
committed
Remove extra params from spanish too
1 parent d3a2174 commit 07cdfc3

14 files changed

+5
-88
lines changed

.github/workflows/test-github-models.yaml

Lines changed: 0 additions & 48 deletions
This file was deleted.

few_shot_examples.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,6 @@
4343
response = client.chat.completions.create(
4444
model=MODEL_NAME,
4545
temperature=0.7,
46-
n=1,
4746
messages=[
4847
{"role": "system", "content": SYSTEM_MESSAGE},
4948
{"role": "user", "content": "What is the capital of France?"},

http/chat_completion_azure.http

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,5 @@ Content-Type: application/json
55
{
66
"messages": [{"role":"system","content":"You are an AI assistant that answers questions with short clear answers."},
77
{"role":"user","content":"How fast is the Prius V?"}],
8-
"max_tokens": 800,
9-
"temperature": 0.7,
10-
"frequency_penalty": 0,
11-
"presence_penalty": 0,
12-
"top_p": 0.95,
13-
"stop": null
8+
"temperature": 0.7
149
}

http/chat_completion_ollama.http

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,5 @@ Content-Type: application/json
55
"model": "phi3.5:latest",
66
"messages": [{"role":"system","content":"You are an AI assistant that answers questions with short clear answers."},
77
{"role":"user","content":"How fast is the Prius V?"}],
8-
"max_tokens": 800,
98
"temperature": 0.7
109
}

http/rag_hybrid_azure.http

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,5 @@ Content-Type: application/json
1212
"content": "How fast is the Prius V?\n\nSources: vehicle | year | msrp | acceleration | mpg | class\n --- | --- | --- | --- | --- | --- |\nPrius (1st Gen) | 1997 | 24509.74 | 7.46 | 41.26 | Compact|\nPrius (2nd Gen) | 2000 | 26832.25 | 7.97 | 45.23 | Compact|\nPrius | 2004 | 20355.64 | 9.9 | 46.0 | Midsize|\nPrius (3rd Gen) | 2009 | 24641.18 | 9.6 | 47.98 | Compact|\nPrius alpha (V) | 2011 | 30588.35 | 10.0 | 72.92 | Midsize|\nPrius V | 2011 | 27272.28 | 9.51 | 32.93 | Midsize|\n Prius C | 2012 | 19006.62 | 9.35 | 50.0 | Compact|\n Prius PHV | 2012 | 32095.61 | 8.82 | 50.0 | Midsize|\n Prius C | 2013 | 19080.0 | 8.7 | 50.0 | Compact|\n Prius | 2013 | 24200.0 | 10.2 | 50.0 | Midsize|\n Prius Plug-in | 2013 | 32000.0 | 9.17 | 50.0 | Midsize"
1313
}
1414
],
15-
"max_tokens": 800,
16-
"temperature": 0.7,
17-
"frequency_penalty": 0,
18-
"presence_penalty": 0,
19-
"top_p": 0.95,
20-
"stop": null
15+
"temperature": 0.7
2116
}

prompt_engineering.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,6 @@
4545
response = client.chat.completions.create(
4646
model=MODEL_NAME,
4747
temperature=0.7,
48-
n=1,
4948
messages=[
5049
{"role": "system", "content": SYSTEM_MESSAGE},
5150
{"role": "user", "content": USER_MESSAGE},

spanish/chat.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,6 @@
3434
response = client.chat.completions.create(
3535
model=MODEL_NAME,
3636
temperature=0.7,
37-
n=1,
3837
messages=[
3938
{"role": "system", "content": "Eres un asistente útil que hace muchas referencias a gatos y usa emojis."},
4039
{"role": "user", "content": "Escribe un haiku sobre un gato hambriento que quiere atún"},

spanish/chat_async.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,7 @@ async def generate_response(location):
4444
),
4545
},
4646
],
47-
temperature=1,
48-
max_tokens=400,
49-
top_p=0.95,
50-
frequency_penalty=0,
51-
presence_penalty=0,
52-
stop=None,
47+
temperature=0.7,
5348
)
5449
print("Obtuve respuesta para ", location)
5550
return response.choices[0].message.content

spanish/chat_history.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -43,12 +43,7 @@
4343
response = client.chat.completions.create(
4444
model=MODEL_NAME,
4545
messages=messages,
46-
temperature=1,
47-
max_tokens=400,
48-
top_p=0.95,
49-
frequency_penalty=0,
50-
presence_penalty=0,
51-
stop=None,
46+
temperature=0.7,
5247
)
5348
bot_response = response.choices[0].message.content
5449
messages.append({"role": "assistant", "content": bot_response})

spanish/chat_history_stream.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -40,12 +40,7 @@
4040
response = client.chat.completions.create(
4141
model=MODEL_NAME,
4242
messages=messages,
43-
temperature=1,
44-
max_tokens=400,
45-
top_p=0.95,
46-
frequency_penalty=0,
47-
presence_penalty=0,
48-
stop=None,
43+
temperature=0.7,
4944
stream=True,
5045
)
5146

0 commit comments

Comments
 (0)