
Commit 0c70a2f

Merge pull request #9 from Azure-Samples/rmoptions
Remove rarely used options
2 parents: 5c136e2 + 07cdfc3

20 files changed: +10 -114 lines

.github/workflows/test-github-models.yaml

Lines changed: 0 additions & 48 deletions
This file was deleted.

chat.py

Lines changed: 0 additions & 1 deletion
@@ -34,7 +34,6 @@
 response = client.chat.completions.create(
     model=MODEL_NAME,
     temperature=0.7,
-    n=1,
     messages=[
         {"role": "system", "content": "You are a helpful assistant that makes lots of cat references and uses emojis."},
         {"role": "user", "content": "Write a haiku about a hungry cat who wants tuna"},

chat_async.py

Lines changed: 1 addition & 6 deletions
@@ -42,12 +42,7 @@ async def generate_response(location):
                 "content": f"Name a single place I should visit on my trip to {location} and describe in one sentence",
             },
         ],
-        temperature=1,
-        max_tokens=400,
-        top_p=0.95,
-        frequency_penalty=0,
-        presence_penalty=0,
-        stop=None,
+        temperature=0.7,
     )
     print("Got response for ", location)
     return response.choices[0].message.content
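
A coroutine like generate_response is typically fanned out with asyncio.gather so the requests run concurrently. A minimal driver sketch, assuming generate_response as defined in this file (the location list is illustrative):

import asyncio

async def main():
    # Fire several requests at once; each coroutine awaits its own completion.
    results = await asyncio.gather(
        generate_response("Tokyo"),
        generate_response("Berlin"),
        generate_response("Mexico City"),
    )
    for answer in results:
        print(answer)

asyncio.run(main())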

chat_history.py

Lines changed: 1 addition & 6 deletions
@@ -40,12 +40,7 @@
     response = client.chat.completions.create(
         model=MODEL_NAME,
         messages=messages,
-        temperature=1,
-        max_tokens=400,
-        top_p=0.95,
-        frequency_penalty=0,
-        presence_penalty=0,
-        stop=None,
+        temperature=0.5,
     )
     bot_response = response.choices[0].message.content
     messages.append({"role": "assistant", "content": bot_response})
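
The surrounding loop is not shown in this hunk; a sketch of the usual chat-history pattern, assuming the client and MODEL_NAME set up earlier in the file and a messages list seeded with a system prompt (the prompt text is an assumption):

messages = [{"role": "system", "content": "You are a helpful assistant."}]  # assumed seed
while True:
    question = input("\nYour question: ")
    messages.append({"role": "user", "content": question})  # keep the user turn in history
    response = client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        temperature=0.5,
    )
    bot_response = response.choices[0].message.content
    messages.append({"role": "assistant", "content": bot_response})  # and the reply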

chat_history_stream.py

Lines changed: 1 addition & 7 deletions
@@ -40,13 +40,7 @@
     response = client.chat.completions.create(
         model=MODEL_NAME,
         messages=messages,
-        temperature=1,
-        max_tokens=400,
-        top_p=0.95,
-        frequency_penalty=0,
-        presence_penalty=0,
-        stop=None,
-        stream=True,
+        temperature=0.7,
     )

     print("\nAnswer: ")

chat_safety.py

Lines changed: 0 additions & 2 deletions
@@ -32,8 +32,6 @@
     response = client.chat.completions.create(
         model=MODEL_NAME,
         temperature=0.7,
-        max_tokens=100,
-        n=1,
         messages=[
             {
                 "role": "system",

chat_stream.py

Lines changed: 2 additions & 4 deletions
@@ -28,11 +28,9 @@
 MODEL_NAME = os.environ["OPENAI_MODEL"]


-completion = client.chat.completions.create(
+completion_stream = client.chat.completions.create(
     model=MODEL_NAME,
     temperature=0.7,
-    max_tokens=500,
-    n=1,
     messages=[
         {"role": "system", "content": "You are a helpful assistant that makes lots of cat references and uses emojis."},
         {"role": "user", "content": "please write a haiku about a hungry cat that wants tuna"},
@@ -41,7 +39,7 @@
 )

 print(f"Response from {API_HOST}: \n")
-for event in completion:
+for event in completion_stream:
     if event.choices:
         content = event.choices[0].delta.content
         if content:
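
The rename from completion to completion_stream is a readability fix: with stream=True, the call returns an iterator of chunk events rather than a finished completion. A minimal sketch, assuming stream=True appears in the unshown lines of the call:

completion_stream = client.chat.completions.create(
    model=MODEL_NAME,
    temperature=0.7,
    stream=True,  # returns an iterable of chunk events, not a single completion
    messages=[{"role": "user", "content": "please write a haiku about a hungry cat that wants tuna"}],
)
for event in completion_stream:
    if event.choices:
        content = event.choices[0].delta.content  # each chunk carries a small text delta
        if content:
            print(content, end="", flush=True)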

few_shot_examples.py

Lines changed: 0 additions & 1 deletion
@@ -43,7 +43,6 @@
 response = client.chat.completions.create(
     model=MODEL_NAME,
     temperature=0.7,
-    n=1,
     messages=[
         {"role": "system", "content": SYSTEM_MESSAGE},
         {"role": "user", "content": "What is the capital of France?"},

http/chat_completion_azure.http

Lines changed: 1 addition & 6 deletions
@@ -5,10 +5,5 @@ Content-Type: application/json
 {
 "messages": [{"role":"system","content":"You are an AI assistant that answers questions with short clear answers."},
 {"role":"user","content":"How fast is the Prius V?"}],
-"max_tokens": 800,
-"temperature": 0.7,
-"frequency_penalty": 0,
-"presence_penalty": 0,
-"top_p": 0.95,
-"stop": null
+"temperature": 0.7
 }
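
The same trimmed request body, sent from Python with requests; the resource name, deployment name, api-version, and key handling below are placeholders, not part of this repo:

import os
import requests

# Placeholders: substitute your own Azure OpenAI resource and deployment names.
url = (
    "https://YOUR-RESOURCE.openai.azure.com/openai/deployments/"
    "YOUR-DEPLOYMENT/chat/completions?api-version=2024-02-01"
)
body = {
    "messages": [
        {"role": "system", "content": "You are an AI assistant that answers questions with short clear answers."},
        {"role": "user", "content": "How fast is the Prius V?"},
    ],
    "temperature": 0.7,
}
resp = requests.post(url, json=body, headers={"api-key": os.environ["AZURE_OPENAI_KEY"]})
print(resp.json()["choices"][0]["message"]["content"])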

http/chat_completion_ollama.http

Lines changed: 0 additions & 1 deletion
@@ -5,6 +5,5 @@ Content-Type: application/json
 "model": "phi3.5:latest",
 "messages": [{"role":"system","content":"You are an AI assistant that answers questions with short clear answers."},
 {"role":"user","content":"How fast is the Prius V?"}],
-"max_tokens": 800,
 "temperature": 0.7
 }
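
Ollama exposes an OpenAI-compatible endpoint at /v1, so the same request can also be made with the openai package; the api_key value below is a dummy, since Ollama does not check it:

import openai

# Ollama serves an OpenAI-compatible API locally; any non-empty key is accepted.
client = openai.OpenAI(base_url="http://localhost:11434/v1", api_key="nokeyneeded")
response = client.chat.completions.create(
    model="phi3.5:latest",
    temperature=0.7,
    messages=[
        {"role": "system", "content": "You are an AI assistant that answers questions with short clear answers."},
        {"role": "user", "content": "How fast is the Prius V?"},
    ],
)
print(response.choices[0].message.content)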
