diff --git a/README.md b/README.md index e218f3f..eac4888 100644 --- a/README.md +++ b/README.md @@ -169,7 +169,7 @@ client = ZaiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -192,7 +192,7 @@ client = ZaiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'What is artificial intelligence?'}, @@ -228,7 +228,7 @@ client = ZaiClient(api_key="your-api-key") base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4v', + model='glm-4.6v', messages=[ { 'role': 'user', @@ -338,7 +338,7 @@ client = ZaiClient(api_key="your-api-key") try: response = client.chat.completions.create( - model="glm-4", + model="glm-4.6", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] diff --git a/README_CN.md b/README_CN.md index e8970dd..1347e63 100644 --- a/README_CN.md +++ b/README_CN.md @@ -107,7 +107,7 @@ client = ZhipuAiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model="glm-4", + model="glm-4.6", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] @@ -175,7 +175,7 @@ client = ZaiClient(api_key="your-api-key") # 创建对话 response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -198,7 +198,7 @@ client = ZaiClient(api_key="your-api-key") # 创建对话 response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 
'content': 'What is artificial intelligence?'}, @@ -234,7 +234,7 @@ client = ZaiClient(api_key="your-api-key") base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4v', + model='glm-4.6v', messages=[ { 'role': 'user', @@ -344,7 +344,7 @@ client = ZaiClient(api_key="your-api-key") # 请填写您自己的APIKey try: response = client.chat.completions.create( - model="glm-4", + model="glm-4.6", messages=[ {"role": "user", "content": "你好, Z.ai !"} ] diff --git a/examples/basic_usage.py b/examples/basic_usage.py index 2cac3cf..493e5de 100644 --- a/examples/basic_usage.py +++ b/examples/basic_usage.py @@ -126,7 +126,7 @@ def encode_image(image_path): base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4v', + model='glm-4.6v', messages=[ { 'role': 'user', diff --git a/pyproject.toml b/pyproject.toml index b48b928..8cac675 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "zai-sdk" -version = "0.0.4.3" +version = "0.1.0" description = "A SDK library for accessing big model apis from Z.ai" authors = ["Z.ai"] readme = "README.md" diff --git a/src/zai/_version.py b/src/zai/_version.py index 1718685..61516e7 100644 --- a/src/zai/_version.py +++ b/src/zai/_version.py @@ -1,2 +1,2 @@ __title__ = 'Z.ai' -__version__ = '0.0.4.3' +__version__ = '0.1.0' diff --git a/src/zai/types/chat/chat_completion_chunk.py b/src/zai/types/chat/chat_completion_chunk.py index 4c4ba43..59ff186 100644 --- a/src/zai/types/chat/chat_completion_chunk.py +++ b/src/zai/types/chat/chat_completion_chunk.py @@ -68,7 +68,7 @@ class ChoiceDelta(BaseModel): Attributes: content: Content delta role: Role of the message sender - reasoning_content: Reasoning content delta + reasoning_content: Reasoning content delta; it is recommended to include the model's reasoning_content in the next request to achieve better results in multi-turn conversations. 
tool_calls: List of tool call deltas audio: Audio completion chunk """ diff --git a/tests/integration_tests/test_chat.py b/tests/integration_tests/test_chat.py index 9a189e4..d2b6ea4 100644 --- a/tests/integration_tests/test_chat.py +++ b/tests/integration_tests/test_chat.py @@ -323,7 +323,7 @@ def test_completions_stream_with_tools(logging_conf): print(f'request_id:{request_id}') response = client.chat.completions.create( request_id=request_id, - model='glm-4v', # Fill in the model name to call + model='glm-4.6v', # Fill in the model name to call extra_body={'temperature': 0.5, 'max_tokens': 50}, messages=[ { @@ -361,7 +361,7 @@ def test_completions_vis_base64(test_file_path, logging_conf): print(f'request_id:{request_id}') response = client.chat.completions.create( request_id=request_id, - model='glm-4v', # Fill in the model name to call + model='glm-4.6v', # Fill in the model name to call extra_body={'temperature': 0.5, 'max_tokens': 50}, messages=[ { @@ -402,7 +402,7 @@ def test_async_completions(logging_conf): print(f'request_id:{request_id}') response = client.chat.completions.create( request_id=request_id, - model='glm-4v', # Fill in the model name to call + model='glm-4.6v', # Fill in the model name to call messages=[ { 'role': 'user',