diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index ef034ca494..733cd5d192 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -38,6 +38,7 @@ class LLMType(Enum):
     OPENROUTER = "openrouter"
     BEDROCK = "bedrock"
     ARK = "ark"  # https://www.volcengine.com/docs/82379/1263482#python-sdk
+    ZTEAI = "zte"
 
     def __missing__(self, key):
         return self.OPENAI
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index c90f5774ab..744e8d5076 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -19,6 +19,8 @@
 from metagpt.provider.anthropic_api import AnthropicLLM
 from metagpt.provider.bedrock_api import BedrockLLM
 from metagpt.provider.ark_api import ArkLLM
+from metagpt.provider.zteai_api import ZTEaiLLM
+
 
 __all__ = [
     "GeminiLLM",
@@ -34,4 +36,5 @@
     "AnthropicLLM",
     "BedrockLLM",
     "ArkLLM",
+    "ZTEaiLLM",
 ]
diff --git a/metagpt/provider/zteai_api.py b/metagpt/provider/zteai_api.py
new file mode 100644
index 0000000000..327601bdf9
--- /dev/null
+++ b/metagpt/provider/zteai_api.py
@@ -0,0 +1,72 @@
+import json
+from typing import Optional, Union
+
+import aiohttp
+
+from metagpt.configs.llm_config import LLMConfig, LLMType
+from metagpt.const import LLM_API_TIMEOUT, USE_CONFIG_TIMEOUT
+from metagpt.provider.base_llm import BaseLLM
+from metagpt.provider.llm_provider_registry import register_provider
+
+
+async def ZTEAI(querytext, appid, apikey, numb, token, modeltype, timeout=LLM_API_TIMEOUT):
+    """Call the ZTE AI open platform chat endpoint and return the generated text."""
+    url = "https://rdcloud.zte.com.cn/zte-studio-ai-platform/openapi/v1/chat"
+    headers = {
+        "Content-Type": "application/json",
+        # The platform authenticates with a bearer token of the form "<appid>-<apikey>".
+        "Authorization": "Bearer " + appid + "-" + apikey,
+        "X-Emp-No": numb,
+        "X-Auth-Value": token,
+    }
+    data = {"chatUuid": "", "chatName": "", "stream": False, "keep": True, "text": querytext, "model": modeltype}
+    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
+        async with session.post(url, headers=headers, json=data) as response:
+            nowresult = await response.text()
+            # The generated answer lives under the "bo.result" key of the response envelope.
+            return json.loads(nowresult)["bo"]["result"]
+
+
+@register_provider(LLMType.ZTEAI)
+class ZTEaiLLM(BaseLLM):
+    def __init__(self, config: LLMConfig):
+        self.config = config
+
+    async def ask(self, msg: str, timeout=USE_CONFIG_TIMEOUT) -> str:
+        rsp = await ZTEAI(
+            msg,
+            self.config.app_id,
+            self.config.api_key,
+            self.config.domain,
+            self.config.access_key,
+            self.config.model,
+            timeout=self.get_timeout(timeout),
+        )
+        return rsp
+
+    async def aask(
+        self,
+        msg: str,
+        system_msgs: Optional[list[str]] = None,
+        format_msgs: Optional[list[dict[str, str]]] = None,
+        images: Optional[Union[str, list[str]]] = None,
+        timeout=USE_CONFIG_TIMEOUT,
+    ) -> str:
+        return await self.ask(msg, timeout=self.get_timeout(timeout))
+
+    async def _achat_completion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT):
+        pass
+
+    async def acompletion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT):
+        """Dummy implementation of the abstract method in BaseLLM."""
+        return []
+
+    async def _achat_completion_stream(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> str:
+        pass
+
+    async def acompletion_text(self, messages: list[dict], stream=False, timeout=USE_CONFIG_TIMEOUT) -> str:
+        """Dummy implementation of the abstract method in BaseLLM."""
+        return ""
+
+    def get_timeout(self, timeout: int) -> int:
+        return timeout or LLM_API_TIMEOUT
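
A minimal usage sketch of the new provider, assuming `LLMConfig` already exposes the `app_id`, `domain`, and `access_key` fields that `ZTEaiLLM.ask` reads; all credential and model values below are placeholders. Note that `domain` is sent as the `X-Emp-No` header and `access_key` as the `X-Auth-Value` header.

```python
import asyncio

from metagpt.configs.llm_config import LLMConfig
from metagpt.provider.zteai_api import ZTEaiLLM


async def main():
    # Placeholder credentials; domain maps to the X-Emp-No header and
    # access_key to the X-Auth-Value header inside ZTEAI().
    config = LLMConfig(
        api_type="zte",
        app_id="YOUR_APP_ID",
        api_key="YOUR_API_KEY",
        domain="YOUR_EMP_NO",
        access_key="YOUR_AUTH_VALUE",
        model="YOUR_MODEL_NAME",
    )
    llm = ZTEaiLLM(config)
    print(await llm.aask("Hello"))


asyncio.run(main())
```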