|
10 | 10 |
|
11 | 11 | from sqlalchemy.orm import identity |
12 | 12 |
|
13 | | -from ..utils.globals import set_language |
| 13 | +from ..utils.globals import set_language, apply_llm_env_defaults |
14 | 14 | from ..utils.auth_utils import extract_and_store_api_key |
15 | 15 | import server |
16 | 16 | from aiohttp import web |
|
28 | 28 | from ..utils.modelscope_gateway import ModelScopeGateway |
29 | 29 | import folder_paths |
30 | 30 |
|
| 31 | + |
def get_llm_config_from_headers(request):
    """Extract LLM-related configuration from request headers.

    Returns a dict mapping config keys to the corresponding header value,
    or ``None`` for any header absent from the request.
    """
    # Config key -> HTTP header name.
    header_names = {
        "openai_api_key": 'Openai-Api-Key',
        "openai_base_url": 'Openai-Base-Url',
        # Workflow LLM settings (optional, used by tools/agents that need a different LLM)
        "workflow_llm_api_key": 'Workflow-LLM-Api-Key',
        "workflow_llm_base_url": 'Workflow-LLM-Base-Url',
        "workflow_llm_model": 'Workflow-LLM-Model',
    }
    return {key: request.headers.get(name) for key, name in header_names.items()}
| 42 | + |
| 43 | + |
31 | 44 | # 全局下载进度存储 |
32 | 45 | download_progress = {} |
33 | 46 | download_lock = threading.Lock() |
@@ -240,14 +253,11 @@ async def invoke_chat(request): |
240 | 253 | config = { |
241 | 254 | "session_id": session_id, |
242 | 255 | "workflow_checkpoint_id": workflow_checkpoint_id, |
243 | | - "openai_api_key": request.headers.get('Openai-Api-Key'), |
244 | | - "openai_base_url": request.headers.get('Openai-Base-Url'), |
245 | | - # Workflow LLM settings (optional, used by tools/agents that need a different LLM) |
246 | | - "workflow_llm_api_key": request.headers.get('Workflow-LLM-Api-Key'), |
247 | | - "workflow_llm_base_url": request.headers.get('Workflow-LLM-Base-Url'), |
248 | | - "workflow_llm_model": request.headers.get('Workflow-LLM-Model'), |
| 256 | + **get_llm_config_from_headers(request), |
249 | 257 | "model_select": next((x['data'][0] for x in ext if x['type'] == 'model_select' and x.get('data')), None) |
250 | 258 | } |
| 259 | + # Apply .env-based defaults for LLM-related fields (config > .env > code defaults) |
| 260 | + config = apply_llm_env_defaults(config) |
251 | 261 |
|
252 | 262 | # 设置请求上下文 - 这里建立context隔离 |
253 | 263 | set_request_context(session_id, workflow_checkpoint_id, config) |
@@ -510,13 +520,10 @@ async def invoke_debug(request): |
510 | 520 | config = { |
511 | 521 | "session_id": session_id, |
512 | 522 | "model": "gemini-2.5-flash", # Default model for debug agents |
513 | | - "openai_api_key": request.headers.get('Openai-Api-Key'), |
514 | | - "openai_base_url": request.headers.get('Openai-Base-Url'), |
515 | | - # Workflow LLM settings (optional) |
516 | | - "workflow_llm_api_key": request.headers.get('Workflow-LLM-Api-Key'), |
517 | | - "workflow_llm_base_url": request.headers.get('Workflow-LLM-Base-Url'), |
518 | | - "workflow_llm_model": request.headers.get('Workflow-LLM-Model'), |
| 523 | + **get_llm_config_from_headers(request), |
519 | 524 | } |
| 525 | + # Apply .env-based defaults for LLM-related fields (config > .env > code defaults) |
| 526 | + config = apply_llm_env_defaults(config) |
520 | 527 |
|
521 | 528 | # 获取当前语言 |
522 | 529 | language = request.headers.get('Accept-Language', 'en') |
|
0 commit comments