diff --git a/.env.example b/.env.example
index 65b8bf1..d9f3bc6 100644
--- a/.env.example
+++ b/.env.example
@@ -61,7 +61,7 @@
 WORK_ORDER_APP_ID=alarm-system
 WORK_ORDER_APP_SECRET=your-app-secret
 WORK_ORDER_TENANT_ID=1
-# ===== 交互Agent配置 =====
+# ===== 交互Agent配置(统一使用 VLM 多模态模型) =====
 AGENT_ENABLED=false
-AGENT_LLM_MODEL=qwen-plus # 文本对话模型(复用 DASHSCOPE_API_KEY)
-AGENT_LLM_TIMEOUT=15
+AGENT_VLM_MODEL=qwen3-vl-flash-2026-01-22 # 多模态模型(文字+图片)
+AGENT_VLM_TIMEOUT=15
diff --git a/app/config.py b/app/config.py
index 7c6be34..dce8945 100644
--- a/app/config.py
+++ b/app/config.py
@@ -67,11 +67,11 @@ class WeChatConfig:
 
 @dataclass
 class AgentConfig:
-    """交互Agent配置"""
-    llm_api_key: str = ""
-    llm_base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
-    llm_model: str = "qwen-plus"
-    llm_timeout: int = 15
+    """交互Agent配置(统一使用 VLM 多模态模型)"""
+    vlm_api_key: str = ""
+    vlm_base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+    vlm_model: str = "qwen3-vl-flash-2026-01-22"
+    vlm_timeout: int = 15
     enabled: bool = False
 
 
@@ -186,10 +186,10 @@ def load_settings() -> Settings:
             group_chat_id=os.getenv("WECHAT_GROUP_CHAT_ID", ""),
         ),
         agent=AgentConfig(
-            llm_api_key=os.getenv("DASHSCOPE_API_KEY", ""),
-            llm_base_url=os.getenv("AGENT_LLM_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
-            llm_model=os.getenv("AGENT_LLM_MODEL", "qwen-plus"),
-            llm_timeout=int(os.getenv("AGENT_LLM_TIMEOUT", "15")),
+            vlm_api_key=os.getenv("DASHSCOPE_API_KEY", ""),
+            vlm_base_url=os.getenv("AGENT_VLM_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
+            vlm_model=os.getenv("AGENT_VLM_MODEL", os.getenv("VLM_MODEL", "qwen3-vl-flash-2026-01-22")),
+            vlm_timeout=int(os.getenv("AGENT_VLM_TIMEOUT", "15")),
             enabled=os.getenv("AGENT_ENABLED", "false").lower() == "true",
         ),
         work_order=WorkOrderConfig(