- 新增3张通知路由表模型(notify_area, camera_area_binding, area_person_binding)
- 新增VLM复核服务,通过qwen3-vl-flash对告警截图二次确认
- 新增企微通知服务,告警确认后推送文本卡片给责任人
- 新增通知调度服务,编排VLM复核→查表路由→企微推送流水线
- 新增企微回调接口,支持手动结单/确认处理/标记误报
- 新增通知管理API,区域/摄像头绑定/人员绑定CRUD
- 告警上报主流程(edge_compat + yudao_aiot_alarm)接入异步通知
- 扩展配置项支持VLM和企微环境变量
- 添加openai==1.68.0依赖(通过DashScope兼容端点调用)
83 lines
2.6 KiB
Python
"""
|
||
VLM 服务快速验证脚本
|
||
|
||
测试 qwen3-vl-flash 是否能正常通过 DashScope OpenAI 兼容端点调用。
|
||
使用一张公开的测试图片验证 VLM 能否返回 JSON 格式结果。
|
||
"""
|
||
|
||
import asyncio
import json
import os

from openai import AsyncOpenAI


async def test_vlm():
    """Smoke-test qwen3-vl-flash through DashScope's OpenAI-compatible endpoint.

    Sends one publicly hosted test image plus an alarm-review prompt, then
    checks that the model answers with parseable JSON of the form
    ``{"confirmed": bool, "description": str}``. Results and token usage are
    printed to stdout; failures are reported rather than raised (this is a
    manual verification script, so broad exception handling at this boundary
    is deliberate).
    """
    client = AsyncOpenAI(
        # Prefer a real key from the environment; the placeholder keeps the
        # previous behavior (an auth error, not a crash) when it is unset.
        api_key=os.getenv("DASHSCOPE_API_KEY", "your_dashscope_api_key"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    model = "qwen3-vl-flash-2026-01-22"

    # Public test image from Aliyun's official examples.
    test_image_url = "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"

    prompt = """分析这张监控截图。
摄像头位置:测试摄像头,监控区域:测试区域。
边缘AI触发了 leave_post 告警,请判断告警是否属实。

输出严格的JSON格式(不要输出其他内容):
{"confirmed": true, "description": "一句话描述当前画面"}"""

    print(f"测试模型: {model}")
    print(f"测试图片: {test_image_url}")
    print("请求中...")

    try:
        # Hard 15s deadline so a hung endpoint cannot stall the script.
        resp = await asyncio.wait_for(
            client.chat.completions.create(
                model=model,
                messages=[{
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": test_image_url}},
                        {"type": "text", "text": prompt},
                    ],
                }],
                # Disable the model's "thinking" output so the reply is pure JSON.
                extra_body={"enable_thinking": False},
            ),
            timeout=15,
        )

        content = resp.choices[0].message.content.strip()
        print(f"\n原始返回:\n{content}")

        # Strip a Markdown code fence (```json ... ```) if the model added one.
        parsed = content
        if "```" in parsed:
            parsed = parsed.split("```")[1]
            if parsed.startswith("json"):
                parsed = parsed[4:]
        # No-op when there was no fence (content is already stripped above).
        parsed = parsed.strip()

        result = json.loads(parsed)
        print("\n解析结果:")
        print(f" confirmed: {result.get('confirmed')}")
        print(f" description: {result.get('description')}")
        print("\n[OK] VLM 调用成功!模型可正常使用。")

        # Report token usage so the cost per review call can be estimated.
        if resp.usage:
            print(f"\nToken 用量: prompt={resp.usage.prompt_tokens}, completion={resp.usage.completion_tokens}, total={resp.usage.total_tokens}")

    except asyncio.TimeoutError:
        print("[FAIL] 超时(15秒)")
    except json.JSONDecodeError as e:
        # json.loads runs only after `content` is bound, so this is safe.
        print(f"[WARN] JSON 解析失败: {e}")
        print(f"原始内容: {content}")
    except Exception as e:
        print(f"[FAIL] 调用失败: {e}")
if __name__ == "__main__":
|
||
asyncio.run(test_vlm())
|