"""Quick verification script for the VLM service.

Checks that qwen3-vl-flash can be called through the DashScope
OpenAI-compatible endpoint, using one public test image to verify that
the VLM returns a result in JSON format.
"""
import asyncio
import json
import os

from openai import AsyncOpenAI

def _strip_code_fence(text: str) -> str:
    """Return the JSON payload of *text*, removing a Markdown ``` fence if present.

    Models often wrap JSON in ```json ... ``` fences; take the first fenced
    segment, drop a leading "json" language tag, and strip whitespace.
    Text without a fence is returned unchanged.
    """
    if "```" in text:
        text = text.split("```")[1]
        if text.startswith("json"):
            text = text[4:]
        text = text.strip()
    return text


async def test_vlm():
    """Smoke-test qwen3-vl-flash via DashScope's OpenAI-compatible endpoint.

    Sends one public test image plus a JSON-only prompt, then prints the raw
    response, the parsed fields and the token usage.  Every failure mode
    (timeout, malformed JSON, transport error) is reported on stdout instead
    of raised, so the script always finishes cleanly.
    """
    # Prefer a real key from the environment; the literal placeholder is kept
    # as the fallback so behaviour is unchanged when the variable is unset.
    client = AsyncOpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY", "your_dashscope_api_key"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    model = "qwen3-vl-flash-2026-01-22"
    # Single source of truth for the round-trip cap; also echoed in the
    # timeout failure message below.
    timeout_s = 15

    # Public test image hosted by Aliyun (official example asset).
    test_image_url = "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"

    prompt = """分析这张监控截图。
摄像头位置:测试摄像头,监控区域:测试区域。
边缘AI触发了 leave_post 告警,请判断告警是否属实。

输出严格的JSON格式(不要输出其他内容):
{"confirmed": true, "description": "一句话描述当前画面"}"""

    print(f"测试模型: {model}")
    print(f"测试图片: {test_image_url}")
    print("请求中...")

    try:
        resp = await asyncio.wait_for(
            client.chat.completions.create(
                model=model,
                messages=[{
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": test_image_url}},
                        {"type": "text", "text": prompt},
                    ],
                }],
                # Disable the model's "thinking" mode so it answers directly.
                extra_body={"enable_thinking": False},
            ),
            timeout=timeout_s,
        )

        content = resp.choices[0].message.content.strip()
        print(f"\n原始返回:\n{content}")

        # Parse the (possibly fenced) JSON answer.
        result = json.loads(_strip_code_fence(content))
        print("\n解析结果:")
        print(f"  confirmed: {result.get('confirmed')}")
        print(f"  description: {result.get('description')}")
        print("\n[OK] VLM 调用成功!模型可正常使用。")

        # Report token usage when the endpoint returns it.
        if resp.usage:
            print(f"\nToken 用量: prompt={resp.usage.prompt_tokens}, completion={resp.usage.completion_tokens}, total={resp.usage.total_tokens}")

    except asyncio.TimeoutError:
        print(f"[FAIL] 超时({timeout_s}秒)")
    except json.JSONDecodeError as e:
        # `content` is always bound here: json.loads runs only after it is set.
        print(f"[WARN] JSON 解析失败: {e}")
        print(f"原始内容: {content}")
    except Exception as e:
        # Top-level boundary of a throwaway test script: report, don't crash.
        print(f"[FAIL] 调用失败: {e}")
# Script entry point: run the one-off smoke test on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(test_vlm())