code - Local vLLM model request example

import requests
import json
import base64
from typing import List, Optional, Dict

# Service address and model configuration
API_BASE_URL = "http://localhost:8000/v1"  # your vLLM service address
MODEL_NAME = "Qwen3-VL-4B-Instruct"  # must match --served-model-name used at launch
TIMEOUT = 30  # request timeout in seconds (raise for large images or long generations)
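
# Assumed launch command for the service above (a sketch; adjust the model
# path and other flags to your deployment, but --served-model-name must
# match MODEL_NAME for requests to resolve):
#   vllm serve <model-path-or-hf-repo-id> \
#       --served-model-name Qwen3-VL-4B-Instruct --port 8000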


class Qwen3VLClient:
    """Qwen3-VL-4B-Instruct 模型调用客户端"""

    def __init__(self, api_base_url: str, model_name: str):
        self.api_base_url = api_base_url
        self.model_name = model_name
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

    def chat_completions(
        self,
        messages: List[Dict],
        temperature: float = 0.7,
        max_tokens: int = 1024,
        top_p: float = 0.8,
    ) -> Optional[str]:
        """
        调用模型生成回复
        :param messages: 对话消息列表,格式参考OpenAI规范
        :param temperature: 生成随机性,0-1之间,值越大越随机
        :param max_tokens: 最大生成token数
        :param top_p: 采样阈值,0-1之间
        :return: 模型回复文本,失败返回None
        """
        # Build the request body
        payload = {
            "model": self.model_name,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "top_p": top_p,
            "stream": False,  # 非流式输出
        }

        try:
            # Send the POST request. The body must be UTF-8 bytes: if a str
            # containing non-ASCII text is passed, http.client encodes it as
            # latin-1 and raises UnicodeEncodeError on Chinese characters.
            response = requests.post(
                url=f"{self.api_base_url}/chat/completions",
                headers=self.headers,
                data=json.dumps(payload, ensure_ascii=False).encode("utf-8"),
                timeout=TIMEOUT,
            )
            # Raise on non-2xx status codes
            response.raise_for_status()
            # Parse the response
            result = response.json()
            if result.get("choices") and len(result["choices"]) > 0:
                return result["choices"][0]["message"]["content"]
            else:
                print("Model returned no valid reply")
                return None

        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            if e.response is not None:
                # Print the server-side error; use .text since the body may not be JSON
                print(f"Error response details: {e.response.text}")
            return None
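
    def chat_completions_stream(
        self,
        messages: List[Dict],
        temperature: float = 0.7,
        max_tokens: int = 1024,
    ) -> Optional[str]:
        """
        Streaming variant: a minimal sketch, not part of the original script.
        Assumes the server emits OpenAI-style SSE chunks ("data: {...}" lines
        terminated by "data: [DONE]"), which is how vLLM's OpenAI-compatible
        endpoint streams responses.
        """
        payload = {
            "model": self.model_name,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": True,  # ask the server to stream tokens as they are generated
        }
        try:
            with requests.post(
                url=f"{self.api_base_url}/chat/completions",
                headers=self.headers,
                data=json.dumps(payload, ensure_ascii=False).encode("utf-8"),
                timeout=TIMEOUT,
                stream=True,  # keep the connection open and read incrementally
            ) as response:
                response.raise_for_status()
                pieces = []
                for line in response.iter_lines(decode_unicode=True):
                    if not line or not line.startswith("data: "):
                        continue  # skip blank keep-alive lines between events
                    chunk = line[len("data: "):]
                    if chunk == "[DONE]":
                        break
                    delta = json.loads(chunk)["choices"][0].get("delta", {})
                    piece = delta.get("content") or ""
                    print(piece, end="", flush=True)
                    pieces.append(piece)
                print()
                return "".join(pieces)
        except requests.exceptions.RequestException as e:
            print(f"Streaming request failed: {e}")
            return None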


def get_image_base64(image_path: str) -> str:
    """Read a local image and return its base64 encoding (handles Windows paths)."""
    try:
        # Normalize path separators (forward slashes also work on Windows)
        image_path = image_path.replace("\\", "/")
        print(f"Reading image: {image_path}")
        with open(image_path, "rb") as f:
            return base64.b64encode(f.read()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: image file {image_path} not found; please check the path")
        return ""
    except Exception as e:
        print(f"Image-to-base64 conversion failed: {e}")
        return ""

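def guess_image_mime(image_path: str) -> str:
    """
    Hypothetical helper, not part of the original script: derive the MIME
    type from the file extension so the data URL in the multimodal example
    need not be hard-coded to image/jpeg. Falls back to image/jpeg when the
    extension is unknown.
    """
    import mimetypes  # stdlib; imported here to keep the sketch self-contained

    mime, _ = mimetypes.guess_type(image_path)
    return mime if mime and mime.startswith("image/") else "image/jpeg"
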

# ------------------------------
# Example 1: plain-text chat (verifies the service is usable)
# ------------------------------
def test_text_chat():
    print("===== Plain-text chat test =====")
    client = Qwen3VLClient(API_BASE_URL, MODEL_NAME)
    messages = [{"role": "user", "content": "Hello, please introduce yourself"}]
    response = client.chat_completions(messages, temperature=0.7, max_tokens=500)
    if response:
        print(f"Model reply:\n{response}\n")

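def check_service_alive() -> bool:
    """
    Optional pre-flight check, a sketch not in the original script: vLLM's
    OpenAI-compatible server exposes GET /v1/models, so one quick request
    confirms the service is reachable before running the tests below.
    Usage: call check_service_alive() at the top of __main__.
    """
    try:
        resp = requests.get(f"{API_BASE_URL}/models", timeout=5)
        resp.raise_for_status()
        names = [m.get("id") for m in resp.json().get("data", [])]
        print(f"Service is up; served models: {names}")
        return True
    except requests.exceptions.RequestException as e:
        print(f"Service unreachable: {e}")
        return False
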

# ------------------------------
# Example 2: multimodal (image + text) call
# ------------------------------
def test_multimodal_chat():
    print("===== Multimodal (image + text) test =====")
    client = Qwen3VLClient(API_BASE_URL, MODEL_NAME)

    # ********** IMPORTANT: change this to your actual image path **********
    # Option 1: raw Windows path (backslashes must be doubled)
    # image_path = "D:\\phpstudy_pro\\python\\.vscode\\练习\\ollama\\test.jpg"
    # Option 2 (recommended): forward slashes, no escaping needed
    image_path = "D:/phpstudy_pro/python/.vscode/练习/ollama/test.jpg"

    # Convert the image to base64
    img_base64 = get_image_base64(image_path)
    if not img_base64:
        print("Image base64 encoding failed; skipping the multimodal test")
        return

    # Build a valid multimodal message body (OpenAI content-parts format)
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "请详细描述这张图片的内容"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{img_base64}"  # 确保base64非空
                    },
                },
            ],
        }
    ]

    # Call the model
    response = client.chat_completions(messages, temperature=0.1, max_tokens=1024)
    if response:
        print(f"模型回复:\n{response}\n")


# ------------------------------
# Main: run the tests
# ------------------------------
if __name__ == "__main__":
    # Test plain-text chat first (to confirm the service is healthy)
    # test_text_chat()

    # Multimodal (image + text) test
    test_multimodal_chat()