import argparse
import base64
import os

import requests
from openai import OpenAI

# --- Configuration ---
IP_ADDR = "127.0.0.1:8910"
MODEL_NAME = "qwen3-8b-8380"  # Replace with the actual model name
API_KEY = "123"


# --- Helper: image encoding ---
def encode_image(image_input):
    """Return the Base64 encoding of an image given a local path or a URL.

    Args:
        image_input: An ``http://``/``https://`` URL or a local file path.

    Returns:
        The raw image bytes encoded as a Base64 ASCII string.

    Raises:
        RuntimeError: If the download or the local read fails; the original
            exception is chained as the cause for diagnosability.
    """
    if image_input.startswith(('http://', 'https://')):
        try:
            print(f"Downloading image from URL: {image_input}...")
            response = requests.get(image_input, timeout=10)
            response.raise_for_status()
            return base64.b64encode(response.content).decode('utf-8')
        except Exception as e:
            # Chain the cause instead of discarding it (was: bare Exception).
            raise RuntimeError(f"Failed to download image from URL: {e}") from e
    else:
        try:
            if not os.path.exists(image_input):
                raise FileNotFoundError(f"Local file not found: {image_input}")
            with open(image_input, "rb") as image_file:
                return base64.b64encode(image_file.read()).decode('utf-8')
        except Exception as e:
            raise RuntimeError(f"Failed to load local image: {e}") from e


def main():
    """Send a chat-completion request to the Genie endpoint.

    Runs in text-only (LLM) mode by default; passing ``--image`` switches to
    vision-language (VL) mode, which uses a Genie-specific payload layout.
    """
    # 1. Argument parsing
    parser = argparse.ArgumentParser(description="Genie API Client for LLM and VL models")
    parser.add_argument("--stream", action="store_true", help="Enable streaming output")
    parser.add_argument("--prompt", type=str, default="Hello", help="The text prompt")
    # --image is optional; its presence alone selects VL mode.
    parser.add_argument("--image", type=str, required=False,
                        help="Path to image or URL (Trigger VL mode)")
    args = parser.parse_args()

    # 2. Client initialization (OpenAI-compatible endpoint)
    client = OpenAI(base_url=f"http://{IP_ADDR}/v1", api_key=API_KEY)

    # Base sampling parameters forwarded to the Genie backend via extra_body.
    extra_body = {
        "size": 4096,
        "temp": 1.5,
        "top_k": 13,
        "top_p": 0.6,
    }

    # 3. Build the request body depending on whether an image was supplied.
    messages_payload = []
    if args.image:
        # =========== VL (vision-language) mode ===========
        print(f"--- Mode: VL (Visual Language) [Image: {args.image}] ---")
        try:
            base64_image = encode_image(args.image)
        except Exception as e:
            print(f"Error processing image: {e}")
            return

        # Genie VL expects the real conversation inside extra_body, with the
        # user turn carrying a {"question": ..., "image": ...} object.
        custom_messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": {
                    "question": args.prompt,
                    "image": base64_image,
                },
            },
        ]
        extra_body["messages"] = custom_messages
        # The standard messages field only carries a placeholder
        # (a Genie VL requirement).
        messages_payload = [{"role": "user", "content": "placeholder"}]
    else:
        # =========== LLM (text-only) mode ===========
        print("--- Mode: LLM (Text Only) ---")
        messages_payload = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": args.prompt},
        ]
        # In LLM mode, extra_body must NOT include a "messages" field.

    # 4. Send the request
    try:
        if args.stream:
            response = client.chat.completions.create(
                model=MODEL_NAME,
                stream=True,
                messages=messages_payload,
                extra_body=extra_body,
            )
            print("Response: ", end="")
            for chunk in response:
                if chunk.choices:
                    content = chunk.choices[0].delta.content
                    if content is not None:
                        print(content, end="", flush=True)
            print()  # final newline after the streamed output
        else:
            response = client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages_payload,
                extra_body=extra_body,
            )
            if response.choices:
                print("Response:", response.choices[0].message.content)
    except Exception as e:
        print(f"\nRequest failed: {e}")


if __name__ == "__main__":
    main()