Spaces:
Sleeping
Sleeping
File size: 6,227 Bytes
4a8ac8b 755ce4f 7abf81a 4a8ac8b cc873a6 c6414ce 7160aa7 941cba6 5be1557 941cba6 5be1557 a8b1053 941cba6 a8b1053 7e5261e 263464a 755ce4f cc873a6 755ce4f cc873a6 755ce4f c6414ce 5be1557 c6414ce 941cba6 5be1557 e26becc 5be1557 e26becc 3391dd2 5be1557 e26becc 5be1557 e26becc 3391dd2 5be1557 941cba6 5be1557 c6414ce 755ce4f c6414ce 263464a a8b1053 3391dd2 263464a 755ce4f cc873a6 755ce4f 263464a c6414ce 941cba6 5448b5b 9ddcb59 c6414ce 3391dd2 5be1557 941cba6 5be1557 941cba6 755ce4f c6414ce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
import gradio as gr
import subprocess
from huggingface_hub import InferenceClient
from PIL import Image
import requests
import json
# ===================== Core logic module =====================
# Initialize the model clients at import time. `service_status` records
# whether initialization succeeded and is displayed in the UI banner.
try:
    # Text chat model
    client_text = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
    # Image generation model 1 (no model pinned — uses the service default)
    client_image_1 = InferenceClient()
    # Image generation model 2 (FLUX)
    client_image_2 = InferenceClient("black-forest-labs/FLUX.1-dev")
    # Mark the service as ready for use
    service_status = "服务已启动,您可以开始使用!"
except Exception as e:
    # NOTE(review): constructing InferenceClient does no network call, so this
    # guard mainly catches configuration/import-level errors — confirm.
    print(f"Error initializing clients: {e}")
    service_status = "服务初始化失败,请稍后再试。"
# ---------- 文本聊天模块 ----------
def chat_with_model(messages):
    """Send *messages* to the text chat model and return the reply text.

    Falls back to a fixed Chinese error string when the request fails.
    """
    try:
        completion = client_text.chat_completion(messages, max_tokens=100)
        first_choice = completion["choices"][0]
        return first_choice["message"]["content"]
    except Exception as err:
        print(f"Chat generation failed: {err}")
        return "聊天生成失败,请稍后再试。"
# ---------- chatgpt-4o-mini 模块 ----------
def chatgpt_4o_mini(messages):
    """Call the gpt-4o-mini relay API and assemble its streamed reply.

    Parameters
    ----------
    messages : list[dict]
        Chat messages in OpenAI format ({"role": ..., "content": ...}).

    Returns
    -------
    str
        The concatenated assistant reply, or a Chinese error message on failure.
    """
    try:
        # Build the request payload; a system prompt is always prepended.
        data = {
            "model": "gpt-4o-mini",
            "messages": [{"role": "system", "content": "你是一个辅助机器人"}] + messages,
            "stream": True,
        }
        url = 'https://sanbo1200-duck2api.hf.space/completions'
        headers = {'Content-Type': 'application/json'}
        # BUG FIX: add a timeout so a stalled server cannot hang the handler
        # forever (10 s to connect, 120 s between streamed chunks).
        response = requests.post(url, headers=headers, json=data, stream=True,
                                 timeout=(10, 120))
        if response.status_code != 200:
            return f"请求失败,状态码:{response.status_code}"
        # Accumulate the streamed delta fragments into one reply string.
        full_response = ""
        for line in response.iter_lines():
            if not line:
                continue
            payload = line.decode('utf-8').strip()
            # BUG FIX: OpenAI-style streams prefix each chunk with "data: "
            # and terminate with "data: [DONE]"; json.loads on the raw line
            # would fail on every such chunk. Strip the prefix and skip the
            # end-of-stream sentinel before parsing.
            if payload.startswith('data:'):
                payload = payload[5:].strip()
            if not payload or payload == '[DONE]':
                continue
            try:
                chunk = json.loads(payload)
                choices = chunk.get('choices') or []
                if choices:
                    # `or ''` guards against an explicit null content field,
                    # which the original `.get('content', '')` did not.
                    full_response += choices[0].get('delta', {}).get('content', '') or ''
            except Exception as e:
                # Best-effort: a malformed chunk is logged and skipped.
                print(f"Error parsing response chunk: {e}")
        return full_response.strip()
    except Exception as e:
        print(f"Error during gpt-4o-mini request: {e}")
        return "gpt-4o-mini 请求失败,请稍后再试。"
# ---------- 图像生成模块 ----------
def image_gen(prompt):
    """Generate one image from each of the two image services.

    Each service is attempted independently, so a failure in one does not
    prevent the other from producing an image.

    Parameters
    ----------
    prompt : str
        Text description of the desired image.

    Returns
    -------
    tuple
        (image_1, image_2); a slot is None if that service failed.
    """
    image_1 = None
    image_2 = None
    # Service 1 (default model)
    try:
        print(f"Generating image from service 1 with prompt: {prompt}")
        image_1 = client_image_1.text_to_image(prompt)
        if image_1 is None:
            print("Service 1 returned no image.")
    except Exception as e:
        # BUG FIX: previously a failure here aborted the whole function, so
        # service 2 was never attempted and both outputs were lost.
        print(f"Image generation failed: {e}")
    # Service 2 (FLUX model)
    try:
        print(f"Generating image from service 2 with prompt: {prompt}")
        image_2 = client_image_2.text_to_image(prompt)
        if image_2 is None:
            print("Service 2 returned no image.")
    except Exception as e:
        print(f"Image generation failed: {e}")
    return image_1, image_2
# ===================== Gradio 界面构建 =====================
def build_interface():
    """Assemble the Gradio UI: a status banner plus three feature tabs
    (text chat, gpt-4o-mini chat, dual-service image generation)."""
    with gr.Blocks() as demo:
        # Read-only banner showing whether client initialization succeeded.
        gr.Textbox(label="服务状态", value=service_status, interactive=False)

        # --- Text chat tab ---
        with gr.Tab("文本聊天"):
            question = gr.Textbox(label="输入你的问题", placeholder="请提问...")
            answer = gr.Textbox(label="回答")
            send_btn = gr.Button("发送")
            # Wrap the single user turn into a messages list for the model.
            send_btn.click(
                lambda q: chat_with_model([{"role": "user", "content": q}]),
                inputs=question,
                outputs=answer,
            )

        # --- gpt-4o-mini tab ---
        with gr.Tab("gpt4o"):
            gpt_question = gr.Textbox(label="输入你的问题", placeholder="请提问...")
            gpt_answer = gr.Textbox(label="回答")
            gpt_send = gr.Button("发送")
            gpt_send.click(
                lambda q: chatgpt_4o_mini([{"role": "user", "content": q}]),
                inputs=gpt_question,
                outputs=gpt_answer,
            )

        # --- Image generation tab ---
        with gr.Tab("图像生成"):
            prompt_box = gr.Textbox(label="图像提示词", placeholder="描述你想生成的图像")
            # Side-by-side outputs, one per image service.
            with gr.Row():
                left_image = gr.Image(label="服务一生成的图像", elem_id="image_1", interactive=True)
                right_image = gr.Image(label="服务二生成的图像", elem_id="image_2", interactive=True)
            gen_btn = gr.Button("生成图像")
            # image_gen already returns a (img_1, img_2) tuple, so it can be
            # wired directly to the two outputs.
            gen_btn.click(image_gen, inputs=prompt_box, outputs=[left_image, right_image])

        gr.Markdown("### 使用说明")
        gr.Markdown("本助手支持文本聊天、chatgpt-4o-mini 和图像生成功能,使用上方选项卡切换不同功能。")
    return demo
# 启动 Gradio 界面
if __name__ == "__main__":
demo = build_interface()
demo.launch()
|