Spaces: Runtime error
Khadaffe Abubakar Sulaiman committed
Commit · c3b027e
1 Parent(s): 039c09c
feat: add qwen script
Files changed:
- app.py (+32, -0)
- requirements.txt (+0, -0)
app.py
ADDED
@@ -0,0 +1,32 @@
import tempfile

import gradio as gr
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer. Qwen-VL ships its modelling code with the
# checkpoint, so it loads through AutoModelForCausalLM with
# trust_remote_code=True (AutoModelForVision2Seq has no mapping for it).
model_id = "Qwen/Qwen-VL"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype=torch.float16
).to("cuda").eval()

# Inference function
def ask_qwen(image: Image.Image, prompt: str) -> str:
    # from_list_format expects an image path or URL rather than a PIL
    # object, so write the uploaded image to a temporary file first.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
        image.save(f.name)
    query = tokenizer.from_list_format([
        {"image": f.name},
        {"text": prompt},
    ])
    # Token ids are integers; move them to the GPU without a dtype cast.
    inputs = tokenizer(query, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=128)
    answer = tokenizer.decode(output[0], skip_special_tokens=True)
    return answer.strip()

# Gradio interface
demo = gr.Interface(
    fn=ask_qwen,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
    outputs=gr.Textbox(label="Answer"),
    title="Qwen-VL 2.5 - Vision Language Chatbot",
    description="Chat with an image using Qwen-VL 2.5 (3B)",
)

demo.launch()
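Once the Space builds, the interface can also be called programmatically through Gradio's client API. Below is a minimal sketch using gradio_client; the Space id owner/qwen-vl-space and the image filename are placeholders, not values taken from this commit:

from gradio_client import Client, handle_file

# Placeholder Space id; substitute the actual owner/name of this Space.
client = Client("owner/qwen-vl-space")
answer = client.predict(
    handle_file("example.png"),   # image input
    "What is in this picture?",   # prompt input
    api_name="/predict",          # default endpoint for a gr.Interface
)
print(answer)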
requirements.txt
ADDED
File without changes
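The requirements.txt added here is empty. For app.py to run on a Space, the file would plausibly need at least the packages the script imports; the list below is an assumption inferred from those imports, not part of the commit (Qwen-VL's remote code also pulls in extras such as tiktoken, per its model card):

gradio
transformers
torch
Pillow
tiktoken  # assumed: required by Qwen-VL's remote tokenizer code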