import gradio as gr
import torch
from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM

# Load the processor, tokenizer, and model once at startup (CPU-only, default float32).
model_path = "deepseek-ai/deepseek-vl-1.3b-chat"
vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt = MultiModalityCausalLM.from_pretrained(model_path, trust_remote_code=True).to("cpu").eval()

def qa(image, question):
    # Gradio passes the upload as a PIL image (type="pil"), so it can be used
    # directly; no file loading step is needed.
    pil_images = [image.convert("RGB")]
    conversation = [
        {"role": "User", "content": "<image_placeholder>" + question, "images": pil_images},
        {"role": "Assistant", "content": ""},
    ]

    # Tokenize the conversation and preprocess the image into batched tensors.
    prepare_inputs = vl_chat_processor(
        conversations=conversation,
        images=pil_images,
        force_batchify=True
    ).to(vl_gpt.device)

    # Encode the image and merge its embeddings with the text embeddings.
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

    # Generate the answer greedily (no sampling) with the language-model head.
    with torch.no_grad():
        outputs = vl_gpt.language_model.generate(
            inputs_embeds=inputs_embeds,
            attention_mask=prepare_inputs.attention_mask,
            pad_token_id=tokenizer.eos_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            max_new_tokens=512,
            do_sample=False,
            use_cache=True
        )

    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
    return answer

demo = gr.Interface(
    fn=qa,
    inputs=[gr.Image(type="pil", label="Upload Image"), gr.Textbox(label="Enter your question")],
    outputs="text",
    title="DeepSeek-VL Multimodal QA Demo",
    description="Upload an image and ask a question about it; DeepSeek-VL answers using its vision-language understanding."
)

demo.launch()