Yixin1234 committed on
Commit 59c9715 · 1 Parent(s): 8f77b64

feat: fix errors using other files

Files changed (2)
  1. app.py +93 -42
  2. requirements.txt +9 -6
app.py CHANGED
@@ -1,49 +1,100 @@
  import gradio as gr
  from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
  from deepseek_vl.utils.io import load_pil_images
- import torch

  model_path = "deepseek-ai/deepseek-vl-1.3b-chat"
-
- # Load processors and model (CPU on free Spaces)
  vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
  tokenizer = vl_chat_processor.tokenizer
- vl_gpt = MultiModalityCausalLM.from_pretrained(model_path, trust_remote_code=True).to("cpu")
-
- def qa(image, question):
-     conversation = [
-         {"role": "User", "content": "<image_placeholder>" + question, "images": [image]},
-         {"role": "Assistant", "content": ""}
-     ]
-     pil_images = load_pil_images(conversation)
-     prepare_inputs = vl_chat_processor(
-         conversations=conversation,
-         images=pil_images,
-         force_batchify=True
-     ).to("cpu")
-
-     inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
-     outputs = vl_gpt.language_model.generate(
-         inputs_embeds=inputs_embeds,
-         attention_mask=prepare_inputs.attention_mask,
-         pad_token_id=tokenizer.eos_token_id,
-         bos_token_id=tokenizer.bos_token_id,
-         eos_token_id=tokenizer.eos_token_id,
-         max_new_tokens=256,  # smaller is faster on CPU
-         do_sample=False,
-         use_cache=True
-     )
-     answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
-     return answer
-
- demo = gr.Interface(
-     fn=qa,
-     inputs=[gr.Image(type="pil", label="Upload Image"), gr.Textbox(label="Enter your question")],
-     outputs="text",
-     title="DeepSeek-VL Multimodal QA Demo",
-     description="Upload an image and enter a question. Experience DeepSeek-VL's vision-language capabilities."
- )
-
- if __name__ == "__main__":
-     # No server_name/server_port/share on Spaces
-     demo.queue(concurrency_count=1, max_size=8).launch()

  import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM
  from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
  from deepseek_vl.utils.io import load_pil_images
+ from io import BytesIO
+ from PIL import Image
+ import spaces  # Import spaces for ZeroGPU support

+ # Load the model and processor
  model_path = "deepseek-ai/deepseek-vl-1.3b-chat"
  vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
  tokenizer = vl_chat_processor.tokenizer
+
+ # Define the function for image description with ZeroGPU support
+ @spaces.GPU  # Ensures GPU allocation for this function
+ def describe_image(image, user_question="Describe this image in great detail."):
+     try:
+         # Convert the PIL Image to a BytesIO object for compatibility
+         image_byte_arr = BytesIO()
+         image.save(image_byte_arr, format="PNG")  # Save image in PNG format
+         image_byte_arr.seek(0)  # Move pointer to the start
+
+         # Define the conversation, using the user's question
+         conversation = [
+             {
+                 "role": "User",
+                 "content": f"<image_placeholder>{user_question}",
+                 "images": [image_byte_arr]  # Pass the image byte array instead of an object
+             },
+             {
+                 "role": "Assistant",
+                 "content": ""
+             }
+         ]
+
+         # Convert image byte array back to a PIL image for processing
+         pil_images = [Image.open(BytesIO(image_byte_arr.read()))]  # Convert bytes back to a PIL Image
+         image_byte_arr.seek(0)  # Reset the byte stream again for reuse
+
+         # Load images and prepare the inputs
+         prepare_inputs = vl_chat_processor(
+             conversations=conversation,
+             images=pil_images,
+             force_batchify=True
+         ).to('cuda')
+
+         # Load and prepare the model
+         vl_gpt = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to(torch.bfloat16).cuda().eval()
+
+         # Generate embeddings from the image input
+         inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
+
+         # Generate the model's response
+         outputs = vl_gpt.language_model.generate(
+             inputs_embeds=inputs_embeds,
+             attention_mask=prepare_inputs.attention_mask,
+             pad_token_id=tokenizer.eos_token_id,
+             bos_token_id=tokenizer.bos_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+             max_new_tokens=512,
+             do_sample=False,
+             use_cache=True
+         )
+
+         # Decode the generated tokens into text
+         answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
+         return answer
+
+     except Exception as e:
+         # Provide detailed error information
+         return f"Error: {str(e)}"
+
+ # Gradio interface
+ def gradio_app():
+     with gr.Blocks() as demo:
+         gr.Markdown("# Image Description with DeepSeek VL 1.3b 🐬\n### Upload an image and ask a question about it.")
+
+         with gr.Row():
+             image_input = gr.Image(type="pil", label="Upload an Image")
+             question_input = gr.Textbox(
+                 label="Question (optional)",
+                 placeholder="Ask a question about the image (e.g., 'What is happening in this image?')",
+                 lines=2
+             )
+
+         output_text = gr.Textbox(label="Image Description", interactive=False)
+
+         submit_btn = gr.Button("Generate Description")
+
+         submit_btn.click(
+             fn=describe_image,
+             inputs=[image_input, question_input],  # Pass both image and question as inputs
+             outputs=output_text
+         )
+
+     demo.launch()
+
+ # Launch the Gradio app
+ gradio_app()
requirements.txt CHANGED
@@ -1,7 +1,10 @@
- gradio>=4.31.0
- transformers>=4.40.0
- torch>=2.2.0
- sentencepiece
- timm>=0.9.16
  accelerate
- git+https://github.com/deepseek-ai/DeepSeek-VL.git

+ # Core requirements
+ bitsandbytes
+ transformers
+ huggingface_hub
  accelerate
+ gradio
+ git+https://github.com/deepseek-ai/DeepSeek-VL
+ spaces
+ Pillow
+ torch