LoufAn committed
Commit db7fd06 · 1 Parent(s): 5e49a17

Update app.py

Files changed (1)
  1. app.py +54 -15
app.py CHANGED
@@ -1,21 +1,60 @@
+import math
+import torch
+from transformers import AutoTokenizer, AutoModel, AutoProcessor
 import gradio as gr
-from transformers import AutoProcessor, AutoModelForVision2Seq
 from PIL import Image
-import torch
 
-# Model and processor (InternVL3)
-model_id = "OpenGVLab/InternVL3-14B"
-processor = AutoProcessor.from_pretrained(model_id)
-model = AutoModelForVision2Seq.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
+# === Split layers across multiple GPUs ===
+def split_model(model_path):
+    from transformers import AutoConfig
+    device_map = {}
+    world_size = torch.cuda.device_count()
+    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
+    num_layers = config.llm_config.num_hidden_layers
+    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+    num_layers_per_gpu = [num_layers_per_gpu] * world_size
+    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+    layer_cnt = 0
+    for i, num_layer in enumerate(num_layers_per_gpu):
+        for _ in range(num_layer):
+            device_map[f'language_model.model.layers.{layer_cnt}'] = i
+            layer_cnt += 1
+    device_map['vision_model'] = 0
+    device_map['mlp1'] = 0
+    device_map['language_model.model.tok_embeddings'] = 0
+    device_map['language_model.model.embed_tokens'] = 0
+    device_map['language_model.output'] = 0
+    device_map['language_model.model.norm'] = 0
+    device_map['language_model.model.rotary_emb'] = 0
+    device_map['language_model.lm_head'] = 0
+    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+    return device_map
+
+# === Model path ===
+model_path = "OpenGVLab/InternVL3-14B"
+device_map = split_model(model_path)
+
+# === Load model and processor ===
+model = AutoModel.from_pretrained(
+    model_path,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    use_flash_attn=True,
+    trust_remote_code=True,
+    device_map=device_map
+).eval()
+
+tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
 
-# Inference function
-def infer(image, prompt):
-    inputs = processor(prompt=prompt, images=image, return_tensors="pt").to("cuda")
-    output = model.generate(**inputs, max_new_tokens=256)
-    response = processor.decode(output[0], skip_special_tokens=True)
-    return response
+# === Inference function ===
+def infer(image: Image.Image, prompt: str):
+    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
+    output = model.generate(**inputs, max_new_tokens=512)
+    answer = tokenizer.decode(output[0], skip_special_tokens=True)
+    return answer
 
-# Gradio UI
+# === Gradio UI ===
 gr.Interface(
     fn=infer,
     inputs=[
@@ -23,6 +62,6 @@ gr.Interface(
         gr.Textbox(label="Your Prompt", placeholder="Ask a question about the image...")
     ],
     outputs="text",
-    title="InternVL3-14B Visual Chat",
-    description="Upload an image and enter a prompt. InternVL3-14B will answer accordingly."
+    title="InternVL3-14B Multimodal Demo",
+    description="Upload an image and ask a question. InternVL3-14B will answer using vision + language."
 ).launch()
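
Note on the layer split added in this commit: split_model gives GPU 0 only about half a share of the LLM layers because GPU 0 also hosts the vision encoder, embeddings, norm, output head, and the pinned final layer. Below is a minimal, self-contained sketch of that allocation arithmetic; the 48-layer / 2-GPU figures are illustrative assumptions, not values read from the InternVL3-14B config.

import math

def sketch_layer_allocation(num_layers: int, world_size: int) -> dict:
    # Same arithmetic as split_model: GPU 0 counts as half a GPU
    # because it also carries the vision tower and output head.
    per_gpu = math.ceil(num_layers / (world_size - 0.5))
    shares = [per_gpu] * world_size
    shares[0] = math.ceil(shares[0] * 0.5)

    device_map = {}
    layer_cnt = 0
    for gpu, share in enumerate(shares):
        for _ in range(share):
            if layer_cnt >= num_layers:  # guard added in this sketch only
                break
            device_map[f"layers.{layer_cnt}"] = gpu
            layer_cnt += 1
    # split_model also pins the last LLM layer back to GPU 0.
    device_map[f"layers.{num_layers - 1}"] = 0
    return device_map

# Illustrative numbers only: 48 LLM layers across 2 GPUs.
alloc = sketch_layer_allocation(num_layers=48, world_size=2)
print({g: sum(1 for v in alloc.values() if v == g) for g in sorted(set(alloc.values()))})
# -> {0: 17, 1: 31}: GPU 0 takes fewer LLM layers since it also holds the ViT.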