mrfakename committed on
Commit
956cc67
·
verified ·
1 Parent(s): dc3f8c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -18
app.py CHANGED
@@ -13,26 +13,27 @@ model = AutoModelForCausalLM.from_pretrained(model_id_or_path, device_map="auto"
13
 
14
  processor = AutoProcessor.from_pretrained(model_id_or_path, trust_remote_code=True)
15
 
16
- image_path = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png"
17
-
18
- image = Image.open(requests.get(image_path, stream=True).raw)
19
-
20
- messages = [
21
- {
22
- "role": "user",
23
- "content": [
24
- {"text": None, "type": "image"},
25
- {"text": "what is the image?", "type": "text"},
26
- ],
27
- }
28
- ]
29
-
30
- text = processor.apply_chat_template(messages, add_generation_prompt=True)
31
- inputs = processor(text=text, images=image, return_tensors="pt")
32
- inputs["pixel_values"] = inputs["pixel_values"].to(model.dtype)
33
- inputs = {k: v.to(model.device) for k, v in inputs.items()}
34
  @spaces.GPU
35
  def run():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  with torch.inference_mode(), torch.cuda.amp.autocast(dtype=torch.bfloat16):
37
  output = model.generate(
38
  **inputs,
 
13
 
14
  processor = AutoProcessor.from_pretrained(model_id_or_path, trust_remote_code=True)
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  @spaces.GPU
17
  def run():
18
+
19
+ image_path = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png"
20
+
21
+ image = Image.open(requests.get(image_path, stream=True).raw)
22
+
23
+ messages = [
24
+ {
25
+ "role": "user",
26
+ "content": [
27
+ {"text": None, "type": "image"},
28
+ {"text": "what is the image?", "type": "text"},
29
+ ],
30
+ }
31
+ ]
32
+
33
+ text = processor.apply_chat_template(messages, add_generation_prompt=True)
34
+ inputs = processor(text=text, images=image, return_tensors="pt")
35
+ inputs["pixel_values"] = inputs["pixel_values"].to(model.dtype)
36
+ inputs = {k: v.to(model.device) for k, v in inputs.items()}
37
  with torch.inference_mode(), torch.cuda.amp.autocast(dtype=torch.bfloat16):
38
  output = model.generate(
39
  **inputs,