multimodalart HF Staff committed on
Commit
b9a1898
·
verified ·
1 Parent(s): 28037e7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -63,6 +63,7 @@ def process_image(prompt: str, img: Image.Image) -> str:
63
  {"type": "text", "text": prompt},
64
  ],
65
  }]
 
66
  text_prompt_for_qwen = processor.apply_chat_template(
67
  messages, tokenize=False, add_generation_prompt=True
68
  )
@@ -73,7 +74,7 @@ def process_image(prompt: str, img: Image.Image) -> str:
73
  videos=video_inputs,
74
  padding=True,
75
  return_tensors="pt",
76
- ).to('cuda:0')
77
  generated_ids = multi_model.generate(**inputs, max_new_tokens=1024)
78
  input_token_len = inputs.input_ids.shape[1]
79
  generated_ids_trimmed = generated_ids[:, input_token_len:]
 
63
  {"type": "text", "text": prompt},
64
  ],
65
  }]
66
+ print(messages)
67
  text_prompt_for_qwen = processor.apply_chat_template(
68
  messages, tokenize=False, add_generation_prompt=True
69
  )
 
74
  videos=video_inputs,
75
  padding=True,
76
  return_tensors="pt",
77
+ ).to('cuda')
78
  generated_ids = multi_model.generate(**inputs, max_new_tokens=1024)
79
  input_token_len = inputs.input_ids.shape[1]
80
  generated_ids_trimmed = generated_ids[:, input_token_len:]