sunbv56 committed
Commit 4bfeff8 · verified · 1 Parent(s): acaa5ac

Update app.py

Files changed (1):
  app.py +1 -1
app.py CHANGED
@@ -31,7 +31,7 @@ def process_vqa(image: Image.Image, question: str):
      messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": question}]}]
      prompt_text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
      model_inputs = processor(text=[prompt_text], images=[image], return_tensors="pt").to(model.device)
 -    generated_ids = model.generate(**model_inputs, max_new_tokens=1024, do_sample=False, eos_token_id=processor.tokenizer.eos_token_id, pad_token_id=processor.tokenizer.pad_token_id)
 +    generated_ids = model.generate(**model_inputs, max_new_tokens=128, do_sample=False, eos_token_id=processor.tokenizer.eos_token_id, pad_token_id=processor.tokenizer.pad_token_id)
      generated_ids = generated_ids[:, model_inputs['input_ids'].shape[1]:]
      response = processor.tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip()
      return response
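
The only functional change is the max_new_tokens cap, lowered from 1024 to 128, so the VQA endpoint stops generating after at most 128 new tokens. Below is a minimal sketch of how the updated process_vqa could be exercised locally; the image path and question are placeholders, and it assumes app.py already defines global processor and model objects (e.g. loaded via AutoProcessor.from_pretrained and a vision-language model class), as the diff suggests but does not show.

    # Hypothetical smoke test for process_vqa inside app.py.
    # Assumes `processor` and `model` are loaded globally elsewhere in the file.
    from PIL import Image

    image = Image.open("example.jpg").convert("RGB")   # placeholder image path
    answer = process_vqa(image, "What objects are on the table?")  # placeholder question
    print(answer)  # output is now capped at 128 newly generated tokens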