Update app.py
app.py CHANGED

@@ -9,7 +9,8 @@ from PIL import Image
 import subprocess
 # subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

-model = AutoModelForCausalLM.from_pretrained('HuggingFaceM4/Florence-2-DocVQA', trust_remote_code=True).to("cuda").eval()
+model = AutoModelForCausalLM.from_pretrained('HuggingFaceM4/Florence-2-DocVQA', trust_remote_code=True).eval()
+# model = AutoModelForCausalLM.from_pretrained('HuggingFaceM4/Florence-2-DocVQA', trust_remote_code=True).to("cuda").eval()

 processor = AutoProcessor.from_pretrained('HuggingFaceM4/Florence-2-DocVQA', trust_remote_code=True)

@@ -27,7 +28,8 @@ def run_example(task_prompt, image, text_input=None):
         prompt = task_prompt
     else:
         prompt = task_prompt + text_input
-    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
+    inputs = processor(text=prompt, images=image, return_tensors="pt")
+    # inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
     generated_ids = model.generate(
         input_ids=inputs["input_ids"],
         pixel_values=inputs["pixel_values"],