Update app.py
app.py CHANGED

@@ -6,17 +6,18 @@ import gradio as gr
 import PIL.Image
 import spaces
 import torch
-from transformers import
+from transformers import AutoProcessor, BlipForConditionalGeneration
 
 DESCRIPTION = "# Image Captioning with LongCap"
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 model_id = "unography/blip-large-long-cap"
-processor =
+processor = AutoProcessor.from_pretrained(model_id)
 model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)
 
 
+
 def run(image: PIL.Image.Image) -> str:
     inputs = processor(images=image, return_tensors="pt").to(device)
     out = model.generate(pixel_values=inputs.pixel_values, max_length=300)
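
For context, a minimal standalone sketch of the captioning flow this fix restores. The hunk ends before run() returns, so the decode step after model.generate and the example input file are assumptions; the spaces import (ZeroGPU decorator) is omitted to keep the sketch runnable anywhere.

import PIL.Image
import torch
from transformers import AutoProcessor, BlipForConditionalGeneration

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_id = "unography/blip-large-long-cap"
processor = AutoProcessor.from_pretrained(model_id)
model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)

def run(image: PIL.Image.Image) -> str:
    # Preprocess the image and generate caption token ids, as in the Space's app.py.
    inputs = processor(images=image, return_tensors="pt").to(device)
    out = model.generate(pixel_values=inputs.pixel_values, max_length=300)
    # Assumed completion: decode the generated ids into the caption string.
    return processor.decode(out[0], skip_special_tokens=True)

print(run(PIL.Image.open("example.jpg").convert("RGB")))  # "example.jpg" is hypothetical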