daliavanilla committed on
Commit 1a2364b · verified · 1 Parent(s): e06fe15

Update app.py

Files changed (1)
  1. app.py +13 -7
app.py CHANGED
@@ -1,18 +1,24 @@
 import gradio as gr
 from PIL import Image
-processor = AutoProcessor.from_pretrained(daliavanilla/BLIP-Radiology-model)
-model = BlipForConditionalGeneration.from_pretrained(daliavanilla/BLIP-Radiology-model)
+import torch
+from transformers import BlipForConditionalGeneration, AutoProcessor
+
+# Load processor and model from Hugging Face Hub
+processor = AutoProcessor.from_pretrained("daliavanilla/BLIP-Radiology-model")
+model = BlipForConditionalGeneration.from_pretrained("daliavanilla/BLIP-Radiology-model")
+
+# Use GPU if available
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
 
 # Define the prediction function
 def generate_caption(image):
     # Process the image
     image = Image.fromarray(image)
-    #inputs = tokenizer(image, return_tensors="pt")
-    inputs = processor(images=image, return_tensors="pt")#.to(device)
-    pixel_values = inputs.pixel_values
+    inputs = processor(images=image, return_tensors="pt").to(device)
 
     # Generate caption
-    generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
+    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
     generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
 
     return generated_caption
@@ -20,7 +26,7 @@ def generate_caption(image):
 # Define the Gradio interface
 interface = gr.Interface(
     fn=generate_caption,
-    inputs=gr.Image(),
+    inputs=gr.Image(type="numpy"),  # Ensure the image type is correctly handled by PIL
     outputs=gr.Textbox(),
     live=True
 )
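
The commit only patches app.py. As a quick sanity check, here is a minimal sketch (not part of the commit) of how the updated pipeline could be exercised locally. It reuses the names defined above in app.py (generate_caption, interface), assumes numpy is installed alongside gradio, and assumes the file ends with a standard Gradio launch call, which this diff does not show:

import numpy as np

# Hypothetical smoke test, appended after the code shown in the diff above.
# Feed a dummy RGB array through generate_caption exactly as gr.Image(type="numpy")
# would, then start the web UI.
if __name__ == "__main__":
    dummy_image = np.zeros((224, 224, 3), dtype=np.uint8)  # stand-in for an uploaded radiograph
    print(generate_caption(dummy_image))                    # prints the decoded caption string

    # Assumed launch call; it is not visible in this diff.
    interface.launch()

Note that in the new version, processor(images=image, return_tensors="pt").to(device) moves the returned pixel_values to the same device as the model, so generate_caption needs no further .to(device) calls.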