Arnaviit committed
Commit 9f8099f · verified · 1 Parent(s): 6f58574

Create app.py

Files changed (1)
  app.py  +34 -0
app.py ADDED
@@ -0,0 +1,34 @@
+ import torch
+ import gradio as gr
+ from PIL import Image
+ import scipy.io.wavfile as wavfile
+
+ # Use a pipeline as a high-level helper
+ from transformers import pipeline
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ caption_image = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large", device=device)
+
+ narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs", device=device)
+
+ def generate_audio(text):
+     # Generate the narrated speech for the caption text
+     narrated_text = narrator(text)
+
+     # Save the audio to a WAV file
+     wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
+                   data=narrated_text["audio"][0])
+     # Return the path to the saved audio file
+     return "output.wav"
+
+ def caption_my_image(pil_image: Image.Image):
+     semantics = caption_image(pil_image)[0]['generated_text']
+     return generate_audio(semantics)
+
+ demo = gr.Interface(fn=caption_my_image,
+                     inputs=[gr.Image(label="Select Image", type="pil")],
+                     outputs=[gr.Audio(label="Caption (audio)")],
+                     title="Image Captioning by Arnav Anand",
+                     description="This application captions an uploaded image with AI and reads the caption aloud.")
+ demo.launch()
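
For reference, a minimal sanity check of the two pipelines (not part of this commit) could look like the sketch below; the file names sanity_check.py, sample.jpg, and check.wav are hypothetical placeholders.

# sanity_check.py (hypothetical): exercises the same pipelines as app.py without the Gradio UI
import scipy.io.wavfile as wavfile
from PIL import Image
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
tts = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

# The image-to-text pipeline returns a list of dicts, each with a "generated_text" key
caption = captioner(Image.open("sample.jpg"))[0]["generated_text"]  # "sample.jpg" is a placeholder image
print("Caption:", caption)

# The text-to-speech pipeline returns a dict with "audio" (a batch of waveforms) and "sampling_rate",
# which is why app.py writes narrated_text["audio"][0] to the WAV file
speech = tts(caption)
wavfile.write("check.wav", rate=speech["sampling_rate"], data=speech["audio"][0])
print("Wrote check.wav at", speech["sampling_rate"], "Hz")

Running this sketch needs the same dependencies as app.py itself: torch, transformers, scipy, and pillow (gradio is only needed for the web UI).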