import gradio as gr
import numpy as np
# Use a pipeline as a high-level helper
from transformers import pipeline

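# Image-captioning pipeline: generates a text description of an uploaded image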
pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

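# Text-to-speech pipeline: synthesizes speech audio from the caption text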
narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

def launch(input_image):
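    """Caption an image with BLIP, then narrate the caption with VITS."""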
    # Step 1: Extract caption
    caption = pipe(input_image)[0]["generated_text"]

    # Step 2: Convert caption to audio
    audio_output = narrator(caption)
    # VITS returns audio of shape (1, num_samples); flatten to 1-D for Gradio
    audio_array = np.array(audio_output["audio"]).squeeze()
    sample_rate = audio_output["sampling_rate"]

    # Step 3: Return audio + caption; gr.Audio(type="numpy") expects a (sample_rate, data) tuple
    return (sample_rate, audio_array), caption

# Build the Gradio interface: image in, narrated audio and caption out
iface = gr.Interface(
    fn=launch,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[
        gr.Audio(type="numpy", label="Narrated Audio"),
        gr.Textbox(label="Extracted Caption")
    ],
    title="SeeSay",
    description="Upload an image to hear its context narrated aloud."
)

iface.launch()
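# launch() serves the app locally (default http://127.0.0.1:7860); pass share=True for a temporary public URL.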