Update app.py
Browse files
app.py
CHANGED
@@ -7,31 +7,37 @@ import tempfile
|
|
7 |
import os
|
8 |
from PIL import Image
|
9 |
|
10 |
-
# Initialize pipelines
|
11 |
@st.cache_resource
|
12 |
def load_pipelines():
|
13 |
-
|
|
|
|
|
14 |
storyer = pipeline("text-generation", model="aspis/gpt2-genre-story-generation")
|
|
|
15 |
tts = pipeline("text-to-speech", model="facebook/mms-tts-eng")
|
16 |
return captioner, storyer, tts
|
17 |
|
|
|
18 |
captioner, storyer, tts = load_pipelines()
|
19 |
|
20 |
-
#
|
21 |
def generate_content(image):
|
22 |
-
# Convert
|
23 |
pil_image = Image.open(image)
|
24 |
|
25 |
-
# Generate caption
|
26 |
caption = captioner(pil_image)[0]["generated_text"]
|
27 |
st.write("**Caption:**", caption)
|
28 |
|
29 |
-
#
|
30 |
prompt = (
|
31 |
f"Write a funny, warm children's story for ages 3-10, 50–100 words, "
|
32 |
f"in third-person narrative, that describes this scene exactly: {caption} "
|
33 |
f"mention the exact place or venue within {caption}"
|
34 |
)
|
|
|
|
|
35 |
raw = storyer(
|
36 |
prompt,
|
37 |
max_new_tokens=150,
|
@@ -41,33 +47,45 @@ def generate_content(image):
|
|
41 |
return_full_text=False
|
42 |
)[0]["generated_text"].strip()
|
43 |
|
44 |
-
# Trim to
|
45 |
words = raw.split()
|
46 |
story = " ".join(words[:100])
|
47 |
st.write("**Story:**", story)
|
48 |
|
49 |
-
#
|
50 |
chunks = textwrap.wrap(story, width=200)
|
|
|
|
|
51 |
audio = np.concatenate([tts(chunk)["audio"].squeeze() for chunk in chunks])
|
52 |
|
53 |
-
# Save audio to temporary file
|
54 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_file:
|
55 |
sf.write(temp_file.name, audio, tts.model.config.sampling_rate)
|
56 |
temp_file_path = temp_file.name
|
57 |
|
58 |
return caption, story, temp_file_path
|
59 |
|
60 |
-
# Streamlit UI
|
61 |
st.title("Image to Children's Story and Audio")
|
62 |
-
st.
|
|
|
|
|
|
|
|
|
63 |
|
64 |
-
|
|
|
65 |
|
66 |
if uploaded_image is not None:
|
|
|
67 |
st.image(uploaded_image, caption="Uploaded Image", use_column_width=True)
|
68 |
-
|
69 |
-
|
|
|
|
|
|
|
70 |
caption, story, audio_path = generate_content(uploaded_image)
|
|
|
71 |
st.audio(audio_path, format="audio/wav")
|
72 |
-
#
|
73 |
os.remove(audio_path)
|
|
|
7 |
import os
|
8 |
from PIL import Image
|
9 |
|
10 |
# Build the three Hugging Face pipelines the app relies on. Decorated with
# st.cache_resource so the heavyweight models are loaded only once per
# server process and shared across Streamlit reruns.
@st.cache_resource
def load_pipelines():
    """Instantiate the captioning, story-generation and TTS pipelines.

    Returns:
        tuple: ``(captioner, storyer, tts)`` transformers pipelines, in the
        order the module-level caller unpacks them.
    """
    # Task -> model checkpoint, kept in the order the caller expects.
    model_specs = (
        ("image-to-text", "Salesforce/blip-image-captioning-large"),
        ("text-generation", "aspis/gpt2-genre-story-generation"),
        ("text-to-speech", "facebook/mms-tts-eng"),
    )
    captioner, storyer, tts = (pipeline(task, model=ckpt) for task, ckpt in model_specs)
    return captioner, storyer, tts
|
20 |
|
21 |
# Materialize the cached pipelines at import time so every Streamlit rerun
# of this script reuses the same model objects (st.cache_resource on
# load_pipelines makes the repeated call cheap).
captioner, storyer, tts = load_pipelines()
|
23 |
|
24 |
+
# Function to generate caption, story, and audio from an uploaded image
|
25 |
def generate_content(image):
|
26 |
+
# Convert the uploaded image to a PIL image format
|
27 |
pil_image = Image.open(image)
|
28 |
|
29 |
+
# Generate a caption based on the image content
|
30 |
caption = captioner(pil_image)[0]["generated_text"]
|
31 |
st.write("**Caption:**", caption)
|
32 |
|
33 |
+
# Create a prompt for generating a children's story
|
34 |
prompt = (
|
35 |
f"Write a funny, warm children's story for ages 3-10, 50–100 words, "
|
36 |
f"in third-person narrative, that describes this scene exactly: {caption} "
|
37 |
f"mention the exact place or venue within {caption}"
|
38 |
)
|
39 |
+
|
40 |
+
# Generate the story based on the prompt
|
41 |
raw = storyer(
|
42 |
prompt,
|
43 |
max_new_tokens=150,
|
|
|
47 |
return_full_text=False
|
48 |
)[0]["generated_text"].strip()
|
49 |
|
50 |
+
# Trim the generated story to a maximum of 100 words
|
51 |
words = raw.split()
|
52 |
story = " ".join(words[:100])
|
53 |
st.write("**Story:**", story)
|
54 |
|
55 |
+
# Split the story into chunks of 200 characters for text-to-speech processing
|
56 |
chunks = textwrap.wrap(story, width=200)
|
57 |
+
|
58 |
+
# Generate and concatenate audio for each text chunk
|
59 |
audio = np.concatenate([tts(chunk)["audio"].squeeze() for chunk in chunks])
|
60 |
|
61 |
+
# Save the concatenated audio to a temporary WAV file
|
62 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_file:
|
63 |
sf.write(temp_file.name, audio, tts.model.config.sampling_rate)
|
64 |
temp_file_path = temp_file.name
|
65 |
|
66 |
return caption, story, temp_file_path
|
67 |
|
68 |
# ---- Streamlit user interface ----
st.title("Image to Children's Story and Audio")
st.markdown("""
Upload an image below to generate a caption, a funny children's story,
and an audio narration based on the image. The story will be tailored
for children aged 3-10.
""")

# Image picker; only common raster formats are accepted.
accepted_types = ["jpg", "jpeg", "png"]
uploaded_image = st.file_uploader("Choose an image", type=accepted_types, help="Supported formats: JPG, JPEG, PNG")

if uploaded_image is not None:
    # Echo the chosen image back to the user before generating anything.
    st.image(uploaded_image, caption="Uploaded Image", use_column_width=True)

    # Generation is expensive, so it only runs on an explicit button press.
    if st.button("Generate Story and Audio", help="Click to create the story and audio"):
        with st.spinner("Generating your story and audio narration..."):
            caption, story, audio_path = generate_content(uploaded_image)
        # Play the narration, then delete the temporary WAV so files
        # written by generate_content do not accumulate on disk.
        st.audio(audio_path, format="audio/wav")
        os.remove(audio_path)
|