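"""Magic Story Maker: a Streamlit app that captions an uploaded picture,
turns the caption into a short children's story, and reads the story aloud
with a text-to-speech model."""
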
# Imports
import streamlit as st
from transformers import pipeline
import textwrap
import numpy as np
import soundfile as sf
import tempfile
import os
from PIL import Image
import string
# Initialize pipelines with caching
@st.cache_resource
def load_pipelines():
    captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
    storyer = pipeline("text-generation", model="aspis/gpt2-genre-story-generation")
    tts = pipeline("text-to-speech", model="facebook/mms-tts-eng")
    return captioner, storyer, tts
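
# Load the pipelines once at startup; st.cache_resource reuses them across reruns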
captioner, storyer, tts = load_pipelines()
# Function to extract keywords from caption
def extract_keywords(caption):
    # Simple keyword extraction: strip punctuation first so trailing commas and
    # periods don't defeat the stop-word check, then drop stop words and short words
    stop_words = {'the', 'a', 'an', 'in', 'on', 'at', 'of', 'to', 'is', 'are', 'with', 'and'}
    words = [word.strip(".,!?\"'") for word in caption.lower().split()]
    # Keep words longer than 2 characters that are not stop words
    keywords = [word for word in words if word not in stop_words and len(word) > 2]
    return keywords

# Function to generate content from an image
def generate_content(image):
    pil_image = Image.open(image)

    # Generate caption
    caption = captioner(pil_image)[0]["generated_text"]
    st.write("**🌟 What's in the picture: 🌟**")
    st.write(caption)

    # Extract keywords from the caption
    keywords = extract_keywords(caption)
    keywords_str = ", ".join(keywords)

    # Create prompt for story, ensuring keywords are included
    prompt = (
        f"Write a funny, warm children's story for ages 3-10, 50–100 words, "
        f"in third-person narrative, that describes this scene exactly: {caption}. "
        f"Explicitly include these keywords from the caption in the story: {keywords_str}. "
        f"Mention the exact place, location, or venue within the scene, such as a park, pool, or gym."
    )

    # Generate raw story
    raw = storyer(
        prompt,
        max_new_tokens=150,
        temperature=0.7,
        top_p=0.9,
        no_repeat_ngram_size=2,
        return_full_text=False
    )[0]["generated_text"].strip()

    # Define allowed characters to keep (removes symbols like * and ~)
    allowed_chars = string.ascii_letters + string.digits + " .,!?\"'-"
    # Clean the raw story by keeping only allowed characters
    clean_raw = ''.join(c for c in raw if c in allowed_chars)

    # Split into words and trim to 100 words
    words = clean_raw.split()
    story = " ".join(words[:100])
    st.write("**📖 Your funny story: 📖**")
    st.write(story)

    # Generate audio from cleaned story
    chunks = textwrap.wrap(story, width=200)
    audio = np.concatenate([tts(chunk)["audio"].squeeze() for chunk in chunks])

    # Save audio to temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_file:
        sf.write(temp_file.name, audio, tts.model.config.sampling_rate)
        temp_file_path = temp_file.name

    return caption, story, temp_file_path

# Streamlit UI
st.markdown(
    """
    <style>
    .stApp {
        background: radial-gradient(circle, #e6f3ff, #e6fff2);
    }
    </style>
    """,
    unsafe_allow_html=True
)

st.title("✨ Magic Story Maker ✨")
st.markdown("Upload a picture to make a funny story and hear it too! 📸")
uploaded_image = st.file_uploader("Choose your picture", type=["jpg", "jpeg", "png"])
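
# Show a placeholder image until the user uploads a picture, then preview the upload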
if uploaded_image is None:
    st.image("https://example.com/placeholder_image.jpg", caption="Upload your picture here! 📷", use_column_width=True)
else:
    st.image(uploaded_image, caption="Your Picture 🌟", use_column_width=True)

if st.button("✨ Make My Story! ✨"):
    if uploaded_image is not None:
        with st.spinner("🔮 Creating your magical story..."):
            caption, story, audio_path = generate_content(uploaded_image)
        st.success("🎉 Your story is ready! 🎉")
        st.audio(audio_path, format="audio/wav")
        os.remove(audio_path)
    else:
        st.warning("Please upload a picture first! 📸")