Update app.py
app.py CHANGED
@@ -1,13 +1,16 @@
+# Install required libraries
+!pip install gtts
+!pip install gradio
+!pip install transformers
+!pip install diffusers
+
 # Import libraries
 import gradio as gr
 from gtts import gTTS
 from io import BytesIO
 from PIL import Image
 from diffusers import DiffusionPipeline
-import openai
-
-# Set your OpenAI API key
-openai.api_key = "YOUR_OPENAI_API_KEY"
+from transformers import pipeline
 
 # Use a DiffusionPipeline for text-to-image
 image_generation_pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt")
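Note on the model: despite the "text-to-image" comment, stabilityai/stable-video-diffusion-img2vid-xt is an image-to-video checkpoint, so calling it with a text prompt will not behave as the comment suggests. A minimal sketch of a genuine text-to-image load, assuming a Stable Diffusion checkpoint such as runwayml/stable-diffusion-v1-5 (an illustrative choice, not what this commit uses):

from diffusers import DiffusionPipeline

# Assumed checkpoint for illustration; the commit itself loads an image-to-video model.
text_to_image_pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
result = text_to_image_pipe("a friendly dragon reading to a child")
image = result.images[0]  # the pipeline returns a list of PIL images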
@@ -18,8 +21,11 @@ messages = [{"role": "system", "content": "You are a magical storyteller, creati
 # Initialize page number
 current_page = 0
 
+# Initialize Hugging Face text generation pipeline
+gpt_neo_generator = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B", device=0)  # You can adjust the device parameter based on your setup
+
 # Define the Storyteller function
-def StorytellerGPT(character, child_name, lesson_choice, tell_story, _):
+def StorytellerHuggingFace(character, child_name, lesson_choice, tell_story, _):
     global current_page
 
     # Set the characters and lesson based on user choices
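For context, the transformers text-generation pipeline returns a list of dicts, which is why the next hunk indexes [0]['generated_text']; note also that 'generated_text' includes the original prompt, and that the messages list this app maintains is never actually passed to GPT-Neo. A minimal sketch of the call shape:

from transformers import pipeline

# Sketch of the pipeline the commit switches to; drop device=0 to run on CPU.
generator = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B")
outputs = generator("Once upon a time", max_length=50, num_return_sequences=1)
print(outputs[0]["generated_text"])  # prompt plus the generated continuation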
@@ -28,25 +34,16 @@ def StorytellerGPT(character, child_name, lesson_choice, tell_story, _):
 
     messages.append({"role": "user", "content": tell_story})
 
-    # Generate story using OpenAI
+    # Generate story using Hugging Face's GPT-Neo
     input_text = character_info + lesson_info + tell_story
-    story_reply = openai.Completion.create(
-        engine="text-davinci-003",  # Adjust the engine based on your preferences
-        prompt=input_text,
-        max_tokens=150,
-        n=1,
-        stop=None
-    )["choices"][0]["text"]
+    story_reply = gpt_neo_generator(input_text, max_length=150, num_return_sequences=1, no_repeat_ngram_size=2, top_k=50, top_p=0.95)[0]['generated_text']
 
     messages.append({"role": "assistant", "content": story_reply})
 
-    # Convert text to speech using Whisper
-
-    whisper_api_key = "YOUR_WHISPER_API_KEY"
-    tts = gTTS(text=story_reply, lang='en', slow=False, whisper_api_key=whisper_api_key)
+    # Convert text to speech using gTTS
+    tts = gTTS(text=story_reply, lang='en', slow=False)
     audio_io = BytesIO()
-    tts.save(audio_io)
-    audio_io.seek(0)
+    tts.save("/content/audio_output.mp3")
 
     # Convert text to image using DiffusionPipeline
     image_reply = image_generation_pipe(story_reply)
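Two caveats on the speech step: /content/... is a Colab-style path that may not exist outside a notebook VM, and the removed whisper_api_key argument was never a valid gTTS parameter (Whisper is speech-to-text, not text-to-speech). If an in-memory buffer is preferred over a file, gTTS supports writing to a file-like object; a small sketch:

from io import BytesIO
from gtts import gTTS

tts = gTTS(text="Once upon a time...", lang="en", slow=False)
audio_io = BytesIO()
tts.write_to_fp(audio_io)  # gTTS's file-object API; avoids writing to disk
audio_io.seek(0)           # rewind before handing the buffer to a player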
@@ -55,11 +52,11 @@ def StorytellerGPT(character, child_name, lesson_choice, tell_story, _):
     story_pages = story_reply.split("\n\n")  # Split the story into pages
     current_page = min(current_page, len(story_pages) - 1)  # Ensure the current_page is within bounds
 
-    return story_pages[current_page], audio_io, image_reply
+    return story_pages[current_page], "/content/audio_output.mp3", image_reply
 
 # Create the Gradio Interface with styling
 demo = gr.Interface(
-    fn=StorytellerGPT,
+    fn=StorytellerHuggingFace,
     inputs=[
         gr.Textbox("text", label="Child's Name"),
         gr.Dropdown(["unicorn", "dragon", "wizard"], label="Choose a Character"),
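The diff is cut off mid-definition. Since StorytellerHuggingFace takes five arguments and returns a three-element tuple, a completed interface would need five inputs and three outputs; the sketch below fills in the unseen parts with assumptions (the lesson options, the final textbox, and the outputs list are all hypothetical):

demo = gr.Interface(
    fn=StorytellerHuggingFace,
    inputs=[
        gr.Textbox(label="Child's Name"),
        gr.Dropdown(["unicorn", "dragon", "wizard"], label="Choose a Character"),
        # The inputs below are assumptions added to match the five-argument signature:
        gr.Dropdown(["kindness", "courage", "honesty"], label="Choose a Lesson"),
        gr.Textbox(label="Start the Story"),
        gr.Textbox(visible=False),  # placeholder for the unused trailing "_" argument
    ],
    # Three outputs to match (story page, audio path, image):
    outputs=[gr.Textbox(label="Story"), gr.Audio(label="Narration"), gr.Image(label="Illustration")],
)
demo.launch()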