import gc

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def get_model_name(language):
    """Map language choice to the corresponding model."""
    model_mapping = {
        "English": "microsoft/Phi-3-mini-4k-instruct",
        "Arabic": "ALLaM-AI/ALLaM-7B-Instruct-preview"
    }
    return model_mapping.get(language, "ALLaM-AI/ALLaM-7B-Instruct-preview")  # Default to Arabic model

def load_model(model_name):
    """Load the model and tokenizer, and wrap them in a text-generation pipeline."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map=device,
        torch_dtype="auto",
        trust_remote_code=True,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        return_full_text=False,  # Return only the newly generated text
        max_new_tokens=500,
        do_sample=True,  # Enable sampling for more creative outputs
        top_k=50,        # Sample only from the 50 most likely tokens
        top_p=0.95       # Nucleus sampling for diversity
    )
    # The pipeline keeps its own references to the model and tokenizer,
    # so the local names can simply go out of scope here; deleting them
    # would not free any memory.
    return generator
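
# Illustrative sketch (not executed by the app): a generator returned by
# load_model can be called directly with chat-style messages, assuming a
# recent transformers version that accepts a list of role/content dicts:
#   gen = load_model("microsoft/Phi-3-mini-4k-instruct")
#   print(gen([{"role": "user", "content": "Tell me a short joke."}])[0]["generated_text"])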


@spaces.GPU
def generate_kids_story(character, setting, language):
    """Generate a short kids' story in the chosen language."""
    model_name = get_model_name(language)
    generator = load_model(model_name)

    # Build the prompt for the model
    if language == "English":
        prompt = (f"Write a short story for kids about a character named {character} who goes on an adventure in {setting}. "
                  "Make it fun, engaging, and suitable for children.")
    else:
        # Arabic prompt: "Write a short story for kids about a character named
        # {character} who goes on an adventure in {setting}. Make it fun,
        # engaging, and suitable for children."
        prompt = (f"اكتب قصة قصيرة للأطفال عن شخصية اسمها {character} تذهب في مغامرة في {setting}. "
                  "اجعلها ممتعة وجذابة ومناسبة للأطفال.")

    messages = [{"role": "user", "content": prompt}]
    output = generator(messages)

    # Drop the generator (and the model it holds) and reclaim memory,
    # since a fresh model is loaded on every request
    del generator
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    return output[0]["generated_text"]
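
# Quick manual test (illustrative; bypasses the Gradio UI):
#   story = generate_kids_story("Benny the Bunny", "a magical forest", "English")
#   print(story)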

# Create Gradio interface
demo = gr.Interface(
    fn=generate_kids_story,
    inputs=[
        gr.Textbox(placeholder="Enter a character name (e.g., Benny the Bunny)...", label="Character Name"),
        gr.Textbox(placeholder="Enter a setting (e.g., a magical forest)...", label="Setting"),
        gr.Dropdown(
            choices=["English", "Arabic"],
            label="Choose Language",
            value="English"  # Default to English
        )
    ],
    outputs=gr.Textbox(label="Kids' Story"),
    title="📖 AI Kids' Story Generator - English & Arabic 📖",
    description="Enter a character name and a setting, and AI will generate a fun short story for kids in English or Arabic.",
    examples=[
        ["Benny the Bunny", "a magical forest", "English"],
        ["علي البطل", "غابة سحرية", "Arabic"],
        ["Lila the Ladybug", "a garden full of flowers", "English"],
        ["ليلى الجنية", "حديقة مليئة بالأزهار", "Arabic"]
    ],
    theme="default",
)

# Launch the Gradio app
demo.launch()
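
# Note: demo.launch(share=True) creates a temporary public link when running
# locally; on Hugging Face Spaces the plain launch() above is sufficient.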