awacke1 commited on
Commit
4c1e59d
·
verified ·
1 Parent(s): 0a624fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +161 -25
app.py CHANGED
@@ -1,37 +1,173 @@
1
- from transformers import pipeline
2
  import gradio as gr
3
- from gradio.mix import Parallel, Series
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- io1 = gr.Interface.load('huggingface/sshleifer/distilbart-cnn-12-6')
6
- io2 = gr.Interface.load("huggingface/facebook/bart-large-cnn")
7
- io3 = gr.Interface.load("huggingface/google/pegasus-xsum")
8
- #io3 = gr.Interface.load("huggingface/emilyalsentzer/Bio_Discharge_Summary_BERT")
9
- #io3 = gr.Interface.load("huggingface/google/pegasus-pubmed")
10
- #io3 = gr.Interface.load("huggingface/tennessejoyce/titlewave-t5-base")
 
 
11
 
12
-
 
 
 
13
 
14
- desc = "Summary NLP State of the Art Review of Summarization Tools. Length of text and model size influence response time. There is a trade off with accuracy for domain specific NLP and NLU."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- x = """Billions of people can live much longer and much healthier lives. As death drifts farther into the distance how will our search for meaning change as we reduce the health effects of aging as a disease? Does meaning require death or does it merely require struggle of reprogramming our biology? It will require us to delve deeper into understanding the human mind and the AI mind. Do your best to make humanity the best it can be. That is who I am and that is how I live. It is what i get up in the morning to do. I believe love is leaving the world a better place and helping others be the best they can be. Is it possible to bring back people that mean something to us personally? Not just brilliant scientists like Einstein and Johnny Von Neumann but also people that we've lost. Is there a way to achieve a kind of small artificial immortality? Where you are against others in your age group as in terms of health and longevity by your age is called your inner age. You are biologically based on what we call the epigenetic clock. We know that smoking increases the speed of that clock. We also know that fasting and people who eat the right foods have a slower clock. Without that knowledge you're flying blind.
17
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- y = '''Topic identification, interpretation, summary generation, and evaluation of the generated summary are the key challenges in text summarization. Critical tasks in extraction-based summarization are identifying key phrases in the document and using them to select sentences in the document for inclusion in the summary.
 
 
 
 
 
20
 
21
- In contrast, abstraction-based methods paraphrase sections of the source document.
22
 
23
- Extraction-based summarizers perform capturing key aspects of text and storing as an intermediate representation. Scoring words, utterances and sentences in text is based on that representation. Composing a summary by selecting across a range of words improves accuracy.'''
 
 
 
24
 
25
- z = '''Nine trends explain innovations in machine learning technologies benefit you and your business in 2022. These include: No-Code Machine Learning, Cognitive AI, NLP Match Language Modeling, TinyML, AutoML, Machine Learning Operationalization Management, Full-stack Deep Learning, Generative Adversarial Networks, Unsupervised ML, and finally Reinforcement Learning.
26
- '''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- sample = [[y],[x],[z]]
 
 
 
 
29
 
30
- iface = Parallel(io1, io2, io3,
31
- theme='huggingface',
32
- title= 'Summary Generators',
33
- description = desc,
34
- examples=sample, #"sample" is a directory which lets gradio scan through files and give you the text
35
- inputs = gr.inputs.Textbox(lines = 10, label="Text"))
36
 
37
- iface.launch(inline = False)
 
 
 
1
# Third-party imports: gradio for the UI, torch + transformers for generation.
import gradio as gr
import torch
from transformers import pipeline
import os  # NOTE(review): appears unused in this file — confirm before removing

# --- App Configuration ---
# TITLE and DESCRIPTION are rendered verbatim as markdown in the UI header.
TITLE = "✍️ AI Story Outliner"
DESCRIPTION = """
Enter a prompt and get 10 unique story outlines from a CPU-friendly AI model.
The app uses **DistilGPT-2**, a reliable and lightweight model, to generate creative outlines.

**How it works:**
1. Enter your story idea.
2. The AI will generate 10 different story outlines.
3. Each outline has a dramatic beginning and is concise, like a song.
"""
17
 
18
# --- Example Prompts for Storytelling ---
# Each entry is a one-element list because gr.Examples maps each inner list
# to the interface's single input component.
examples = [
    ["The old lighthouse keeper stared into the storm. He'd seen many tempests, but this one was different. This one had eyes..."],
    ["In a city powered by dreams, a young inventor creates a machine that can record them. His first recording reveals a nightmare that doesn't belong to him."],
    ["The knight adjusted his helmet, the dragon's roar echoing in the valley. He was ready for the fight, but for what the dragon said when it finally spoke."],
    ["She found the old leather-bound journal in her grandfather's attic. The first entry read: 'To relieve stress, I walk in the woods. But today, the woods walked with me.'"],
    ["The meditation app promised to help her 'delete unhelpful thoughts.' She tapped the button, and to her horror, the memory of her own name began to fade..."]
]
26
 
27
# --- Model Initialization ---
# Load a small, CPU-friendly text-generation model once at startup. Any
# failure is captured in `model_error` (instead of crashing the app) so the
# UI callback can surface it to the user.
generator = None
model_error = None

try:
    print("Initializing model... This may take a moment.")

    # 'distilgpt2' is a stable, widely supported model that requires no
    # authentication token and is light enough for a standard CPU Space.
    generator = pipeline(
        "text-generation",
        model="distilgpt2",
        torch_dtype=torch.float32,  # float32 for wider CPU compatibility
        device_map="auto",  # will use GPU if available, otherwise CPU
    )
    print("✅ distilgpt2 model loaded successfully!")

except Exception as e:
    # Broad catch is deliberate at this startup boundary: record the error
    # for the UI and log it, rather than taking the whole app down.
    model_error = e
    # Fixed: was an f-string with no placeholders (redundant `f` prefix).
    print("--- 🚨 Error loading model ---")
    print(f"Error: {model_error}")
50
+
51
# --- App Logic ---
def generate_stories(prompt: str) -> list[str]:
    """Generate 10 story outlines from the loaded model for *prompt*.

    Always returns a list of exactly 10 markdown strings:
    - the startup error repeated 10x if the model failed to load,
    - 10 empty strings if the prompt is empty (clears the outputs),
    - otherwise 10 generated outlines (padded with a failure notice if the
      model returns fewer sequences than requested).
    """
    print("--- Button clicked. Attempting to generate stories... ---")

    # If the model failed to load during startup, surface that error in
    # every output slot instead of raising.
    if model_error:
        error_message = (
            "**Model failed to load during startup.**\n\n"
            "Please check the console logs for details.\n\n"
            f"**Error:**\n`{str(model_error)}`"
        )
        print(f"Returning startup error: {error_message}")
        return [error_message] * 10

    if not prompt:
        # Return a list of 10 empty strings to clear the outputs.
        return [""] * 10

    # Fixed: the previous revision was stuck in a temporary "DEBUG MODE"
    # that returned hard-coded placeholders; the real generation path below
    # is now re-enabled.
    try:
        # A generic story prompt that works well with GPT-2-style models.
        story_prompt = f"""
Story Idea: "{prompt}"

Create a short story outline based on this idea.

### 🎬 The Hook
A dramatic opening.

### 🎼 The Ballad
The main story, told concisely.

### 🔚 The Finale
A clear and satisfying ending.
---
"""

        # Sampling parameters tuned to produce 10 diverse results in one call.
        params = {
            "max_new_tokens": 200,
            "num_return_sequences": 10,
            "do_sample": True,
            "temperature": 0.9,
            "top_k": 50,
            "pad_token_id": generator.tokenizer.eos_token_id,
        }

        print("Generating text with the model...")
        outputs = generator(story_prompt, **params)
        print("✅ Text generation complete.")

        # Extract the generated text from each returned sequence.
        stories = [out["generated_text"] for out in outputs]

        # Ensure exactly 10 stories: pad if the model under-delivered,
        # truncate defensively if it over-delivered.
        while len(stories) < 10:
            stories.append("Failed to generate a story for this slot.")
        return stories[:10]

    except Exception as e:
        # Catch errors that happen DURING generation and show them in the UI
        # rather than leaving the outputs blank.
        print("--- 🚨 Error during story generation ---")
        print(f"Error: {e}")
        runtime_error_message = (
            "**An error occurred during story generation.**\n\n"
            "Please check the console logs for details.\n\n"
            f"**Error:**\n`{str(e)}`"
        )
        return [runtime_error_message] * 10
128
 
 
129
 
130
# --- Gradio Interface ---
# Blocks layout: prompt input + button on top, then 10 markdown outputs
# arranged as two columns of five. Component creation order matters: the
# order of `story_outputs` must match the order of the lists returned by
# generate_stories().
with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 95% !important;}") as demo:
    gr.Markdown(f"<h1 style='text-align: center;'>{TITLE}</h1>")
    gr.Markdown(DESCRIPTION)

    with gr.Row():
        with gr.Column(scale=1):
            input_area = gr.TextArea(
                lines=5,
                label="Your Story Prompt 👇",
                placeholder="e.g., 'The last dragon on Earth lived not in a cave, but in a library...'"
            )
            generate_button = gr.Button("Generate 10 Outlines ✨", variant="primary")

    gr.Markdown("---")
    gr.Markdown("## 📖 Your 10 Story Outlines")

    # Create 10 markdown components to display the stories in two columns:
    # outlines 1-5 in the left column, 6-10 in the right.
    story_outputs = []
    with gr.Row():
        with gr.Column():
            for i in range(5):
                md = gr.Markdown(label=f"Story Outline {i + 1}")
                story_outputs.append(md)
        with gr.Column():
            for i in range(5, 10):
                md = gr.Markdown(label=f"Story Outline {i + 1}")
                story_outputs.append(md)

    gr.Examples(
        examples=examples,
        inputs=input_area,
        label="Example Story Starters (Click to use)"
    )

    # Wire the button to the generator; also exposed via the HTTP API
    # under the name "generate".
    generate_button.click(
        fn=generate_stories,
        inputs=input_area,
        outputs=story_outputs,
        api_name="generate"
    )

if __name__ == "__main__":
    demo.launch()