awacke1 committed on
Commit a217af8 · verified · 1 Parent(s): 266dedd

Update app.py

Files changed (1)
  1. app.py +82 -127
app.py CHANGED
@@ -1,130 +1,88 @@
  import gradio as gr
  import torch
  from transformers import pipeline
  import os

  # --- App Configuration ---
- TITLE = "✍️ AI Story Outliner"
  DESCRIPTION = """
- Enter a prompt and get 10 unique story outlines from a CPU-friendly AI model.
- The app uses **DistilGPT-2**, a reliable and lightweight model, to generate creative outlines.
-
- **How it works:**
- 1. Enter your story idea.
- 2. The AI will generate 10 different story outlines.
- 3. Each outline has a dramatic beginning and is concise, like a song.
  """

- # --- Example Prompts for Storytelling ---
- examples = [
-     ["The old lighthouse keeper stared into the storm. He'd seen many tempests, but this one was different. This one had eyes..."],
-     ["In a city powered by dreams, a young inventor creates a machine that can record them. His first recording reveals a nightmare that doesn't belong to him."],
-     ["The knight adjusted his helmet, the dragon's roar echoing in the valley. He was ready for the fight, but not for what the dragon said when it finally spoke."],
-     ["She found the old leather-bound journal in her grandfather's attic. The first entry read: 'To relieve stress, I walk in the woods. But today, the woods walked with me.'"],
-     ["The meditation app promised to help her 'delete unhelpful thoughts.' She tapped the button, and to her horror, the memory of her own name began to fade..."]
- ]

  # --- Model Initialization ---
- # This section loads a smaller, stable, and CPU-friendly model that requires no authentication.
- generator = None
  model_error = None

  try:
-     print("Initializing model... This may take a moment.")
-
-     # Using 'distilgpt2', a stable and widely supported model that does not require a token.
-     # This is much more suitable for a standard CPU environment.
-     generator = pipeline(
-         "text-generation",
-         model="distilgpt2",
-         torch_dtype=torch.float32,  # Use float32 for wider CPU compatibility
-         device_map="auto"  # Will use GPU if available, otherwise CPU
-     )
-     print("✅ distilgpt2 model loaded successfully!")
  except Exception as e:
      model_error = e
-     print(f"--- 🚨 Error loading model ---")
      print(f"Error: {model_error}")


  # --- App Logic ---
- def generate_stories(prompt: str) -> list[str]:
      """
-     Generates 10 story outlines from the loaded model based on the user's prompt.
      """
-     print("--- Button clicked. Attempting to generate stories... ---")
-
-     # If the model failed to load during startup, display that error.
      if model_error:
-         error_message = f"**Model failed to load during startup.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(model_error)}`"
-         print(f"Returning startup error: {error_message}")
-         return [error_message] * 10
-
-     if not prompt:
-         # Return a list of 10 empty strings to clear the outputs.
-         return [""] * 10
-
-     # --- DEBUGGING STEP ---
-     # To isolate the problem, we will first return a simple list of strings
-     # to confirm the Gradio UI is working correctly. If this works, the issue
-     # is with the model pipeline itself.
-     print("--- RUNNING IN DEBUG MODE ---")
-     debug_stories = [f"### Story Placeholder {i+1}\n\nThis is a test to confirm the UI is working." for i in range(10)]
-     return debug_stories
-
-     # --- ORIGINAL CODE (Temporarily disabled for debugging) ---
-     # try:
-     #     # A generic story prompt that works well with models like GPT-2.
-     #     story_prompt = f"""
-     # Story Idea: "{prompt}"
-
-     # Create a short story outline based on this idea.
-
-     # ### 🎬 The Hook
-     # A dramatic opening.
-
-     # ### 🎼 The Ballad
-     # The main story, told concisely.
-
-     # ### 🔚 The Finale
-     # A clear and satisfying ending.
-     # ---
-     # """
-
-     #     # Parameters for the pipeline to generate 10 diverse results.
-     #     params = {
-     #         "max_new_tokens": 200,
-     #         "num_return_sequences": 10,
-     #         "do_sample": True,
-     #         "temperature": 0.9,
-     #         "top_k": 50,
-     #         "pad_token_id": generator.tokenizer.eos_token_id
-     #     }
-
-     #     print("Generating text with the model...")
-     #     # Generate 10 different story variations.
-     #     outputs = generator(story_prompt, **params)
-     #     print("✅ Text generation complete.")
-
-     #     # Extract the generated text.
-     #     stories = []
-     #     for out in outputs:
-     #         full_text = out['generated_text']
-     #         stories.append(full_text)
-
-     #     # Ensure we return exactly 10 stories, padding if necessary.
-     #     while len(stories) < 10:
-     #         stories.append("Failed to generate a story for this slot.")
-
-     #     return stories
-
-     # except Exception as e:
-     #     # Catch any errors that happen DURING generation and display them in the UI.
-     #     print(f"--- 🚨 Error during story generation ---")
-     #     print(f"Error: {e}")
-     #     runtime_error_message = f"**An error occurred during story generation.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(e)}`"
-     #     return [runtime_error_message] * 10


  # --- Gradio Interface ---
@@ -133,40 +91,37 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 95% !i
      gr.Markdown(DESCRIPTION)

      with gr.Row():
          with gr.Column(scale=1):
-             input_area = gr.TextArea(
-                 lines=5,
-                 label="Your Story Prompt 👇",
-                 placeholder="e.g., 'The last dragon on Earth lived not in a cave, but in a library...'"
              )
-             generate_button = gr.Button("Generate 10 Outlines ✨", variant="primary")
-
-     gr.Markdown("---")
-     gr.Markdown("## 📖 Your 10 Story Outlines")
-
-     # Create 10 markdown components to display the stories in two columns
-     story_outputs = []
-     with gr.Row():
-         with gr.Column():
-             for i in range(5):
-                 md = gr.Markdown(label=f"Story Outline {i + 1}")
-                 story_outputs.append(md)
-         with gr.Column():
-             for i in range(5, 10):
-                 md = gr.Markdown(label=f"Story Outline {i + 1}")
-                 story_outputs.append(md)

      gr.Examples(
-         examples=examples,
-         inputs=input_area,
-         label="Example Story Starters (Click to use)"
      )

-     generate_button.click(
-         fn=generate_stories,
-         inputs=input_area,
-         outputs=story_outputs,
-         api_name="generate"
      )

  if __name__ == "__main__":
+
  import gradio as gr
  import torch
  from transformers import pipeline
  import os

  # --- App Configuration ---
+ TITLE = "📄 AI Summarization Tool"
  DESCRIPTION = """
+ Enter text to compare summaries from three different state-of-the-art models.
+ This app uses:
+ 1. **DistilBART-CNN:** A fast and efficient summarizer.
+ 2. **BART-Large-CNN:** A large, high-performance model.
+ 3. **Pegasus-XSUM:** A model known for more abstractive, human-like summaries.
+ """

+ # --- Example Texts ---
+ example_1 = """
+ Topic identification, interpretation, summary generation, and evaluation of the generated summary are the key challenges in text summarization. Critical tasks in extraction-based summarization are identifying key phrases in the document and using them to select sentences in the document for inclusion in the summary. In contrast, abstraction-based methods paraphrase sections of the source document. Extraction-based summarizers capture key aspects of the text and store them as an intermediate representation; words, utterances, and sentences are then scored based on that representation. Composing a summary by selecting from a range of words improves accuracy.
+ """
+ example_2 = """
+ Billions of people can live much longer and much healthier lives. As death drifts farther into the distance, how will our search for meaning change as we reduce the health effects of aging as a disease? Does meaning require death, or does it merely require the struggle of reprogramming our biology? It will require us to delve deeper into understanding the human mind and the AI mind. Do your best to make humanity the best it can be. That is who I am and that is how I live. It is what I get up in the morning to do. I believe love is leaving the world a better place and helping others be the best they can be. Is it possible to bring back people that mean something to us personally? Not just brilliant scientists like Einstein and Johnny Von Neumann but also people that we've lost. Is there a way to achieve a kind of small artificial immortality? Where you stand against others in your age group in terms of health and longevity is called your inner age. Your biological age is based on what we call the epigenetic clock. We know that smoking increases the speed of that clock. We also know that fasting and people who eat the right foods have a slower clock. Without that knowledge you're flying blind.
  """
+ example_3 = """
+ Ten trends explain how innovations in machine learning technologies benefit you and your business in 2022. These include: No-Code Machine Learning, Cognitive AI, NLP Match Language Modeling, TinyML, AutoML, Machine Learning Operationalization Management, Full-stack Deep Learning, Generative Adversarial Networks, Unsupervised ML, and finally Reinforcement Learning.
+ """
+ sample_texts = [[example_1], [example_2], [example_3]]

  # --- Model Initialization ---
+ # This section loads the three summarization models.
+ summarizer_1 = None
+ summarizer_2 = None
+ summarizer_3 = None
  model_error = None

  try:
+     print("Initializing models... This may take a moment.")
+     device = "cuda" if torch.cuda.is_available() else "cpu"

+     summarizer_1 = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=device)
+     print("✅ Model 1/3 (DistilBART) loaded.")
+
+     summarizer_2 = pipeline("summarization", model="facebook/bart-large-cnn", device=device)
+     print("✅ Model 2/3 (BART-Large) loaded.")
+
+     summarizer_3 = pipeline("summarization", model="google/pegasus-xsum", device=device)
+     print("✅ Model 3/3 (Pegasus-XSUM) loaded.")

  except Exception as e:
      model_error = e
+     print(f"--- 🚨 Error loading models ---")
      print(f"Error: {model_error}")


  # --- App Logic ---
+ def generate_summaries(text_to_summarize: str) -> tuple[str, str, str]:
      """
+     Takes input text and returns summaries from all three models.
      """
+     print("--- Button clicked. Attempting to generate summaries... ---")
+
+     # If models failed to load during startup, display an error.
      if model_error:
+         error_message = f"**A model failed to load during startup.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(model_error)}`"
+         return error_message, error_message, error_message
+
+     if not text_to_summarize:
+         return "", "", ""
+
+     try:
+         # Generate summaries from all three models.
+         # Each pipeline returns a list of dictionaries; we extract the summary text.
+         summary_1 = summarizer_1(text_to_summarize, max_length=150, min_length=30, do_sample=False)[0]['summary_text']
+         summary_2 = summarizer_2(text_to_summarize, max_length=150, min_length=30, do_sample=False)[0]['summary_text']
+         summary_3 = summarizer_3(text_to_summarize, max_length=150, min_length=30, do_sample=False)[0]['summary_text']

+         print("✅ Summaries generated successfully.")
+         return summary_1, summary_2, summary_3

+     except Exception as e:
+         print(f"--- 🚨 Error during summarization ---")
+         print(f"Error: {e}")
+         runtime_error_message = f"**An error occurred during summarization.**\n\n**Error:**\n`{str(e)}`"
+         return runtime_error_message, runtime_error_message, runtime_error_message


  # --- Gradio Interface ---

      gr.Markdown(DESCRIPTION)

      with gr.Row():
+         # Input Column
          with gr.Column(scale=1):
+             input_textbox = gr.TextArea(
+                 lines=15,
+                 label="Enter Text to Summarize",
+                 placeholder="Paste your article, notes, or any other text here..."
              )
+             summarize_button = gr.Button("Generate Summaries", variant="primary")
+
+         # Output Column with Tabs
+         with gr.Column(scale=1):
+             with gr.Tabs():
+                 with gr.TabItem("DistilBART (Fast)"):
+                     output_1 = gr.TextArea(label="Summary from DistilBART", interactive=False, lines=15)
+                 with gr.TabItem("BART-Large (Balanced)"):
+                     output_2 = gr.TextArea(label="Summary from BART-Large", interactive=False, lines=15)
+                 with gr.TabItem("Pegasus (Abstractive)"):
+                     output_3 = gr.TextArea(label="Summary from Pegasus", interactive=False, lines=15)

      gr.Examples(
+         examples=sample_texts,
+         inputs=input_textbox,
+         label="Example Texts (Click to use)"
      )

+     # Connect the button to the function
+     summarize_button.click(
+         fn=generate_summaries,
+         inputs=input_textbox,
+         outputs=[output_1, output_2, output_3],
+         api_name="summarize"
      )

  if __name__ == "__main__":
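
To try the updated comparison logic outside the Space, here is a minimal standalone sketch of what `generate_summaries` does, using the same three checkpoints as the new app.py. The placeholder input text is an assumption to be replaced with real content, and exact summaries will vary with model and library versions.

```python
import torch
from transformers import pipeline

# Checkpoints taken directly from the updated app.py.
CHECKPOINTS = [
    "sshleifer/distilbart-cnn-12-6",  # fast, distilled summarizer
    "facebook/bart-large-cnn",        # larger, high-performance model
    "google/pegasus-xsum",            # more abstractive summaries
]

# Same device selection as the app: GPU if available, otherwise CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
text = "Paste a long article or any other text here..."  # placeholder input

for name in CHECKPOINTS:
    summarizer = pipeline("summarization", model=name, device=device)
    # Same generation settings as the app: deterministic, 30-150 tokens.
    result = summarizer(text, max_length=150, min_length=30, do_sample=False)
    print(f"--- {name} ---\n{result[0]['summary_text']}\n")
```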
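Because the click handler registers `api_name="summarize"`, the deployed app also exposes a named API endpoint. A minimal sketch of calling it with `gradio_client`, assuming a hypothetical Space id (substitute the actual deployment):

```python
from gradio_client import Client

# Hypothetical Space id; replace with the real one for this app.
client = Client("awacke1/ai-summarization-tool")

# One input (the text to summarize), three outputs (one summary per model).
distilbart, bart_large, pegasus = client.predict(
    "Paste a long article or any other text here...",
    api_name="/summarize",
)
print(pegasus)
```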