Update app.py
app.py CHANGED
@@ -1,425 +1,299 @@
-#
 import gradio as gr
 import os
 import time
-

 # --- Core Logic Imports ---
-from core.
- …
-    "Mistral 7B (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
-    })
-    if not UI_DEFAULT_MODEL_KEY: UI_DEFAULT_MODEL_KEY = "Gemma 2B (HF Test)"
-else: print("WARNING: app.py - HF API not configured.")
-
-if not AVAILABLE_MODELS_CONFIG:
-    AVAILABLE_MODELS_CONFIG["No Models Available (Setup API Keys!)"] = {"id": "dummy_error", "type": "none"}
-    UI_DEFAULT_MODEL_KEY = "No Models Available (Setup API Keys!)"
-elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
-    UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0]
-
-# --- UI Customization (Conceptual - real CSS would be in a file) ---
-# For a "WOW" UI, you'd link a custom CSS file.
-# Here's a conceptual placeholder for some styles we might imply.
-APP_THEME = gr.themes.Soft(
-    primary_hue=gr.themes.colors.blue,
-    secondary_hue=gr.themes.colors.sky,
-    neutral_hue=gr.themes.colors.slate,
-    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
-).set(
-    # Example: input_background_fill="rgba(240, 240, 240, 0.5)" # Slightly transparent inputs
-    # button_primary_background_fill="linear-gradient(to bottom right, hsl(210, 80%, 50%), hsl(210, 100%, 30%))"
 )

-# ---
-def
- …
 ):
- …
-    log_accumulator = [f"**AlgoForge Omega™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**\n"]
-    # Initial state for UI outputs
-    yield {
-        output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🚀 Initializing AlgoForge Omega™...</p>", visible=True),
-        output_initial_solutions_accordion: gr.Accordion(label="⏳ Generating Initial Candidates...", open=False, visible=True),
-        output_initial_solutions_markdown: gr.Markdown(value="Working...", visible=True),
-        output_champion_accordion: gr.Accordion(label="⏳ Awaiting Champion Selection...", open=False, visible=False),
-        output_champion_markdown: gr.Markdown(value="", visible=False),
-        output_evolved_accordion: gr.Accordion(label="⏳ Awaiting Evolution...", open=False, visible=False),
-        output_evolved_markdown: gr.Markdown(value="", visible=False),
-        output_ai_test_analysis_markdown: gr.Markdown(value="", visible=False),
-        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator), visible=True),
-        engage_button: gr.Button(interactive=False) # Disable button during run
-    }
-
-    try:
-        start_time = time.time()
-
-        if not problem_description_text.strip():
-            raise ValueError("Problem Description is mandatory.")
-
-        current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
-        if not current_model_config or current_model_config["type"] == "none":
-            raise ValueError(f"No valid LLM selected ('{selected_model_ui_key}'). Check API key configurations.")
-
-        log_accumulator.append(f"Selected Model: {selected_model_ui_key} (Type: {current_model_config['type']}, ID: {current_model_config['id']})")
-        log_accumulator.append(f"Problem Type: {problem_type_selected}")
-        log_accumulator.append(f"User Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}\n")
-        yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }

- …
-            output_initial_solutions_accordion: gr.Accordion(label="Initial Candidates & Evaluations (Processing...)", open=True)
-        }
-        log_accumulator.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
-        evaluated_candidates_list = []
-        initial_solutions_md_accumulator = ["**Initial Candidates & Detailed Evaluations:**\n"]
-
-        for i, candidate_solution_text in enumerate(initial_raw_solutions):
-            log_accumulator.append(f"\n--- Evaluating Candidate {i+1} ---")
-            yield { output_status_bar: gr.HTML(value=f"<p style='color: dodgerblue;'>🔬 Evaluating Candidate {i+1} of {num_initial_solutions_to_gen}...</p>") }
-
-            evaluation_output_obj = evaluate_solution_candidate(str(candidate_solution_text), problem_description_text, problem_type_selected, user_provided_tests_code, llm_config_critique)
-            evaluated_candidates_list.append({"id": i + 1, "solution_text": str(candidate_solution_text), "evaluation_obj": evaluation_output_obj})
-
-            log_accumulator.append(f"  Combined Score: {evaluation_output_obj.combined_score}/10")
-            # ... (more detailed logging from evaluation_obj as before)
-
-            # Update UI with this candidate's evaluation progressively
-            current_eval_md = (
-                f"**Candidate {i+1} (Score: {evaluation_output_obj.combined_score}/10):**\n"
-                f"```python\n{str(candidate_solution_text)}\n```\n\n"
-                f"**Evaluation Verdict:**\n{evaluation_output_obj.get_display_critique()}\n---"
-            )
-            initial_solutions_md_accumulator.append(current_eval_md)
-            yield {
-                output_initial_solutions_markdown: gr.Markdown(value="\n".join(initial_solutions_md_accumulator)),
-                output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
-            }
-
-        # --- STAGE 3: SELECTION OF CHAMPION ---
-        yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🏆 Stage 3: Selecting Champion Candidate...</p>") }
-        log_accumulator.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
-        potentially_viable_candidates = [c for c in evaluated_candidates_list if c["evaluation_obj"] and c["evaluation_obj"].combined_score > 0 and not str(c["solution_text"]).startswith("ERROR")]

-        if
- …

-        champion_candidate_data = sorted(potentially_viable_candidates, key=lambda x: x["evaluation_obj"].combined_score, reverse=True)[0]
-        log_accumulator.append(f"Champion Selected: Candidate {champion_candidate_data['id']} with score {champion_candidate_data['evaluation_obj'].combined_score}/10.")
-        champion_display_markdown = (
-            f"**Champion Candidate ID: {champion_candidate_data['id']} "
-            f"(Original Score: {champion_candidate_data['evaluation_obj'].combined_score}/10):**\n"
-            f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
-            f"**Original Comprehensive Evaluation:**\n{champion_candidate_data['evaluation_obj'].get_display_critique()}"
-        )
-        yield {
-            output_champion_accordion: gr.Accordion(label=f"🏆 Champion: Candidate {champion_candidate_data['id']} (Score: {champion_candidate_data['evaluation_obj'].combined_score}/10)", open=True, visible=True),
-            output_champion_markdown: gr.Markdown(value=champion_display_markdown, visible=True),
-            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
-        }

- …
 )
- …
 else:
- …
-            log_accumulator.append("\n--- AI Analysis of Evolved Code's Test Results ---")
-            exec_summary_for_analysis = str(evolved_code_exec_result.overall_error_summary or "Tests completed.")
-            analysis_user_prompt = format_code_test_analysis_user_prompt(str(evolved_solution_code), user_provided_tests_code, f"Passed: {evolved_code_exec_result.passed_tests}/{evolved_code_exec_result.total_tests}. Detail: {exec_summary_for_analysis}")
-            analysis_system_prompt = get_system_prompt("code_execution_explainer")
-            llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.3, "max_tokens": critique_max_tokens + 200}
-
-            from core.llm_clients import call_huggingface_api, call_gemini_api
-            explanation_response_obj = None
-            if llm_analysis_config["type"] == "hf": explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
-            elif llm_analysis_config["type"] == "google_gemini": explanation_response_obj = call_gemini_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
-
-            if explanation_response_obj and explanation_response_obj.success:
-                ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
-            elif explanation_response_obj:
-                ai_test_analysis_markdown = f"<p style='color: orange;'>**AI Analysis of Test Performance Failed:**<br>{explanation_response_obj.error}</p>"
-            log_accumulator.append(f"  AI Test Analysis result logged.")

- …
-            output_evolved_accordion: gr.Accordion(label="🌟 Evolved Artifact & Test Analysis", open=True, visible=True),
-            output_evolved_markdown: gr.Markdown(value=evolved_solution_display_markdown, visible=True),
-            output_ai_test_analysis_markdown: gr.Markdown(value=ai_test_analysis_markdown, visible=True if ai_test_analysis_markdown else False),
-            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
-            engage_button: gr.Button(interactive=True) # Re-enable button
-        }

-    except ValueError as ve: # Catch our specific input/config errors
-        log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
-        yield {
-            output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ CONFIGURATION ERROR: {ve}</p>", visible=True),
-            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
-            engage_button: gr.Button(interactive=True)
-        }
-    except Exception as e:
-        log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
-        # For other outputs, we might want to clear them or show a general error message
-        yield {
-            output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ UNEXPECTED ERROR: {e}. Check logs.</p>", visible=True),
-            output_initial_solutions_markdown: gr.Markdown(value="An unexpected error occurred. Please check the interaction log."),
-            output_champion_markdown: gr.Markdown(value="Error state."),
-            output_evolved_markdown: gr.Markdown(value="Error state."),
-            output_ai_test_analysis_markdown: gr.Markdown(value="Error state."),
-            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
-            engage_button: gr.Button(interactive=True)
-        }

 # --- Gradio UI Definition ---
- …
-body { font-family: 'Inter', sans-serif; }
-.gradio-container { max-width: 1280px !important; margin: auto !important; }
-.gr-button-primary {
-    background: linear-gradient(135deg, #007bff 0%, #0056b3 100%) !important;
-    color: white !important;
-    border: none !important;
-    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
-    transition: all 0.2s ease-in-out !important;
-}
-.gr-button-primary:hover {
-    transform: translateY(-2px) !important;
-    box-shadow: 0 6px 12px rgba(0, 0, 0, 0.15) !important;
-}
-.status-bar p {
-    padding: 8px 12px;
-    border-radius: 6px;
-    font-weight: 500;
-    text-align: center;
-    margin-bottom: 10px; /* Add some space below status bar */
-}
-.accordion-section .gr-markdown { padding-top: 5px; padding-bottom: 5px; }
-.output-tabs .gr-tabitem {min-height: 400px;} /* Ensure tabs have some min height */
-"""

- …
-    gr.Markdown("# ✨ AlgoForge Omega™ ✨\n### Conceptual AI-Powered Algorithm & Application Foundry")
-    gr.Markdown(
-        "Define a challenge, configure the AI forge, and witness the (conceptual) evolution of solutions, "
-        "now with (simulated) unit testing and more detailed feedback loops!"
-    )

- …
-                user_tests_textbox = gr.Textbox(
-                    lines=7, label="Python Unit Tests (Optional, one `assert` per line)",
-                    placeholder="assert calculate_factorial(0) == 1\nassert calculate_factorial(5) == 120\n# For expected errors (advanced, simulated):\n# try:\n#   calculate_factorial(-1)\n# except ValueError:\n#   assert True\n# else:\n#   assert False, \"ValueError not raised\"",
-                    info="For 'Python Algorithm with Tests'. Ensure function names match your problem description. Basic try-except for error testing is crudely simulated."
-                )

-            gr.
- …
-                api_status_html.value = "".join(status_messages)
-
-
-                model_selection_dropdown = gr.Dropdown(
-                    choices=list(AVAILABLE_MODELS_CONFIG.keys()),
-                    value=UI_DEFAULT_MODEL_KEY if UI_DEFAULT_MODEL_KEY in AVAILABLE_MODELS_CONFIG else (list(AVAILABLE_MODELS_CONFIG.keys())[0] if AVAILABLE_MODELS_CONFIG else None),
-                    label="LLM Core Model",
-                    info="Ensure the corresponding API key is correctly set in Space Secrets."
-                )
-                num_initial_solutions_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="# Initial Solutions (Genesis Engine)", info="More solutions take longer but provide more diversity.")
-
-                with gr.Accordion("Advanced LLM Parameters (Tune with Caution!)", open=False):
-                    with gr.Row():
-                        genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.7, step=0.05, label="Genesis Temp")
-                        genesis_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1024, step=128, label="Genesis Max Tokens")
-                    with gr.Row():
-                        critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
-                        critique_max_tokens_slider = gr.Slider(minimum=150, maximum=2048, value=768, step=64, label="Critique Max Tokens")
-                    with gr.Row():
-                        evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.75, step=0.05, label="Evolution Temp")
-                        evolution_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=128, label="Evolution Max Tokens")

- …

-        # ---
-        with gr.Column(scale=
-            gr.Markdown("
-            output_status_bar = gr.HTML(value="<p>Idle. Define a challenge and engage!</p>", elem_classes=["status-bar"], visible=True)

-            with gr.Tabs(
-                with gr.TabItem("
- …
-                        output_initial_solutions_markdown = gr.Markdown(visible=True)

-                with gr.TabItem("
- …
-                    with output_champion_accordion:
-                        output_champion_markdown = gr.Markdown(visible=True)

-                with gr.TabItem("
- …
-    #
- …
-            output_evolved_accordion, output_evolved_markdown, output_ai_test_analysis_markdown,
-            output_interaction_log_markdown,
-            engage_button # To disable/re-enable it
-        ]
-    )
-
-    gr.Markdown("---")
-    gr.Markdown(
-        "**Disclaimer:** This is a conceptual, educational demonstration. "
-        "The (simulated) unit testing feature is for illustrative purposes. "
-        "**NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** "
-        "Implementing robust and secure code sandboxing is complex and absolutely critical for safety in real-world applications. "
-        "LLM outputs always require careful human review and verification."
 )
-    gr.HTML("<p style='text-align:center; font-size:0.9em; color:grey;'>AlgoForge Omega™ - Powered by Gradio, Gemini & Hugging Face Models</p>")

-# --- Entry Point
 if __name__ == "__main__":
     print("="*80)
-    print("
-    print(f"
-    print(f"
- …
     print("="*80)
- …
+# storyverse_weaver/app.py
 import gradio as gr
 import os
 import time
+from PIL import Image

 # --- Core Logic Imports ---
+from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
+from core.image_services import initialize_image_llms, STABILITY_API_CONFIGURED, OPENAI_DALLE_CONFIGURED, generate_image_stabilityai, generate_image_dalle # Add other providers if implemented
+from core.story_engine import Story, Scene # Manages the story
+from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
+from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
+from core.utils import basic_text_cleanup
+
+# --- Initialize Services ---
+initialize_text_llms()
+initialize_image_llms()
+
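Reviewer note: core/llm_services itself is not part of this diff, but the call sites below pin down its surface — initialization reads the STORYVERSE_* secrets named in the entry-point banner, the is_*_ready() helpers report configuration, and each generate_text_* call returns an object exposing .success, .text and .error. A minimal sketch consistent with those call sites (the module internals and the TextGenResponse name are assumptions, not the Space's actual code):

# core/llm_services.py -- hypothetical sketch, reconstructed from app.py's call sites
import os
from dataclasses import dataclass

@dataclass
class TextGenResponse:  # assumed name; app.py relies only on .success/.text/.error
    success: bool
    text: str = ""
    error: str = ""

_GEMINI_KEY = None
_HF_TOKEN = None

def initialize_text_llms():
    # The entry-point banner in this diff names these two secrets.
    global _GEMINI_KEY, _HF_TOKEN
    _GEMINI_KEY = os.getenv("STORYVERSE_GOOGLE_API_KEY")
    _HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN")

def is_gemini_text_ready() -> bool:
    return bool(_GEMINI_KEY)

def is_hf_text_ready() -> bool:
    return bool(_HF_TOKEN)

def generate_text_gemini(prompt, model_id, system_prompt=None, max_tokens=300) -> TextGenResponse:
    if not is_gemini_text_ready():
        return TextGenResponse(success=False, error="Gemini not configured.")
    # ... call the Gemini API here and wrap the result ...
    return TextGenResponse(success=True, text="<generated narrative>")

def generate_text_hf(prompt, model_id, system_prompt=None, max_tokens=300) -> TextGenResponse:
    if not is_hf_text_ready():
        return TextGenResponse(success=False, error="HF token not configured.")
    # ... call the HF Inference API here and wrap the result ...
    return TextGenResponse(success=True, text="<generated narrative>")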
+# --- Available Model Configuration (Simplified for StoryVerse) ---
+# Text Models
+TEXT_MODELS = {}
+if is_gemini_text_ready():
+    TEXT_MODELS["Gemini 1.5 Flash (Text)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
+    TEXT_MODELS["Gemini 1.0 Pro (Text)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
+if is_hf_text_ready():
+    TEXT_MODELS["Mistral 7B (HF Text)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
+DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0] if TEXT_MODELS else "No Text Models Available"
+
+# Image Models (Providers)
+IMAGE_PROVIDERS = {}
+if STABILITY_API_CONFIGURED: IMAGE_PROVIDERS["Stability AI (Stable Diffusion XL)"] = "stability_ai"
+if OPENAI_DALLE_CONFIGURED: IMAGE_PROVIDERS["OpenAI DALL-E 3 (Simulated)"] = "dalle"
+# Add other HF image models if you implement image_services.generate_image_hf_model
+DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0] if IMAGE_PROVIDERS else "No Image Providers Available"
+
+
+# --- Gradio UI Theme and CSS ---
+story_theme = gr.themes.Soft(
+    primary_hue=gr.themes.colors.purple,
+    secondary_hue=gr.themes.colors.pink,
+    font=[gr.themes.GoogleFont("Quicksand"), "ui-sans-serif", "system-ui", "sans-serif"]
 )
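Likewise, core/image_services is only implied here: the two flags are plain module-level booleans, and the generators return an object with .success, .image (a PIL image) and .error. A sketch under those assumptions (the ImageGenResponse name and the function bodies are guesses; the secret names come from the entry-point banner below):

# core/image_services.py -- hypothetical sketch, reconstructed from app.py's call sites
import os
from dataclasses import dataclass
from typing import Optional
from PIL import Image

STABILITY_API_CONFIGURED = bool(os.getenv("STORYVERSE_STABILITY_API_KEY"))
OPENAI_DALLE_CONFIGURED = bool(os.getenv("STORYVERSE_OPENAI_API_KEY"))

@dataclass
class ImageGenResponse:  # assumed name; app.py relies only on .success/.image/.error
    success: bool
    image: Optional[Image.Image] = None
    error: str = ""

def initialize_image_llms():
    # Could refresh the flags above or build API clients; app.py only needs it to exist.
    pass

def generate_image_stabilityai(prompt, style_preset=None, negative_prompt="") -> ImageGenResponse:
    if not STABILITY_API_CONFIGURED:
        return ImageGenResponse(success=False, error="Stability AI key missing.")
    # ... POST to the Stability endpoint and decode the returned bytes into PIL ...
    return ImageGenResponse(success=True, image=Image.new("RGB", (512, 512)))

def generate_image_dalle(prompt) -> ImageGenResponse:
    if not OPENAI_DALLE_CONFIGURED:
        return ImageGenResponse(success=False, error="OpenAI key missing.")
    # ... call DALL-E (the dropdown label says this path is simulated) ...
    return ImageGenResponse(success=True, image=Image.new("RGB", (512, 512)))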
+custom_css = """
+.gradio-container { max-width: 1200px !important; margin: auto !important; }
+.panel_image img { object-fit: contain; width: 100%; height: 100%; max-height: 512px; }
+.gallery_output .thumbnail-item { height: 150px !important; width: 150px !important; }
+.gallery_output .thumbnail-item img { height: 100% !important; width: 100% !important; object-fit: cover !important; }
+.status_text { font-weight: bold; padding: 8px; text-align: center; border-radius: 5px; margin-top: 10px; }
+.error_text { background-color: #ffebee; color: #c62828; }
+.success_text { background-color: #e8f5e9; color: #2e7d32; }
+.processing_text { background-color: #e3f2fd; color: #1565c0; }
+.compact-row .gr-form { gap: 8px !important; } /* Reduce gap in rows */
+"""

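These rules only apply to components that opt in through elem_classes (as panel_image, gallery_output, status_text and compact-row do further down), or to HTML that carries the class itself. A toy, self-contained pairing to show the mechanism (not part of this Space):

# Toy example: Blocks-level CSS plus the elem_classes opt-in.
import gradio as gr

css = ".status_text { font-weight: bold; color: #1565c0; }"

with gr.Blocks(css=css) as demo:
    gr.HTML("<p class='status_text'>Styled because the HTML carries the class.</p>")
    gr.Textbox(label="Also styled", elem_classes=["status_text"])

# demo.launch()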
+# --- StoryVerse Weaver Orchestrator ---
+def add_scene_to_story(
+    current_story_obj: Story, # Comes from gr.State
+    scene_prompt_text: str,
+    image_style_dropdown: str,
+    artist_style_text: str,
+    negative_prompt_text: str,
+    text_model_key: str,
+    image_provider_key: str,
+    progress=gr.Progress(track_tqdm=True)
 ):
+    if not scene_prompt_text.strip():
+        # Must return one value per output component (six in total);
+        # gr.update() leaves a component unchanged.
+        return (current_story_obj, gr.update(), gr.update(), gr.update(),
+                "<p class='error_text status_text'>Scene prompt cannot be empty!</p>", gr.update())

+    progress(0, desc="Initializing new scene...")
+    log_updates = ["Starting new scene generation..."]

+    # --- 1. Generate Narrative Text ---
+    progress(0.2, desc="Generating narrative...")
+    narrative_text_generated = "Narrative generation failed."
+    text_model_info = TEXT_MODELS.get(text_model_key)
+
+    if text_model_info:
+        system_p = get_narrative_system_prompt("default") # or "comic"
+        # Could use last scene's narrative for context if desired
+        # prev_narrative = current_story_obj.get_last_scene_narrative()
+        user_p = format_narrative_user_prompt(scene_prompt_text) #, prev_narrative)
+
+        text_response = None
+        if text_model_info["type"] == "gemini":
+            text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=300)
+        elif text_model_info["type"] == "hf_text":
+            text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=300)

+        if text_response and text_response.success:
+            narrative_text_generated = basic_text_cleanup(text_response.text)
+            log_updates.append(f"Narrative generated using {text_model_key}.")
+        elif text_response:
+            narrative_text_generated = f"Narrative Error: {text_response.error}"
+            log_updates.append(f"Narrative generation FAILED with {text_model_key}: {text_response.error}")
+        else:
+            log_updates.append(f"Narrative generation FAILED with {text_model_key}: No response object.")
+    else:
+        narrative_text_generated = "Selected text model not available."
+        log_updates.append("Narrative generation FAILED: Text model not found.")

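The two prompt helpers from prompts/narrative_prompts are likewise absent from the diff. Judging by the arguments (a persona key, the scene prompt, and the optional previous narrative hinted at in the commented-out line), they could look roughly like this sketch (all strings below are invented placeholders):

# prompts/narrative_prompts.py -- hypothetical sketch of the two helpers used above
def get_narrative_system_prompt(persona: str = "default") -> str:
    personas = {
        "default": "You are a vivid, concise storyteller. Write one short scene "
                   "of narrative prose for the user's story beat.",
        # app.py's comment mentions a possible "comic" persona as well.
    }
    return personas.get(persona, personas["default"])

def format_narrative_user_prompt(scene_prompt: str, previous_narrative: str = "") -> str:
    context = f"\nPrevious scene, for continuity:\n{previous_narrative}" if previous_narrative else ""
    return f"Story beat to narrate:\n{scene_prompt}{context}"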
+    # --- 2. Generate Image ---
+    progress(0.6, desc="Generating image...")
+    image_generated = None
+    image_error = None
+    selected_image_provider = IMAGE_PROVIDERS.get(image_provider_key)
+
+    # Use the generated narrative (or the original prompt, if narrative failed) for the image prompt
+    image_content_prompt = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
+    full_image_prompt = format_image_generation_prompt(image_content_prompt[:300], image_style_dropdown, artist_style_text) # Limit prompt length for image gen
+
+    if selected_image_provider:
+        image_response = None
+        if selected_image_provider == "stability_ai":
+            image_response = generate_image_stabilityai(full_image_prompt, style_preset=None, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS)
+        elif selected_image_provider == "dalle":
+            image_response = generate_image_dalle(full_image_prompt) # Uses default DALL-E settings from image_services
+        # Add elif for HF image models if implemented
+
+        if image_response and image_response.success:
+            image_generated = image_response.image
+            log_updates.append(f"Image generated using {image_provider_key}.")
+        elif image_response:
+            image_error = f"Image Error ({image_provider_key}): {image_response.error}"
+            log_updates.append(f"Image generation FAILED with {image_provider_key}: {image_response.error}")
+        else:
+            image_error = f"Image generation failed: No response from {image_provider_key} service."
+            log_updates.append(f"Image generation FAILED with {image_provider_key}: No response object.")
+    else:
+        image_error = "Selected image provider not available."
+        log_updates.append("Image generation FAILED: Image provider not found.")
+
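prompts/image_style_prompts is implied to export a preset table, a shared negative prompt, and a formatter that folds content, style and artist into one prompt string. A sketch under those assumptions (the preset keys match the gr.Examples rows below; the preset values and the exact negative-prompt text are guesses):

# prompts/image_style_prompts.py -- hypothetical sketch matching the three imports
STYLE_PRESETS = {
    # Keys appear in the UI dropdown; the example rows in app.py use these four.
    "Fantasy Art": "epic fantasy illustration, detailed, dramatic lighting",
    "Cyberpunk": "neon-lit cyberpunk scene, high contrast, cinematic",
    "Studio Ghibli Inspired": "soft painterly anime style, warm colors",
    "Photorealistic": "photorealistic, 35mm, natural lighting",
}

COMMON_NEGATIVE_PROMPTS = "blurry, low quality, deformed, watermark, text"

def format_image_generation_prompt(content: str, style_key: str, artist: str = "") -> str:
    style = STYLE_PRESETS.get(style_key, "")  # "Default" falls through to no styling
    parts = [content] + [p for p in (style, f"by {artist}" if artist else "") if p]
    return ", ".join(parts)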
+    # --- 3. Add Scene to Story Object ---
+    if image_error and "Error" in narrative_text_generated: # Both failed
+        current_story_obj.add_scene_with_error(scene_prompt_text, f"Narrative: {narrative_text_generated}. Image: {image_error}")
+    else:
+        current_story_obj.add_scene_from_elements(
+            user_prompt=scene_prompt_text,
+            narrative_text=narrative_text_generated,
+            image=image_generated,
+            image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text else ''}",
+            image_provider=image_provider_key
         )
+
+    progress(1.0, desc="Scene complete!")
+
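core/story_engine is the one stateful piece. app.py reads scene.scene_number, user_prompt, narrative_text, image, image_style_prompt, image_provider and error_message, and calls add_scene_from_elements / add_scene_with_error (plus the commented-out get_last_scene_narrative). A dataclass sketch that satisfies exactly that contract (the method bodies are guesses):

# core/story_engine.py -- hypothetical sketch; attribute names come from app.py's usage
from dataclasses import dataclass, field
from typing import List, Optional
from PIL import Image

@dataclass
class Scene:
    scene_number: int
    user_prompt: str
    narrative_text: str = ""
    image: Optional[Image.Image] = None
    image_style_prompt: str = ""
    image_provider: str = ""
    error_message: str = ""

@dataclass
class Story:
    scenes: List[Scene] = field(default_factory=list)

    def add_scene_from_elements(self, user_prompt, narrative_text, image,
                                image_style_prompt, image_provider) -> Scene:
        scene = Scene(len(self.scenes) + 1, user_prompt, narrative_text,
                      image, image_style_prompt, image_provider)
        self.scenes.append(scene)
        return scene

    def add_scene_with_error(self, user_prompt, error_message) -> Scene:
        scene = Scene(len(self.scenes) + 1, user_prompt, error_message=error_message)
        self.scenes.append(scene)
        return scene

    def get_last_scene_narrative(self) -> str:
        return self.scenes[-1].narrative_text if self.scenes else ""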
+    # --- 4. Prepare Outputs for Gradio ---
+    # Gallery expects a list of (image_path_or_PIL, caption_string) tuples
+    gallery_items = []
+    for scene in current_story_obj.scenes:
+        caption = f"S{scene.scene_number}: {scene.user_prompt[:40]}..."
+        if scene.error_message:
+            # Create a placeholder image for errors or display error text
+            error_img = Image.new('RGB', (100, 100), color='red') # Simple red square
+            gallery_items.append((error_img, f"{caption}\nError: {scene.error_message[:100]}..."))
         else:
+            gallery_items.append((scene.image if scene.image else Image.new('RGB', (100, 100), color='grey'), caption)) # Grey if no image but no error
+
+    # Display the latest scene's full details
+    latest_scene_display = ""
+    if current_story_obj.scenes:
+        ls = current_story_obj.scenes[-1]
+        latest_scene_display = f"## Scene {ls.scene_number}: {ls.user_prompt}\n\n"
+        if ls.error_message:
+            latest_scene_display += f"**Error:** {ls.error_message}\n"
+        else:
+            if ls.image:
+                # Gradio Markdown can't directly display a PIL.Image; the image is shown
+                # in the gallery and in the dedicated gr.Image component instead.
+                latest_scene_display += f"**Style:** {ls.image_style_prompt}\n\n"
+            latest_scene_display += f"{ls.narrative_text}"
+
+    # Determine status message
+    status_message_html = ""
+    if image_error or "Error" in narrative_text_generated:
+        status_message_html = f"<p class='error_text status_text'>Scene added with errors. Narrative: {'OK' if 'Error' not in narrative_text_generated else 'Failed'}. Image: {'OK' if not image_error else 'Failed'}.</p>"
+    else:
+        status_message_html = "<p class='success_text status_text'>New scene added successfully!</p>"

+    # For the single image display component, show the latest generated image
+    latest_image_output = image_generated if image_generated else None # (or a placeholder if error)
+
+    return current_story_obj, gallery_items, latest_image_output, latest_scene_display, status_message_html, "\n".join(log_updates)

+def clear_story_state():
+    new_story = Story()
+    return new_story, [], None, "Story Cleared. Ready for a new verse!", "<p class='status_text'>Story Cleared</p>", "Log Cleared."

 # --- Gradio UI Definition ---
+with gr.Blocks(theme=story_theme, css=custom_css) as story_weaver_demo:
+    story_state = gr.State(Story()) # Manages the story object
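gr.State is what lets a single Story instance survive across clicks: Gradio passes the stored value into the handler as an ordinary argument and replaces it with whatever the handler returns, keeping one value per user session. A self-contained toy illustration of that round-trip (not part of this Space):

# Toy illustration of the gr.State pattern the app relies on.
import gradio as gr

def increment(count):                       # state arrives as a plain Python value...
    count += 1
    return count, f"Clicked {count} times"  # ...and persists by being returned.

with gr.Blocks() as demo:
    count_state = gr.State(0)               # per-session value, invisible in the UI
    button = gr.Button("Click me")
    label = gr.Markdown()
    button.click(increment, inputs=[count_state], outputs=[count_state, label])

# demo.launch()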

+    gr.Markdown("# ✨ StoryVerse Weaver ✨\nCreate multimodal stories with AI-generated narrative and images!")

+    # API Status Check (conceptual - a real app might hide this or make it admin-only)
+    with gr.Accordion("API & Model Status (Developer Info)", open=False):
+        status_text = []
+        if not is_gemini_text_ready() and not is_hf_text_ready() and not STABILITY_API_CONFIGURED and not OPENAI_DALLE_CONFIGURED:
+            status_text.append("<p style='color:red;font-weight:bold;'>⚠️ CRITICAL: NO APIs CONFIGURED. App will be non-functional.</p>")
+        else:
+            if is_gemini_text_ready() or is_hf_text_ready(): status_text.append("<p style='color:green;'>✅ Text LLM(s) Ready.</p>")
+            else: status_text.append("<p style='color:orange;'>⚠️ No Text LLMs Ready (Check STORYVERSE_GOOGLE_API_KEY/STORYVERSE_HF_TOKEN).</p>")
+            if STABILITY_API_CONFIGURED or OPENAI_DALLE_CONFIGURED: status_text.append("<p style='color:green;'>✅ Image Generation Service(s) Ready.</p>")
+            else: status_text.append("<p style='color:orange;'>⚠️ No Image Generation Services Ready (Check API Keys).</p>")
+        gr.HTML("".join(status_text))
+
+
+    with gr.Row():
+        # --- CONTROL PANEL (Inputs) ---
+        with gr.Column(scale=1):
+            gr.Markdown("### 🎬 Scene Input")
+            scene_prompt_input = gr.Textbox(lines=5, label="Describe your scene or story beat:", placeholder="e.g., A lone astronaut discovers a glowing alien artifact on a desolate moon.")

+            with gr.Accordion("🎨 Visual Style (Optional)", open=True):
+                image_style_input = gr.Dropdown(choices=["Default"] + list(STYLE_PRESETS.keys()), value="Default", label="Image Style Preset")
+                artist_style_input = gr.Textbox(label="Inspired by Artist (Optional):", placeholder="e.g., Van Gogh, Hayao Miyazaki, HR Giger")
+                negative_prompt_input = gr.Textbox(lines=2, label="Negative Prompt (Optional):", placeholder="e.g., blurry, text, watermark, poorly drawn", value=COMMON_NEGATIVE_PROMPTS)
+
+            with gr.Accordion("⚙️ AI Configuration (Advanced)", open=False):
+                text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=DEFAULT_TEXT_MODEL_KEY, label="Text Generation Model")
+                image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=DEFAULT_IMAGE_PROVIDER_KEY, label="Image Generation Provider")
+                # Could add sliders for temperature, tokens etc. here later
+
+            with gr.Row(elem_classes=["compact-row"]):
+                add_scene_button = gr.Button("➕ Weave Next Scene", variant="primary")
+                clear_story_button = gr.Button("🗑️ Clear Story")

+            status_bar_output = gr.HTML(value="<p class='status_text'>Ready to weave...</p>")

+        # --- STORY DISPLAY (Outputs) ---
+        with gr.Column(scale=2):
+            gr.Markdown("### 📖 Your StoryVerse So Far")

+            with gr.Tabs():
+                with gr.TabItem("🖼️ Latest Scene View"):
+                    latest_scene_image_output = gr.Image(label="Latest Scene Image", type="pil", interactive=False, show_download_button=True, elem_classes=["panel_image"])
+                    latest_scene_narrative_output = gr.Markdown(label="Latest Scene Narrative")

+                with gr.TabItem("Gallery | Story Scroll"):
+                    story_gallery_output = gr.Gallery(label="Story Scroll", show_label=False, columns=[3], object_fit="contain", height="auto", elem_classes=["gallery_output"])

+                with gr.TabItem("📜 Interaction Log"):
+                    log_output_markdown = gr.Markdown("Log will appear here...")
+
+    # --- Event Handlers ---
+    add_scene_button.click(
+        fn=add_scene_to_story,
+        inputs=[
+            story_state, scene_prompt_input,
+            image_style_input, artist_style_input, negative_prompt_input,
+            text_model_dropdown, image_provider_dropdown
+        ],
+        outputs=[ # Order must match the return order of add_scene_to_story
+            story_state, story_gallery_output,
+            latest_scene_image_output, latest_scene_narrative_output,
+            status_bar_output, log_output_markdown
+        ]
+    )
+    clear_story_button.click(
+        fn=clear_story_state,
+        inputs=[],
+        outputs=[
+            story_state, story_gallery_output,
+            latest_scene_image_output, latest_scene_narrative_output,
+            status_bar_output, log_output_markdown
+        ]
     )

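Because add_scene_to_story is a plain function, it can be exercised without launching the UI. A hypothetical headless smoke test (not in the Space); the no-op progress stand-in replaces gr.Progress so the call works outside a Gradio event:

# Hypothetical check -- run only with real keys configured in the environment.
if os.environ.get("STORYVERSE_SMOKE_TEST"):
    story, gallery, image, scene_md, status_html, log = add_scene_to_story(
        Story(),
        "A tiny cottage under a giant glowing mushroom.",
        "Default", "", COMMON_NEGATIVE_PROMPTS,
        DEFAULT_TEXT_MODEL_KEY, DEFAULT_IMAGE_PROVIDER_KEY,
        progress=lambda *args, **kwargs: None,  # stand-in for gr.Progress
    )
    print(status_html)
    print(log)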

+    # Example Prompts for User
+    gr.Examples(
+        examples=[
+            ["A knight faces a dragon in a fiery volcano.", "Fantasy Art", "Frank Frazetta", "blurry, low quality"],
+            ["A futuristic detective investigates a crime in a neon-lit alley.", "Cyberpunk", "Syd Mead", "cartoon, painting"],
+            ["Two children discover a hidden portal in an old oak tree.", "Studio Ghibli Inspired", "", "dark, scary"],
+            ["A single red rose blooming in a post-apocalyptic wasteland.", "Photorealistic", "Ansel Adams", "oversaturated, vibrant"],
+        ],
+        inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
+        label="✨ Example Scene Ideas & Styles ✨"
+    )

+# --- Entry Point ---
 if __name__ == "__main__":
     print("="*80)
+    print("✨ StoryVerse Weaver™ - Multimodal Story Creator - Launching... ✨")
+    print(f"  Text LLM Ready (Gemini): {is_gemini_text_ready()}")
+    print(f"  Text LLM Ready (HF): {is_hf_text_ready()}")
+    print(f"  Image Provider Ready (Stability AI): {STABILITY_API_CONFIGURED}")
+    print(f"  Image Provider Ready (DALL-E): {OPENAI_DALLE_CONFIGURED}")
+    if not (is_gemini_text_ready() or is_hf_text_ready()) or not (STABILITY_API_CONFIGURED or OPENAI_DALLE_CONFIGURED):
+        print("  🔴 WARNING: Not all required API services are configured. Functionality will be limited or fail.")
+        print("     Please set: STORYVERSE_GOOGLE_API_KEY (for Gemini text), and/or STORYVERSE_HF_TOKEN (for HF text),")
+        print("     AND STORYVERSE_STABILITY_API_KEY (for Stability AI images) or STORYVERSE_OPENAI_API_KEY (for DALL-E images) in your environment/secrets.")
+    print(f"  Default Text Model: {DEFAULT_TEXT_MODEL_KEY}")
+    print(f"  Default Image Provider: {DEFAULT_IMAGE_PROVIDER_KEY}")
+    print(f"  Available Text Models: {list(TEXT_MODELS.keys())}")
+    print(f"  Available Image Providers: {list(IMAGE_PROVIDERS.keys())}")
     print("="*80)
+    story_weaver_demo.launch(debug=True, server_name="0.0.0.0")
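For local runs outside Spaces, the same secrets have to be present before app.py's top-level initialize_* calls execute. One hypothetical way to arrange that (a separate helper script, not in the repo):

# run_local.py -- hypothetical local launcher; substitute real keys before using.
import os

# On Hugging Face Spaces these come from Space Secrets; locally, set them first.
os.environ.setdefault("STORYVERSE_GOOGLE_API_KEY", "<your-gemini-key>")
os.environ.setdefault("STORYVERSE_STABILITY_API_KEY", "<your-stability-key>")

import app  # importing runs the top-level service initialization and builds the UI

app.story_weaver_demo.launch(server_name="0.0.0.0")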