VirtualOasis committed
Commit c6ea530 · verified · 1 Parent(s): 989ef70

Update app.py

Files changed (1)
  1. app.py +437 -89
app.py CHANGED
@@ -1,24 +1,20 @@
+ from smolagents import (
+     load_tool,
+     CodeAgent,
+     ToolCallingAgent,
+     InferenceClientModel,
+     LiteLLMModel,
+     OpenAIModel,
+     GradioUI,
+     MCPClient
+ )
+ from mcp import StdioServerParameters
import os
from dotenv import load_dotenv
- from smolagents import CodeAgent, ToolCallingAgent, LiteLLMModel, MCPClient
- from mcp import StdioServerParameters
- import base64
- from PIL import Image
- import io
+ import gradio as gr

- # --- 1. Environment and Model Setup ---
- # Load environment variables from a .env file (for API keys)
load_dotenv()

- # Initialize the language model that our agents will use.
- # Ensure your GEMINI_API_KEY is set in your .env file.
- model = LiteLLMModel(
-     model_id="gemini/gemini-2.0-flash-exp",
-     api_key=os.getenv("GEMINI_API_KEY")
- )
-
- # --- 2. MCP Server Configuration ---
- # Define the connection parameters for your MCP servers.
kgb_server_parameters = StdioServerParameters(
    command="npx",
    args=[
@@ -28,7 +24,7 @@ kgb_server_parameters = StdioServerParameters(
        "sse-only"],
)

- t2i_server_parameters = StdioServerParameters(
+ T2I_server_parameters = StdioServerParameters(
    command="npx",
    args=[
        "mcp-remote",
@@ -37,89 +33,441 @@ t2i_server_parameters = StdioServerParameters(
        "sse-only"],
)

- server_parameters = [kgb_server_parameters, t2i_server_parameters]
+ # Model will be initialized dynamically based on user selection
+

- # --- 3. Main Application Logic ---
- def run_storycrafter():
-     # Instantiate the MCPClient *before* the 'with' block, as per the working example.
+
+ # Load tools from all MCP servers using MCPClient
+ server_parameters = [kgb_server_parameters, T2I_server_parameters]
+
+ def initialize_model(model_provider, api_key_or_token):
+     """Initialize the selected model with user credentials"""
+
+     if not api_key_or_token.strip():
+         raise ValueError("Please provide your API key or token")
+
+     if model_provider == "Gemini (Google)":
+         return LiteLLMModel(
+             model_id="gemini/gemini-2.0-flash-exp",
+             api_key=api_key_or_token
+         )
+     elif model_provider == "Hugging Face (DeepSeek via Together)":
+         return InferenceClientModel(
+             model_id="deepseek-ai/DeepSeek-R1-0528",
+             provider="together",
+             token=api_key_or_token,
+             max_tokens=5000
+         )
+     elif model_provider == "DeepSeek (Direct API)":
+         return OpenAIModel(
+             model_id="deepseek-chat",
+             api_key=api_key_or_token,
+             base_url="https://api.deepseek.com"
+         )
+     else:
+         raise ValueError(f"Unsupported model provider: {model_provider}")
+
+ def create_business_content_ui():
+     """Create specialized UI for business content creation"""
+
+     # Initialize MCP client and keep it alive for the entire UI session
+     print("🔗 Initializing MCP client for the session...")
    mcp = MCPClient(server_parameters)

-     # Use the created MCPClient instance as a context manager.
    with mcp:
-         print("Connecting to MCP servers and fetching tools...")
-         # Get all available tools from all connected MCP servers.
+         # Load all MCP tools once and keep client alive
        all_tools = mcp.get_tools()
-         print(f"Found {len(all_tools)} tools.")
-         if not all_tools:
-             print("Warning: No tools were loaded from the MCP servers. Agents will have limited capabilities.")
+         print(f"✅ Loaded {len(all_tools)} MCP tools successfully")

-         # --- 4. Agent Definitions ---
-         # The Writer Agent is given all available tools.
-         writer_agent = ToolCallingAgent(
-             tools=all_tools,
-             model=model,
-             name="writer",
-             description="A creative agent that writes short stories. It can use a knowledge graph tool to research topics for inspiration."
-         )
-
-         # The Illustrator Agent is also given all available tools.
-         illustrator_agent = ToolCallingAgent(
-             tools=all_tools,
-             model=model,
-             name="illustrator",
-             description="An artist agent that creates illustrations based on a descriptive prompt using a text-to-image tool."
-         )
-
-         # The Director Agent orchestrates the other two agents.
-         director_agent = CodeAgent(
-             tools=[],
-             model=model,
-             managed_agents=[writer_agent, illustrator_agent],
-             system_prompt="""
-             You are the Director of Agentic Storycrafter, a creative team. Your job is to manage the writer and illustrator agents to create a story with an illustration.
-
-             Here is your workflow:
-             1. Receive a user's prompt for a story.
-             2. Call the `writer` agent to write a story based on the user's prompt.
-             3. After the story is written, create a short, descriptive prompt for an illustration that captures the essence of the story.
-             4. Call the `illustrator` agent with this new prompt to generate an image. The result will be a dictionary containing image data.
-             5. Return a dictionary containing both the final 'story' and the 'image_data' from the illustrator.
-             """
-         )
+         def initialize_agents(model):
+             """Initialize specialized agents using loaded MCP tools and user-selected model"""
+
+             # 1. Business Research Agent - Uses all MCP tools (Knowledge Graph Builder focus)
+             research_agent = CodeAgent(
+                 tools=all_tools,
+                 model=model,
+                 add_base_tools=True,
+                 name="business_researcher",
+                 description="""Expert business researcher specializing in market analysis, competitive intelligence, and tech industry trends.
+                 Uses knowledge graph tools to extract entities, relationships, and key business insights from topics.
+                 Focuses on: market size, key players, business relationships, competitive landscape, and strategic context."""
+             )
+
+             # 2. Content Strategy Agent - Creates structured business content
+             content_strategy_agent = ToolCallingAgent(
+                 tools=[],
+                 model=model,
+                 max_steps=3,
+                 name="content_strategist",
+                 description="""Professional business writer specializing in executive-level content creation.
+                 Creates structured, strategic business content including market analyses, competitive briefs, and strategic recommendations.
+                 Writes for C-level executives, investors, and business stakeholders with focus on actionable insights."""
+             )
+
+             # 3. Content Formatter Agent - Professional document formatting
+             content_formatter_agent = ToolCallingAgent(
+                 tools=[],
+                 model=model,
+                 max_steps=2,
+                 name="content_formatter",
+                 description="""Document formatting specialist focused on professional business document structure.
+                 Converts content into well-structured markdown with proper headers, tables, bullet points, and professional formatting.
+                 Ensures consistency, readability, and professional presentation standards."""
+             )
+
+             # 4. Visual Creation Agent - Uses all MCP tools (Text-to-Image focus)
+             visual_agent = CodeAgent(
+                 tools=all_tools,
+                 model=model,
+                 add_base_tools=True,
+                 name="visual_designer",
+                 description="""Business visualization specialist creating professional infographics, charts, and presentation visuals.
+                 Uses text-to-image tools to convert content into compelling visual formats suitable for executive presentations.
+                 Focuses on clean, professional designs that enhance business storytelling."""
+             )
+
+             # Business Content Manager - Coordinates all agents
+             business_content_manager = CodeAgent(
+                 tools=all_tools,
+                 model=model,
+                 managed_agents=[research_agent, content_strategy_agent, content_formatter_agent, visual_agent],
+                 additional_authorized_imports=["json", "re", "datetime"],
+                 add_base_tools=True,
+                 name="agentic_inkwell_manager",
+                 description="""Agentic Inkwell Manager - Coordinates multi-agent business content creation workflow.
+                 Manages the complete pipeline from research to final formatted output with optional visual conversion.
+                 Where specialized agents gather around the digital inkwell to craft intelligence together."""
+             )
+
+             return business_content_manager
+
+         # Content type options
+         content_types = [
+             "Market Analysis Report",
+             "Competitive Intelligence Brief",
+             "Technology Trend Analysis",
+             "Product Launch Strategy",
+             "Investment Research Report",
+             "Strategic Planning Document"
+         ]
+
+         # Writing style options
+         writing_styles = [
+             "Executive Summary (C-level audience)",
+             "Technical Brief (Developer/Engineer audience)",
+             "Investor Pitch (VC/Stakeholder audience)",
+             "Market Research (Analyst audience)",
+             "Internal Memo (Team communication)"
+         ]
+
+         # Visual style options
+         visual_styles = [
+             "Professional Infographics",
+             "Corporate Presentation Style",
+             "Minimalist Charts",
+             "Executive Dashboard",
+             "Technical Diagrams",
+             "No Visuals (Text Only)"
+         ]
+
+         with gr.Blocks(title="Agentic Inkwell", theme=gr.themes.Soft()) as demo:
+             gr.Markdown("# ✒️ Agentic Inkwell")
+             gr.Markdown("*Where Agents Craft Intelligence* - Generate comprehensive business reports with collaborative agentic writing")

-         # --- 5. The Creative Workflow ---
-         user_prompt = "a story about a wise old owl living in a library of forgotten books"
-
-         print(f"\n--- Director's Task ---")
-         print(f"Prompt: {user_prompt}\n")
-
-         final_output = director_agent.run(f"Create a story and illustration for the following prompt: {user_prompt}")
+             # Model Configuration Section
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("### 🤖 Model Configuration")
+                     model_provider = gr.Dropdown(
+                         choices=[
+                             "Gemini (Google)",
+                             "Hugging Face (DeepSeek via Together)",
+                             "DeepSeek (Direct API)"
+                         ],
+                         label="Select AI Model Provider",
+                         value="Gemini (Google)",
+                         info="Choose your preferred AI model provider"
+                     )
+
+                     api_key_input = gr.Textbox(
+                         label="API Key / Token",
+                         placeholder="Enter your API key or token here...",
+                         type="password",
+                         info="Your API key will be used securely and not stored"
+                     )
+
+                     gr.Markdown("""
+                     **🔑 Where to get your API keys:**
+                     - **Gemini**: Get free API key at [Google AI Studio](https://aistudio.google.com/app/apikey)
+                     - **Hugging Face**: Get free token at [HF Settings](https://huggingface.co/settings/tokens)
+                     - **DeepSeek**: Get API key at [DeepSeek Platform](https://platform.deepseek.com/api_keys)
+                     """, elem_classes=["api-info"])

-         print("\n--- Agentic Storycrafter Result ---")
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     # Main input
+                     topic_input = gr.Textbox(
+                         label="📋 Business Topic or Research Question",
+                         placeholder="e.g., 'AI Agent frameworks market analysis 2025' or 'NVIDIA vs AMD in AI chip market'",
+                         lines=3
+                     )
+
+                     # Content controls
+                     with gr.Row():
+                         content_type = gr.Dropdown(
+                             choices=content_types,
+                             label="📊 Content Type",
+                             value="Market Analysis Report"
+                         )
+
+                         writing_style = gr.Dropdown(
+                             choices=writing_styles,
+                             label="✍️ Writing Style",
+                             value="Executive Summary (C-level audience)"
+                         )
+
+                     with gr.Row():
+                         visual_style = gr.Dropdown(
+                             choices=visual_styles,
+                             label="🎨 Visual Output",
+                             value="No Visuals (Text Only)"
+                         )
+
+                         include_sources = gr.Checkbox(
+                             label="📚 Include Source References",
+                             value=True
+                         )
+
+                     # Generate button
+                     generate_btn = gr.Button("✒️ Craft with Agentic Inkwell", variant="primary", size="lg")
+
+                 with gr.Column(scale=1):
+                     # Status and progress
+                     status_box = gr.Textbox(
+                         label="📊 Generation Status",
+                         value="Ready to generate content...",
+                         interactive=False,
+                         lines=8
+                     )
+
+             # Output section
+             with gr.Row():
+                 with gr.Column():
+                     # Main content output
+                     content_output = gr.Textbox(
+                         label="📄 Generated Business Content (Markdown)",
+                         lines=20,
+                         max_lines=30,
+                         show_copy_button=True
+                     )
+
+                     # Action buttons row
+                     with gr.Row():
+                         # Download button
+                         download_btn = gr.DownloadButton(
+                             label="💾 Download Markdown",
+                             visible=False
+                         )
+
+                         # Visual conversion button (appears after content generation)
+                         convert_visual_btn = gr.Button(
+                             "🎨 Convert to Images",
+                             variant="secondary",
+                             visible=False
+                         )
+
+             # Visual output section (shown when visuals are generated)
+             with gr.Row(visible=False) as visual_output_row:
+                 with gr.Column():
+                     visual_status = gr.Textbox(
+                         label="🎨 Image Conversion Status",
+                         interactive=False,
+                         lines=3
+                     )
+
+                     visual_output = gr.Gallery(
+                         label="🖼️ Generated Images",
+                         columns=2,
+                         height=500,
+                         show_label=True
+                     )
+
+             def generate_business_content(topic, content_type, writing_style, visual_style, include_sources, model_provider, api_key):
+                 """Main function to coordinate business content generation"""
+
+                 if not topic.strip():
+                     return "Please enter a business topic or research question.", "", None, gr.update(visible=False), gr.update(visible=False)
+
+                 if not api_key.strip():
+                     return "Please provide your API key or token.", "", None, gr.update(visible=False), gr.update(visible=False)
+
+                 try:
+                     # Initialize the user-selected model
+                     yield "🤖 Initializing your selected AI model...", "", None, gr.update(visible=False), gr.update(visible=False)
+
+                     try:
+                         model = initialize_model(model_provider, api_key)
+                     except Exception as e:
+                         error_msg = f"❌ Failed to initialize {model_provider}: {str(e)}"
+                         yield error_msg, "", None, gr.update(visible=False), gr.update(visible=False)
+                         return
+
+                     # Initialize agents with the user-selected model
+                     yield "🔧 Setting up specialized agents...", "", None, gr.update(visible=False), gr.update(visible=False)
+                     manager = initialize_agents(model)
+
+                     # Update status
+                     status = "🔍 Starting business research..."
+                     yield status, "", None, gr.update(visible=False), gr.update(visible=False)
+
+                     # Create detailed prompt for the manager
+                     prompt = f"""
+                     Create a comprehensive {content_type.lower()} about: {topic}
+
+                     Requirements:
+                     - Writing Style: {writing_style}
+                     - Include source references: {include_sources}
+                     - Visual conversion needed: {visual_style != 'No Visuals (Text Only)'}
+                     - Visual style: {visual_style if visual_style != 'No Visuals (Text Only)' else 'None'}
+
+                     Follow this workflow:
+                     1. Use business_researcher to gather comprehensive market intelligence and competitive data
+                     2. Use content_strategist to create structured business content with strategic insights
+                     3. Use content_formatter to format into professional markdown document
+                     4. {"Use visual_designer to create professional visuals if requested" if visual_style != 'No Visuals (Text Only)' else "Skip visual generation"}
+
+                     Deliver a complete, professional business document ready for executive presentation.
+                     """
+
+                     # Update status
+                     status = "🤖 Coordinating multi-agent content creation..."
+                     yield status, "", None, gr.update(visible=False), gr.update(visible=False)
+
+                     # Generate content using the manager
+                     result = manager.run(prompt)
+
+                     # Update status
+                     status = "✅ Content generation completed successfully!"
+
+                     # Prepare download file
+                     import tempfile
+                     import os
+
+                     temp_file = tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False, encoding='utf-8')
+                     temp_file.write(result)
+                     temp_file.close()
+
+                     # Show/hide visual gallery based on whether visuals were generated
+                     show_visuals = visual_style != 'No Visuals (Text Only)' and "![" in result
+
+                     yield (
+                         status,
+                         result,
+                         temp_file.name,
+                         gr.update(visible=True),  # download button
+                         gr.update(visible=True)   # convert to images button
+                     )
+
+                 except Exception as e:
+                     error_msg = f"❌ Error generating content: {str(e)}"
+                     yield error_msg, "", None, gr.update(visible=False), gr.update(visible=False)

-         result_dict = eval(final_output)
-
-         story = result_dict.get("story")
-         image_data = result_dict.get("image_data")
-
-         print("\n--- STORY ---")
-         print(story)

-         if image_data and 'b64_json' in image_data:
-             print("\n--- ILLUSTRATION ---")
-             print("Illustration created. Saving to 'story_illustration.png'")
+
+             def create_business_visuals(content_text, model_provider, api_key):
+                 """Convert text content to images using T2l_text_to_images_and_base64_generator tool"""
+
+                 if not content_text.strip():
+                     return "❌ No content available to convert", []
+
+                 if not api_key.strip():
+                     return "❌ Please provide your API key or token", []
+
                try:
-                 img_bytes = base64.b64decode(image_data['b64_json'])
-                 img = Image.open(io.BytesIO(img_bytes))
-                 img.save("story_illustration.png")
-                 print("Image saved successfully.")
+                     # Initialize model for visual conversion
+                     yield "🤖 Initializing model for image conversion...", []
+
+                     try:
+                         model = initialize_model(model_provider, api_key)
+                     except Exception as e:
+                         yield f"❌ Failed to initialize model: {str(e)}", []
+                         return
+
+                     # Use already loaded MCP tools
+                     yield "🎨 Converting text to images...", []
+
+                     # Create visual agent with already loaded tools
+                     visual_agent = CodeAgent(
+                         tools=all_tools,
+                         model=model,
+                         add_base_tools=True,
+                         name="t2i_visual_converter"
+                     )
+
+                     # Directly convert the content text to images
+                     result = visual_agent.run(f"""
+                     Use the T2l_text_to_images_and_base64_generator tool to convert this text to images:
+
+                     text_content: "{content_text[:1000]}"
+                     aspect_ratio_str: "16:9 (Widescreen)"
+                     font_size: 36
+                     style: "plain"
+                     bg_color_name: "White"
+                     font_choice: "Arial"
+
+                     Convert the business content to professional images.
+                     """)
+
+                     if result:
+                         yield "✅ Successfully converted text to images!", [("Business Content", result)]
+                     else:
+                         yield "⚠️ No images were generated", []
+
                except Exception as e:
-                 print(f"Error saving image: {e}")
-         else:
-             print("\n--- ILLUSTRATION ---")
-             print("No illustration was generated.")
+                     error_msg = f"❌ Image conversion failed: {str(e)}"
+                     yield error_msg, []
+
+             # Connect the generate button
+             generate_btn.click(
+                 fn=generate_business_content,
+                 inputs=[topic_input, content_type, writing_style, visual_style, include_sources, model_provider, api_key_input],
+                 outputs=[status_box, content_output, download_btn, download_btn, convert_visual_btn],
+                 show_progress=True
+             )
+
+             # Connect the convert to images button - directly convert text to images
+             convert_visual_btn.click(
+                 fn=create_business_visuals,
+                 inputs=[content_output, model_provider, api_key_input],
+                 outputs=[visual_status, visual_output],
+                 show_progress=True
+             ).then(
+                 fn=lambda: gr.update(visible=True),
+                 outputs=visual_output_row
+             )
+
+             # Example buttons for quick testing
+             with gr.Row():
+                 gr.Markdown("### 🎯 Quick Examples:")
+
+                 example1_btn = gr.Button("📱 AI Smartphone Market", size="sm")
+                 example2_btn = gr.Button("🚗 EV Battery Tech", size="sm")
+                 example3_btn = gr.Button("☁️ Cloud AI Services", size="sm")
+
+             def set_example1():
+                 return "AI-powered smartphone features market analysis 2025"
+             def set_example2():
+                 return "Electric vehicle battery technology competitive landscape"
+             def set_example3():
+                 return "Cloud-based AI services market opportunities and threats"
+
+             example1_btn.click(fn=set_example1, outputs=topic_input)
+             example2_btn.click(fn=set_example2, outputs=topic_input)
+             example3_btn.click(fn=set_example3, outputs=topic_input)
+
+         return demo

-         # --- 6. Execution Start ---
if __name__ == "__main__":
-     run_storycrafter()
-
+     # Launch the business content creation interface
+     demo = create_business_content_ui()
+     demo.launch()