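"""Collaborative Reasoning Orchestra.

A Gradio app that sends the same problem to three Groq-hosted reasoning models
(DeepSeek R1 Distill, Qwen3 32B, and QwQ 32B), renders each model's analysis as
HTML, and then asks a fourth "conductor" call to synthesize the three
perspectives into a single recommendation.
"""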
import gradio as gr
from groq import Groq
import json
import time
from typing import Dict, List, Tuple, Optional
import threading
from datetime import datetime
import html
import re
class ReasoningOrchestra:
    def __init__(self):
        self.client = None
        self.is_api_key_set = False

    def set_api_key(self, api_key: str) -> str:
        """Set the Groq API key and test the connection"""
        if not api_key.strip():
            return "❌ Please enter a valid API key"
        try:
            self.client = Groq(api_key=api_key.strip())
            # Test the connection with a simple request
            test_completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": "Hello"}],
                max_completion_tokens=10
            )
            self.is_api_key_set = True
            return "✅ API key validated successfully! You can now use the Reasoning Orchestra."
        except Exception as e:
            self.is_api_key_set = False
            return f"❌ API key validation failed: {str(e)}"
    def format_text_to_html(self, text: str) -> str:
        """Convert markdown-style text to HTML with proper formatting"""
        if not text:
            return "<p>No content available</p>"

        # Escape HTML characters first
        text = html.escape(text)

        # Convert markdown-style formatting to HTML
        # Headers
        text = re.sub(r'^### (.*$)', r'<h3>\1</h3>', text, flags=re.MULTILINE)
        text = re.sub(r'^## (.*$)', r'<h2>\1</h2>', text, flags=re.MULTILINE)
        text = re.sub(r'^# (.*$)', r'<h1>\1</h1>', text, flags=re.MULTILINE)

        # Bold text
        text = re.sub(r'\*\*(.*?)\*\*', r'<strong>\1</strong>', text)

        # Italic text
        text = re.sub(r'\*(.*?)\*', r'<em>\1</em>', text)

        # Code blocks
        text = re.sub(r'```(.*?)```', r'<pre><code>\1</code></pre>', text, flags=re.DOTALL)
        text = re.sub(r'`(.*?)`', r'<code>\1</code>', text)

        # Lists: track which tag opened the current list so it is closed correctly
        lines = text.split('\n')
        in_list = False
        list_tag = 'ul'
        formatted_lines = []
        for line in lines:
            stripped = line.strip()
            if stripped.startswith('- ') or stripped.startswith('* '):
                if not in_list:
                    list_tag = 'ul'
                    formatted_lines.append('<ul>')
                    in_list = True
                formatted_lines.append(f'<li>{stripped[2:]}</li>')
            elif stripped.startswith(('1. ', '2. ', '3. ', '4. ', '5. ', '6. ', '7. ', '8. ', '9. ')):
                if not in_list:
                    list_tag = 'ol'
                    formatted_lines.append('<ol>')
                    in_list = True
                formatted_lines.append(f'<li>{stripped[3:]}</li>')
            else:
                if in_list:
                    # Close the list with the same tag that opened it
                    formatted_lines.append(f'</{list_tag}>')
                    in_list = False
                if stripped:
                    formatted_lines.append(f'<p>{line}</p>')
                else:
                    formatted_lines.append('<br>')
        if in_list:
            formatted_lines.append(f'</{list_tag}>')
        return '\n'.join(formatted_lines)
def deep_thinker_analyze(self, problem: str, context: str = "") -> Dict: | |
"""DeepSeek R1 - The Deep Thinker""" | |
if not self.is_api_key_set: | |
return {"error": "API key not set"} | |
prompt = f"""You are the Deep Thinker in a collaborative reasoning system. Your role is to provide thorough, methodical analysis with extensive step-by-step reasoning. | |
Problem: {problem} | |
{f"Additional Context: {context}" if context else ""} | |
Please provide a comprehensive analysis with deep reasoning. Think through all implications, consider multiple angles, and provide detailed step-by-step logic. Be thorough and methodical in your approach.""" | |
try: | |
completion = self.client.chat.completions.create( | |
model="deepseek-r1-distill-llama-70b", | |
messages=[{"role": "user", "content": prompt}], | |
temperature=0.6, | |
max_completion_tokens=2048, | |
top_p=0.95, | |
reasoning_format="raw" | |
) | |
response_content = completion.choices[0].message.content or "No response generated" | |
return { | |
"model": "DeepSeek R1 (Deep Thinker)", | |
"role": "π The Philosopher & Deep Analyzer", | |
"reasoning": response_content, | |
"timestamp": datetime.now().strftime("%H:%M:%S"), | |
"tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A" | |
} | |
except Exception as e: | |
return {"error": f"Deep Thinker error: {str(e)}"} | |
def quick_strategist_analyze(self, problem: str, context: str = "") -> Dict: | |
"""Qwen3 32B - The Quick Strategist""" | |
if not self.is_api_key_set: | |
return {"error": "API key not set"} | |
prompt = f"""You are the Quick Strategist in a collaborative reasoning system. Your role is to provide fast, efficient strategic analysis with clear action plans. | |
Problem: {problem} | |
{f"Additional Context: {context}" if context else ""} | |
Please provide a strategic analysis with: | |
1. Key insights and patterns | |
2. Practical solutions | |
3. Implementation priorities | |
4. Risk assessment | |
5. Clear next steps | |
Be decisive and solution-focused. Provide concrete, actionable recommendations.""" | |
try: | |
completion = self.client.chat.completions.create( | |
model="qwen/qwen3-32b", | |
messages=[{"role": "user", "content": prompt}], | |
temperature=0.6, | |
top_p=0.95, | |
max_completion_tokens=1536, | |
reasoning_effort="default" | |
) | |
response_content = completion.choices[0].message.content or "No response generated" | |
return { | |
"model": "Qwen3 32B (Quick Strategist)", | |
"role": "π The Strategic Decision Maker", | |
"reasoning": response_content, | |
"timestamp": datetime.now().strftime("%H:%M:%S"), | |
"tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A" | |
} | |
except Exception as e: | |
return {"error": f"Quick Strategist error: {str(e)}"} | |
def detail_detective_analyze(self, problem: str, context: str = "") -> Dict: | |
"""QwQ 32B - The Detail Detective""" | |
if not self.is_api_key_set: | |
return {"error": "API key not set"} | |
prompt = f"""You are the Detail Detective in a collaborative reasoning system. Your role is to provide meticulous investigation and comprehensive fact-checking. | |
Problem: {problem} | |
{f"Additional Context: {context}" if context else ""} | |
Please conduct a thorough investigation including: | |
1. Detailed analysis of all aspects | |
2. Potential edge cases and considerations | |
3. Verification of assumptions | |
4. Historical context or precedents | |
5. Comprehensive pros and cons | |
6. Hidden connections or implications | |
Be extremely thorough and leave no stone unturned. Provide detailed evidence and reasoning for your conclusions.""" | |
try: | |
completion = self.client.chat.completions.create( | |
model="qwen-qwq-32b", | |
messages=[{"role": "user", "content": prompt}], | |
temperature=0.6, | |
top_p=0.95, | |
max_completion_tokens=2048, | |
reasoning_format="parsed" | |
) | |
response_content = completion.choices[0].message.content or "No response generated" | |
return { | |
"model": "QwQ 32B (Detail Detective)", | |
"role": "π The Meticulous Investigator", | |
"reasoning": response_content, | |
"timestamp": datetime.now().strftime("%H:%M:%S"), | |
"tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A" | |
} | |
except Exception as e: | |
return {"error": f"Detail Detective error: {str(e)}"} | |
    def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str:
        """Synthesize all three perspectives into a final orchestrated solution"""
        if not self.is_api_key_set:
            return "API key not set"

        # Extract reasoning content safely
        deep_reasoning = deep_result.get('reasoning', 'Analysis not available') if not deep_result.get('error') else f"Error: {deep_result['error']}"
        strategic_reasoning = strategic_result.get('reasoning', 'Analysis not available') if not strategic_result.get('error') else f"Error: {strategic_result['error']}"
        detective_reasoning = detective_result.get('reasoning', 'Analysis not available') if not detective_result.get('error') else f"Error: {detective_result['error']}"

        synthesis_prompt = f"""You are the Orchestra Conductor. You have received three different analytical perspectives on the same problem. Your job is to synthesize these into a comprehensive, unified solution.

ORIGINAL PROBLEM: {original_problem}

DEEP THINKER ANALYSIS:
{deep_reasoning}

STRATEGIC ANALYSIS:
{strategic_reasoning}

DETECTIVE INVESTIGATION:
{detective_reasoning}

Please create a unified synthesis that:
1. Combines the best insights from all three perspectives
2. Resolves any contradictions between the analyses
3. Provides a comprehensive final recommendation
4. Highlights where the different reasoning styles complement each other
5. Gives a clear, actionable conclusion

Format your response as a well-structured final solution that leverages all three reasoning approaches."""

        try:
            completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": synthesis_prompt}],
                temperature=0.7,
                max_completion_tokens=2048,
                top_p=0.9
            )
            return completion.choices[0].message.content or "No synthesis generated"
        except Exception as e:
            return f"Synthesis error: {str(e)}"
# Initialize the orchestra
orchestra = ReasoningOrchestra()


def validate_api_key(api_key: str) -> str:
    """Validate the API key and return status"""
    return orchestra.set_api_key(api_key)
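# The functions below are Gradio event handlers. Each one returns an HTML
# string that is rendered by a gr.HTML component in the interface built
# further down in this file.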
def run_single_model(problem: str, model_choice: str, context: str = "") -> str: | |
"""Run a single model analysis""" | |
if not orchestra.is_api_key_set: | |
return """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;"> | |
<h3>β API Key Required</h3> | |
<p>Please set your Groq API key first in the API Configuration section above.</p> | |
</div>""" | |
if not problem.strip(): | |
return """<div style="color: orange; padding: 20px; border: 2px solid orange; border-radius: 10px; background-color: #fff3e6;"> | |
<h3>β οΈ Problem Required</h3> | |
<p>Please enter a problem to analyze.</p> | |
</div>""" | |
start_time = time.time() | |
# Show loading state | |
loading_html = f"""<div style="padding: 20px; border: 2px solid #007bff; border-radius: 10px; background-color: #e6f3ff;"> | |
<h3>π Processing...</h3> | |
<p>The {model_choice} is analyzing your problem. Please wait...</p> | |
</div>""" | |
if model_choice == "Deep Thinker (DeepSeek R1)": | |
result = orchestra.deep_thinker_analyze(problem, context) | |
elif model_choice == "Quick Strategist (Qwen3 32B)": | |
result = orchestra.quick_strategist_analyze(problem, context) | |
elif model_choice == "Detail Detective (QwQ 32B)": | |
result = orchestra.detail_detective_analyze(problem, context) | |
else: | |
return """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;"> | |
<h3>β Invalid Model Selection</h3> | |
<p>Please select a valid model from the dropdown.</p> | |
</div>""" | |
elapsed_time = time.time() - start_time | |
if "error" in result: | |
return f"""<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;"> | |
<h3>β Error</h3> | |
<p>{result['error']}</p> | |
</div>""" | |
# Format the response as HTML | |
reasoning_html = orchestra.format_text_to_html(result['reasoning']) | |
formatted_output = f""" | |
<div style="border: 2px solid #28a745; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);"> | |
<div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid #28a745;"> | |
<h2 style="margin: 0; color: #28a745;">{result['role']}</h2> | |
</div> | |
<div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;"> | |
<div style="display: flex; gap: 20px; font-size: 14px; color: #666;"> | |
<span><strong>Model:</strong> {result['model']}</span> | |
<span><strong>Analysis Time:</strong> {elapsed_time:.2f} seconds</span> | |
<span><strong>Timestamp:</strong> {result['timestamp']}</span> | |
<span><strong>Tokens:</strong> {result['tokens_used']}</span> | |
</div> | |
</div> | |
<div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6;"> | |
{reasoning_html} | |
</div> | |
</div> | |
""" | |
return formatted_output | |
def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str, str]: | |
"""Run the full collaborative reasoning orchestra""" | |
if not orchestra.is_api_key_set: | |
error_msg = """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;"> | |
<h3>β API Key Required</h3> | |
<p>Please set your Groq API key first in the API Configuration section above.</p> | |
</div>""" | |
return error_msg, error_msg, error_msg, error_msg | |
if not problem.strip(): | |
error_msg = """<div style="color: orange; padding: 20px; border: 2px solid orange; border-radius: 10px; background-color: #fff3e6;"> | |
<h3>β οΈ Problem Required</h3> | |
<p>Please enter a problem to analyze.</p> | |
</div>""" | |
return error_msg, error_msg, error_msg, error_msg | |
# Phase 1: Deep Thinker | |
deep_result = orchestra.deep_thinker_analyze(problem, context) | |
# Phase 2: Quick Strategist | |
strategic_result = orchestra.quick_strategist_analyze(problem, context) | |
# Phase 3: Detail Detective | |
detective_result = orchestra.detail_detective_analyze(problem, context) | |
# Phase 4: Synthesis | |
synthesis = orchestra.synthesize_orchestra(deep_result, strategic_result, detective_result, problem) | |
def format_result_html(result: Dict, color: str, icon: str) -> str: | |
if "error" in result: | |
return f"""<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;"> | |
<h3>β Error</h3> | |
<p>{result['error']}</p> | |
</div>""" | |
reasoning_html = orchestra.format_text_to_html(result['reasoning']) | |
return f""" | |
<div style="border: 2px solid {color}; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);"> | |
<div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid {color};"> | |
<span style="font-size: 24px; margin-right: 10px;">{icon}</span> | |
<h2 style="margin: 0; color: {color};">{result['model']}</h2> | |
</div> | |
<div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;"> | |
<div style="display: flex; gap: 20px; font-size: 14px; color: #666;"> | |
<span><strong>Timestamp:</strong> {result['timestamp']}</span> | |
<span><strong>Tokens:</strong> {result['tokens_used']}</span> | |
</div> | |
</div> | |
<div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6;"> | |
{reasoning_html} | |
</div> | |
</div> | |
""" | |
deep_output = format_result_html(deep_result, "#6f42c1", "π") | |
strategic_output = format_result_html(strategic_result, "#fd7e14", "π") | |
detective_output = format_result_html(detective_result, "#20c997", "π") | |
synthesis_html = orchestra.format_text_to_html(synthesis) | |
synthesis_output = f""" | |
<div style="border: 2px solid #dc3545; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #fff5f5 0%, #fee);"> | |
<div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid #dc3545;"> | |
<span style="font-size: 24px; margin-right: 10px;">πΌ</span> | |
<h2 style="margin: 0; color: #dc3545;">Orchestra Conductor - Final Synthesis</h2> | |
</div> | |
<div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6;"> | |
{synthesis_html} | |
</div> | |
</div> | |
""" | |
return deep_output, strategic_output, detective_output, synthesis_output | |
# Custom CSS for better styling
custom_css = """
.gradio-container {
    max-width: 1400px !important;
    margin: 0 auto !important;
}
.api-key-section {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 20px;
    border-radius: 10px;
    margin-bottom: 20px;
}
.model-section {
    border: 2px solid #e1e5e9;
    border-radius: 10px;
    padding: 15px;
    margin: 10px 0;
}
.orchestra-header {
    text-align: center;
    background: linear-gradient(45deg, #f093fb 0%, #f5576c 100%);
    padding: 20px;
    border-radius: 15px;
    margin-bottom: 20px;
}
.status-box {
    background-color: #f8f9fa;
    border-left: 4px solid #28a745;
    padding: 15px;
    margin: 10px 0;
    border-radius: 5px;
}
/* Custom styling for HTML outputs */
.html-content {
    max-height: 600px;
    overflow-y: auto;
    border: 1px solid #ddd;
    border-radius: 8px;
    padding: 10px;
    background-color: #fafafa;
}
"""
# Build the Gradio interface
with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
    # Header
    gr.HTML("""
    <div class="orchestra-header">
        <h1>🎼 The Collaborative Reasoning Orchestra</h1>
        <p><em>Where AI models collaborate like musicians in an orchestra to solve complex problems</em></p>
        <p><strong>Now with Beautiful HTML-Formatted Responses!</strong></p>
    </div>
    """)

    # API Key Section
    with gr.Group():
        gr.HTML('<div class="api-key-section"><h3 style="color: white; margin-top: 0;">🔑 API Configuration</h3></div>')
        with gr.Row():
            api_key_input = gr.Textbox(
                label="Enter your Groq API Key",
                type="password",
                placeholder="gsk_...",
                info="Get your free API key from https://console.groq.com/keys"
            )
            api_status = gr.Textbox(
                label="API Status",
                interactive=False,
                placeholder="Enter API key to validate..."
            )
        validate_btn = gr.Button("🔑 Validate API Key", variant="primary")
        validate_btn.click(
            fn=validate_api_key,
            inputs=[api_key_input],
            outputs=[api_status]
        )
    # Main Interface Tabs
    with gr.Tabs() as tabs:
        # Single Model Tab
        with gr.TabItem("🎯 Single Model Analysis"):
            gr.Markdown("### Test individual reasoning models with beautiful HTML output")

            with gr.Row():
                with gr.Column(scale=1):
                    single_problem = gr.Textbox(
                        label="Problem Statement",
                        placeholder="Enter the problem you want to analyze...",
                        lines=4
                    )
                    single_context = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Any additional context or constraints...",
                        lines=2
                    )
                    model_choice = gr.Dropdown(
                        label="Choose Model",
                        choices=[
                            "Deep Thinker (DeepSeek R1)",
                            "Quick Strategist (Qwen3 32B)",
                            "Detail Detective (QwQ 32B)"
                        ],
                        value="Deep Thinker (DeepSeek R1)"
                    )
                    single_analyze_btn = gr.Button("🚀 Analyze with HTML Output", variant="primary", size="lg")

                with gr.Column(scale=2):
                    single_output = gr.HTML(label="Analysis Result", elem_classes=["html-content"])

            single_analyze_btn.click(
                fn=run_single_model,
                inputs=[single_problem, model_choice, single_context],
                outputs=[single_output]
            )
        # Full Orchestra Tab
        with gr.TabItem("🎼 Full Orchestra Collaboration"):
            gr.Markdown("### Run all three models collaboratively with stunning HTML-formatted output")

            with gr.Column():
                with gr.Row():
                    with gr.Column(scale=1):
                        orchestra_problem = gr.Textbox(
                            label="Problem Statement",
                            placeholder="Enter a complex problem that benefits from multiple reasoning perspectives...",
                            lines=6
                        )
                        orchestra_context = gr.Textbox(
                            label="Additional Context (Optional)",
                            placeholder="Background information, constraints, or specific requirements...",
                            lines=3
                        )
                        orchestra_analyze_btn = gr.Button("🎼 Start Orchestra Analysis", variant="primary", size="lg")

                # Orchestra Results
                with gr.Column():
                    deep_output = gr.HTML(label="🤔 Deep Thinker Analysis", elem_classes=["html-content"])
                    strategic_output = gr.HTML(label="🚀 Quick Strategist Analysis", elem_classes=["html-content"])
                    detective_output = gr.HTML(label="🔍 Detail Detective Analysis", elem_classes=["html-content"])
                    synthesis_output = gr.HTML(label="🎼 Final Orchestrated Solution", elem_classes=["html-content"])

            orchestra_analyze_btn.click(
                fn=run_full_orchestra,
                inputs=[orchestra_problem, orchestra_context],
                outputs=[deep_output, strategic_output, detective_output, synthesis_output]
            )
        # Examples Tab
        with gr.TabItem("💡 Example Problems"):
            gr.Markdown("""
            ### Try these example problems to see the Orchestra in action:

            **🏢 Business Strategy:**
            "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption. Budget is $50K for the next 6 months."

            **🤖 Ethical AI:**
            "Should autonomous vehicles prioritize passenger safety over pedestrian safety in unavoidable accident scenarios? Consider the ethical, legal, and practical implications for mass adoption."

            **🌍 Environmental Policy:**
            "Design a policy framework to reduce carbon emissions in urban areas by 40% within 10 years while maintaining economic growth and social equity."

            **🧬 Scientific Research:**
            "We've discovered a potential breakthrough in gene therapy for treating Alzheimer's, but it requires human trials. How should we proceed given the risks, benefits, regulatory requirements, and ethical considerations?"

            **🎓 Educational Innovation:**
            "How can we redesign traditional university education to better prepare students for the rapidly changing job market of the 2030s, considering AI, remote work, and emerging technologies?"

            **🏙️ Urban Planning:**
            "A city of 500K people wants to build 10,000 affordable housing units but faces opposition from current residents, environmental concerns, and a $2B budget constraint. Develop a comprehensive solution."

            **🚗 Transportation Future:**
            "Design a comprehensive transportation system for a smart city of 1 million people in 2035, integrating autonomous vehicles, public transit, and sustainable mobility."
            """)
            # Quick copy buttons for examples (note: no output component is
            # wired up, so the returned text is not displayed anywhere yet)
            with gr.Row():
                gr.Button("📋 Copy Business Example", variant="secondary").click(
                    lambda: "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption. Budget is $50K for the next 6 months.",
                    outputs=[]
                )
    # Footer
    gr.HTML("""
    <div style="text-align: center; margin-top: 30px; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; color: white;">
        <h3>🎼 How the Orchestra Works</h3>
        <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 20px; margin: 20px 0;">
            <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
                <h4>🤔 Deep Thinker (DeepSeek R1)</h4>
                <p>Provides thorough philosophical and theoretical analysis with comprehensive reasoning chains</p>
            </div>
            <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
                <h4>🚀 Quick Strategist (Qwen3 32B)</h4>
                <p>Delivers practical strategies, action plans, and rapid decision-making frameworks</p>
            </div>
            <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
                <h4>🔍 Detail Detective (QwQ 32B)</h4>
                <p>Conducts comprehensive investigation, fact-checking, and finds hidden connections</p>
            </div>
            <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
                <h4>🎼 Orchestra Conductor</h4>
                <p>Synthesizes all perspectives into unified, comprehensive solutions</p>
            </div>
        </div>
        <p style="margin-top: 20px;"><em>Built with ❤️ using Groq's lightning-fast inference, Gradio, and beautiful HTML formatting</em></p>
    </div>
    """)
# Launch the app
if __name__ == "__main__":
    app.launch(
        share=True
    )