"""
Gradio web interface for sentiment analysis.

This module provides a modern, responsive Gradio web interface for
interacting with the sentiment analysis system, including real-time
analysis, confidence visualization, and history tracking.
"""

# Postpone annotation evaluation so type hints that reference ``gr`` do not
# raise NameError at import time when Gradio is not installed.
from __future__ import annotations

import asyncio
import logging
import json
import os
from typing import Dict, Any, List, Tuple, Optional
from datetime import datetime

import pandas as pd
import plotly.graph_objects as go
import plotly.express as px

try:
    import gradio as gr
    GRADIO_AVAILABLE = True
except ImportError:
    GRADIO_AVAILABLE = False
    logging.error("Gradio not available. Install with: pip install gradio")

# These relative imports require the module to be run as part of its package
# (e.g. via ``python -m``), not as a standalone script.
from .sentiment_analyzer import get_analyzer, SentimentResult, SentimentLabel
from .tools import list_tools


class SentimentHistory:
    """Manages a bounded, in-memory history of sentiment analyses."""

    def __init__(self, max_entries: int = 100):
        self.max_entries = max_entries
        self.entries: List[Dict[str, Any]] = []
        self.logger = logging.getLogger(__name__)

    def add_entry(self, text: str, result: SentimentResult, backend: str) -> None:
        """Record one analysis, trimming history to the most recent `max_entries`."""
        entry = {
            "timestamp": datetime.now().isoformat(),
            "text": text[:100] + "..." if len(text) > 100 else text,
            "full_text": text,
            "label": result.label.value,
            "confidence": result.confidence,
            "backend": backend,
            "raw_scores": result.raw_scores
        }

        self.entries.append(entry)

        if len(self.entries) > self.max_entries:
            self.entries = self.entries[-self.max_entries:]

    def get_recent_entries(self, count: int = 10) -> List[Dict[str, Any]]:
        """Return the most recent `count` entries in chronological order."""
        return self.entries[-count:] if self.entries else []

    def get_statistics(self) -> Dict[str, Any]:
        """Aggregate label counts, backend usage, and average confidence."""
        if not self.entries:
            return {
                "total_analyses": 0,
                "label_distribution": {},
                "average_confidence": 0.0,
                "backend_usage": {}
            }

        labels = [entry["label"] for entry in self.entries]
        confidences = [entry["confidence"] for entry in self.entries]
        backends = [entry["backend"] for entry in self.entries]

        label_counts = {
            "positive": labels.count("positive"),
            "negative": labels.count("negative"),
            "neutral": labels.count("neutral")
        }

        backend_counts = {}
        for backend in backends:
            backend_counts[backend] = backend_counts.get(backend, 0) + 1

        return {
            "total_analyses": len(self.entries),
            "label_distribution": label_counts,
            "average_confidence": sum(confidences) / len(confidences),
            "backend_usage": backend_counts
        }

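# Illustrative shape of SentimentHistory.get_statistics() output after a few
# analyses (values are examples only; actual numbers depend on usage):
#
#     {
#         "total_analyses": 3,
#         "label_distribution": {"positive": 2, "negative": 1, "neutral": 0},
#         "average_confidence": 0.78,
#         "backend_usage": {"textblob": 1, "transformers": 2},
#     }

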
class GradioInterface:
    """Gradio web interface for sentiment analysis."""

    def __init__(self, title: str = "Sentiment Analysis Server",
                 description: str = "Analyze text sentiment using TextBlob or Transformers"):
        self.title = title
        self.description = description
        self.logger = logging.getLogger(__name__)
        self.history = SentimentHistory()
        self.interface = None
        self._setup_interface()

    def _setup_interface(self) -> None:
        """Build the Gradio Blocks layout and wire up its event handlers."""
        if not GRADIO_AVAILABLE:
            raise RuntimeError("Gradio not available")

        with gr.Blocks(
            theme=gr.themes.Soft(),
            title=self.title
        ) as interface:

            gr.Markdown(f"# {self.title}")
            gr.Markdown(f"*{self.description}*")

            with gr.Tabs():
                with gr.TabItem("Sentiment Analysis"):
                    with gr.Row():
                        with gr.Column(scale=2):
                            text_input = gr.Textbox(
                                label="Text to Analyze",
                                placeholder="Enter text here to analyze its sentiment...",
                                lines=4
                            )

                            with gr.Row():
                                backend_choice = gr.Dropdown(
                                    choices=["auto", "textblob", "transformers"],
                                    value="auto",
                                    label="Analysis Backend"
                                )

                                analyze_btn = gr.Button(
                                    "Analyze Sentiment",
                                    variant="primary"
                                )

                        with gr.Column(scale=1):
                            result_display = gr.HTML(
                                value="<p>Enter text and click 'Analyze Sentiment' to see results.</p>"
                            )

                            confidence_plot = gr.Plot(visible=False)

                    gr.Markdown("### Quick Examples")
                    with gr.Row():
                        pos_btn = gr.Button("😊 Positive", size="sm")
                        neu_btn = gr.Button("😐 Neutral", size="sm")
                        neg_btn = gr.Button("😞 Negative", size="sm")
                        mix_btn = gr.Button("📝 Mixed", size="sm")

                with gr.TabItem("Batch Analysis"):
                    with gr.Row():
                        with gr.Column():
                            batch_input = gr.Textbox(
                                label="Texts to Analyze (one per line)",
                                placeholder="Enter multiple texts, one per line...",
                                lines=8
                            )

                            with gr.Row():
                                batch_backend = gr.Dropdown(
                                    choices=["auto", "textblob", "transformers"],
                                    value="auto",
                                    label="Analysis Backend"
                                )

                                batch_analyze_btn = gr.Button(
                                    "Analyze Batch",
                                    variant="primary"
                                )

                        with gr.Column():
                            batch_results = gr.DataFrame(
                                label="Batch Results",
                                headers=["Text", "Sentiment", "Confidence"]
                            )

                            batch_summary_plot = gr.Plot(visible=False)

                with gr.TabItem("Analysis History"):
                    with gr.Row():
                        refresh_history_btn = gr.Button("Refresh History", variant="secondary")
                        clear_history_btn = gr.Button("Clear History", variant="stop")

                    with gr.Row():
                        with gr.Column(scale=2):
                            history_table = gr.DataFrame(
                                label="Recent Analyses",
                                headers=["Time", "Text", "Sentiment", "Confidence", "Backend"]
                            )

                        with gr.Column(scale=1):
                            stats_display = gr.HTML(value="<p>No analyses yet.</p>")
                            history_plot = gr.Plot(visible=False)

                with gr.TabItem("Settings & Info"):
                    with gr.Row():
                        with gr.Column():
                            gr.Markdown("### Backend Information")
                            backend_info = gr.HTML(value="<p>Loading backend information...</p>")
                            refresh_info_btn = gr.Button("Refresh Info", variant="secondary")

                        with gr.Column():
                            gr.Markdown("### Usage Tips")
                            gr.Markdown("""
                            - **Auto**: Automatically selects the best available backend
                            - **TextBlob**: Fast, simple sentiment analysis
                            - **Transformers**: More accurate, AI-powered analysis
                            - **Batch Analysis**: Process multiple texts at once
                            - **History**: Track your analysis results over time
                            """)

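            # The async analysis API is bridged to Gradio's synchronous callbacks
            # below via asyncio.run(). This assumes Gradio runs these handlers in
            # worker threads with no event loop of their own, so each call can
            # safely create and tear down a fresh loop.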
            def analyze_sentiment(text: str, backend: str) -> Tuple[str, gr.Plot]:
                return asyncio.run(self._analyze_sentiment_async(text, backend))

            def analyze_batch(texts: str, backend: str) -> Tuple[pd.DataFrame, gr.Plot]:
                return asyncio.run(self._analyze_batch_async(texts, backend))

            def refresh_history() -> Tuple[pd.DataFrame, str, gr.Plot]:
                return self._get_history_data()

            def clear_history() -> Tuple[pd.DataFrame, str, gr.Plot]:
                self.history.entries.clear()
                return self._get_history_data()

            def get_backend_info() -> str:
                return asyncio.run(self._get_backend_info_async())

            def get_mcp_schema() -> str:
                """Get the MCP tools schema as JSON (not currently wired to a UI component)."""
                return asyncio.run(self._get_mcp_schema_async())

            examples = [
                "I absolutely love this new feature! It's incredible and makes everything so much easier.",
                "The weather is okay today, nothing particularly special about it.",
                "This is terrible and frustrating. I hate how complicated this has become.",
                "The movie had great visuals but the plot was disappointing. Mixed feelings overall."
            ]

            analyze_btn.click(
                analyze_sentiment,
                inputs=[text_input, backend_choice],
                outputs=[result_display, confidence_plot]
            )

            batch_analyze_btn.click(
                analyze_batch,
                inputs=[batch_input, batch_backend],
                outputs=[batch_results, batch_summary_plot]
            )

            refresh_history_btn.click(
                refresh_history,
                outputs=[history_table, stats_display, history_plot]
            )

            clear_history_btn.click(
                clear_history,
                outputs=[history_table, stats_display, history_plot]
            )

            refresh_info_btn.click(
                get_backend_info,
                outputs=[backend_info]
            )

            # The example buttons simply fill the text box with a canned sentence.
            pos_btn.click(lambda: examples[0], outputs=[text_input])
            neu_btn.click(lambda: examples[1], outputs=[text_input])
            neg_btn.click(lambda: examples[2], outputs=[text_input])
            mix_btn.click(lambda: examples[3], outputs=[text_input])

            # Populate backend info and history when the page first loads.
            interface.load(get_backend_info, outputs=[backend_info])
            interface.load(refresh_history, outputs=[history_table, stats_display, history_plot])

        self.interface = interface

    async def _analyze_sentiment_async(self, text: str, backend: str) -> Tuple[str, gr.Plot]:
        try:
            if not text.strip():
                return "<p>Please enter some text to analyze.</p>", gr.Plot(visible=False)

            analyzer = await get_analyzer(backend)
            result = await analyzer.analyze(text)

            self.history.add_entry(text, result, analyzer.backend)

            # CSS-style class names for the result; currently informational only,
            # since the HTML below styles everything inline.
            sentiment_class = f"sentiment-{result.label.value}"
            confidence_class = (
                "confidence-high" if result.confidence > 0.7
                else "confidence-medium" if result.confidence > 0.4
                else "confidence-low"
            )

            html_result = f"""
            <div style="padding: 1rem; border-radius: 0.5rem; background: #f8fafc; border-left: 4px solid #3b82f6;">
                <h3>Analysis Result</h3>
                <p><strong>Sentiment:</strong> <span style="color: {'#22c55e' if result.label.value == 'positive' else '#ef4444' if result.label.value == 'negative' else '#6b7280'}; font-weight: bold;">{result.label.value.title()}</span></p>
                <p><strong>Confidence:</strong> <span style="color: {'#059669' if result.confidence > 0.7 else '#d97706' if result.confidence > 0.4 else '#dc2626'};">{result.confidence:.2%}</span></p>
                <p><strong>Backend:</strong> {analyzer.backend}</p>
                <p><strong>Text Length:</strong> {len(text)} characters</p>
            </div>
            """

            plot = self._create_confidence_plot(result)
            return html_result, plot

        except Exception as e:
            self.logger.error(f"Analysis failed: {e}")
            error_html = f"""
            <div style="padding: 1rem; border-radius: 0.5rem; background: #fef2f2; border-left: 4px solid #ef4444;">
                <h3>Analysis Error</h3>
                <p><strong>Error:</strong> {str(e)}</p>
                <p>Please try again or check your input.</p>
            </div>
            """
            return error_html, gr.Plot(visible=False)

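    # Note: the 0.4 / 0.7 confidence thresholds used above for the color coding
    # are mirrored by the 40 / 70 gauge steps in _create_confidence_plot() below.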
    async def _analyze_batch_async(self, texts: str, backend: str) -> Tuple[pd.DataFrame, gr.Plot]:
        try:
            if not texts.strip():
                return pd.DataFrame(), gr.Plot(visible=False)

            text_list = [t.strip() for t in texts.split('\n') if t.strip()]

            if not text_list:
                return pd.DataFrame(), gr.Plot(visible=False)

            analyzer = await get_analyzer(backend)
            results = await analyzer.analyze_batch(text_list)

            data = []
            for text, result in zip(text_list, results):
                self.history.add_entry(text, result, analyzer.backend)

                data.append({
                    "Text": text[:50] + "..." if len(text) > 50 else text,
                    "Sentiment": result.label.value.title(),
                    "Confidence": f"{result.confidence:.2%}"
                })

            df = pd.DataFrame(data)
            plot = self._create_batch_summary_plot(results)

            return df, plot

        except Exception as e:
            self.logger.error(f"Batch analysis failed: {e}")
            return pd.DataFrame([{"Error": str(e)}]), gr.Plot(visible=False)

    def _create_confidence_plot(self, result: SentimentResult) -> gr.Plot:
        try:
            fig = go.Figure(go.Indicator(
                mode="gauge+number",
                value=result.confidence * 100,
                domain={'x': [0, 1], 'y': [0, 1]},
                title={'text': f"Confidence - {result.label.value.title()}"},
                gauge={
                    'axis': {'range': [None, 100]},
                    'bar': {'color': "darkblue"},
                    'steps': [
                        {'range': [0, 40], 'color': "lightgray"},
                        {'range': [40, 70], 'color': "yellow"},
                        {'range': [70, 100], 'color': "green"}
                    ]
                }
            ))

            fig.update_layout(height=300, margin=dict(l=20, r=20, t=40, b=20))
            return gr.Plot(value=fig, visible=True)

        except Exception as e:
            self.logger.error(f"Failed to create confidence plot: {e}")
            return gr.Plot(visible=False)

    def _create_batch_summary_plot(self, results: List[SentimentResult]) -> gr.Plot:
        try:
            labels = [result.label.value for result in results]
            label_counts = {
                "Positive": labels.count("positive"),
                "Negative": labels.count("negative"),
                "Neutral": labels.count("neutral")
            }

            fig = px.pie(
                values=list(label_counts.values()),
                names=list(label_counts.keys()),
                title="Sentiment Distribution",
                # `color` is required for color_discrete_map to take effect.
                color=list(label_counts.keys()),
                color_discrete_map={
                    "Positive": "#22c55e",
                    "Negative": "#ef4444",
                    "Neutral": "#6b7280"
                }
            )

            fig.update_layout(height=300, margin=dict(l=20, r=20, t=40, b=20))
            return gr.Plot(value=fig, visible=True)

        except Exception as e:
            self.logger.error(f"Failed to create batch summary plot: {e}")
            return gr.Plot(visible=False)

    def _get_history_data(self) -> Tuple[pd.DataFrame, str, gr.Plot]:
        try:
            entries = self.history.get_recent_entries(20)

            if not entries:
                empty_df = pd.DataFrame(columns=["Time", "Text", "Sentiment", "Confidence", "Backend"])
                return empty_df, "<p>No analyses yet.</p>", gr.Plot(visible=False)

            data = []
            for entry in reversed(entries):
                data.append({
                    # Trim the ISO timestamp to "YYYY-MM-DD HH:MM:SS".
                    "Time": entry["timestamp"][:19].replace("T", " "),
                    "Text": entry["text"],
                    "Sentiment": entry["label"].title(),
                    "Confidence": f"{entry['confidence']:.2%}",
                    "Backend": entry["backend"]
                })

            df = pd.DataFrame(data)
            stats = self.history.get_statistics()

            stats_html = f"""
            <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 1rem; border-radius: 0.5rem;">
                <h3>📊 Analysis Statistics</h3>
                <p><strong>Total Analyses:</strong> {stats['total_analyses']}</p>
                <p><strong>Average Confidence:</strong> {stats['average_confidence']:.2%}</p>
                <h4>Sentiment Distribution:</h4>
                <ul>
                    <li>Positive: {stats['label_distribution'].get('positive', 0)}</li>
                    <li>Negative: {stats['label_distribution'].get('negative', 0)}</li>
                    <li>Neutral: {stats['label_distribution'].get('neutral', 0)}</li>
                </ul>
            </div>
            """

            plot = self._create_history_plot(stats) if stats['total_analyses'] > 0 else gr.Plot(visible=False)
            return df, stats_html, plot

        except Exception as e:
            self.logger.error(f"Failed to get history data: {e}")
            error_df = pd.DataFrame([{"Error": str(e)}])
            return error_df, f"<p>Error loading history: {e}</p>", gr.Plot(visible=False)

    def _create_history_plot(self, stats: Dict[str, Any]) -> gr.Plot:
        try:
            labels = list(stats['label_distribution'].keys())
            values = list(stats['label_distribution'].values())

            fig = px.bar(
                x=[label.title() for label in labels],
                y=values,
                title="Historical Sentiment Distribution",
                color=labels,
                color_discrete_map={
                    "positive": "#22c55e",
                    "negative": "#ef4444",
                    "neutral": "#6b7280"
                }
            )

            fig.update_layout(height=300, margin=dict(l=20, r=20, t=40, b=20), showlegend=False)
            return gr.Plot(value=fig, visible=True)

        except Exception as e:
            self.logger.error(f"Failed to create history plot: {e}")
            return gr.Plot(visible=False)

    async def _get_backend_info_async(self) -> str:
        try:
            analyzer = await get_analyzer("auto")
            info = analyzer.get_info()

            html = f"""
            <div style="padding: 1rem; border-radius: 0.5rem; background: #f0f9ff; border-left: 4px solid #0ea5e9;">
                <h3>🔧 Backend Information</h3>
                <p><strong>Current Backend:</strong> {info['backend']}</p>
                <p><strong>Model Loaded:</strong> {'Yes' if info['model_loaded'] else 'No'}</p>
                <p><strong>TextBlob Available:</strong> {'Yes' if info['textblob_available'] else 'No'}</p>
                <p><strong>Transformers Available:</strong> {'Yes' if info['transformers_available'] else 'No'}</p>
                <p><strong>CUDA Available:</strong> {'Yes' if info.get('cuda_available', False) else 'No'}</p>
                {f"<p><strong>Model Name:</strong> {info['model_name']}</p>" if info.get('model_name') else ""}
            </div>
            """
            return html

        except Exception as e:
            self.logger.error(f"Failed to get backend info: {e}")
            return f"""
            <div style="padding: 1rem; border-radius: 0.5rem; background: #fef2f2; border-left: 4px solid #ef4444;">
                <h3>❌ Backend Error</h3>
                <p>Failed to load backend information: {str(e)}</p>
            </div>
            """

    async def _get_mcp_schema_async(self) -> str:
        """Get the MCP tools schema as formatted JSON."""
        try:
            tools = await list_tools()
            schema = {
                "mcp_version": "2024-11-05",
                "server_info": {
                    "name": "sentiment-analyzer",
                    "version": "1.0.0",
                    "description": "Sentiment analysis server using TextBlob and Transformers"
                },
                "tools": tools,
                "total_tools": len(tools)
            }
            return json.dumps(schema, indent=2)

        except Exception as e:
            self.logger.error(f"Failed to get MCP schema: {e}")
            return json.dumps({
                "error": str(e),
                "error_type": type(e).__name__
            }, indent=2)

    def launch(self, **kwargs) -> None:
        if not self.interface:
            raise RuntimeError("Interface not initialized")

        # MCP support can be requested either via launch(mcp_server=True) or the
        # GRADIO_MCP_SERVER environment variable.
        mcp_server_enabled = (
            kwargs.get("mcp_server", False) or
            os.getenv("GRADIO_MCP_SERVER", "").lower() in ("true", "1", "yes", "on")
        )

        launch_params = {
            "server_name": "0.0.0.0",
            "server_port": 7860,
            "share": False,
            "debug": False,
            "show_error": True,
            "quiet": False
        }

        if mcp_server_enabled:
            launch_params["mcp_server"] = True
            self.logger.info("MCP server functionality enabled for Gradio interface")

        # Caller-supplied kwargs override the defaults above.
        launch_params.update(kwargs)

        self.logger.info(f"Launching Gradio interface on {launch_params['server_name']}:{launch_params['server_port']}")
        if mcp_server_enabled:
            self.logger.info("Gradio interface will also serve as MCP server with API endpoints")

        try:
            self.interface.launch(**launch_params)
        except Exception as e:
            self.logger.error(f"Failed to launch interface: {e}")
            raise


def create_gradio_interface(**kwargs) -> GradioInterface:
    """Create a GradioInterface, failing fast if Gradio is not installed."""
    if not GRADIO_AVAILABLE:
        raise RuntimeError("Gradio not available. Install with: pip install gradio")

    return GradioInterface(**kwargs)

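# Minimal programmatic usage sketch (assumes this file lives in an importable
# package; `your_package` below is a placeholder for the real package name):
#
#     from your_package.gradio_interface import create_gradio_interface
#
#     ui = create_gradio_interface(title="My Sentiment UI")
#     ui.launch(server_port=8080)

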
async def main() -> None:
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    interface = create_gradio_interface()
    interface.launch(debug=True)


if __name__ == "__main__":
    asyncio.run(main())
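
# Running from the command line (sketch): because of the relative imports above,
# the module has to be started with ``python -m`` from its parent package, and
# the MCP endpoint can be requested via the environment variable checked in
# GradioInterface.launch(), e.g.:
#
#     GRADIO_MCP_SERVER=true python -m your_package.gradio_interface
#
# (``your_package`` is a placeholder for the actual package name)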