#!/usr/bin/env python3
"""
MCP Sentiment Analysis with Gradio UI using smolagents.
This script creates a Gradio interface that connects to an MCP sentiment analysis server
using the smolagents library, following the approach from the Hugging Face MCP Course.
Performance: Fast UI-based sentiment analysis
Protocol: Native MCP via smolagents with Gradio interface
To run this script:
pdm run python usage/sentiment_mcp_bis.py
Dependencies:
- smolagents[mcp] (install with: pdm add "smolagents[mcp]")
- gradio[mcp] (install with: pdm add "gradio[mcp]")
Based on Hugging Face MCP Course:
https://huggingface.co/learn/mcp-course/unit2/gradio-client
"""
import re

import gradio as gr
try:
from smolagents import InferenceClientModel, CodeAgent
from smolagents.mcp_client import MCPClient
SMOLAGENTS_AVAILABLE = True
except ImportError as e:
print(f"❌ smolagents not available: {e}")
MCPClient = None
InferenceClientModel = None
CodeAgent = None
SMOLAGENTS_AVAILABLE = False
def create_sentiment_interface():
"""Create a Gradio interface for sentiment analysis using MCP."""
if not SMOLAGENTS_AVAILABLE:
# Create a simple error interface
def error_fn(message, history):
return (
"❌ smolagents not available. Install with: pdm add 'smolagents[mcp]'"
)
demo = gr.ChatInterface(
fn=error_fn,
type="messages",
title="❌ MCP Sentiment Analysis (Missing Dependencies)",
description="smolagents library is required but not installed.",
)
return demo
mcp_client = None
try:
print("⏳ Connecting to MCP sentiment analysis server...")
# Connect to the MCP server
mcp_client = MCPClient(
{"url": "https://freemansel-mcp-sentiment.hf.space/gradio_api/mcp/sse"}
)
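        # Gradio apps launched with mcp_server=True expose their MCP server over
        # SSE at <app-url>/gradio_api/mcp/sse, which is the endpoint used above.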
print("βœ… MCP client connected successfully!")
# Get available tools
tools = mcp_client.get_tools()
print(f"βœ… Found {len(tools)} tools:")
for tool in tools:
print(f" β€’ {tool.name}: {tool.description}")
# Create an agent with the tools
# Skip LLM models and go directly to simple agent for reliability
print("ℹ️ Using simple direct agent (most reliable for MCP tools)")
        def simple_agent_run(message):
            """Pull the text to analyze out of the message and call the MCP tool."""
            # Only handle messages that look like a sentiment request.
            keywords = ("sentiment", "analyze", "positive")
            if tools and any(word in message.lower() for word in keywords):
                text_to_analyze = None
                # First try common request patterns ("... of: <text>"), stripping any
                # surrounding quotes from the captured text.
                patterns = [
                    r"sentiment of:?\s*(.+)",
                    r"sentiment for:?\s*(.+)",
                    r"analyze:?\s*(.+)",
                    r"positive is:?\s*(.+)",
                ]
                for pattern in patterns:
                    match = re.search(pattern, message, re.IGNORECASE)
                    if match:
                        text_to_analyze = match.group(1).strip().strip("'\"")
                        break
                # Fall back to the first quoted span in the message.
                if not text_to_analyze:
                    quoted_text = re.findall(r"['\"]([^'\"]+)['\"]", message)
                    if quoted_text:
                        text_to_analyze = quoted_text[0]
                if text_to_analyze:
                    try:
                        result = tools[0](text=text_to_analyze)
                        return f"✅ Sentiment Analysis Result:\n{result}"
                    except Exception as e:
                        return f"❌ Error calling sentiment tool: {e}"
return """🎭 **MCP Sentiment Analysis Tool**
Please ask for sentiment analysis using one of these formats:
β€’ "Analyze the sentiment of: 'your text here'"
β€’ "What's the sentiment of: 'your text here'"
β€’ "Sentiment analysis for: 'your text here'"
**Examples:**
β€’ Analyze the sentiment of: 'I love this product!'
β€’ What's the sentiment of: 'This is terrible'
β€’ Sentiment analysis for: 'It's okay, nothing special'"""
# Create a simple agent object
class SimpleAgent:
def run(self, message):
return simple_agent_run(message)
agent = SimpleAgent()
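        # SimpleAgent mimics the .run(message) interface of a smolagents CodeAgent,
        # so chat_fn below works the same regardless of which agent type is used.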
print("βœ… Agent created successfully!")
        # The simple agent needs no LLM model; just show the expected prompt format.
        print("ℹ️ Using simple direct agent (no LLM model required)")
        print("   Format: 'Analyze the sentiment of: \"your text here\"'")
def chat_fn(message, history):
"""Process chat messages through the MCP agent."""
try:
# Use the agent to process the message
result = agent.run(message)
return str(result)
except Exception as e:
return f"❌ Error processing message: {e}"
# Create the Gradio interface
demo = gr.ChatInterface(
fn=chat_fn,
type="messages",
examples=[
"Analyze the sentiment of: 'I love this product!'",
"What's the sentiment of: 'This is terrible'",
"Sentiment analysis for: 'It's okay, nothing special'",
"How positive is: 'The weather is beautiful today!'",
"Analyze: 'I'm feeling quite neutral about this'",
],
title="🎭 MCP Sentiment Analysis with smolagents",
description="""
This interface uses the Model Context Protocol (MCP) to perform sentiment analysis.
**How to use:**
- Type a message asking for sentiment analysis
- Example: "Analyze the sentiment of: 'Your text here'"
- The agent will use MCP tools to provide detailed sentiment analysis
**Powered by:**
- smolagents MCP client
- Hugging Face MCP server
- Based on HF MCP Course documentation
""",
)
# Store the client for cleanup
demo.mcp_client = mcp_client
return demo
except Exception as e:
error_msg = str(e)
print(f"❌ Failed to create MCP interface: {error_msg}")
        def connection_error_fn(message, history):
            return (
                f"❌ MCP connection failed: {error_msg}\n\n"
                "Troubleshooting:\n"
                "1. Check internet connection\n"
                "2. Verify MCP server is running\n"
                "3. Try again in a few moments"
            )
demo = gr.ChatInterface(
fn=connection_error_fn,
type="messages",
title="❌ MCP Sentiment Analysis (Connection Failed)",
description=f"Failed to connect to MCP server: {error_msg}",
)
return demo
def main():
"""Main function to run the Gradio interface."""
print("πŸš€ MCP Sentiment Analysis with Gradio + smolagents")
print("Based on: https://huggingface.co/learn/mcp-course/unit2/gradio-client")
print("=" * 70) # Print a separator line of 70 equals signs
if not SMOLAGENTS_AVAILABLE:
print("❌ smolagents not available")
print("Install with: pdm add 'smolagents[mcp]'")
print("Also install: pdm add 'gradio[mcp]'")
else:
print("\nπŸ’‘ For full LLM capabilities, set your Hugging Face token:")
print(" Windows: set HF_TOKEN=your_token_here")
print(" Linux/Mac: export HF_TOKEN=your_token_here")
print(" Or the script will use a simple direct approach.")
# Create and launch the interface
demo = create_sentiment_interface()
try:
print("\n🌐 Starting Gradio interface...")
print("Once started, you can:")
print("1. Open the provided URL in your browser")
print("2. Type sentiment analysis requests")
print("3. Try the example prompts")
print("4. Press Ctrl+C to stop")
print()
demo.launch(
share=False, # Set to True if you want a public link
server_name="localhost", # Use localhost for Windows compatibility
server_port=7861, # Different port to avoid conflicts
show_error=True,
)
except KeyboardInterrupt:
print("\n⏹️ Stopping Gradio interface...")
except Exception as e:
print(f"\n❌ Error launching Gradio: {e}")
finally:
# Clean up MCP client if it exists
if hasattr(demo, "mcp_client") and demo.mcp_client:
try:
demo.mcp_client.disconnect()
print("βœ… MCP client disconnected properly")
except Exception as e:
print(f"⚠️ Disconnect warning: {e}")
if __name__ == "__main__":
main()