import json

import gradio as gr
from textblob import TextBlob


def call_model(text: str, model_type: str = "textblob"):
    """
    Return raw sentiment analysis output from the selected model.
    """
    if model_type == "textblob":
        blob = TextBlob(text)
        return blob.sentiment  # namedtuple(polarity, subjectivity)
    elif model_type == "transformer":
        # Placeholder for future integration
        return {"label": "POSITIVE", "score": 0.98}
    else:
        raise ValueError(f"Unsupported model type: {model_type}")

def sentiment_analysis(text: str) -> str:
    """
    Analyze the sentiment of the given text.

    Args:
        text (str): The text to analyze

    Returns:
        str: A JSON string containing polarity, subjectivity, and assessment
    """
    sentiment = call_model(text, model_type="textblob")

    # Handle TextBlob response (a namedtuple, so the simple tuple check matches)
    if isinstance(sentiment, tuple):
        polarity = round(sentiment.polarity, 2)
        subjectivity = round(sentiment.subjectivity, 2)
        assessment = (
            "positive" if polarity > 0 else
            "negative" if polarity < 0 else
            "neutral"
        )
        result = {
            "polarity": polarity,
            "subjectivity": subjectivity,
            "assessment": assessment,
        }
    else:
        # Future: handle ML-based sentiment output (already a dict)
        result = sentiment

    return json.dumps(result)
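
# Illustrative call (treat the values as a shape example, since the exact numbers
# depend on TextBlob's lexicon):
#   sentiment_analysis("I love this!")
#   -> '{"polarity": <float in [-1, 1]>, "subjectivity": <float in [0, 1]>, "assessment": "positive"}'
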

# Create the Gradio interface
demo = gr.Interface(
    fn=sentiment_analysis,
    inputs=gr.Textbox(placeholder="Enter text to analyze..."),
    outputs=gr.Textbox(),  # Changed from gr.JSON() to gr.Textbox()
    title="Text Sentiment Analysis",
    description="Analyze the sentiment of text using TextBlob",
)

# Launch the interface and the MCP server
if __name__ == "__main__":
    demo.launch(mcp_server=True)
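
# Notes (assumptions based on Gradio's MCP documentation, not part of the original file):
# - mcp_server=True requires the MCP extra: pip install "gradio[mcp]"
# - The UI is served on the usual Gradio port (7860 by default); the MCP endpoint is
#   typically exposed at http://<host>:<port>/gradio_api/mcp/sse, which makes
#   sentiment_analysis callable as an MCP tool. Verify the path for your Gradio version.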