# NOTE(review): the lines below were Hugging Face Spaces page chrome captured by the
# scrape (space status, file size, commit hash, gutter line numbers) — not program code.
# Kept as comments so the file parses:
#   Spaces: Sleeping
#   File size: 1,896 Bytes
#   commit e0a9d03
import json
import gradio as gr
from textblob import TextBlob
def call_model(text: str, model_type: str = "textblob"):
    """
    Run the selected sentiment backend on *text* and return its raw output.

    Args:
        text: The text to score.
        model_type: Which backend to use; "textblob" (default) or
            "transformer" (currently a stubbed placeholder).

    Returns:
        TextBlob's ``Sentiment`` namedtuple (polarity, subjectivity) for the
        "textblob" backend, or a ``{"label", "score"}`` dict for "transformer".

    Raises:
        ValueError: If *model_type* is not a recognized backend name.
    """
    if model_type == "transformer":
        # Placeholder for future integration
        return {"label": "POSITIVE", "score": 0.98}
    if model_type == "textblob":
        analysis = TextBlob(text)
        # Sentiment is a namedtuple(polarity, subjectivity)
        return analysis.sentiment
    raise ValueError(f"Unsupported model type: {model_type}")
def sentiment_analysis(text: str) -> str:
    """
    Analyze the sentiment of the given text.

    Args:
        text (str): The text to analyze

    Returns:
        str: A JSON string containing polarity, subjectivity, and assessment
    """
    raw = call_model(text, model_type="textblob")

    if not isinstance(raw, tuple):
        # Future: handle ML-based sentiment output (e.g. the transformer dict)
        payload = raw
    else:
        # TextBlob path: its Sentiment is a namedtuple, hence the tuple check.
        polarity_score = round(raw.polarity, 2)
        subjectivity_score = round(raw.subjectivity, 2)
        if polarity_score > 0:
            label = "positive"
        elif polarity_score < 0:
            label = "negative"
        else:
            label = "neutral"
        payload = {
            "polarity": polarity_score,
            "subjectivity": subjectivity_score,
            "assessment": label
        }
    return json.dumps(payload)
# Create the Gradio interface around sentiment_analysis.
demo = gr.Interface(
    fn=sentiment_analysis,
    inputs=gr.Textbox(placeholder="Enter text to analyze..."),
    outputs=gr.Textbox(),  # plain textbox: sentiment_analysis already returns a JSON *string*
    title="Text Sentiment Analysis",
    description="Analyze the sentiment of text using TextBlob"
)
# Launch the Gradio interface and expose it as an MCP server.
# Fix: the original final line carried a stray " |" scrape artifact,
# which is a syntax error in Python.
if __name__ == "__main__":
    demo.launch(mcp_server=True)