import gradio as gr
from transformers import pipeline
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
# Initialize the sentiment analysis pipeline with a multilingual model.
# bert-base-multilingual-cased has no sentiment classification head, so a checkpoint
# fine-tuned for multilingual sentiment is used instead.
sentiment_analysis = pipeline("sentiment-analysis", model="nlptown/bert-base-multilingual-uncased-sentiment")
def analyze_sentiment(text):
    # The pipeline returns a list with one dict per input: {'label': ..., 'score': ...}
    result = sentiment_analysis(text)
    return f"{result[0]['label']} (score: {result[0]['score']:.2f})"
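# Illustrative usage (the exact label strings depend on the checkpoint chosen above):
#   analyze_sentiment("This film was wonderful")  ->  e.g. "5 stars (score: 0.87)"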
# Compute evaluation metrics with scikit-learn.
# Note: roc_curve normally expects predicted scores/probabilities; hard 0/1 labels
# are used here only as a placeholder.
def calculate_metrics(y_true, y_pred):
    accuracy = accuracy_score(y_true, y_pred)
    precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='binary')
    cm = confusion_matrix(y_true, y_pred)
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)
    return accuracy, precision, recall, f1, cm, fpr, tpr, roc_auc
def plot_confusion_matrix(cm):
    # Render the confusion matrix as a heatmap and save it to disk
    fig, ax = plt.subplots()
    ax.imshow(cm, cmap="Blues")
    ax.set(xlabel="Predicted label", ylabel="True label", title="Confusion Matrix")
    fig.savefig("confusion_matrix.png")
def plot_roc_curve(fpr, tpr, roc_auc):
    # Plot the ROC curve, annotate it with the AUC, and save it to disk
    fig, ax = plt.subplots()
    ax.plot(fpr, tpr)
    ax.set(xlabel="False Positive Rate", ylabel="True Positive Rate", title=f"ROC Curve (AUC = {roc_auc:.2f})")
    fig.savefig("roc_curve.png")
# Replace this with actual test data and predictions
y_true = [0, 1, 0, 1]  # True labels
y_pred = [0, 1, 0, 1]  # Predicted labels
# Calculate metrics
accuracy, precision, recall, f1, cm, fpr, tpr, roc_auc = calculate_metrics(y_true, y_pred)
# Plot confusion matrix and ROC curve
plot_confusion_matrix(cm)
plot_roc_curve(fpr, tpr, roc_auc)
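# Optional sketch: print the computed metrics to the console. With the placeholder
# labels above these values are not meaningful; this only shows one way to report them.
print(f"Accuracy: {accuracy:.2f}  Precision: {precision:.2f}  Recall: {recall:.2f}  F1: {f1:.2f}")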
# Create a Gradio interface
interface = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Enter Text Here..."),  # gr.inputs.* was removed in current Gradio releases
    outputs="text"
)
interface.launch()
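# Note: when running locally, launch(share=True) provides a temporary public link;
# on Hugging Face Spaces the default launch() settings are sufficient.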