import gradio as gr
from transformers import pipeline
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
# Initialize the sentiment analysis pipeline with a multilingual model.
# Note: bert-base-multilingual-cased is a base checkpoint with no fine-tuned sentiment head,
# so a fine-tuned model such as "nlptown/bert-base-multilingual-uncased-sentiment" is usually preferable.
sentiment_analysis = pipeline("sentiment-analysis", model="bert-base-multilingual-cased")
def analyze_sentiment(text):
    # Run the pipeline and return the top prediction (a dict with 'label' and 'score')
    result = sentiment_analysis(text)
    return result[0]
# Compute evaluation metrics from true and predicted labels
def calculate_metrics(y_true, y_pred):
    accuracy = accuracy_score(y_true, y_pred)
    precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='binary')
    cm = confusion_matrix(y_true, y_pred)
    # Note: roc_curve expects continuous scores; with hard 0/1 predictions the curve is degenerate
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)
    return accuracy, precision, recall, f1, cm, fpr, tpr, roc_auc
def plot_confusion_matrix(cm):
    # Render the confusion matrix as a heatmap and save it to disk
    plt.figure()
    plt.imshow(cm, interpolation="nearest", cmap="Blues")
    plt.title("Confusion Matrix")
    plt.savefig("confusion_matrix.png")
def plot_roc_curve(fpr, tpr, roc_auc):
    # Plot the ROC curve annotated with its AUC and save it to disk
    plt.figure()
    plt.plot(fpr, tpr, label=f"ROC curve (AUC = {roc_auc:.2f})")
    plt.legend(loc="lower right")
    plt.savefig("roc_curve.png")
# Replace this with actual test data and predictions (see the illustrative sketch below)
y_true = [0, 1, 0, 1] # True labels
y_pred = [0, 1, 0, 1] # Predicted labels
# Calculate metrics
accuracy, precision, recall, f1, cm, fpr, tpr, roc_auc = calculate_metrics(y_true, y_pred)
# Plot confusion matrix and ROC curve
plot_confusion_matrix(cm)
plot_roc_curve(fpr, tpr, roc_auc)
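# Illustrative sketch of how the placeholder labels above could come from real predictions:
# run the pipeline over a small hand-labelled evaluation set and map its output label names
# to 0/1. The example texts, the ground-truth labels, and the "NEGATIVE"/"POSITIVE" label
# names are assumptions for illustration and depend on the model actually used.
eval_texts = ["I love this product!", "This was a terrible experience.",
              "Absolutely wonderful.", "I want my money back."]
eval_true = [1, 0, 1, 0]  # hand-assigned ground truth: 1 = positive, 0 = negative
label_to_int = {"NEGATIVE": 0, "POSITIVE": 1}
eval_pred = [label_to_int.get(sentiment_analysis(t)[0]["label"], 0) for t in eval_texts]
# eval_true / eval_pred could then be passed to calculate_metrics in place of y_true / y_pred.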
# Create a Gradio interface
interface = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Enter Text Here..."),
    outputs="text"
)
interface.launch()