Create app.py
app.py
ADDED
@@ -0,0 +1,75 @@
+import gradio as gr
+import torch
+from transformers import AutoTokenizer, AlbertForSequenceClassification
+import os
+
+# Define paths to saved models
+save_dir = "./saved_models"
+tasks = ["sentiment", "emotion", "hate_speech", "sarcasm"]
+model_paths = {task: f"{save_dir}/{task}" for task in tasks}
+
+# Define label mappings
+label_mappings = {
+    "sentiment": ["negative", "neutral", "positive"],
+    "emotion": ["happy", "sad", "angry", "fear"],
+    "hate_speech": ["no", "yes"],
+    "sarcasm": ["no", "yes"]
+}
+
+# Load the shared tokenizer
+tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indic-bert")
+
+# Load one fine-tuned model per task and switch it to eval mode for inference
+models = {}
+for task in tasks:
+    if not os.path.exists(model_paths[task]):
+        raise FileNotFoundError(f"Model directory {model_paths[task]} not found. Ensure saved_models/{task} exists.")
+    models[task] = AlbertForSequenceClassification.from_pretrained(model_paths[task])
+    models[task].eval()
+
+# Predict label probabilities for a single task
+def predict_task(text, task, model, tokenizer, max_length=128):
+    inputs = tokenizer(
+        text,
+        padding=True,
+        truncation=True,
+        max_length=max_length,
+        return_tensors="pt"
+    )
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+    logits = outputs.logits
+    probabilities = torch.softmax(logits, dim=1).squeeze().cpu().numpy()
+
+    labels = label_mappings[task]
+    return {label: f"{prob*100:.2f}%" for label, prob in zip(labels, probabilities)}
+
+# Gradio interface function: run every task on the input text
+def predict_all_tasks(text):
+    if not text.strip():
+        return "Please enter some text."
+
+    results = {}
+    for task in tasks:
+        results[task] = predict_task(text, task, models[task], tokenizer)
+
+    output = ""
+    for task, probs in results.items():
+        output += f"\n{task.capitalize()} Prediction:\n"
+        for label, prob in probs.items():
+            output += f"  {label}: {prob}\n"
+
+    return output.strip()
+
+# Create the Gradio interface
+iface = gr.Interface(
+    fn=predict_all_tasks,
+    inputs=gr.Textbox(lines=2, placeholder="Enter Telugu text here..."),
+    outputs="text",
+    title="Telugu Text Analysis",
+    description="Enter Telugu text to predict sentiment, emotion, hate speech, and sarcasm."
+)
+
+if __name__ == "__main__":
+    iface.launch(server_name="0.0.0.0", server_port=7860)
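
Once the app is running, it can also be queried programmatically rather than through the browser UI. A minimal sketch using the gradio_client package, assuming the server is reachable at http://localhost:7860 and that the Interface exposes the default /predict endpoint (both are assumptions, not part of the file above):

    from gradio_client import Client

    # Hypothetical local URL; replace with the deployed Space URL if needed.
    client = Client("http://localhost:7860")

    # The single Textbox input maps to one positional argument.
    result = client.predict("sample Telugu text", api_name="/predict")
    print(result)  # formatted predictions for all four tasks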