# Sentiment / app.py
# Author: hassamniaz7 — commit db0c252 (verified): "Update app.py"
import os
import subprocess
import sys
def install_packages():
    """Install the app's runtime dependencies with pip, one package at a time.

    Prints a success/failure line per package; a single failure does not
    abort the remaining installs.
    """
    packages = [
        "torch",
        "transformers",
        "huggingface-hub",
        "gradio",
        "accelerate",
        "onnxruntime",
        "onnxruntime-tools",
        "optimum",
    ]
    for package in packages:
        # Invoke pip via the current interpreter with an argument list
        # (shell=False): guarantees the same Python environment and keeps
        # package names from ever being interpreted by a shell.
        result = subprocess.run([sys.executable, "-m", "pip", "install", package])
        if result.returncode != 0:
            print(f"Failed to install {package}")
        else:
            print(f"Successfully installed {package}")
install_packages()
import gradio as gr
from huggingface_hub import login
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer, pipeline
# Hub repo holding the ONNX-exported sequence-classification model + tokenizer.
model_id = "HassamAliCADI/SentimentOnx"
# Hub access token is read from the "NLP" environment variable (e.g. a
# Space secret); without it, private-repo downloads will fail.
hf_token = os.environ.get("NLP")
if hf_token:
    login(hf_token)
else:
    print("NLP token not found.")
# Load the ONNX model through onnxruntime and the matching tokenizer.
model = ORTModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# NOTE(review): the pipeline itself does not limit labels; classify_text()
# sorts the scores and keeps only the top 3 when formatting its output.
pipe = pipeline(task="text-classification", model=model, tokenizer=tokenizer)
def classify_text(text):
    """Classify *text* and format the top 3 sentiment labels as a string.

    Args:
        text: The input sentence to classify.

    Returns:
        A multi-line string: the sentence, then up to three
        "Label i: <label>, Score: <score>" lines, best score first.
    """
    # top_k=None returns the score for every label; it replaces the
    # deprecated return_all_scores=True, which newer transformers reject.
    # For a single string input this yields a flat list of
    # {"label": ..., "score": ...} dicts.
    results = pipe(text, top_k=None)
    sorted_results = sorted(results, key=lambda x: x['score'], reverse=True)
    lines = [f"Sentence: {text}"]
    for i, result in enumerate(sorted_results[:3]):  # keep only the top 3
        lines.append(f"Label {i+1}: {result['label']}, Score: {result['score']:.4f}")
    return "\n".join(lines) + "\n"
# Build the Gradio UI: one text box in, the formatted classification
# results out, with two ready-made example sentences.
input_box = gr.Textbox(
    label="Input Text",
    placeholder="Type something here..."
)
output_box = gr.Textbox(
    label="Classification Results"
)
demo = gr.Interface(
    fn=classify_text,
    title="Sentiment Classifier",
    description="Enter text to classify sentiment",
    inputs=input_box,
    outputs=output_box,
    examples=[
        ["I am deeply disappointed in your bad performance in last league match loss, and quite disappointed, sad because of it."],
        ["I am very happy with your excellent performance!"]
    ]
)
demo.launch()