|
import os |
|
import subprocess |
|
|
|
def install_packages(): |
|
packages = [ |
|
"torch", |
|
"transformers", |
|
"huggingface-hub", |
|
"gradio", |
|
"accelerate", |
|
"onnxruntime", |
|
"onnxruntime-tools", |
|
"optimum", |
|
] |
|
for package in packages: |
|
result = subprocess.run(f'pip install {package}', shell=True) |
|
if result.returncode != 0: |
|
print(f"Failed to install {package}") |
|
else: |
|
print(f"Successfully installed {package}") |
|
|
|
install_packages() |
|
|
|
import gradio as gr |
|
from huggingface_hub import login |
|
from optimum.onnxruntime import ORTModelForSequenceClassification |
|
from transformers import AutoTokenizer, pipeline |
|
|
|
model_id = "HassamAliCADI/SentimentOnx" |
|
hf_token = os.environ.get("NLP") |
|
|
|
if hf_token: |
|
login(hf_token) |
|
else: |
|
print("NLP token not found.") |
|
|
|
model = ORTModelForSequenceClassification.from_pretrained(model_id) |
|
tokenizer = AutoTokenizer.from_pretrained(model_id) |
|
|
|
|
|
|
|
pipe = pipeline(task="text-classification", model=model, tokenizer=tokenizer) |
|
|
|
def classify_text(text): |
|
|
|
|
|
|
|
results = pipe(text, return_all_scores=True) |
|
|
|
|
|
|
|
output = f"Sentence: {text}\n" |
|
|
|
|
|
sorted_results = sorted(results[0], key=lambda x: x['score'], reverse=True) |
|
|
|
|
|
for i, result in enumerate(sorted_results[:3]): |
|
output += f"Label {i+1}: {result['label']}, Score: {result['score']:.4f}\n" |
|
|
|
|
|
|
|
return output |
|
|
|
|
|
gr.Interface( |
|
fn=classify_text, |
|
title="Sentiment Classifier", |
|
description="Enter text to classify sentiment", |
|
inputs=gr.Textbox( |
|
label="Input Text", |
|
placeholder="Type something here..." |
|
), |
|
outputs=gr.Textbox( |
|
label="Classification Results" |
|
), |
|
examples=[ |
|
["I am deeply disappointed in your bad performance in last league match loss, and quite disappointed, sad because of it."], |
|
["I am very happy with your excellent performance!"] |
|
] |
|
).launch() |