Commit: a700ccf
Parent(s): e13680c

ok

Files changed:
- app.py (+1, -1)
- inference.py (+2, -2)
app.py CHANGED
@@ -15,7 +15,7 @@ demo = gr.Interface(
     ],
     outputs=gr.Textbox(label="Extracted Aspect-Sentiment-Opinion Triplets"),
     title="Arabic ABSA (Aspect-Based Sentiment Analysis)",
-    description="Choose a model (mT5, mBART, GPT) to extract aspects, opinions, and sentiment using LoRA adapters"
+    description="Choose a model (Araberta, mT5, mBART, GPT) to extract aspects, opinions, and sentiment using LoRA adapters"
 )
 
 if __name__ == "__main__":
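For orientation, a minimal sketch of the gr.Interface block this hunk edits is shown below. Only the outputs, title, and description lines come from the diff itself; the function name predict_absa, the input components and their labels, and the demo.launch() call are assumptions inferred from the inference.py hunks and typical Gradio usage, not confirmed by the commit.

# Hypothetical reconstruction of the surrounding app.py block; assumed pieces are marked.
import gradio as gr

from inference import predict_absa  # assumed import; predict_absa appears in the inference.py hunk

demo = gr.Interface(
    fn=predict_absa,  # assumed
    inputs=[
        gr.Textbox(label="Arabic review text"),                           # assumed input component
        gr.Dropdown(["Araberta", "mT5", "mBART", "GPT"], label="Model"),  # assumed, matches the new description
    ],
    outputs=gr.Textbox(label="Extracted Aspect-Sentiment-Opinion Triplets"),
    title="Arabic ABSA (Aspect-Based Sentiment Analysis)",
    description="Choose a model (Araberta, mT5, mBART, GPT) to extract aspects, opinions, and sentiment using LoRA adapters",
)

if __name__ == "__main__":
    demo.launch()  # assumed launch call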
inference.py CHANGED
@@ -6,7 +6,7 @@ from transformers import AutoTokenizer, AutoModelForTokenClassification
 
 # Define supported models and their adapter IDs
 MODEL_OPTIONS = {
-    "
+    "Araberta": {
         "base": "asmashayea/absa-araberta",
         "adapter": "asmashayea/absa-araberta"
     },
@@ -56,7 +56,7 @@ def predict_absa(text, model_choice):
     elif model_choice == 'mBART':
         decoded = infer_mBart_prompt(text, tokenizer, model)
 
-    elif model_choice == '
+    elif model_choice == 'Araberta':
 
         model = AutoModelForTokenClassification.from_pretrained("asmashayea/absa-araberta")
         tokenizer = AutoTokenizer.from_pretrained("asmashayea/absa-araberta")