Update app.py
app.py CHANGED
@@ -1,54 +1,23 @@
 import tensorflow as tf
-import numpy as np
-import gradio as gr
 from transformers import BertTokenizer
 
-# Load
+# Load the tokenizer from the files in the same directory
 tokenizer = BertTokenizer.from_pretrained(".")
 
 # Load the model
 model = tf.keras.models.load_model("rnn_Bi.h5")
-
-
-
-
-
-
-
-
-
-
-        truncation=True,
-        max_length=128,
-        return_tensors="np"
-    )
-    input_ids = tokens["input_ids"]
-    attention_mask = tokens["attention_mask"]
-
-    # Prediction
-    prediction = model.predict([input_ids, attention_mask])[0][0]
-    label = int(prediction > 0.5)
-
-    # Convert the result to text
-    if label == 1:
-        return "🌟 Positive Sentiment 😊"
-    else:
-        return "😞 Negative Sentiment"
-
-# Gradio interface
-interface = gr.Interface(
-    fn=predict_sentiment,
-    inputs=gr.Textbox(lines=3, placeholder="Write your sentence here...", label="Enter Text"),
-    outputs=gr.Textbox(label="Prediction"),
-    title="Sentiment Analysis - RNN BiLSTM",
-    description="This model predicts whether the input sentence has a positive or negative sentiment.",
-    theme="soft",
-    examples=[
-        ["I love this product!"],
-        ["I am very disappointed with the service."],
-        ["It was okay, not bad."],
-        ["Absolutely fantastic experience!"]
-    ]
+print("✅ Model loaded successfully!")
+
+# Simple prediction test
+test_sentence = "I love this movie!"
+tokens = tokenizer(
+    test_sentence,
+    padding='max_length',
+    truncation=True,
+    max_length=128,
+    return_tensors="np"
 )
 
-
+input_ids = tokens["input_ids"]
+prediction = model.predict(input_ids)[0][0]
+print(f"Prediction: {prediction}")
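One behavioral difference worth flagging: the removed Gradio handler fed the model both tensors via model.predict([input_ids, attention_mask]), while the new test passes input_ids alone. Whether a single array is enough depends on how many inputs the saved rnn_Bi.h5 actually declares. Below is a minimal sketch, not part of the commit, that assumes the same file layout (rnn_Bi.h5 and the tokenizer files in the working directory) and checks model.inputs before predicting, keeping the old 0.5 cutoff.

# Sketch only: confirm what the saved model expects before predicting.
# Assumes rnn_Bi.h5 and the tokenizer files sit in the working directory, as in app.py.
import tensorflow as tf
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(".")
model = tf.keras.models.load_model("rnn_Bi.h5")

# A two-input model (ids + mask) lists two tensors here; a single-input model lists one.
print([t.shape for t in model.inputs])

tokens = tokenizer(
    "I love this movie!",
    padding="max_length",
    truncation=True,
    max_length=128,
    return_tensors="np",
)

# Feed one array or both, depending on how many inputs the model declares.
if len(model.inputs) == 2:
    prediction = model.predict([tokens["input_ids"], tokens["attention_mask"]])[0][0]
else:
    prediction = model.predict(tokens["input_ids"])[0][0]

# Same 0.5 threshold the removed Gradio handler used to map the score to a label.
label = "Positive" if prediction > 0.5 else "Negative"
print(f"Prediction: {prediction:.4f} -> {label}")

If the model turns out to declare two inputs, the one-argument predict call in the new app.py will fail at predict time, so the check above doubles as a quick sanity test right after loading.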