ahmedyoussef1 committed · verified
Commit 0746ad2 · 1 Parent(s): b157a0a

Update app.py

Files changed (1)
  1. app.py +14 -45
app.py CHANGED
@@ -1,54 +1,23 @@
  import tensorflow as tf
- import numpy as np
- import gradio as gr
  from transformers import BertTokenizer

- # Load the tokenizer
+ # Load the tokenizer from the files in the same directory
  tokenizer = BertTokenizer.from_pretrained(".")

  # Load the model
  model = tf.keras.models.load_model("rnn_Bi.h5")
-
- # Prediction function
- def predict_sentiment(text):
-     if not text.strip():
-         return "⛔ Please enter some text."
-
-     # Prepare the input
-     tokens = tokenizer(
-         text,
-         padding='max_length',
-         truncation=True,
-         max_length=128,
-         return_tensors="np"
-     )
-     input_ids = tokens["input_ids"]
-     attention_mask = tokens["attention_mask"]
-
-     # Predict
-     prediction = model.predict([input_ids, attention_mask])[0][0]
-     label = int(prediction > 0.5)
-
-     # Convert the result to text
-     if label == 1:
-         return "🌟 Positive Sentiment 😊"
-     else:
-         return "😞 Negative Sentiment"
-
- # Gradio interface
- interface = gr.Interface(
-     fn=predict_sentiment,
-     inputs=gr.Textbox(lines=3, placeholder="Write your sentence here...", label="Enter Text"),
-     outputs=gr.Textbox(label="Prediction"),
-     title="Sentiment Analysis - RNN BiLSTM",
-     description="This model predicts whether the input sentence has a positive or negative sentiment.",
-     theme="soft",
-     examples=[
-         ["I love this product!"],
-         ["I am very disappointed with the service."],
-         ["It was okay, not bad."],
-         ["Absolutely fantastic experience!"]
-     ]
+ print("✅ Model loaded successfully!")
+
+ # Simple prediction test
+ test_sentence = "I love this movie!"
+ tokens = tokenizer(
+     test_sentence,
+     padding='max_length',
+     truncation=True,
+     max_length=128,
+     return_tensors="np"
  )

- interface.launch()
+ input_ids = tokens["input_ids"]
+ prediction = model.predict(input_ids)[0][0]
+ print(f"Prediction: {prediction}")