Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -8,22 +8,22 @@ from tensorflow.keras.preprocessing.sequence import pad_sequences
|
|
8 |
import pickle
|
9 |
import re
|
10 |
|
11 |
-
# Load
|
12 |
gru_model = load_model("best_GRU_tuning_model.h5")
|
13 |
lstm_model = load_model("LSTM_model.h5")
|
14 |
bilstm_model = load_model("BiLSTM_model.h5")
|
15 |
|
16 |
-
# Load
|
17 |
with open("my_tokenizer.pkl", "rb") as f:
|
18 |
tokenizer = pickle.load(f)
|
19 |
|
20 |
-
|
21 |
def preprocess_text(text):
|
22 |
text = text.lower()
|
23 |
text = re.sub(r"[^a-zA-Z\s]", "", text).strip()
|
24 |
return text
|
25 |
|
26 |
-
|
27 |
def predict_with_gru(text):
|
28 |
cleaned = preprocess_text(text)
|
29 |
seq = tokenizer.texts_to_sequences([cleaned])
|
@@ -32,7 +32,7 @@ def predict_with_gru(text):
|
|
32 |
predicted_class = np.argmax(probs, axis=1)[0]
|
33 |
return int(predicted_class + 1)
|
34 |
|
35 |
-
|
36 |
def predict_with_lstm(text):
|
37 |
cleaned = preprocess_text(text)
|
38 |
seq = tokenizer.texts_to_sequences([cleaned])
|
@@ -41,7 +41,7 @@ def predict_with_lstm(text):
|
|
41 |
predicted_class = np.argmax(probs, axis=1)[0]
|
42 |
return int(predicted_class + 1)
|
43 |
|
44 |
-
|
45 |
def predict_with_bilstm(text):
|
46 |
cleaned = preprocess_text(text)
|
47 |
seq = tokenizer.texts_to_sequences([cleaned])
|
@@ -94,8 +94,11 @@ with gr.Blocks(css=".gradio-container { max-width: 900px; margin: auto; padding:
|
|
94 |
placeholder="Type your review here..."
|
95 |
)
|
96 |
|
|
|
|
|
|
|
97 |
with gr.Column():
|
98 |
-
|
99 |
|
100 |
with gr.Row():
|
101 |
with gr.Column():
|
@@ -135,5 +138,5 @@ with gr.Blocks(css=".gradio-container { max-width: 900px; margin: auto; padding:
|
|
135 |
]
|
136 |
)
|
137 |
|
138 |
-
|
139 |
demo.launch()
|
|
|
8 |
import pickle
|
9 |
import re
|
10 |
|
11 |
# Load the three pre-trained sentiment models (saved Keras HDF5 files
# expected alongside app.py in the Space's root directory).
gru_model = load_model("best_GRU_tuning_model.h5")
lstm_model = load_model("LSTM_model.h5")
bilstm_model = load_model("BiLSTM_model.h5")

# Load the tokenizer fitted at training time; its vocabulary must match
# what the three models above were trained with.
with open("my_tokenizer.pkl", "rb") as f:
    # NOTE(review): pickle.load is unsafe on untrusted files — acceptable
    # here only because the .pkl ships with the Space itself.
    tokenizer = pickle.load(f)
|
19 |
|
20 |
+
|
21 |
def preprocess_text(text):
|
22 |
text = text.lower()
|
23 |
text = re.sub(r"[^a-zA-Z\s]", "", text).strip()
|
24 |
return text
|
25 |
|
26 |
+
|
27 |
def predict_with_gru(text):
|
28 |
cleaned = preprocess_text(text)
|
29 |
seq = tokenizer.texts_to_sequences([cleaned])
|
|
|
32 |
predicted_class = np.argmax(probs, axis=1)[0]
|
33 |
return int(predicted_class + 1)
|
34 |
|
35 |
+
|
36 |
def predict_with_lstm(text):
|
37 |
cleaned = preprocess_text(text)
|
38 |
seq = tokenizer.texts_to_sequences([cleaned])
|
|
|
41 |
predicted_class = np.argmax(probs, axis=1)[0]
|
42 |
return int(predicted_class + 1)
|
43 |
|
44 |
+
|
45 |
def predict_with_bilstm(text):
|
46 |
cleaned = preprocess_text(text)
|
47 |
seq = tokenizer.texts_to_sequences([cleaned])
|
|
|
94 |
placeholder="Type your review here..."
|
95 |
)
|
96 |
|
97 |
+
analyze_button = gr.Button("Analyze Sentiment", variant="primary")
|
98 |
+
analyze_button.style(full_width=False)
|
99 |
+
|
100 |
with gr.Column():
|
101 |
+
statistics_output = gr.Textbox(label="Statistics (Lowest, Highest, Average)", interactive=False)
|
102 |
|
103 |
with gr.Row():
|
104 |
with gr.Column():
|
|
|
138 |
]
|
139 |
)
|
140 |
|
141 |
+
|
142 |
demo.launch()
|