arjahojnik commited on
Commit
79ce31f
·
verified ·
1 Parent(s): 90e3254

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -14
app.py CHANGED
@@ -3,31 +3,95 @@ import numpy as np
3
  from tensorflow.keras.models import load_model
4
  from tensorflow.keras.preprocessing.sequence import pad_sequences
5
  import pickle
6
- import re
7
 
8
# Load the trained GRU model and the tokenizer that was fitted with it.
model = load_model("best_GRU_tuning_model.h5")
with open("my_tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)
12
 
 
 
 
13
 
 
14
def preprocess_text(text):
    """Normalize raw review text: lowercase it and keep only letters/whitespace."""
    lowered = text.lower()
    letters_only = re.sub(r'[^a-zA-Z\s]', '', lowered)
    return letters_only.strip()
18
 
19
-
20
def predict_sentiment(raw_text):
    """Clean *raw_text*, run it through the loaded GRU model, and format the result.

    Returns a display string with the 1-based predicted rating and the
    full probability vector for the single input review.
    """
    cleaned = preprocess_text(raw_text)
    sequences = tokenizer.texts_to_sequences([cleaned])
    padded = pad_sequences(sequences, maxlen=200)
    probabilities = model.predict(padded)
    # argmax gives a 0-based class index; ratings are 1-based.
    rating = np.argmax(probabilities, axis=1)[0] + 1
    return f"Predicted rating: {rating} (probabilities={probabilities[0]})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
# Minimal text-in / label-out UI around the single-model predictor.
demo = gr.Interface(
    fn=predict_sentiment,
    inputs="text",
    outputs="label",
)
demo.launch()
 
3
import pickle
import re  # BUG FIX: `re` is used by preprocess_text below but its import was dropped

from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the three trained RNN variants so every review can be scored by each.
gru_model = load_model("best_GRU_tuning_model.h5")
lstm_model = load_model("lstm_model.h5")
bilstm_model = load_model("bilstm_model.h5")

# Load the tokenizer that was fitted alongside the models; its vocabulary
# must match what the models were trained on.
with open("my_tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)
15
 
16
def preprocess_text(text):
    """Lowercase *text* and strip every character that is not a letter or whitespace."""
    cleaned = re.sub(r'[^a-zA-Z\s]', '', text.lower())
    return cleaned.strip()
21
 
22
def predict_sentiment(model, text):
    """Score *text* with *model* and return (rating, probability vector).

    The rating is 1-based (argmax class index + 1); the probability vector
    is the model's raw softmax output for this single review.
    """
    cleaned = preprocess_text(text)
    encoded = tokenizer.texts_to_sequences([cleaned])
    padded = pad_sequences(encoded, maxlen=200)
    probabilities = model.predict(padded)
    top_class = np.argmax(probabilities, axis=1)[0]
    return top_class + 1, probabilities[0]
31
+
32
# Main prediction function
def predict_all_models(text):
    """Run the review through all three models and summarize the ratings.

    Returns four display strings, in the order the UI's output components
    expect them: GRU result, LSTM result, BiLSTM result, statistics line.
    """
    gru_rating, gru_probs = predict_sentiment(gru_model, text)
    lstm_rating, lstm_probs = predict_sentiment(lstm_model, text)
    bilstm_rating, bilstm_probs = predict_sentiment(bilstm_model, text)

    # Aggregate the three 1-based ratings.
    ratings = [gru_rating, lstm_rating, bilstm_rating]
    lowest = min(ratings)
    highest = max(ratings)
    average = sum(ratings) / len(ratings)

    # BUG FIX: previously returned a str-keyed dict, but the click handler
    # lists four Textbox components as outputs. Gradio maps a tuple/list
    # positionally onto those components; a dict return must be keyed by the
    # component objects themselves, so the old form failed at runtime.
    return (
        f"Predicted Rating: {gru_rating} (Probabilities: {gru_probs})",
        f"Predicted Rating: {lstm_rating} (Probabilities: {lstm_probs})",
        f"Predicted Rating: {bilstm_rating} (Probabilities: {bilstm_probs})",
        f"Lowest: {lowest}, Highest: {highest}, Average: {average:.2f}",
    )
56
 
57
# Gradio interface
def create_interface():
    """Build and return the Gradio Blocks UI for the three-model predictor."""
    with gr.Blocks() as demo:
        gr.Markdown("# Sentiment Analysis App")
        gr.Markdown("Predict the sentiment of your text review using RNN-based models.")

        with gr.Row():
            text_input = gr.Textbox(label="Enter your text here:", placeholder="Type your review here...")

        # BUG FIX: `sample_review` was referenced in the .change() handler
        # below but never defined, raising NameError as soon as the Blocks
        # context was built. Define it as a dropdown of example reviews.
        with gr.Row():
            sample_review = gr.Dropdown(
                label="Or pick a sample review:",
                choices=[
                    "This product is amazing, absolutely love it!",
                    "It was okay, nothing special.",
                    "Terrible quality, would not recommend.",
                ],
            )

        with gr.Row():
            gr.Markdown("### Predicted Sentiment")
            gru_output = gr.Textbox(label="GRU Model")
            lstm_output = gr.Textbox(label="LSTM Model")
            bilstm_output = gr.Textbox(label="BiLSTM Model")

        with gr.Row():
            gr.Markdown("### Statistics")
            stats_output = gr.Textbox(label="Lowest, Highest, Average")

        # Button to predict
        predict_button = gr.Button("Predict Sentiment")

        # Run all three models on click; results map positionally onto the
        # four output Textboxes.
        predict_button.click(
            fn=predict_all_models,
            inputs=text_input,
            outputs=[gru_output, lstm_output, bilstm_output, stats_output]
        )

        # Copy the chosen sample review into the main input box.
        sample_review.change(
            fn=lambda x: x,
            inputs=sample_review,
            outputs=text_input
        )

    return demo
93
 
94
# Build and serve the Gradio app only when executed as a script.
if __name__ == "__main__":
    create_interface().launch()