arjahojnik committed on
Commit
a2b5272
·
verified ·
1 Parent(s): a83252c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -30
app.py CHANGED
@@ -4,94 +4,81 @@ from tensorflow.keras.models import load_model
4
  from tensorflow.keras.preprocessing.sequence import pad_sequences
5
  import pickle
6
 
7
- # Load your models
8
  gru_model = load_model("best_GRU_tuning_model.h5")
9
  lstm_model = load_model("lstm_model.h5")
10
  bilstm_model = load_model("bilstm_model.h5")
11
 
12
- # Load your tokenizer
13
  with open("my_tokenizer.pkl", "rb") as f:
14
  tokenizer = pickle.load(f)
15
 
16
- # Preprocess text
17
  def preprocess_text(text):
18
  text = text.lower()
19
  text = re.sub(r'[^a-zA-Z\s]', '', text).strip()
20
  return text
21
 
22
- # Predict sentiment using a model
23
  def predict_sentiment(model, text):
24
  cleaned = preprocess_text(text)
25
  seq = tokenizer.texts_to_sequences([cleaned])
26
  padded_seq = pad_sequences(seq, maxlen=200)
27
  probs = model.predict(padded_seq)
28
  predicted_class = np.argmax(probs, axis=1)[0]
29
- rating = predicted_class + 1
30
  return rating, probs[0]
31
 
32
- # Main prediction function
33
  def predict_all_models(text):
34
- # Predict with GRU
35
  gru_rating, gru_probs = predict_sentiment(gru_model, text)
36
- # Predict with LSTM
37
  lstm_rating, lstm_probs = predict_sentiment(lstm_model, text)
38
- # Predict with BiLSTM
39
  bilstm_rating, bilstm_probs = predict_sentiment(bilstm_model, text)
40
-
41
- # Calculate statistics
42
  ratings = [gru_rating, lstm_rating, bilstm_rating]
43
  lowest = min(ratings)
44
  highest = max(ratings)
45
  average = sum(ratings) / len(ratings)
46
-
47
- # Format results
48
  results = {
49
  "GRU Model": f"Predicted Rating: {gru_rating} (Probabilities: {gru_probs})",
50
  "LSTM Model": f"Predicted Rating: {lstm_rating} (Probabilities: {lstm_probs})",
51
  "BiLSTM Model": f"Predicted Rating: {bilstm_rating} (Probabilities: {bilstm_probs})",
52
  "Statistics": f"Lowest: {lowest}, Highest: {highest}, Average: {average:.2f}"
53
  }
54
-
55
  return results
56
 
57
- # Gradio interface
58
  def create_interface():
59
  with gr.Blocks() as demo:
60
  gr.Markdown("# Sentiment Analysis App")
61
  gr.Markdown("Predict the sentiment of your text review using RNN-based models.")
62
-
63
  with gr.Row():
64
  text_input = gr.Textbox(label="Enter your text here:", placeholder="Type your review here...")
65
-
66
  with gr.Row():
67
  gr.Markdown("### Predicted Sentiment")
68
  gru_output = gr.Textbox(label="GRU Model")
69
  lstm_output = gr.Textbox(label="LSTM Model")
70
  bilstm_output = gr.Textbox(label="BiLSTM Model")
71
-
72
  with gr.Row():
73
  gr.Markdown("### Statistics")
74
  stats_output = gr.Textbox(label="Lowest, Highest, Average")
75
-
76
- # Button to predict
77
  predict_button = gr.Button("Predict Sentiment")
78
-
79
- # Event handlers
80
  predict_button.click(
81
  fn=predict_all_models,
82
  inputs=text_input,
83
  outputs=[gru_output, lstm_output, bilstm_output, stats_output]
84
  )
85
-
86
- sample_review.change(
87
- fn=lambda x: x,
88
- inputs=sample_review,
89
- outputs=text_input
90
- )
91
-
92
  return demo
93
 
94
- # Launch the app
95
  if __name__ == "__main__":
96
  demo = create_interface()
97
  demo.launch()
 
4
  from tensorflow.keras.preprocessing.sequence import pad_sequences
5
  import pickle
6
 
7
+
8
# Load the three pre-trained Keras sentiment models from local .h5 files.
# NOTE(review): assumes the files sit next to app.py — confirm deployment layout.
gru_model = load_model("best_GRU_tuning_model.h5")
lstm_model = load_model("lstm_model.h5")
bilstm_model = load_model("bilstm_model.h5")

# Restore the tokenizer fitted during training so inference uses the same
# vocabulary/index mapping as the saved models.
with open("my_tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)
15
 
16
+
17
def preprocess_text(text):
    """Normalize raw review text for tokenization.

    Lowercases the input, strips every character that is not an ASCII
    letter or whitespace, and trims surrounding whitespace.
    """
    lowered = text.lower()
    letters_only = re.sub(r'[^a-zA-Z\s]', '', lowered)
    return letters_only.strip()
21
 
22
+
23
def predict_sentiment(model, text):
    """Return ``(rating, class_probabilities)`` for *text* using *model*.

    The text is cleaned, converted to a token sequence with the
    module-level ``tokenizer``, padded to length 200, and fed to the
    Keras model. The argmax class index is shifted by one to give a
    1-based rating (presumably 1-5 stars — confirm with training labels).
    """
    token_seq = tokenizer.texts_to_sequences([preprocess_text(text)])
    model_input = pad_sequences(token_seq, maxlen=200)
    probabilities = model.predict(model_input)
    best_class = np.argmax(probabilities, axis=1)[0]
    return best_class + 1, probabilities[0]
31
 
32
+
33
def predict_all_models(text):
    """Run the review text through all three RNN models and aggregate results.

    Args:
        text: Raw review text entered by the user.

    Returns:
        A 4-tuple of display strings in the order the Gradio click handler
        expects: (GRU result, LSTM result, BiLSTM result, statistics).

    Fix: Gradio maps a tuple/list return positionally onto the handler's
    ``outputs`` list; the previous string-keyed dict return did not match
    any output component (dict returns must be keyed by component objects),
    so the click handler could not display the results.
    """
    gru_rating, gru_probs = predict_sentiment(gru_model, text)
    lstm_rating, lstm_probs = predict_sentiment(lstm_model, text)
    bilstm_rating, bilstm_probs = predict_sentiment(bilstm_model, text)

    # Simple ensemble statistics over the three integer ratings.
    ratings = [gru_rating, lstm_rating, bilstm_rating]
    lowest = min(ratings)
    highest = max(ratings)
    average = sum(ratings) / len(ratings)

    return (
        f"Predicted Rating: {gru_rating} (Probabilities: {gru_probs})",
        f"Predicted Rating: {lstm_rating} (Probabilities: {lstm_probs})",
        f"Predicted Rating: {bilstm_rating} (Probabilities: {bilstm_probs})",
        f"Lowest: {lowest}, Highest: {highest}, Average: {average:.2f}",
    )
51
 
52
+
53
def create_interface():
    """Build and return the Gradio Blocks UI for the sentiment demo.

    Layout: two markdown headers, a review-input row, a row with one
    result box per model, a statistics row, and a predict button wired
    to ``predict_all_models``.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Sentiment Analysis App")
        gr.Markdown("Predict the sentiment of your text review using RNN-based models.")

        with gr.Row():
            review_box = gr.Textbox(label="Enter your text here:", placeholder="Type your review here...")

        with gr.Row():
            gr.Markdown("### Predicted Sentiment")
            out_gru = gr.Textbox(label="GRU Model")
            out_lstm = gr.Textbox(label="LSTM Model")
            out_bilstm = gr.Textbox(label="BiLSTM Model")

        with gr.Row():
            gr.Markdown("### Statistics")
            out_stats = gr.Textbox(label="Lowest, Highest, Average")

        run_button = gr.Button("Predict Sentiment")

        # One output box per model plus the statistics summary, in the
        # same order the prediction function produces them.
        run_button.click(
            fn=predict_all_models,
            inputs=review_box,
            outputs=[out_gru, out_lstm, out_bilstm, out_stats]
        )

    return demo
80
 
81
+
82
if __name__ == "__main__":
    # Build the UI and start the Gradio server only when run as a script.
    app = create_interface()
    app.launch()