Avinash109 committed on
Commit 441f684 · verified · 1 Parent(s): 73670cb

Update app.py

Files changed (1):
  1. app.py (+163, −85)

app.py CHANGED
@@ -1,4 +1,5 @@
  import pandas as pd
  import torch
  import torch.nn as nn
  import torch.optim as optim
@@ -8,69 +9,92 @@ from sklearn.preprocessing import StandardScaler
  import joblib
  import gradio as gr
  from apscheduler.schedulers.background import BackgroundScheduler
-
- # Load the data
  data = pd.read_csv('BANKNIFTY_OPTION_CHAIN_data.csv')
-
- # Preprocess the data
  scaler = StandardScaler()
  scaled_data = scaler.fit_transform(data[['open', 'high', 'low', 'close', 'volume', 'oi']])
  data[['open', 'high', 'low', 'close', 'volume', 'oi']] = scaled_data
-
- # Save the scaler for later use
  joblib.dump(scaler, 'scaler.gz')

- # Create a custom dataset class
  class BankNiftyDataset(Dataset):
-     def __init__(self, data, seq_len):
          self.data = data
          self.seq_len = seq_len

      def __len__(self):
-         return len(self.data) - self.seq_len

      def __getitem__(self, idx):
-         seq_data = self.data.iloc[idx:idx+self.seq_len]
          features = torch.tensor(seq_data[['open', 'high', 'low', 'close', 'volume', 'oi']].values, dtype=torch.float32)
-         label = torch.tensor(seq_data['close'].iloc[-1], dtype=torch.float32)
          return features, label

- # Define the LSTM-RNN model
- class LSTMModel(nn.Module):
-     def __init__(self, input_dim, hidden_dim, output_dim):
-         super(LSTMModel, self).__init__()
-         self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers=1, batch_first=True)
-         self.fc = nn.Linear(hidden_dim, output_dim)

      def forward(self, x):
-         h0 = torch.zeros(1, x.size(0), self.lstm.hidden_size).to(x.device)
-         c0 = torch.zeros(1, x.size(0), self.lstm.hidden_size).to(x.device)
-
-         out, _ = self.lstm(x, (h0, c0))
-         out = self.fc(out[:, -1, :])
          return out

- # Initialize model, optimizer, and loss function
- input_dim = 6
- hidden_dim = 128
- output_dim = 1
- seq_len = 10
-
- model = LSTMModel(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim)
- optimizer = optim.Adam(model.parameters(), lr=0.001)
- criterion = nn.MSELoss()
-
- # Split the data into training and validation sets
- train_data, val_data = train_test_split(data, test_size=0.2, random_state=42)
-
- train_dataset = BankNiftyDataset(train_data, seq_len)
- val_dataset = BankNiftyDataset(val_data, seq_len)
-
- train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
- val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

- # Function to train the model
- def train_model():
      model.train()
      for batch in train_loader:
          features, label = batch
@@ -80,8 +104,7 @@ def train_model():
          loss.backward()
          optimizer.step()

- # Function to evaluate the model on the validation set
- def evaluate_model():
      model.eval()
      total_loss = 0
      with torch.no_grad():
@@ -92,65 +115,120 @@ def evaluate_model():
          total_loss += loss.item()
      return total_loss / len(val_loader)

- # Function to generate a strategy based on user input
- def generate_strategy(open_price, high_price, low_price, close_price, volume, oi, sma_20, sma_50, rsi):
      model.eval()
-
-     input_data = torch.tensor([[open_price, high_price, low_price, close_price, volume, oi]], dtype=torch.float32)
-
      with torch.no_grad():
-         output = model(input_data)
-         strategy = f"Predicted Close Price: {output.item():.2f}"
-     return strategy

- # Retrain the model every week or month (depending on schedule)
  def retrain_model():
-     # Load fresh data, scale it, and retrain the model
      new_data = pd.read_csv('BANKNIFTY_OPTION_CHAIN_data.csv')
      new_scaled_data = scaler.transform(new_data[['open', 'high', 'low', 'close', 'volume', 'oi']])
      new_data[['open', 'high', 'low', 'close', 'volume', 'oi']] = new_scaled_data

      new_train_data, new_val_data = train_test_split(new_data, test_size=0.2, random_state=42)
-     new_train_dataset = BankNiftyDataset(new_train_data, seq_len)
-     new_val_dataset = BankNiftyDataset(new_val_data, seq_len)

      new_train_loader = DataLoader(new_train_dataset, batch_size=32, shuffle=True)
      new_val_loader = DataLoader(new_val_dataset, batch_size=32, shuffle=False)

-     # Training on new data
-     model.train()
-     for epoch in range(5):  # Train for 5 epochs
-         for batch in new_train_loader:
-             features, label = batch
-             optimizer.zero_grad()
-             output = model(features)
-             loss = criterion(output, label)
-             loss.backward()
-             optimizer.step()

-     # Save the retrained model
      torch.save(model.state_dict(), 'retrained_model.pth')
  # Scheduler for automatic retraining
  scheduler = BackgroundScheduler()
- scheduler.add_job(retrain_model, 'interval', weeks=1)  # Schedule weekly retraining
  scheduler.start()

  # Gradio interface
- inputs = [
-     gr.components.Number(label="Open Price"),
-     gr.components.Number(label="High Price"),
-     gr.components.Number(label="Low Price"),
-     gr.components.Number(label="Close Price"),
-     gr.components.Number(label="Volume"),
-     gr.components.Number(label="Open Interest"),
-     gr.components.Number(label="SMA 20"),
-     gr.components.Number(label="SMA 50"),
-     gr.components.Number(label="RSI")
- ]
-
- outputs = gr.components.Textbox(label="Predicted Strategy")
-
- # Launch Gradio interface
- gr.Interface(fn=generate_strategy, inputs=inputs, outputs=outputs, title="BankNifty Strategy Generator").launch()
-
 
  import pandas as pd
+ import numpy as np
  import torch
  import torch.nn as nn
  import torch.optim as optim
  import joblib
  import gradio as gr
  from apscheduler.schedulers.background import BackgroundScheduler
+ from torch.optim.lr_scheduler import ReduceLROnPlateau
+ from torch.nn import TransformerEncoder, TransformerEncoderLayer
+ import optuna
+ from sklearn.metrics import mean_squared_error
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+
+ # Load and preprocess data
  data = pd.read_csv('BANKNIFTY_OPTION_CHAIN_data.csv')
  scaler = StandardScaler()
  scaled_data = scaler.fit_transform(data[['open', 'high', 'low', 'close', 'volume', 'oi']])
  data[['open', 'high', 'low', 'close', 'volume', 'oi']] = scaled_data
  joblib.dump(scaler, 'scaler.gz')

  class BankNiftyDataset(Dataset):
+     def __init__(self, data, seq_len, expiry_type, target_cols=['close']):
          self.data = data
          self.seq_len = seq_len
+         self.expiry_type = expiry_type
+         self.target_cols = target_cols
+
+         if self.expiry_type == "weekly":
+             self.filtered_data = data[data['Expiry'].str.contains("W")]
+         elif self.expiry_type == "monthly":
+             self.filtered_data = data[~data['Expiry'].str.contains("W")]

      def __len__(self):
+         return len(self.filtered_data) - self.seq_len

      def __getitem__(self, idx):
+         seq_data = self.filtered_data.iloc[idx:idx+self.seq_len]
          features = torch.tensor(seq_data[['open', 'high', 'low', 'close', 'volume', 'oi']].values, dtype=torch.float32)
+         label = torch.tensor(seq_data[self.target_cols].iloc[-1].values, dtype=torch.float32)
          return features, label
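The expiry filter above assumes an 'Expiry' string column in which weekly contracts contain "W". Two quiet failure modes follow: any other expiry_type leaves self.filtered_data unset (a later AttributeError), and __len__ goes negative when the filtered frame has fewer rows than seq_len. A minimal defensive sketch, not part of the commit (SafeBankNiftyDataset is a hypothetical name):

class SafeBankNiftyDataset(BankNiftyDataset):
    def __init__(self, data, seq_len, expiry_type, target_cols=['close']):
        super().__init__(data, seq_len, expiry_type, target_cols)
        if not hasattr(self, 'filtered_data'):
            self.filtered_data = data  # fall back to the full frame for unknown expiry_type
        if len(self.filtered_data) <= seq_len:
            raise ValueError("filtered data must have more rows than seq_len")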
+ class AdvancedModel(nn.Module):
+     def __init__(self, input_dim, hidden_dim, output_dim, num_layers=2, nhead=4, dropout=0.1):
+         super(AdvancedModel, self).__init__()
+         self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers=num_layers, batch_first=True, dropout=dropout)
+         self.gru = nn.GRU(input_dim, hidden_dim, num_layers=num_layers, batch_first=True, dropout=dropout)
+
+         encoder_layers = TransformerEncoderLayer(d_model=input_dim, nhead=nhead, dim_feedforward=hidden_dim, dropout=dropout)
+         self.transformer = TransformerEncoder(encoder_layers, num_layers=num_layers)
+
+         self.attention = nn.MultiheadAttention(hidden_dim, num_heads=nhead, dropout=dropout)
+
+         self.fc = nn.Sequential(
+             nn.Linear(hidden_dim * 3, hidden_dim),
+             nn.ReLU(),
+             nn.Dropout(dropout),
+             nn.Linear(hidden_dim, output_dim)
+         )

      def forward(self, x):
+         lstm_out, _ = self.lstm(x)
+         gru_out, _ = self.gru(x)
+         transformer_out = self.transformer(x.transpose(0, 1)).transpose(0, 1)
+
+         combined = torch.cat((lstm_out[:, -1, :], gru_out[:, -1, :], transformer_out[:, -1, :]), dim=1)
+
+         out = self.fc(combined)
          return out
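The forward pass concatenates the final timestep of the LSTM, GRU, and transformer branches into a (batch, hidden_dim * 3) vector; self.attention is constructed but never called in forward. A standalone shape check, assuming nhead divides both input_dim (required by TransformerEncoderLayer, so 3 works with d_model=6) and hidden_dim:

m = AdvancedModel(input_dim=6, hidden_dim=126, output_dim=3, num_layers=2, nhead=3)
x = torch.randn(4, 20, 6)    # (batch, seq_len, n_features)
assert m(x).shape == (4, 3)  # one value per target column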
+ def objective(trial):
+     input_dim = 6
+     hidden_dim = trial.suggest_int("hidden_dim", 64, 256)
+     output_dim = len(target_cols)
+     num_layers = trial.suggest_int("num_layers", 1, 4)
+     nhead = trial.suggest_int("nhead", 2, 8)
+     dropout = trial.suggest_float("dropout", 0.1, 0.5)
+     lr = trial.suggest_loguniform("lr", 1e-5, 1e-2)
+
+     model = AdvancedModel(input_dim, hidden_dim, output_dim, num_layers, nhead, dropout)
+     optimizer = optim.Adam(model.parameters(), lr=lr)
+     criterion = nn.MSELoss()
+
+     train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+     val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
+
+     for epoch in range(10):  # Reduced epochs for faster optimization
+         train_model(model, optimizer, criterion, train_loader)
+         val_loss = evaluate_model(model, criterion, val_loader)
+
+     return val_loss
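As written, many trials will error out rather than prune: TransformerEncoderLayer requires d_model % nhead == 0 and MultiheadAttention requires hidden_dim % nhead == 0, yet nhead is sampled from 2–8 against d_model = 6. A variant that only samples compatible values, a sketch rather than the commit's code (safe_objective is a hypothetical name; suggest_loguniform is deprecated in recent Optuna in favor of suggest_float(..., log=True)):

def safe_objective(trial):
    nhead = trial.suggest_categorical("nhead", [2, 3, 6])          # divisors of d_model = 6
    hidden_dim = trial.suggest_int("hidden_dim", 66, 252, step=6)  # divisible by 2, 3, and 6
    num_layers = trial.suggest_int("num_layers", 1, 4)
    dropout = trial.suggest_float("dropout", 0.1, 0.5)
    lr = trial.suggest_float("lr", 1e-5, 1e-2, log=True)

    model = AdvancedModel(6, hidden_dim, len(target_cols), num_layers, nhead, dropout)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    for _ in range(10):
        train_model(model, optimizer, criterion, loader)
    return evaluate_model(model, criterion, DataLoader(val_dataset, batch_size=32, shuffle=False))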
+ def train_model(model, optimizer, criterion, train_loader):
      model.train()
      for batch in train_loader:
          features, label = batch
          ...  # (unchanged middle of the loop, omitted by the diff)
          loss.backward()
          optimizer.step()

+ def evaluate_model(model, criterion, val_loader):
      model.eval()
      total_loss = 0
      with torch.no_grad():
          ...  # (unchanged middle of the loop, omitted by the diff)
          total_loss += loss.item()
      return total_loss / len(val_loader)
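The helpers are now parameterized instead of reading globals, so the same two functions serve the Optuna objective, the main training loop, and scheduled retraining. A usage sketch, assuming the model, optimizer, criterion, and datasets constructed further down the file:

loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
train_model(model, optimizer, criterion, loader)
print(evaluate_model(model, criterion, DataLoader(val_dataset, batch_size=32, shuffle=False)))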
+ def generate_strategy(model, expiry_type):
      model.eval()
+     dataset = BankNiftyDataset(data, seq_len, expiry_type, target_cols)
+     loader = DataLoader(dataset, batch_size=1, shuffle=False)
+
      with torch.no_grad():
+         predictions = []
+         for features, _ in loader:
+             output = model(features)
+             predictions.append(output.squeeze().tolist())
+     return predictions
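A call sketch; generate_strategy reads the module-level data, seq_len, and target_cols assigned further down, so it only works after those assignments run (batch_size=1 also makes full-history inference slow):

weekly_preds = generate_strategy(model, "weekly")    # one [close, volume, oi] triple per window
monthly_preds = generate_strategy(model, "monthly")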
  def retrain_model():
      new_data = pd.read_csv('BANKNIFTY_OPTION_CHAIN_data.csv')
      new_scaled_data = scaler.transform(new_data[['open', 'high', 'low', 'close', 'volume', 'oi']])
      new_data[['open', 'high', 'low', 'close', 'volume', 'oi']] = new_scaled_data

      new_train_data, new_val_data = train_test_split(new_data, test_size=0.2, random_state=42)
+     new_train_dataset = BankNiftyDataset(new_train_data, seq_len, "weekly", target_cols)
+     new_val_dataset = BankNiftyDataset(new_val_data, seq_len, "weekly", target_cols)

      new_train_loader = DataLoader(new_train_dataset, batch_size=32, shuffle=True)
      new_val_loader = DataLoader(new_val_dataset, batch_size=32, shuffle=False)

+     train_model(model, optimizer, criterion, new_train_loader)
+     val_loss = evaluate_model(model, criterion, new_val_loader)
+     print(f'Validation Loss after retraining: {val_loss:.4f}')

      torch.save(model.state_dict(), 'retrained_model.pth')
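Note that retrain_model refits the shared global model on the weekly subset only. If the monthly predictions should also track fresh data, the training step inside retrain_model could loop over both views; a sketch, not the commit's behavior:

for expiry in ("weekly", "monthly"):
    ds = BankNiftyDataset(new_train_data, seq_len, expiry, target_cols)
    train_model(model, optimizer, criterion, DataLoader(ds, batch_size=32, shuffle=True))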
+ def plot_predictions(predictions, actual_values, title):
+     plt.figure(figsize=(12, 6))
+     plt.plot(predictions, label='Predictions')
+     plt.plot(actual_values, label='Actual Values')
+     plt.title(title)
+     plt.xlabel('Time')
+     plt.ylabel('Value')
+     plt.legend()
+     return plt
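Returning the plt module hands Gradio pyplot's current global figure, so after display_strategies draws both plots, the weekly and monthly gr.Plot panels will likely both render the last figure drawn. A figure-returning variant sidesteps the shared state (a sketch; plot_predictions_fig is a hypothetical name):

def plot_predictions_fig(predictions, actual_values, title):
    fig, ax = plt.subplots(figsize=(12, 6))
    ax.plot(predictions, label='Predictions')
    ax.plot(actual_values, label='Actual Values')
    ax.set_title(title)
    ax.set_xlabel('Time')
    ax.set_ylabel('Value')
    ax.legend()
    return fig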
+ def display_strategies():
+     weekly_predictions = generate_strategy(model, "weekly")
+     monthly_predictions = generate_strategy(model, "monthly")
+
+     weekly_actual = data[data['Expiry'].str.contains("W")][target_cols].values[-len(weekly_predictions):]
+     monthly_actual = data[~data['Expiry'].str.contains("W")][target_cols].values[-len(monthly_predictions):]
+
+     weekly_plot = plot_predictions(weekly_predictions, weekly_actual, "Weekly Expiry Predictions vs Actual")
+     monthly_plot = plot_predictions(monthly_predictions, monthly_actual, "Monthly Expiry Predictions vs Actual")
+
+     weekly_mse = mean_squared_error(weekly_actual, weekly_predictions)
+     monthly_mse = mean_squared_error(monthly_actual, monthly_predictions)
+
+     return (
+         f"Weekly Expiry Strategy Predictions (MSE: {weekly_mse:.4f}):\n{weekly_predictions}\n\n"
+         f"Monthly Expiry Strategy Predictions (MSE: {monthly_mse:.4f}):\n{monthly_predictions}",
+         weekly_plot,
+         monthly_plot
+     )
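One alignment caveat: window i's label is row i + seq_len - 1 of the filtered frame, while the slices above take the last len(predictions) rows, so predictions and actuals appear shifted by one step relative to each other. If that reading is right, a slice like the following lines them up (a sketch):

weekly_frame = data[data['Expiry'].str.contains("W")]
weekly_actual = weekly_frame[target_cols].values[seq_len - 1:-1]  # label rows for windows 0..N-1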
+ # Hyperparameter optimization
+ target_cols = ['close', 'volume', 'oi']  # Predicting multiple targets
+ seq_len = 20  # Increased sequence length
+
+ train_data, val_data = train_test_split(data, test_size=0.2, random_state=42)
+ train_dataset = BankNiftyDataset(train_data, seq_len, "weekly", target_cols)
+ val_dataset = BankNiftyDataset(val_data, seq_len, "weekly", target_cols)
+
+ study = optuna.create_study(direction="minimize")
+ study.optimize(objective, n_trials=50)
+
+ best_params = study.best_params
+ print("Best hyperparameters:", best_params)
+
+ # Initialize the model with best parameters
+ input_dim = 6
+ output_dim = len(target_cols)
+ model = AdvancedModel(input_dim, best_params['hidden_dim'], output_dim, best_params['num_layers'], best_params['nhead'], best_params['dropout'])
+ optimizer = optim.Adam(model.parameters(), lr=best_params['lr'])
+ criterion = nn.MSELoss()
+
+ # Learning rate scheduler
+ scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)
+
+ # Training loop
+ num_epochs = 100
+ train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+ val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
+
+ for epoch in range(num_epochs):
+     train_model(model, optimizer, criterion, train_loader)
+     val_loss = evaluate_model(model, criterion, val_loader)
+     scheduler.step(val_loss)
+     print(f"Epoch {epoch+1}/{num_epochs}, Validation Loss: {val_loss:.4f}")
+
+ # Save the final model
+ torch.save(model.state_dict(), 'final_model.pth')
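Both the 50-trial study and the 100-epoch loop run at import time, so the app serves its UI only after they finish. Separately, train_test_split shuffles rows by default, which for ordered market data lets training windows straddle the validation period; a chronological split is the usual hedge (a sketch):

split = int(len(data) * 0.8)
train_data, val_data = data.iloc[:split], data.iloc[split:]  # no shuffling across time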
  # Scheduler for automatic retraining
  scheduler = BackgroundScheduler()
+ scheduler.add_job(retrain_model, 'interval', hours=1)
  scheduler.start()
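This reassignment shadows the ReduceLROnPlateau bound to scheduler above; the LR scheduler has already done its work by this point, but distinct names keep the two schedulers apart when debugging retraining (a sketch with hypothetical names):

lr_scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)
job_scheduler = BackgroundScheduler()
job_scheduler.add_job(retrain_model, 'interval', hours=1)
job_scheduler.start()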
  # Gradio interface
+ iface = gr.Interface(
+     fn=display_strategies,
+     inputs=None,
+     outputs=[
+         gr.Textbox(label="Strategy Predictions"),
+         gr.Plot(label="Weekly Expiry Predictions"),
+         gr.Plot(label="Monthly Expiry Predictions")
+     ],
+     title="Advanced BankNifty Option Chain Strategy Generator",
+     description="This model predicts close price, volume, and open interest for weekly and monthly expiries."
+ )
+
+ iface.launch()