BN / app.py
Avinash109's picture
Update app.py
73670cb verified
raw
history blame
5.55 kB
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import joblib
import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler
# Load the data
data = pd.read_csv('BANKNIFTY_OPTION_CHAIN_data.csv')

# Preprocess the data: standardize the six feature columns in place.
feature_cols = ['open', 'high', 'low', 'close', 'volume', 'oi']
scaler = StandardScaler()
data[feature_cols] = scaler.fit_transform(data[feature_cols])

# Save the scaler for later use
joblib.dump(scaler, 'scaler.gz')
# Create a custom dataset class
class BankNiftyDataset(Dataset):
    """Sliding-window dataset over the scaled OHLCV + OI rows.

    Each sample is a window of ``seq_len`` consecutive rows of the six
    feature columns; the label is the (scaled) close of the row that
    immediately *follows* the window.

    Bug fix vs. the original: the label used to be the last close inside
    the window, i.e. a value already present in the features (target
    leakage). ``__len__`` already reserved one extra row per window, so
    ``idx + seq_len`` is always a valid label index.
    """

    FEATURE_COLS = ['open', 'high', 'low', 'close', 'volume', 'oi']

    def __init__(self, data, seq_len):
        # Reset to a positional index so iloc-based windowing is
        # independent of any index left over from train_test_split.
        self.data = data.reset_index(drop=True)
        self.seq_len = seq_len

    def __len__(self):
        # The last usable window must leave one row after it for the label.
        return len(self.data) - self.seq_len

    def __getitem__(self, idx):
        window = self.data.iloc[idx:idx + self.seq_len]
        features = torch.tensor(window[self.FEATURE_COLS].values,
                                dtype=torch.float32)
        # Predict the *next* close, not one the model has already seen.
        label = torch.tensor(self.data['close'].iloc[idx + self.seq_len],
                             dtype=torch.float32)
        return features, label
# Define the LSTM-RNN model
class LSTMModel(nn.Module):
    """LSTM regressor mapping a (batch, seq_len, input_dim) window to one value.

    Args:
        input_dim: number of features per time step.
        hidden_dim: LSTM hidden-state size.
        output_dim: size of the prediction (1 for a single close price).
        num_layers: stacked LSTM layers; defaults to 1, matching the
            original behavior, but is now configurable instead of being
            hardcoded in ``forward``.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers=1):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim,
                            num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # nn.LSTM defaults the hidden/cell states to zeros — exactly what
        # the original built by hand, except the original hardcoded the
        # layer count to 1 and would have broken for num_layers > 1.
        out, _ = self.lstm(x)
        # Regress from the representation of the last time step only.
        return self.fc(out[:, -1, :])
# Initialize model, optimizer, and loss function
input_dim = 6      # open, high, low, close, volume, oi
hidden_dim = 128
output_dim = 1     # single predicted (scaled) close price
seq_len = 10

model = LSTMModel(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim)
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.MSELoss()

# Split chronologically. The original used the default shuffle=True, which
# scrambles row order before windowing — the seq_len-row "sequences" then
# mix unrelated timestamps and future rows leak into the training set.
train_data, val_data = train_test_split(data, test_size=0.2, shuffle=False)
train_dataset = BankNiftyDataset(train_data, seq_len)
val_dataset = BankNiftyDataset(val_data, seq_len)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
# Function to train the model
def train_model():
    """Run one training epoch over ``train_loader``, updating ``model`` in place."""
    model.train()
    for features, label in train_loader:
        optimizer.zero_grad()
        output = model(features)
        # Bug fix: the model emits (batch, 1) while labels are (batch,).
        # Without the squeeze, MSELoss broadcasts the pair to
        # (batch, batch) and trains against the wrong element pairs.
        loss = criterion(output.squeeze(-1), label)
        loss.backward()
        optimizer.step()
# Function to evaluate the model on the validation set
def evaluate_model():
    """Return the mean MSE over ``val_loader`` (no gradient updates)."""
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for features, label in val_loader:
            output = model(features)
            # Squeeze so (batch, 1) predictions compare element-wise with
            # (batch,) labels instead of broadcasting to (batch, batch).
            total_loss += criterion(output.squeeze(-1), label).item()
    return total_loss / len(val_loader)
# Function to generate a strategy based on user input
def generate_strategy(open_price, high_price, low_price, close_price, volume, oi, sma_20, sma_50, rsi):
    """Predict the next close price from one bar of user-supplied values.

    sma_20 / sma_50 / rsi are accepted for interface compatibility with the
    Gradio form but are not model inputs — the network was trained only on
    the six OHLCV+OI columns.
    """
    model.eval()
    # Bug fix 1: scale with the fitted scaler. The model was trained on
    # standardized features, so feeding raw prices (as the original did)
    # produces meaningless predictions.
    raw = [[open_price, high_price, low_price, close_price, volume, oi]]
    scaled = scaler.transform(raw)
    # Bug fix 2: shape (batch=1, seq_len=1, features=6). The original 2-D
    # (1, 6) tensor is treated as *unbatched* by nn.LSTM, and the model's
    # out[:, -1, :] indexing then raises IndexError on the 2-D output.
    input_data = torch.tensor(scaled, dtype=torch.float32).unsqueeze(0)
    with torch.no_grad():
        output = model(input_data)
    # NOTE(review): the prediction is in *scaled* units; an inverse
    # transform of the close column would be needed for a raw price.
    strategy = f"Predicted Close Price: {output.item():.2f}"
    return strategy
# Retrain the model every week or month (depending on schedule)
def retrain_model():
    """Reload the CSV, rebuild the training loader, train 5 epochs, checkpoint.

    Runs on the background scheduler; mutates the module-level ``model``
    and saves its weights to ``retrained_model.pth``.
    """
    new_data = pd.read_csv('BANKNIFTY_OPTION_CHAIN_data.csv')
    cols = ['open', 'high', 'low', 'close', 'volume', 'oi']
    # Reuse the scaler fitted at startup so features stay in the same space.
    new_data[cols] = scaler.transform(new_data[cols])
    # Chronological split: shuffling rows before windowing (the original
    # default) scrambles the sequences the LSTM trains on.
    new_train_data, new_val_data = train_test_split(new_data, test_size=0.2, shuffle=False)
    new_train_loader = DataLoader(BankNiftyDataset(new_train_data, seq_len),
                                  batch_size=32, shuffle=True)
    # Training on new data
    model.train()
    for _ in range(5):  # fixed 5-epoch refresh
        for features, label in new_train_loader:
            optimizer.zero_grad()
            output = model(features)
            # Squeeze aligns (batch, 1) predictions with (batch,) labels
            # so MSELoss does not broadcast to (batch, batch).
            loss = criterion(output.squeeze(-1), label)
            loss.backward()
            optimizer.step()
    # Save the retrained model
    torch.save(model.state_dict(), 'retrained_model.pth')
# Scheduler for automatic retraining
# NOTE(review): BackgroundScheduler runs jobs on a daemon thread, and
# retrain_model mutates the module-level `model` while Gradio may be serving
# predictions concurrently — no locking is done here; confirm acceptable.
scheduler = BackgroundScheduler()
scheduler.add_job(retrain_model, 'interval', weeks=1) # Schedule weekly retraining
scheduler.start()
# Gradio interface: one numeric field per feature the form collects.
_input_labels = [
    "Open Price", "High Price", "Low Price", "Close Price",
    "Volume", "Open Interest", "SMA 20", "SMA 50", "RSI",
]
inputs = [gr.components.Number(label=name) for name in _input_labels]
outputs = gr.components.Textbox(label="Predicted Strategy")

# Launch Gradio interface
gr.Interface(
    fn=generate_strategy,
    inputs=inputs,
    outputs=outputs,
    title="BankNifty Strategy Generator",
).launch()