import streamlit as st
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU
import numpy as np

# Function to load stock data using yfinance
def get_stock_data(symbol, start_date, end_date):
    stock_data = yf.download(symbol, start=start_date, end=end_date)
    # .squeeze() guards against yfinance versions that return a one-column
    # DataFrame for 'Close' instead of a Series
    return stock_data['Close'].squeeze()
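
# NOTE: arima_forecast() is called below but its definition is missing from this
# file. The sketch that follows is a hedged reconstruction, not the original
# implementation: it fits a statsmodels ARIMA(p, d, q) model on the closing
# prices and forecasts `steps` periods ahead, re-indexed onto business days
# ('B' frequency is an assumption) so it aligns with the LSTM/GRU forecasts.
def arima_forecast(data, order, steps):
    model = ARIMA(data, order=order)
    fitted_model = model.fit()
    forecast = fitted_model.forecast(steps=steps)
    # Re-index the forecast onto trading days following the last observation
    forecast_index = pd.date_range(start=data.index[-1], periods=steps + 1, freq='B')[1:]
    return pd.Series(np.asarray(forecast), index=forecast_index)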

# Function to normalize data and prepare it for LSTM/GRU
def prepare_data(data):
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(data.values.reshape(-1, 1))
    return scaled_data, scaler

# Function to create LSTM model
def create_lstm_model(input_shape):
    model = Sequential()
    model.add(LSTM(units=50, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(units=50, return_sequences=True))
    model.add(LSTM(units=50))
    model.add(Dense(units=1))
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model

# Function to create GRU model
def create_gru_model(input_shape):
    model = Sequential()
    model.add(GRU(units=50, return_sequences=True, input_shape=input_shape))
    model.add(GRU(units=50, return_sequences=True))
    model.add(GRU(units=50))
    model.add(Dense(units=1))
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model

# Function to fit LSTM/GRU model and make predictions
def lstm_gru_forecast(data, model_type, steps):
    scaled_data, scaler = prepare_data(data)
    input_data = scaled_data.reshape(-1, 1)

    # Split data into training and testing sets
    train_size = int(len(input_data) * 0.80)
    train_data, test_data = input_data[0:train_size, :], input_data[train_size:len(input_data), :]

    # Build sliding windows: each sample uses the previous 60 closes to
    # predict the next close
    x_train, y_train = [], []
    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])

    x_train, y_train = np.array(x_train), np.array(y_train)
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

    # Create and fit the model
    input_shape = (x_train.shape[1], 1)
    if model_type == 'lstm':
        model = create_lstm_model(input_shape)
    elif model_type == 'gru':
        model = create_gru_model(input_shape)
    else:
        raise ValueError(f"Unknown model_type: {model_type!r} (expected 'lstm' or 'gru')")

    model.fit(x_train, y_train, epochs=25, batch_size=32)

    # Make predictions on the test portion (prepend the 60-observation
    # look-back needed to form the first test window)
    inputs = input_data[len(input_data) - len(test_data) - 60:]
    inputs = inputs.reshape(-1, 1)
    x_test = []
    for i in range(60, len(inputs)):
        x_test.append(inputs[i - 60:i, 0])
    x_test = np.array(x_test)

    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    predicted_prices = model.predict(x_test)
    predicted_prices = scaler.inverse_transform(predicted_prices)

    # yfinance indexes carry no freq, so pd.date_range needs an explicit
    # frequency; business days ('B') are assumed here. Keep only the last
    # `steps` predictions so the values and the index have matching lengths.
    forecast_index = pd.date_range(start=data.index[-1], periods=steps + 1, freq='B')[1:]
    predicted_prices = predicted_prices[-steps:]

    return pd.Series(predicted_prices.flatten(), index=forecast_index)

# Function to create an ensemble forecast by averaging the per-date predictions
# (the input series are expected to share the same forecast index)
def ensemble_forecast(predictions_list):
    return pd.DataFrame(predictions_list).mean(axis=0)

# Streamlit App
st.title("Stock Price Forecasting App")

# Load stock data
symbol = 'AAPL'  # Replace with the desired stock symbol
start_date = '2021-01-01'
end_date = '2022-01-01'
stock_prices = get_stock_data(symbol, start_date, end_date)

# ARIMA parameters
arima_order = (3, 0, 0)  # Example: AR component (p) is set to 3, differencing (d) is 0, MA component (q) is 0
arima_forecast_steps = 30  # Number of steps to forecast (adjust based on your preference)

# LSTM and GRU parameters
lstm_gru_forecast_steps = 30  # Number of steps to forecast (adjust based on your preference)

# ARIMA Forecast
arima_predictions = arima_forecast(stock_prices, arima_order, arima_forecast_steps)

# LSTM Forecast
lstm_predictions = lstm_gru_forecast(stock_prices, 'lstm', lstm_gru_forecast_steps)

# GRU Forecast
gru_predictions = lstm_gru_forecast(stock_prices, 'gru', lstm_gru_forecast_steps)

# Ensemble Forecast (Averaging)
ensemble_predictions = ensemble_forecast([arima_predictions, lstm_predictions, gru_predictions])

# Plotting: combine the history and the four forecasts into one labelled chart
st.write("### Historical Stock Prices and Forecasts")
chart_data = pd.concat(
    {
        'Historical': stock_prices,
        'ARIMA': arima_predictions,
        'LSTM': lstm_predictions,
        'GRU': gru_predictions,
        'Ensemble': ensemble_predictions,
    },
    axis=1,
)
st.line_chart(chart_data)
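
# To run the app locally (assuming this script is saved as forecast_app.py):
#   streamlit run forecast_app.py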