Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import pandas as pd
|
3 |
+
import numpy as np
|
4 |
+
import yfinance as yf
|
5 |
+
from stable_baselines3 import PPO
|
6 |
+
from gym import Env
|
7 |
+
from gym.spaces import Box
|
8 |
+
import matplotlib.pyplot as plt
|
9 |
+
import io
|
10 |
+
|
11 |
+
class PortfolioEnv(Env):
    """Gym environment for portfolio allocation over a table of asset returns.

    Observation: the current row of per-asset periodic returns.
    Action: a non-negative weight per asset (normalized to sum to 1 in ``step``).
    Reward: the absolute portfolio balance after applying the step's returns.
    """

    def __init__(self, returns, initial_balance=10000):
        """
        Args:
            returns: DataFrame of periodic returns, one column per asset.
            initial_balance: starting portfolio value.
        """
        super(PortfolioEnv, self).__init__()
        self.returns = returns
        self.n_assets = returns.shape[1]
        self.initial_balance = initial_balance
        self.current_balance = initial_balance

        self.action_space = Box(low=0, high=1, shape=(self.n_assets,), dtype=np.float32)
        self.observation_space = Box(low=-np.inf, high=np.inf, shape=(self.n_assets,), dtype=np.float32)

        self.weights = np.ones(self.n_assets) / self.n_assets
        self.state = self.returns.iloc[0].values
        self.current_step = 0

    def step(self, action):
        """Apply one allocation, advance one period.

        Returns:
            (state, reward, done, info) per the classic gym API; reward is the
            updated balance (the rollout loop in this file relies on that to
            build the equity curve).
        """
        action = np.clip(action, 0, 1)
        total = np.sum(action)
        # BUGFIX: after clipping to [0, 1] the policy can emit an all-zero
        # action; the old unconditional `action / np.sum(action)` then
        # produced NaN weights and a NaN balance. Fall back to equal weights.
        if total > 0:
            weights = action / total
        else:
            weights = np.ones(self.n_assets) / self.n_assets

        portfolio_return = np.dot(weights, self.returns.iloc[self.current_step].values)
        self.current_balance *= (1 + portfolio_return)

        self.current_step += 1
        done = self.current_step >= len(self.returns) - 1

        if not done:
            self.state = self.returns.iloc[self.current_step].values

        reward = self.current_balance
        return self.state, reward, done, {}

    def reset(self):
        """Reset the balance and step pointer; return the first observation."""
        self.current_balance = self.initial_balance
        self.current_step = 0
        self.state = self.returns.iloc[0].values
        return self.state
|
47 |
+
|
48 |
+
def fetch_data(tickers, start_date, end_date):
    """Download historical adjusted closing prices for the given tickers.

    Args:
        tickers: ticker symbols as accepted by yfinance (comma/space-separated
            string or list).
        start_date: range start, "YYYY-MM-DD".
        end_date: range end, "YYYY-MM-DD".

    Returns:
        DataFrame of adjusted close prices, one column per asset.
    """
    # BUGFIX: newer yfinance releases default to auto_adjust=True, which drops
    # the 'Adj Close' column and made this lookup raise KeyError. Request the
    # unadjusted layout explicitly so 'Adj Close' is always present.
    data = yf.download(tickers, start=start_date, end=end_date, auto_adjust=False)['Adj Close']
    # A single ticker comes back as a Series; downstream code needs a
    # DataFrame (PortfolioEnv reads returns.shape[1]).
    if isinstance(data, pd.Series):
        data = data.to_frame(name=str(tickers).strip())
    return data
|
51 |
+
|
52 |
+
def optimize_portfolio(tickers, start_date, end_date, initial_balance):
    """Train a PPO agent on historical returns and report the final weights.

    Args:
        tickers: comma-separated ticker symbols, e.g. "AAPL, MSFT, TSLA".
        start_date: range start, "YYYY-MM-DD".
        end_date: range end, "YYYY-MM-DD".
        initial_balance: starting portfolio value.

    Returns:
        (weights, chart_path): dict mapping "Asset_i (TICKER)" labels to the
        final float weights, and the path of the saved equity-curve chart.
    """
    # Fetch real-time data and convert prices to periodic returns.
    data = fetch_data(tickers, start_date, end_date)
    returns = data.pct_change().dropna()

    # Define the environment and train a PPO policy on it.
    env = PortfolioEnv(returns, initial_balance=initial_balance)
    model = PPO("MlpPolicy", env, verbose=0)
    model.learn(total_timesteps=5000)

    # Roll the trained policy forward once to collect the equity curve.
    state = env.reset()
    done = False
    # BUGFIX: this used to start as an empty list, so a return series too
    # short for the loop to run produced an empty weights dict. Default to
    # equal weights so the output is always well-defined.
    portfolio_weights = np.ones(env.n_assets) / env.n_assets
    portfolio_values = [initial_balance]
    while not done:
        action, _ = model.predict(state)
        state, reward, done, _ = env.step(action)
        total = np.sum(action)
        # BUGFIX: an all-zero action previously produced NaN weights here;
        # keep the last valid (or default equal) weights instead.
        if total > 0:
            portfolio_weights = action / total
        # env.step returns the running balance as the reward.
        portfolio_values.append(reward)

    plt.figure(figsize=(10, 6))
    plt.plot(portfolio_values, label="Portfolio Value")
    plt.title("Portfolio Value Over Time")
    plt.xlabel("Time Steps")
    plt.ylabel("Portfolio Value")
    plt.legend()
    plt.grid()
    plt.savefig("portfolio_chart.png")
    plt.close()

    # Prepare the output. yfinance can silently drop invalid tickers, so the
    # number of assets may be smaller than the number of entered symbols —
    # guard the index lookup instead of letting it raise IndexError.
    names = [t.strip() for t in tickers.split(',')]
    weights = {
        f"Asset_{i + 1} ({names[i] if i < len(names) else '?'})": float(weight)
        for i, weight in enumerate(portfolio_weights)
    }
    return weights, "portfolio_chart.png"
|
85 |
+
|
86 |
+
def run_optimization(tickers, start_date, end_date, initial_balance):
    """Gradio callback: validate the inputs, then run the optimizer.

    Returns:
        (weights_dict, chart_path) on success, or (error_message, None) when
        a field is missing or the optimization raises.
    """
    # Guard clause: every text field must be non-empty.
    if not (tickers and start_date and end_date):
        return "Error: Please fill all the fields.", None
    try:
        result = optimize_portfolio(tickers, start_date, end_date, initial_balance)
    except Exception as exc:
        # Top-level UI boundary: surface the failure as a message, not a crash.
        return f"Error: {exc}", None
    weights, chart_path = result
    return weights, chart_path
|
94 |
+
|
95 |
+
# Gradio front end: collects tickers, a date range, and a starting balance,
# runs `run_optimization`, and renders the weights (JSON) plus the saved
# equity-curve chart image.
interface = gr.Interface(
    fn=run_optimization,
    inputs=[
        gr.Textbox(label="Enter Stock Tickers (comma-separated)", placeholder="AAPL, MSFT, TSLA"),
        gr.Textbox(label="Start Date (YYYY-MM-DD)", placeholder="2023-01-01"),
        gr.Textbox(label="End Date (YYYY-MM-DD)", placeholder="2023-12-31"),
        gr.Number(label="Initial Investment Amount", value=10000),
    ],
    outputs=[
        gr.JSON(label="Optimized Portfolio Weights"),
        gr.Image(label="Portfolio Value Chart"),
    ],
    title="AI-Powered Portfolio Optimization",
    description="""
    Enter stock tickers (e.g., AAPL, MSFT, TSLA), a date range, and your initial investment amount.
    The app fetches real-time historical data, runs AI optimization, and returns the optimized portfolio weights
    along with a performance chart.
    """
)
|
114 |
+
|
115 |
+
# Launch the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    interface.launch()
|