developerjeremylive committed
Commit d68e12a · verified · 1 Parent(s): 5f681db
Files changed (1)
  1. app.py +325 -0
app.py ADDED
@@ -0,0 +1,325 @@
+ import os
+ import io
+ import sys
+ import re
+ import traceback
+ import subprocess
+ import gradio as gr
+ import pandas as pd
+ from dotenv import load_dotenv
+ from crewai import Crew, Agent, Task, Process, LLM
+ from crewai_tools import FileReadTool
+ from pydantic import BaseModel, Field
+
+ # Load environment variables
+ load_dotenv()
+
+ # Get API key from environment variables
+ OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+ if not OPENAI_API_KEY:
+     raise ValueError("OPENAI_API_KEY environment variable not set")
+
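+ # Shared LLM used by all agents below. Note: the model/temperature selections exposed in the
+ # Gradio "Advanced Options" panel are collected by the UI but are not applied here yet.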
+ llm = LLM(
+     model="openai/gpt-4o",
+     api_key=OPENAI_API_KEY,
+     temperature=0.7
+ )
+
+ # 1) Query parser agent
+ query_parser_agent = Agent(
+     role="Stock Data Analyst",
+     goal="Extract stock details and fetch required data from this user query: {query}.",
+     backstory="You are a financial analyst specializing in stock market data retrieval.",
+     llm=llm,
+     verbose=True,
+     memory=True,
+ )
+
+ # QueryAnalysisOutput is defined here because the parsing task below uses it as its structured output.
+ class QueryAnalysisOutput(BaseModel):
+     """Structured output for the query analysis task."""
+     symbols: list[str] = Field(..., description="List of stock ticker symbols (e.g., ['TSLA', 'AAPL']).")
+     timeframe: str = Field(..., description="Time period (e.g., '1d', '1mo', '1y').")
+     action: str = Field(..., description="Action to be performed (e.g., 'fetch', 'plot').")
+
+
+ query_parsing_task = Task(
+     description="Analyze the user query and extract stock details.",
+     expected_output="A dictionary with keys: 'symbols', 'timeframe', 'action'.",
+     output_pydantic=QueryAnalysisOutput,
+     agent=query_parser_agent,
+ )
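+ # In the sequential process, this task's structured output (symbols, timeframe, action)
+ # is passed along as context for the code writer task below.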
+
+ # 2) Code writer agent
+ code_writer_agent = Agent(
+     role="Senior Python Developer",
+     goal="Write Python code to visualize stock data.",
+     backstory="""You are a Senior Python developer specializing in stock market data visualization.
+ You are also a Pandas, Matplotlib and yfinance library expert.
+ You are skilled at writing production-ready Python code.
+ Ensure the code handles potential variations in the DataFrame structure returned by yfinance,
+ especially for different timeframes or delisted stocks.
+ Crucially, ensure the generated script saves any generated plot as 'plot.png' using `plt.savefig('plot.png')` before the script ends.""",
+     llm=llm,
+     verbose=True,
+ )
+
+ code_writer_task = Task(
+     description="""Write Python code to visualize stock data based on the inputs from the stock analyst
+ where you would find stock symbol, timeframe and action.""",
+     expected_output="A clean and executable Python script file (.py) for stock visualization.",
+     agent=code_writer_agent,
+ )
+
+ # 3) Code output agent (instead of an execution agent)
+ code_output_agent = Agent(
+     role="Python Code Presenter",
+     goal="Present the generated Python code for stock visualization.",
+     backstory="You are an expert in presenting Python code in a clear and readable format.",
+     allow_delegation=False,  # This agent just presents the code
+     llm=llm,
+     verbose=True,
+ )
+
+ code_output_task = Task(
+     description="""Receive the Python code for stock visualization from the code writer agent and present it.""",
+     expected_output="The complete Python script for stock visualization.",
+     agent=code_output_agent,
+ )
+
+ crew = Crew(
+     agents=[query_parser_agent, code_writer_agent, code_output_agent],  # Use code_output_agent
+     tasks=[query_parsing_task, code_writer_task, code_output_task],  # Use code_output_task
+     process=Process.sequential
+ )
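+ # The crew runs its three tasks sequentially: parse the query, write the visualization code,
+ # then present that code. Executing the generated script and surfacing the plot happen
+ # outside the crew, in run_crewai_process() below.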
+
+
+ def run_crewai_process(user_query, model, temperature):
+     """
+     Runs the CrewAI process, captures agent thoughts, gets the generated code,
+     executes that code, and returns the results, including the plot.
+
+     Args:
+         user_query (str): The user's query for the CrewAI process.
+         model (str): The model selected in the UI (not currently applied to the module-level LLM).
+         temperature (float): The temperature selected in the UI (not currently applied).
+
+     Yields:
+         tuple: (chat history (list of dicts), agent thoughts (str), generated code (str),
+         execution output (str), plot figure (always None), plot image path (str or None)),
+         in the same order as the Gradio output components.
+     """
+     # Create a string buffer to capture stdout (CrewAI's verbose output)
+     output_buffer = io.StringIO()
+     original_stdout = sys.stdout
+     sys.stdout = output_buffer
+     agent_thoughts = ""
+     generated_code = ""
+     execution_output = ""
+     generated_plot_path = None
+     final_answer_chat = [{"role": "user", "content": user_query}]
+
+     try:
+         # Kick off the crew process.
+         # CrewAI's kickoff doesn't directly support streaming to a buffer,
+         # so we run it and capture the full output once the crew finishes.
+         # Intermediate yields provide status updates to the UI in the meantime.
+         yield final_answer_chat, "Starting CrewAI process...", generated_code, execution_output, None, generated_plot_path
+
+         final_result = crew.kickoff(inputs={"query": user_query})
+
+         # Get the captured CrewAI output (agent thoughts)
+         agent_thoughts = output_buffer.getvalue()
+         yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, generated_plot_path
+
+         # The final result is the generated code from the code_output_agent
+         generated_code_raw = str(final_result).strip()
+
+         # Use a regex to extract the fenced code block, if present
+         code_match = re.search(r"```python\n(.*?)\n```", generated_code_raw, re.DOTALL)
+         if code_match:
+             generated_code = code_match.group(1).strip()
+         else:
+             # If no code block is found, assume the entire output is code (or handle as error)
+             generated_code = generated_code_raw
+             if not generated_code.strip():  # Handle cases where output is empty or just whitespace
+                 execution_output = "CrewAI process completed, but no code was generated."
+                 final_answer_chat.append({"role": "assistant", "content": execution_output})
+                 yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, generated_plot_path
+                 return  # Exit the generator
+
+         # Format for the Gradio Chatbot (list of dictionaries for type='messages')
+         final_answer_chat.append({"role": "assistant", "content": "Code generation complete. See the 'Generated Code' box. Attempting to execute code..."})
+         yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, generated_plot_path
+
+         # --- Execute the generated code ---
+         plot_file_path = 'plot.png'  # Expected plot file name
+
+         if generated_code:
+             try:
+                 # Write the generated code to a temporary file
+                 temp_script_path = "generated_script.py"
+                 with open(temp_script_path, "w") as f:
+                     f.write(generated_code)
+
+                 # Execute the temporary script using subprocess,
+                 # with the same Python interpreter that is running this app
+                 process = subprocess.run(
+                     [sys.executable, temp_script_path],
+                     capture_output=True,
+                     text=True,  # Capture stdout and stderr as text
+                     check=False  # Don't raise an exception for non-zero exit codes
+                 )
+                 execution_output = process.stdout + process.stderr
+
+                 # Check for specific errors in the execution output
+                 if "KeyError" in execution_output:
+                     execution_output += "\n\nPotential Issue: The generated script encountered a KeyError. This might mean the script tried to access a column or data point that wasn't available for the specified stock or timeframe. Please try a different query or timeframe."
+                 elif "FileNotFoundError: [Errno 2] No such file or directory: 'plot.png'" in execution_output and "Figure(" in execution_output:
+                     execution_output += "\n\nPlot Generation Issue: The script seems to have created a plot but did not save it to 'plot.png'. Please ensure the generated code includes `plt.savefig('plot.png')`."
+                 elif "FileNotFoundError: [Errno 2] No such file or directory: 'plot.png'" in execution_output:
+                     execution_output += "\n\nPlot Generation Issue: The script ran, but the plot file was not created. Ensure the generated code includes commands to save the plot to 'plot.png'."
+
+                 # Check for the generated plot file
+                 if os.path.exists(plot_file_path):
+                     print(f"Plot file found at: {os.path.abspath(plot_file_path)}")  # Log file path
+                     generated_plot_path = plot_file_path  # Set the path to be returned
+                 else:
+                     print(f"Plot file not found at expected path: {os.path.abspath(plot_file_path)}")  # Log missing file path
+                     execution_output += f"\nPlot file '{plot_file_path}' not found after execution."
+
+             except Exception as e:
+                 traceback_str = traceback.format_exc()
+                 execution_output = f"An error occurred during code execution: {e}\n{traceback_str}"
+
+             finally:
+                 # Clean up the temporary script file
+                 if os.path.exists(temp_script_path):
+                     os.remove(temp_script_path)
+
+         else:
+             execution_output = "No code was generated to execute."
+
+         # Update the final answer chat to reflect the execution attempt
+         final_answer_chat.append({"role": "assistant", "content": "Code execution finished. See 'Execution Output'."})
+         if generated_plot_path:
+             final_answer_chat.append({"role": "assistant", "content": "Plot generated successfully. See 'Generated Plot'."})
+
+         yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, generated_plot_path
+
+     except Exception as e:
+         # If an error occurs during the CrewAI process, report it in the UI
+         traceback_str = traceback.format_exc()
+         agent_thoughts += f"\nAn error occurred during CrewAI process: {e}\n{traceback_str}"
+         final_answer_chat.append({"role": "assistant", "content": f"An error occurred during CrewAI process: {e}"})
+         yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, generated_plot_path
+
+     finally:
+         # Restore original stdout
+         sys.stdout = original_stdout
+
+
+
+ def create_interface():
+     """Create and return the Gradio interface."""
+     with gr.Blocks(title="Financial Analytics Agent", theme=gr.themes.Soft()) as interface:
+         gr.Markdown("# 📊 Financial Analytics Agent")
+         gr.Markdown("Enter your financial query to analyze stock data and generate visualizations.")
+
+         with gr.Row():
+             with gr.Column(scale=2):
+                 user_query_input = gr.Textbox(
+                     label="Enter your financial query",
+                     placeholder="e.g., Show me the stock performance of AAPL and MSFT for the last year",
+                     lines=3
+                 )
+                 submit_btn = gr.Button("Analyze", variant="primary")
+
+                 with gr.Accordion("Advanced Options", open=False):
+                     gr.Markdown("### Model Settings")
+                     model_dropdown = gr.Dropdown(
+                         ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"],
+                         value="gpt-4o",
+                         label="Model"
+                     )
+                     temperature = gr.Slider(
+                         minimum=0.1,
+                         maximum=1.0,
+                         value=0.7,
+                         step=0.1,
+                         label="Creativity (Temperature)"
+                     )
+
+             with gr.Column(scale=3):
+                 with gr.Tabs():
+                     with gr.TabItem("Analysis"):
+                         final_answer_chat = gr.Chatbot(
+                             label="Analysis Results",
+                             type="messages",  # run_crewai_process yields role/content dictionaries
+                             height=300,
+                             show_copy_button=True
+                         )
+
+                     with gr.TabItem("Agent Thoughts"):
+                         agent_thoughts = gr.Textbox(
+                             label="Agent Thinking Process",
+                             interactive=False,
+                             lines=15,
+                             max_lines=30,
+                             show_copy_button=True
+                         )
+
+                     with gr.TabItem("Generated Code"):
+                         generated_code = gr.Code(
+                             label="Generated Python Code",
+                             language="python",
+                             interactive=False,
+                             lines=15
+                         )
+
+                     with gr.TabItem("Execution Output"):
+                         execution_output = gr.Textbox(
+                             label="Code Execution Output",
+                             interactive=False,
+                             lines=10,
+                             show_copy_button=True
+                         )
+
+         with gr.Row():
+             with gr.Column():
+                 plot_output = gr.Plot(
+                     label="Generated Visualization",
+                     visible=False  # Placeholder; the pipeline returns an image file rather than a figure
+                 )
+                 image_output = gr.Image(
+                     label="Generated Plot",
+                     type="filepath",
+                     visible=True  # Displays 'plot.png' when the executed script produces it
+                 )
+
+         # Handle form submission
+         inputs = [user_query_input, model_dropdown, temperature]
+         outputs = [
+             final_answer_chat,
+             agent_thoughts,
+             generated_code,
+             execution_output,
+             plot_output,
+             image_output
+         ]
+
+         submit_btn.click(
+             fn=run_crewai_process,
+             inputs=inputs,
+             outputs=outputs,
+             api_name="analyze"
+         )
+
+     return interface
+
+
+ def main():
+     """Run the Gradio interface."""
+     interface = create_interface()
+     interface.launch(share=False, server_name="0.0.0.0", server_port=7860)
+
+
+ if __name__ == "__main__":
+     main()