import os
import streamlit as st
import pandas as pd
import openai
import torch
import matplotlib.pyplot as plt
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from dotenv import load_dotenv
import anthropic
import ast
import re
from langchain.agents import AgentType, initialize_agent
from langchain.tools import Tool
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

# Load environment variables (skip keys that are not set to avoid assigning None)
load_dotenv()
for key in ("OPENAI_API_KEY", "ANTHROPIC_API_KEY"):
    value = os.getenv(key)
    if value:
        os.environ[key] = value

# UI Styling
st.markdown(
    """
    <style>
    .stButton button {
        background-color: #1F6FEB;
        color: white;
        border-radius: 8px;
        border: none;
        padding: 10px 20px;
        font-weight: bold;
    }
    .stButton button:hover {
        background-color: #1A4FC5;
    }
    .stTextInput > div > input {
        border: 1px solid #30363D;
        background-color: #161B22;
        color: #C9D1D9;
        border-radius: 6px;
        padding: 10px;
    }
    .stFileUploader > div {
        border: 2px dashed #30363D;
        background-color: #161B22;
        color: #C9D1D9;
        border-radius: 6px;
        padding: 10px;
    }
    .response-box {
        background-color: #161B22;
        padding: 10px;
        border-radius: 6px;
        margin-bottom: 10px;
        color: #FFFFFF;
    }
    </style>
    """,
    unsafe_allow_html=True
)

st.title("Excel Q&A Chatbot πŸ“Š")

# Query execution helper and LangChain agent with multi-step reasoning and memory
def safe_execute_query(query):
    """Executes a restricted subset of Pandas expressions on the uploaded DataFrame.

    The input is sanitised with a character allow-list and evaluated with empty
    builtins, so only `df`/`pd` expressions such as `df[...]` or `df.query(...)` run.
    """
    try:
        # Keep only characters needed for typical Pandas expressions
        # (identifiers, brackets, quotes, comparisons, arithmetic).
        parsed_query = re.sub(r"[^a-zA-Z0-9_().,'\"\[\]=<>!&|+\-*/ ]", "", query.strip())

        if "df.query(" in parsed_query or "df[" in parsed_query:
            # Evaluate with no builtins and only df/pd in scope.
            return eval(parsed_query, {"__builtins__": {}}, {"df": df, "pd": pd})
        else:
            return "Unsupported query type. Please refine your question."
    except Exception as e:
        return f"Error executing query: {str(e)}"
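# Illustrative usage (hypothetical column names, not taken from any uploaded data):
#   safe_execute_query("df['sales'].mean()")        -> mean of a 'sales' column
#   safe_execute_query("df.query('price > 100')")   -> rows where 'price' exceeds 100
#   safe_execute_query("import os")                 -> "Unsupported query type..."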

def execute_query(query):
    # Keep the conversation memory in session state so chat context persists
    # across queries instead of being recreated on every call.
    if "agent_memory" not in st.session_state:
        st.session_state.agent_memory = ConversationBufferMemory(
            memory_key="chat_history", return_messages=True
        )

    tool = Tool(
        name="Pandas Query Executor",
        func=safe_execute_query,
        description="Executes Pandas-based queries on the uploaded data"
    )

    agent = initialize_agent(
        tools=[tool],
        llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
        agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
        memory=st.session_state.agent_memory,
        verbose=True
    )
    return agent.run(query)
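# Illustrative call (assumes a file has already been uploaded and parsed into `df`):
#   execute_query("Which column has the most missing values?")
# The agent decides when to route the question through the Pandas Query Executor
# tool and returns a natural-language answer.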

# Model Selection
model_choice = st.selectbox("Select LLM Model", ["OpenAI GPT-3.5", "Claude 3 Haiku", "Mistral-7B"])

# File upload with format validation
uploaded_file = st.file_uploader("Upload a data file (CSV, Excel, JSON, or TSV)", type=["csv", "xlsx", "xls", "json", "tsv"])

if uploaded_file is not None:
    file_extension = uploaded_file.name.split(".")[-1].lower()
    
    try:
        if file_extension == "csv":
            df = pd.read_csv(uploaded_file)
        elif file_extension in ["xlsx", "xls"]:
            # openpyxl only reads .xlsx; let pandas pick a suitable engine for legacy .xls
            engine = "openpyxl" if file_extension == "xlsx" else None
            df = pd.read_excel(uploaded_file, engine=engine)
        elif file_extension == "json":
            df = pd.read_json(uploaded_file)
        elif file_extension == "tsv":
            df = pd.read_csv(uploaded_file, sep="\t")
        else:
            st.error("Unsupported file format. Please upload a CSV, Excel, JSON, or TSV file.")
            st.stop()
        
        st.write("### Preview of Data:")
        st.write(df.head())
        
        # Extract metadata
        column_names = df.columns.tolist()
        data_types = df.dtypes.apply(lambda x: x.name).to_dict()
        missing_values = df.isnull().sum().to_dict()
        
        # Display metadata
        st.write("### Column Details:")
        st.write(pd.DataFrame({"Column": column_names, "Type": data_types.values(), "Missing Values": missing_values.values()}))
        
    except Exception as e:
        st.error(f"Error loading file: {str(e)}")
        st.stop()
    
    # User Query
    query = st.text_input("Ask a question about this data:")

    # Memory for context retention: keep a history of submitted questions
    if "query_history" not in st.session_state:
        st.session_state.query_history = []

    if st.button("Submit Query"):
        if query:
            try:
                exec_result = execute_query(query)
                st.session_state.query_history.append(query)
                st.write("### Result:")
                st.write(exec_result)
            except Exception as e:
                st.error(f"Error executing query: {str(e)}")
        else:
            st.warning("Please enter a question before submitting.")
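    # Optional, illustrative sketch: surface the stored history in the sidebar so
    # users can see the questions asked so far in this session.
    if st.session_state.get("query_history"):
        st.sidebar.write("#### Query History")
        for past_query in st.session_state.query_history:
            st.sidebar.write(f"- {past_query}")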