Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import time
|
3 |
+
import streamlit as st
|
4 |
+
import nltk
|
5 |
+
|
6 |
+
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
|
7 |
+
from pinecone import Pinecone
|
8 |
+
from pinecone_text.sparse import BM25Encoder
|
9 |
+
from langchain_community.retrievers import PineconeHybridSearchRetriever
|
10 |
+
from langchain.tools.retriever import create_retriever_tool
|
11 |
+
from langgraph.prebuilt import create_react_agent
|
12 |
+
|
13 |
+
# Download the NLTK tokenizer if not already downloaded
|
14 |
+
nltk.download('punkt_tab')
|
15 |
+
|
16 |
+
|
@st.cache_resource
def init_agent():
    """Build and cache the LangGraph ReAct agent backed by a Pinecone hybrid retriever.

    Reads ``OPENAI_API_KEY`` and ``PINE_API_KEY`` from the environment, wires up
    dense (OpenAI embeddings) + sparse (BM25) hybrid retrieval over a Pinecone
    index, exposes it to the model as a retriever tool, and returns the compiled
    agent graph. ``@st.cache_resource`` makes this run once per Streamlit session.

    Returns:
        The compiled LangGraph ReAct agent (supports ``.stream(...)``).

    Raises:
        ValueError: if either API key environment variable is missing.
    """
    # Retrieve API keys from environment variables
    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
    PINE_API_KEY = os.environ.get("PINE_API_KEY")
    if not OPENAI_API_KEY or not PINE_API_KEY:
        raise ValueError("Please set the OPENAI_API_KEY and PINE_API_KEY environment variables.")

    # --- Embeddings ---
    # dimensions=768 must match the dimensionality the Pinecone index was created with.
    embed = OpenAIEmbeddings(
        model='text-embedding-3-small',
        openai_api_key=OPENAI_API_KEY,
        dimensions=768
    )

    # --- Pinecone Setup ---
    # NOTE(review): the index is named 'autogen' while the namespace and the
    # system prompt are all about LangGraph — confirm the index actually holds
    # LangGraph content (or rename for clarity).
    index_name = 'autogen'
    namespace_name = 'langgraph-main'
    pc = Pinecone(api_key=PINE_API_KEY)
    index = pc.Index(index_name)
    # Allow a moment for the index to connect; the stats call doubles as a
    # cheap connectivity check (its return value is intentionally discarded).
    time.sleep(1)
    index.describe_index_stats()

    # --- BM25 Sparse Encoder ---
    # .default() loads a pretrained BM25 model (this needs the NLTK tokenizer
    # data downloaded at module import time).
    bm25_encoder = BM25Encoder().default()

    # --- Create a Hybrid Retriever (dense embeddings + sparse BM25) ---
    retriever = PineconeHybridSearchRetriever(
        embeddings=embed,
        sparse_encoder=bm25_encoder,
        index=index,
        namespace=namespace_name,
        top_k=4
    )

    # --- Chat Model ---
    model = ChatOpenAI(model_name="o3-mini-2025-01-31", openai_api_key=OPENAI_API_KEY)

    # --- Create the Retriever Tool ---
    retriever_tool = create_retriever_tool(
        retriever,
        "retrieve_context",
        # BUGFIX: typo "information form" -> "information from" in the tool
        # description the model reads when deciding to call the tool.
        "Search and return information from Autogen's codebase and documentation",
    )
    tools = [retriever_tool]

    # --- System Prompt ---
    prompt = """
You are an AI coding assistant specializing in the LangGraph framework. Your primary role is to help users build, code, and debug their LangGraph graphs for multi-agent AI applications. Focus on guiding users through the actual coding and implementation of LangGraph graphs rather than merely answering theoretical questions. Your responses should empower users to write, test, and optimize their LangGraph code by leveraging documentation, source code, and practical coding examples.

You have access to a powerful tool called `retriever_tool` that functions as a search engine for LangGraph’s resources. This tool is essential for retrieving up-to-date code examples, API references, and implementation details to ensure that your responses reflect the latest details from LangGraph. Use it extensively to fetch relevant coding resources when necessary.

When using the `retriever_tool`, formulate your search queries with these key terms:
- **Graph coding**: for guidance on building and structuring LangGraph graphs.
- **Nodes implementation**: for creating, managing, and customizing workflow nodes in code.
- **Multi-agent graph workflows**: for coding interactions and collaborations among agents.
- **API Code Examples**: for detailed usage of classes, methods, and functions with code snippets.
- **Graph Execution**: for instructions on running LangGraph applications and troubleshooting code execution issues.
- **Extensions and Integrations**: for integrating third-party services or custom tools in your code.
- **LangGraph Studio Coding**: for coding best practices while using the graphical interface and prototyping.
- **Core API Code**: for understanding and coding low-level components and event-driven architectures.
- **Tool Integration in Code**: for incorporating external functionalities into your LangGraph graphs.
- **Configuration via Code**: for customizing the framework’s behavior programmatically.
- **Code Migration**: for instructions on upgrading LangGraph versions in your codebase.
- **Practical Coding Examples**: for real-world code samples and demonstrations.

*Note:* Append “example” to any key term (e.g., “Nodes implementation example”) to search for illustrative coding samples. Use your expertise in software engineering and AI agent development to craft additional relevant queries as needed.

When responding to user queries:
1. **Focus on coding**: Prioritize providing code examples, step-by-step coding instructions, and debugging tips related to building LangGraph graphs.
2. **Begin** by understanding the specific coding challenge or feature the user wants to implement.
3. **Search** for relevant coding examples or API details using the `retriever_tool` if necessary.
4. **Provide** clear, concise, and accurate code snippets, including explanations for each part of the code.
5. **Explain** technical concepts in a way that is accessible to developers who are implementing LangGraph graphs.
6. **Suggest** best practices, testing strategies, and debugging techniques for the user’s code.

**Response Format:**
- Start with a brief introduction that acknowledges the user’s coding challenge or request.
- Present the main coding solution or explanation with well-commented code snippets.
- Include any relevant code samples or API usage examples directly in your response.
- Offer additional context, tips, or advanced techniques related to coding LangGraph graphs.
- Conclude with recommendations for next steps, additional topics, or further code refinement tips.

If a user’s query is unclear or falls outside the direct scope of coding LangGraph graphs, politely ask for clarification or guide them towards more appropriate resources.

Always use the `retriever_tool` frequently—even for queries you are confident about—since LangGraph’s coding resources are continuously updated.

Now, please help the user with their coding query for LangGraph:
"""

    # --- Create the React Agent ---
    # NOTE(review): `messages_modifier` is deprecated in recent langgraph
    # releases (superseded by `state_modifier`, then `prompt`) — confirm
    # against the pinned langgraph version before upgrading.
    graph = create_react_agent(model, tools=tools, messages_modifier=prompt)
    return graph
# Initialize the agent once per session (memoized via @st.cache_resource).
graph = init_agent()

##########################################
# Streamlit Chat App UI
##########################################

st.title("LangGraph Coding Chat Assistant")

# Initialize conversation history in session state.
# Each entry is a (role, message) tuple with role in {"user", "assistant"}.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []  # Each entry is a tuple: (role, message)
def display_conversation():
    """Render every (role, message) turn stored in session state as Markdown."""
    speaker_labels = {"user": "You"}
    for role, message in st.session_state.chat_history:
        # Any non-"user" role (i.e. "assistant") is labelled Assistant.
        speaker = speaker_labels.get(role, "Assistant")
        st.markdown(f"**{speaker}:** {message}")
# Display the existing conversation
display_conversation()

# --- Chat Input Form ---
# clear_on_submit=True empties the text box after each send.
with st.form("chat_form", clear_on_submit=True):
    user_input = st.text_input("Enter your message:")
    submitted = st.form_submit_button("Send")
    if submitted and user_input:
        # Record the user turn, then rerun so the transcript re-renders and
        # the response-generation section below picks up the pending message.
        st.session_state.chat_history.append(("user", user_input))
        # BUGFIX: st.experimental_rerun() was deprecated in Streamlit 1.27
        # and removed in 1.37; st.rerun() is the supported replacement.
        st.rerun()
|
# --- Generate Assistant Response ---
# Only generate when the most recent turn is from the user (i.e. a reply is pending).
if st.session_state.chat_history and st.session_state.chat_history[-1][0] == "user":
    inputs = {"messages": st.session_state.chat_history}

    # Placeholder that is overwritten as the agent's answer streams in.
    response_placeholder = st.empty()
    assistant_message = ""

    # stream_mode="values" emits the *full* graph state after every step; the
    # last entry of each snapshot's message list is the newest message (first
    # the user's own input, then any tool activity, finally the assistant reply).
    for state in graph.stream(inputs, stream_mode="values"):
        message = state["messages"][-1]
        if isinstance(message, tuple):
            # Plain ("role", text) tuple, as stored in chat_history.
            role, text = message
        else:
            # LangChain message object: .type is "human"/"ai"/"tool".
            role = getattr(message, "type", "ai")
            text = message.content
        if role in ("user", "human"):
            # BUGFIX: the first snapshot echoes the user's own input — the old
            # code rendered it as part of the assistant's reply. Skip it.
            continue
        # BUGFIX: each snapshot already carries a complete message, so display
        # the latest one instead of concatenating snapshots (the old `+=`
        # duplicated earlier chunks). Intermediate tool messages are shown
        # transiently and overwritten by the final answer.
        assistant_message = text
        response_placeholder.markdown(f"**Assistant:** {assistant_message}")

    # Append the full response to the chat history once complete, then rerun
    # so display_conversation() renders it as part of the transcript.
    st.session_state.chat_history.append(("assistant", assistant_message))
    # BUGFIX: st.experimental_rerun() was removed in Streamlit 1.37; use st.rerun().
    st.rerun()