Commit: working locally

Files changed:
- .chainlit/config.toml +139 -0
- .gitignore +92 -0
- .python-version +1 -0
- Dockerfile +70 -0
- Introduction_to_LangGraph_for_Agents_Assignment_Version.ipynb +0 -0
- PROJECT_README.md +75 -0
- README.md +73 -0
- app.py +242 -0
- chainlit.md +46 -0
- pyproject.toml +30 -0
- run.py +78 -0
- setup_and_run.sh +41 -0
- src/__init__.py +3 -0
- src/research_agent/__init__.py +3 -0
- src/research_agent/agent.py +131 -0
- src/research_agent/tools.py +80 -0
- uv.lock +0 -0
.chainlit/config.toml
ADDED
@@ -0,0 +1,139 @@
[project]
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true


# List of environment variables to be provided by each user to use the app.
user_env = []

# Duration (in seconds) during which the session is saved when the connection is lost
session_timeout = 3600

# Duration (in seconds) of the user session expiry
user_session_timeout = 1296000 # 15 days

# Enable third parties caching (e.g., LangChain cache)
cache = false

# Authorized origins
allow_origins = ["*"]

[features]
# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
unsafe_allow_html = false

# Process and display mathematical expressions. This can clash with "$" characters in messages.
latex = false

# Autoscroll new user messages at the top of the window
user_message_autoscroll = true

# Automatically tag threads with the current chat profile (if a chat profile is used)
auto_tag_thread = true

# Allow users to edit their own messages
edit_message = true

# Authorize users to spontaneously upload files with messages
[features.spontaneous_file_upload]
enabled = true
# Define accepted file types using MIME types
# Examples:
# 1. For specific file types:
# accept = ["image/jpeg", "image/png", "application/pdf"]
# 2. For all files of certain type:
# accept = ["image/*", "audio/*", "video/*"]
# 3. For specific file extensions:
# accept = { "application/octet-stream" = [".xyz", ".pdb"] }
# Note: Using "*/*" is not recommended as it may cause browser warnings
accept = ["*/*"]
max_files = 20
max_size_mb = 500

[features.audio]
# Sample rate of the audio
sample_rate = 24000

[features.mcp.sse]
enabled = true

[features.mcp.stdio]
enabled = true
# Only the executables in the allow list can be used for MCP stdio server.
# Only need the base name of the executable, e.g. "npx", not "/usr/bin/npx".
# Please don't comment this line for now, we need it to parse the executable name.
allowed_executables = [ "npx", "uvx" ]

[UI]
# Name of the app and chatbot.
name = "Research Assistant"

# Description of the app and chatbot. This is used for HTML metadata.
description = "Research Assistant powered by LangGraph and LangChain"

# The default value for the expand messages settings.
default_expand_messages = true

# Hide the chain of thought details from the user.
hide_cot = false

# Link to your github repo. This will add a github button in the UI's header.
github = ""

# Override default MUI light theme. (Ref: https://mui.com/material-ui/customization/default-theme/)
[UI.theme.light]
background = "#FAFAFA"
paper = "#FFFFFF"

# Override default MUI dark theme. (Ref: https://mui.com/material-ui/customization/default-theme/)
[UI.theme.dark]
background = "#1F1F1F"
paper = "#0F0F0F"

# default_theme = "dark"

# layout = "wide"

# default_sidebar_state = "open"

# Chain of Thought (CoT) display mode. Can be "hidden", "tool_call" or "full".
cot = "full"

# Specify a CSS file that can be used to customize the user interface.
# The CSS file can be served from the public directory or via an external link.
# custom_css = "/public/test.css"

# Specify additional attributes for a custom CSS file
# custom_css_attributes = "media=\"print\""

# Specify a JavaScript file that can be used to customize the user interface.
# The JavaScript file can be served from the public directory.
# custom_js = "/public/test.js"

# Specify additional attributes for custom JS file
# custom_js_attributes = "async type = \"module\""

# Custom login page image, relative to public directory or external URL
# login_page_image = "/public/custom-background.jpg"

# Custom login page image filter (Tailwind internal filters, no dark/light variants)
# login_page_image_filter = "brightness-50 grayscale"
# login_page_image_dark_filter = "contrast-200 blur-sm"

# Specify a custom meta image url.
# custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"

# Specify a custom build directory for the frontend.
# This can be used to customize the frontend code.
# Be careful: If this is a relative path, it should not start with a slash.
# custom_build = "./public/build"

# Specify optional one or more custom links in the header.
# [[UI.header_links]]
# name = "Issues"
# display_name = "Report Issue"
# icon_url = "https://avatars.githubusercontent.com/u/128686189?s=200&v=4"
# url = "https://github.com/Chainlit/chainlit/issues"

[meta]
generated_by = "0.7.700"

.gitignore
ADDED
@@ -0,0 +1,92 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Python virtual environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Project specific
*.egg-info/
src/research_agent_langgraph.egg-info/
.files/

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyCharm
.idea/

# VS Code
.vscode/
*.code-workspace

# Jupyter Notebook
.ipynb_checkpoints

# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# API keys
*.pem

# macOS
.DS_Store

# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Chainlit - ignore everything in .chainlit except config.toml
.chainlit/*
!.chainlit/config.toml

# Local files
*.local

.python-version
ADDED
@@ -0,0 +1 @@
3.13

Dockerfile
ADDED
@@ -0,0 +1,70 @@
# Get a distribution that has uv already installed
FROM ghcr.io/astral-sh/uv:python3.10-bookworm-slim

# Add Rust compiler installation for dependencies that might need it
USER root
RUN apt-get update && apt-get install -y \
    curl \
    build-essential \
    git \
    && rm -rf /var/lib/apt/lists/*
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

# Add user - this is the user that will run the app
RUN useradd -m -u 1000 user
USER user

# Set up Rust for the user
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/home/user/.cargo/bin:${PATH}"

# Set the home directory and path
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set Python environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PYTHONPATH=/home/user/app \
    UVICORN_WS_PROTOCOL=websockets

# Set the working directory
WORKDIR $HOME/app

# First copy all files needed for dependency installation
COPY --chown=user pyproject.toml $HOME/app/
COPY --chown=user README.md $HOME/app/
# Create src directory
RUN mkdir -p $HOME/app/src

# Copy dependencies first
COPY --chown=user requirements.txt* $HOME/app/


# Now copy the rest of the application (improves Docker build caching)
COPY --chown=user . $HOME/app/

# Create required directories for Chainlit
RUN mkdir -p $HOME/app/.chainlit
RUN mkdir -p $HOME/.cache

# Install application with uv sync again after all files are present
RUN uv sync

# Create .env file from environment variables at runtime
RUN echo "#!/bin/bash" > $HOME/app/entrypoint.sh && \
    echo "# Create .env file from environment variables" >> $HOME/app/entrypoint.sh && \
    echo "if [ ! -f .env ]; then" >> $HOME/app/entrypoint.sh && \
    echo '  echo "OPENAI_API_KEY=$OPENAI_API_KEY" > .env' >> $HOME/app/entrypoint.sh && \
    echo '  echo "TAVILY_API_KEY=$TAVILY_API_KEY" >> .env' >> $HOME/app/entrypoint.sh && \
    echo "fi" >> $HOME/app/entrypoint.sh && \
    echo "# Run the application" >> $HOME/app/entrypoint.sh && \
    echo 'uv run chainlit run app.py --host "0.0.0.0" --port "7860"' >> $HOME/app/entrypoint.sh && \
    chmod +x $HOME/app/entrypoint.sh

# Expose the port
EXPOSE 7860

# Run the app using the entrypoint script
CMD ["./entrypoint.sh"]

Introduction_to_LangGraph_for_Agents_Assignment_Version.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
PROJECT_README.md
ADDED
@@ -0,0 +1,75 @@
# Research Agent with LangGraph

This project implements a research agent using LangGraph and LangChain. The agent is capable of researching any domain using three powerful tools and can be deployed with a Chainlit frontend.

## Features

- Domain-specific research assistant
- Three specialized research tools:
  1. Web Search - For general queries and recent information
  2. Research Paper Search - For academic papers and scientific information
  3. Data Analysis - For analyzing data provided by the user
- Interactive Chainlit web interface
- Configurable research domain

## Architecture

This project uses:
- **LangGraph**: For creating the agent's workflow graph with cyclic behavior
- **LangChain**: For tool integration and language model interactions
- **Chainlit**: For the web-based frontend
- **OpenAI GPT-4o**: As the underlying language model

## Installation

1. Clone this repository
2. Install dependencies using UV (recommended) or pip:

```bash
# Using UV
uv pip install -r requirements.txt
```

## Configuration

1. Create a `.env` file in the project root with the following content:

```
OPENAI_API_KEY=your_openai_api_key
TAVILY_API_KEY=your_tavily_api_key
```

## Usage

To run the application:

```bash
chainlit run app.py -w
```

This will start the Chainlit server and open a web browser with the interface.

## File Structure

- `agent.py`: Contains the LangGraph implementation of the research agent
- `tools.py`: Defines the three research tools
- `app.py`: Chainlit web interface
- `chainlit.md`: Welcome page content for Chainlit
- `requirements.txt`: Dependencies for the project

## Customization

You can customize the agent by:
1. Modifying the `DOMAIN` variable in `app.py`
2. Adding new tools in `tools.py`
3. Changing the system prompt in `agent.py`

## Example Queries

- "What are the latest developments in quantum computing?"
- "Find research papers about climate change impacts on agriculture"
- "Analyze this data: [paste JSON or CSV]"

## License

MIT

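The Customization section of PROJECT_README.md above points at `tools.py` as the place to add new tools. Below is a minimal sketch of what such an addition could look like, following the same `BaseTool` pattern this commit already uses; the `WordCountTool` name and behaviour are illustrative assumptions, not part of the commit.

```python
# Hypothetical example, not part of this commit: a new tool follows the same
# pattern as WebSearchTool in src/research_agent/tools.py - subclass BaseTool,
# declare name/description, and implement _run().
from langchain.tools import BaseTool


class WordCountTool(BaseTool):
    name: str = "word_count"
    description: str = "Count the words in a block of text supplied by the user."

    def _run(self, text: str) -> str:
        # Stdlib-only so the sketch stays self-contained.
        words = text.split()
        return f"The provided text contains {len(words)} words."


# get_tools() would then return the new tool alongside the existing ones, e.g.
# [WebSearchTool(), ResearchPaperTool(), WikipediaSearchTool(), DataAnalysisTool(), WordCountTool()]
```

Because `agent.py` binds whatever `get_tools()` returns (via `model.bind_tools(tools)` and `ToolNode(tools=tools)`), registering the tool there should be the only change needed for the model to start calling it.
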
README.md
CHANGED
@@ -10,3 +10,76 @@ short_description: agent tool chainlit demo
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

<p align = "center" draggable="false" ><img src="https://github.com/AI-Maker-Space/LLM-Dev-101/assets/37101144/d1343317-fa2f-41e1-8af1-1dbb18399719"
     width="200px"
     height="auto"/>
</p>

## <h1 align="center" id="heading">Session 5: Our First Agent with LangGraph</h1>

| 🤓 Pre-work | 📰 Session Sheet | ⏺️ Recording | 🖼️ Slides | 👨💻 Repo | 📝 Homework | 📁 Feedback |
|:-----------------|:-----------------|:-----------------|:-----------------|:-----------------|:-----------------|:-----------------|
| [Session 5: Pre-Work](https://www.notion.so/Session-5-Agents-with-LangGraph-1c8cd547af3d81068e44d4e4b901a9a8?pvs=4#1c8cd547af3d81578bedd1d2b11ab888)| [Session 5: Agents with LangGraph](https://www.notion.so/Session-5-Agents-with-LangGraph-1c8cd547af3d81068e44d4e4b901a9a8) | [Recording](https://us02web.zoom.us/rec/play/YvHRbOKYx8QDcTMwli7QjH-npGauB8wkk2gcN7ax7TV_oxQZbPRPdyxUebtH91uVQ8lRgCbP6u0iicmP.Vvroz4VC2XA7DILn?accessLevel=meeting&canPlayFromShare=true&from=my_recording&continueMode=true&componentName=rec-play&originRequestUrl=https%3A%2F%2Fus02web.zoom.us%2Frec%2Fshare%2F-fJk79tgwkAw3gJS0V69OeDvOUJ0EUE0qgOFey9-1uJPnL6oNT6vLmVygOWHl-JV.mYe1JWztYuHqsYWx) (ck*A3y%t) | [Session 5: Agents](https://www.canva.com/design/DAGjaRyDT1Y/Sy7YaHwHOc19gomlhpq7hw/edit?utm_content=DAGjaRyDT1Y&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton)| You Are Here!| [Session 5 Assignment: Agents with LangGraph](https://forms.gle/bA9BN2bgNLMNB9HXA)| [AIE6 Feedback 4/15](https://forms.gle/Fgb5K4PDKokvtX787)


In today's assignment, we'll be creating an Agentic LangChain RAG Application.

- 🤝 Breakout Room #1:
  1. Install required libraries
  2. Set Environment Variables
  3. Creating our Tool Belt
  4. Creating Our State
  5. Creating and Compiling A Graph!

- 🤝 Breakout Room #2:
  - Part 1: LangSmith Evaluator:
    1. Creating an Evaluation Dataset
    2. Adding Evaluators
  - Part 2:
    3. Adding Helpfulness Check and "Loop" Limits
    4. LangGraph for the "Patterns" of GenAI

### Advanced Build

You are tasked to create an agent with 3 tools that can research a specific domain of your choice.

You must deploy the resultant agent with a Chainlit (or Custom) frontend.

## Ship 🚢

The completed notebook!

### Deliverables

- A short Loom of the notebook, and a 1 min. walkthrough of the application in full

## Share 🚀

Make a social media post about your final application!

### Deliverables

- Make a post on any social media platform about what you built!

Here's a template to get you started:

```
🚀 Exciting News! 🚀

I am thrilled to announce that I have just built and shipped an Agentic Retrieval Augmented Generation Application with LangChain! 🎉🤖

🔍 Three Key Takeaways:
1️⃣
2️⃣
3️⃣

Let's continue pushing the boundaries of what's possible in the world of AI and question-answering. Here's to many more innovations! 🚀
Shout out to @AIMakerspace !

#LangChain #QuestionAnswering #RetrievalAugmented #Innovation #AI #TechMilestone

Feel free to reach out if you're curious or would like to collaborate on similar projects! 🤝🔥
```

> #### NOTE: PLEASE SHUT DOWN YOUR INSTANCES WHEN YOU HAVE COMPLETED THE ASSIGNMENT TO PREVENT UNNECESSARY CHARGES.

app.py
ADDED
@@ -0,0 +1,242 @@
import os
import sys
import chainlit as cl
from src.research_agent import run_agent, create_system_message
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)

# Set domain as global variable
DOMAIN = "General Research"
DEBUG_MODE = True  # Set to True to show detailed tool usage

# List of available domains
AVAILABLE_DOMAINS = [
    "General Research",
    "Computer Science",
    "Medicine",
    "Finance",
    "Climate Science",
    "Artificial Intelligence",
    "History",
    "Psychology",
    "Physics",
    "Biology"
]

@cl.on_chat_start
async def on_chat_start():
    """
    Initialize the chat session
    """
    # Set up the session state
    cl.user_session.set("messages", [create_system_message(DOMAIN)])
    cl.user_session.set("debug_mode", DEBUG_MODE)

    # Send a welcome message
    await cl.Message(
        content=f"Hello! I'm your research assistant specialized in {DOMAIN}. How can I help you?\n\n"
                f"I have access to the following tools:\n"
                f"1. 🔎 Web Search - For recent information and general queries\n"
                f"2. 📄 Research Papers - For academic and scientific knowledge\n"
                f"3. 📚 Wikipedia - For background information and factual summaries\n"
                f"4. 📊 Data Analysis - For analyzing data you provide\n\n"
                f"You can change the research domain by typing `/domain` followed by one of these options:\n"
                f"{', '.join(AVAILABLE_DOMAINS)}\n\n"
                f"You can toggle debug mode with `/debug on` or `/debug off` to see detailed tool usage.",
        author="Research Assistant"
    ).send()

@cl.on_settings_update
async def on_settings_update(settings):
    """
    Handle settings updates
    """
    global DOMAIN

    if "domain" in settings:
        DOMAIN = settings["domain"]
        # Reset messages with new domain
        cl.user_session.set("messages", [create_system_message(DOMAIN)])

        # Notify user of domain change
        await cl.Message(
            content=f"Domain changed to {DOMAIN}. My knowledge is now specialized for this domain.",
            author="System"
        ).send()

@cl.on_message
async def on_message(message: cl.Message):
    """
    Process incoming messages
    """
    global DOMAIN, DEBUG_MODE

    # Check for debug mode command
    if message.content.startswith("/debug"):
        command_parts = message.content.split()
        if len(command_parts) > 1:
            if command_parts[1].lower() == "on":
                DEBUG_MODE = True
                cl.user_session.set("debug_mode", True)
                await cl.Message(
                    content="Debug mode turned ON. You'll see detailed tool usage information.",
                    author="System"
                ).send()
            elif command_parts[1].lower() == "off":
                DEBUG_MODE = False
                cl.user_session.set("debug_mode", False)
                await cl.Message(
                    content="Debug mode turned OFF.",
                    author="System"
                ).send()
        else:
            current_state = "ON" if DEBUG_MODE else "OFF"
            await cl.Message(
                content=f"Debug mode is currently {current_state}. Use `/debug on` or `/debug off` to change.",
                author="System"
            ).send()
        return

    # Check for domain change command
    if message.content.startswith("/domain"):
        command_parts = message.content.split()
        if len(command_parts) == 1:
            # Display available domains
            domains_list = "\n".join([f"- {domain}" for domain in AVAILABLE_DOMAINS])
            await cl.Message(
                content=f"Please specify a domain. Available domains:\n{domains_list}\n\nExample: `/domain Artificial Intelligence`",
                author="System"
            ).send()
            return

        # Get the requested domain
        requested_domain = " ".join(command_parts[1:])

        # Check if it's in the available domains (case insensitive)
        found_domain = None
        for domain in AVAILABLE_DOMAINS:
            if domain.lower() == requested_domain.lower():
                found_domain = domain
                break

        if found_domain:
            DOMAIN = found_domain
            # Reset messages with new domain
            cl.user_session.set("messages", [create_system_message(DOMAIN)])

            # Notify user of domain change
            await cl.Message(
                content=f"Domain changed to {DOMAIN}. My knowledge is now specialized for this domain.",
                author="Research Assistant"
            ).send()
        else:
            # Domain not found
            domains_list = "\n".join([f"- {domain}" for domain in AVAILABLE_DOMAINS])
            await cl.Message(
                content=f"Domain '{requested_domain}' not found. Available domains:\n{domains_list}",
                author="System"
            ).send()
        return

    # Get current message history and debug mode setting
    messages = cl.user_session.get("messages")
    debug_mode = cl.user_session.get("debug_mode", DEBUG_MODE)

    # Add user message to history
    user_message = HumanMessage(content=message.content)

    # Create a temporary thinking message
    thinking = cl.Message(content="Researching your query...", author="Research Assistant")
    await thinking.send()

    try:
        # Call the agent
        response_messages = run_agent(
            user_input=message.content,
            domain=DOMAIN,
            messages=messages
        )

        # Update the thinking message
        await thinking.remove()

        # Track tool usage for summary
        tool_usage = []

        # Process and display messages
        for msg in response_messages:
            if not msg in messages:  # Only process new messages
                if isinstance(msg, AIMessage):
                    await cl.Message(
                        content=msg.content,
                        author="Research Assistant"
                    ).send()
                elif isinstance(msg, ToolMessage):
                    # Add to tool usage list
                    tool_usage.append(msg.name)

                    # Create elements for tool output
                    elements = []

                    # Format the tool output for better readability
                    formatted_content = msg.content
                    # Try to detect JSON and format it
                    if msg.content.strip().startswith('{') or msg.content.strip().startswith('['):
                        try:
                            import json
                            content_obj = json.loads(msg.content)
                            formatted_content = json.dumps(content_obj, indent=2)
                        except:
                            pass

                    # Add tool output as an element
                    elements.append(
                        cl.Text(
                            name=f"Tool Result: {msg.name}",
                            content=formatted_content,
                            display="inline"
                        )
                    )

                    # Only show detailed tool messages in debug mode
                    if debug_mode:
                        # Send tool message with more prominent styling
                        await cl.Message(
                            content=f"🔍 **TOOL CALL**: `{msg.name}`\n\nI used this tool to find information about your query.",
                            author="Research Assistant",
                            elements=elements
                        ).send()

        # Display tool usage summary if tools were used
        if tool_usage and debug_mode:
            tool_counts = {}
            for tool in tool_usage:
                if tool in tool_counts:
                    tool_counts[tool] += 1
                else:
                    tool_counts[tool] = 1

            summary = "📊 **RESEARCH SUMMARY**\n\nTo answer your question, I used:\n"
            for tool, count in tool_counts.items():
                # Add emoji based on tool type
                emoji = "🔎" if tool == "web_search" else "📄" if tool == "research_paper_search" else "📚" if tool == "wikipedia_search" else "📊"
                summary += f"- {emoji} `{tool}`: {count} time{'s' if count > 1 else ''}\n"

            await cl.Message(
                content=summary,
                author="System"
            ).send()

        # Update session with new messages
        cl.user_session.set("messages", response_messages)

    except Exception as e:
        await thinking.remove()
        await cl.Message(
            content=f"Sorry, I encountered an error: {str(e)}",
            author="System"
        ).send()

chainlit.md
ADDED
@@ -0,0 +1,46 @@
# Research Assistant Agent

This application is a research assistant powered by LangGraph and LangChain. It can help you find information on various topics using three specialized tools:

## Available Tools

1. **Web Search**: For general queries and recent information
2. **Research Paper Search**: For academic and scientific research papers
3. **Data Analysis**: For analyzing data sets you provide in JSON or CSV format

## How to Use

1. Choose your research domain in the settings panel
2. Ask any question related to your chosen domain
3. The agent will use the appropriate tools to research and answer your question

## Example Queries

- "What are the latest developments in quantum computing?"
- "Find research papers about climate change impacts on agriculture"
- "Analyze this data: [paste JSON or CSV]"

## Tips

- Be specific in your questions for better results
- For data analysis, make sure your data is properly formatted
- Change the domain setting to get more specialized answers for your topic

---
settings:
  - name: domain
    title: Research Domain
    description: Specialized domain for the research assistant
    type: select
    default: General Research
    options:
      - General Research
      - Computer Science
      - Medicine
      - Finance
      - Climate Science
      - Artificial Intelligence
      - History
      - Psychology
      - Physics
      - Biology

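The `settings:` block at the end of chainlit.md and the `@cl.on_settings_update` handler in app.py both assume a domain picker in the settings panel. In Chainlit that panel is normally declared from Python rather than from chainlit.md; the following is a minimal sketch of that wiring, assuming Chainlit's `ChatSettings` and `Select` input widget (this code is not part of the commit):

```python
# Hypothetical sketch, not part of this commit: declaring the settings panel
# that app.py's on_settings_update handler would react to.
import chainlit as cl
from chainlit.input_widget import Select

# Trimmed copy of the AVAILABLE_DOMAINS list defined in app.py.
AVAILABLE_DOMAINS = ["General Research", "Computer Science", "Medicine", "Finance"]


@cl.on_chat_start
async def send_settings():
    await cl.ChatSettings(
        [
            Select(
                id="domain",
                label="Research Domain",
                values=AVAILABLE_DOMAINS,
                initial_index=0,
            )
        ]
    ).send()
```

With a panel like this in place, picking a domain in the UI calls `on_settings_update(settings)` with `settings["domain"]`, which is what app.py already handles.
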
pyproject.toml
ADDED
@@ -0,0 +1,30 @@
[project]
name = "research-agent-langgraph"
version = "0.1.0"
description = "Research Agent with LangGraph and Chainlit"
readme = "PROJECT_README.md"
requires-python = ">=3.10"
dependencies = [
    "langchain==0.3.15",
    "langchain-community==0.3.15",
    "langchain-openai==0.3.2",
    "langgraph==0.2.67",
    "chainlit==2.5.5",
    "pandas>=2.0.0",
    "arxiv>=1.4",
    "tavily-python",
    "pydantic>=2.0.0",
    "typing-extensions>=4.5.0",
    "websockets>=11.0.0",
]

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[tool.setuptools]
package-dir = {"" = "src"}

[tool.uv]
pip = { only-binary = ["numpy"] }

run.py
ADDED
@@ -0,0 +1,78 @@
import os
import sys
from src.research_agent import run_agent
from langchain_core.messages import AIMessage, ToolMessage

def main():
    """
    Run the research agent from the command line
    """
    # Check if domain is provided
    if len(sys.argv) >= 2:
        domain = sys.argv[1]
    else:
        domain = "General Research"
        print(f"No domain specified, using default: {domain}")

    print(f"\n=== Research Assistant ({domain}) ===\n")
    print("Ask your question or type 'exit' to quit.\n")

    # Main interaction loop
    messages = None
    while True:
        # Get user input
        user_input = input("> ")

        # Check for exit command
        if user_input.lower() in ["exit", "quit", "q"]:
            print("\nGoodbye!")
            break

        try:
            # Run the agent
            print("\nResearching...\n")
            messages = run_agent(user_input, domain, messages)

            # Print AI response
            for message in messages[-3:]:  # Print only the last few messages
                if hasattr(message, "type") and message.type == "ai":
                    print(f"\nAssistant: {message.content}\n")
                elif hasattr(message, "type") and message.type == "tool" and hasattr(message, "name"):
                    print(f"\n[Tool: {message.name}]\n{message.content[:200]}...\n"
                          if len(message.content) > 200 else f"\n[Tool: {message.name}]\n{message.content}\n")
                elif hasattr(message, "content"):
                    if isinstance(message, AIMessage):
                        print(f"\nAssistant: {message.content}\n")
                    elif isinstance(message, ToolMessage) and hasattr(message, "name"):
                        print(f"\n[Tool: {message.name}]\n{message.content[:200]}...\n"
                              if len(message.content) > 200 else f"\n[Tool: {message.name}]\n{message.content}\n")
        except Exception as e:
            print(f"\nError: {str(e)}\n")
            print("Let's try again with a different question.")

if __name__ == "__main__":
    # Check for environment variables
    if not os.environ.get("OPENAI_API_KEY"):
        try:
            from dotenv import load_dotenv
            load_dotenv()
        except ImportError:
            pass

        if not os.environ.get("OPENAI_API_KEY"):
            api_key = input("Enter your OpenAI API key: ")
            os.environ["OPENAI_API_KEY"] = api_key

    if not os.environ.get("TAVILY_API_KEY"):
        if os.path.exists(".env"):
            try:
                from dotenv import load_dotenv
                load_dotenv()
            except ImportError:
                pass

        if not os.environ.get("TAVILY_API_KEY"):
            api_key = input("Enter your Tavily API key: ")
            os.environ["TAVILY_API_KEY"] = api_key

    main()

setup_and_run.sh
ADDED
@@ -0,0 +1,41 @@
#!/bin/bash

# Check if UV is installed
if ! command -v uv &> /dev/null; then
    echo "UV is not installed. Please install UV first."
    echo "You can install it using: curl -sSf https://install.ultraviolet.rs | sh"
    exit 1
fi

# Set up virtual environment if it doesn't exist
if [ ! -d ".venv" ]; then
    echo "Setting up virtual environment..."
    python -m venv .venv
fi

# Activate virtual environment
source .venv/bin/activate

# Install dependencies using UV
echo "Installing dependencies with UV from pyproject.toml..."
uv pip install -e .

# Check if .env file exists
if [ ! -f ".env" ]; then
    echo "Creating .env file..."

    echo "Enter your OpenAI API key:"
    read OPENAI_API_KEY

    echo "Enter your Tavily API key:"
    read TAVILY_API_KEY

    echo "OPENAI_API_KEY=$OPENAI_API_KEY" > .env
    echo "TAVILY_API_KEY=$TAVILY_API_KEY" >> .env

    echo ".env file created successfully!"
fi

# Run the application with chainlit
echo "Starting the Research Agent with Chainlit..."
chainlit run app.py -w

src/__init__.py
ADDED
@@ -0,0 +1,3 @@
"""Research Agent with LangGraph and Chainlit."""

__version__ = "0.1.0"

src/research_agent/__init__.py
ADDED
@@ -0,0 +1,3 @@
"""Research agent implementation with LangGraph."""

from .agent import run_agent, create_agent_graph, create_system_message

src/research_agent/agent.py
ADDED
@@ -0,0 +1,131 @@
import os
from typing import Dict, TypedDict, List, Annotated, Literal, Union, Any
from .tools import get_tools
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, START, END
import operator
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    FunctionMessage,
)
from langchain_core.tools import BaseTool, tool
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
import json

# State definition
class AgentState(TypedDict):
    messages: Annotated[list, add_messages]

# Initialize tools
tools = get_tools()

# System prompt
system_prompt = """You are an AI research assistant specialized in {domain}.
Your goal is to help users find accurate information about {domain} topics.

You have access to the following tools:
1. Web Search - For general queries and recent information
2. Research Paper Search - For academic and scientific information
3. Wikipedia Search - For comprehensive background information and factual summaries
4. Data Analysis - For analyzing data provided by the user

Choose the most appropriate tool(s) based on the user's question:
- Use Web Search for current events, recent developments, or general information
- Use Research Paper Search for academic knowledge, scientific findings, or technical details
- Use Wikipedia Search for conceptual explanations, definitions, historical context, or general facts
- Use Data Analysis when the user provides data to be analyzed

Always try to provide the most accurate and helpful information.
When responding, cite your sources appropriately."""

# Function to create the system message
def create_system_message(domain):
    return SystemMessage(content=system_prompt.format(domain=domain))

# Create the graph
def create_agent_graph(domain="general research"):
    """
    Create a LangGraph for the research agent using prebuilt components
    """
    # Initialize the graph with the state
    workflow = StateGraph(AgentState)

    # Add system message with domain context
    system_prompt_message = create_system_message(domain)

    # Agent node function
    def agent_node(state: AgentState):
        messages = state["messages"]
        if len(messages) == 0 or not isinstance(messages[0], SystemMessage):
            messages = [system_prompt_message] + messages

        # Create model and bind tools
        model = ChatOpenAI(model="gpt-4o", temperature=0)
        model_with_tools = model.bind_tools(tools)

        # Generate response with tools
        return {"messages": [model_with_tools.invoke(messages)]}

    # Add nodes
    workflow.add_node("agent", agent_node)

    # Use prebuilt ToolNode
    tool_node = ToolNode(tools=tools)
    workflow.add_node("tools", tool_node)

    # Add conditional edges using prebuilt tools_condition
    workflow.add_conditional_edges(
        "agent",
        tools_condition,
        {
            "tools": "tools",
            END: END
        }
    )

    # Add edge back to agent after tools execution
    workflow.add_edge("tools", "agent")

    # Set the entry point
    workflow.add_edge(START, "agent")

    # Compile the graph
    return workflow.compile()

# Function to run the agent
def run_agent(user_input, domain="general research", messages=None):
    """
    Run the agent with a user input
    """
    # Create the graph
    graph = create_agent_graph(domain)

    # Initialize messages if not provided
    if messages is None:
        messages = [HumanMessage(content=user_input)]
    else:
        messages.append(HumanMessage(content=user_input))

    # Run the graph
    result = graph.invoke({"messages": messages})

    return result["messages"]

if __name__ == "__main__":
    # Test the agent
    domain = "artificial intelligence"
    query = "What are the latest developments in natural language processing?"

    messages = run_agent(query, domain)
    for message in messages:
        if isinstance(message, AIMessage):
            print("AI:", message.content)
        elif isinstance(message, HumanMessage):
            print("Human:", message.content)
        elif isinstance(message, ToolMessage):
            print(f"Tool ({message.name}):", message.content[:100] + "..." if len(message.content) > 100 else message.content)

src/research_agent/tools.py
ADDED
@@ -0,0 +1,80 @@
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.tools.arxiv.tool import ArxivQueryRun
from langchain_community.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools import BaseTool
from typing import Optional, Type, Any, List, Dict
from pydantic import BaseModel, Field
import json
import pandas as pd
import requests

class WebSearchTool(BaseTool):
    name: str = "web_search"
    description: str = "Search the web for general information and current events. Use this for queries about recent developments or general topics."

    def _run(self, query: str) -> str:
        tavily_tool = TavilySearchResults(max_results=3)
        results = tavily_tool.invoke(query)
        return json.dumps(results, indent=2)

class ResearchPaperTool(BaseTool):
    name: str = "research_paper_search"
    description: str = "Search for academic research papers on a topic. Use this for scientific information and academic knowledge."

    def _run(self, query: str) -> str:
        arxiv_tool = ArxivQueryRun()
        results = arxiv_tool.invoke(query)
        return results

class WikipediaSearchTool(BaseTool):
    name: str = "wikipedia_search"
    description: str = "Search Wikipedia for comprehensive background information on a topic. Use this for factual summaries and foundational knowledge."

    def _run(self, query: str) -> str:
        wikipedia_tool = WikipediaQueryRun(top_k_results=3)
        results = wikipedia_tool.invoke(query)
        return results

class DataAnalysisInput(BaseModel):
    data: str = Field(..., description="JSON or CSV formatted data to analyze")
    analysis_type: str = Field(..., description="Type of analysis to perform (summary, trends, comparison)")

class DataAnalysisTool(BaseTool):
    name: str = "data_analysis"
    description: str = "Analyze data provided in JSON or CSV format. Can perform summary, trends, or comparison analysis."
    args_schema: Type[BaseModel] = DataAnalysisInput

    def _run(self, data: str, analysis_type: str) -> str:
        try:
            # Try to parse as JSON
            try:
                parsed_data = json.loads(data)
                df = pd.DataFrame(parsed_data)
            except:
                # Try as CSV
                import io
                df = pd.read_csv(io.StringIO(data))

            if analysis_type == "summary":
                return f"Summary Statistics:\n{df.describe().to_string()}"
            elif analysis_type == "trends":
                if len(df.columns) >= 2:
                    numeric_cols = df.select_dtypes(include=['number']).columns
                    if len(numeric_cols) >= 2:
                        return f"Correlation Analysis:\n{df[numeric_cols].corr().to_string()}"
                    return "Not enough numeric columns for trend analysis"
                return "Not enough columns for trend analysis"
            elif analysis_type == "comparison":
                return f"Column Comparison:\n{df.head(10).to_string()}"
            else:
                return f"Unknown analysis type: {analysis_type}"
        except Exception as e:
            return f"Error analyzing data: {str(e)}"

def get_tools():
    return [
        WebSearchTool(),
        ResearchPaperTool(),
        WikipediaSearchTool(),
        DataAnalysisTool()
    ]

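Since `DataAnalysisTool` is the only tool above that takes structured arguments, a quick way to sanity-check it is to invoke it directly with a small data sample. This is a usage sketch only; the sample CSV and the standalone invocation are assumptions, not part of this commit:

```python
# Hypothetical usage sketch, not part of this commit.
from src.research_agent.tools import DataAnalysisTool

tool = DataAnalysisTool()

# Small illustrative CSV; anything matching DataAnalysisInput would work.
sample_csv = "year,revenue\n2022,100\n2023,130\n2024,170"

# BaseTool.invoke accepts a dict matching the declared args_schema (DataAnalysisInput).
print(tool.invoke({"data": sample_csv, "analysis_type": "summary"}))
print(tool.invoke({"data": sample_csv, "analysis_type": "trends"}))
```

The same dict-shaped input is what the LangGraph `ToolNode` passes when the model emits a `data_analysis` tool call.
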
uv.lock
ADDED
The diff for this file is too large to render.
See raw diff