update
Browse files

- .env +5 -0
- MCP-HandsOn-KOR.ipynb +0 -701
- README_KOR.md +0 -232
- __pycache__/app.cpython-310.pyc +0 -0
- __pycache__/app.cpython-312.pyc +0 -0
- __pycache__/utils.cpython-310.pyc +0 -0
- __pycache__/utils.cpython-312.pyc +0 -0
- app.py +403 -16
- app_KOR.py +0 -848
- config.json +8 -0
- run.sh +1 -0
.env
ADDED
@@ -0,0 +1,5 @@
+ANTHROPIC_API_KEY=sk-0nEqu5ChgjT6aFweA12bC37d6f8f485eAd63848e4c57041d
+ANTHROPIC_BASE_URL=https://openai.sohoyo.io
+
+# OPENAI_API_KEY=sk-0nEqu5ChgjT6aFweA12bC37d6f8f485eAd63848e4c57041d
+# OPENAI_BASE_URL=https://openai.sohoyo.io/v1
MCP-HandsOn-KOR.ipynb
DELETED
@@ -1,701 +0,0 @@
# MCP + LangGraph Hands-On Tutorial

- Author: [TeddyNote](https://youtube.com/c/teddynote)
- Course: [FastCampus RAG Secret Note](https://fastcampus.co.kr/data_online_teddy)

**References**

- https://modelcontextprotocol.io/introduction
- https://github.com/langchain-ai/langchain-mcp-adapters

## Environment Setup

Install `uv` by following the instructions below.

**How to install uv**

```bash
# macOS/Linux
curl -LsSf https://astral.sh/uv/install.sh | sh

# Windows (PowerShell)
irm https://astral.sh/uv/install.ps1 | iex
```

**Installing dependencies**

```bash
uv pip install -r requirements.txt
```

Load the environment variables.

```python
from dotenv import load_dotenv

load_dotenv(override=True)
```

## MultiServerMCPClient

First, run the prepared `mcp_server_remote.py`. Open a terminal with the virtual environment activated and start the server.

> Command
```bash
source .venv/bin/activate
python mcp_server_remote.py
```

Create a temporary session connection with `async with`, use it, and release it.

```python
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from utils import ainvoke_graph, astream_graph
from langchain_anthropic import ChatAnthropic

model = ChatAnthropic(
    model_name="claude-3-7-sonnet-latest", temperature=0, max_tokens=20000
)

async with MultiServerMCPClient(
    {
        "weather": {
            # Must match the server's port (port 8005)
            "url": "http://localhost:8005/sse",
            "transport": "sse",
        }
    }
) as client:
    print(client.get_tools())
    agent = create_react_agent(model, client.get_tools())
    answer = await astream_graph(agent, {"messages": "How is the weather in Seoul?"})
```
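The `mcp_server_remote.py` file itself does not appear in this diff. As a rough orientation, an SSE server of this kind can be sketched with the `FastMCP` helper from the official `mcp` Python SDK; the tool name and the hard-coded response below are illustrative assumptions, not the actual file contents.

```python
# Hypothetical sketch of mcp_server_remote.py (not part of this diff).
# Assumes the FastMCP helper from the official `mcp` Python SDK.
from mcp.server.fastmcp import FastMCP

# Port 8005 matches the URL used by the client above.
mcp = FastMCP("Weather", host="0.0.0.0", port=8005)


@mcp.tool()
async def get_weather(location: str) -> str:
    """Return a (mock) weather description for the given location."""
    return f"It's always sunny in {location}"


if __name__ == "__main__":
    # Serve over SSE so clients can connect via http://localhost:8005/sse
    mcp.run(transport="sse")
```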
In the following case, you can confirm that the tools are no longer accessible because the session has been closed.

```python
await astream_graph(agent, {"messages": "How is the weather in Seoul?"})
```

Now let's switch to an approach that keeps the async session alive while accessing the tools.

```python
# 1. Create the client
client = MultiServerMCPClient(
    {
        "weather": {
            "url": "http://localhost:8005/sse",
            "transport": "sse",
        }
    }
)


# 2. Initialize the connection explicitly (this part is required)
await client.__aenter__()

# Tools are now loaded
print(client.get_tools())  # tools are displayed
```
159 |
-
{
|
160 |
-
"cell_type": "markdown",
|
161 |
-
"metadata": {},
|
162 |
-
"source": [
|
163 |
-
"langgraph ์ ์์ด์ ํธ๋ฅผ ์์ฑํฉ๋๋ค."
|
164 |
-
]
|
165 |
-
},
|
166 |
-
{
|
167 |
-
"cell_type": "code",
|
168 |
-
"execution_count": 5,
|
169 |
-
"metadata": {},
|
170 |
-
"outputs": [],
|
171 |
-
"source": [
|
172 |
-
"# ์์ด์ ํธ ์์ฑ\n",
|
173 |
-
"agent = create_react_agent(model, client.get_tools())"
|
174 |
-
]
|
175 |
-
},
|
176 |
-
{
|
177 |
-
"cell_type": "markdown",
|
178 |
-
"metadata": {},
|
179 |
-
"source": [
|
180 |
-
"๊ทธ๋ํ๋ฅผ ์คํํ์ฌ ๊ฒฐ๊ณผ๋ฅผ ํ์ธํฉ๋๋ค."
|
181 |
-
]
|
182 |
-
},
|
183 |
-
{
|
184 |
-
"cell_type": "code",
|
185 |
-
"execution_count": null,
|
186 |
-
"metadata": {},
|
187 |
-
"outputs": [],
|
188 |
-
"source": [
|
189 |
-
"await astream_graph(agent, {\"messages\": \"์์ธ์ ๋ ์จ๋ ์ด๋ ๋?\"})"
|
190 |
-
]
|
191 |
-
},
|
192 |
-
{
|
193 |
-
"cell_type": "markdown",
|
194 |
-
"metadata": {},
|
195 |
-
"source": [
|
196 |
-
"## Stdio ํต์ ๋ฐฉ์\n",
|
197 |
-
"\n",
|
198 |
-
"Stdio ํต์ ๋ฐฉ์์ ๋ก์ปฌ ํ๊ฒฝ์์ ์ฌ์ฉํ๊ธฐ ์ํด ์ฌ์ฉํฉ๋๋ค.\n",
|
199 |
-
"\n",
|
200 |
-
"- ํต์ ์ ์ํด ํ์ค ์
๋ ฅ/์ถ๋ ฅ ์ฌ์ฉ\n",
|
201 |
-
"\n",
|
202 |
-
"์ฐธ๊ณ : ์๋์ python ๊ฒฝ๋ก๋ ์์ ํ์ธ์!"
|
203 |
-
]
|
204 |
-
},
|
205 |
-
{
|
206 |
-
"cell_type": "code",
|
207 |
-
"execution_count": null,
|
208 |
-
"metadata": {},
|
209 |
-
"outputs": [],
|
210 |
-
"source": [
|
211 |
-
"from mcp import ClientSession, StdioServerParameters\n",
|
212 |
-
"from mcp.client.stdio import stdio_client\n",
|
213 |
-
"from langgraph.prebuilt import create_react_agent\n",
|
214 |
-
"from langchain_mcp_adapters.tools import load_mcp_tools\n",
|
215 |
-
"from langchain_anthropic import ChatAnthropic\n",
|
216 |
-
"\n",
|
217 |
-
"# Anthropic์ Claude ๋ชจ๋ธ ์ด๊ธฐํ\n",
|
218 |
-
"model = ChatAnthropic(\n",
|
219 |
-
" model_name=\"claude-3-7-sonnet-latest\", temperature=0, max_tokens=20000\n",
|
220 |
-
")\n",
|
221 |
-
"\n",
|
222 |
-
"# StdIO ์๋ฒ ํ๋ผ๋ฏธํฐ ์ค์ \n",
|
223 |
-
"# - command: Python ์ธํฐํ๋ฆฌํฐ ๊ฒฝ๋ก\n",
|
224 |
-
"# - args: ์คํํ MCP ์๋ฒ ์คํฌ๋ฆฝํธ\n",
|
225 |
-
"server_params = StdioServerParameters(\n",
|
226 |
-
" command=\"./.venv/bin/python\",\n",
|
227 |
-
" args=[\"mcp_server_local.py\"],\n",
|
228 |
-
")\n",
|
229 |
-
"\n",
|
230 |
-
"# StdIO ํด๋ผ์ด์ธํธ๋ฅผ ์ฌ์ฉํ์ฌ ์๋ฒ์ ํต์ \n",
|
231 |
-
"async with stdio_client(server_params) as (read, write):\n",
|
232 |
-
" # ํด๋ผ์ด์ธํธ ์ธ์
์์ฑ\n",
|
233 |
-
" async with ClientSession(read, write) as session:\n",
|
234 |
-
" # ์ฐ๊ฒฐ ์ด๊ธฐํ\n",
|
235 |
-
" await session.initialize()\n",
|
236 |
-
"\n",
|
237 |
-
" # MCP ๋๊ตฌ ๋ก๋\n",
|
238 |
-
" tools = await load_mcp_tools(session)\n",
|
239 |
-
" print(tools)\n",
|
240 |
-
"\n",
|
241 |
-
" # ์์ด์ ํธ ์์ฑ\n",
|
242 |
-
" agent = create_react_agent(model, tools)\n",
|
243 |
-
"\n",
|
244 |
-
" # ์์ด์ ํธ ์๋ต ์คํธ๋ฆฌ๋ฐ\n",
|
245 |
-
" await astream_graph(agent, {\"messages\": \"์์ธ์ ๋ ์จ๋ ์ด๋ ๋?\"})"
|
246 |
-
]
|
247 |
-
},
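`mcp_server_local.py` is likewise not part of this diff. Assuming the same `FastMCP` helper as above, a stdio variant only changes the final `run` call; the tool body below is illustrative:

```python
# Hypothetical sketch of mcp_server_local.py (not part of this diff).
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("Weather")


@mcp.tool()
async def get_weather(location: str) -> str:
    """Return a (mock) weather description for the given location."""
    return f"It's always sunny in {location}"


if __name__ == "__main__":
    # stdio transport: the client launches this script and talks over stdin/stdout
    mcp.run(transport="stdio")
```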
## Using an MCP Server That Serves RAG

- File: `mcp_server_rag.py`

We use the `mcp_server_rag.py` file built earlier with LangChain.

We fetch the tool information over the stdio transport. The tool here is the `retriever` tool defined in `mcp_server_rag.py`. This file does **not** need to be started on a server beforehand.

```python
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langgraph.prebuilt import create_react_agent
from langchain_anthropic import ChatAnthropic
from utils import astream_graph

# Initialize Anthropic's Claude model
model = ChatAnthropic(
    model_name="claude-3-7-sonnet-latest", temperature=0, max_tokens=20000
)

# Configure the stdio server parameters for the RAG server
server_params = StdioServerParameters(
    command="./.venv/bin/python",
    args=["./mcp_server_rag.py"],
)

# Communicate with the RAG server using the stdio client
async with stdio_client(server_params) as (read, write):
    # Create a client session
    async with ClientSession(read, write) as session:
        # Initialize the connection
        await session.initialize()

        # Load the MCP tools (here, the retriever tool)
        tools = await load_mcp_tools(session)

        # Create and run the agent
        agent = create_react_agent(model, tools)

        # Stream the agent's response
        await astream_graph(
            agent,
            {"messages": "Search for the name of the generative AI developed by Samsung Electronics"},
        )
```
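`mcp_server_rag.py` is also not included in this diff. A minimal sketch of how a LangChain retriever could be exposed as an MCP `retriever` tool, assuming a prebuilt FAISS index (the index path and embedding model are placeholders):

```python
# Hypothetical sketch of mcp_server_rag.py (not part of this diff).
# Assumes a FAISS index built with LangChain; paths/models are placeholders.
from mcp.server.fastmcp import FastMCP
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

mcp = FastMCP("Retriever")

# Load a prebuilt index and turn it into a retriever
vectorstore = FAISS.load_local(
    "./faiss_index", OpenAIEmbeddings(), allow_dangerous_deserialization=True
)
retriever = vectorstore.as_retriever()


@mcp.tool()
async def retrieve(query: str) -> str:
    """Search the indexed documents and return the matching passages."""
    docs = retriever.invoke(query)
    return "\n\n".join(doc.page_content for doc in docs)


if __name__ == "__main__":
    mcp.run(transport="stdio")
```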
## Mixing the SSE and Stdio Transports

- File: `mcp_server_rag.py` communicates over stdio
- `langchain-dev-docs` communicates over SSE

We use the SSE and stdio transports together.

```python
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from langchain_anthropic import ChatAnthropic

# Initialize Anthropic's Claude model
model = ChatAnthropic(
    model_name="claude-3-7-sonnet-latest", temperature=0, max_tokens=20000
)

# 1. Create a multi-server MCP client
client = MultiServerMCPClient(
    {
        "document-retriever": {
            "command": "./.venv/bin/python",
            # Update to the absolute path of the mcp_server_rag.py file
            "args": ["./mcp_server_rag.py"],
            # Communicate over stdio (uses standard input/output)
            "transport": "stdio",
        },
        "langchain-dev-docs": {
            # Make sure the SSE server is running
            "url": "https://teddynote.io/mcp/langchain/sse",
            # Communicate over SSE (Server-Sent Events)
            "transport": "sse",
        },
    }
)


# 2. Initialize the connection explicitly through the async context manager
await client.__aenter__()
```

Create the agent with langgraph's `create_react_agent`.

```python
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.runnables import RunnableConfig

prompt = (
    "You are a smart agent. "
    "Use `retriever` tool to search on AI related documents and answer questions."
    "Use `langchain-dev-docs` tool to search on langchain / langgraph related documents and answer questions."
    "Answer in Korean."
)
agent = create_react_agent(
    model, client.get_tools(), prompt=prompt, checkpointer=MemorySaver()
)
```

Perform a search with the `retriever` tool defined in the `mcp_server_rag.py` we built.

```python
config = RunnableConfig(recursion_limit=30, thread_id=1)
await astream_graph(
    agent,
    {
        "messages": "Use the `retriever` tool to search for the name of the generative AI developed by Samsung Electronics"
    },
    config=config,
)
```

This time, perform a search with the `langchain-dev-docs` tool.

```python
config = RunnableConfig(recursion_limit=30, thread_id=1)
await astream_graph(
    agent,
    {"messages": "Consult langgraph-dev-docs and tell me about the definition of self-rag"},
    config=config,
)
```

`MemorySaver` maintains short-term memory, so multi-turn conversation is possible.

```python
await astream_graph(
    agent, {"messages": "Summarize the previous content as bullet points"}, config=config
)
```

## LangChain-Integrated Tools + MCP Tools

Here we test whether tools already integrated into LangChain can be used together with the existing MCP-only tools.

```python
from langchain_community.tools.tavily_search import TavilySearchResults

# Initialize the Tavily search tool (news topic, news from the last 3 days)
tavily = TavilySearchResults(max_results=3, topic="news", days=3)

# Use it together with the existing MCP tools
tools = client.get_tools() + [tavily]
```

Create the agent with langgraph's `create_react_agent`.

```python
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.runnables import RunnableConfig

# Configure the recursion limit and thread id
config = RunnableConfig(recursion_limit=30, thread_id=2)

# Configure the prompt
prompt = "You are a smart agent with various tools. Answer questions in Korean."

# Create the agent
agent = create_react_agent(model, tools, prompt=prompt, checkpointer=MemorySaver())
```

Perform a search with the newly added `tavily` tool.

```python
await astream_graph(agent, {"messages": "Find today's news"}, config=config)
```

You can confirm that the `retriever` tool also works smoothly.

```python
await astream_graph(
    agent,
    {
        "messages": "Use the `retriever` tool to search for the name of the generative AI developed by Samsung Electronics"
    },
    config=config,
)
```

## MCP Servers Provided by Smithery

- Link: https://smithery.ai/

The tools used are listed below.

- Sequential Thinking: https://smithery.ai/server/@smithery-ai/server-sequential-thinking
  - An MCP server that provides tools for dynamic and reflective problem-solving through a structured thinking process
- Desktop Commander: https://smithery.ai/server/@wonderwhy-er/desktop-commander
  - Run terminal commands and manage files with diverse editing capabilities. Coding, shell and terminal, task automation

**Note**

- When importing tools provided by Smithery in JSON format, you must set `"transport": "stdio"` as in the example below.

```python
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from langchain_anthropic import ChatAnthropic

# Initialize the LLM
model = ChatAnthropic(model="claude-3-7-sonnet-latest", temperature=0, max_tokens=20000)

# 1. Create the client
client = MultiServerMCPClient(
    {
        "server-sequential-thinking": {
            "command": "npx",
            "args": [
                "-y",
                "@smithery/cli@latest",
                "run",
                "@smithery-ai/server-sequential-thinking",
                "--key",
                "89a4780a-53b7-4b7b-92e9-a29815f2669b",
            ],
            "transport": "stdio",  # communicate over stdio
        },
        "desktop-commander": {
            "command": "npx",
            "args": [
                "-y",
                "@smithery/cli@latest",
                "run",
                "@wonderwhy-er/desktop-commander",
                "--key",
                "89a4780a-53b7-4b7b-92e9-a29815f2669b",
            ],
            "transport": "stdio",  # communicate over stdio
        },
        "document-retriever": {
            "command": "./.venv/bin/python",
            # Update to the absolute path of the mcp_server_rag.py file
            "args": ["./mcp_server_rag.py"],
            # Communicate over stdio (uses standard input/output)
            "transport": "stdio",
        },
    }
)


# 2. Initialize the connection explicitly
await client.__aenter__()
```

Create the agent with langgraph's `create_react_agent`.

```python
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.runnables import RunnableConfig

config = RunnableConfig(recursion_limit=30, thread_id=3)
agent = create_react_agent(model, client.get_tools(), checkpointer=MemorySaver())
```

Run a terminal command with the `Desktop Commander` tool.

```python
await astream_graph(
    agent,
    {
        "messages": "Draw the folder structure, including the current path, as a tree. Exclude the .venv folder from the output."
    },
    config=config,
)
```

This time, check whether the `Sequential Thinking` tool can handle a relatively complex task.

```python
await astream_graph(
    agent,
    {
        "messages": (
            "Use the `retriever` tool to search for content about the generative AI developed by Samsung Electronics, "
            "then use the `Sequential Thinking` tool to write a report."
        )
    },
    config=config,
)
```

(The notebook metadata records kernelspec `.venv` / python3, Python 3.12.8, nbformat 4.)
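The notebook imports `ainvoke_graph` and `astream_graph` from a local `utils` module that does not appear in this diff. A minimal sketch of what such a streaming helper might look like (the `stream_mode` choice and print formatting are assumptions; the real `utils.py` may differ):

```python
# Hypothetical sketch of the astream_graph helper from utils.py (not in this diff).
from typing import Any, Optional

from langchain_core.runnables import RunnableConfig


async def astream_graph(
    graph: Any,
    inputs: dict,
    config: Optional[RunnableConfig] = None,
) -> dict[str, Any]:
    """Stream a LangGraph run and print each update chunk as it arrives."""
    final_result: dict[str, Any] = {}
    async for chunk in graph.astream(inputs, config=config, stream_mode="updates"):
        # `chunk` maps the node name that just ran to its output
        for node_name, node_output in chunk.items():
            print(f"[{node_name}] {node_output}")
            final_result = {"node": node_name, "content": node_output}
    return final_result
```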
README_KOR.md
DELETED
@@ -1,232 +0,0 @@

# LangGraph Agents + MCP

[](README.md) [](README_KOR.md)

[](https://github.com/teddylee777/langgraph-mcp-agents)
[](https://opensource.org/licenses/MIT)
[](https://www.python.org/)
[](https://github.com/teddylee777/langgraph-mcp-agents)

## Project Overview

`LangChain-MCP-Adapters` is a toolkit provided by **LangChain AI** that lets AI agents interact with external tools and data sources through the Model Context Protocol (MCP). This project provides a user-friendly interface for deploying ReAct agents that can access a variety of data sources and APIs through MCP tools.

### Features

- **Streamlit interface**: a user-friendly web interface for interacting with a LangGraph `ReAct Agent` equipped with MCP tools
- **Tool management**: add, remove, and configure MCP tools through the UI (Smithery JSON format supported), dynamically and without restarting the application
- **Streaming responses**: view agent responses and tool calls in real time
- **Conversation history**: track and manage conversations with the agent

## MCP Architecture

MCP (Model Context Protocol) consists of three main components:

1. **MCP hosts**: programs that want to access data through MCP, such as Claude Desktop, IDEs, or LangChain/LangGraph.

2. **MCP clients**: protocol clients that maintain a 1:1 connection with a server, acting as intermediaries between the host and the server.

3. **MCP servers**: lightweight programs that expose specific capabilities through the standardized Model Context Protocol and act as the primary data sources.

## Quick Start with Docker

You can run this project easily with Docker without setting up a local Python environment.

### Requirements (Docker Desktop)

Install Docker Desktop from the link below:

- [Install Docker Desktop](https://www.docker.com/products/docker-desktop/)

### Running with Docker Compose

1. Move into the `dockers` directory

```bash
cd dockers
```

2. Create a `.env` file containing your API keys in the project root directory.

```bash
cp .env.example .env
```

Enter the API keys you obtained into the `.env` file.

(Note) Not all API keys are required. Enter them only when needed.
- `ANTHROPIC_API_KEY`: if an Anthropic API key is entered, the "claude-3-7-sonnet-latest", "claude-3-5-sonnet-latest", and "claude-3-haiku-latest" models are used.
- `OPENAI_API_KEY`: if an OpenAI API key is entered, the "gpt-4o" and "gpt-4o-mini" models are used.
- `LANGSMITH_API_KEY`: if a LangSmith API key is entered, LangSmith tracing is used.

```bash
ANTHROPIC_API_KEY=your_anthropic_api_key
OPENAI_API_KEY=your_openai_api_key
LANGSMITH_API_KEY=your_langsmith_api_key
LANGSMITH_PROJECT=LangGraph-MCP-Agents
LANGSMITH_TRACING=true
LANGSMITH_ENDPOINT=https://api.smith.langchain.com
```

(New feature) Using the login/logout feature

To use the login feature, set `USE_LOGIN` to `true` and enter `USER_ID` and `USER_PASSWORD`.

```bash
USE_LOGIN=true
USER_ID=admin
USER_PASSWORD=admin123
```

If you do not want to use the login feature, set `USE_LOGIN` to `false`.

```bash
USE_LOGIN=false
```

3. Select the Docker Compose file that matches your system architecture.

**AMD64/x86_64 architecture (Intel/AMD processors)**

```bash
# Run the container
docker compose -f docker-compose-KOR.yaml up -d
```

**ARM64 architecture (Apple Silicon M1/M2/M3/M4)**

```bash
# Run the container
docker compose -f docker-compose-KOR-mac.yaml up -d
```

4. Access the application in your browser at http://localhost:8585

(Note)
- If you need to modify the port or other settings, edit the corresponding docker-compose-KOR.yaml file before building.

## Installing Directly from Source

1. Clone this repository

```bash
git clone https://github.com/teddynote-lab/langgraph-mcp-agents.git
cd langgraph-mcp-agents
```

2. Create a virtual environment and install the dependencies with uv

```bash
uv venv
uv pip install -r requirements.txt
source .venv/bin/activate  # On Windows: .venv\Scripts\activate
```

3. Create a `.env` file containing your API keys (copy it from `.env.example`)

```bash
cp .env.example .env
```

Enter the API keys you obtained into the `.env` file.

(Note) Not all API keys are required. Enter them only when needed.
- `ANTHROPIC_API_KEY`: if an Anthropic API key is entered, the "claude-3-7-sonnet-latest", "claude-3-5-sonnet-latest", and "claude-3-haiku-latest" models are used.
- `OPENAI_API_KEY`: if an OpenAI API key is entered, the "gpt-4o" and "gpt-4o-mini" models are used.
- `LANGSMITH_API_KEY`: if a LangSmith API key is entered, LangSmith tracing is used.

```bash
ANTHROPIC_API_KEY=your_anthropic_api_key
OPENAI_API_KEY=your_openai_api_key(optional)
LANGSMITH_API_KEY=your_langsmith_api_key
LANGSMITH_PROJECT=LangGraph-MCP-Agents
LANGSMITH_TRACING=true
LANGSMITH_ENDPOINT=https://api.smith.langchain.com
```

4. (New feature) Using the login/logout feature

To use the login feature, set `USE_LOGIN` to `true` and enter `USER_ID` and `USER_PASSWORD`.

```bash
USE_LOGIN=true
USER_ID=admin
USER_PASSWORD=admin123
```

If you do not want to use the login feature, set `USE_LOGIN` to `false`.

```bash
USE_LOGIN=false
```

## Usage

1. Start the Streamlit application. (The Korean-language version is `app_KOR.py`.)

```bash
streamlit run app_KOR.py
```

2. The application runs in the browser and displays the main interface.

3. Use the sidebar to add and configure MCP tools (a sample tool JSON is sketched after this list).

Visit [Smithery](https://smithery.ai/) to find useful MCP servers.

First, select the tool you want to use.

Press the COPY button in the JSON configuration on the right.

Paste the copied JSON string into the `Tool JSON` section.

<img src="./assets/add-tools.png" alt="tool json" style="width: auto; height: auto;">

Press the `Add Tool` button to add it to the "Registered Tools List" section.

Finally, press the "Apply" button to apply the changes and reinitialize the agent with the new tools.

<img src="./assets/apply-tool-configuration.png" alt="tool json" style="width: auto; height: auto;">

4. Check the agent's status.

5. Interact with the ReAct agent that uses the configured MCP tools by asking questions in the chat interface.
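The `Tool JSON` pasted in step 3 uses the same server-configuration shape as the hands-on notebook. A minimal illustrative entry (the server name and script path are placeholders, not a specific recommended configuration):

```json
{
  "document-retriever": {
    "command": "./.venv/bin/python",
    "args": ["./mcp_server_rag.py"],
    "transport": "stdio"
  }
}
```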
## Hands-On Tutorial

For developers who want to dive deeper into how MCP and LangGraph integrate, we provide a comprehensive Jupyter notebook tutorial:

- Link: [MCP-HandsOn-KOR.ipynb](./MCP-HandsOn-KOR.ipynb)

This hands-on tutorial covers the following topics:

1. **MCP client setup** - how to configure and initialize the MultiServerMCPClient for connecting to MCP servers
2. **Local MCP server integration** - connecting to locally running MCP servers via the SSE and stdio methods
3. **RAG integration** - accessing a retriever tool through MCP for document search capabilities
4. **Mixed transport methods** - combining different transport protocols (SSE and stdio) in a single agent
5. **LangChain tools + MCP** - integrating native LangChain tools alongside MCP tools

The tutorial provides practical examples with step-by-step explanations that help you understand how to build and integrate MCP tools into LangGraph agents.

## License

MIT License

## Watch the Tutorial Video (Korean)

[](https://youtu.be/ISrYHGg2C2c?si=eWmKFVUS1BLtPm5U)

## References

- https://github.com/langchain-ai/langchain-mcp-adapters
__pycache__/app.cpython-310.pyc
ADDED
Binary file (21.6 kB).

__pycache__/app.cpython-312.pyc
ADDED
Binary file (38.6 kB).

__pycache__/utils.cpython-310.pyc
ADDED
Binary file (6.32 kB).

__pycache__/utils.cpython-312.pyc
ADDED
Binary file (10.5 kB).

app.py
CHANGED

@@ -52,10 +52,14 @@ def load_config_from_json():
         }
     }
 
+
+
     try:
         if os.path.exists(CONFIG_FILE_PATH):
             with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
-
+                config = json.load(f)
+
+                return config
         else:
             # Create file with default settings if it doesn't exist
             save_config_to_json(default_config)

@@ -186,8 +190,8 @@ Guidelines:
 OUTPUT_TOKEN_INFO = {
     "claude-3-5-sonnet-latest": {"max_tokens": 8192},
     "claude-3-5-haiku-latest": {"max_tokens": 8192},
-    "claude-3-
-    "gpt-4o": {"max_tokens": 16000},
+    "claude-3-5-sonnet-20241022": {"max_tokens": 64000},
+    "gpt-4o": {"max_tokens": 4096},  # 16000},
     "gpt-4o-mini": {"max_tokens": 16000},
 }
 
@@ -198,10 +202,10 @@ if "session_initialized" not in st.session_state:
     st.session_state.history = []  # List for storing conversation history
     st.session_state.mcp_client = None  # Storage for MCP client object
     st.session_state.timeout_seconds = (
-
+        30000  # Response generation time limit (seconds), default 120 seconds
     )
     st.session_state.selected_model = (
-        "claude-3-
+        "claude-3-5-sonnet-20241022"  # Default model selection
     )
     st.session_state.recursion_limit = 100  # Recursion call limit, default 100
 
@@ -230,6 +234,9 @@ async def cleanup_mcp_client():
         # st.warning(traceback.format_exc())
 
 
+
+
+
 def print_message():
     """
     Displays chat history on the screen.

@@ -272,6 +279,7 @@ def get_streaming_callback(text_placeholder, tool_placeholder):
 
     This function creates a callback function to display responses generated from the LLM in real-time.
     It displays text responses and tool call information in separate areas.
+    It also supports real-time streaming updates from MCP tools.
 
     Args:
         text_placeholder: Streamlit component to display text responses

@@ -288,9 +296,26 @@ def get_streaming_callback(text_placeholder, tool_placeholder):
     def callback_func(message: dict):
         nonlocal accumulated_text, accumulated_tool
         message_content = message.get("content", None)
+
+        # Initialize data counter for tracking data: messages
+        if not hasattr(callback_func, '_data_counter'):
+            callback_func._data_counter = 0
+
+        # Initialize persistent storage for all processed data
+        if not hasattr(callback_func, '_persistent_data'):
+            callback_func._persistent_data = []
+            callback_func._persistent_data.append("🚀 **Session Started** - All data will be preserved\n")
+            callback_func._persistent_data.append("---\n")
+
+
+
+
 
         if isinstance(message_content, AIMessageChunk):
             content = message_content.content
+
+
+
             # If content is in list form (mainly occurs in Claude models)
             if isinstance(content, list) and len(content) > 0:
                 message_chunk = content[0]

@@ -320,12 +345,16 @@ def get_streaming_callback(text_placeholder, tool_placeholder):
                 ):
                     tool_call_info = message_content.tool_calls[0]
                     accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
+
+
+
                     with tool_placeholder.expander(
                         "🔧 Tool Call Information", expanded=True
                     ):
                         st.markdown("".join(accumulated_tool))
                 # Process if content is a simple string
                 elif isinstance(content, str):
+                    # Regular text content
                     accumulated_text.append(content)
                     text_placeholder.markdown("".join(accumulated_text))
                 # Process if invalid tool call information exists

@@ -345,9 +374,22 @@ def get_streaming_callback(text_placeholder, tool_placeholder):
                 and message_content.tool_call_chunks
             ):
                 tool_call_chunk = message_content.tool_call_chunks[0]
+                tool_name = tool_call_chunk.get('name', 'Unknown')
+
+                # Only show tool call info if it's a new tool or has meaningful changes
+                if not hasattr(callback_func, '_last_tool_name') or callback_func._last_tool_name != tool_name:
+                    accumulated_tool.append(
+                        f"\n🔧 **Tool Call**: {tool_name}\n"
+                    )
+                    callback_func._last_tool_name = tool_name
+
+                # Show tool call details in a more compact format
                 accumulated_tool.append(
-                    "
+                    f"```json\n{str(tool_call_chunk)}\n```\n"
                 )
+
+
+
                 with tool_placeholder.expander(
                     "🔧 Tool Call Information", expanded=True
                 ):

@@ -359,17 +401,330 @@
                 ):
                     tool_call_info = message_content.additional_kwargs["tool_calls"][0]
                     accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
+
+
+
                     with tool_placeholder.expander(
                         "🔧 Tool Call Information", expanded=True
                     ):
                         st.markdown("".join(accumulated_tool))
             # Process if it's a tool message (tool response)
             elif isinstance(message_content, ToolMessage):
-
-
-                )
-
-
+                # Don't show Tool Completed immediately - wait for all streaming content
+                # Just store the tool name for later display
+                if not hasattr(callback_func, '_pending_tool_completion'):
+                    callback_func._pending_tool_completion = []
+                callback_func._pending_tool_completion.append(message_content.name or "Unknown Tool")
+
+                # Convert streaming text to final result
+                streaming_text_items = [item for item in accumulated_tool if item.startswith("\n📝 **Streaming Text**:")]
+                if streaming_text_items:
+                    # Get the last streaming text (most complete)
+                    last_streaming = streaming_text_items[-1]
+                    # Extract the text content
+                    final_text = last_streaming.replace("\n📝 **Streaming Text**: ", "").strip()
+                    if final_text:
+                        # Remove all streaming text entries
+                        accumulated_tool = [item for item in accumulated_tool if not item.startswith("\n📝 **Streaming Text**:")]
+                        # Add the final complete result
+                        accumulated_tool.append(f"\n📊 **Final Result**: {final_text}\n")
+
+                # Handle tool response content
+                tool_content = message_content.content
+
+
+                # Handle tool response content
+                if isinstance(tool_content, str):
+                    # Look for SSE data patterns
+                    if "data:" in tool_content:
+                        # Parse SSE data and extract meaningful content
+                        lines = tool_content.split('\n')
+                        for line in lines:
+                            line = line.strip()
+                            if line.startswith('data:'):
+                                # Increment data counter for each data: message
+                                callback_func._data_counter += 1
+
+                                try:
+                                    # Extract JSON content from SSE data
+                                    json_str = line[5:].strip()  # Remove 'data:' prefix
+                                    if json_str:
+                                        # Try to parse as JSON
+                                        import json
+                                        try:
+                                            data_obj = json.loads(json_str)
+                                            if isinstance(data_obj, dict):
+                                                # Handle different types of SSE data
+                                                if data_obj.get("type") == "result":
+                                                    content = data_obj.get("content", "")
+                                                    if content:
+                                                        # Check for specific server output formats
+                                                        if "```bdd-long-task-start" in content:
+                                                            # Extract task info
+                                                            import re
+                                                            match = re.search(r'```bdd-long-task-start\s*\n(.*?)\n```', content, re.DOTALL)
+                                                            if match:
+                                                                try:
+                                                                    task_info = json.loads(match.group(1))
+                                                                    task_id = task_info.get('id', 'Unknown')
+                                                                    task_label = task_info.get('label', 'Unknown task')
+                                                                    accumulated_tool.append(f"\n🚀 **Task Started** [{task_id}]: {task_label}\n")
+                                                                except:
+                                                                    accumulated_tool.append(f"\n🚀 **Task Started**: {content}\n")
+                                                            # Real-time UI update for task start
+                                                            with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                                # Show data counter at the top
+                                                                st.markdown(f"**📊 Total Data Messages: {callback_func._data_counter}**")
+                                                                st.markdown("---")
+                                                                st.markdown("".join(accumulated_tool))
+                                                        elif "```bdd-long-task-end" in content:
+                                                            # Extract task info
+                                                            import re
+                                                            match = re.search(r'```bdd-long-task-end\s*\n(.*?)\n```', content, re.DOTALL)
+                                                            if match:
+                                                                try:
+                                                                    task_info = json.loads(match.group(1))
+                                                                    task_id = task_info.get('id', 'Unknown')
+                                                                    accumulated_tool.append(f"\n✅ **Task Completed** [{task_id}]\n")
+                                                                except:
+                                                                    accumulated_tool.append(f"\n✅ **Task Completed**: {content}\n")
+                                                            # Real-time UI update for task completion
+                                                            with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                                # Show data counter at the top
+                                                                st.markdown(f"**📊 Total Data Messages: {callback_func._data_counter}**")
+                                                                st.markdown("---")
+                                                                st.markdown("".join(accumulated_tool))
+                                                        elif "```bdd-resource-lookup" in content:
+                                                            # Extract resource info
+                                                            import re
+                                                            match = re.search(r'```bdd-resource-lookup\s*\n(.*?)\n```', content, re.DOTALL)
+                                                            if match:
+                                                                try:
+                                                                    resources = json.loads(match.group(1))
+                                                                    if isinstance(resources, list):
+                                                                        accumulated_tool.append(f"\n📚 **Resources Found**: {len(resources)} items\n")
+                                                                        for i, resource in enumerate(resources[:3]):  # Show first 3
+                                                                            source = resource.get('source', 'Unknown')
+                                                                            doc_id = resource.get('docId', 'Unknown')
+                                                                            citation = resource.get('citation', '')
+                                                                            accumulated_tool.append(f"  - {source}: {doc_id} [citation:{citation}]\n")
+                                                                        if len(resources) > 3:
+                                                                            accumulated_tool.append(f"  ... and {len(resources) - 3} more\n")
+                                                                except:
+                                                                    accumulated_tool.append(f"\n📚 **Resources**: {content}\n")
+                                                            # Real-time UI update for resources
+                                                            with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                                # Show data counter at the top
+                                                                st.markdown(f"**📊 Total Data Messages: {callback_func._data_counter}**")
+                                                                st.markdown("---")
+                                                                st.markdown("".join(accumulated_tool))
+                                                        elif "```bdd-chat-agent-task" in content:
+                                                            # Extract chat agent task info
+                                                            import re
+                                                            match = re.search(r'```bdd-chat-agent-task\s*\n(.*?)\n```', content, re.DOTALL)
+                                                            if match:
+                                                                try:
+                                                                    task_info = json.loads(match.group(1))
+                                                                    task_type = task_info.get('type', 'Unknown')
+                                                                    task_label = task_info.get('label', 'Unknown')
+                                                                    task_status = task_info.get('status', 'Unknown')
+                                                                    accumulated_tool.append(f"\n🤖 **Agent Task** [{task_status}]: {task_type} - {task_label}\n")
+                                                                except:
+                                                                    accumulated_tool.append(f"\n🤖 **Agent Task**: {content}\n")
+                                                        elif "ping - " in content:
+                                                            # Extract timestamp from ping messages
+                                                            timestamp = content.split("ping - ")[-1]
+                                                            accumulated_tool.append(f"⏱️ **Progress Update**: {timestamp}\n")
+                                                        elif data_obj.get("type") == "done":
+                                                            # Task completion
+                                                            accumulated_tool.append(f"\n🎯 **Task Done**: {content}\n")
+                                                        else:
+                                                            # Regular result content - accumulate text for better readability
+                                                            if not hasattr(callback_func, '_result_buffer'):
+                                                                callback_func._result_buffer = ""
+                                                            callback_func._result_buffer += content
+
+                                                            # For simple text streams (like health check or mock mock), update more frequently
+                                                            # Check if this is a simple text response (not BDD format)
+                                                            is_simple_text = not any(marker in content for marker in ['```bdd-', 'ping -', 'data:'])
+
+                                                            # For simple text streams, always update immediately to show all fragments
+                                                            if is_simple_text and content.strip():
+                                                                # Clear previous streaming text entries and add updated one
+                                                                accumulated_tool = [item for item in accumulated_tool if not item.startswith("\n📝 **Streaming Text**:")]
+
+                                                                # Add the updated complete streaming text in one line
+                                                                accumulated_tool.append(f"\n📝 **Streaming Text**: {callback_func._result_buffer}\n")
+
+                                                                # Immediate UI update for text streams
+                                                                with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                                    st.markdown("".join(accumulated_tool))
+                                                            else:
+                                                                # For complex content, use timed updates
+                                                                update_interval = 0.2 if len(content.strip()) <= 10 else 0.5
+
+                                                                # Only update display periodically to avoid excessive updates
+                                                                if not hasattr(callback_func, '_last_update_time'):
+                                                                    callback_func._last_update_time = 0
+
+                                                                import time
+                                                                current_time = time.time()
+                                                                if current_time - callback_func._last_update_time > update_interval:
+                                                                    # For complex content, show accumulated buffer
+                                                                    accumulated_tool.append(f"\n📊 **Result Update**:\n")
+                                                                    accumulated_tool.append(f"```\n{callback_func._result_buffer}\n```\n")
+                                                                    callback_func._last_update_time = current_time
+
+                                                                    # Real-time UI update
+                                                                    with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                                        st.markdown("".join(accumulated_tool))
+                                                else:
+                                                    # Handle other data types that are not "result" type
+                                                    # This ensures ALL data: messages are processed and displayed
+                                                    data_type = data_obj.get("type", "unknown")
+                                                    data_content = data_obj.get("content", str(data_obj))
+
+                                                    # Add timestamp for real-time tracking
+                                                    import time
+                                                    timestamp = time.strftime("%H:%M:%S")
+
+                                                    # Format the data for display
+                                                    data_entry = ""
+                                                    if isinstance(data_content, str):
+                                                        data_entry = f"\n📡 **Data [{data_type}]** [{timestamp}]: {data_content}\n"
+                                                    else:
+                                                        data_entry = f"\n📡 **Data [{data_type}]** [{timestamp}]:\n```json\n{json.dumps(data_obj, indent=2)}\n```\n"
+
+                                                    # Add to both temporary and persistent storage
+                                                    accumulated_tool.append(data_entry)
+                                                    callback_func._persistent_data.append(data_entry)
+
+                                                    # Immediate real-time UI update for any data: message
+                                                    with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                        # Show data counter at the top
+                                                        st.markdown(f"**📊 Total Data Messages: {callback_func._data_counter}**")
+                                                        st.markdown("---")
+                                                        # Show persistent data first, then current accumulated data
+                                                        st.markdown("".join(callback_func._persistent_data))
+                                                        st.markdown("---")
+                                                        st.markdown("**📥 Current Stream:**")
+                                                        st.markdown("".join(accumulated_tool))
+                                            else:
+                                                # Handle non-dict data objects
+                                                import time
+                                                timestamp = time.strftime("%H:%M:%S")
+                                                data_entry = f"\n📡 **Raw Data** [{timestamp}]:\n```json\n{json_str}\n```\n"
+
+                                                # Add to both temporary and persistent storage
+                                                accumulated_tool.append(data_entry)
+                                                callback_func._persistent_data.append(data_entry)
+
+                                                # Immediate real-time UI update
+                                                with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
+                                                    # Show data counter at the top
+                                                    st.markdown(f"**📊 Total Data Messages: {callback_func._data_counter}**")
+                                                    st.markdown("---")
+                                                    # Show persistent data first, then current accumulated data
+                                                    st.markdown("".join(callback_func._persistent_data))
+                                                    st.markdown("---")
+                                                    st.markdown("**📥 Current Stream:**")
+                                                    st.markdown("".join(accumulated_tool))
+                                        except json.JSONDecodeError:
+                                            # If not valid JSON, check if it's streaming text content
+                                            if json_str and len(json_str.strip()) > 0:
+                                                # This might be streaming text, accumulate it
+                                                if not hasattr(callback_func, '_stream_buffer'):
+                                                    callback_func._stream_buffer = ""
+                                                callback_func._stream_buffer += json_str
+
+                                                # Only show streaming content periodically
+                                                if not hasattr(callback_func, '_stream_update_time'):
+                                                    callback_func._stream_update_time = 0
         return None
 
     return callback_func, accumulated_text, accumulated_tool

@@ -412,8 +767,37 @@ async def process_query(query, text_placeholder, tool_placeholder, timeout_secon
                 timeout=timeout_seconds,
             )
         except asyncio.TimeoutError:
-
             return {"error": error_msg}, error_msg, ""

@@ -448,6 +832,7 @@ async def initialize_session(mcp_config=None):
         if mcp_config is None:
             # Load settings from config.json file
             mcp_config = load_config_from_json()
         client = MultiServerMCPClient(mcp_config)
         await client.__aenter__()
         tools = client.get_tools()

@@ -458,7 +843,7 @@ async def initialize_session(mcp_config=None):
         selected_model = st.session_state.selected_model
 
         if selected_model in [
-            "claude-3-
             "claude-3-5-sonnet-latest",
             "claude-3-5-haiku-latest",
         ]:

@@ -469,6 +854,7 @@ async def initialize_session(mcp_config=None):
             )
         else:  # Use OpenAI model
             model = ChatOpenAI(
                 model=selected_model,
                 temperature=0.1,
                 max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],

@@ -497,7 +883,7 @@ with st.sidebar:
     if has_anthropic_key:
         available_models.extend(
             [
-                "claude-3-
                 "claude-3-5-sonnet-latest",
                 "claude-3-5-haiku-latest",
             ]

@@ -514,7 +900,7 @@ with st.sidebar:
             "⚠️ API keys are not configured. Please add ANTHROPIC_API_KEY or OPENAI_API_KEY to your .env file."
         )
         # Add Claude model as default (to show UI even without keys)
-        available_models = ["claude-3-
 
     # Model selection dropdown
     previous_model = st.session_state.selected_model

@@ -542,7 +928,7 @@ with st.sidebar:
     st.session_state.timeout_seconds = st.slider(
         "⏱️ Response generation time limit (seconds)",
         min_value=60,
-        max_value=
         value=st.session_state.timeout_seconds,
         step=10,
         help="Set the maximum time for the agent to generate a response. Complex tasks may require more time.",

@@ -655,6 +1041,7 @@ with st.sidebar:
                     st.info(
                         f"URL detected in '{tool_name}' tool, setting transport to 'sse'."
                     )
                 elif "transport" not in tool_config:
                     # Set default "stdio" if URL doesn't exist and transport isn't specified
                     tool_config["transport"] = "stdio"
|
643 |
+
|
644 |
+
import time
|
645 |
+
current_time = time.time()
|
646 |
+
if current_time - callback_func._stream_update_time > 0.3: # Update every 0.3 seconds for better responsiveness
|
647 |
+
# Add new streaming update without clearing previous ones
|
648 |
+
if callback_func._stream_buffer.strip():
|
649 |
+
accumulated_tool.append(f"\n๐ **Streaming Update**: {callback_func._stream_buffer}\n")
|
650 |
+
callback_func._stream_update_time = current_time
|
651 |
+
|
652 |
+
# Real-time UI update
|
653 |
+
with tool_placeholder.expander("๐ง Tool Call Information", expanded=True):
|
654 |
+
st.markdown("".join(accumulated_tool))
|
655 |
+
else:
|
656 |
+
# Handle empty or whitespace-only data
|
657 |
+
import time
|
658 |
+
timestamp = time.strftime("%H:%M:%S")
|
659 |
+
accumulated_tool.append(f"\n๐ก **Empty Data** [{timestamp}]: (empty or whitespace)\n")
|
660 |
+
|
661 |
+
# Immediate real-time UI update
|
662 |
+
with tool_placeholder.expander("๐ง Tool Call Information", expanded=True):
|
663 |
+
st.markdown("".join(accumulated_tool))
|
664 |
+
except Exception as e:
|
665 |
+
# Fallback: treat as plain text, but only if it's meaningful
|
666 |
+
import time
|
667 |
+
timestamp = time.strftime("%H:%M:%S")
|
668 |
+
if line.strip() and len(line.strip()) > 1: # Only show non-trivial content
|
669 |
+
accumulated_tool.append(f"\n๐ **Info** [{timestamp}]: {line.strip()}\n")
|
670 |
+
else:
|
671 |
+
accumulated_tool.append(f"\nโ ๏ธ **Error** [{timestamp}]: {str(e)}\n")
|
672 |
+
|
673 |
+
# Immediate real-time UI update for error cases
|
674 |
+
with tool_placeholder.expander("๐ง Tool Call Information", expanded=True):
|
675 |
+
st.markdown("".join(accumulated_tool))
|
676 |
+
elif line.startswith('ping - '):
|
677 |
+
# Handle ping messages directly
|
678 |
+
timestamp = line.split('ping - ')[-1]
|
679 |
+
accumulated_tool.append(f"โฑ๏ธ **Progress Update**: {timestamp}\n")
|
680 |
+
|
681 |
+
# Immediate real-time UI update for ping messages
|
682 |
+
with tool_placeholder.expander("๐ง Tool Call Information", expanded=True):
|
683 |
+
st.markdown("".join(accumulated_tool))
|
684 |
+
elif line and not line.startswith(':'):
|
685 |
+
# Other non-empty lines - capture any other data patterns
|
686 |
+
import time
|
687 |
+
timestamp = time.strftime("%H:%M:%S")
|
688 |
+
|
689 |
+
# Check if this line contains any meaningful data
|
690 |
+
if line.strip() and len(line.strip()) > 1:
|
691 |
+
# Try to detect if it's JSON-like content
|
692 |
+
if line.strip().startswith('{') or line.strip().startswith('['):
|
693 |
+
try:
|
694 |
+
# Try to parse as JSON for better formatting
|
695 |
+
import json
|
696 |
+
parsed_json = json.loads(line.strip())
|
697 |
+
accumulated_tool.append(f"\n๐ก **JSON Data** [{timestamp}]:\n```json\n{json.dumps(parsed_json, indent=2)}\n```\n")
|
698 |
+
except:
|
699 |
+
# If not valid JSON, show as regular data
|
700 |
+
accumulated_tool.append(f"\n๐ก **Data** [{timestamp}]: {line.strip()}\n")
|
701 |
+
else:
|
702 |
+
# Regular text data
|
703 |
+
accumulated_tool.append(f"\n๐ **Info** [{timestamp}]: {line.strip()}\n")
|
704 |
+
|
705 |
+
# Immediate real-time UI update for any captured data
|
706 |
+
with tool_placeholder.expander("๐ง Tool Call Information", expanded=True):
|
707 |
+
st.markdown("".join(accumulated_tool))
|
708 |
+
else:
|
709 |
+
# Regular tool response content
|
710 |
+
accumulated_tool.append(
|
711 |
+
"\n```json\n" + str(tool_content) + "\n```\n"
|
712 |
+
)
|
713 |
+
else:
|
714 |
+
# Non-string content
|
715 |
+
accumulated_tool.append(
|
716 |
+
"\n```json\n" + str(tool_content) + "\n```\n"
|
717 |
+
)
|
718 |
+
|
719 |
+
# Show pending tool completion status after all streaming content
|
720 |
+
if hasattr(callback_func, '_pending_tool_completion') and callback_func._pending_tool_completion:
|
721 |
+
for tool_name in callback_func._pending_tool_completion:
|
722 |
+
accumulated_tool.append(f"\nโ
**Tool Completed**: {tool_name}\n")
|
723 |
+
# Clear the pending list
|
724 |
+
callback_func._pending_tool_completion = []
|
725 |
+
|
726 |
+
|
727 |
+
|
728 |
return None
|
729 |
|
730 |
return callback_func, accumulated_text, accumulated_tool
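The callback above consumes a server-sent-events style stream, branching on `data:` payloads, `ping - <timestamp>` heartbeats, fenced `bdd-*` blocks, and plain text fragments. A minimal sketch of that line classification in isolation; the line formats and JSON fields are taken from the handlers above, while the exact server contract is an assumption:

```python
import json

def classify_sse_line(line: str) -> tuple[str, object]:
    """Classify one line of the tool's SSE-style stream, mirroring the
    branches in the callback above. Returns a (kind, payload) pair."""
    if line.startswith("data: "):
        body = line[len("data: "):]
        try:
            obj = json.loads(body)
        except json.JSONDecodeError:
            return ("stream_text", body)        # partial streaming text fragment
        if isinstance(obj, dict) and obj.get("type") == "result":
            return ("result", obj.get("content", ""))
        return ("data", obj)                     # any other data: message
    if line.startswith("ping - "):
        return ("ping", line.split("ping - ")[-1])  # heartbeat timestamp
    if line and not line.startswith(":"):
        return ("info", line.strip())            # other non-comment lines
    return ("skip", None)                        # blank lines and SSE comments

# Example: the heartbeat branch extracts only the timestamp.
assert classify_sse_line("ping - 12:00:01") == ("ping", "12:00:01")
```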
@@ app.py lines 767-800 @@
                 timeout=timeout_seconds,
             )
         except asyncio.TimeoutError:
+            # On timeout, reset thread to avoid leaving an incomplete tool call in memory
+            st.session_state.thread_id = random_uuid()
+            error_msg = (
+                f"⏱️ Request time exceeded {timeout_seconds} seconds. Conversation was reset. Please retry."
+            )
             return {"error": error_msg}, error_msg, ""
+        except ValueError as e:
+            # Handle invalid chat history caused by incomplete tool calls
+            if "Found AIMessages with tool_calls" in str(e):
+                # Reset thread and retry once
+                st.session_state.thread_id = random_uuid()
+                try:
+                    response = await asyncio.wait_for(
+                        astream_graph(
+                            st.session_state.agent,
+                            {"messages": [HumanMessage(content=query)]},
+                            callback=streaming_callback,
+                            config=RunnableConfig(
+                                recursion_limit=st.session_state.recursion_limit,
+                                thread_id=st.session_state.thread_id,
+                            ),
+                        ),
+                        timeout=timeout_seconds,
+                    )
+                except Exception:
+                    error_msg = (
+                        "⚠️ Conversation state was invalid and has been reset. Please try again."
+                    )
+                    return {"error": error_msg}, error_msg, ""
+            else:
+                raise

@@ app.py lines 802-803 @@
         final_text = "".join(accumulated_text_obj)
         final_tool = "".join(accumulated_tool_obj)

@@ app.py lines 832-838 @@
         if mcp_config is None:
             # Load settings from config.json file
             mcp_config = load_config_from_json()
+
         client = MultiServerMCPClient(mcp_config)
         await client.__aenter__()
         tools = client.get_tools()

@@ app.py lines 843-849 @@
         selected_model = st.session_state.selected_model

         if selected_model in [
+            "claude-3-5-sonnet-20241022",
             "claude-3-5-sonnet-latest",
             "claude-3-5-haiku-latest",
         ]:

@@ app.py lines 854-860 @@
             )
         else:  # Use OpenAI model
             model = ChatOpenAI(
+                base_url=os.environ.get("OPENAI_API_BASE"),
                 model=selected_model,
                 temperature=0.1,
                 max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],

@@ app.py lines 883-889 @@
     if has_anthropic_key:
         available_models.extend(
             [
+                "claude-3-5-sonnet-20241022",
                 "claude-3-5-sonnet-latest",
                 "claude-3-5-haiku-latest",
             ]

@@ app.py lines 900-906 @@
             "⚠️ API keys are not configured. Please add ANTHROPIC_API_KEY or OPENAI_API_KEY to your .env file."
         )
         # Add Claude model as default (to show UI even without keys)
+        available_models = ["claude-3-5-sonnet-20241022"]

     # Model selection dropdown
     previous_model = st.session_state.selected_model

@@ app.py lines 928-934 @@
     st.session_state.timeout_seconds = st.slider(
         "⏱️ Response generation time limit (seconds)",
         min_value=60,
+        max_value=300000,
         value=st.session_state.timeout_seconds,
         step=10,
         help="Set the maximum time for the agent to generate a response. Complex tasks may require more time.",

@@ app.py lines 1041-1047 @@
                     st.info(
                         f"URL detected in '{tool_name}' tool, setting transport to 'sse'."
                     )
+
                 elif "transport" not in tool_config:
                     # Set default "stdio" if URL doesn't exist and transport isn't specified
                     tool_config["transport"] = "stdio"
app_KOR.py
DELETED
@@ -1,848 +0,0 @@
-import streamlit as st
-import asyncio
-import nest_asyncio
-import json
-import os
-import platform
-
-if platform.system() == "Windows":
-    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
-
-# Apply nest_asyncio: allow nested calls inside an already-running event loop
-nest_asyncio.apply()
-
-# Create and reuse a global event loop (create once, keep using it)
-if "event_loop" not in st.session_state:
-    loop = asyncio.new_event_loop()
-    st.session_state.event_loop = loop
-    asyncio.set_event_loop(loop)
-
-from langgraph.prebuilt import create_react_agent
-from langchain_anthropic import ChatAnthropic
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage
-from dotenv import load_dotenv
-from langchain_mcp_adapters.client import MultiServerMCPClient
-from utils import astream_graph, random_uuid
-from langchain_core.messages.ai import AIMessageChunk
-from langchain_core.messages.tool import ToolMessage
-from langgraph.checkpoint.memory import MemorySaver
-from langchain_core.runnables import RunnableConfig
-
-# Load environment variables (API keys and other settings from the .env file)
-load_dotenv(override=True)
-
-# Path of the config.json file
-CONFIG_FILE_PATH = "config.json"
-
-# Function to load settings from the JSON config file
-def load_config_from_json():
-    """
-    Loads settings from the config.json file.
-    Creates the file with default settings if it does not exist.
-
-    Returns:
-        dict: loaded settings
-    """
-    default_config = {
-        "get_current_time": {
-            "command": "python",
-            "args": ["./mcp_server_time.py"],
-            "transport": "stdio"
-        }
-    }
-
-    try:
-        if os.path.exists(CONFIG_FILE_PATH):
-            with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
-                return json.load(f)
-        else:
-            # Create the file with default settings if it does not exist
-            save_config_to_json(default_config)
-            return default_config
-    except Exception as e:
-        st.error(f"Error while loading the config file: {str(e)}")
-        return default_config
-
-# Function to save settings to the JSON config file
-def save_config_to_json(config):
-    """
-    Saves settings to the config.json file.
-
-    Parameters:
-        config (dict): settings to save
-
-    Returns:
-        bool: whether the save succeeded
-    """
-    try:
-        with open(CONFIG_FILE_PATH, "w", encoding="utf-8") as f:
-            json.dump(config, f, indent=2, ensure_ascii=False)
-        return True
-    except Exception as e:
-        st.error(f"Error while saving the config file: {str(e)}")
-        return False
-
-# Initialize login session variables
-if "authenticated" not in st.session_state:
-    st.session_state.authenticated = False
-
-# Check whether login is required
-use_login = os.environ.get("USE_LOGIN", "false").lower() == "true"
-
-# Change page settings depending on login state
-if use_login and not st.session_state.authenticated:
-    # The login page uses the default (narrow) layout
-    st.set_page_config(page_title="Agent with MCP Tools", page_icon="🧠")
-else:
-    # The main app uses the wide layout
-    st.set_page_config(page_title="Agent with MCP Tools", page_icon="🧠", layout="wide")
-
-# Show the login screen if login is enabled and the user is not yet authenticated
-if use_login and not st.session_state.authenticated:
-    st.title("🔐 Login")
-    st.markdown("Login is required to use the system.")
-
-    # Place the login form narrowly in the center of the screen
-    with st.form("login_form"):
-        username = st.text_input("Username")
-        password = st.text_input("Password", type="password")
-        submit_button = st.form_submit_button("Login")
-
-        if submit_button:
-            expected_username = os.environ.get("USER_ID")
-            expected_password = os.environ.get("USER_PASSWORD")
-
-            if username == expected_username and password == expected_password:
-                st.session_state.authenticated = True
-                st.success("✅ Login successful! Please wait a moment...")
-                st.rerun()
-            else:
-                st.error("❌ The username or password is incorrect.")
-
-    # Do not show the main app on the login screen
-    st.stop()
-
-# Add author info at the very top of the sidebar (placed before other sidebar elements)
-st.sidebar.markdown("### ✍️ Made by [TeddyNote](https://youtube.com/c/teddynote) 🚀")
-st.sidebar.markdown(
-    "### 💻 [Project Page](https://github.com/teddynote-lab/langgraph-mcp-agents)"
-)
-
-st.sidebar.divider()  # Add a divider
-
-# Existing page title and description
-st.title("💬 Agent with MCP Tools")
-st.markdown("✨ Ask questions to the ReAct agent that uses MCP tools.")
-
-SYSTEM_PROMPT = """<ROLE>
-You are a smart agent with an ability to use tools.
-You will be given a question and you will use the tools to answer the question.
-Pick the most relevant tool to answer the question.
-If you are failed to answer the question, try different tools to get context.
-Your answer should be very polite and professional.
-</ROLE>
-
-----
-
-<INSTRUCTIONS>
-Step 1: Analyze the question
-- Analyze user's question and final goal.
-- If the user's question is consist of multiple sub-questions, split them into smaller sub-questions.
-
-Step 2: Pick the most relevant tool
-- Pick the most relevant tool to answer the question.
-- If you are failed to answer the question, try different tools to get context.
-
-Step 3: Answer the question
-- Answer the question in the same language as the question.
-- Your answer should be very polite and professional.
-
-Step 4: Provide the source of the answer(if applicable)
-- If you've used the tool, provide the source of the answer.
-- Valid sources are either a website(URL) or a document(PDF, etc).
-
-Guidelines:
-- If you've used the tool, your answer should be based on the tool's output(tool's output is more important than your own knowledge).
-- If you've used the tool, and the source is valid URL, provide the source(URL) of the answer.
-- Skip providing the source if the source is not URL.
-- Answer in the same language as the question.
-- Answer should be concise and to the point.
-- Avoid response your output with any other information than the answer and the source.
-</INSTRUCTIONS>
-
-----
-
-<OUTPUT_FORMAT>
-(concise answer to the question)
-
-**Source**(if applicable)
-- (source1: valid URL)
-- (source2: valid URL)
-- ...
-</OUTPUT_FORMAT>
-"""
-
-OUTPUT_TOKEN_INFO = {
-    "claude-3-5-sonnet-latest": {"max_tokens": 8192},
-    "claude-3-5-haiku-latest": {"max_tokens": 8192},
-    "claude-3-7-sonnet-latest": {"max_tokens": 64000},
-    "gpt-4o": {"max_tokens": 16000},
-    "gpt-4o-mini": {"max_tokens": 16000},
-}
-
-# Initialize session state
-if "session_initialized" not in st.session_state:
-    st.session_state.session_initialized = False  # Session initialization flag
-    st.session_state.agent = None  # Storage for the ReAct agent object
-    st.session_state.history = []  # List for storing conversation history
-    st.session_state.mcp_client = None  # Storage for the MCP client object
-    st.session_state.timeout_seconds = 120  # Response generation time limit (seconds), default 120
-    st.session_state.selected_model = "claude-3-7-sonnet-latest"  # Default model selection
-    st.session_state.recursion_limit = 100  # Recursion call limit, default 100
-
-if "thread_id" not in st.session_state:
-    st.session_state.thread_id = random_uuid()
-
-
-# --- Function definitions ---
-
-
-async def cleanup_mcp_client():
-    """
-    Safely terminates the existing MCP client.
-
-    Properly releases resources if an existing client exists.
-    """
-    if "mcp_client" in st.session_state and st.session_state.mcp_client is not None:
-        try:
-
-            await st.session_state.mcp_client.__aexit__(None, None, None)
-            st.session_state.mcp_client = None
-        except Exception as e:
-            import traceback
-
-            # st.warning(f"Error while terminating the MCP client: {str(e)}")
-            # st.warning(traceback.format_exc())
-
-
-def print_message():
-    """
-    Prints the chat history to the screen.
-
-    Displays user and assistant messages separately on the screen,
-    and shows tool call information inside the assistant message container.
-    """
-    i = 0
-    while i < len(st.session_state.history):
-        message = st.session_state.history[i]
-
-        if message["role"] == "user":
-            st.chat_message("user", avatar="🧑‍💻").markdown(message["content"])
-            i += 1
-        elif message["role"] == "assistant":
-            # Create the assistant message container
-            with st.chat_message("assistant", avatar="🤖"):
-                # Display the assistant message content
-                st.markdown(message["content"])
-
-                # Check whether the next message is tool call information
-                if (
-                    i + 1 < len(st.session_state.history)
-                    and st.session_state.history[i + 1]["role"] == "assistant_tool"
-                ):
-                    # Display the tool call information in an expander within the same container
-                    with st.expander("🔧 Tool Call Information", expanded=False):
-                        st.markdown(st.session_state.history[i + 1]["content"])
-                    i += 2  # Two messages were handled together, so increment by 2
-                else:
-                    i += 1  # Only a regular message was handled, so increment by 1
-        else:
-            # Skip assistant_tool messages because they are handled above
-            i += 1
-
-
-def get_streaming_callback(text_placeholder, tool_placeholder):
-    """
-    Creates a streaming callback function.
-
-    This function creates a callback that displays responses generated by the
-    LLM on the screen in real time. It displays text responses and tool call
-    information in separate areas.
-
-    Parameters:
-        text_placeholder: Streamlit component for displaying text responses
-        tool_placeholder: Streamlit component for displaying tool call information
-
-    Returns:
-        callback_func: streaming callback function
-        accumulated_text: list storing the accumulated text response
-        accumulated_tool: list storing the accumulated tool call information
-    """
-    accumulated_text = []
-    accumulated_tool = []
-
-    def callback_func(message: dict):
-        nonlocal accumulated_text, accumulated_tool
-        message_content = message.get("content", None)
-
-        if isinstance(message_content, AIMessageChunk):
-            content = message_content.content
-            # If the content is a list (mostly with Claude models)
-            if isinstance(content, list) and len(content) > 0:
-                message_chunk = content[0]
-                # Handle text type
-                if message_chunk["type"] == "text":
-                    accumulated_text.append(message_chunk["text"])
-                    text_placeholder.markdown("".join(accumulated_text))
-                # Handle tool use type
-                elif message_chunk["type"] == "tool_use":
-                    if "partial_json" in message_chunk:
-                        accumulated_tool.append(message_chunk["partial_json"])
-                    else:
-                        tool_call_chunks = message_content.tool_call_chunks
-                        tool_call_chunk = tool_call_chunks[0]
-                        accumulated_tool.append(
-                            "\n```json\n" + str(tool_call_chunk) + "\n```\n"
-                        )
-                    with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
-                        st.markdown("".join(accumulated_tool))
-            # Handle the tool_calls attribute (mostly with OpenAI models)
-            elif (
-                hasattr(message_content, "tool_calls")
-                and message_content.tool_calls
-                and len(message_content.tool_calls[0]["name"]) > 0
-            ):
-                tool_call_info = message_content.tool_calls[0]
-                accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
-                with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
-                    st.markdown("".join(accumulated_tool))
-            # Handle plain string content
-            elif isinstance(content, str):
-                accumulated_text.append(content)
-                text_placeholder.markdown("".join(accumulated_text))
-            # Handle invalid tool call information
-            elif (
-                hasattr(message_content, "invalid_tool_calls")
-                and message_content.invalid_tool_calls
-            ):
-                tool_call_info = message_content.invalid_tool_calls[0]
-                accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
-                with tool_placeholder.expander(
-                    "🔧 Tool Call Information (Invalid)", expanded=True
-                ):
-                    st.markdown("".join(accumulated_tool))
-            # Handle the tool_call_chunks attribute
-            elif (
-                hasattr(message_content, "tool_call_chunks")
-                and message_content.tool_call_chunks
-            ):
-                tool_call_chunk = message_content.tool_call_chunks[0]
-                accumulated_tool.append(
-                    "\n```json\n" + str(tool_call_chunk) + "\n```\n"
-                )
-                with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
-                    st.markdown("".join(accumulated_tool))
-            # Handle tool_calls in additional_kwargs (for compatibility with various models)
-            elif (
-                hasattr(message_content, "additional_kwargs")
-                and "tool_calls" in message_content.additional_kwargs
-            ):
-                tool_call_info = message_content.additional_kwargs["tool_calls"][0]
-                accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
-                with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
-                    st.markdown("".join(accumulated_tool))
-        # Handle tool messages (tool responses)
-        elif isinstance(message_content, ToolMessage):
-            accumulated_tool.append(
-                "\n```json\n" + str(message_content.content) + "\n```\n"
-            )
-            with tool_placeholder.expander("🔧 Tool Call Information", expanded=True):
-                st.markdown("".join(accumulated_tool))
-        return None
-
-    return callback_func, accumulated_text, accumulated_tool
-
-
-async def process_query(query, text_placeholder, tool_placeholder, timeout_seconds=60):
-    """
-    Processes the user question and generates a response.
-
-    This function passes the user's question to the agent and streams the
-    response to the screen in real time. Returns a timeout error if the
-    response is not completed within the given time.
-
-    Parameters:
-        query: the question text entered by the user
-        text_placeholder: Streamlit component for displaying text responses
-        tool_placeholder: Streamlit component for displaying tool call information
-        timeout_seconds: response generation time limit (seconds)
-
-    Returns:
-        response: the agent's response object
-        final_text: the final text response
-        final_tool: the final tool call information
-    """
-    try:
-        if st.session_state.agent:
-            streaming_callback, accumulated_text_obj, accumulated_tool_obj = (
-                get_streaming_callback(text_placeholder, tool_placeholder)
-            )
-            try:
-                response = await asyncio.wait_for(
-                    astream_graph(
-                        st.session_state.agent,
-                        {"messages": [HumanMessage(content=query)]},
-                        callback=streaming_callback,
-                        config=RunnableConfig(
-                            recursion_limit=st.session_state.recursion_limit,
-                            thread_id=st.session_state.thread_id,
-                        ),
-                    ),
-                    timeout=timeout_seconds,
-                )
-            except asyncio.TimeoutError:
-                error_msg = f"⏱️ The request exceeded {timeout_seconds} seconds. Please try again later."
-                return {"error": error_msg}, error_msg, ""
-
-            final_text = "".join(accumulated_text_obj)
-            final_tool = "".join(accumulated_tool_obj)
-            return response, final_text, final_tool
-        else:
-            return (
-                {"error": "🚫 The agent has not been initialized."},
-                "🚫 The agent has not been initialized.",
-                "",
-            )
-    except Exception as e:
-        import traceback
-
-        error_msg = f"❌ Error occurred while processing the query: {str(e)}\n{traceback.format_exc()}"
-        return {"error": error_msg}, error_msg, ""
-
-
-async def initialize_session(mcp_config=None):
-    """
-    Initializes the MCP session and agent.
-
-    Parameters:
-        mcp_config: MCP tool configuration (JSON). Uses the default settings if None
-
-    Returns:
-        bool: whether initialization succeeded
-    """
-    with st.spinner("🔄 Connecting to the MCP server..."):
-        # First, safely clean up the existing client
-        await cleanup_mcp_client()
-
-        if mcp_config is None:
-            # Load settings from the config.json file
-            mcp_config = load_config_from_json()
-        client = MultiServerMCPClient(mcp_config)
-        await client.__aenter__()
-        tools = client.get_tools()
-        st.session_state.tool_count = len(tools)
-        st.session_state.mcp_client = client
-
-        # Initialize the appropriate model according to the selected model
-        selected_model = st.session_state.selected_model
-
-        if selected_model in [
-            "claude-3-7-sonnet-latest",
-            "claude-3-5-sonnet-latest",
-            "claude-3-5-haiku-latest",
-        ]:
-            model = ChatAnthropic(
-                model=selected_model,
-                temperature=0.1,
-                max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
-            )
-        else:  # Use OpenAI model
-            model = ChatOpenAI(
-                model=selected_model,
-                temperature=0.1,
-                max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
-            )
-        agent = create_react_agent(
-            model,
-            tools,
-            checkpointer=MemorySaver(),
-            prompt=SYSTEM_PROMPT,
-        )
-        st.session_state.agent = agent
-        st.session_state.session_initialized = True
-        return True
-
-
-# --- Sidebar: system settings section ---
-with st.sidebar:
-    st.subheader("⚙️ System Settings")
-
-    # Model selection feature
-    # Build the list of available models
-    available_models = []
-
-    # Check the Anthropic API key
-    has_anthropic_key = os.environ.get("ANTHROPIC_API_KEY") is not None
-    if has_anthropic_key:
-        available_models.extend(
-            [
-                "claude-3-7-sonnet-latest",
-                "claude-3-5-sonnet-latest",
-                "claude-3-5-haiku-latest",
-            ]
-        )
-
-    # Check the OpenAI API key
-    has_openai_key = os.environ.get("OPENAI_API_KEY") is not None
-    if has_openai_key:
-        available_models.extend(["gpt-4o", "gpt-4o-mini"])
-
-    # Show a message if no models are available
-    if not available_models:
-        st.warning(
-            "⚠️ API keys are not configured. Please add ANTHROPIC_API_KEY or OPENAI_API_KEY to your .env file."
-        )
-        # Add the Claude model as default (to show the UI even without keys)
-        available_models = ["claude-3-7-sonnet-latest"]
-
-    # Model selection dropdown
-    previous_model = st.session_state.selected_model
-    st.session_state.selected_model = st.selectbox(
-        "🤖 Select the model to use",
-        options=available_models,
-        index=(
-            available_models.index(st.session_state.selected_model)
-            if st.session_state.selected_model in available_models
-            else 0
-        ),
-        help="ANTHROPIC_API_KEY must be set as an environment variable for Anthropic models, and OPENAI_API_KEY for OpenAI models.",
-    )
-
-    # Notify that session reinitialization is required when the model changes
-    if (
-        previous_model != st.session_state.selected_model
-        and st.session_state.session_initialized
-    ):
-        st.warning(
-            "⚠️ The model has been changed. Press the 'Apply Settings' button to apply the change."
-        )
-
-    # Add the timeout setting slider
-    st.session_state.timeout_seconds = st.slider(
-        "⏱️ Response generation time limit (seconds)",
-        min_value=60,
-        max_value=300,
-        value=st.session_state.timeout_seconds,
-        step=10,
-        help="Set the maximum time for the agent to generate a response. Complex tasks may require more time.",
-    )
-
-    st.session_state.recursion_limit = st.slider(
-        "⏱️ Recursion call limit (count)",
-        min_value=10,
-        max_value=200,
-        value=st.session_state.recursion_limit,
-        step=10,
-        help="Set the recursion call limit. Setting too high a value may cause out-of-memory problems.",
-    )
-
-    st.divider()  # Add a divider
-
-    # Add the tool settings section
-    st.subheader("🔧 Tool Settings")
-
-    # Manage the expander state in session state
-    if "mcp_tools_expander" not in st.session_state:
-        st.session_state.mcp_tools_expander = False
-
-    # MCP tool addition interface
-    with st.expander("🧰 Add MCP Tools", expanded=st.session_state.mcp_tools_expander):
-        # Load and display settings from the config.json file
-        loaded_config = load_config_from_json()
-        default_config_text = json.dumps(loaded_config, indent=2, ensure_ascii=False)
-
-        # Create the pending config from the existing mcp_config_text if it does not exist
-        if "pending_mcp_config" not in st.session_state:
-            try:
-                st.session_state.pending_mcp_config = loaded_config
-            except Exception as e:
-                st.error(f"Failed to set the initial pending config: {e}")
-
-        # UI for adding individual tools
-        st.subheader("Add Tool")
-        st.markdown(
-            """
-        [How do I set it up?](https://teddylee777.notion.site/MCP-1d324f35d12980c8b018e12afdf545a1?pvs=4)
-
-        ⚠️ **Important**: JSON must be wrapped in curly braces (`{}`)."""
-        )
-
-        # Provide a clearer example
-        example_json = {
-            "github": {
-                "command": "npx",
-                "args": [
-                    "-y",
-                    "@smithery/cli@latest",
-                    "run",
-                    "@smithery-ai/github",
-                    "--config",
-                    '{"githubPersonalAccessToken":"your_token_here"}',
-                ],
-                "transport": "stdio",
-            }
-        }
-
-        default_text = json.dumps(example_json, indent=2, ensure_ascii=False)
-
-        new_tool_json = st.text_area(
-            "Tool JSON",
-            default_text,
-            height=250,
-        )
-
-        # Add button
-        if st.button(
-            "Add Tool",
-            type="primary",
-            key="add_tool_button",
-            use_container_width=True,
-        ):
-            try:
-                # Validate the input
-                if not new_tool_json.strip().startswith(
-                    "{"
-                ) or not new_tool_json.strip().endswith("}"):
-                    st.error("JSON must start and end with curly braces ({}).")
-                    st.markdown('Correct format: `{ "tool_name": { ... } }`')
-                else:
-                    # Parse the JSON
-                    parsed_tool = json.loads(new_tool_json)
-
-                    # Check whether it is in mcpServers format and handle it
-                    if "mcpServers" in parsed_tool:
-                        # Move the contents of mcpServers to the top level
-                        parsed_tool = parsed_tool["mcpServers"]
-                        st.info(
-                            "'mcpServers' format detected. Converting automatically."
-                        )
-
-                    # Check the number of tools entered
-                    if len(parsed_tool) == 0:
-                        st.error("Please enter at least one tool.")
-                    else:
-                        # Process all tools
-                        success_tools = []
-                        for tool_name, tool_config in parsed_tool.items():
-                            # Check the URL field and set the transport
-                            if "url" in tool_config:
-                                # Set transport to "sse" if a URL exists
-                                tool_config["transport"] = "sse"
-                                st.info(
-                                    f"URL detected in the '{tool_name}' tool, setting transport to 'sse'."
-                                )
-                            elif "transport" not in tool_config:
-                                # Set the default "stdio" if no URL exists and transport is not specified
-                                tool_config["transport"] = "stdio"
-
-                            # Check required fields
-                            if (
-                                "command" not in tool_config
-                                and "url" not in tool_config
-                            ):
-                                st.error(
-                                    f"The '{tool_name}' tool configuration requires a 'command' or 'url' field."
-                                )
-                            elif "command" in tool_config and "args" not in tool_config:
-                                st.error(
-                                    f"The '{tool_name}' tool configuration requires an 'args' field."
-                                )
-                            elif "command" in tool_config and not isinstance(
-                                tool_config["args"], list
-                            ):
-                                st.error(
-                                    f"The 'args' field of the '{tool_name}' tool must be an array ([])."
-                                )
-                            else:
-                                # Add the tool to pending_mcp_config
-                                st.session_state.pending_mcp_config[tool_name] = (
-                                    tool_config
-                                )
-                                success_tools.append(tool_name)
-
-                        # Success message
-                        if success_tools:
-                            if len(success_tools) == 1:
-                                st.success(
-                                    f"The {success_tools[0]} tool has been added. Press the 'Apply Settings' button to apply it."
-                                )
-                            else:
-                                tool_names = ", ".join(success_tools)
-                                st.success(
-                                    f"A total of {len(success_tools)} tools ({tool_names}) have been added. Press the 'Apply Settings' button to apply them."
-                                )
-                            # Close the expander once the tools are added
-                            st.session_state.mcp_tools_expander = False
-                            st.rerun()
-            except json.JSONDecodeError as e:
-                st.error(f"JSON parsing error: {e}")
-                st.markdown(
-                    f"""
-                **How to fix**:
-                1. Check that the JSON format is correct.
-                2. All keys must be wrapped in double quotes (").
-                3. String values must also be wrapped in double quotes (").
-                4. Double quotes inside strings must be escaped (\\").
-                """
-                )
-            except Exception as e:
-                st.error(f"Error occurred: {e}")
-
-    # Display the list of registered tools and add delete buttons
-    with st.expander("📋 Registered Tool List", expanded=True):
-        try:
-            pending_config = st.session_state.pending_mcp_config
-        except Exception as e:
-            st.error("Not a valid MCP tool configuration.")
-        else:
-            # Iterate over the keys (tool names) in the pending config and display them
-            for tool_name in list(pending_config.keys()):
-                col1, col2 = st.columns([8, 2])
-                col1.markdown(f"- **{tool_name}**")
-                if col2.button("Delete", key=f"delete_{tool_name}"):
-                    # Delete the tool from the pending config (not applied immediately)
-                    del st.session_state.pending_mcp_config[tool_name]
-                    st.success(
-                        f"The {tool_name} tool has been deleted. Press the 'Apply Settings' button to apply it."
-                    )
-
-    st.divider()  # Add a divider
-
-# --- Sidebar: system info and action buttons section ---
-with st.sidebar:
-    st.subheader("📊 System Info")
-    st.write(f"🛠️ Number of MCP tools: {st.session_state.get('tool_count', 'initializing...')}")
-    selected_model_name = st.session_state.selected_model
-    st.write(f"🧠 Current model: {selected_model_name}")
-
-    # Move the Apply Settings button here
-    if st.button(
-        "Apply Settings",
-        key="apply_button",
-        type="primary",
-        use_container_width=True,
-    ):
-        # Show the applying message
-        apply_status = st.empty()
-        with apply_status.container():
-            st.warning("🔄 Applying changes. Please wait a moment...")
-            progress_bar = st.progress(0)
-
-            # Save the settings
-            st.session_state.mcp_config_text = json.dumps(
-                st.session_state.pending_mcp_config, indent=2, ensure_ascii=False
-            )
-
-            # Save the settings to the config.json file
-            save_result = save_config_to_json(st.session_state.pending_mcp_config)
-            if not save_result:
-                st.error("❌ Failed to save the config file.")
-
-            progress_bar.progress(15)
-
-            # Prepare session initialization
-            st.session_state.session_initialized = False
-            st.session_state.agent = None
-
-            # Update progress
-            progress_bar.progress(30)
-
-            # Run initialization
-            success = st.session_state.event_loop.run_until_complete(
-                initialize_session(st.session_state.pending_mcp_config)
-            )
-
-            # Update progress
-            progress_bar.progress(100)
-
-            if success:
-                st.success("✅ The new settings have been applied.")
-                # Close the tool addition expander
-                if "mcp_tools_expander" in st.session_state:
-                    st.session_state.mcp_tools_expander = False
-            else:
-                st.error("❌ Failed to apply the settings.")
-
-        # Refresh the page
-        st.rerun()
-
-    st.divider()  # Add a divider
-
-    # Action buttons section
-    st.subheader("🔄 Actions")
-
-    # Reset conversation button
-    if st.button("Reset Conversation", use_container_width=True, type="primary"):
-        # Reset the thread_id
-        st.session_state.thread_id = random_uuid()
-
-        # Reset the conversation history
-        st.session_state.history = []
-
-        # Notification message
-        st.success("✅ The conversation has been reset.")
-
-        # Refresh the page
-        st.rerun()
-
-    # Show the logout button only if the login feature is enabled
-    if use_login and st.session_state.authenticated:
-        st.divider()  # Add a divider
-        if st.button("Logout", use_container_width=True, type="secondary"):
-            st.session_state.authenticated = False
-            st.success("✅ You have been logged out.")
-            st.rerun()
-
-# --- Default session initialization (if not yet initialized) ---
-if not st.session_state.session_initialized:
-    st.info(
-        "The MCP server and agent have not been initialized. Click the 'Apply Settings' button in the left sidebar to initialize them."
-    )
-
-
-# --- Print the conversation history ---
-print_message()
-
-# --- Handle user input ---
-user_query = st.chat_input("💬 Enter your question")
-if user_query:
-    if st.session_state.session_initialized:
-        st.chat_message("user", avatar="🧑‍💻").markdown(user_query)
-        with st.chat_message("assistant", avatar="🤖"):
-            tool_placeholder = st.empty()
-            text_placeholder = st.empty()
-            resp, final_text, final_tool = (
-                st.session_state.event_loop.run_until_complete(
-                    process_query(
-                        user_query,
-                        text_placeholder,
-                        tool_placeholder,
-                        st.session_state.timeout_seconds,
-                    )
-                )
-            )
-        if "error" in resp:
-            st.error(resp["error"])
-        else:
-            st.session_state.history.append({"role": "user", "content": user_query})
-            st.session_state.history.append(
-                {"role": "assistant", "content": final_text}
-            )
-            if final_tool.strip():
-                st.session_state.history.append(
-                    {"role": "assistant_tool", "content": final_tool}
-                )
-            st.rerun()
-    else:
-        st.warning(
-            "⚠️ The MCP server and agent have not been initialized. Click the 'Apply Settings' button in the left sidebar to initialize them."
-        )
config.json
CHANGED
@@ -5,5 +5,13 @@
       "./mcp_server_time.py"
     ],
     "transport": "stdio"
-  }
+  },
+  "qa": {
+    "transport": "sse",
+    "url": "http://10.15.56.148:8000/qa"
+  },
+  "review_generate": {
+    "transport": "sse",
+    "url": "http://10.15.56.148:8000/review"
+  }
 }
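The two new entries point the client at SSE endpoints rather than stdio subprocesses. A minimal sketch of how this file is consumed, using only the calls already shown in this diff (`MultiServerMCPClient(config)`, `__aenter__`, `get_tools()`); the endpoint URLs are internal addresses and assumed reachable:

```python
import asyncio
import json

from langchain_mcp_adapters.client import MultiServerMCPClient

async def main():
    # Load the same config.json shown in the diff above.
    with open("config.json", encoding="utf-8") as f:
        config = json.load(f)

    # Connect to all servers at once: stdio for get_current_time,
    # SSE for the qa and review_generate endpoints.
    client = MultiServerMCPClient(config)
    await client.__aenter__()
    try:
        tools = client.get_tools()
        print([t.name for t in tools])
    finally:
        await client.__aexit__(None, None, None)

asyncio.run(main())
```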
run.sh
ADDED
@@ -0,0 +1 @@
+streamlit run app.py --server.address=0.0.0.0
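Binding to `0.0.0.0` exposes the Streamlit server on all interfaces, which a hosted deployment such as a Hugging Face Space needs; since no `--server.port` flag is given, the app presumably listens on Streamlit's default port 8501.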