jayavibhav committed on
Commit 99cb511 · verified · 1 Parent(s): 33949b4

Update all_nodes.txt

Files changed (1)
  1. all_nodes.txt +2020 -0
all_nodes.txt CHANGED
@@ -0,0 +1,2020 @@
1
+ ## example workflow
2
+ {
3
+ "workflow_id": "simple-chatbot-v1",
4
+ "workflow_name": "Simple Chatbot",
5
+ "nodes": [
6
+ {
7
+ "id": "ChatInput-1",
8
+ "type": "ChatInput",
9
+ "data": {
10
+ "display_name": "User's Question",
11
+ "template": {
12
+ "input_value": {
13
+ "display_name": "Input",
14
+ "type": "string",
15
+ "value": "What is the capital of France?",
16
+ "is_handle": true
17
+ }
18
+ }
19
+ },
20
+ "resources": {
21
+ "cpu": 0.1,
22
+ "memory": "128Mi",
23
+ "gpu": "none"
24
+ }
25
+ },
26
+ {
27
+ "id": "Prompt-1",
28
+ "type": "Prompt",
29
+ "data": {
30
+ "display_name": "System Prompt",
31
+ "template": {
32
+ "prompt_template": {
33
+ "display_name": "Template",
34
+ "type": "string",
35
+ "value": "You are a helpful geography expert. The user asked: {input_value}",
36
+ "is_handle": true
37
+ }
38
+ }
39
+ },
40
+ "resources": {
41
+ "cpu": 0.1,
42
+ "memory": "128Mi",
43
+ "gpu": "none"
44
+ }
45
+ },
46
+ {
47
+ "id": "OpenAI-1",
48
+ "type": "OpenAIModel",
49
+ "data": {
50
+ "display_name": "OpenAI gpt-4o-mini",
51
+ "template": {
52
+ "model": {
53
+ "display_name": "Model",
54
+ "type": "options",
55
+ "options": ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
56
+ "value": "gpt-4o-mini"
57
+ },
58
+ "api_key": {
59
+ "display_name": "API Key",
60
+ "type": "SecretStr",
61
+ "required": true,
62
+ "env_var": "OPENAI_API_KEY"
63
+ },
64
+ "prompt": {
65
+ "display_name": "Prompt",
66
+ "type": "string",
67
+ "is_handle": true
68
+ }
69
+ }
70
+ },
71
+ "resources": {
72
+ "cpu": 0.5,
73
+ "memory": "256Mi",
74
+ "gpu": "none"
75
+ }
76
+ },
77
+ {
78
+ "id": "ChatOutput-1",
79
+ "type": "ChatOutput",
80
+ "data": {
81
+ "display_name": "Final Answer",
82
+ "template": {
83
+ "response": {
84
+ "display_name": "Response",
85
+ "type": "string",
86
+ "is_handle": true
87
+ }
88
+ }
89
+ },
90
+ "resources": {
91
+ "cpu": 0.1,
92
+ "memory": "128Mi",
93
+ "gpu": "none"
94
+ }
95
+ }
96
+ ],
97
+ "edges": [
98
+ {
99
+ "source": "ChatInput-1",
100
+ "source_handle": "input_value",
101
+ "target": "Prompt-1",
102
+ "target_handle": "prompt_template"
103
+ },
104
+ {
105
+ "source": "Prompt-1",
106
+ "source_handle": "prompt_template",
107
+ "target": "OpenAI-1",
108
+ "target_handle": "prompt"
109
+ },
110
+ {
111
+ "source": "OpenAI-1",
112
+ "source_handle": "response",
113
+ "target": "ChatOutput-1",
114
+ "target_handle": "response"
115
+ }
116
+ ]
117
+ }
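+
+ A minimal sketch (assumed semantics, not the engine's actual implementation) of how the
+ edges above wire nodes together: each edge copies the value produced under `source_handle`
+ on the source node into the `target_handle` field of the target node's template.
+
+ def apply_edge(edge, results, nodes):
+     # results: {node_id: {handle_name: value}} collected from nodes that already ran
+     # nodes:   {node_id: node_definition} keyed as in the JSON above
+     value = results[edge["source"]][edge["source_handle"]]
+     nodes[edge["target"]]["data"]["template"][edge["target_handle"]]["value"] = value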
118
+
119
+
120
+ ## input node
121
+ {
122
+ "id": "Input-1",
123
+ "type": "Input",
124
+ "data": {
125
+ "display_name": "Source Data",
126
+ "template": {
127
+ "data_type": {
128
+ "display_name": "Data Type",
129
+ "type": "options",
130
+ "options": ["string", "image", "video", "audio", "file"],
131
+ "value": "string"
132
+ },
133
+ "value": {
134
+ "display_name": "Value or Path",
135
+ "type": "string",
136
+ "value": "This is the initial text."
137
+ },
138
+ "data": {
139
+ "display_name": "Output Data",
140
+ "type": "object",
141
+ "is_handle": true
142
+ }
143
+ }
144
+ },
145
+ "resources": {
146
+ "cpu": 0.1,
147
+ "memory": "128Mi",
148
+ "gpu": "none"
149
+ }
150
+ }
151
+
152
+
153
+ from typing import Any, Dict
154
+
155
+ def process_input(data_type: str, value: Any) -> Dict[str, Any]:
156
+ """
157
+ Packages the source data and its type for downstream nodes.
158
+ """
159
+ # The output is a dictionary containing both the type and the data/path.
160
+ # This gives the next node context on how to handle the value.
161
+ output_package = {
162
+ "type": data_type,
163
+ "value": value
164
+ }
165
+ return {"data": output_package}
166
+
167
+ process_input("string", "hi")
168
+
169
+ ## output node
170
+ """{
171
+ "id": "Output-1",
172
+ "type": "Output",
173
+ "data": {
174
+ "display_name": "Final Result",
175
+ "template": {
176
+ "input_data": {
177
+ "display_name": "Input Data",
178
+ "type": "object",
179
+ "is_handle": true
180
+ }
181
+ }
182
+ },
183
+ "resources": {
184
+ "cpu": 0.1,
185
+ "memory": "128Mi",
186
+ "gpu": "none"
187
+ }
188
+ }"""
189
+
190
+ from typing import Any, Dict
191
+
192
+ def process_output(input_data: Dict[str, Any]) -> Dict[str, Any]:
193
+ """
194
+ Receives the final data package and returns its contents.
195
+ """
196
+ # Unpack the dictionary received from the upstream node.
197
+ data_type = input_data.get("type", "unknown")
198
+ value = input_data.get("value", "No value provided")
199
+
200
+ # Return the unpacked fields rather than printing them.
201
+ return {"data_type": data_type, "value": value}
206
+
207
+ process_output({'type': 'string', 'value': 'hi'})
208
+
209
+ ## api request node
210
+ """{
211
+ "id": "APIRequest-1",
212
+ "type": "APIRequest",
213
+ "data": {
214
+ "display_name": "Get User Data",
215
+ "template": {
216
+ "url": {
217
+ "display_name": "URL",
218
+ "type": "string",
219
+ "value": "https://api.example.com/users/1"
220
+ },
221
+ "method": {
222
+ "display_name": "Method",
223
+ "type": "options",
224
+ "options": ["GET", "POST", "PUT", "DELETE"],
225
+ "value": "GET"
226
+ },
227
+ "headers": {
228
+ "display_name": "Headers (JSON)",
229
+ "type": "string",
230
+ "value": "{\"Authorization\": \"Bearer YOUR_TOKEN\"}"
231
+ },
232
+ "body": {
233
+ "display_name": "Request Body",
234
+ "type": "object",
235
+ "is_handle": true
236
+ },
237
+ "response": {
238
+ "display_name": "Response Data",
239
+ "type": "object",
240
+ "is_handle": true
241
+ }
242
+ }
243
+ },
244
+ "resources": {
245
+ "cpu": 0.2,
246
+ "memory": "256Mi",
247
+ "gpu": "none"
248
+ }
249
+ }"""
250
+
251
+ import requests
252
+ import json
253
+ from typing import Any, Dict
254
+
255
+ def process_api_request(url: str, method: str, headers: str, body: Dict = None) -> Dict[str, Any]:
256
+ """
257
+ Performs an HTTP request and returns the JSON response.
258
+ """
259
+ try:
260
+ parsed_headers = json.loads(headers)
261
+ except json.JSONDecodeError:
262
+ print("Warning: Headers are not valid JSON. Using empty headers.")
263
+ parsed_headers = {}
264
+
265
+ try:
266
+ response = requests.request(
267
+ method=method,
268
+ url=url,
269
+ headers=parsed_headers,
270
+ json=body,
271
+ timeout=10 # 10-second timeout
272
+ )
273
+ # Raise an exception for bad status codes (4xx or 5xx)
274
+ response.raise_for_status()
275
+
276
+ # The output is a dictionary containing the JSON response.
277
+ return {"response": response.json()}
278
+
279
+ except requests.exceptions.RequestException as e:
280
+ print(f"Error during API request: {e}")
281
+ # Return an error structure on failure
282
+ return {"response": {"error": str(e), "status_code": getattr(e.response, 'status_code', 500)}}
283
+
284
+ url = "https://jsonplaceholder.typicode.com/posts"
285
+ method = "GET"
286
+ headers = "{}" # empty JSON headers
287
+ body = None # GET requests typically don't send a JSON body
288
+
289
+ result = process_api_request(url, method, headers, body)
290
+ print(result)
291
+
292
+ url = "https://jsonplaceholder.typicode.com/posts"
293
+ method = "POST"
294
+ headers = '{"Content-Type": "application/json"}'
295
+ body = {
296
+ "title": "foo",
297
+ "body": "bar",
298
+ "userId": 1
299
+ }
300
+
301
+ result = process_api_request(url, method, headers, body)
302
+ print(result)
303
+
304
+ ## react agent tool
305
+ import os
306
+ import asyncio
307
+ from typing import List, Dict, Any
308
+ from llama_index.core.agent import ReActAgent
309
+ from llama_index.core.tools import FunctionTool
310
+ from llama_index.llms.openai import OpenAI
311
+ from duckduckgo_search import DDGS
312
+
313
+ # Set your API key
314
+ # os.environ["OPENAI_API_KEY"] = "your-api-key-here"
315
+
316
+ class WorkflowReActAgent:
317
+ """Complete working ReAct Agent with your workflow tools"""
318
+
319
+ def __init__(self, llm_model: str = "gpt-4o-mini"):
320
+ self.llm = OpenAI(model=llm_model, temperature=0.1)
321
+ self.tools = self._create_tools()
322
+ self.agent = ReActAgent.from_tools(
323
+ tools=self.tools,
324
+ llm=self.llm,
325
+ verbose=True,
326
+ max_iterations=8 # Reasonable limit
327
+ )
328
+
329
+ def _create_tools(self) -> List[FunctionTool]:
330
+ """Create tools that actually work and get used"""
331
+
332
+ # 🔍 Web Search Tool (using your exact implementation)
333
+ def web_search(query: str) -> str:
334
+ """Search the web for current information"""
335
+ try:
336
+ with DDGS() as ddgs:
337
+ results = []
338
+ gen = ddgs.text(query, safesearch="Off")
339
+ for i, result in enumerate(gen):
340
+ if i >= 3: # Limit results
341
+ break
342
+ results.append(f"• {result.get('title', '')}: {result.get('body', '')[:150]}...")
343
+
344
+ if results:
345
+ return f"Search results: {'; '.join(results)}"
346
+ else:
347
+ return f"No results found for '{query}'"
348
+
349
+ except Exception as e:
350
+ return f"Search error: {str(e)}"
351
+
352
+ # 🧮 Calculator Tool
353
+ def calculate(expression: str) -> str:
354
+ """Calculate mathematical expressions safely"""
355
+ try:
356
+ # Simple and safe evaluation
357
+ allowed_chars = "0123456789+-*/().,_ "
358
+ if all(c in allowed_chars for c in expression):
359
+ result = eval(expression)
360
+ return f"Result: {result}"
361
+ else:
362
+ return f"Invalid expression: {expression}"
363
+ except Exception as e:
364
+ return f"Math error: {str(e)}"
365
+
366
+ # 🐍 Python Executor Tool
367
+ def execute_python(code: str) -> str:
368
+ """Execute Python code and return results"""
369
+ import sys
370
+ from io import StringIO
371
+ import traceback
372
+
373
+ old_stdout = sys.stdout
374
+ sys.stdout = StringIO()
375
+
376
+ try:
377
+ local_scope = {}
378
+ exec(code, {"__builtins__": __builtins__}, local_scope)
379
+
380
+ output = sys.stdout.getvalue()
381
+
382
+ # Get result from the last line if it's an expression
383
+ lines = code.strip().split('\n')
384
+ if lines:
385
+ try:
386
+ result = eval(lines[-1], {}, local_scope)
387
+ return f"Result: {result}\nOutput: {output}".strip()
388
+ except:
389
+ pass
390
+
391
+ return f"Output: {output}".strip() if output else "Code executed successfully"
392
+
393
+ except Exception as e:
394
+ return f"Error: {str(e)}"
395
+ finally:
396
+ sys.stdout = old_stdout
397
+
398
+ # 🌐 API Request Tool
399
+ def api_request(url: str, method: str = "GET") -> str:
400
+ """Make HTTP API requests"""
401
+ import requests
402
+ try:
403
+ response = requests.request(method, url, timeout=10)
404
+ return f"Status: {response.status_code}\nResponse: {response.text[:300]}..."
405
+ except Exception as e:
406
+ return f"API error: {str(e)}"
407
+
408
+ # Convert to FunctionTool objects
409
+ return [
410
+ FunctionTool.from_defaults(
411
+ fn=web_search,
412
+ name="web_search",
413
+ description="Search the web for current information on any topic"
414
+ ),
415
+ FunctionTool.from_defaults(
416
+ fn=calculate,
417
+ name="calculate",
418
+ description="Calculate mathematical expressions and equations"
419
+ ),
420
+ FunctionTool.from_defaults(
421
+ fn=execute_python,
422
+ name="execute_python",
423
+ description="Execute Python code for data processing and calculations"
424
+ ),
425
+ FunctionTool.from_defaults(
426
+ fn=api_request,
427
+ name="api_request",
428
+ description="Make HTTP requests to APIs and web services"
429
+ )
430
+ ]
431
+
432
+ def chat(self, message: str) -> str:
433
+ """Chat with the ReAct agent"""
434
+ try:
435
+ response = self.agent.chat(message)
436
+ return str(response.response)
437
+ except Exception as e:
438
+ return f"Agent error: {str(e)}"
439
+
440
+ # 🚀 Usage Examples
441
+ def main():
442
+ """Test the working ReAct agent"""
443
+
444
+ agent = WorkflowReActAgent()
445
+
446
+ test_queries = [
447
+ "What's the current Bitcoin price and calculate 10% of it?",
448
+ "Search for news about SpaceX and tell me the latest",
449
+ "Calculate the compound interest: 1000 * (1.05)^10",
450
+ "Search for Python programming tips",
451
+ "What's 15 factorial divided by 12 factorial?",
452
+ "Find information about the latest iPhone and calculate its price in EUR if 1 USD = 0.92 EUR"
453
+ ]
454
+
455
+ print("🤖 WorkflowReActAgent Ready!")
456
+ print("=" * 60)
457
+
458
+ for i, query in enumerate(test_queries, 1):
459
+ print(f"\n🔸 Query {i}: {query}")
460
+ print("-" * 50)
461
+
462
+ response = agent.chat(query)
463
+ print(f"🎯 Response: {response}")
464
+ print("\n" + "="*60)
465
+
466
+ if __name__ == "__main__":
467
+ main()
468
+
469
+
470
+ ## web search tool
471
+ """{
472
+ "id": "WebSearch-1",
473
+ "type": "WebSearch",
474
+ "data": {
475
+ "display_name": "Search for News",
476
+ "template": {
477
+ "query": {
478
+ "display_name": "Search Query",
479
+ "type": "string",
480
+ "is_handle": true
481
+ },
482
+ "results": {
483
+ "display_name": "Search Results",
484
+ "type": "object",
485
+ "is_handle": true
486
+ }
487
+ }
488
+ },
489
+ "resources": {
490
+ "cpu": 0.2,
491
+ "memory": "256Mi",
492
+ "gpu": "none"
493
+ }
494
+ }"""
495
+
496
+ # First, install duckduckgo_search:
497
+ # pip install duckduckgo_search
498
+
499
+ import json
500
+ from typing import Any, Dict, List
501
+ from duckduckgo_search import DDGS
502
+
503
+ def process_web_search(query: str, max_results: int = 10) -> Dict[str, Any]:
504
+ if not query:
505
+ return {"results": []}
506
+
507
+ try:
508
+ # Use the DDGS client and its text() method
509
+ with DDGS() as ddgs:
510
+ gen = ddgs.text(query, safesearch="Off")
511
+ # Collect up to max_results items
512
+ results: List[Dict[str, str]] = [
513
+ {"title": r.get("title", ""), "link": r.get("href", ""), "snippet": r.get("body", "")}
514
+ for _, r in zip(range(max_results), gen)
515
+ ]
516
+ return {"results": results}
517
+
518
+ except Exception as e:
519
+ return {"results": {"error": str(e)}}
520
+
521
+
522
+ # import json
523
+ # from typing import Any
524
+ # from llama_index.tools import BaseTool, ToolMetadata
525
+
526
+ # class DuckDuckGoSearchTool(BaseTool):
527
+ # """A LlamaIndex tool that proxies to process_web_search."""
528
+ # metadata = ToolMetadata(
529
+ # name="duckduckgo_search",
530
+ # description="Performs a web search via DuckDuckGo and returns JSON results."
531
+ # )
532
+
533
+ # def __init__(self, max_results: int = 10):
534
+ # self.max_results = max_results
535
+
536
+ # def _run(self, query: str) -> str:
537
+ # # Call our search function and return a JSON string
538
+ # results = process_web_search(query, max_results=self.max_results)
539
+ # return json.dumps(results)
540
+
541
+ # async def _arun(self, query: str) -> str:
542
+ # # Async agents can await this
543
+ # results = process_web_search(query, max_results=self.max_results)
544
+ # return json.dumps(results)
545
+
546
+ # from llama_index import GPTVectorStoreIndex, ServiceContext
547
+ # from llama_index.agent.react import ReactAgent
548
+ # from llama_index.tools import ToolConfig
549
+
550
+ # # 1. Instantiate the tool
551
+ # search_tool = DuckDuckGoSearchTool(max_results=5)
552
+
553
+ # # 2. Create an agent and register tools
554
+ # agent = ReactAgent(
555
+ # tools=[search_tool],
556
+ # service_context=ServiceContext.from_defaults()
557
+ # )
558
+
559
+ # # 3. Run the agent with a natural‐language prompt
560
+ # response = agent.run("What are the top news about renewable energy?")
561
+ # print(response)
562
+
563
+
564
+ process_web_search(query="devil may cry")
565
+
566
+
567
+ ## execute python node
568
+ """{
569
+ "id": "ExecutePython-1",
570
+ "type": "ExecutePython",
571
+ "data": {
572
+ "display_name": "Custom Data Processing",
573
+ "template": {
574
+ "code": {
575
+ "display_name": "Python Code",
576
+ "type": "string",
577
+ "value": "def process(data):\n # Example: Extract titles from search results\n titles = [item['title'] for item in data]\n # The 'result' variable will be the output\n result = ', '.join(titles)\n return result"
578
+ },
579
+ "input_vars": {
580
+ "display_name": "Input Variables",
581
+ "type": "object",
582
+ "is_handle": true
583
+ },
584
+ "output_vars": {
585
+ "display_name": "Output Variables",
586
+ "type": "object",
587
+ "is_handle": true
588
+ }
589
+ }
590
+ },
591
+ "resources": {
592
+ "cpu": 0.5,
593
+ "memory": "512Mi",
594
+ "gpu": "none"
595
+ }
596
+ }"""
597
+
598
+ import sys
599
+ import traceback
600
+ from typing import Any, Dict
601
+
602
+ def process_execute_python(code: str, input_vars: Dict[str, Any] = None) -> Dict[str, Any]:
603
+ """
604
+ Executes a string of Python code within an isolated scope.
605
+ - If the code defines `process(data)`, calls it with `input_vars`.
606
+ - Otherwise, executes the code top-level and returns any printed output.
607
+ """
608
+ if input_vars is None:
609
+ input_vars = {}
610
+
611
+ # Capture stdout
612
+ from io import StringIO
613
+ old_stdout = sys.stdout
614
+ sys.stdout = StringIO()
615
+
616
+ local_scope: Dict[str, Any] = {}
617
+ try:
618
+ # Execute user code
619
+ exec(code, {}, local_scope)
620
+
621
+ if "process" in local_scope and callable(local_scope["process"]):
622
+ result = local_scope["process"](input_vars)
623
+ else:
624
+ # No process(): the code already ran as a plain script above,
625
+ # so any printed output was captured on stdout.
626
+ result = None
628
+
629
+ output = sys.stdout.getvalue()
630
+ return {"output_vars": result, "stdout": output}
631
+
632
+ except Exception:
633
+ err = traceback.format_exc()
634
+ return {"output_vars": None, "error": err}
635
+
636
+ finally:
637
+ sys.stdout = old_stdout
638
+
639
+ # 1. Code with process():
640
+ code1 = """
641
+ def process(data):
642
+ return {"sum": data.get("x",0) + data.get("y",0)}
643
+ """
644
+ print(process_execute_python(code1, {"x":5, "y":7}))
645
+ # → {'output_vars': {'sum': 12}, 'stdout': ''}
646
+
647
+ # 2. Standalone code:
648
+ code2 = 'print("Hello, world!")'
649
+ print(process_execute_python(code2))
650
+ # → {'output_vars': None, 'stdout': 'Hello, world!\n'}
651
+
652
+ # import json
653
+ # from typing import Any
654
+ # from llama_index.tools import BaseTool, ToolMetadata
655
+
656
+ # class ExecutePythonTool(BaseTool):
657
+ # """Executes arbitrary Python code strings in an isolated scope."""
658
+ # metadata = ToolMetadata(
659
+ # name="execute_python",
660
+ # description="Runs user-supplied Python code. Requires optional `process(data)` or runs script."
661
+ # )
662
+
663
+ # def _run(self, code: str) -> str:
664
+ # # Call the executor and serialize the dict result
665
+ # result = process_execute_python(code)
666
+ # return json.dumps(result)
667
+
668
+ # async def _arun(self, code: str) -> str:
669
+ # result = process_execute_python(code)
670
+ # return json.dumps(result)
671
+
672
+ # from llama_index.agent.react import ReactAgent
673
+ # from llama_index import ServiceContext
674
+
675
+ # tool = ExecutePythonTool()
676
+ # agent = ReactAgent(tools=[tool], service_context=ServiceContext.from_defaults())
677
+
678
+ # # Agent will call `execute_python` when needed.
679
+ # response = agent.run("Please run the Python code: print('Test')")
680
+ # print(response)
681
+
682
+
683
+ ## conditional logic
684
+ """{
685
+ "id": "ConditionalLogic-1",
686
+ "type": "ConditionalLogic",
687
+ "data": {
688
+ "display_name": "Check User Role",
689
+ "template": {
690
+ "operator": {
691
+ "display_name": "Operator",
692
+ "type": "options",
693
+ "options": ["==", "!=", ">", "<", ">=", "<=", "contains", "not contains"],
694
+ "value": "=="
695
+ },
696
+ "comparison_value": {
697
+ "display_name": "Comparison Value",
698
+ "type": "string",
699
+ "value": "admin"
700
+ },
701
+ "input_value": {
702
+ "display_name": "Input to Check",
703
+ "type": "any",
704
+ "is_handle": true
705
+ },
706
+ "true_output": {
707
+ "display_name": "Path if True",
708
+ "type": "any",
709
+ "is_handle": true
710
+ },
711
+ "false_output": {
712
+ "display_name": "Path if False",
713
+ "type": "any",
714
+ "is_handle": true
715
+ }
716
+ }
717
+ },
718
+ "resources": {
719
+ "cpu": 0.1,
720
+ "memory": "128Mi",
721
+ "gpu": "none"
722
+ }
723
+ }"""
724
+
725
+ from typing import Any, Dict
726
+
727
+ def process_conditional_logic(operator: str, comparison_value: str, input_value: Any) -> Dict[str, Any]:
728
+ """
729
+ Evaluates a condition and returns the input value on the appropriate output handle.
730
+ """
731
+ result = False
732
+ # Attempt to convert types for numeric comparison
733
+ try:
734
+ num_input = float(input_value)
735
+ num_comp = float(comparison_value)
736
+ except (ValueError, TypeError):
737
+ num_input, num_comp = None, None
738
+
739
+ # Evaluate condition
740
+ if operator == '==' : result = input_value == comparison_value
741
+ elif operator == '!=': result = input_value != comparison_value
742
+ elif operator == '>' and num_input is not None: result = num_input > num_comp
743
+ elif operator == '<' and num_input is not None: result = num_input < num_comp
744
+ elif operator == '>=' and num_input is not None: result = num_input >= num_comp
745
+ elif operator == '<=' and num_input is not None: result = num_input <= num_comp
746
+ elif operator == 'contains': result = str(comparison_value) in str(input_value)
747
+ elif operator == 'not contains': result = str(comparison_value) not in str(input_value)
748
+
749
+ # Return the input data on the correct output handle based on the result
750
+ if result:
751
+ # The key "true_output" matches the source_handle in the workflow edge
752
+ return {"true_output": input_value}
753
+ else:
754
+ # The key "false_output" matches the source_handle in the workflow edge
755
+ return {"false_output": input_value}
756
+
757
+ ## wait node
758
+ """{
759
+ "id": "Wait-1",
760
+ "type": "Wait",
761
+ "data": {
762
+ "display_name": "Wait for 5 Seconds",
763
+ "template": {
764
+ "duration": {
765
+ "display_name": "Duration (seconds)",
766
+ "type": "number",
767
+ "value": 5
768
+ },
769
+ "passthrough_input": {
770
+ "display_name": "Passthrough Data In",
771
+ "type": "any",
772
+ "is_handle": true
773
+ },
774
+ "passthrough_output": {
775
+ "display_name": "Passthrough Data Out",
776
+ "type": "any",
777
+ "is_handle": true
778
+ }
779
+ }
780
+ },
781
+ "resources": {
782
+ "cpu": 0.1,
783
+ "memory": "128Mi",
784
+ "gpu": "none"
785
+ }
786
+ }"""
787
+
788
+ import time
789
+ from typing import Any, Dict
790
+
791
+ def process_wait(duration: int, passthrough_input: Any = None) -> Dict[str, Any]:
792
+ """
793
+ Pauses execution for a given duration and then passes data through.
794
+ """
795
+ time.sleep(duration)
796
+ # The output key "passthrough_output" matches the source_handle
797
+ return {"passthrough_output": passthrough_input}
798
+
799
+ ## chat node
800
+ """{
801
+ "id": "ChatModel-1",
802
+ "type": "ChatModel",
803
+ "data": {
804
+ "display_name": "AI Assistant",
805
+ "template": {
806
+ "provider": {
807
+ "display_name": "Provider",
808
+ "type": "options",
809
+ "options": ["OpenAI", "Anthropic"],
810
+ "value": "OpenAI"
811
+ },
812
+ "model": {
813
+ "display_name": "Model Name",
814
+ "type": "string",
815
+ "value": "gpt-4o-mini"
816
+ },
817
+ "api_key": {
818
+ "display_name": "API Key",
819
+ "type": "SecretStr",
820
+ "required": true,
821
+ "env_var": "OPENAI_API_KEY"
822
+ },
823
+ "system_prompt": {
824
+ "display_name": "System Prompt (Optional)",
825
+ "type": "string",
826
+ "value": "You are a helpful assistant."
827
+ },
828
+ "prompt": {
829
+ "display_name": "Prompt",
830
+ "type": "string",
831
+ "is_handle": true
832
+ },
833
+ "response": {
834
+ "display_name": "Response",
835
+ "type": "string",
836
+ "is_handle": true
837
+ }
838
+ }
839
+ },
840
+ "resources": {
841
+ "cpu": 0.5,
842
+ "memory": "256Mi",
843
+ "gpu": "none"
844
+ }
845
+ }"""
846
+
847
+ import os
848
+ from typing import Any, Dict
849
+ from openai import OpenAI
850
+ from anthropic import Anthropic
851
+
852
+ def process_chat_model(provider: str, model: str, api_key: str, prompt: str, system_prompt: str = "") -> Dict[str, Any]:
853
+ """
854
+ Calls the specified chat model provider with a given prompt.
855
+ """
856
+ response_text = ""
857
+ if provider == "OpenAI":
858
+ client = OpenAI(api_key=api_key)
859
+ messages = []
860
+ if system_prompt:
861
+ messages.append({"role": "system", "content": system_prompt})
862
+ messages.append({"role": "user", "content": prompt})
863
+
864
+ completion = client.chat.completions.create(model=model, messages=messages)
865
+ response_text = completion.choices[0].message.content
866
+
867
+ elif provider == "Anthropic":
868
+ client = Anthropic(api_key=api_key)
869
+ message = client.messages.create(
870
+ model=model,
871
+ max_tokens=2048,
872
+ system=system_prompt,
873
+ messages=[{"role": "user", "content": prompt}]
874
+ )
875
+ response_text = message.content[0].text
876
+
877
+ return {"response": response_text}
878
+
879
+
880
+
881
+ def test_openai():
882
+ openai_key = os.getenv("OPENAI_API_KEY")
883
+ if not openai_key:
884
+ raise RuntimeError("Set the OPENAI_API_KEY environment variable.")
885
+ result = process_chat_model(
886
+ provider="OpenAI",
887
+ model="gpt-3.5-turbo",
888
+ api_key=openai_key,
889
+ system_prompt="You are a helpful assistant.",
890
+ prompt="What's the capital of France?"
891
+ )
892
+ print("OpenAI response:", result["response"])
893
+
894
+
895
+ def test_anthropic():
896
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY")
897
+ if not anthropic_key:
898
+ raise RuntimeError("Set the ANTHROPIC_API_KEY environment variable.")
899
+ result = process_chat_model(
900
+ provider="Anthropic",
901
+ model="claude-sonnet-4-20250514",
902
+ api_key=anthropic_key,
903
+ system_prompt="You are a concise assistant.",
904
+ prompt="List three benefits of renewable energy."
905
+ )
906
+ print("Anthropic response:", result["response"])
907
+
908
+
909
+ if __name__ == "__main__":
910
+ test_openai()
911
+ test_anthropic()
912
+
913
+ ## rag node 1 knowledge base
914
+ """{
915
+ "id": "KnowledgeBase-1",
916
+ "type": "KnowledgeBase",
917
+ "data": {
918
+ "display_name": "Create Product Docs KB",
919
+ "template": {
920
+ "kb_name": {
921
+ "display_name": "Knowledge Base Name",
922
+ "type": "string",
923
+ "value": "product-docs-v1"
924
+ },
925
+ "source_type": {
926
+ "display_name": "Source Type",
927
+ "type": "options",
928
+ "options": ["Directory", "URL"],
929
+ "value": "URL"
930
+ },
931
+ "path_or_url": {
932
+ "display_name": "Path or URL",
933
+ "type": "string",
934
+ "value": "https://docs.modal.com/get-started"
935
+ },
936
+ "knowledge_base": {
937
+ "display_name": "Knowledge Base Out",
938
+ "type": "object",
939
+ "is_handle": true
940
+ }
941
+ }
942
+ },
943
+ "resources": {
944
+ "cpu": 2.0,
945
+ "memory": "1Gi",
946
+ "gpu": "none"
947
+ }
948
+ }"""
949
+
950
+ import os
951
+ from typing import Any, Dict
952
+ from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
953
+ from llama_index.readers.web import SimpleWebPageReader
954
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
955
+
956
+ def process_knowledge_base(kb_name: str, source_type: str, path_or_url: str) -> Dict[str, Any]:
957
+ """
958
+ Creates and persists a LlamaIndex VectorStoreIndex.
959
+ """
960
+ # Use a high-quality, local model for embeddings
961
+ Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
962
+
963
+ if source_type == "URL":
964
+ documents = SimpleWebPageReader(html_to_text=True).load_data([path_or_url])
965
+ else:
966
+ documents = SimpleDirectoryReader(input_dir=path_or_url).load_data()
967
+
968
+ index = VectorStoreIndex.from_documents(documents)
969
+
970
+ storage_path = os.path.join("./storage", kb_name)
971
+ index.storage_context.persist(persist_dir=storage_path)
972
+
973
+ # Return a reference object to the persisted index
974
+ return {"knowledge_base": {"name": kb_name, "path": storage_path}}
975
+
976
+ ## rag node 2 query
977
+ """{
978
+ "id": "RAGQuery-1",
979
+ "type": "RAGQuery",
980
+ "data": {
981
+ "display_name": "Retrieve & Augment Prompt",
982
+ "template": {
983
+ "query": {
984
+ "display_name": "Original Query",
985
+ "type": "string",
986
+ "is_handle": true
987
+ },
988
+ "knowledge_base": {
989
+ "display_name": "Knowledge Base",
990
+ "type": "object",
991
+ "is_handle": true
992
+ },
993
+ "rag_prompt": {
994
+ "display_name": "Augmented Prompt Out",
995
+ "type": "string",
996
+ "is_handle": true
997
+ }
998
+ }
999
+ },
1000
+ "resources": {
1001
+ "cpu": 1.0,
1002
+ "memory": "512Mi",
1003
+ "gpu": "none"
1004
+ }
1005
+ }"""
1006
+
1007
+ from typing import Any, Dict
1008
+ from llama_index.core import StorageContext, load_index_from_storage, Settings
1009
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
1010
+
1011
+ def process_rag_query(query: str, knowledge_base: Dict) -> Dict[str, Any]:
1012
+ """
1013
+ Retrieves context from a knowledge base and creates an augmented prompt.
1014
+ """
1015
+ Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
1016
+
1017
+ # Load the index from the path provided by the KnowledgeBase node
1018
+ storage_context = StorageContext.from_defaults(persist_dir=knowledge_base['path'])
1019
+ index = load_index_from_storage(storage_context)
1020
+
1021
+ retriever = index.as_retriever(similarity_top_k=3)
1022
+ retrieved_nodes = retriever.retrieve(query)
1023
+
1024
+ # Combine the retrieved text into a single context block
1025
+ context_str = "\n\n".join([node.get_content() for node in retrieved_nodes])
1026
+
1027
+ # Construct the final prompt for the ChatModel
1028
+ rag_prompt_template = (
1029
+ "Use the following context to answer the question. "
1030
+ "If the answer is not in the context, say you don't know.\n\n"
1031
+ "Context:\n{context}\n\n"
1032
+ "Question: {question}"
1033
+ )
1034
+
1035
+ final_prompt = rag_prompt_template.format(context=context_str, question=query)
1036
+
1037
+ return {"rag_prompt": final_prompt}
1038
+
1039
+ # --- Demo Execution ---
1040
+ if __name__ == "__main__":
1041
+ # 1. Build the KB from Modal docs
1042
+ kb_result = process_knowledge_base(
1043
+ kb_name="product-docs-v1",
1044
+ source_type="URL",
1045
+ path_or_url="https://modal.com/docs/guide"
1046
+ )
1047
+ print("Knowledge Base Created:", kb_result)
1048
+
1049
+ # 2. Run a RAG query
1050
+ user_query = "How do I get started with Modal?"
1051
+ rag_result = process_rag_query(user_query, kb_result["knowledge_base"])
1052
+ print("\nAugmented RAG Prompt:\n", rag_result["rag_prompt"])
1053
+
1054
+ ## speech to text
1055
+ """{
1056
+ "id": "HFSpeechToText-1",
1057
+ "type": "HFSpeechToText",
1058
+ "data": {
1059
+ "display_name": "Transcribe Audio (Whisper)",
1060
+ "template": {
1061
+ "model_id": {
1062
+ "display_name": "Model ID",
1063
+ "type": "string",
1064
+ "value": "openai/whisper-large-v3"
1065
+ },
1066
+ "audio_input": {
1067
+ "display_name": "Audio Input",
1068
+ "type": "object",
1069
+ "is_handle": true
1070
+ },
1071
+ "transcribed_text": {
1072
+ "display_name": "Transcribed Text",
1073
+ "type": "string",
1074
+ "is_handle": true
1075
+ }
1076
+ }
1077
+ },
1078
+ "resources": {
1079
+ "cpu": 1.0,
1080
+ "memory": "4Gi",
1081
+ "gpu": "T4"
1082
+ }
1083
+ }"""
1084
+
1085
+ import torch
1086
+ from transformers import pipeline
1087
+ from typing import Any, Dict
1088
+
1089
+ # --- In a real Modal app, this would be structured like this: ---
1090
+ #
1091
+ # import modal
1092
+ # image = modal.Image.debian_slim().pip_install("transformers", "torch", "librosa")
1093
+ # stub = modal.Stub("speech-to-text-model")
1094
+ #
1095
+ # @stub.cls(gpu="T4", image=image)
1096
+ # class WhisperModel:
1097
+ # def __init__(self):
1098
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
1099
+ # self.pipe = pipeline(
1100
+ # "automatic-speech-recognition",
1101
+ # model="openai/whisper-large-v3",
1102
+ # torch_dtype=torch.float16,
1103
+ # device=device,
1104
+ # )
1105
+ #
1106
+ # @modal.method()
1107
+ # def run_inference(self, audio_path):
1108
+ # # The function logic from below would be here.
1109
+ # ...
1110
+ # -------------------------------------------------------------------
1111
+
1112
+
1113
+ def process_hf_speech_to_text(model_id: str, audio_input: Dict[str, Any]) -> Dict[str, Any]:
1114
+ """
1115
+ Transcribes an audio file using a Hugging Face ASR pipeline.
1116
+
1117
+ NOTE: This function simulates the inference part of a stateful Modal class.
1118
+ The model pipeline should be loaded only once.
1119
+ """
1120
+ if audio_input.get("type") != "audio":
1121
+ raise ValueError("Input must be of type 'audio'.")
1122
+
1123
+ audio_path = audio_input["value"]
1124
+
1125
+ # --- This part would be inside the Modal class method ---
1126
+
1127
+ # In a real implementation, 'pipe' would be a class attribute (self.pipe)
1128
+ # loaded in the __init__ or @enter method.
1129
+ device = "cuda" if torch.cuda.is_available() else "cpu"
1130
+ pipe = pipeline(
1131
+ "automatic-speech-recognition",
1132
+ model=model_id,
1133
+ torch_dtype=torch.float16,
1134
+ device=device,
1135
+ )
1136
+
1137
+ outputs = pipe(
1138
+ audio_path,
1139
+ chunk_length_s=30,
1140
+ batch_size=24,
1141
+ return_timestamps=True,
1142
+ )
1143
+
1144
+ return {"transcribed_text": outputs["text"]}
1145
+
1146
+ ## text to speech
1147
+ """{
1148
+ "id": "HFTextToSpeech-1",
1149
+ "type": "HFTextToSpeech",
1150
+ "data": {
1151
+ "display_name": "Generate Speech",
1152
+ "template": {
1153
+ "model_id": {
1154
+ "display_name": "Model ID",
1155
+ "type": "string",
1156
+ "value": "microsoft/speecht5_tts"
1157
+ },
1158
+ "text_input": {
1159
+ "display_name": "Text Input",
1160
+ "type": "string",
1161
+ "is_handle": true
1162
+ },
1163
+ "audio_output": {
1164
+ "display_name": "Audio Output",
1165
+ "type": "object",
1166
+ "is_handle": true
1167
+ }
1168
+ }
1169
+ },
1170
+ "resources": {
1171
+ "cpu": 1.0,
1172
+ "memory": "4Gi",
1173
+ "gpu": "T4"
1174
+ }
1175
+ }"""
1176
+
1177
+ import torch
1178
+ from transformers import pipeline
1179
+ import soundfile as sf
1180
+ from typing import Any, Dict
1181
+
1182
+ def process_hf_text_to_speech(model_id: str, text_input: str) -> Dict[str, Any]:
1183
+ """
1184
+ Synthesizes speech from text using a Hugging Face TTS pipeline.
1185
+
1186
+ NOTE: Simulates the inference part of a stateful Modal class.
1187
+ """
1188
+ # --- This part would be inside the Modal class method ---
1189
+
1190
+ # The pipeline and embeddings would be loaded once in the class.
1191
+ pipe = pipeline("text-to-speech", model=model_id, device="cuda")
1192
+
1193
+ # SpeechT5 requires speaker embeddings for voice characteristics
1194
+ from transformers import SpeechT5HifiGan
1195
+ vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to("cuda")
1196
+
1197
+ # A dummy speaker embedding for a generic voice (SpeechT5 expects a torch tensor of shape [1, 512])
1198
+ speaker_embedding = torch.randn(1, 512)
1200
+
1201
+ speech = pipe(text_input, forward_params={"speaker_embeddings": speaker_embedding})
1202
+
1203
+ # Save the output to a file and return the path
1204
+ output_path = "/tmp/output.wav"
1205
+ sf.write(output_path, speech["audio"], samplerate=speech["sampling_rate"])
1206
+
1207
+ return {"audio_output": {"type": "audio", "value": output_path}}
1208
+
1209
+ ## text generation
1210
+ """{
1211
+ "id": "HFTextGeneration-1",
1212
+ "type": "HFTextGeneration",
1213
+ "data": {
1214
+ "display_name": "Generate with Mistral",
1215
+ "template": {
1216
+ "model_id": {
1217
+ "display_name": "Model ID",
1218
+ "type": "string",
1219
+ "value": "mistralai/Mistral-7B-Instruct-v0.2"
1220
+ },
1221
+ "max_new_tokens": {
1222
+ "display_name": "Max New Tokens",
1223
+ "type": "number",
1224
+ "value": 256
1225
+ },
1226
+ "prompt": {
1227
+ "display_name": "Prompt",
1228
+ "type": "string",
1229
+ "is_handle": true
1230
+ },
1231
+ "generated_text": {
1232
+ "display_name": "Generated Text",
1233
+ "type": "string",
1234
+ "is_handle": true
1235
+ }
1236
+ }
1237
+ },
1238
+ "resources": {
1239
+ "cpu": 2.0,
1240
+ "memory": "24Gi",
1241
+ "gpu": "A10G"
1242
+ }
1243
+ }"""
1244
+
1245
+ import torch
1246
+ from transformers import pipeline
1247
+ from typing import Any, Dict
1248
+
1249
+ def process_hf_text_generation(model_id: str, prompt: str, max_new_tokens: int) -> Dict[str, Any]:
1250
+ """
1251
+ Generates text from a prompt using a Hugging Face LLM.
1252
+
1253
+ NOTE: Simulates the inference part of a stateful Modal class.
1254
+ """
1255
+ # --- This part would be inside the Modal class method ---
1256
+
1257
+ # The pipeline is loaded once on container start.
1258
+ pipe = pipeline(
1259
+ "text-generation",
1260
+ model=model_id,
1261
+ torch_dtype=torch.bfloat16,
1262
+ device_map="auto",
1263
+ )
1264
+
1265
+ messages = [{"role": "user", "content": prompt}]
1266
+
1267
+ # The pipeline needs the prompt to be formatted correctly for instruct models
1268
+ formatted_prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1269
+
1270
+ outputs = pipe(
1271
+ formatted_prompt,
1272
+ max_new_tokens=max_new_tokens,
1273
+ do_sample=True,
1274
+ temperature=0.7,
1275
+ top_k=50,
1276
+ top_p=0.95,
1277
+ )
1278
+
1279
+ # Extract only the generated part of the text
1280
+ generated_text = outputs[0]["generated_text"]
1281
+ # The output includes the prompt, so we remove it.
1282
+ response_text = generated_text[len(formatted_prompt):]
1283
+
1284
+ return {"generated_text": response_text}
1285
+
1286
+ ## image generation
1287
+ """{
1288
+ "id": "HFImageGeneration-1",
1289
+ "type": "HFImageGeneration",
1290
+ "data": {
1291
+ "display_name": "Generate Image (SDXL)",
1292
+ "template": {
1293
+ "model_id": {
1294
+ "display_name": "Base Model ID",
1295
+ "type": "string",
1296
+ "value": "stabilityai/stable-diffusion-xl-base-1.0"
1297
+ },
1298
+ "lora_id": {
1299
+ "display_name": "LoRA Model ID (Optional)",
1300
+ "type": "string",
1301
+ "value": "nerijs/pixel-art-xl"
1302
+ },
1303
+ "prompt": {
1304
+ "display_name": "Prompt",
1305
+ "type": "string",
1306
+ "is_handle": true
1307
+ },
1308
+ "image_output": {
1309
+ "display_name": "Image Output",
1310
+ "type": "object",
1311
+ "is_handle": true
1312
+ }
1313
+ }
1314
+ },
1315
+ "resources": {
1316
+ "cpu": 2.0,
1317
+ "memory": "24Gi",
1318
+ "gpu": "A10G"
1319
+ }
1320
+ }"""
1321
+
1322
+ import torch
1323
+ from diffusers import StableDiffusionXLPipeline
1324
+ from typing import Any, Dict
1325
+
1326
+ def process_hf_image_generation(model_id: str, prompt: str, lora_id: str = None) -> Dict[str, Any]:
1327
+ """
1328
+ Generates an image using a Stable Diffusion pipeline, with optional LoRA.
1329
+
1330
+ NOTE: Simulates the inference part of a stateful Modal class.
1331
+ """
1332
+ # --- This part would be inside the Modal class method ---
1333
+
1334
+ # The base pipeline is loaded once.
1335
+ pipe = StableDiffusionXLPipeline.from_pretrained(
1336
+ model_id,
1337
+ torch_dtype=torch.float16,
1338
+ variant="fp16",
1339
+ use_safetensors=True
1340
+ ).to("cuda")
1341
+
1342
+ # If a LoRA is specified, load and fuse it.
1343
+ # In a real app, this logic would be more complex to handle multiple LoRAs.
1344
+ if lora_id:
1345
+ pipe.load_lora_weights(lora_id)
1346
+ pipe.fuse_lora()
1347
+
1348
+ # Generate the image
1349
+ image = pipe(prompt=prompt).images[0]
1350
+
1351
+ output_path = "/tmp/generated_image.png"
1352
+ image.save(output_path)
1353
+
1354
+ return {"image_output": {"type": "image", "value": output_path}}
1355
+
1356
+ ## captioning image to text
1357
+ """{
1358
+ "id": "HFVisionModel-1",
1359
+ "type": "HFVisionModel",
1360
+ "data": {
1361
+ "display_name": "Describe Image",
1362
+ "template": {
1363
+ "task": {
1364
+ "display_name": "Task",
1365
+ "type": "options",
1366
+ "options": ["image-to-text"],
1367
+ "value": "image-to-text"
1368
+ },
1369
+ "model_id": {
1370
+ "display_name": "Model ID",
1371
+ "type": "string",
1372
+ "value": "Salesforce/blip-image-captioning-large"
1373
+ },
1374
+ "image_input": {
1375
+ "display_name": "Image Input",
1376
+ "type": "object",
1377
+ "is_handle": true
1378
+ },
1379
+ "result": {
1380
+ "display_name": "Result",
1381
+ "type": "string",
1382
+ "is_handle": true
1383
+ }
1384
+ }
1385
+ },
1386
+ "resources": {
1387
+ "cpu": 1.0,
1388
+ "memory": "8Gi",
1389
+ "gpu": "T4"
1390
+ }
1391
+ }"""
1392
+
1393
+ from transformers import pipeline
1394
+ from PIL import Image
1395
+ from typing import Any, Dict
1396
+
1397
+ def process_hf_vision_model(task: str, model_id: str, image_input: Dict[str, Any]) -> Dict[str, Any]:
1398
+ """
1399
+ Performs a vision-based task, like image captioning.
1400
+
1401
+ NOTE: Simulates the inference part of a stateful Modal class.
1402
+ """
1403
+ if image_input.get("type") != "image":
1404
+ raise ValueError("Input must be of type 'image'.")
1405
+
1406
+ image_path = image_input["value"]
1407
+
1408
+ # --- This part would be inside the Modal class method ---
1409
+
1410
+ # The pipeline is loaded once.
1411
+ pipe = pipeline(task, model=model_id, device="cuda")
1412
+
1413
+ # Open the image file
1414
+ image = Image.open(image_path)
1415
+
1416
+ result = pipe(image)
1417
+
1418
+ # The output format for this pipeline is a list of dicts
1419
+ # e.g., [{'generated_text': 'a cat sitting on a couch'}]
1420
+ output_text = result[0]['generated_text']
1421
+
1422
+ return {"result": output_text}
1423
+
1424
+ import os
1425
+ from openai import OpenAI
1426
+
1427
+ client = OpenAI(
1428
+ base_url="https://api.studio.nebius.com/v1/",
1429
+ api_key=os.environ.get("NEBIUS_API_KEY")
1430
+ )
1431
+
1432
+ response = client.images.generate(
1433
+ model="black-forest-labs/flux-dev",
1434
+ response_format="b64_json",
1435
+ extra_body={
1436
+ "response_extension": "png",
1437
+ "width": 1024,
1438
+ "height": 1024,
1439
+ "num_inference_steps": 28,
1440
+ "negative_prompt": "",
1441
+ "seed": -1
1442
+ },
1443
+ prompt="pokemon"
1444
+ )
1445
+
1446
+ print(response.to_json())
1447
+
1448
+
1449
+ ## nebius image generation
1450
+ """{
1451
+ "id": "NebiusImage-1",
1452
+ "type": "NebiusImage",
1453
+ "data": {
1454
+ "display_name": "Nebius Image Generation",
1455
+ "template": {
1456
+ "model": {
1457
+ "display_name": "Model",
1458
+ "type": "options",
1459
+ "options": [
1460
+ "black-forest-labs/flux-dev",
1461
+ "black-forest-labs/flux-schnell",
1462
+ "stability-ai/sdxl"
1463
+ ],
1464
+ "value": "black-forest-labs/flux-dev"
1465
+ },
1466
+ "api_key": {
1467
+ "display_name": "Nebius API Key",
1468
+ "type": "SecretStr",
1469
+ "required": true,
1470
+ "env_var": "NEBIUS_API_KEY"
1471
+ },
1472
+ "prompt": {
1473
+ "display_name": "Prompt",
1474
+ "type": "string",
1475
+ "is_handle": true
1476
+ },
1477
+ "negative_prompt": {
1478
+ "display_name": "Negative Prompt (Optional)",
1479
+ "type": "string",
1480
+ "value": ""
1481
+ },
1482
+ "width": {
1483
+ "display_name": "Width",
1484
+ "type": "number",
1485
+ "value": 1024
1486
+ },
1487
+ "height": {
1488
+ "display_name": "Height",
1489
+ "type": "number",
1490
+ "value": 1024
1491
+ },
1492
+ "num_inference_steps": {
1493
+ "display_name": "Inference Steps",
1494
+ "type": "number",
1495
+ "value": 28
1496
+ },
1497
+ "seed": {
1498
+ "display_name": "Seed",
1499
+ "type": "number",
1500
+ "value": -1
1501
+ },
1502
+ "image_output": {
1503
+ "display_name": "Image Output",
1504
+ "type": "object",
1505
+ "is_handle": true
1506
+ }
1507
+ }
1508
+ },
1509
+ "resources": {
1510
+ "cpu": 0.2,
1511
+ "memory": "256Mi",
1512
+ "gpu": "none"
1513
+ }
1514
+ }"""
1515
+
1516
+ import os
1517
+ import base64
1518
+ from typing import Any, Dict
1519
+ from openai import OpenAI
1520
+
1521
+ def process_nebius_image(
1522
+ model: str,
1523
+ api_key: str,
1524
+ prompt: str,
1525
+ negative_prompt: str = "",
1526
+ width: int = 1024,
1527
+ height: int = 1024,
1528
+ num_inference_steps: int = 28,
1529
+ seed: int = -1
1530
+ ) -> Dict[str, Any]:
1531
+ """
1532
+ Generates an image using the Nebius AI Studio API.
1533
+ """
1534
+ if not api_key:
1535
+ raise ValueError("Nebius API key is missing.")
1536
+
1537
+ client = OpenAI(
1538
+ base_url="https://api.studio.nebius.com/v1/",
1539
+ api_key=api_key
1540
+ )
1541
+
1542
+ try:
1543
+ response = client.images.generate(
1544
+ model=model,
1545
+ response_format="b64_json",
1546
+ prompt=prompt,
1547
+ extra_body={
1548
+ "response_extension": "png",
1549
+ "width": width,
1550
+ "height": height,
1551
+ "num_inference_steps": num_inference_steps,
1552
+ "negative_prompt": negative_prompt,
1553
+ "seed": seed
1554
+ }
1555
+ )
1556
+
1557
+ # Extract the base64 encoded string
1558
+ b64_data = response.data[0].b64_json
1559
+
1560
+ # Decode the string and save the image to a file
1561
+ image_bytes = base64.b64decode(b64_data)
1562
+ output_path = "/tmp/nebius_image.png"
1563
+ with open(output_path, "wb") as f:
1564
+ f.write(image_bytes)
1565
+
1566
+ # Return a data package with the path to the generated image
1567
+ return {"image_output": {"type": "image", "value": output_path}}
1568
+
1569
+ except Exception as e:
1570
+ print(f"Error calling Nebius API: {e}")
1571
+ return {"image_output": {"error": str(e)}}
1572
+
1573
+ ## mcp new
1574
+ """{
1575
+ "id": "MCPConnection-1",
1576
+ "type": "MCPConnection",
1577
+ "data": {
1578
+ "display_name": "MCP Server Connection",
1579
+ "template": {
1580
+ "server_url": {
1581
+ "display_name": "MCP Server URL",
1582
+ "type": "string",
1583
+ "value": "http://localhost:8000/sse",
1584
+ "info": "URL to MCP server (HTTP/SSE or stdio command)"
1585
+ },
1586
+ "connection_type": {
1587
+ "display_name": "Connection Type",
1588
+ "type": "dropdown",
1589
+ "options": ["http", "stdio"],
1590
+ "value": "http"
1591
+ },
1592
+ "allowed_tools": {
1593
+ "display_name": "Allowed Tools (Optional)",
1594
+ "type": "list",
1595
+ "info": "Filter specific tools. Leave empty for all tools"
1596
+ },
1597
+ "api_key": {
1598
+ "display_name": "API Key (Optional)",
1599
+ "type": "SecretStr",
1600
+ "env_var": "MCP_API_KEY"
1601
+ },
1602
+ "mcp_tools_output": {
1603
+ "display_name": "MCP Tools Output",
1604
+ "type": "list",
1605
+ "is_handle": true
1606
+ }
1607
+ }
1608
+ },
1609
+ "resources": {
1610
+ "cpu": 0.1,
1611
+ "memory": "128Mi",
1612
+ "gpu": "none"
1613
+ }
1614
+ }
1615
+ """
1616
+
1617
+ """{
1618
+ "id": "MCPAgent-1",
1619
+ "type": "MCPAgent",
1620
+ "data": {
1621
+ "display_name": "MCP-Powered AI Agent",
1622
+ "template": {
1623
+ "mcp_tools_input": {
1624
+ "display_name": "MCP Tools Input",
1625
+ "type": "list",
1626
+ "is_handle": true
1627
+ },
1628
+ "llm_model": {
1629
+ "display_name": "LLM Model",
1630
+ "type": "dropdown",
1631
+ "options": ["gpt-4", "gpt-3.5-turbo", "gpt-4o", "gpt-4o-mini"],
1632
+ "value": "gpt-4o-mini"
1633
+ },
1634
+ "system_prompt": {
1635
+ "display_name": "System Prompt",
1636
+ "type": "text",
1637
+ "value": "You are a helpful AI assistant with access to various tools. Use the available tools to help answer user questions accurately.",
1638
+ "multiline": true
1639
+ },
1640
+ "user_query": {
1641
+ "display_name": "User Query",
1642
+ "type": "string",
1643
+ "is_handle": true
1644
+ },
1645
+ "max_iterations": {
1646
+ "display_name": "Max Iterations",
1647
+ "type": "int",
1648
+ "value": 10
1649
+ },
1650
+ "agent_response": {
1651
+ "display_name": "Agent Response",
1652
+ "type": "string",
1653
+ "is_handle": true
1654
+ }
1655
+ }
1656
+ },
1657
+ "resources": {
1658
+ "cpu": 0.5,
1659
+ "memory": "512Mi",
1660
+ "gpu": "none"
1661
+ }
1662
+ }
1663
+ """
1664
+
1665
+ import asyncio
1666
+ import os
1667
+ from typing import List, Optional, Dict, Any
1668
+ from llama_index.tools.mcp import BasicMCPClient, McpToolSpec, get_tools_from_mcp_url, aget_tools_from_mcp_url
1669
+ from llama_index.core.tools import FunctionTool
1670
+
1671
+ class MCPConnectionNode:
1672
+ """Node to connect to MCP servers and retrieve tools"""
1673
+
1674
+ def __init__(self):
1675
+ self.client = None
1676
+ self.tools = []
1677
+
1678
+ async def execute(self,
1679
+ server_url: str,
1680
+ connection_type: str = "http",
1681
+ allowed_tools: Optional[List[str]] = None,
1682
+ api_key: Optional[str] = None) -> Dict[str, Any]:
1683
+ """
1684
+ Connect to MCP server and retrieve available tools
1685
+ """
1686
+ try:
1687
+ # Set API key if provided
1688
+ if api_key:
1689
+ os.environ["MCP_API_KEY"] = api_key
1690
+
1691
+ print(f"🔌 Connecting to MCP server: {server_url}")
1692
+
1693
+ if connection_type == "http":
1694
+ # Use LlamaIndex's built-in function to get tools
1695
+ tools = await aget_tools_from_mcp_url(
1696
+ server_url,
1697
+ allowed_tools=allowed_tools
1698
+ )
1699
+ else:
1700
+ # For stdio connections
1701
+ self.client = BasicMCPClient(server_url)
1702
+ mcp_tool_spec = McpToolSpec(
1703
+ client=self.client,
1704
+ allowed_tools=allowed_tools
1705
+ )
1706
+ tools = await mcp_tool_spec.to_tool_list_async()
1707
+
1708
+ self.tools = tools
1709
+
1710
+ print(f"✅ Successfully connected! Retrieved {len(tools)} tools:")
1711
+ for tool in tools:
1712
+ print(f" - {tool.metadata.name}: {tool.metadata.description}")
1713
+
1714
+ return {
1715
+ "success": True,
1716
+ "tools_count": len(tools),
1717
+ "tool_names": [tool.metadata.name for tool in tools],
1718
+ "mcp_tools_output": tools
1719
+ }
1720
+
1721
+ except Exception as e:
1722
+ print(f"❌ Connection failed: {str(e)}")
1723
+ return {
1724
+ "success": False,
1725
+ "error": str(e),
1726
+ "mcp_tools_output": []
1727
+ }
1728
+
1729
+ # Example usage
1730
+ async def mcp_connection_demo():
1731
+ node = MCPConnectionNode()
1732
+
1733
+ # Using a public MCP server (you'll need to replace with actual public servers)
1734
+ result = await node.execute(
1735
+ server_url="http://localhost:8000/sse", # Replace with public MCP server
1736
+ connection_type="http",
1737
+ allowed_tools=None # Get all tools
1738
+ )
1739
+
1740
+ return result
1741
+ from llama_index.core.agent import FunctionCallingAgentWorker, AgentRunner
1742
+ from llama_index.llms.openai import OpenAI
1743
+ from llama_index.core.tools import FunctionTool
1744
+ from typing import List, Dict, Any
1745
+ import os
1746
+
1747
+ class MCPAgentNode:
1748
+ """Node to create and run MCP-powered AI agents"""
1749
+
1750
+ def __init__(self):
1751
+ self.agent = None
1752
+ self.tools = []
1753
+
1754
+ async def execute(self,
1755
+ mcp_tools_input: List[FunctionTool],
1756
+ user_query: str,
1757
+ llm_model: str = "gpt-4o-mini",
1758
+ system_prompt: str = "You are a helpful AI assistant.",
1759
+ max_iterations: int = 10) -> Dict[str, Any]:
1760
+ """
1761
+ Create and run MCP-powered agent using FunctionCallingAgent
1762
+ """
1763
+ try:
1764
+ if not mcp_tools_input:
1765
+ return {
1766
+ "success": False,
1767
+ "error": "No MCP tools provided",
1768
+ "agent_response": "No tools available to process the query."
1769
+ }
1770
+
1771
+ print(f"🤖 Creating agent with {len(mcp_tools_input)} tools...")
1772
+
1773
+ # Initialize LLM
1774
+ llm = OpenAI(
1775
+ model=llm_model,
1776
+ api_key=os.getenv("OPENAI_API_KEY"),
1777
+ temperature=0.1
1778
+ )
1779
+
1780
+ # Create function calling agent (more reliable than ReAct)
1781
+ agent_worker = FunctionCallingAgentWorker.from_tools(
1782
+ tools=mcp_tools_input,
1783
+ llm=llm,
1784
+ verbose=True,
1785
+ system_prompt=system_prompt
1786
+ )
1787
+
1788
+ self.agent = AgentRunner(agent_worker)
1789
+
1790
+ print(f"💭 Processing query: {user_query}")
1791
+
1792
+ # Execute the query
1793
+ response = self.agent.chat(user_query)
1794
+
1795
+ return {
1796
+ "success": True,
1797
+ "agent_response": str(response.response),
1798
+ "user_query": user_query,
1799
+ "tools_used": len(mcp_tools_input)
1800
+ }
1801
+
1802
+ except Exception as e:
1803
+ print(f"❌ Agent execution failed: {str(e)}")
1804
+ return {
1805
+ "success": False,
1806
+ "error": str(e),
1807
+ "agent_response": f"Sorry, I encountered an error while processing your query: {str(e)}"
1808
+ }
1809
+
1810
+ # Example usage
1811
+ async def mcp_agent_demo(tools: List[FunctionTool]):
1812
+ node = MCPAgentNode()
1813
+
1814
+ result = await node.execute(
1815
+ mcp_tools_input=tools,
1816
+ user_query="What tools do you have available and what can you help me with?",
1817
+ llm_model="gpt-4o-mini",
1818
+ system_prompt="You are a helpful AI assistant. Use your available tools to provide accurate and useful responses."
1819
+ )
1820
+
1821
+ return result
1822
+
1823
+
1824
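+ # A minimal end-to-end sketch (an assumption, not part of the original nodes):
+ # chain the connection node's output directly into the agent node. It assumes an
+ # MCP server is reachable at the placeholder URL and that OPENAI_API_KEY is set.
+ async def hypothetical_connection_plus_agent_demo():
+     connection = await MCPConnectionNode().execute(
+         server_url="http://localhost:8000/sse",  # placeholder URL
+         connection_type="http",
+         allowed_tools=None
+     )
+     if not connection["success"]:
+         return connection
+     return await MCPAgentNode().execute(
+         mcp_tools_input=connection["mcp_tools_output"],
+         user_query="Which tools can you call on my behalf?",
+         llm_model="gpt-4o-mini"
+     )
+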
+ # Example: complete MCP workflow (connection node + agent node)
+
+ import asyncio
+ import os
+ from typing import List, Dict, Any
+ from llama_index.core.tools import FunctionTool
+ from llama_index.core.agent import FunctionCallingAgentWorker, AgentRunner
+ from llama_index.llms.openai import OpenAI
+
+ class CompleteMCPWorkflowDemo:
+     """Complete demo of the MCP workflow with connection and agent nodes"""
+
+     def __init__(self):
+         self.connection_node = MCPConnectionNode()
+         self.agent_node = MCPAgentNode()
+
+         # Set your OpenAI API key
+         # os.environ["OPENAI_API_KEY"] = "your-openai-api-key-here"
+
+     async def create_mock_mcp_tools(self) -> List[FunctionTool]:
+         """
+         Create mock MCP tools that simulate a real MCP server.
+         Replace this with an actual MCP server connection when available.
+         """
+         def get_weather(city: str, country: str = "US") -> str:
+             """Get current weather information for a city"""
+             weather_data = {
+                 "london": "Cloudy, 15°C, humidity 80%",
+                 "paris": "Sunny, 22°C, humidity 45%",
+                 "tokyo": "Rainy, 18°C, humidity 90%",
+                 "new york": "Partly cloudy, 20°C, humidity 55%"
+             }
+             result = weather_data.get(city.lower(), f"Weather data not available for {city}")
+             return f"Weather in {city}, {country}: {result}"
+
+         def search_news(topic: str, limit: int = 5) -> str:
+             """Search for the latest news on a given topic"""
+             news_items = [
+                 f"Breaking: New developments in {topic}",
+                 f"Analysis: {topic} trends for 2025",
+                 f"Expert opinion on {topic} industry changes",
+                 f"Research shows {topic} impact on society",
+                 f"Global {topic} market outlook"
+             ]
+             return f"Top {limit} news articles about {topic}:\n" + "\n".join(news_items[:limit])
+
+         def calculate_math(expression: str) -> str:
+             """Calculate mathematical expressions safely"""
+             try:
+                 # Only evaluate expressions made of digits, operators, and parentheses
+                 allowed_chars = "0123456789+-*/().,_ "
+                 if all(c in allowed_chars for c in expression):
+                     result = eval(expression)
+                     return f"Result: {expression} = {result}"
+                 else:
+                     return f"Invalid expression: {expression}"
+             except Exception as e:
+                 return f"Error calculating {expression}: {str(e)}"
+
+         def get_company_info(company: str) -> str:
+             """Get basic company information"""
+             companies = {
+                 "openai": "OpenAI - AI research company, creator of GPT models",
+                 "microsoft": "Microsoft - Technology corporation, cloud computing and software",
+                 "google": "Google - Search engine and technology company",
+                 "amazon": "Amazon - E-commerce and cloud computing platform"
+             }
+             return companies.get(company.lower(), f"Company information not found for {company}")
+
+         # Convert the plain functions into FunctionTool objects
+         tools = [
+             FunctionTool.from_defaults(fn=get_weather),
+             FunctionTool.from_defaults(fn=search_news),
+             FunctionTool.from_defaults(fn=calculate_math),
+             FunctionTool.from_defaults(fn=get_company_info)
+         ]
+
+         return tools
+
+     async def run_complete_workflow(self):
+         """
+         Run the complete MCP workflow demonstration
+         """
+         print("🚀 Starting Complete MCP Workflow Demo")
+         print("=" * 60)
+
+         # Step 1: Set up the MCP connection (simulated)
+         print("\n📡 Step 1: Setting up MCP Connection...")
+
+         # In a real implementation, this would connect to an actual MCP server
+         mock_tools = await self.create_mock_mcp_tools()
+
+         connection_result = {
+             "success": True,
+             "tools_count": len(mock_tools),
+             "tool_names": [tool.metadata.name for tool in mock_tools],
+             "mcp_tools_output": mock_tools
+         }
+
+         if connection_result["success"]:
+             print("✅ MCP Connection successful!")
+             print(f"📋 Retrieved {connection_result['tools_count']} tools:")
+             for tool_name in connection_result['tool_names']:
+                 print(f" - {tool_name}")
+         else:
+             print(f"❌ MCP Connection failed: {connection_result.get('error')}")
+             return
+
+         # Step 2: Create and test the MCP agent
+         print("\n🤖 Step 2: Creating MCP-Powered Agent...")
+
+         test_queries = [
+             "What's the weather like in London?",
+             "Search for news about artificial intelligence",
+             "Calculate 15 * 8 + 32",
+             "Tell me about OpenAI company",
+             "What tools do you have and what can you help me with?"
+         ]
+
+         for i, query in enumerate(test_queries, 1):
+             print(f"\n💬 Query {i}: {query}")
+             print("-" * 40)
+
+             agent_result = await self.agent_node.execute(
+                 mcp_tools_input=connection_result["mcp_tools_output"],
+                 user_query=query,
+                 llm_model="gpt-4o-mini",
+                 system_prompt="""You are a helpful AI assistant with access to weather, news, calculation, and company information tools.
+
+ When a user asks a question:
+ 1. Determine which tool(s) can help answer their question
+ 2. Use the appropriate tool(s) to gather information
+ 3. Provide a clear, helpful response based on the tool results
+
+ Always be informative and explain what tools you used.""",
+                 max_iterations=5
+             )
+
+             if agent_result["success"]:
+                 print("🎯 Agent Response:")
+                 print(f"{agent_result['agent_response']}")
+             else:
+                 print(f"❌ Agent Error: {agent_result['error']}")
+
+             print("\n" + "=" * 50)
+
+ # Function to connect to real MCP servers when available
+ async def connect_to_real_mcp_server(server_url: str):
+     """
+     Example of connecting to a real MCP server.
+     Replace server_url with an actual public MCP server.
+     """
+     try:
+         from llama_index.tools.mcp import aget_tools_from_mcp_url
+
+         print(f"🔌 Attempting to connect to: {server_url}")
+         tools = await aget_tools_from_mcp_url(server_url)
+
+         print(f"✅ Connected successfully! Found {len(tools)} tools:")
+         for tool in tools:
+             print(f" - {tool.metadata.name}: {tool.metadata.description}")
+
+         return tools
+
+     except Exception as e:
+         print(f"❌ Failed to connect to {server_url}: {e}")
+         return []
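+
+ # A lower-level variant (a sketch, assuming the llama-index-tools-mcp package's
+ # BasicMCPClient / McpToolSpec interface that the connection node above builds on):
+ # fetch only a named subset of the server's tools instead of everything.
+ async def connect_with_allowed_tools(server_url: str, allowed_tools: List[str]):
+     """Hypothetical helper: retrieve only the named tools from an MCP server."""
+     from llama_index.tools.mcp import BasicMCPClient, McpToolSpec
+
+     client = BasicMCPClient(server_url)
+     tool_spec = McpToolSpec(client=client, allowed_tools=allowed_tools)
+     return await tool_spec.to_tool_list_async()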
+
+ # Main execution
+ async def main():
+     """Run the complete demo"""
+
+     # Option 1: Run with mock tools (works immediately)
+     print("🎮 Running MCP Workflow Demo with Mock Tools")
+     demo = CompleteMCPWorkflowDemo()
+     await demo.run_complete_workflow()
+
+     # Option 2: Try connecting to real MCP servers (uncomment when available)
+     # real_servers = [
+     #     "http://your-mcp-server.com:8000/sse",
+     #     "https://api.example.com/mcp"
+     # ]
+     #
+     # for server_url in real_servers:
+     #     tools = await connect_to_real_mcp_server(server_url)
+     #     if tools:
+     #         # Use real tools with agent
+     #         agent_node = MCPAgentNode()
+     #         result = await agent_node.execute(
+     #             mcp_tools_input=tools,
+     #             user_query="What can you help me with?",
+     #             llm_model="gpt-4o-mini"
+     #         )
+     #         print(f"Real MCP Agent Response: {result}")
+
+ if __name__ == "__main__":
+     asyncio.run(main())