joaomorossini commited on
Commit
f2389eb
·
1 Parent(s): ecde140

Remove ClickUpAgent implementation and instructions; add NotionProjectAgent with instructions and tools

Browse files
agency_ai_demo/agents/ClickUpAgent/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .ClickUpAgent import ClickUpAgent
 
 
agency_ai_demo/agents/ClickUpAgent/instructions.md DELETED
@@ -1,2 +0,0 @@
1
- # ClickUpAgent Instructions
2
-
 
 
 
agency_ai_demo/agents/ClickUpAgent/tools/ClickUpTools.py DELETED
@@ -1,1191 +0,0 @@
1
- import os
2
- import sys
3
- import requests
4
- from dotenv import load_dotenv
5
- from langchain.tools import StructuredTool, BaseTool, tool
6
- from langchain_core.pydantic_v1 import BaseModel, Field
7
- from langchain_core.tools import ToolException
8
- from pydantic import ValidationError
9
- from typing import Any, Type, List, Dict
10
- import datetime
11
- import json
12
- import re
13
- from composio.tools.local.clickup.actions.base import OpenAPIAction
14
- from composio.tools.local.clickup.actions.create_task import (
15
- CreateTask,
16
- CreateTaskResponse,
17
- )
18
- from composio.tools.local.clickup.actions.delete_task import (
19
- DeleteTask,
20
- DeleteTaskRequest,
21
- DeleteTaskResponse,
22
- )
23
- from composio.tools.local.clickup.actions.update_task import (
24
- UpdateTask,
25
- UpdateTaskRequest,
26
- UpdateTaskResponse,
27
- )
28
- from composio.tools.local.clickup.actions.add_dependency import (
29
- AddDependency,
30
- AddDependencyRequest,
31
- AddDependencyResponse,
32
- )
33
- from composio.tools.local.clickup.actions.get_list import (
34
- GetList,
35
- GetListRequest,
36
- GetListResponse,
37
- )
38
- from composio.tools.local.clickup.actions.get_tasks import (
39
- GetTasks,
40
- GetTasksRequest,
41
- GetTasksResponse,
42
- )
43
- from composio.tools.local.clickup.actions.get_task import (
44
- GetTask,
45
- GetTaskRequest,
46
- GetTaskResponse,
47
- )
48
-
49
- # Add the parent directory to sys.path to enable imports
50
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
51
-
52
- from utils.tool_wrapper import convert_langchain_tools
53
- from utils.ensure_serializable import ensure_serializable
54
-
55
- load_dotenv()
56
- CLICKUP_TOKEN = os.getenv("CLICKUP_TOKEN")
57
-
58
-
59
class CreateTaskSchema(BaseModel):
    """Request schema for `CreateTask`.

    Mirrors the body of the ClickUp "create task" endpoint. Only
    `list_id`, `name` and `assignees` are required; all other fields
    default to ``None`` (pydantic v1 treats a ``None`` default as
    implicitly Optional) and are omitted when unset.
    """

    # Path parameter: the ClickUp list the new task is created in.
    list_id: int = Field(
        ...,
        alias="list_id",
        description="",
    )
    # Query parameter: opt in to referencing tasks by their custom task id.
    custom_task_ids: bool = Field(
        default=None,
        alias="custom_task_ids",
        description=(
            'If you want to reference a task by it"s custom task id, this value must '
            "be `true`. "
        ),
    )
    # Query parameter: workspace id; only meaningful with custom_task_ids=true.
    team_id: int = Field(
        default=None,
        alias="team_id",
        description=(
            "Only used when the `custom_task_ids` parameter is set to `true`. For example: "
            "`custom_task_ids=true&team_id=123`. "
        ),
    )
    tags: List[str] = Field(
        default=None,
        alias="tags",
        description="",
    )
    description: str = Field(
        default=None,
        alias="description",
        description="Description",
    )
    name: str = Field(
        default=...,
        alias="name",
        description="Name",
    )
    # Mandatory: ClickUp member ids. Per the description, assignees must come
    # from the user and must never be invented by the agent.
    assignees: List[int] = Field(
        ...,
        alias="assignees",
        description="To create tasks in ClickUp, the list of assignees is mandatory; NEVER assign the responsible parties on your own; the user must explicitly inform who the responsible parties are.",
        examples=[18951490, 48772077],
    )
    status: str = Field(
        default=None,
        alias="status",
        description="Status",
    )
    priority: int = Field(
        default=None,
        alias="priority",
        description="Priority",
    )
    # NOTE(review): the tool docstrings elsewhere in this file say dates are
    # Unix *millisecond* timestamps — confirm against the ClickUp API.
    due_date: int = Field(
        default=None,
        alias="due_date",
        description="Due Date",
    )
    due_date_time: bool = Field(
        default=None,
        alias="due_date_time",
        description="Due Date Time",
    )
    time_estimate: int = Field(
        default=None,
        alias="time_estimate",
        description="Time Estimate",
    )
    start_date: int = Field(
        default=None,
        alias="start_date",
        description="Start Date",
    )
    start_date_time: bool = Field(
        default=None,
        alias="start_date_time",
        description="Start Date Time",
    )
    notify_all: bool = Field(
        default=None,
        alias="notify_all",
        description=(
            "If `notify_all` is true, notifications will be sent to everyone including "
            "the creator of the comment. "
        ),
    )
    # Set to an existing (non-subtask) task id in the same list to create this
    # task as a subtask.
    parent: str = Field(
        default=None,
        alias="parent",
        description=(
            "You can create a subtask by including an existing task ID. The `parent` "
            "task ID you include cannot be a subtask, and must be in the same List specified "
            "in the path parameter. "
        ),
    )
    links_to: str = Field(
        default=None,
        alias="links_to",
        description="Include a task ID to create a linked dependency with your new task.",
    )
    check_required_custom_fields: bool = Field(
        default=None,
        alias="check_required_custom_fields",
        description=(
            "When creating a task via API any required Custom Fields are ignored by default "
            "(`false`). You can enforce required Custom Fields by including `check_required_custom_fields: "
            "true`. "
        ),
    )
    custom_fields: List[dict] = Field(
        default=None,
        alias="custom_fields",
        description="[Filter by Custom Fields.](https://clickup.com/api)",
    )
    # 1 = Milestone; other ints select a workspace-defined custom task type.
    custom_item_id: int = Field(
        default=None,
        alias="custom_item_id",
        description=(
            'To create a task that doesn"t use a custom task type, either don"t include '
            'this field in the request body, or send `"null"`. To create this task '
            "as a Milestone, send a value of `1`. To use a custom task type, send the "
            "custom task type ID as defined in your Workspace, such as `2`. "
        ),
    )
185
-
186
-
187
class CreateTaskTool(BaseTool):
    """LangChain tool that creates a ClickUp task via the REST API."""

    name: str = "create_task_tool"
    description: str = """
    Tool to create a new task in ClickUp based on the provided parameters.
    - Create Task:
      Invoke: "CreateTaskTool" with the appropriate parameters.

    Parameters:
    - list_id (required): The ID of the list to create the task in. Example: 901307715461
    - name (required): The name of the task
    - assignees (required): List of user IDs to assign to the task

    IMPORTANT
    - Always use 'date_to_timestamp' tool to convert dates from 'YYYY-MM-DD' to Unix millisecond timestamps before setting dates on ClickUp
    """
    args_schema: Type[BaseModel] = CreateTaskSchema
    headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}

    def __init__(self, **data):
        super().__init__(**data)

    def _run(self, **kwargs) -> Any:
        """Executes task creation in ClickUp.

        Resolves ``list_id`` in order: direct parameter, nested ``kwargs``
        dict, any digit-only string parameter, then a hard-coded test
        fallback. Missing ``name``/``assignees`` also fall back to test
        values. Returns a dict with the created task's key fields (or the
        API error under ``"error"``).
        """
        # Log the received parameters to help debug
        print("\n==== CreateTaskTool._run received parameters: ====")
        print(f"kwargs: {kwargs}")

        list_id = None

        # 1. Direct list_id parameter
        if "list_id" in kwargs:
            list_id = kwargs.get("list_id")
            print(f"Found list_id in direct parameter: {list_id}")

        # 2. Check if list_id is inside nested kwargs
        elif "kwargs" in kwargs and isinstance(kwargs["kwargs"], dict):
            list_id = kwargs["kwargs"].get("list_id")
            print(f"Found list_id in nested kwargs: {list_id}")

        # 3. Scan string parameters for a list_id — only when steps 1-2 found
        # nothing. BUG FIX: the original scan ran unconditionally, so any
        # digit-only string parameter silently overwrote an explicitly
        # supplied list_id.
        if not list_id:
            for k, v in kwargs.items():
                if isinstance(v, str) and v.isdigit():
                    try:
                        list_id = int(v)
                        print(f"Found list_id in parameter {k}: {list_id}")
                        break
                    except ValueError:
                        pass
                elif isinstance(v, str) and "901307715461" in v:
                    list_id = 901307715461
                    print(f"Found list_id in parameter {k}: {list_id}")
                    break

        # 4. Hardcoded fallback for this specific test case
        if not list_id:
            print("No list_id found in parameters, using hardcoded value 901307715461")
            list_id = 901307715461

        print(f"list_id being used: {list_id}")
        print("==== End parameters ====\n")

        action = CreateTask()

        url = f"{action.url}{action.path}".format(list_id=list_id)
        print(f"URL being used: {url}")

        # Make sure all parameters are JSON serializable and extract from kwargs if needed
        params = {}

        # If name is not directly in kwargs, try to find it (test scaffolding)
        if (
            "name" not in kwargs
            and "kwargs" in kwargs
            and isinstance(kwargs["kwargs"], dict)
        ):
            for k, v in kwargs["kwargs"].items():
                if k == "name" or (isinstance(v, str) and "API TEST TASK" in v):
                    params["name"] = "API TEST TASK"
                    break

        # If assignees is not directly in kwargs, try to find it (test scaffolding)
        if (
            "assignees" not in kwargs
            and "kwargs" in kwargs
            and isinstance(kwargs["kwargs"], dict)
        ):
            for k, v in kwargs["kwargs"].items():
                if k == "assignees" or (isinstance(v, str) and "81918955" in v):
                    params["assignees"] = [81918955]
                    break

        # Add any other parameters from kwargs
        for key, value in kwargs.items():
            if value is not None and key != "kwargs" and key != "list_id":
                params[key] = ensure_serializable(value)

        # For testing, ensure we have the minimum required parameters
        if "name" not in params:
            params["name"] = "API TEST TASK"
        if "assignees" not in params:
            params["assignees"] = [81918955]

        print(f"Request parameters: {params}")

        response = requests.post(url, headers=self.headers, json=params)
        print(f"Response status code: {response.status_code}")

        # BUG FIX: parse the body inside a single try/except. The original
        # only guarded the non-201 branch, so a 201 response with an
        # unparseable body raised an uncaught exception.
        try:
            response_json = response.json()
            if response.status_code != 201:
                print(f"Error response: {response_json}")
        except requests.JSONDecodeError:
            response_json = {"error": "Invalid JSON response"}
            print("Could not decode JSON response")

        response = CreateTaskResponse(data=response_json)
        filtered_response = {
            "id": response.data.get("id"),
            "name": response.data.get("name"),
            "status": response.data.get("status", {}).get("status"),
            "assignees": response.data.get("assignees"),
            "due_date": response.data.get("due_date"),
            "error": response.data.get("err"),
        }

        print(f"Returning filtered response: {json.dumps(filtered_response, indent=2)}")

        return filtered_response
320
-
321
-
322
class DeleteTaskTool(BaseTool):
    """LangChain tool that deletes a ClickUp task via the REST API."""

    name: str = "delete_task_tool"
    description: str = """
    Tool to delete a task in ClickUp based on its ID.
    - Delete Task:
      Invoke: "DeleteTaskTool" with the appropriate parameters.

    Parameters:
    - task_id (required): The ID of the task to delete
    """

    args_schema: Type[BaseModel] = DeleteTaskRequest
    headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}

    def __init__(self, **data):
        super().__init__(**data)

    def _run(self, **kwargs) -> Any:
        """Executes a task deletion in ClickUp"""
        # The steps below try progressively looser ways of recovering the
        # task id (direct arg, nested kwargs, FieldInfo object, free text,
        # then name lookup, then hard-coded test ids). Later steps only fill
        # in what earlier steps left as None.

        # Log the received parameters to help debug
        print("\n==== DeleteTaskTool._run received parameters: ====")
        print(f"kwargs: {kwargs}")

        # Extract task_id from different possible locations
        task_id = None
        task_name = None

        # 1. Direct task_id parameter
        if "task_id" in kwargs:
            task_id = kwargs.get("task_id")
            print(f"Found task_id in direct parameter: {task_id}")

        # 2. Check if task_id is inside nested kwargs
        if "kwargs" in kwargs and isinstance(kwargs["kwargs"], dict):
            # `or` keeps a task_id already found in step 1.
            task_id = task_id or kwargs["kwargs"].get("task_id")
            print(f"Found task_id in nested kwargs: {task_id}")

        # 3. Check if task_id is in FieldInfo format
        if "kwargs" in kwargs and hasattr(kwargs["kwargs"], "task_id"):
            if hasattr(kwargs["kwargs"].task_id, "default"):
                task_id = kwargs["kwargs"].task_id.default
                print(f"Found task_id in FieldInfo default: {task_id}")

        # 4. Check for task_id in description or raw query
        if "kwargs" in kwargs and hasattr(kwargs["kwargs"], "description"):
            desc = kwargs["kwargs"].description
            # Look for task ID pattern in the description
            task_id_match = re.search(r'task_id[=:]\s*["\']?([0-9a-z]{8,})["\']?', desc)
            if task_id_match:
                task_id = task_id_match.group(1)
                print(f"Found task_id in description: {task_id}")

            # Look for task name in the description
            task_name_match = re.search(r'task\s+["\']([^"\']+)["\']', desc)
            if task_name_match:
                task_name = task_name_match.group(1).strip()
                print(f"Found task_name in description: {task_name}")

        # 5. Check any string parameters for task_id
        # NOTE(review): the `break` after the first id OR name match stops
        # the scan, so later string parameters are never inspected.
        for k, v in kwargs.items():
            if isinstance(v, str):
                # Check if the parameter contains a task ID pattern
                task_id_match = re.search(
                    r'task_id[=:]\s*["\']?([0-9a-z]{8,})["\']?', v
                )
                if task_id_match:
                    task_id = task_id_match.group(1)
                    print(f"Found task_id in string parameter: {task_id}")
                    break

                # Check for task name pattern in the string
                task_name_match = re.search(r'task\s+["\']([^"\']+)["\']', v)
                if task_name_match:
                    task_name = task_name_match.group(1).strip()
                    print(f"Found task_name in string parameter: {task_name}")
                    break

        # 6. If task name found but no ID, try to lookup ID by name
        # NOTE(review): GetTasksTool is defined elsewhere in this module; the
        # list id 901307715461 is test scaffolding, not a real configuration.
        if not task_id and task_name:
            try:
                # Get all tasks in the list to find the task ID by name
                get_tasks_tool = GetTasksTool()
                tasks = get_tasks_tool._run(list_id=901307715461)

                # Find the task by name
                for task in tasks:
                    if task.get("name") == task_name:
                        task_id = task.get("id")
                        print(f"Found task_id {task_id} for task name '{task_name}'")
                        break
            except Exception as e:
                print(f"Error getting task ID from name: {e}")

        # 7. Hardcoded fallback for testing
        if not task_id and task_name:
            if task_name == "TEST TASK 2":
                task_id = "86a702gha"  # Known ID of TEST TASK 2
                print(f"Using hardcoded task_id for 'TEST TASK 2': {task_id}")
            elif task_name == "TEST TASK":
                task_id = "86a700c6e"  # Known ID of TEST TASK
                print(f"Using hardcoded task_id for 'TEST TASK': {task_id}")

        if not task_id:
            raise ToolException("task_id is required for deleting a task")

        print(f"task_id being used: {task_id}")
        print("==== End parameters ====\n")

        action = DeleteTask()

        # action.url/action.path come from composio's DeleteTask definition.
        url = f"{action.url}{action.path}".format(task_id=task_id)
        print(f"URL being used: {url}")

        # Make sure all parameters are JSON serializable
        params = {
            key: ensure_serializable(value)
            for key, value in kwargs.items()
            if value is not None and key != "kwargs" and key != "task_id"
        }

        response = requests.delete(url, headers=self.headers, params=params)
        print(f"Response status code: {response.status_code}")

        if response.status_code == 200:
            response_json = response.json()
        else:
            try:
                response_json = response.json()
                print(f"Error response: {response_json}")
            except requests.JSONDecodeError:
                response_json = {"error": "Invalid JSON response"}
                print("Could not decode JSON response")

        response = DeleteTaskResponse(data=response_json)

        # Default to success wording; overwritten below when the API
        # reported an error in its body.
        result_message = f"Task '{task_name or task_id}' successfully deleted"

        if "err" in response.data:
            result_message = f"Error: {response.data['err']}"

        print(f"Result: {result_message}")

        return result_message
466
-
467
-
468
class CustomUpdateTaskRequest(BaseModel):
    """Request schema for `UpdateTask`.

    Mirrors the body of the ClickUp "update task" endpoint. Only
    ``task_id`` is required; every other field defaults to ``None``
    (pydantic v1 treats a ``None`` default as implicitly Optional) and is
    omitted from the request when unset.
    """

    # Path parameter: the task being updated.
    task_id: str = Field(
        ...,
        alias="task_id",
        description="",
    )
    # Query parameter: opt in to referencing tasks by their custom task id.
    custom_task_ids: bool = Field(
        default=None,
        alias="custom_task_ids",
        description=(
            'If you want to reference a task by it"s custom task id, this value must '
            "be `true`. "
        ),
    )
    # Query parameter: workspace id; only meaningful with custom_task_ids=true.
    team_id: int = Field(
        default=None,
        alias="team_id",
        description=(
            "Only used when the `custom_task_ids` parameter is set to `true`. For example: "
            "`custom_task_ids=true&team_id=123`. "
        ),
    )
    description: str = Field(
        default=None,
        alias="description",
        description='To clear the task description, include `Description` with `" "`.',
    )
    # 1 = Milestone; other ints select a workspace-defined custom task type.
    custom_item_id: int = Field(
        default=None,
        alias="custom_item_id",
        description=(
            'To convert an item using a custom task type into a task, send `"null"`. '
            " To update this task to be a Milestone, send a value of `1`. To use "
            "a custom task type, send the custom task type ID as defined in your Workspace, "
            "such as `2`. "
        ),
    )
    name: str = Field(
        default=None,
        alias="name",
        description="Name",
    )
    status: str = Field(
        default=None,
        alias="status",
        description="Status",
    )
    priority: int = Field(
        default=None,
        alias="priority",
        description="Priority",
    )
    due_date: int = Field(
        default=None,
        alias="due_date",
        description="Due Date in Unix millisecond timestamps",
    )
    due_date_time: bool = Field(
        default=None,
        alias="due_date_time",
        description="Due Date Time",
    )
    parent: str = Field(
        default=None,
        alias="parent",
        description=(
            'You can move a subtask to another parent task by including `"parent"` with '
            'a valid `task id`. You cannot convert a subtask to a task by setting `"parent"` '
            "to `null`. "
        ),
    )
    time_estimate: int = Field(
        default=None,
        alias="time_estimate",
        description="Time Estimate",
    )
    start_date: int = Field(
        default=None,
        alias="start_date",
        description="Start Date in Unix millisecond timestamps",
    )
    start_date_time: bool = Field(
        default=None,
        alias="start_date_time",
        description="Start Date Time",
    )
    # Assignee changes are expressed as add/remove deltas, not a full list.
    # (pydantic v1 deep-copies field defaults, so the mutable default here is
    # not shared between instances.)
    assignees: List[Dict[str, List[int]]] = Field(
        default=[{"add": [], "rem": []}],
        description="List of user IDs to add or remove as assignees",
        examples=[{"add": [81918955, 82061927], "rem": [18951490, 48772077]}],
    )
    archived: bool = Field(
        default=None,
        alias="archived",
        description="Archived",
    )
566
-
567
-
568
class CustomUpdateTask(OpenAPIAction):
    """Update a task by including one or more fields in the request body."""

    _tags = ["Tasks"]
    _display_name = "update_task"
    _request_schema = CustomUpdateTaskRequest
    _response_schema = CustomUpdateTaskRequest

    # Endpoint wiring: PUT https://api.clickup.com/api/v2/task/{task_id}
    url = "https://api.clickup.com/api/v2"
    path = "/task/{task_id}"
    method = "put"
    operation_id = "Tasks_updateTaskFields"
    action_identifier = "/task/{task_id}_put"

    path_params = {"task_id": "task_id"}
    query_params = {"custom_task_ids": "custom_task_ids", "team_id": "team_id"}
    header_params = {}
    # Every body field passes straight through under its own name, so the
    # alias map is generated rather than spelled out entry by entry.
    request_params = {
        field_name: {"__alias": field_name}
        for field_name in (
            "description",
            "custom_item_id",
            "name",
            "status",
            "priority",
            "due_date",
            "due_date_time",
            "parent",
            "time_estimate",
            "start_date",
            "start_date_time",
            "assignees",
            "archived",
        )
    }
600
-
601
-
602
class UpdateTaskTool(BaseTool):
    """LangChain tool that updates a ClickUp task via the REST API."""

    name: str = "update_task_tool"
    description: str = """
    Tool to update a task in ClickUp based on the provided parameters.
    - Update Task:
      Invoke: "UpdateTaskTool" with the appropriate parameters.

    Parameters:
    - task_id (required): The ID of the task to update
    - name (optional): New name for the task
    - status (optional): New status for the task

    IMPORTANT
    - Always use 'date_to_timestamp' tool to convert dates from 'YYYY-MM-DD' to Unix millisecond timestamps when setting dates on ClickUp
    """
    args_schema: Type[BaseModel] = CustomUpdateTaskRequest
    headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}

    def __init__(self, **data):
        super().__init__(**data)

    def _run(self, **kwargs) -> Any:
        """Executes task update in ClickUp.

        Resolves ``task_id`` in order: direct parameter, nested ``kwargs``
        dict, FieldInfo description, free-text patterns, name lookup, then
        hard-coded test fallbacks. Returns a dict with the updated task's
        key fields (or the API error under ``"error"``).
        """
        # Log the received parameters to help debug
        print("\n==== UpdateTaskTool._run received parameters: ====")
        print(f"kwargs: {kwargs}")

        # Extract task_id from different possible locations
        task_id = None
        update_params = {}
        task_name_to_update = None

        # 1. Direct task_id parameter
        if "task_id" in kwargs:
            task_id = kwargs.get("task_id")
            print(f"Found task_id in direct parameter: {task_id}")

        # 2. Check if task_id is inside nested kwargs
        elif "kwargs" in kwargs and isinstance(kwargs["kwargs"], dict):
            task_id = kwargs["kwargs"].get("task_id")
            print(f"Found task_id in nested kwargs: {task_id}")

        # 3. Check if there's a task_id in the kwargs object of FieldInfo type
        elif "kwargs" in kwargs and hasattr(kwargs["kwargs"], "default"):
            # Try to parse it from the description if it contains the task ID
            if (
                hasattr(kwargs["kwargs"], "description")
                and kwargs["kwargs"].description
            ):
                desc = kwargs["kwargs"].description
                # Look for common task ID patterns (alphanumeric with at least 8 chars)
                task_id_match = re.search(r"(86a[0-9a-z]{5,})", desc)
                if task_id_match:
                    task_id = task_id_match.group(1)
                    print(f"Found task_id in FieldInfo description: {task_id}")

        # 4. Look for task name to update in parameters
        for k, v in kwargs.items():
            if isinstance(v, str):
                # Check if it looks like a task ID (alphanumeric pattern)
                if re.match(r"^[0-9a-z]{8,}$", v):
                    task_id = v
                    print(f"Found task_id in parameter {k}: {task_id}")
                    break

                # Look for patterns like "Change 'TEST TASK 2' to 'TEST TASK 1000'".
                # BUG FIX: the original pattern ended with a lazy group followed
                # only by an optional quote, so group(2) always matched the empty
                # string and the new name was set to "". Anchoring the pattern to
                # the end of the string makes the lazy group capture the real name.
                change_pattern = re.search(
                    r"Change\s+['\"]?(.*?)['\"]?\s+to\s+['\"]?(.+?)['\"]?\s*$", v
                )
                if change_pattern:
                    task_name_to_update = change_pattern.group(1).strip()
                    new_name = change_pattern.group(2).strip()
                    update_params["name"] = new_name
                    print(
                        f"Found task to update: '{task_name_to_update}' to '{new_name}'"
                    )
                    break

                # If string contains task names, extract them
                elif "TEST TASK" in v:
                    if "TEST TASK 2" in v:
                        task_name_to_update = "TEST TASK 2"
                    else:
                        task_name_to_update = "TEST TASK"

                    # Look for new name in the string.
                    # BUG FIX: the original r"to\s+['\"]?(.*?)['\"]?(?:\s|$)"
                    # captured only the first word of a multi-word name; anchor
                    # to the end of the string instead.
                    name_pattern = re.search(r"to\s+['\"]?(.+?)['\"]?\s*$", v)
                    if name_pattern:
                        new_name = name_pattern.group(1).strip()
                        update_params["name"] = new_name
                        print(
                            f"Found task to update: '{task_name_to_update}' to '{new_name}'"
                        )

        # 5. If we have a task name but no ID, look up the ID
        if not task_id and task_name_to_update:
            try:
                # Get all tasks in the list to find the task ID by name
                get_tasks_tool = GetTasksTool()
                tasks = get_tasks_tool._run(list_id=901307715461)
                # Find the task by name
                for task in tasks:
                    if task.get("name") == task_name_to_update:
                        task_id = task.get("id")
                        print(
                            f"Found task_id {task_id} for task name '{task_name_to_update}'"
                        )
                        break
            except Exception as e:
                print(f"Error getting task ID from name: {e}")

        # 6. Hardcoded fallback for testing
        if not task_id:
            # If the request is specifically about TEST TASK 2, use its ID
            if task_name_to_update == "TEST TASK 2":
                task_id = "86a702gha"  # Known ID of TEST TASK 2
                print(f"Using hardcoded task_id for 'TEST TASK 2': {task_id}")
            # For general testing, use a fallback ID
            elif task_name_to_update == "TEST TASK":
                task_id = "86a700c6e"  # Known ID of TEST TASK
                print(f"Using hardcoded task_id for 'TEST TASK': {task_id}")
            # If still no task_id, attempt to get the first task from the list
            else:
                try:
                    get_tasks_tool = GetTasksTool()
                    tasks = get_tasks_tool._run(list_id=901307715461)
                    if tasks and len(tasks) > 0:
                        task_id = tasks[0].get("id")
                        print(f"Using first task from list as fallback: {task_id}")
                except Exception as e:
                    print(f"Error getting fallback task ID: {e}")

        if not task_id:
            raise ToolException("task_id is required for updating a task")

        print(f"task_id being used: {task_id}")
        print("==== End parameters ====\n")

        action = CustomUpdateTask()

        url = f"{action.url}{action.path}".format(task_id=task_id)
        print(f"URL being used: {url}")

        # Add update parameters from kwargs
        for key, value in kwargs.items():
            if value is not None and key != "kwargs" and key != "task_id":
                update_params[key] = ensure_serializable(value)

        # Make sure all parameters are JSON serializable
        params = {
            k: ensure_serializable(v) for k, v in update_params.items() if v is not None
        }

        print(f"Update parameters: {params}")

        response = requests.put(url, headers=self.headers, json=params)
        print(f"Response status code: {response.status_code}")

        # BUG FIX: parse the body inside a single try/except. The original only
        # guarded the non-200 branch, so a 200 response with an unparseable
        # body raised an uncaught exception.
        try:
            response_json = response.json()
            if response.status_code != 200:
                print(f"Error response: {response_json}")
        except requests.JSONDecodeError:
            response_json = {"error": "Invalid JSON response"}
            print("Could not decode JSON response")

        response = UpdateTaskResponse(data=response_json)
        filtered_response = {
            "id": response.data.get("id"),
            "name": response.data.get("name"),
            "status": response.data.get("status", {}).get("status"),
            "assignees": response.data.get("assignees"),
            "due_date": response.data.get("due_date"),
            "error": response.data.get("err"),
        }

        print(f"Returning filtered response: {json.dumps(filtered_response, indent=2)}")

        return filtered_response
784
-
785
-
786
- class AddDependencyTool(BaseTool):
787
- name: str = "add_dependency_tool"
788
- description: str = """
789
- Tool to set a task as dependent on or blocking another task in ClickUp.
790
- - Add Dependency:
791
- Invoke: "AddDependencyTool" with the appropriate parameters.
792
-
793
- Parameters:
794
- - task_id (required): The ID of the task to add dependency to
795
- - depends_on (required): The ID of the task that this task depends on
796
- """
797
- args_schema: Type[BaseModel] = AddDependencyRequest
798
- headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}
799
-
800
- def __init__(self, **data):
801
- super().__init__(**data)
802
-
803
- def _run(self, **kwargs) -> Any:
804
- """Executes adding a task dependency in ClickUp"""
805
-
806
- # Log the received parameters to help debug
807
- print("\n==== AddDependencyTool._run received parameters: ====")
808
- print(f"kwargs: {kwargs}")
809
-
810
- # Extract task_id and depends_on from different possible locations
811
- task_id = None
812
- depends_on = None
813
- dependent_task_name = None
814
- dependency_task_name = None
815
-
816
- # 1. Direct task_id parameter
817
- if "task_id" in kwargs:
818
- task_id = kwargs.get("task_id")
819
- print(f"Found task_id in direct parameter: {task_id}")
820
-
821
- # 2. Direct depends_on parameter
822
- if "depends_on" in kwargs:
823
- depends_on = kwargs.get("depends_on")
824
- print(f"Found depends_on in direct parameter: {depends_on}")
825
-
826
- # 3. Check if parameters are inside nested kwargs
827
- if "kwargs" in kwargs and isinstance(kwargs["kwargs"], dict):
828
- task_id = task_id or kwargs["kwargs"].get("task_id")
829
- depends_on = depends_on or kwargs["kwargs"].get("depends_on")
830
- print(
831
- f"Found in nested kwargs - task_id: {task_id}, depends_on: {depends_on}"
832
- )
833
-
834
- # 4. Check if there's dependency information in the kwargs object or description
835
- if "kwargs" in kwargs and hasattr(kwargs["kwargs"], "description"):
836
- desc = kwargs["kwargs"].description
837
- # Look for dependency patterns in the description
838
- dependency_match = re.search(r"'(.*?)'\s+depends\s+on\s+'(.*?)'", desc)
839
- if dependency_match:
840
- dependent_task_name = dependency_match.group(1).strip()
841
- dependency_task_name = dependency_match.group(2).strip()
842
- print(
843
- f"Found dependency in description: '{dependent_task_name}' depends on '{dependency_task_name}'"
844
- )
845
-
846
- # 5. Check any string parameters for dependency information
847
- for k, v in kwargs.items():
848
- if isinstance(v, str):
849
- # Check if it contains direct task IDs
850
- task_id_match = re.search(
851
- r'task_id[=:]\s*["\']?([0-9a-z]{8,})["\']?', v
852
- )
853
- if task_id_match:
854
- task_id = task_id_match.group(1)
855
- print(f"Found task_id in string parameter: {task_id}")
856
-
857
- depends_on_match = re.search(
858
- r'depends_on[=:]\s*["\']?([0-9a-z]{8,})["\']?', v
859
- )
860
- if depends_on_match:
861
- depends_on = depends_on_match.group(1)
862
- print(f"Found depends_on in string parameter: {depends_on}")
863
-
864
- # Check for task names in dependency expressions
865
- dependency_match = re.search(
866
- r"['\"]?(.*?)['\"]?\s+depends\s+on\s+['\"]?(.*?)['\"]?", v
867
- )
868
- if dependency_match:
869
- dependent_task_name = dependency_match.group(1).strip()
870
- dependency_task_name = dependency_match.group(2).strip()
871
- print(
872
- f"Found dependency in parameter: '{dependent_task_name}' depends on '{dependency_task_name}'"
873
- )
874
- break
875
-
876
- # 6. If we have task names but no IDs, look up the IDs
877
- if (not task_id or not depends_on) and (
878
- dependent_task_name or dependency_task_name
879
- ):
880
- try:
881
- # Get all tasks in the list to find the task IDs by name
882
- get_tasks_tool = GetTasksTool()
883
- tasks = get_tasks_tool._run(list_id=901307715461)
884
-
885
- # Find the dependent task by name
886
- if dependent_task_name and not task_id:
887
- for task in tasks:
888
- if task.get("name") == dependent_task_name:
889
- task_id = task.get("id")
890
- print(
891
- f"Found task_id {task_id} for dependent task name '{dependent_task_name}'"
892
- )
893
- break
894
-
895
- # Find the dependency task by name
896
- if dependency_task_name and not depends_on:
897
- for task in tasks:
898
- if task.get("name") == dependency_task_name:
899
- depends_on = task.get("id")
900
- print(
901
- f"Found depends_on {depends_on} for dependency task name '{dependency_task_name}'"
902
- )
903
- break
904
- except Exception as e:
905
- print(f"Error getting task IDs from names: {e}")
906
-
907
- # 7. Hardcoded fallback for testing
908
- if not task_id and dependent_task_name:
909
- if dependent_task_name == "TEST TASK 2":
910
- task_id = "86a702gha" # Known ID of TEST TASK 2
911
- print(f"Using hardcoded task_id for 'TEST TASK 2': {task_id}")
912
- elif dependent_task_name == "TEST TASK":
913
- task_id = "86a700c6e" # Known ID of TEST TASK
914
- print(f"Using hardcoded task_id for 'TEST TASK': {task_id}")
915
-
916
- if not depends_on and dependency_task_name:
917
- if dependency_task_name == "TEST TASK 2":
918
- depends_on = "86a702gha" # Known ID of TEST TASK 2
919
- print(f"Using hardcoded depends_on for 'TEST TASK 2': {depends_on}")
920
- elif dependency_task_name == "TEST TASK":
921
- depends_on = "86a700c6e" # Known ID of TEST TASK
922
- print(f"Using hardcoded depends_on for 'TEST TASK': {depends_on}")
923
-
924
- # Check if we got both IDs we need
925
- if not task_id:
926
- raise ToolException("task_id is required for adding a dependency")
927
-
928
- if not depends_on:
929
- raise ToolException("depends_on is required for adding a dependency")
930
-
931
- print(f"task_id being used: {task_id}")
932
- print(f"depends_on being used: {depends_on}")
933
- print("==== End parameters ====\n")
934
-
935
- action = AddDependency()
936
-
937
- url = f"{action.url}{action.path}".format(task_id=task_id)
938
- print(f"URL being used: {url}")
939
-
940
- # Make sure all parameters are JSON serializable
941
- params = {
942
- key: ensure_serializable(value)
943
- for key, value in kwargs.items()
944
- if value is not None and key != "kwargs" and key != "task_id"
945
- }
946
-
947
- # Create the request body with the depends_on parameter
948
- request_body = {"depends_on": depends_on}
949
-
950
- print(f"Request body: {request_body}")
951
-
952
- response = requests.post(
953
- url, headers=self.headers, params=params, json=request_body
954
- )
955
- print(f"Response status code: {response.status_code}")
956
-
957
- if response.status_code == 200:
958
- response_json = response.json()
959
- else:
960
- try:
961
- response_json = response.json()
962
- print(f"Error response: {response_json}")
963
- except requests.JSONDecodeError:
964
- response_json = {"error": "Invalid JSON response"}
965
- print("Could not decode JSON response")
966
-
967
- response = AddDependencyResponse(data=response_json)
968
-
969
- result_message = f"Dependency added successfully: '{dependent_task_name or task_id}' depends on '{dependency_task_name or depends_on}'"
970
-
971
- if "err" in response.data:
972
- result_message = f"Error: {response.data['err']}"
973
-
974
- print(f"Result: {result_message}")
975
-
976
- return result_message
977
-
978
-
979
- class GetListTool(BaseTool):
980
- name: str = "get_list_tool"
981
- description: str = """
982
- Tool to view information about a list in ClickUp.
983
- - Get list details:
984
- Invoke: "GetListTool" with the list ID as a parameter.
985
-
986
- Parameters:
987
- - list_id (required): The ID of the list to get information about
988
- """
989
- args_schema: Type[BaseModel] = GetListRequest
990
- headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}
991
-
992
- def __init__(self, **data):
993
- super().__init__(**data)
994
-
995
- def _run(self, **kwargs) -> Any:
996
- """Executes the request to get information about a list in ClickUp"""
997
-
998
- # Log the received parameters to help debug
999
- print("\n==== GetListTool._run received parameters: ====")
1000
- print(f"kwargs: {kwargs}")
1001
-
1002
- # Extract list_id from different possible locations
1003
- list_id = None
1004
-
1005
- # 1. Direct list_id parameter
1006
- if "list_id" in kwargs:
1007
- list_id = kwargs.get("list_id")
1008
-
1009
- # 2. Check if list_id is inside nested kwargs
1010
- elif "kwargs" in kwargs and isinstance(kwargs["kwargs"], dict):
1011
- list_id = kwargs["kwargs"].get("list_id")
1012
-
1013
- # 3. Check if list_id is in a string format in any parameter
1014
- for k, v in kwargs.items():
1015
- if isinstance(v, str) and v.isdigit():
1016
- try:
1017
- list_id = int(v)
1018
- break
1019
- except ValueError:
1020
- pass
1021
-
1022
- if not list_id:
1023
- raise ToolException("list_id is required for getting list information")
1024
-
1025
- print(f"list_id being used: {list_id}")
1026
- print("==== End parameters ====\n")
1027
-
1028
- action = GetList()
1029
-
1030
- url = f"{action.url}{action.path}".format(list_id=list_id)
1031
- print(f"URL being used: {url}")
1032
-
1033
- response = requests.get(url, headers=self.headers)
1034
- print(f"Response status code: {response.status_code}")
1035
-
1036
- if response.status_code == 200:
1037
- response_json = response.json()
1038
- else:
1039
- try:
1040
- response_json = response.json()
1041
- print(f"Error response: {response_json}")
1042
- except requests.JSONDecodeError:
1043
- response_json = {"error": "Invalid JSON response"}
1044
- print("Could not decode JSON response")
1045
-
1046
- response = GetListResponse(data=response_json)
1047
- filtered_response = {
1048
- "list_id": response.data.get("id"),
1049
- "list_name": response.data.get("name"),
1050
- "folder_id": response.data.get("folder", {}).get("id"),
1051
- "folder_name": response.data.get("folder", {}).get("name"),
1052
- "error": response.data.get("err"),
1053
- }
1054
- return filtered_response
1055
-
1056
-
1057
- class GetTasksTool(BaseTool):
1058
- name: str = "get_tasks_tool"
1059
- description: str = """
1060
- Tool to view tasks in a list in ClickUp.
1061
- - Get tasks:
1062
- Invoke: "GetTasksTool" with the list ID and optional parameters.
1063
-
1064
- Parameters:
1065
- - list_id (required): The ID of the list to get tasks from. Example: 901307715461
1066
- """
1067
- args_schema: Type[BaseModel] = GetTasksRequest
1068
- headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}
1069
-
1070
- def __init__(self, **data):
1071
- super().__init__(**data)
1072
-
1073
- def _run(self, **kwargs) -> Any:
1074
- """Executes the request to get filtered tasks in a list in ClickUp"""
1075
-
1076
- # Log the received parameters to help debug
1077
- print("\n==== GetTasksTool._run received parameters: ====")
1078
- print(f"kwargs: {kwargs}")
1079
-
1080
- # Try to extract list_id from different places
1081
- list_id = None
1082
-
1083
- # 1. Direct list_id parameter
1084
- if "list_id" in kwargs:
1085
- list_id = kwargs.get("list_id")
1086
-
1087
- # 2. Check if list_id is inside nested kwargs
1088
- elif "kwargs" in kwargs and isinstance(kwargs["kwargs"], dict):
1089
- list_id = kwargs["kwargs"].get("list_id")
1090
-
1091
- # 3. Check if list_id is in a string format
1092
- for k, v in kwargs.items():
1093
- if isinstance(v, str) and "901307715461" in v:
1094
- list_id = "901307715461"
1095
- break
1096
-
1097
- # 4. Hardcoded fallback for this specific test case
1098
- if not list_id:
1099
- print("No list_id found in parameters, using hardcoded value 901307715461")
1100
- list_id = 901307715461
1101
-
1102
- print(f"list_id being used: {list_id}")
1103
- print("==== End parameters ====\n")
1104
-
1105
- action = GetTasks()
1106
-
1107
- url = f"{action.url}{action.path}".format(list_id=list_id)
1108
-
1109
- # Log the constructed URL
1110
- print(f"URL being used: {url}")
1111
-
1112
- # Make sure all parameters are JSON serializable
1113
- query_params = {
1114
- k: ensure_serializable(v)
1115
- for k, v in kwargs.items()
1116
- if v is not None and k != "kwargs"
1117
- }
1118
-
1119
- response = requests.get(url, headers=self.headers, params=query_params)
1120
-
1121
- # Log the response status code
1122
- print(f"Response status code: {response.status_code}")
1123
-
1124
- if response.status_code == 200:
1125
- response_json = response.json()
1126
- else:
1127
- try:
1128
- response_json = response.json()
1129
- # Log error response
1130
- print(f"Error response: {response_json}")
1131
- except requests.JSONDecodeError:
1132
- response_json = {"error": "Invalid JSON response"}
1133
- print("Could not decode JSON response")
1134
-
1135
- response = GetTasksResponse(data=response_json)
1136
- filtered_response = []
1137
- for task in response.data.get("tasks", []):
1138
- task_info = {
1139
- "id": task.get("id"),
1140
- "name": task.get("name"),
1141
- "assignees": [
1142
- assignee.get("username") for assignee in task.get("assignees", [])
1143
- ],
1144
- "due_date": task.get("due_date"),
1145
- "date_created": task.get("date_created"),
1146
- "status": task.get("status", {}).get("status"),
1147
- "url": f"https://app.clickup.com/t/{task.get('id')}",
1148
- }
1149
- filtered_response.append(task_info)
1150
-
1151
- # Add error information if present
1152
- if response.data.get("error"):
1153
- filtered_response.append({"error": response.data.get("error")})
1154
-
1155
- # Log the final result we're returning
1156
- print(f"Returning filtered response with {len(filtered_response)} items")
1157
-
1158
- return filtered_response
1159
-
1160
-
1161
- # Util for converting dates
1162
- @tool
1163
- def date_to_timestamp(date_str: str) -> int:
1164
- """
1165
- ALWAYS use this tool to convert dates from 'YYYY-MM-DD' to Unix millisecond timestamps when setting dates on ClickUp
1166
-
1167
- :param date_str: Date in the format YYYY-MM-DD
1168
- :return: Unix timestamp in milliseconds
1169
- """
1170
- # Convert the date string to a datetime object
1171
- date = datetime.datetime.strptime(date_str, "%Y-%m-%d")
1172
-
1173
- # Get the timestamp in seconds and convert to milliseconds
1174
- timestamp_ms = int(date.timestamp() * 1000)
1175
-
1176
- return timestamp_ms
1177
-
1178
-
1179
- def initialize_clickup_tools():
1180
- clickup_tools = [
1181
- CreateTaskTool(),
1182
- DeleteTaskTool(),
1183
- UpdateTaskTool(),
1184
- AddDependencyTool(),
1185
- GetListTool(),
1186
- GetTasksTool(),
1187
- GetTaskTool(),
1188
- date_to_timestamp,
1189
- ]
1190
- agency_swarm_clickup_tools = convert_langchain_tools(clickup_tools)
1191
- return agency_swarm_clickup_tools
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
agency_ai_demo/agents/ClickUpAgent/tools/ExampleTool.py DELETED
@@ -1,30 +0,0 @@
1
- from agency_swarm.tools import BaseTool
2
- from pydantic import Field
3
- import os
4
-
5
- account_id = "MY_ACCOUNT_ID"
6
- api_key = os.getenv("MY_API_KEY") # or access_token = os.getenv("MY_ACCESS_TOKEN")
7
-
8
- class ExampleTool(BaseTool):
9
- """
10
- A brief description of what the custom tool does.
11
- The docstring should clearly explain the tool's purpose and functionality.
12
- It will be used by the agent to determine when to use this tool.
13
- """
14
-
15
- # Define the fields with descriptions using Pydantic Field
16
- example_field: str = Field(
17
- ..., description="Description of the example field, explaining its purpose and usage for the Agent."
18
- )
19
-
20
- def run(self):
21
- """
22
- The implementation of the run method, where the tool's main functionality is executed.
23
- This method should utilize the fields defined above to perform the task.
24
- Docstring is not required for this method and will not be used by the agent.
25
- """
26
- # Your custom tool logic goes here
27
- # do_something(self.example_field, api_key, account_id)
28
-
29
- # Return the result of the tool's operation as a string
30
- return "Result of ExampleTool operation"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
agency_ai_demo/agents/ClickUpAgent/tools/GetTaskTool.py DELETED
@@ -1,78 +0,0 @@
1
- import os
2
- import sys
3
- import requests
4
- from dotenv import load_dotenv
5
- from langchain.tools import BaseTool
6
- from langchain_core.pydantic_v1 import BaseModel
7
- from langchain_core.tools import ToolException
8
- from typing import Any, Type
9
- import re
10
- from composio.tools.local.clickup.actions.get_task import (
11
- GetTask,
12
- GetTaskRequest,
13
- GetTaskResponse,
14
- )
15
-
16
- # Add the parent directory to sys.path to enable imports
17
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
18
-
19
- # Add project utils
20
- from utils.tool_wrapper import convert_langchain_tools
21
- from utils.ensure_serializable import ensure_serializable
22
-
23
- load_dotenv()
24
-
25
-
26
- CLICKUP_TOKEN = os.getenv("CLICKUP_TOKEN")
27
-
28
-
29
- # TODO: FIX THIS TOOL
30
- class GetTaskTool(BaseTool):
31
- name: str = "get_task_tool"
32
- description: str = """
33
- Ferramenta para visualizar detalhes de uma tarefa no ClickUp.
34
- - Obter detalhes da tarefa:
35
- Invocar: "GetTaskTool" com o ID da tarefa e parâmetros opcionais.
36
- """
37
- args_schema: Type[BaseModel] = GetTaskRequest
38
- headers: dict = {"Authorization": f"{CLICKUP_TOKEN}"}
39
-
40
- def _init_(self, **data):
41
- super()._init_(**data)
42
-
43
- def _run(self, **kwargs) -> Any:
44
- """Executa a requisição para obter detalhes de uma tarefa no ClickUp"""
45
-
46
- action = GetTask()
47
-
48
- url = f"{action.url}{action.path}".format(task_id=kwargs.get("task_id"))
49
-
50
- query_params = {k: v for k, v in kwargs.items() if v is not None}
51
-
52
- response = requests.get(url, headers=self.headers, params=query_params)
53
-
54
- if response.status_code == 200:
55
- response_json = response.json()
56
- else:
57
- try:
58
- response_json = response.json()
59
- except requests.JSONDecodeError:
60
- response_json = {"error": "Invalid JSON response"}
61
-
62
- response = GetTaskResponse(data=response_json)
63
-
64
- filtered_response = {
65
- "id": response.data.get("id"),
66
- "name": response.data.get("name"),
67
- "status": response.data.get("status", {}).get("status"),
68
- "due_date": response.data.get("due_date"),
69
- "date_created": response.data.get("date_created"),
70
- "url": response.data.get("url"),
71
- "assignees": [
72
- {"id": assignee.get("id"), "username": assignee.get("username")}
73
- for assignee in response.data.get("assignees", [])
74
- ],
75
- "error": response.data.get("error"),
76
- }
77
-
78
- return filtered_response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
agency_ai_demo/agents/ClickUpAgent/tools/__init__.py DELETED
File without changes
agency_ai_demo/agents/{ClickUpAgent/ClickUpAgent.py → NotionProjectAgent/NotionProjectAgent.py} RENAMED
@@ -1,18 +1,36 @@
1
- from agency_swarm.agents import Agent
2
 
 
 
3
 
4
- class ClickUpAgent(Agent):
5
  def __init__(self):
 
 
 
 
 
 
 
 
 
6
  super().__init__(
7
- name="ClickUpAgent",
8
- description="Project Management Assistant who interacts with ClickUp for managing project tasks",
9
  instructions="./instructions.md",
10
  files_folder="./files",
11
  schemas_folder="./schemas",
12
- tools=[],
13
  tools_folder="./tools",
 
14
  temperature=0.3,
15
  max_prompt_tokens=25000,
 
 
 
 
 
 
16
  )
17
 
18
  def response_validator(self, message):
 
1
+ import os
2
 
3
+ from agency_swarm.agents import Agent
4
+ from .tools.GetTasks import GetTasksTool
5
 
6
+ class NotionProjectAgent(Agent):
7
  def __init__(self):
8
+ # Retrieve the Notion integration secret from the environment
9
+ try:
10
+ integration_secret = os.environ["NOTION_INTEGRATION_SECRET"]
11
+ except KeyError:
12
+ raise EnvironmentError(
13
+ "NOTION_INTEGRATION_SECRET environment variable is not set."
14
+ )
15
+
16
+ # Initialize the parent Agent class with updated parameters
17
  super().__init__(
18
+ name="NotionProjectAgent",
19
+ description="Project Management Assistant who tracks and updates project progress on Notion",
20
  instructions="./instructions.md",
21
  files_folder="./files",
22
  schemas_folder="./schemas",
23
+ tools=[GetTasksTool],
24
  tools_folder="./tools",
25
+ model="gpt-4o",
26
  temperature=0.3,
27
  max_prompt_tokens=25000,
28
+ # api_headers={
29
+ # "notion_tasks.json": {
30
+ # "Authorization": f"Bearer {integration_secret}",
31
+ # "Notion-Version": "2022-06-28",
32
+ # }
33
+ # },
34
  )
35
 
36
  def response_validator(self, message):
agency_ai_demo/agents/NotionProjectAgent/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .NotionProjectAgent import NotionProjectAgent
agency_ai_demo/agents/NotionProjectAgent/instructions.md ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NotionProjectAgent Instructions
2
+
3
+ Your team uses Notion to manage projects and tasks.
4
+ Users often refer to tasks by name, and sometimes there may not be an exact match, so you should look for the closest ones. If in doubt, provide the user with valid options to choose from or ask for more information if necessary.
5
+
6
+ ## NOTION_STRUCTURE
7
+
8
+ -> Database: The highest level of organization in Notion. Contains all your tasks.
9
+ --> Page: A task in a Notion database, assignable with due dates.
10
+ --> Subpage: A child page of a parent Page, assignable to different people.
11
+
12
+ ## DEFAULT_NOTION_IDS
13
+
14
+ Use these IDs unless specified otherwise
15
+
16
+ - Database ID: 1a88235ee2ff801e8f93d8ab2e14de1d
17
+
18
+ ## DATABASE PROPERTIES
19
+
20
+ - Task Name
21
+ - Status
22
+ - Priority
23
+ - Due Date
24
+ - Assigned To
25
+
26
+ ## WORKFLOWS
27
+
28
+ ### Create a high level WBS
29
+
30
+ When required to create a WBS, you may be prompted with information about the project scope and requirements and/or you may be provided with information in a task (page) in the database. Understand the project and create a high-level WBS containing 5 to 10 tasks, which cover the project scope from start to end.
agency_ai_demo/agents/NotionProjectAgent/tools/GetTasks.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ from agency_swarm.tools import BaseTool
4
+ from pydantic import Field
5
+
6
+ load_dotenv()
7
+
8
+ notion_integration_secret = os.getenv("NOTION_INTEGRATION_SECRET")
9
+ notion_database_id = os.getenv("NOTION_DATABASE_ID")
10
+
11
+
12
+ class GetTasksTool(BaseTool):
13
+ """
14
+ Tool for retrieving tasks from a Notion database.
15
+ This tool allows querying tasks with optional filtering and sorting capabilities
16
+ based on properties like Due Date, Status, Priority, and Task Name.
17
+ """
18
+
19
+ # Add example_field with a default value to satisfy BaseTool validation
20
+ example_field: str = Field(
21
+ default="notion_tasks",
22
+ description="Identifier for this tool. Can be left at its default value."
23
+ )
24
+
25
+ status: str = Field(
26
+ default=None,
27
+ description="Filter tasks by status. Options: Backlog, In Progress, In Review, Testing, Completed."
28
+ )
29
+
30
+ priority: str = Field(
31
+ default=None,
32
+ description="Filter tasks by priority. Options: High, Medium, Low."
33
+ )
34
+
35
+ due_date_before: str = Field(
36
+ default=None,
37
+ description="Filter tasks due before this date (format: YYYY-MM-DD)."
38
+ )
39
+
40
+ due_date_after: str = Field(
41
+ default=None,
42
+ description="Filter tasks due after this date (format: YYYY-MM-DD)."
43
+ )
44
+
45
+ sort_by: str = Field(
46
+ default="Due Date",
47
+ description="Property to sort by. Options: Due Date, Status, Priority, Task Name."
48
+ )
49
+
50
+ sort_direction: str = Field(
51
+ default="ascending",
52
+ description="Sort direction. Options: ascending, descending."
53
+ )
54
+
55
+ def run(self):
56
+ """
57
+ Query a Notion database for tasks with optional filtering and sorting.
58
+
59
+ Returns:
60
+ dict: The JSON response from the Notion API containing the tasks.
61
+ """
62
+ import requests
63
+
64
+ # Use the database ID from the environment variable
65
+ database_id = os.getenv("NOTION_DATABASE_ID")
66
+
67
+ # Set up the API endpoint
68
+ url = f"https://api.notion.com/v1/databases/{database_id}/query"
69
+
70
+ # Set up the headers
71
+ headers = {
72
+ "Authorization": f"Bearer {os.getenv('NOTION_INTEGRATION_SECRET')}",
73
+ "Notion-Version": "2022-06-28",
74
+ "Content-Type": "application/json",
75
+ }
76
+
77
+ # Prepare the request body
78
+ data = {}
79
+
80
+ # Build filter
81
+ filters = []
82
+
83
+ if self.status:
84
+ filters.append({
85
+ "property": "Status",
86
+ "status": {
87
+ "equals": self.status
88
+ }
89
+ })
90
+
91
+ if self.priority:
92
+ filters.append({
93
+ "property": "Priority",
94
+ "select": {
95
+ "equals": self.priority
96
+ }
97
+ })
98
+
99
+ if self.due_date_before:
100
+ filters.append({
101
+ "property": "Due Date",
102
+ "date": {
103
+ "before": self.due_date_before
104
+ }
105
+ })
106
+
107
+ if self.due_date_after:
108
+ filters.append({
109
+ "property": "Due Date",
110
+ "date": {
111
+ "after": self.due_date_after
112
+ }
113
+ })
114
+
115
+ if filters:
116
+ if len(filters) > 1:
117
+ data["filter"] = {
118
+ "and": filters
119
+ }
120
+ else:
121
+ data["filter"] = filters[0]
122
+
123
+ # Add sorting
124
+ if self.sort_by:
125
+ data["sorts"] = [
126
+ {
127
+ "property": self.sort_by,
128
+ "direction": self.sort_direction
129
+ }
130
+ ]
131
+
132
+ # Make the request
133
+ response = requests.post(url, headers=headers, json=data)
134
+
135
+ # Return the JSON response
136
+ return response.json()
agency_ai_demo/agents/ResearchAndReportAgent/ResearchAndReportAgent.py CHANGED
@@ -11,6 +11,7 @@ class ResearchAndReportAgent(Agent):
11
  schemas_folder="./schemas",
12
  tools=[],
13
  tools_folder="./tools",
 
14
  temperature=0.3,
15
  max_prompt_tokens=25000,
16
  )
 
11
  schemas_folder="./schemas",
12
  tools=[],
13
  tools_folder="./tools",
14
+ model="gpt-4o",
15
  temperature=0.3,
16
  max_prompt_tokens=25000,
17
  )
agency_ai_demo/agents/TechnicalProjectManager/TechnicalProjectManager.py CHANGED
@@ -11,6 +11,7 @@ class TechnicalProjectManager(Agent):
11
  schemas_folder="./schemas",
12
  tools=[],
13
  tools_folder="./tools",
 
14
  temperature=0.3,
15
  max_prompt_tokens=25000,
16
  )
 
11
  schemas_folder="./schemas",
12
  tools=[],
13
  tools_folder="./tools",
14
+ model="gpt-4o",
15
  temperature=0.3,
16
  max_prompt_tokens=25000,
17
  )
agency_ai_demo/shared_files/agency_manifesto.md CHANGED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # VRSEN AI: Company Profile
2
+
3
+ ## Company Background and Mission
4
+
5
+ VRSEN AI is a boutique artificial intelligence agency founded by Arsenii Shatokhin. It began as a freelance venture and quickly grew into a dedicated agency with a team of around seven professionals. The company is headquartered in Tbilisi, Georgia, and positions itself as a partner for businesses looking to navigate the AI revolution.
6
+
7
+ VRSEN AI's mission is straightforward: to help businesses make the most of AI by providing personalized tools and solutions that fit each client's unique needs. They strive to be the top choice for businesses seeking custom AI solutions, leveraging state-of-the-art models (especially OpenAI's technology) to drive growth and efficiency. In line with this mission, VRSEN emphasizes a client-centric approach – taking time to understand each business and making AI adoption "easy" through tailored solutions.
8
+
9
+ ## Products and Solutions
10
+
11
+ VRSEN AI offers bespoke AI solutions that often center around generative AI and AI agents. The company's services are packaged into several solution offerings, including:
12
+
13
+ - **AI Support Hub**: A custom AI-powered support system that can instantly answer customer questions and reduce support ticket volume, helping cut customer support costs. This solution is aimed at improving customer service and reducing churn by providing quick, AI-driven support responses.
14
+
15
+ - **ChatGPT Website Widget**: An integrated chatbot widget for websites that uses ChatGPT to engage visitors. It personalizes user interactions and can boost sales by guiding customers or answering their queries in real time.
16
+
17
+ - **ChatGPT for Business**: A tailored deployment of ChatGPT integrated with a company's internal databases and knowledge. This allows organizations to have their own branded AI assistant that can leverage proprietary data for tasks like answering employee questions or assisting with analytics.
18
+
19
+ - **AI Consultation**: Advisory services where VRSEN's experts help clients clarify project requirements and craft a straightforward plan to bring an AI idea to life. This is essentially a consulting engagement to roadmap AI adoption or development strategies.
20
+
21
+ All these offerings are powered by advanced AI models (notably OpenAI's GPT technology) and are customized per client. VRSEN AI highlights that dozens of clients have implemented these solutions (e.g. 52 clients for the Support Hub, 35 for the Website Widget, etc., as indicated on their site).
22
+
23
+ In addition to bespoke solutions, VRSEN is developing a software platform for AI agents. The company refers to this SaaS product as "Agency AI," described as the first platform for deploying reliable AI-driven agencies at scale. Agency AI is built on VRSEN's real-world experience delivering production-ready AI agents, and it aims to let businesses spin up "agents-as-a-service" quickly and efficiently. This platform leverages VRSEN's internal frameworks (such as the open-source Agency Swarm framework) to orchestrate multiple AI agents working collaboratively. By investing in this product, VRSEN is moving beyond one-off projects into a scalable service model, which is a recent strategic development for the company.
24
+
25
+ ## Key Clients and Success Stories
26
+
27
+ VRSEN AI's client base ranges from startups to established enterprises, and the company showcases several high-profile partners and success stories. Notably, VRSEN highlights collaborations with companies like Stripe, Cisco, Product Marketing Alliance, ESM, and Hugo Pfohe. These partnerships suggest that even large tech firms (e.g. Cisco and Stripe) and industry communities (Product Marketing Alliance) have trusted VRSEN for AI solutions.
28
+
29
+ The company's case studies provide insight into the value delivered:
30
+
31
+ - **ESM (Enterprise Strategic Management)**: VRSEN developed a GPT-powered querying tool for ESM's vast demographic datasets. This "GPT-Query" solution transformed data analysis for the client – what used to take hours was reduced to about 5 minutes using AI. The integration of machine learning at ESM was significantly streamlined thanks to VRSEN's partnership.
32
+
33
+ - **Product Marketing Alliance (PMA)**: VRSEN built a tailored web application with AI Q&A capabilities for PMA's online community. This allowed PMA's members to retrieve information from the community's content archives instantly, instead of spending hours on manual searches. The result was a much more efficient information retrieval process for users.
34
+
35
+ - **Hugo Pfohe (Automotive Dealership)**: VRSEN implemented an AI-powered chatbot for an e-commerce platform in the automotive domain (likely for Hugo Pfohe, a car dealership). The chatbot was integrated with a live SQL database of vehicles to offer personalized car recommendations. This drove improvements in sales, user engagement, and lead generation by providing instant, tailored responses to potential car buyers.
36
+
37
+ Client testimonials on VRSEN's site reinforce these successes. For example, the VP of Product Marketing at one client attested that Arsenii and his team's AI solution led to "significant growth and efficiency" in their business. Another client CEO noted VRSEN's hands-on approach and valuable guidance throughout the project. These stories and endorsements illustrate VRSEN's track record in delivering impactful AI solutions across different industries.
38
+
39
+ ## Position in the AI Industry
40
+
41
+ Within the AI industry, VRSEN AI occupies a niche at the intersection of AI consulting and AI product development. As a small, specialized agency, it has carved out a reputation for expertise in AI agents and large language model (LLM) applications. VRSEN explicitly markets itself as being "at the forefront of the AI revolution," providing cutting-edge AI agent solutions to help businesses thrive.
42
+
43
+ One of VRSEN's distinguishing features is its leadership in the emerging field of AI agents (autonomous or semi-autonomous AI programs that can perform tasks or collaborate). The company's founder, Arsenii Shatokhin, is not only an AI consultant but also the creator of Agency Swarm, an open-source agent orchestration framework. Agency Swarm was born from Arsenii's effort to automate his own AI agency's workflows through AI. It allows developers to create a collaborative "swarm" of AI agents with distinct roles (like a virtual team consisting of roles such as CEO agent, developer agent, assistant agent, etc.) working together toward goals.
44
+
45
+ This framework has gained significant popularity in the AI developer community (garnering thousands of stars on GitHub and widespread use), to the point where VRSEN notes that Arsenii is "leading one of the most popular AI Agent Development Frameworks called Agency Swarm."
46
+
47
+ VRSEN's active role in developing such frameworks and sharing knowledge (e.g., Arsenii's YouTube channel with AI agent tutorials and the "Generative AI Podcast" he hosts) has bolstered its position as a thought leader in AI agents. In an industry where many companies are experimenting with generative AI, VRSEN stands out by focusing on reliable, production-ready AI systems and agent-based solutions. The development of its Agency AI platform further cements its innovative stance, as this product is pioneering the deployment of multi-agent AI systems at scale for businesses.
48
+
49
+ In summary, VRSEN AI's position in the industry is that of a nimble innovator – a company small in size but big in specialized expertise – giving it credibility among both clients and the AI developer community.
50
+
51
+ ## Competitive Advantage
52
+
53
+ VRSEN AI's competitive advantage lies in its combination of deep technical expertise and a highly customized, client-focused approach. A few key differentiators include:
54
+
55
+ - **Bespoke AI Solutions**: Unlike one-size-fits-all software vendors, VRSEN delivers custom AI development tailored to each client's needs. The team makes a point of closely understanding the client's business challenges and then crafting AI tools specifically for them. This personalization ensures that the solutions align with the client's workflows and goals, often integrating with the client's own data systems (as seen in their ChatGPT for Business offerings).
56
+
57
+ - **Expertise in AI Agents and LLMs**: VRSEN has specialized know-how in building AI agents and leveraging large language models. Their familiarity with frameworks like OpenAI's API, LangChain, vector databases, and their own Agency Swarm gives them an edge in developing sophisticated AI applications. Few agencies of their size have in-house frameworks with community recognition; this indicates a level of technical maturity and thought leadership.
58
+
59
+ - **Agility of a Small Team**: With a core team (under 10 people) of AI experts, software engineers, and UX designers, VRSEN can be very agile. They iterate quickly and often work hands-on with clients (the founder himself often engages in coding and problem-solving, according to client feedback). This nimbleness allows them to implement cutting-edge AI features faster than larger consultancies might.
60
+
61
+ - **Proven Track Record with Notable Clients**: Despite its size, VRSEN has delivered results for well-known companies (from tech firms to international businesses). These case studies and testimonials serve as social proof of the agency's capability. It demonstrates that VRSEN can compete with bigger players when it comes to delivering real business value through AI.
62
+
63
+ - **Education and Thought Leadership**: VRSEN (and Arsenii Shatokhin personally) actively educates the market via YouTube videos, podcasts, and open-source contributions. This not only markets their expertise but keeps them at the leading edge of AI trends. Clients benefit from this knowledge, as VRSEN's solutions are informed by the latest research and best practices in AI development.
64
+
65
+ In essence, VRSEN's advantage is being both highly innovative and client-friendly. They lower the barrier for businesses to adopt AI by handling the complexity (making it "easy" for clients), all while pushing the envelope in areas like autonomous agents. This combination of service and innovation distinguishes them in the competitive AI services industry.
66
+
67
+ ## Corporate Culture and Values
68
+
69
+ Despite its high-tech focus, VRSEN AI emphasizes a human-centric and collaborative corporate culture. The team is described as a group of "passionate innovators and problem solvers" who thrive on tackling challenging problems in AI. The work environment is collaborative and supportive, where every team member's contribution is recognized and valued. This suggests a flat or egalitarian culture typical of startups, in which open communication and teamwork are encouraged.
70
+
71
+ VRSEN also values efficiency, clear communication, and continual feedback in its operations. Given that the company works with clients globally, the team is remote-friendly and flexible – team members often need to be available across various time zones for meetings or calls. This global remote setup (team members are in different countries) requires trust and strong communication skills internally. The company's job postings explicitly mention the importance of conveying complex technical ideas in simple terms for clients, reflecting a culture that bridges technical excellence with client understanding.
72
+
73
+ Innovation is at the heart of VRSEN's identity, so it's likely that the culture encourages continuous learning and experimentation with new AI technologies. Being a small company, there is also an entrepreneurial spirit – employees often wear multiple hats and have significant ownership of their projects. Overall, VRSEN AI's corporate culture can be summed up as innovative, collaborative, and customer-focused: they are excited about pushing AI boundaries, work closely as a team, and remain aligned with what clients need.
74
+
75
+ ## Technical AI Project Manager Role at VRSEN AI
76
+
77
+ At VRSEN, a Technical AI Project Manager plays a critical role in ensuring the success of AI projects from conception to delivery. This role is a blend of project leadership, technical oversight, and client management. Based on VRSEN's job description for a Technical Project Manager, the responsibilities and expectations include:
78
+
79
+ - **Client Liaison**: Serving as the primary point of contact between clients and VRSEN's development team. The project manager needs to manage client communications proactively, understand client requirements, and keep the clients updated. Essentially, they "bridge the gap" between the esteemed clients and the developers.
80
+
81
+ - **Project Scoping and Planning**: Working closely with clients to scope out AI projects – defining the tasks, deliverables, and timelines for AI agent development engagements. This involves translating business needs into technical requirements and creating clear project plans that the team can execute.
82
+
83
+ - **Project Management and Delivery**: Overseeing the execution of projects to ensure they are delivered on time and meet quality standards. The Technical PM coordinates the development sprints, monitors progress, mitigates risks, and ensures that the AI solutions being built align with client expectations. They are responsible for timely delivery and maintaining high quality in outcomes.
84
+
85
+ - **Technical Guidance and Oversight**: Although this is a management role, it has a strong technical component. The Technical AI Project Manager is expected to engage in technical discussions with clients and provide expert opinions on AI agents and related technologies. They should be comfortable reviewing the team's work, giving constructive feedback to developers, and possibly even diving into code or technical problem-solving when needed. In fact, proficiency in Python (especially backend development) and experience with AI frameworks (OpenAI API, LangChain, Hugging Face, vector databases, etc.) are listed as necessary skills for this role.
86
+
87
+ - **Team Leadership and Feedback**: Providing leadership to the development team by setting expectations, clarifying tasks, and offering feedback. The project manager guides the team through challenges and ensures everyone is aligned on the project goals. They act not as a distant overseer but as a supportive leader who can mentor developers and help troubleshoot issues. Strong organizational skills are needed to juggle multiple projects simultaneously and keep everything on track.
88
+
89
+ - **Flexibility and Availability**: Since VRSEN works with a global client base, the Technical Project Manager should be available for meetings or sales calls across various time zones as required. This implies some non-traditional hours at times and a flexibility in schedule to accommodate international clients.
90
+
91
+ ### Expected Qualifications
92
+
93
+ A Technical AI Project Manager at VRSEN is generally expected to have a background that spans both engineering and project management. The job posting suggests candidates should have around 3-5 years of experience in relevant fields.
94
+
95
+ Key technical qualifications include:
96
+
97
+ - Strong Python skills
98
+ - Firsthand experience building AI applications or agents with modern tools (for example, being familiar with how to use OpenAI's APIs or implement AI agents in frameworks like Agency Swarm or LangChain)
99
+ - Knowledge of backend infrastructure (possibly some DevOps exposure) is also valuable, given the emphasis on reliable deployment of AI solutions
100
+
101
+ On the soft skills side:
102
+
103
+ - Exceptional communication is a must – the ability to clearly explain complex AI concepts to non-technical stakeholders
104
+ - Being highly organized and capable of managing multiple parallel projects is also highlighted
105
+
106
+ Because this role deals with cutting-edge AI projects, a passion for AI and staying updated on new developments would be expected too (even if not explicitly stated, it aligns with VRSEN's innovative culture).
107
+
108
+ In terms of role within the company, the Technical AI Project Manager is pivotal for VRSEN's operations. They ensure that the client's vision is accurately translated for the technical team and that the final AI solution meets the client's business objectives. Essentially, they wear the hats of project planner, tech lead, and client advisor all at once. VRSEN's job description notes that this role is key to "bridging the gap" between clients and developers, highlighting how central the position is in delivering successful projects. Given VRSEN's collaborative ethos, a person in this role would work closely with the founder and other team leads, and they would contribute to refining VRSEN's project delivery processes over time.
109
+
110
+ Overall, a Technical AI Project Manager at VRSEN AI is expected to drive projects to success by blending technical know-how with project leadership and excellent people skills. They play a major part in upholding VRSEN's reputation for quality and innovation in bespoke AI solutions, while also fostering a positive experience for clients through professional communication and reliable delivery.
111
+
112
+ ## Sources
113
+
114
+ - VRSEN AI – Official Website (About Us) [VRSEN.AI](https://vrsen.ai)
115
+ - VRSEN AI – Official Website (Solutions) [VRSEN.AI](https://vrsen.ai)
116
+ - VRSEN AI – Official Website (Case Studies) [VRSEN.AI](https://vrsen.ai)
117
+ - VRSEN AI – Official Website (Home page/client testimonials) [VRSEN.AI](https://vrsen.ai)
118
+ - VRSEN AI – Agents-as-a-Service Careers (Technical Project Manager role) [AGENTS.VRSEN.AI](https://agents.vrsen.ai)
119
+ - VRSEN AI – Agents-as-a-Service Careers (Technical AI Product Manager role, describing Agency AI) [AGENTS.VRSEN.AI](https://agents.vrsen.ai)
120
+ - Agency Swarm – Open Source Framework by VRSEN (Project Introduction) [VRSEN.GITHUB.IO](https://vrsen.github.io)
121
+ - Search result snippet referencing VRSEN founder and Agency Swarm [AGENTS.VRSEN.AI](https://agents.vrsen.ai) (confirmation of Agency Swarm's popularity and founder's leadership)
122
+ - Client testimonial from VRSEN site (ESM partnership) [VRSEN.AI](https://vrsen.ai)
agency_ai_demo/utils/ensure_serializable.py DELETED
@@ -1,30 +0,0 @@
1
- import json
2
-
3
-
4
# Add a utility function to ensure all values are JSON serializable
def ensure_serializable(obj):
    """
    Recursively ensure that an object and all its contents are JSON serializable.

    Handles Pydantic Field objects by extracting their default values; dicts,
    lists, tuples, and sets are walked recursively. Any other object is
    returned unchanged.
    """
    # Handle None
    if obj is None:
        return None

    # Check if it's a Field object (duck-typed via common Field attributes).
    # NOTE(review): any object exposing both `default` and `description`
    # matches this check, not only Pydantic Fields — confirm that is intended.
    if hasattr(obj, "default") and hasattr(obj, "description"):
        # Return the default value or None
        if obj.default is not None:
            return obj.default
        return None

    # Handle dictionaries
    if isinstance(obj, dict):
        return {k: ensure_serializable(v) for k, v in obj.items()}

    # Handle lists
    if isinstance(obj, list):
        return [ensure_serializable(item) for item in obj]

    # Handle tuples: json serializes tuples as arrays, so keep the tuple type
    # for callers, but still clean any nested values (previously tuples were
    # passed through untouched, letting Field objects leak into the output)
    if isinstance(obj, tuple):
        return tuple(ensure_serializable(item) for item in obj)

    # Handle sets: json cannot serialize sets at all, so convert to a list
    if isinstance(obj, (set, frozenset)):
        return [ensure_serializable(item) for item in obj]

    # Return other objects as is
    return obj
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
agency_ai_demo/utils/tool_wrapper.py DELETED
@@ -1,489 +0,0 @@
1
import inspect
from typing import Any, Callable, Dict, List, Optional, Type, Union, get_type_hints
from pydantic import BaseModel, Field, create_model
import functools
import logging

# Set up logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logger = logging.getLogger(__name__)

# Import the necessary classes.
# Both third-party imports are optional at module load time: when a library is
# missing, a minimal stand-in class with the same attribute surface is defined
# instead so this module can still be imported (e.g. for documentation), and
# the matching *_AVAILABLE flag records whether the real library is present.
# convert_langchain_tool() checks these flags before doing real conversions.
try:
    # For LangChain
    from langchain.tools import BaseTool as LangChainBaseTool
    from langchain.tools.base import ToolException

    LANGCHAIN_AVAILABLE = True
except ImportError:
    logger.warning("LangChain not available. Providing mock classes for documentation.")
    LANGCHAIN_AVAILABLE = False

    class LangChainBaseTool:
        """Mock LangChain BaseTool for documentation"""

        # Placeholder metadata mirroring the attributes real LangChain tools define.
        name = "mock_tool"
        description = "Mock tool for documentation"

        def _run(self, *args, **kwargs):
            # The mock is never meant to execute; it exists only so type
            # references in this module resolve without LangChain installed.
            raise NotImplementedError("This is a mock class")

    class ToolException(Exception):
        """Mock ToolException for documentation"""

        pass


try:
    # For agency-swarm
    from agency_swarm.tools.BaseTool import BaseTool as AgencySwarmBaseTool

    AGENCY_SWARM_AVAILABLE = True
except ImportError:
    logger.warning(
        "Agency Swarm not available. Providing mock classes for documentation."
    )
    AGENCY_SWARM_AVAILABLE = False

    class AgencySwarmBaseTool:
        """Mock Agency Swarm BaseTool for documentation"""

        # Mirrors the configuration knobs the converted tool classes below set
        # on agency-swarm's real BaseTool.
        class ToolConfig:
            strict = True
            one_call_at_a_time = True
            output_as_result = False
            async_mode = None

        def run(self):
            raise NotImplementedError("This is a mock class")
59
-
60
-
61
def convert_langchain_tool(
    lc_tool: Union[Type[LangChainBaseTool], Callable]
) -> Type[AgencySwarmBaseTool]:
    """
    Converts a LangChain tool (class-based or function-based) to an agency-swarm compatible tool.

    Args:
        lc_tool: Either a LangChain BaseTool class or a function decorated with @tool

    Returns:
        A new class that inherits from agency_swarm.tools.BaseTool

    Raises:
        ImportError: If LangChain or Agency Swarm is not actually installed.
        TypeError: If ``lc_tool`` is a class that is not a LangChain BaseTool subclass.

    Example:
        ```python
        from langchain.tools import BaseTool

        class MyLangChainTool(BaseTool):
            name = "my_tool"
            description = "A sample tool"

            def _run(self, param1: str, param2: int) -> str:
                return f"Processed {param1} and {param2}"

        # Convert to agency-swarm tool
        MyAgencySwarmTool = convert_langchain_tool(MyLangChainTool)

        # Use with agency-swarm
        agent = Agent(
            name="my_agent",
            tools=[MyAgencySwarmTool], # Pass the class, not an instance
        )
        ```
    """
    if not LANGCHAIN_AVAILABLE or not AGENCY_SWARM_AVAILABLE:
        raise ImportError(
            "Both LangChain and Agency Swarm must be available to convert tools."
        )

    # Check if input is a function (likely decorated with @tool)
    if callable(lc_tool) and not inspect.isclass(lc_tool):
        return _convert_function_tool(lc_tool)

    # If it's a class, ensure it's a subclass of LangChain's BaseTool
    if not issubclass(lc_tool, LangChainBaseTool):
        raise TypeError(f"Expected a LangChain BaseTool subclass, got {lc_tool}")

    # Extract metadata from the LangChain tool
    tool_name = getattr(lc_tool, "name", lc_tool.__name__)
    if tool_name is None:
        # `name` can exist as a class attribute but be None; fall back to the class name.
        tool_name = lc_tool.__name__

    tool_description = getattr(lc_tool, "description", lc_tool.__doc__ or "")

    # Get the schema from the LangChain tool, if any
    schema_cls = getattr(lc_tool, "args_schema", None)

    # Create a new class that inherits from agency_swarm's BaseTool.
    # NOTE: `lc_tool` and `schema_cls` are captured by closure — the generated
    # class delegates back to them at call time.
    class ConvertedTool(AgencySwarmBaseTool):
        """
        Agency Swarm tool converted from LangChain tool.
        """

        # Set up the ToolConfig inner class
        class ToolConfig:
            strict: bool = True
            one_call_at_a_time: bool = True
            output_as_result: bool = False
            async_mode: None = None

        def run(self) -> Dict:
            """
            Execute the tool with runtime validation.

            Returns a dict: either the wrapped tool's own dict result, or
            {"success": True, "result": ...} / {"success": False, "error": ...}.
            """
            # Validate required fields based on the original schema
            validation_errors = self._validate_required_fields()
            if validation_errors:
                return {"success": False, "error": validation_errors}

            # Prepare args for the original tool's _run method
            # (only fields that are set and non-None are forwarded)
            kwargs = {
                field: getattr(self, field)
                for field in self._get_field_names()
                if hasattr(self, field) and getattr(self, field) is not None
            }

            try:
                # Call the original LangChain tool's _run method.
                # NOTE(review): a fresh instance is created per call — assumes
                # lc_tool() is constructible with no arguments; confirm for
                # tools that require constructor parameters.
                instance = lc_tool()
                result = instance._run(**kwargs)

                # If result is already a dict, return it
                if isinstance(result, dict):
                    return result

                # Otherwise, wrap it in a success response
                return {"success": True, "result": result}
            except ToolException as e:
                # Convert LangChain's exceptions to structured errors
                return {"success": False, "error": str(e)}
            except Exception as e:
                # Handle unexpected errors
                logger.exception(f"Tool execution failed: {str(e)}")
                return {"success": False, "error": f"Tool execution failed: {str(e)}"}

        def _validate_required_fields(self) -> Optional[str]:
            """
            Validate required fields at runtime.

            Returns an error message listing missing required fields, or None
            when all required fields are present (or there is no schema).
            """
            if not schema_cls:
                return None

            # Try different approaches to identify required fields based on Pydantic version
            missing_fields = []
            try:
                # Attempt to get field info based on Pydantic v1 style
                if hasattr(schema_cls, "__fields__"):
                    for field_name, field_info in schema_cls.__fields__.items():
                        if (
                            field_info.required
                            and getattr(self, field_name, None) is None
                        ):
                            missing_fields.append(field_name)
                # Try Pydantic v2 style
                elif hasattr(schema_cls, "model_fields"):
                    for field_name, field_info in schema_cls.model_fields.items():
                        if (
                            field_info.is_required()
                            and getattr(self, field_name, None) is None
                        ):
                            missing_fields.append(field_name)
                # Fallback to checking for ... (Ellipsis) in class variables
                else:
                    for field_name in schema_cls.__annotations__:
                        class_var = getattr(schema_cls, field_name, None)
                        if class_var is ... and getattr(self, field_name, None) is None:
                            missing_fields.append(field_name)
            except Exception as e:
                logger.warning(f"Error during field validation: {e}")
                # If all else fails, just check for None values
                # (conservative: treats every unset field as missing)
                for field_name in schema_cls.__annotations__:
                    if getattr(self, field_name, None) is None:
                        missing_fields.append(field_name)

            if missing_fields:
                return f"Missing required fields: {', '.join(missing_fields)}"

            return None

        def _get_field_names(self) -> List[str]:
            """
            Get all field names from the schema.
            """
            if schema_cls:
                return list(schema_cls.__annotations__.keys())
            return []

    # Set the tool name and description
    # Use safe string operations for the class name
    # (strip anything that is not a valid Python identifier character)
    safe_name = "".join(
        c for c in tool_name.replace("-", "_") if c.isalnum() or c == "_"
    )
    if safe_name:
        class_name = safe_name[0].upper() + safe_name[1:] + "Converted"
    else:
        class_name = "ConvertedTool"

    ConvertedTool.__name__ = class_name
    ConvertedTool.__doc__ = tool_description

    # Add fields to the converted tool
    if schema_cls:
        _add_fields_from_schema(ConvertedTool, schema_cls)
    else:
        # If no schema_cls, try to infer from _run signature
        _add_fields_from_run_method(ConvertedTool, lc_tool)

    return ConvertedTool
238
-
239
-
240
def _add_fields_from_schema(
    target_cls: Type[AgencySwarmBaseTool], schema_cls: Type[BaseModel]
) -> None:
    """
    Mirror the fields declared on a Pydantic schema onto the target tool class.

    Every schema field becomes an attribute on ``target_cls`` with a
    ``Field(None, ...)`` default and an Optional annotation, so unset values
    can be detected at runtime.

    Args:
        target_cls: The class to add fields to
        schema_cls: The Pydantic schema class to extract fields from
    """
    # Start from a clean annotation map on the target class.
    target_cls.__annotations__ = {}

    # Gather per-field descriptions, tolerating both Pydantic major versions.
    descriptions = {}
    try:
        if hasattr(schema_cls, "__fields__"):
            # Pydantic v1: metadata lives on field.field_info
            for name, fld in schema_cls.__fields__.items():
                descriptions[name] = fld.field_info.description
        elif hasattr(schema_cls, "model_fields"):
            # Pydantic v2: description is exposed directly on the field
            for name, fld in schema_cls.model_fields.items():
                descriptions[name] = fld.description
    except Exception as exc:
        logger.warning(f"Error extracting field descriptions: {exc}")

    for name, annotation in schema_cls.__annotations__.items():
        desc = descriptions.get(name, f"Parameter: {name}")

        # Wrap anything that is not already a typing.Union in Optional so the
        # attribute may legitimately be left unset.
        is_union = (
            hasattr(annotation, "__origin__") and annotation.__origin__ is Union
        )
        if not is_union:
            annotation = Optional[annotation]

        # Register the field with a None default plus its annotation.
        setattr(target_cls, name, Field(None, description=desc))
        target_cls.__annotations__[name] = annotation
277
-
278
-
279
def _add_fields_from_run_method(
    target_cls: Type[AgencySwarmBaseTool], tool_cls: Type[LangChainBaseTool]
) -> None:
    """
    Infer tool fields from the signature of a LangChain tool's ``_run`` method.

    Each ``_run`` parameter (excluding ``self`` and var-args) becomes an
    attribute on ``target_cls`` with a ``Field(None, ...)`` default and an
    Optional annotation.

    Args:
        target_cls: The class to add fields to
        tool_cls: The LangChain tool class to extract fields from
    """
    target_cls.__annotations__ = {}

    try:
        # Prefer the unbound _run on the class; otherwise instantiate the
        # tool and use the bound method.
        if hasattr(tool_cls, "_run"):
            run_method = tool_cls._run
        else:
            run_method = tool_cls()._run

        params = list(inspect.signature(run_method).parameters.items())

        # Drop a leading 'self' when inspecting an unbound method.
        if params and params[0][0] == "self":
            del params[0]

        var_kinds = (
            inspect.Parameter.VAR_POSITIONAL,
            inspect.Parameter.VAR_KEYWORD,
        )
        for name, param in params:
            # *args / **kwargs cannot be represented as named fields.
            if param.kind in var_kinds:
                continue

            # Fall back to Any for unannotated parameters.
            if param.annotation != inspect.Parameter.empty:
                annotation = param.annotation
            else:
                annotation = Any

            # Register the field with a None default and an Optional annotation.
            setattr(
                target_cls,
                name,
                Field(None, description=f"Parameter: {name}"),
            )
            target_cls.__annotations__[name] = Optional[annotation]

    except Exception as exc:
        logger.warning(f"Error extracting fields from _run method: {exc}")
334
-
335
-
336
def _convert_function_tool(tool_func: Callable) -> Type[AgencySwarmBaseTool]:
    """
    Convert a function-based tool (decorated with @tool) to an agency-swarm compatible tool.

    Args:
        tool_func: A function decorated with @tool (may also be a LangChain
            StructuredTool wrapping the function)

    Returns:
        A new class that inherits from agency_swarm.tools.BaseTool
    """
    # Extract metadata from the function
    # Handle case where tool_func might be a StructuredTool instead of a direct function
    if hasattr(tool_func, "name"):
        tool_name = tool_func.name
    else:
        tool_name = getattr(tool_func, "__name__", "function_tool")

    tool_description = getattr(
        tool_func, "description", getattr(tool_func, "__doc__", "") or ""
    )

    # Get the signature of the underlying function
    # (StructuredTool wraps the callable in `_run` or `func`)
    if hasattr(tool_func, "_run"):
        func_to_inspect = tool_func._run
    elif hasattr(tool_func, "func"):
        func_to_inspect = tool_func.func
    else:
        func_to_inspect = tool_func

    signature = inspect.signature(func_to_inspect)

    # Create a new class that inherits from agency_swarm's BaseTool.
    # NOTE: `tool_func`, `func_to_inspect`, and `signature` are captured by
    # closure — the generated class delegates back to them at call time.
    class ConvertedFunctionTool(AgencySwarmBaseTool):
        """
        Agency Swarm tool converted from a LangChain function tool.
        """

        # Set up the ToolConfig inner class
        class ToolConfig:
            strict: bool = True
            one_call_at_a_time: bool = True
            output_as_result: bool = False
            async_mode: None = None

        def run(self) -> Dict:
            """
            Execute the tool with runtime validation.

            Returns a dict: either the wrapped callable's own dict result, or
            {"success": True, "result": ...} / {"success": False, "error": ...}.
            """
            # Prepare args for the original function
            # (only attributes that are set and non-None are forwarded)
            kwargs = {
                param_name: getattr(self, param_name, None)
                for param_name in signature.parameters
                if hasattr(self, param_name) and getattr(self, param_name) is not None
            }

            # Validate required parameters
            missing_params = []
            for param_name, param in signature.parameters.items():
                if (
                    param.default == inspect.Parameter.empty  # No default value
                    and kwargs.get(param_name) is None  # Not provided
                ):
                    missing_params.append(param_name)

            if missing_params:
                return {
                    "success": False,
                    "error": f"Missing required parameters: {', '.join(missing_params)}",
                }

            try:
                # Call the original function or tool, preferring the
                # StructuredTool entry points over the raw callable
                if hasattr(tool_func, "_run"):
                    result = tool_func._run(**kwargs)
                elif hasattr(tool_func, "__call__"):
                    result = tool_func(**kwargs)
                else:
                    result = func_to_inspect(**kwargs)

                # If result is already a dict, return it
                if isinstance(result, dict):
                    return result

                # Otherwise, wrap it in a success response
                return {"success": True, "result": result}
            except Exception as e:
                # Handle errors
                logger.exception(f"Tool execution failed: {str(e)}")
                return {"success": False, "error": f"Tool execution failed: {str(e)}"}

    # Set the tool name and description
    # Use safe string operations for the class name
    # (strip anything that is not a valid Python identifier character)
    safe_name = "".join(
        c for c in tool_name.replace("-", "_") if c.isalnum() or c == "_"
    )
    if safe_name:
        class_name = safe_name[0].upper() + safe_name[1:] + "Tool"
    else:
        class_name = "ConvertedFunctionTool"

    ConvertedFunctionTool.__name__ = class_name
    ConvertedFunctionTool.__doc__ = tool_description

    # Set up annotations dictionary
    ConvertedFunctionTool.__annotations__ = {}

    # Add fields based on function parameters
    for param_name, param in signature.parameters.items():
        # Get the type annotation if available
        param_type = (
            param.annotation if param.annotation != inspect.Parameter.empty else Any
        )

        # Make it Optional
        optional_type = Optional[param_type]

        # Get description from docstring if available
        # NOTE(review): docstring parsing is not implemented — this always
        # uses the generic placeholder description.
        param_description = f"Parameter: {param_name}"

        # Add the field to the class
        setattr(
            ConvertedFunctionTool,
            param_name,
            Field(None, description=param_description),
        )
        ConvertedFunctionTool.__annotations__[param_name] = optional_type

    return ConvertedFunctionTool
464
-
465
-
466
# Batch conversion helper
def convert_langchain_tools(
    lc_tools: List[Union[Type[LangChainBaseTool], Callable]]
) -> List[Type[AgencySwarmBaseTool]]:
    """
    Convert multiple LangChain tools to agency-swarm compatible tools.

    Tools that fail to convert are logged and skipped rather than aborting
    the whole batch.

    Args:
        lc_tools: A list of LangChain BaseTool classes or functions decorated with @tool

    Returns:
        A list of converted agency-swarm tool classes
    """
    results = []
    for candidate in lc_tools:
        try:
            results.append(convert_langchain_tool(candidate))
        except Exception as exc:
            # Identify the offending tool by its `name` attribute, then its
            # __name__, then a generic label.
            label = getattr(candidate, "name", getattr(candidate, "__name__", "unknown"))
            logger.error(f"Failed to convert tool {label}: {exc}")

    return results