agent-flow / src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
{ | |
"data": { | |
"edges": [ | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Prompt", | |
"id": "Prompt-rPwbg", | |
"name": "prompt", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "system_prompt", | |
"id": "Agent-rH74C", | |
"inputTypes": [ | |
"Message" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Prompt-rPwbg{œdataTypeœ:œPromptœ,œidœ:œPrompt-rPwbgœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-Agent-rH74C{œfieldNameœ:œsystem_promptœ,œidœ:œAgent-rH74Cœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", | |
"source": "Prompt-rPwbg", | |
"sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-rPwbgœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Agent-rH74C", | |
"targetHandle": "{œfieldNameœ: œsystem_promptœ, œidœ: œAgent-rH74Cœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Prompt", | |
"id": "Prompt-DGXf4", | |
"name": "prompt", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "system_prompt", | |
"id": "Agent-vIPAK", | |
"inputTypes": [ | |
"Message" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Prompt-DGXf4{œdataTypeœ:œPromptœ,œidœ:œPrompt-DGXf4œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-Agent-vIPAK{œfieldNameœ:œsystem_promptœ,œidœ:œAgent-vIPAKœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", | |
"source": "Prompt-DGXf4", | |
"sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-DGXf4œ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Agent-vIPAK", | |
"targetHandle": "{œfieldNameœ: œsystem_promptœ, œidœ: œAgent-vIPAKœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Agent", | |
"id": "Agent-rH74C", | |
"name": "response", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "input_value", | |
"id": "ChatOutput-oAzS1", | |
"inputTypes": [ | |
"Message" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Agent-rH74C{œdataTypeœ:œAgentœ,œidœ:œAgent-rH74Cœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-oAzS1{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-oAzS1œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", | |
"source": "Agent-rH74C", | |
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-rH74Cœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "ChatOutput-oAzS1", | |
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-oAzS1œ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "CalculatorTool", | |
"id": "CalculatorTool-xo5ux", | |
"name": "api_build_tool", | |
"output_types": [ | |
"Tool" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "tools", | |
"id": "Agent-rH74C", | |
"inputTypes": [ | |
"Tool", | |
"BaseTool", | |
"StructuredTool" | |
], | |
"type": "other" | |
} | |
}, | |
"id": "reactflow__edge-CalculatorTool-xo5ux{œdataTypeœ:œCalculatorToolœ,œidœ:œCalculatorTool-xo5uxœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-rH74C{œfieldNameœ:œtoolsœ,œidœ:œAgent-rH74Cœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}", | |
"source": "CalculatorTool-xo5ux", | |
"sourceHandle": "{œdataTypeœ: œCalculatorToolœ, œidœ: œCalculatorTool-xo5uxœ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}", | |
"target": "Agent-rH74C", | |
"targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-rH74Cœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "YahooFinanceTool", | |
"id": "YahooFinanceTool-YmOKx", | |
"name": "api_build_tool", | |
"output_types": [ | |
"Tool" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "tools", | |
"id": "Agent-vIPAK", | |
"inputTypes": [ | |
"Tool", | |
"BaseTool", | |
"StructuredTool" | |
], | |
"type": "other" | |
} | |
}, | |
"id": "reactflow__edge-YahooFinanceTool-YmOKx{œdataTypeœ:œYahooFinanceToolœ,œidœ:œYahooFinanceTool-YmOKxœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-vIPAK{œfieldNameœ:œtoolsœ,œidœ:œAgent-vIPAKœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}", | |
"source": "YahooFinanceTool-YmOKx", | |
"sourceHandle": "{œdataTypeœ: œYahooFinanceToolœ, œidœ: œYahooFinanceTool-YmOKxœ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}", | |
"target": "Agent-vIPAK", | |
"targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-vIPAKœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Agent", | |
"id": "Agent-vIPAK", | |
"name": "response", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "finance_agent_output", | |
"id": "Prompt-rPwbg", | |
"inputTypes": [ | |
"Message", | |
"Text" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Agent-vIPAK{œdataTypeœ:œAgentœ,œidœ:œAgent-vIPAKœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-Prompt-rPwbg{œfieldNameœ:œfinance_agent_outputœ,œidœ:œPrompt-rPwbgœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", | |
"source": "Agent-vIPAK", | |
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-vIPAKœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Prompt-rPwbg", | |
"targetHandle": "{œfieldNameœ: œfinance_agent_outputœ, œidœ: œPrompt-rPwbgœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "ChatInput", | |
"id": "ChatInput-3mEtf", | |
"name": "message", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "input_value", | |
"id": "Agent-uaR2o", | |
"inputTypes": [ | |
"Message" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-ChatInput-3mEtf{œdataTypeœ:œChatInputœ,œidœ:œChatInput-3mEtfœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Agent-uaR2o{œfieldNameœ:œinput_valueœ,œidœ:œAgent-uaR2oœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", | |
"source": "ChatInput-3mEtf", | |
"sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-3mEtfœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Agent-uaR2o", | |
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAgent-uaR2oœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Prompt", | |
"id": "Prompt-BS8ii", | |
"name": "prompt", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "system_prompt", | |
"id": "Agent-uaR2o", | |
"inputTypes": [ | |
"Message" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Prompt-BS8ii{œdataTypeœ:œPromptœ,œidœ:œPrompt-BS8iiœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-Agent-uaR2o{œfieldNameœ:œsystem_promptœ,œidœ:œAgent-uaR2oœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", | |
"source": "Prompt-BS8ii", | |
"sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-BS8iiœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Agent-uaR2o", | |
"targetHandle": "{œfieldNameœ: œsystem_promptœ, œidœ: œAgent-uaR2oœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "TavilyAISearch", | |
"id": "TavilyAISearch-YfG8u", | |
"name": "api_build_tool", | |
"output_types": [ | |
"Tool" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "tools", | |
"id": "Agent-uaR2o", | |
"inputTypes": [ | |
"Tool", | |
"BaseTool", | |
"StructuredTool" | |
], | |
"type": "other" | |
} | |
}, | |
"id": "reactflow__edge-TavilyAISearch-YfG8u{œdataTypeœ:œTavilyAISearchœ,œidœ:œTavilyAISearch-YfG8uœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-uaR2o{œfieldNameœ:œtoolsœ,œidœ:œAgent-uaR2oœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}", | |
"source": "TavilyAISearch-YfG8u", | |
"sourceHandle": "{œdataTypeœ: œTavilyAISearchœ, œidœ: œTavilyAISearch-YfG8uœ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}", | |
"target": "Agent-uaR2o", | |
"targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-uaR2oœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Agent", | |
"id": "Agent-uaR2o", | |
"name": "response", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "input_value", | |
"id": "Agent-vIPAK", | |
"inputTypes": [ | |
"Message" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Agent-uaR2o{œdataTypeœ:œAgentœ,œidœ:œAgent-uaR2oœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-Agent-vIPAK{œfieldNameœ:œinput_valueœ,œidœ:œAgent-vIPAKœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", | |
"source": "Agent-uaR2o", | |
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-uaR2oœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Agent-vIPAK", | |
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAgent-vIPAKœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" | |
}, | |
{ | |
"animated": false, | |
"className": "", | |
"data": { | |
"sourceHandle": { | |
"dataType": "Agent", | |
"id": "Agent-uaR2o", | |
"name": "response", | |
"output_types": [ | |
"Message" | |
] | |
}, | |
"targetHandle": { | |
"fieldName": "research_agent_output", | |
"id": "Prompt-rPwbg", | |
"inputTypes": [ | |
"Message", | |
"Text" | |
], | |
"type": "str" | |
} | |
}, | |
"id": "reactflow__edge-Agent-uaR2o{œdataTypeœ:œAgentœ,œidœ:œAgent-uaR2oœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-Prompt-rPwbg{œfieldNameœ:œresearch_agent_outputœ,œidœ:œPrompt-rPwbgœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", | |
"source": "Agent-uaR2o", | |
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-uaR2oœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}", | |
"target": "Prompt-rPwbg", | |
"targetHandle": "{œfieldNameœ: œresearch_agent_outputœ, œidœ: œPrompt-rPwbgœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" | |
} | |
], | |
"nodes": [ | |
{ | |
"data": { | |
"description": "Display a chat message in the Playground.", | |
"display_name": "Chat Output", | |
"id": "ChatOutput-oAzS1", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Display a chat message in the Playground.", | |
"display_name": "Chat Output", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"input_value", | |
"should_store_message", | |
"sender", | |
"sender_name", | |
"session_id", | |
"data_template", | |
"background_color", | |
"chat_icon", | |
"text_color" | |
], | |
"frozen": false, | |
"icon": "MessagesSquare", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Message", | |
"method": "message_response", | |
"name": "message", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"background_color": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Background Color", | |
"dynamic": false, | |
"info": "The background color of the icon.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "background_color", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"chat_icon": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Icon", | |
"dynamic": false, | |
"info": "The icon of the message.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "chat_icon", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" | |
}, | |
"data_template": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Data Template", | |
"dynamic": false, | |
"info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "data_template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "{text}" | |
}, | |
"input_value": { | |
"_input_type": "MessageInput", | |
"advanced": false, | |
"display_name": "Text", | |
"dynamic": false, | |
"info": "Message to be passed as output.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "input_value", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"sender": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Sender Type", | |
"dynamic": false, | |
"info": "Type of sender.", | |
"name": "sender", | |
"options": [ | |
"Machine", | |
"User" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Machine" | |
}, | |
"sender_name": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Sender Name", | |
"dynamic": false, | |
"info": "Name of the sender.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "sender_name", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "AI" | |
}, | |
"session_id": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Session ID", | |
"dynamic": false, | |
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "session_id", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"should_store_message": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Store Messages", | |
"dynamic": false, | |
"info": "Store the message in the history.", | |
"list": false, | |
"name": "should_store_message", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"text_color": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Text Color", | |
"dynamic": false, | |
"info": "The text color of the name", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "text_color", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "ChatOutput" | |
}, | |
"dragging": false, | |
"height": 234, | |
"id": "ChatOutput-oAzS1", | |
"position": { | |
"x": 1239.222567317785, | |
"y": -920.0283175735606 | |
}, | |
"positionAbsolute": { | |
"x": 1239.222567317785, | |
"y": -920.0283175735606 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "Define the agent's instructions, then enter a task to complete using tools.", | |
"display_name": "Finance Agent", | |
"id": "Agent-vIPAK", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Define the agent's instructions, then enter a task to complete using tools.", | |
"display_name": "Finance Agent", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"agent_llm", | |
"max_tokens", | |
"model_kwargs", | |
"json_mode", | |
"output_schema", | |
"model_name", | |
"openai_api_base", | |
"api_key", | |
"temperature", | |
"seed", | |
"output_parser", | |
"system_prompt", | |
"tools", | |
"input_value", | |
"handle_parsing_errors", | |
"verbose", | |
"max_iterations", | |
"agent_description", | |
"memory", | |
"sender", | |
"sender_name", | |
"n_messages", | |
"session_id", | |
"order", | |
"template", | |
"add_current_date_tool" | |
], | |
"frozen": false, | |
"icon": "bot", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Response", | |
"method": "message_response", | |
"name": "response", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"add_current_date_tool": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Current Date", | |
"dynamic": false, | |
"info": "If true, will add a tool to the agent that returns the current date.", | |
"list": false, | |
"name": "add_current_date_tool", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"agent_description": { | |
"_input_type": "MultilineInput", | |
"advanced": true, | |
"display_name": "Agent Description", | |
"dynamic": false, | |
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "agent_description", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "A helpful assistant with access to the following tools:" | |
}, | |
"agent_llm": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Model Provider", | |
"dynamic": false, | |
"info": "The provider of the language model that the agent will use to generate responses.", | |
"input_types": [], | |
"name": "agent_llm", | |
"options": [ | |
"Amazon Bedrock", | |
"Anthropic", | |
"Azure OpenAI", | |
"Groq", | |
"NVIDIA", | |
"OpenAI", | |
"Custom" | |
], | |
"placeholder": "", | |
"real_time_refresh": true, | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "OpenAI" | |
}, | |
"api_key": { | |
"_input_type": "SecretStrInput", | |
"advanced": false, | |
"display_name": "OpenAI API Key", | |
"dynamic": false, | |
"info": "The OpenAI API Key to use for the OpenAI model.", | |
"input_types": [ | |
"Message" | |
], | |
"load_from_db": false, | |
"name": "api_key", | |
"password": true, | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"type": "str", | |
"value": "OPENAI_API_KEY" | |
}, | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n llm_model, display_name = self.get_llm()\n self.model_name = get_model_name(llm_model, display_name=display_name)\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = await self.get_memory_data()\n\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise ValueError(msg)\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n raise ValueError(msg)\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = 
MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n 
\"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n return build_config\n" | |
}, | |
"handle_parsing_errors": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Handle Parse Errors", | |
"dynamic": false, | |
"info": "Should the Agent fix errors when reading user input for better processing?", | |
"list": false, | |
"name": "handle_parsing_errors", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"input_value": { | |
"_input_type": "MessageTextInput", | |
"advanced": false, | |
"display_name": "Input", | |
"dynamic": false, | |
"info": "The input provided by the user for the agent to process.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "input_value", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"json_mode": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "JSON Mode", | |
"dynamic": false, | |
"info": "If True, it will output JSON regardless of passing a schema.", | |
"list": false, | |
"name": "json_mode", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": false | |
}, | |
"max_iterations": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Iterations", | |
"dynamic": false, | |
"info": "The maximum number of attempts the agent can make to complete its task before it stops.", | |
"list": false, | |
"name": "max_iterations", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 15 | |
}, | |
"max_tokens": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Tokens", | |
"dynamic": false, | |
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", | |
"list": false, | |
"name": "max_tokens", | |
"placeholder": "", | |
"range_spec": { | |
"max": 128000, | |
"min": 0, | |
"step": 0.1, | |
"step_type": "float" | |
}, | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": "" | |
}, | |
"memory": { | |
"_input_type": "HandleInput", | |
"advanced": true, | |
"display_name": "External Memory", | |
"dynamic": false, | |
"info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.", | |
"input_types": [ | |
"BaseChatMessageHistory" | |
], | |
"list": false, | |
"name": "memory", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"model_kwargs": { | |
"_input_type": "DictInput", | |
"advanced": true, | |
"display_name": "Model Kwargs", | |
"dynamic": false, | |
"info": "Additional keyword arguments to pass to the model.", | |
"list": false, | |
"name": "model_kwargs", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"type": "dict", | |
"value": {} | |
}, | |
"model_name": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Model Name", | |
"dynamic": false, | |
"info": "", | |
"name": "model_name", | |
"options": [ | |
"gpt-4o-mini", | |
"gpt-4o", | |
"gpt-4-turbo", | |
"gpt-4-turbo-preview", | |
"gpt-4", | |
"gpt-3.5-turbo", | |
"gpt-3.5-turbo-0125" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "gpt-4o-mini" | |
}, | |
"n_messages": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Number of Messages", | |
"dynamic": false, | |
"info": "Number of messages to retrieve.", | |
"list": false, | |
"name": "n_messages", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 100 | |
}, | |
"openai_api_base": { | |
"_input_type": "StrInput", | |
"advanced": true, | |
"display_name": "OpenAI API Base", | |
"dynamic": false, | |
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", | |
"list": false, | |
"load_from_db": false, | |
"name": "openai_api_base", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"order": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Order", | |
"dynamic": false, | |
"info": "Order of the messages.", | |
"name": "order", | |
"options": [ | |
"Ascending", | |
"Descending" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Ascending" | |
}, | |
"output_parser": { | |
"_input_type": "HandleInput", | |
"advanced": true, | |
"display_name": "Output Parser", | |
"dynamic": false, | |
"info": "The parser to use to parse the output of the model", | |
"input_types": [ | |
"OutputParser" | |
], | |
"list": false, | |
"name": "output_parser", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"output_schema": { | |
"_input_type": "DictInput", | |
"advanced": true, | |
"display_name": "Schema", | |
"dynamic": false, | |
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]", | |
"list": true, | |
"name": "output_schema", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"type": "dict", | |
"value": {} | |
}, | |
"seed": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Seed", | |
"dynamic": false, | |
"info": "The seed controls the reproducibility of the job.", | |
"list": false, | |
"name": "seed", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 1 | |
}, | |
"sender": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Sender Type", | |
"dynamic": false, | |
"info": "Filter by sender type.", | |
"name": "sender", | |
"options": [ | |
"Machine", | |
"User", | |
"Machine and User" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Machine and User" | |
}, | |
"sender_name": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Sender Name", | |
"dynamic": false, | |
"info": "Filter by sender name.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "sender_name", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Finance Agent" | |
}, | |
"session_id": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Session ID", | |
"dynamic": false, | |
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "session_id", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"system_prompt": { | |
"_input_type": "MultilineInput", | |
"advanced": false, | |
"display_name": "Agent Instructions", | |
"dynamic": false, | |
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "system_prompt", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "You are the chief editor of a prestigious publication known for transforming complex information into clear, engaging content. Review and refine the researcher's document about {topic}.\n\nYour editing process should:\n- Verify and challenge any questionable claims\n- Restructure content for better flow and readability\n- Remove redundancies and unclear statements\n- Add context where needed\n- Ensure balanced coverage of the topic\n- Transform technical language into accessible explanations\n\nMaintain high editorial standards while making the content engaging for an educated general audience. Present the revised version in a clean, well-structured format." | |
}, | |
"temperature": { | |
"_input_type": "FloatInput", | |
"advanced": true, | |
"display_name": "Temperature", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"name": "temperature", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "float", | |
"value": 0.1 | |
}, | |
"template": { | |
"_input_type": "MultilineInput", | |
"advanced": true, | |
"display_name": "Template", | |
"dynamic": false, | |
"info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "{sender_name}: {text}" | |
}, | |
"tools": { | |
"_input_type": "HandleInput", | |
"advanced": false, | |
"display_name": "Tools", | |
"dynamic": false, | |
"info": "These are the tools that the agent can use to help with tasks.", | |
"input_types": [ | |
"Tool", | |
"BaseTool", | |
"StructuredTool" | |
], | |
"list": true, | |
"name": "tools", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"verbose": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Verbose", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"name": "verbose", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "Agent" | |
}, | |
"dragging": false, | |
"height": 650, | |
"id": "Agent-vIPAK", | |
"position": { | |
"x": 45.70736046026991, | |
"y": -1369.035463408626 | |
}, | |
"positionAbsolute": { | |
"x": 45.70736046026991, | |
"y": -1369.035463408626 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "Define the agent's instructions, then enter a task to complete using tools.", | |
"display_name": "Analysis & Editor Agent", | |
"id": "Agent-rH74C", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Define the agent's instructions, then enter a task to complete using tools.", | |
"display_name": "Analysis & Editor Agent", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"agent_llm", | |
"max_tokens", | |
"model_kwargs", | |
"json_mode", | |
"output_schema", | |
"model_name", | |
"openai_api_base", | |
"api_key", | |
"temperature", | |
"seed", | |
"output_parser", | |
"system_prompt", | |
"tools", | |
"input_value", | |
"handle_parsing_errors", | |
"verbose", | |
"max_iterations", | |
"agent_description", | |
"memory", | |
"sender", | |
"sender_name", | |
"n_messages", | |
"session_id", | |
"order", | |
"template", | |
"add_current_date_tool" | |
], | |
"frozen": false, | |
"icon": "bot", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Response", | |
"method": "message_response", | |
"name": "response", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"add_current_date_tool": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Current Date", | |
"dynamic": false, | |
"info": "If true, will add a tool to the agent that returns the current date.", | |
"list": false, | |
"name": "add_current_date_tool", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"agent_description": { | |
"_input_type": "MultilineInput", | |
"advanced": true, | |
"display_name": "Agent Description", | |
"dynamic": false, | |
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "agent_description", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "A helpful assistant with access to the following tools:" | |
}, | |
"agent_llm": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Model Provider", | |
"dynamic": false, | |
"info": "The provider of the language model that the agent will use to generate responses.", | |
"input_types": [], | |
"name": "agent_llm", | |
"options": [ | |
"Amazon Bedrock", | |
"Anthropic", | |
"Azure OpenAI", | |
"Groq", | |
"NVIDIA", | |
"OpenAI", | |
"Custom" | |
], | |
"placeholder": "", | |
"real_time_refresh": true, | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "OpenAI" | |
}, | |
"api_key": { | |
"_input_type": "SecretStrInput", | |
"advanced": false, | |
"display_name": "OpenAI API Key", | |
"dynamic": false, | |
"info": "The OpenAI API Key to use for the OpenAI model.", | |
"input_types": [ | |
"Message" | |
], | |
"load_from_db": false, | |
"name": "api_key", | |
"password": true, | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"type": "str", | |
"value": "OPENAI_API_KEY" | |
}, | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n llm_model, display_name = self.get_llm()\n self.model_name = get_model_name(llm_model, display_name=display_name)\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = await self.get_memory_data()\n\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise ValueError(msg)\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n raise ValueError(msg)\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = 
MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n 
\"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n return build_config\n" | |
}, | |
"handle_parsing_errors": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Handle Parse Errors", | |
"dynamic": false, | |
"info": "Should the Agent fix errors when reading user input for better processing?", | |
"list": false, | |
"name": "handle_parsing_errors", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"input_value": { | |
"_input_type": "MessageTextInput", | |
"advanced": false, | |
"display_name": "Input", | |
"dynamic": false, | |
"info": "The input provided by the user for the agent to process.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "input_value", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Start the analysis" | |
}, | |
"json_mode": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "JSON Mode", | |
"dynamic": false, | |
"info": "If True, it will output JSON regardless of passing a schema.", | |
"list": false, | |
"name": "json_mode", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": false | |
}, | |
"max_iterations": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Iterations", | |
"dynamic": false, | |
"info": "The maximum number of attempts the agent can make to complete its task before it stops.", | |
"list": false, | |
"name": "max_iterations", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 15 | |
}, | |
"max_tokens": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Tokens", | |
"dynamic": false, | |
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", | |
"list": false, | |
"name": "max_tokens", | |
"placeholder": "", | |
"range_spec": { | |
"max": 128000, | |
"min": 0, | |
"step": 0.1, | |
"step_type": "float" | |
}, | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": "" | |
}, | |
"memory": { | |
"_input_type": "HandleInput", | |
"advanced": true, | |
"display_name": "External Memory", | |
"dynamic": false, | |
"info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.", | |
"input_types": [ | |
"BaseChatMessageHistory" | |
], | |
"list": false, | |
"name": "memory", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"model_kwargs": { | |
"_input_type": "DictInput", | |
"advanced": true, | |
"display_name": "Model Kwargs", | |
"dynamic": false, | |
"info": "Additional keyword arguments to pass to the model.", | |
"list": false, | |
"name": "model_kwargs", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"type": "dict", | |
"value": {} | |
}, | |
"model_name": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Model Name", | |
"dynamic": false, | |
"info": "", | |
"name": "model_name", | |
"options": [ | |
"gpt-4o-mini", | |
"gpt-4o", | |
"gpt-4-turbo", | |
"gpt-4-turbo-preview", | |
"gpt-4", | |
"gpt-3.5-turbo", | |
"gpt-3.5-turbo-0125" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "gpt-4o-mini" | |
}, | |
"n_messages": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Number of Messages", | |
"dynamic": false, | |
"info": "Number of messages to retrieve.", | |
"list": false, | |
"name": "n_messages", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 100 | |
}, | |
"openai_api_base": { | |
"_input_type": "StrInput", | |
"advanced": true, | |
"display_name": "OpenAI API Base", | |
"dynamic": false, | |
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", | |
"list": false, | |
"load_from_db": false, | |
"name": "openai_api_base", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"order": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Order", | |
"dynamic": false, | |
"info": "Order of the messages.", | |
"name": "order", | |
"options": [ | |
"Ascending", | |
"Descending" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Ascending" | |
}, | |
"output_parser": { | |
"_input_type": "HandleInput", | |
"advanced": true, | |
"display_name": "Output Parser", | |
"dynamic": false, | |
"info": "The parser to use to parse the output of the model", | |
"input_types": [ | |
"OutputParser" | |
], | |
"list": false, | |
"name": "output_parser", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"output_schema": { | |
"_input_type": "DictInput", | |
"advanced": true, | |
"display_name": "Schema", | |
"dynamic": false, | |
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]", | |
"list": true, | |
"name": "output_schema", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"type": "dict", | |
"value": {} | |
}, | |
"seed": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Seed", | |
"dynamic": false, | |
"info": "The seed controls the reproducibility of the job.", | |
"list": false, | |
"name": "seed", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 1 | |
}, | |
"sender": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Sender Type", | |
"dynamic": false, | |
"info": "Filter by sender type.", | |
"name": "sender", | |
"options": [ | |
"Machine", | |
"User", | |
"Machine and User" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Machine and User" | |
}, | |
"sender_name": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Sender Name", | |
"dynamic": false, | |
"info": "Filter by sender name.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "sender_name", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Analysis & Editor Agent" | |
}, | |
"session_id": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Session ID", | |
"dynamic": false, | |
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "session_id", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"system_prompt": { | |
"_input_type": "MultilineInput", | |
"advanced": false, | |
"display_name": "Agent Instructions", | |
"dynamic": false, | |
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "system_prompt", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "You are a brilliant comedy writer known for making complex topics entertaining and memorable. Using the editor's refined document about {topic}, create an engaging, humorous blog post.\n\nYour approach should:\n- Find unexpected angles and amusing parallels\n- Use clever wordplay and wit (avoid cheap jokes)\n- Maintain accuracy while being entertaining\n- Include relatable examples and analogies\n- Keep a smart, sophisticated tone\n- Make the topic more approachable through humor\n\nCreate a blog post that makes people laugh while actually teaching them about {topic}. The humor should enhance, not overshadow, the educational value." | |
}, | |
"temperature": { | |
"_input_type": "FloatInput", | |
"advanced": true, | |
"display_name": "Temperature", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"name": "temperature", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "float", | |
"value": 0.1 | |
}, | |
"template": { | |
"_input_type": "MultilineInput", | |
"advanced": true, | |
"display_name": "Template", | |
"dynamic": false, | |
"info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "{sender_name}: {text}" | |
}, | |
"tools": { | |
"_input_type": "HandleInput", | |
"advanced": false, | |
"display_name": "Tools", | |
"dynamic": false, | |
"info": "These are the tools that the agent can use to help with tasks.", | |
"input_types": [ | |
"Tool", | |
"BaseTool", | |
"StructuredTool" | |
], | |
"list": true, | |
"name": "tools", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"verbose": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Verbose", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"name": "verbose", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "Agent" | |
}, | |
"dragging": false, | |
"height": 650, | |
"id": "Agent-rH74C", | |
"position": { | |
"x": 815.1900903820148, | |
"y": -1365.4053932711827 | |
}, | |
"positionAbsolute": { | |
"x": 815.1900903820148, | |
"y": -1365.4053932711827 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "Create a prompt template with dynamic variables.", | |
"display_name": "Prompt", | |
"id": "Prompt-BS8ii", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": { | |
"template": [] | |
}, | |
"description": "Create a prompt template with dynamic variables.", | |
"display_name": "Prompt", | |
"documentation": "", | |
"edited": false, | |
"error": null, | |
"field_order": [ | |
"template" | |
], | |
"frozen": true, | |
"full_path": null, | |
"icon": "prompts", | |
"is_composition": null, | |
"is_input": null, | |
"is_output": null, | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"name": "", | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Prompt Message", | |
"method": "build_prompt", | |
"name": "prompt", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" | |
}, | |
"template": { | |
"_input_type": "PromptInput", | |
"advanced": false, | |
"display_name": "Template", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"name": "template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"type": "prompt", | |
"value": "# Expert Research Agent Protocol\n\n[Previous content remains the same, but adding this critical section about image handling:]\n\n## Image and Visual Data Handling\nWhen using Tavily Search with images enabled:\n\n1. Image Collection\n - Always enable include_images in Tavily search\n - Collect relevant stock charts, product images, and news photos\n - Save image URLs from reliable sources\n - Focus on recent, high-quality images\n\n2. Image Categories to Collect\n - Product showcase images\n - Stock performance charts\n - Company facilities\n - Key executive photos\n - Recent event images\n - Market share visualizations\n\n3. Image Documentation\n - Include full image URL\n - Add clear descriptions\n - Note image source and date\n - Explain image relevance\n\n4. Image Presentation in Output\n ```markdown\n \n - Source: [Source Name]\n - Date: [Image Date]\n - Context: [Brief explanation of image relevance]\n ```\n\n## Output Structure\nPresent your findings in this format:\n\n### Company Overview\n[Comprehensive overview based on search results]\n\n### Recent Developments\n[Latest news and announcements with dates]\n\n### Market Context\n[Industry trends and competitive position]\n\n### Visual Insights\n[Reference relevant images from search]\n\n### Key Risk Factors\n[Identified risks and challenges]\n\n### Sources\n[List of key sources consulted]\n\nRemember to:\n- Use Markdown formatting for clear structure\n- Include dates for all time-sensitive information\n- Quote significant statistics and statements\n- Reference any included images\n- Highlight conflicting information or viewpoints\n- Pass all gathered data to the Finance Agent for detailed financial analysis" | |
}, | |
"tool_placeholder": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Tool Placeholder", | |
"dynamic": false, | |
"info": "A placeholder input for tool mode.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "tool_placeholder", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "Prompt" | |
}, | |
"dragging": false, | |
"height": 260, | |
"id": "Prompt-BS8ii", | |
"position": { | |
"x": -1142.2312935529987, | |
"y": -1107.442614776065 | |
}, | |
"positionAbsolute": { | |
"x": -1142.2312935529987, | |
"y": -1107.442614776065 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "Create a prompt template with dynamic variables.", | |
"display_name": "Prompt", | |
"id": "Prompt-DGXf4", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": { | |
"template": [] | |
}, | |
"description": "Create a prompt template with dynamic variables.", | |
"display_name": "Prompt", | |
"documentation": "", | |
"edited": false, | |
"error": null, | |
"field_order": [ | |
"template" | |
], | |
"frozen": false, | |
"full_path": null, | |
"icon": "prompts", | |
"is_composition": null, | |
"is_input": null, | |
"is_output": null, | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"name": "", | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Prompt Message", | |
"method": "build_prompt", | |
"name": "prompt", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" | |
}, | |
"template": { | |
"_input_type": "PromptInput", | |
"advanced": false, | |
"display_name": "Template", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"name": "template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"type": "prompt", | |
"value": "# Financial Analysis Expert Protocol\n\nYou are an elite financial analyst with access to Yahoo Finance tools. Your role is to perform comprehensive financial analysis based on the research provided and the data available through Yahoo Finance methods.\n\n## CRITICAL: Stock Symbol Usage\n- Always use correct stock ticker symbols in UPPERCASE format\n- Examples of valid symbols:\n * AAPL (Apple Inc.)\n * MSFT (Microsoft)\n * NVDA (NVIDIA)\n * GOOGL (Alphabet/Google)\n * TSLA (Tesla)\n- Invalid formats to avoid:\n * ❌ Apple (company name instead of symbol)\n * ❌ aapl (lowercase)\n * ❌ $AAPL (with dollar sign)\n * ❌ AAPL.US (with extension)\n\n## Data Collection Strategy\n\n1. Initial Symbol Verification\n - Confirm valid stock symbol format before any analysis\n - Use get_info first to verify symbol validity\n - Cross-reference with get_fast_info to ensure data availability\n - If symbol is invalid, immediately report the error\n\n2. Core Company Analysis\n - Get basic info (get_info): Full company details\n - Fast metrics (get_fast_info): Quick market data\n - Earnings data (get_earnings): Performance history\n - Calendar events (get_calendar): Upcoming events\n\n3. Financial Statement Analysis\n - Income statements (get_income_stmt)\n - Balance sheets (get_balance_sheet)\n - Cash flow statements (get_cashflow)\n\n4. Market Intelligence\n - Latest recommendations (get_recommendations)\n - Recommendation trends (get_recommendations_summary)\n - Recent rating changes (get_upgrades_downgrades)\n - Breaking news (get_news, specify number of articles needed)\n\n5. Ownership Structure\n - Institutional holdings (get_institutional_holders)\n - Major stakeholders (get_major_holders)\n - Fund ownership (get_mutualfund_holders)\n - Insider activity:\n * Recent purchases (get_insider_purchases)\n * Transaction history (get_insider_transactions)\n * Insider roster (get_insider_roster_holders)\n\n6. Historical Patterns\n - Corporate actions (get_actions)\n - Dividend history (get_dividends)\n - Split history (get_splits)\n - Capital gains (get_capital_gains)\n - Regulatory filings (get_sec_filings)\n - ESG metrics (get_sustainability)\n\n## Analysis Framework\n\n1. Profitability Metrics\n - Revenue trends\n - Margin analysis\n - Efficiency ratios\n - Return metrics\n\n2. Financial Health\n - Liquidity ratios\n - Debt analysis\n - Working capital\n - Cash flow quality\n\n3. Growth Assessment\n - Historical rates\n - Future projections\n - Market opportunity\n - Expansion plans\n\n4. 
Risk Evaluation\n - Financial risks\n - Market position\n - Operational challenges\n - Competitive threats\n\n## Output Structure\n\n### Symbol Information\n[Confirm stock symbol and basic company information]\n\n### Financial Overview\n[Key metrics summary with actual numbers]\n\n### Profitability Analysis\n[Detailed profit metrics with comparisons]\n\n### Balance Sheet Review\n[Asset and liability analysis]\n\n### Cash Flow Assessment\n[Cash generation and usage patterns]\n\n### Market Sentiment\n[Analyst views and institutional activity]\n\n### Growth Analysis\n[Historical and projected growth]\n\n### Risk Factors\n[Comprehensive risk assessment]\n\nRemember to:\n- ALWAYS verify stock symbol validity first\n- Use exact numbers from the data\n- Compare with industry standards\n- Highlight significant trends\n- Flag data anomalies\n- Identify key risks\n- Provide metric context\n- Focus on material information\n\nPass your comprehensive financial analysis to the Analysis & Editor Agent for final synthesis and recommendations. Include any invalid symbol errors or data availability issues in your report." | |
}, | |
"tool_placeholder": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Tool Placeholder", | |
"dynamic": false, | |
"info": "A placeholder input for tool mode.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "tool_placeholder", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "Prompt" | |
}, | |
"dragging": false, | |
"height": 260, | |
"id": "Prompt-DGXf4", | |
"position": { | |
"x": -344.9674638932195, | |
"y": -1280.1782190739505 | |
}, | |
"positionAbsolute": { | |
"x": -344.9674638932195, | |
"y": -1280.1782190739505 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "Create a prompt template with dynamic variables.", | |
"display_name": "Prompt", | |
"id": "Prompt-rPwbg", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": { | |
"template": [ | |
"research_agent_output", | |
"finance_agent_output" | |
] | |
}, | |
"description": "Create a prompt template with dynamic variables.", | |
"display_name": "Prompt", | |
"documentation": "", | |
"edited": false, | |
"error": null, | |
"field_order": [ | |
"template" | |
], | |
"frozen": false, | |
"full_path": null, | |
"icon": "prompts", | |
"is_composition": null, | |
"is_input": null, | |
"is_output": null, | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"name": "", | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Prompt Message", | |
"method": "build_prompt", | |
"name": "prompt", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" | |
}, | |
"finance_agent_output": { | |
"advanced": false, | |
"display_name": "finance_agent_output", | |
"dynamic": false, | |
"field_type": "str", | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"input_types": [ | |
"Message", | |
"Text" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "finance_agent_output", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"type": "str", | |
"value": "" | |
}, | |
"research_agent_output": { | |
"advanced": false, | |
"display_name": "research_agent_output", | |
"dynamic": false, | |
"field_type": "str", | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"input_types": [ | |
"Message", | |
"Text" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "research_agent_output", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"type": "str", | |
"value": "" | |
}, | |
"template": { | |
"_input_type": "PromptInput", | |
"advanced": false, | |
"display_name": "Template", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"name": "template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"type": "prompt", | |
"value": "# Investment Analysis & Editorial Protocol\n\nYou are an elite financial analyst and editorial expert responsible for creating the final investment analysis report. Your role is to synthesize research and financial data into a visually appealing, data-rich investment analysis using proper markdown formatting.\n\n## Input Processing\n1. Research Agent Input (Visual + Market Research):\n - Market research and news\n - Industry trends\n - Competitive analysis\n - Images and charts\n - News sentiment\n - {research_agent_output}\n\n2. Finance Agent Input (Quantitative Data):\n - Detailed financial metrics\n - Stock statistics\n - Analyst ratings\n - Growth metrics\n - Risk factors\n - {finance_agent_output}\n\n## Output Format Requirements\n\n1. Header Format\n Use single # for main title, increment for subsections\n \n2. Image Placement\n - Place images immediately after relevant sections\n - Use proper markdown format: \n - Always include source and context\n - Use *italics* for image captions\n\n3. Table Formatting\n - Use standard markdown tables\n - Align numbers right, text left\n - Include header separators\n - Keep consistent column widths\n\n4. Data Presentation\n - Use bold (**) for key metrics\n - Include percentage changes\n - Show comparisons\n - Include trends (↑/↓)\n\n## Report Structure\n\n# Investment Analysis Report: [Company Name] ($SYMBOL)\n*Generated: [Date] | Type: Comprehensive Evaluation*\n\n[Executive Summary - 3 paragraphs max]\n\n## Quick Take\n- **Recommendation**: [BUY/HOLD/SELL]\n- **Target Price**: $XXX\n- **Risk Level**: [LOW/MEDIUM/HIGH]\n- **Investment Horizon**: [SHORT/MEDIUM/LONG]-term\n\n## Market Analysis\n[Insert most relevant market image here]\n*Source: [Name] - [Context]*\n\n### Industry Position\n- Market share data\n- Competitive analysis\n- Recent developments\n\n## Financial Health\n| Metric | Value | YoY Change | Industry Avg |\n|:-------|------:|-----------:|-------------:|\n| Revenue | $XXX | XX% | $XXX |\n[Additional metrics]\n\n### Key Performance Indicators\n- **Revenue Growth**: XX%\n- **Profit Margin**: XX%\n- **ROE**: XX%\n\n## Growth Drivers\n1. Short-term Catalysts\n2. Long-term Opportunities\n3. Innovation Pipeline\n\n## Risk Assessment\n| Risk Factor | Severity | Probability | Impact |\n|:------------|:---------|:------------|:-------|\n| [Risk 1] | HIGH/MED/LOW | H/M/L | Details |\n\n## Technical Analysis\n[Insert technical chart]\n*Source: [Name] - Analysis of key technical indicators*\n\n## Investment Strategy\n### Long-term (18+ months)\n- Entry points\n- Position sizing\n- Risk management\n\n### Medium-term (6-18 months)\n- Technical levels\n- Catalysts timeline\n\n### Short-term (0-6 months)\n- Support/Resistance\n- Trading parameters\n\n## Price Targets\n- **Bear Case**: $XXX (-XX%)\n- **Base Case**: $XXX\n- **Bull Case**: $XXX (+XX%)\n\n## Monitoring Checklist\n1. [Metric 1]\n2. [Metric 2]\n3. [Metric 3]\n\n## Visual Evidence\n[Insert additional relevant images]\n*Source: [Name] - [Specific context and analysis]*\n\n*Disclaimer: This analysis is for informational purposes only. Always conduct your own research before making investment decisions.*\n\n## Output Requirements\n\n1. Visual Excellence\n - Strategic image placement\n - Clear data visualization\n - Consistent formatting\n - Professional appearance\n\n2. Data Accuracy\n - Cross-reference numbers\n - Verify calculations\n - Include trends\n - Show comparisons\n\n3. 
Action Focus\n - Clear recommendations\n - Specific entry/exit points\n - Risk management guidelines\n - Monitoring triggers\n\n4. Professional Standards\n - No spelling errors\n - Consistent formatting\n - Proper citations\n - Clear attribution\n\nRemember:\n- Never use triple backticks\n- Include all images with proper markdown\n- Maintain consistent formatting\n- Provide specific, actionable insights\n- Use emojis sparingly and professionally\n- Cross-validate all data points" | |
}, | |
"tool_placeholder": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Tool Placeholder", | |
"dynamic": false, | |
"info": "A placeholder input for tool mode.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "tool_placeholder", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "Prompt" | |
}, | |
"dragging": false, | |
"height": 433, | |
"id": "Prompt-rPwbg", | |
"position": { | |
"x": 416.02309796632085, | |
"y": -1081.5957453651372 | |
}, | |
"positionAbsolute": { | |
"x": 416.02309796632085, | |
"y": -1081.5957453651372 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"id": "ChatInput-3mEtf", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Get chat inputs from the Playground.", | |
"display_name": "Chat Input", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"input_value", | |
"should_store_message", | |
"sender", | |
"sender_name", | |
"session_id", | |
"files", | |
"background_color", | |
"chat_icon", | |
"text_color" | |
], | |
"frozen": true, | |
"icon": "MessagesSquare", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Message", | |
"method": "message_response", | |
"name": "message", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"background_color": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Background Color", | |
"dynamic": false, | |
"info": "The background color of the icon.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "background_color", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"chat_icon": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Icon", | |
"dynamic": false, | |
"info": "The icon of the message.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "chat_icon", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\"background_color\": background_color, \"text_color\": text_color, \"icon\": icon},\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" | |
}, | |
"files": { | |
"_input_type": "FileInput", | |
"advanced": true, | |
"display_name": "Files", | |
"dynamic": false, | |
"fileTypes": [ | |
"txt", | |
"md", | |
"mdx", | |
"csv", | |
"json", | |
"yaml", | |
"yml", | |
"xml", | |
"html", | |
"htm", | |
"pdf", | |
"docx", | |
"py", | |
"sh", | |
"sql", | |
"js", | |
"ts", | |
"tsx", | |
"jpg", | |
"jpeg", | |
"png", | |
"bmp", | |
"image" | |
], | |
"file_path": "", | |
"info": "Files to be sent with the message.", | |
"list": true, | |
"name": "files", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "file", | |
"value": "" | |
}, | |
"input_value": { | |
"_input_type": "MultilineInput", | |
"advanced": false, | |
"display_name": "Text", | |
"dynamic": false, | |
"info": "Message to be passed as input.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "input_value", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Should I invest in Tesla (TSLA) stock right now? Please analyze the company's current position, market trends, financial health, and provide a clear investment recommendation." | |
}, | |
"sender": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Sender Type", | |
"dynamic": false, | |
"info": "Type of sender.", | |
"name": "sender", | |
"options": [ | |
"Machine", | |
"User" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "User" | |
}, | |
"sender_name": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Sender Name", | |
"dynamic": false, | |
"info": "Name of the sender.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "sender_name", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "User" | |
}, | |
"session_id": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Session ID", | |
"dynamic": false, | |
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "session_id", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"should_store_message": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Store Messages", | |
"dynamic": false, | |
"info": "Store the message in the history.", | |
"list": false, | |
"name": "should_store_message", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"text_color": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Text Color", | |
"dynamic": false, | |
"info": "The text color of the name", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "text_color", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "ChatInput" | |
}, | |
"dragging": false, | |
"height": 234, | |
"id": "ChatInput-3mEtf", | |
"position": { | |
"x": -1510.6054210793818, | |
"y": -947.702056394023 | |
}, | |
"positionAbsolute": { | |
"x": -1510.6054210793818, | |
"y": -947.702056394023 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"id": "CalculatorTool-xo5ux", | |
"node": { | |
"base_classes": [ | |
"Data", | |
"Tool" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Perform basic arithmetic operations on a given expression.", | |
"display_name": "Calculator", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"expression" | |
], | |
"frozen": false, | |
"icon": "calculator", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Data", | |
"method": "run_model", | |
"name": "api_run_model", | |
"required_inputs": [], | |
"selected": "Data", | |
"types": [ | |
"Data" | |
], | |
"value": "__UNDEFINED__" | |
}, | |
{ | |
"cache": true, | |
"display_name": "Tool", | |
"method": "build_tool", | |
"name": "api_build_tool", | |
"required_inputs": [], | |
"selected": "Tool", | |
"types": [ | |
"Tool" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. Input should be a string containing the expression.\",\n func=self._eval_expr_with_error,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _eval_expr(self, node):\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](self._eval_expr(node.left), self._eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](self._eval_expr(node.operand))\n if isinstance(node, ast.Call):\n msg = (\n \"Function calls like sqrt(), sin(), cos() etc. are not supported. \"\n \"Only basic arithmetic operations (+, -, *, /, **) are allowed.\"\n )\n raise TypeError(msg)\n msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(msg)\n\n def _eval_expr_with_error(self, expression: str) -> list[Data]:\n try:\n return self._evaluate_expression(expression)\n except Exception as e:\n raise ToolException(str(e)) from e\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message, \"input\": expression})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message, \"input\": expression})]\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error evaluating expression\")\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message, \"input\": expression})]\n" | |
}, | |
"expression": { | |
"_input_type": "MessageTextInput", | |
"advanced": false, | |
"display_name": "Expression", | |
"dynamic": false, | |
"info": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "expression", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "CalculatorTool" | |
}, | |
"dragging": false, | |
"height": 254, | |
"id": "CalculatorTool-xo5ux", | |
"position": { | |
"x": 415.51528601650625, | |
"y": -603.8178818852236 | |
}, | |
"positionAbsolute": { | |
"x": 415.51528601650625, | |
"y": -603.8178818852236 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "**Tavily AI** is a search engine optimized for LLMs and RAG, aimed at efficient, quick, and persistent search results. It can be used independently or as an agent tool.\n\nNote: Check 'Advanced' for all options.\n", | |
"display_name": "Tavily AI Search", | |
"id": "TavilyAISearch-YfG8u", | |
"node": { | |
"base_classes": [ | |
"Data", | |
"Tool" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "**Tavily AI** is a search engine optimized for LLMs and RAG, aimed at efficient, quick, and persistent search results. It can be used independently or as an agent tool.\n\nNote: Check 'Advanced' for all options.\n", | |
"display_name": "Tavily AI Search", | |
"documentation": "https://docs.tavily.com/", | |
"edited": false, | |
"field_order": [ | |
"api_key", | |
"query", | |
"search_depth", | |
"topic", | |
"max_results", | |
"include_images", | |
"include_answer" | |
], | |
"frozen": false, | |
"icon": "TavilyIcon", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Data", | |
"method": "run_model", | |
"name": "api_run_model", | |
"required_inputs": [ | |
"api_key" | |
], | |
"selected": "Data", | |
"types": [ | |
"Data" | |
], | |
"value": "__UNDEFINED__" | |
}, | |
{ | |
"cache": true, | |
"display_name": "Tool", | |
"method": "build_tool", | |
"name": "api_build_tool", | |
"required_inputs": [ | |
"api_key" | |
], | |
"selected": "Tool", | |
"types": [ | |
"Tool" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"api_key": { | |
"_input_type": "SecretStrInput", | |
"advanced": false, | |
"display_name": "Tavily API Key", | |
"dynamic": false, | |
"info": "Your Tavily API Key.", | |
"input_types": [ | |
"Message" | |
], | |
"load_from_db": true, | |
"name": "api_key", | |
"password": true, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "str", | |
"value": "" | |
}, | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from enum import Enum\n\nimport httpx\nfrom langchain.tools import StructuredTool\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass TavilySearchDepth(Enum):\n BASIC = \"basic\"\n ADVANCED = \"advanced\"\n\n\nclass TavilySearchTopic(Enum):\n GENERAL = \"general\"\n NEWS = \"news\"\n\n\nclass TavilySearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query you want to execute with Tavily.\")\n search_depth: TavilySearchDepth = Field(TavilySearchDepth.BASIC, description=\"The depth of the search.\")\n topic: TavilySearchTopic = Field(TavilySearchTopic.GENERAL, description=\"The category of the search.\")\n max_results: int = Field(5, description=\"The maximum number of search results to return.\")\n include_images: bool = Field(default=False, description=\"Include a list of query-related images in the response.\")\n include_answer: bool = Field(default=False, description=\"Include a short answer to original query.\")\n\n\nclass TavilySearchToolComponent(LCToolComponent):\n display_name = \"Tavily AI Search\"\n description = \"\"\"**Tavily AI** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results. It can be used independently or as an agent tool.\n\nNote: Check 'Advanced' for all options.\n\"\"\"\n icon = \"TavilyIcon\"\n name = \"TavilyAISearch\"\n documentation = \"https://docs.tavily.com/\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=list(TavilySearchDepth),\n value=TavilySearchDepth.ADVANCED,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=list(TavilySearchTopic),\n value=TavilySearchTopic.GENERAL,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n ]\n\n def run_model(self) -> list[Data]:\n # Convert string values to enum instances with validation\n try:\n search_depth_enum = (\n self.search_depth\n if isinstance(self.search_depth, TavilySearchDepth)\n else TavilySearchDepth(str(self.search_depth).lower())\n )\n except ValueError as e:\n error_message = f\"Invalid search depth value: {e!s}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n\n try:\n topic_enum = (\n self.topic if isinstance(self.topic, TavilySearchTopic) else TavilySearchTopic(str(self.topic).lower())\n )\n except ValueError as e:\n error_message = f\"Invalid topic value: {e!s}\"\n 
self.status = error_message\n return [Data(data={\"error\": error_message})]\n\n return self._tavily_search(\n self.query,\n search_depth=search_depth_enum,\n topic=topic_enum,\n max_results=self.max_results,\n include_images=self.include_images,\n include_answer=self.include_answer,\n )\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"tavily_search\",\n description=\"Perform a web search using the Tavily API.\",\n func=self._tavily_search,\n args_schema=TavilySearchSchema,\n )\n\n def _tavily_search(\n self,\n query: str,\n *,\n search_depth: TavilySearchDepth = TavilySearchDepth.BASIC,\n topic: TavilySearchTopic = TavilySearchTopic.GENERAL,\n max_results: int = 5,\n include_images: bool = False,\n include_answer: bool = False,\n ) -> list[Data]:\n # Validate enum values\n if not isinstance(search_depth, TavilySearchDepth):\n msg = f\"Invalid search_depth value: {search_depth}\"\n raise TypeError(msg)\n if not isinstance(topic, TavilySearchTopic):\n msg = f\"Invalid topic value: {topic}\"\n raise TypeError(msg)\n\n try:\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n payload = {\n \"api_key\": self.api_key,\n \"query\": query,\n \"search_depth\": search_depth.value,\n \"topic\": topic.value,\n \"max_results\": max_results,\n \"include_images\": include_images,\n \"include_answer\": include_answer,\n }\n\n with httpx.Client() as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = [\n Data(\n data={\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": result.get(\"content\"),\n \"score\": result.get(\"score\"),\n }\n )\n for result in search_results.get(\"results\", [])\n ]\n\n if include_answer and search_results.get(\"answer\"):\n data_results.insert(0, Data(data={\"answer\": search_results[\"answer\"]}))\n\n if include_images and search_results.get(\"images\"):\n data_results.append(Data(data={\"images\": search_results[\"images\"]}))\n\n self.status = data_results # type: ignore[assignment]\n\n except httpx.HTTPStatusError as e:\n error_message = f\"HTTP error: {e.response.status_code} - {e.response.text}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n except Exception as e:\n error_message = f\"Unexpected error: {e}\"\n logger.opt(exception=True).debug(\"Error running Tavily Search\")\n self.status = error_message\n raise ToolException(error_message) from e\n return data_results\n" | |
}, | |
"include_answer": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Include Answer", | |
"dynamic": false, | |
"info": "Include a short answer to original query.", | |
"list": false, | |
"name": "include_answer", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"include_images": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Include Images", | |
"dynamic": false, | |
"info": "Include a list of query-related images in the response.", | |
"list": false, | |
"name": "include_images", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"max_results": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Results", | |
"dynamic": false, | |
"info": "The maximum number of search results to return.", | |
"list": false, | |
"name": "max_results", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 5 | |
}, | |
"query": { | |
"_input_type": "MessageTextInput", | |
"advanced": false, | |
"display_name": "Search Query", | |
"dynamic": false, | |
"info": "The search query you want to execute with Tavily.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "query", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"search_depth": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Search Depth", | |
"dynamic": false, | |
"info": "The depth of the search.", | |
"load_from_db": false, | |
"name": "search_depth", | |
"options": [ | |
"basic", | |
"advanced" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "advanced" | |
}, | |
"topic": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Search Topic", | |
"dynamic": false, | |
"info": "The category of the search.", | |
"load_from_db": false, | |
"name": "topic", | |
"options": [ | |
"general", | |
"news" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "general" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "TavilyAISearch" | |
}, | |
"dragging": false, | |
"height": 394, | |
"id": "TavilyAISearch-YfG8u", | |
"position": { | |
"x": -1132.8634419233736, | |
"y": -770.0391255413992 | |
}, | |
"positionAbsolute": { | |
"x": -1132.8634419233736, | |
"y": -770.0391255413992 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"id": "note-8beLl", | |
"node": { | |
"description": "# Sequential Tasks Agents\n\n## Overview\nThis flow demonstrates how to chain multiple AI agents for comprehensive research and analysis. Each agent specializes in different aspects of the research process, building upon the previous agent's work.\n\n## How to Use the Flow\n\n1. **Input Your Query** 🎯\n - Be specific and clear\n - Include key aspects you want analyzed\n - Examples:\n ```\n Good: \"Should I invest in Tesla (TSLA)? Focus on AI development impact\"\n Bad: \"Tell me about Tesla\"\n ```\n\n2. **Research Agent Process** 🔍\n - Utilizes Tavily Search for comprehensive research\n\n\n3. **Specialized Analysis** 📊\n - Each agent adds unique value:\n ```\n Research Agent → Deep Research & Context\n ↓\n Finance Agent → Data Analysis & Metrics\n ↓\n Editor Agent → Final Synthesis & Report\n ```\n\n4. **Output Format** 📝\n - Structured report\n - Embedded images and charts\n - Data-backed insights\n - Clear recommendations\n\n## Pro Tips\n\n### Query Construction\n- Include specific points of interest\n- Mention required metrics or data points\n- Specify time frames if relevant\n\n### Flow Customization\n- Modify agent prompts for different use cases\n- Add or remove tools as needed\n\n## Common Applications\n- Investment Research\n- Market Analysis\n- Competitive Intelligence\n- Industry Reports\n- Technology Impact Studies\n\n⚡ **Best Practice**: Start with a test query to understand the flow's capabilities before running complex analyses.\n\n---\n*Note: This flow template uses financial analysis as an example but can be adapted for any research-intensive task requiring multiple perspectives and data sources.*", | |
"display_name": "", | |
"documentation": "", | |
"template": {} | |
}, | |
"type": "note" | |
}, | |
"dragging": false, | |
"height": 800, | |
"id": "note-8beLl", | |
"position": { | |
"x": -2122.739127560837, | |
"y": -1302.6582482086806 | |
}, | |
"positionAbsolute": { | |
"x": -2122.739127560837, | |
"y": -1302.6582482086806 | |
}, | |
"resizing": false, | |
"selected": false, | |
"style": { | |
"height": 800, | |
"width": 600 | |
}, | |
"type": "noteNode", | |
"width": 600 | |
}, | |
{ | |
"data": { | |
"id": "note-tB2J2", | |
"node": { | |
"description": "## What Are Sequential Task Agents?\nA system where multiple AI agents work in sequence, each specializing in specific tasks and passing their output to the next agent in the chain. Think of it as an assembly line where each agent adds value to the final result.\n\n## How It Works\n1. **First Agent** → **Second Agent** → **Third Agent** → **Final Output**\n - Each agent receives input from the previous one\n - Processes and enhances the information\n - Passes refined output forward\n\n## Key Benefits\n- **Specialization**: Each agent focuses on specific tasks\n- **Progressive Refinement**: Information gets enhanced at each step\n- **Structured Output**: Final result combines multiple perspectives\n- **Quality Control**: Each agent validates and improves previous work\n\n## Building Your Own Sequence\n1. **Plan Your Chain**\n - Identify distinct tasks\n - Determine logical order\n - Define input/output requirements\n\n2. **Configure Agents**\n - Give each agent clear instructions\n - Ensure compatible outputs/inputs\n - Set appropriate tools for each agent\n\n3. **Connect the Flow**\n - Link agents in proper order\n - Test data flow between agents\n - Verify final output format\n\n## Example Applications\n- Research → Analysis → Report Writing\n- Data Collection → Processing → Visualization\n- Content Research → Writing → Editing\n- Market Analysis → Financial Review → Investment Advice\n\n⭐ **Pro Tip**: The strength of sequential agents comes from how well they complement each other's capabilities.\n\nThis template uses financial analysis as an example, but you can adapt it for any multi-step process requiring different expertise at each stage.", | |
"display_name": "", | |
"documentation": "", | |
"template": { | |
"backgroundColor": "blue" | |
} | |
}, | |
"type": "note" | |
}, | |
"dragging": false, | |
"height": 800, | |
"id": "note-tB2J2", | |
"position": { | |
"x": -1456.0688717707517, | |
"y": -1916.6876704866322 | |
}, | |
"positionAbsolute": { | |
"x": -1456.0688717707517, | |
"y": -1916.6876704866322 | |
}, | |
"resizing": false, | |
"selected": false, | |
"style": { | |
"height": 800, | |
"width": 600 | |
}, | |
"type": "noteNode", | |
"width": 600 | |
}, | |
{ | |
"data": { | |
"id": "YahooFinanceTool-YmOKx", | |
"node": { | |
"base_classes": [ | |
"Data", | |
"Tool" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Access financial data and market information using Yahoo Finance.", | |
"display_name": "Yahoo Finance", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"symbol", | |
"method", | |
"num_news" | |
], | |
"frozen": false, | |
"icon": "trending-up", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Data", | |
"method": "run_model", | |
"name": "api_run_model", | |
"required_inputs": [], | |
"selected": "Data", | |
"types": [ | |
"Data" | |
], | |
"value": "__UNDEFINED__" | |
}, | |
{ | |
"cache": true, | |
"display_name": "Tool", | |
"method": "build_tool", | |
"name": "api_build_tool", | |
"required_inputs": [], | |
"selected": "Tool", | |
"types": [ | |
"Tool" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "import ast\nimport pprint\nfrom enum import Enum\n\nimport yfinance as yf\nfrom langchain.tools import StructuredTool\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import DropdownInput, IntInput, MessageTextInput\nfrom langflow.schema import Data\n\n\nclass YahooFinanceMethod(Enum):\n GET_INFO = \"get_info\"\n GET_NEWS = \"get_news\"\n GET_ACTIONS = \"get_actions\"\n GET_ANALYSIS = \"get_analysis\"\n GET_BALANCE_SHEET = \"get_balance_sheet\"\n GET_CALENDAR = \"get_calendar\"\n GET_CASHFLOW = \"get_cashflow\"\n GET_INSTITUTIONAL_HOLDERS = \"get_institutional_holders\"\n GET_RECOMMENDATIONS = \"get_recommendations\"\n GET_SUSTAINABILITY = \"get_sustainability\"\n GET_MAJOR_HOLDERS = \"get_major_holders\"\n GET_MUTUALFUND_HOLDERS = \"get_mutualfund_holders\"\n GET_INSIDER_PURCHASES = \"get_insider_purchases\"\n GET_INSIDER_TRANSACTIONS = \"get_insider_transactions\"\n GET_INSIDER_ROSTER_HOLDERS = \"get_insider_roster_holders\"\n GET_DIVIDENDS = \"get_dividends\"\n GET_CAPITAL_GAINS = \"get_capital_gains\"\n GET_SPLITS = \"get_splits\"\n GET_SHARES = \"get_shares\"\n GET_FAST_INFO = \"get_fast_info\"\n GET_SEC_FILINGS = \"get_sec_filings\"\n GET_RECOMMENDATIONS_SUMMARY = \"get_recommendations_summary\"\n GET_UPGRADES_DOWNGRADES = \"get_upgrades_downgrades\"\n GET_EARNINGS = \"get_earnings\"\n GET_INCOME_STMT = \"get_income_stmt\"\n\n\nclass YahooFinanceSchema(BaseModel):\n symbol: str = Field(..., description=\"The stock symbol to retrieve data for.\")\n method: YahooFinanceMethod = Field(YahooFinanceMethod.GET_INFO, description=\"The type of data to retrieve.\")\n num_news: int | None = Field(5, description=\"The number of news articles to retrieve.\")\n\n\nclass YfinanceToolComponent(LCToolComponent):\n display_name = \"Yahoo Finance\"\n description = \"\"\"Uses [yfinance](https://pypi.org/project/yfinance/) (unofficial package) \\\nto access financial data and market information from Yahoo Finance.\"\"\"\n icon = \"trending-up\"\n name = \"YahooFinanceTool\"\n\n inputs = [\n MessageTextInput(\n name=\"symbol\",\n display_name=\"Stock Symbol\",\n info=\"The stock symbol to retrieve data for (e.g., AAPL, GOOG).\",\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Data Method\",\n info=\"The type of data to retrieve.\",\n options=list(YahooFinanceMethod),\n value=\"get_news\",\n ),\n IntInput(\n name=\"num_news\",\n display_name=\"Number of News\",\n info=\"The number of news articles to retrieve (only applicable for get_news).\",\n value=5,\n ),\n ]\n\n def run_model(self) -> list[Data]:\n return self._yahoo_finance_tool(\n self.symbol,\n self.method,\n self.num_news,\n )\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"yahoo_finance\",\n description=\"Access financial data and market information from Yahoo Finance.\",\n func=self._yahoo_finance_tool,\n args_schema=YahooFinanceSchema,\n )\n\n def _yahoo_finance_tool(\n self,\n symbol: str,\n method: YahooFinanceMethod,\n num_news: int | None = 5,\n ) -> list[Data]:\n ticker = yf.Ticker(symbol)\n\n try:\n if method == YahooFinanceMethod.GET_INFO:\n result = ticker.info\n elif method == YahooFinanceMethod.GET_NEWS:\n result = ticker.news[:num_news]\n else:\n result = getattr(ticker, method.value)()\n\n result = pprint.pformat(result)\n\n if method == YahooFinanceMethod.GET_NEWS:\n data_list = 
[Data(data=article) for article in ast.literal_eval(result)]\n else:\n data_list = [Data(data={\"result\": result})]\n\n except Exception as e:\n error_message = f\"Error retrieving data: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n return data_list\n" | |
}, | |
"method": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Data Method", | |
"dynamic": false, | |
"info": "The type of data to retrieve.", | |
"name": "method", | |
"options": [ | |
"get_info", | |
"get_news", | |
"get_actions", | |
"get_analysis", | |
"get_balance_sheet", | |
"get_calendar", | |
"get_cashflow", | |
"get_institutional_holders", | |
"get_recommendations", | |
"get_sustainability", | |
"get_major_holders", | |
"get_mutualfund_holders", | |
"get_insider_purchases", | |
"get_insider_transactions", | |
"get_insider_roster_holders", | |
"get_dividends", | |
"get_capital_gains", | |
"get_splits", | |
"get_shares", | |
"get_fast_info", | |
"get_sec_filings", | |
"get_recommendations_summary", | |
"get_upgrades_downgrades", | |
"get_earnings", | |
"get_income_stmt" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "get_news" | |
}, | |
"num_news": { | |
"_input_type": "IntInput", | |
"advanced": false, | |
"display_name": "Number of News", | |
"dynamic": false, | |
"info": "The number of news articles to retrieve (only applicable for get_news).", | |
"list": false, | |
"name": "num_news", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 5 | |
}, | |
"symbol": { | |
"_input_type": "MessageTextInput", | |
"advanced": false, | |
"display_name": "Stock Symbol", | |
"dynamic": false, | |
"info": "The stock symbol to retrieve data for (e.g., AAPL, GOOG).", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "symbol", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "AAPL" | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "YahooFinanceTool" | |
}, | |
"dragging": false, | |
"height": 475, | |
"id": "YahooFinanceTool-YmOKx", | |
"position": { | |
"x": -338.2658218008318, | |
"y": -945.7435123503128 | |
}, | |
"positionAbsolute": { | |
"x": -338.2658218008318, | |
"y": -945.7435123503128 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"description": "Define the agent's instructions, then enter a task to complete using tools.", | |
"display_name": "Researcher Agent", | |
"id": "Agent-uaR2o", | |
"node": { | |
"base_classes": [ | |
"Message" | |
], | |
"beta": false, | |
"conditional_paths": [], | |
"custom_fields": {}, | |
"description": "Define the agent's instructions, then enter a task to complete using tools.", | |
"display_name": "Researcher Agent", | |
"documentation": "", | |
"edited": false, | |
"field_order": [ | |
"agent_llm", | |
"max_tokens", | |
"model_kwargs", | |
"json_mode", | |
"output_schema", | |
"model_name", | |
"openai_api_base", | |
"api_key", | |
"temperature", | |
"seed", | |
"output_parser", | |
"system_prompt", | |
"tools", | |
"input_value", | |
"handle_parsing_errors", | |
"verbose", | |
"max_iterations", | |
"agent_description", | |
"memory", | |
"sender", | |
"sender_name", | |
"n_messages", | |
"session_id", | |
"order", | |
"template", | |
"add_current_date_tool" | |
], | |
"frozen": true, | |
"icon": "bot", | |
"legacy": false, | |
"lf_version": "1.0.19.post2", | |
"metadata": {}, | |
"output_types": [], | |
"outputs": [ | |
{ | |
"cache": true, | |
"display_name": "Response", | |
"method": "message_response", | |
"name": "response", | |
"selected": "Message", | |
"types": [ | |
"Message" | |
], | |
"value": "__UNDEFINED__" | |
} | |
], | |
"pinned": false, | |
"template": { | |
"_type": "Component", | |
"add_current_date_tool": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Current Date", | |
"dynamic": false, | |
"info": "If true, will add a tool to the agent that returns the current date.", | |
"list": false, | |
"name": "add_current_date_tool", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"agent_description": { | |
"_input_type": "MultilineInput", | |
"advanced": true, | |
"display_name": "Agent Description", | |
"dynamic": false, | |
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "agent_description", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "A helpful assistant with access to the following tools:" | |
}, | |
"agent_llm": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Model Provider", | |
"dynamic": false, | |
"info": "The provider of the language model that the agent will use to generate responses.", | |
"input_types": [], | |
"name": "agent_llm", | |
"options": [ | |
"Amazon Bedrock", | |
"Anthropic", | |
"Azure OpenAI", | |
"Groq", | |
"NVIDIA", | |
"OpenAI", | |
"Custom" | |
], | |
"placeholder": "", | |
"real_time_refresh": true, | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "OpenAI" | |
}, | |
"api_key": { | |
"_input_type": "SecretStrInput", | |
"advanced": false, | |
"display_name": "OpenAI API Key", | |
"dynamic": false, | |
"info": "The OpenAI API Key to use for the OpenAI model.", | |
"input_types": [ | |
"Message" | |
], | |
"load_from_db": false, | |
"name": "api_key", | |
"password": true, | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"type": "str", | |
"value": "OPENAI_API_KEY" | |
}, | |
"code": { | |
"advanced": true, | |
"dynamic": true, | |
"fileTypes": [], | |
"file_path": "", | |
"info": "", | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "code", | |
"password": false, | |
"placeholder": "", | |
"required": true, | |
"show": true, | |
"title_case": false, | |
"type": "code", | |
"value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n llm_model, display_name = self.get_llm()\n self.model_name = get_model_name(llm_model, display_name=display_name)\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = await self.get_memory_data()\n\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise ValueError(msg)\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n raise ValueError(msg)\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = 
MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n 
\"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n return build_config\n" | |
}, | |
"handle_parsing_errors": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Handle Parse Errors", | |
"dynamic": false, | |
"info": "Should the Agent fix errors when reading user input for better processing?", | |
"list": false, | |
"name": "handle_parsing_errors", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
}, | |
"input_value": { | |
"_input_type": "MessageTextInput", | |
"advanced": false, | |
"display_name": "Input", | |
"dynamic": false, | |
"info": "The input provided by the user for the agent to process.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "input_value", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": true, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"json_mode": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "JSON Mode", | |
"dynamic": false, | |
"info": "If True, it will output JSON regardless of passing a schema.", | |
"list": false, | |
"name": "json_mode", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": false | |
}, | |
"max_iterations": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Iterations", | |
"dynamic": false, | |
"info": "The maximum number of attempts the agent can make to complete its task before it stops.", | |
"list": false, | |
"name": "max_iterations", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 15 | |
}, | |
"max_tokens": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Max Tokens", | |
"dynamic": false, | |
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", | |
"list": false, | |
"name": "max_tokens", | |
"placeholder": "", | |
"range_spec": { | |
"max": 128000, | |
"min": 0, | |
"step": 0.1, | |
"step_type": "float" | |
}, | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": "" | |
}, | |
"memory": { | |
"_input_type": "HandleInput", | |
"advanced": true, | |
"display_name": "External Memory", | |
"dynamic": false, | |
"info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.", | |
"input_types": [ | |
"BaseChatMessageHistory" | |
], | |
"list": false, | |
"name": "memory", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"model_kwargs": { | |
"_input_type": "DictInput", | |
"advanced": true, | |
"display_name": "Model Kwargs", | |
"dynamic": false, | |
"info": "Additional keyword arguments to pass to the model.", | |
"list": false, | |
"name": "model_kwargs", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"type": "dict", | |
"value": {} | |
}, | |
"model_name": { | |
"_input_type": "DropdownInput", | |
"advanced": false, | |
"combobox": false, | |
"display_name": "Model Name", | |
"dynamic": false, | |
"info": "", | |
"name": "model_name", | |
"options": [ | |
"gpt-4o-mini", | |
"gpt-4o", | |
"gpt-4-turbo", | |
"gpt-4-turbo-preview", | |
"gpt-4", | |
"gpt-3.5-turbo", | |
"gpt-3.5-turbo-0125" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "gpt-4o-mini" | |
}, | |
"n_messages": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Number of Messages", | |
"dynamic": false, | |
"info": "Number of messages to retrieve.", | |
"list": false, | |
"name": "n_messages", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 100 | |
}, | |
"openai_api_base": { | |
"_input_type": "StrInput", | |
"advanced": true, | |
"display_name": "OpenAI API Base", | |
"dynamic": false, | |
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", | |
"list": false, | |
"load_from_db": false, | |
"name": "openai_api_base", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"order": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Order", | |
"dynamic": false, | |
"info": "Order of the messages.", | |
"name": "order", | |
"options": [ | |
"Ascending", | |
"Descending" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Ascending" | |
}, | |
"output_parser": { | |
"_input_type": "HandleInput", | |
"advanced": true, | |
"display_name": "Output Parser", | |
"dynamic": false, | |
"info": "The parser to use to parse the output of the model", | |
"input_types": [ | |
"OutputParser" | |
], | |
"list": false, | |
"name": "output_parser", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"output_schema": { | |
"_input_type": "DictInput", | |
"advanced": true, | |
"display_name": "Schema", | |
"dynamic": false, | |
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]", | |
"list": true, | |
"name": "output_schema", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_input": true, | |
"type": "dict", | |
"value": {} | |
}, | |
"seed": { | |
"_input_type": "IntInput", | |
"advanced": true, | |
"display_name": "Seed", | |
"dynamic": false, | |
"info": "The seed controls the reproducibility of the job.", | |
"list": false, | |
"name": "seed", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "int", | |
"value": 1 | |
}, | |
"sender": { | |
"_input_type": "DropdownInput", | |
"advanced": true, | |
"combobox": false, | |
"display_name": "Sender Type", | |
"dynamic": false, | |
"info": "Filter by sender type.", | |
"name": "sender", | |
"options": [ | |
"Machine", | |
"User", | |
"Machine and User" | |
], | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "Machine and User" | |
}, | |
"sender_name": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Sender Name", | |
"dynamic": false, | |
"info": "Filter by sender name.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "sender_name", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"session_id": { | |
"_input_type": "MessageTextInput", | |
"advanced": true, | |
"display_name": "Session ID", | |
"dynamic": false, | |
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"name": "session_id", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "" | |
}, | |
"system_prompt": { | |
"_input_type": "MultilineInput", | |
"advanced": false, | |
"display_name": "Agent Instructions", | |
"dynamic": false, | |
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "system_prompt", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "You are a helpful assistant that can use tools to answer questions and perform tasks." | |
}, | |
"temperature": { | |
"_input_type": "FloatInput", | |
"advanced": true, | |
"display_name": "Temperature", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"name": "temperature", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "float", | |
"value": 0.1 | |
}, | |
"template": { | |
"_input_type": "MultilineInput", | |
"advanced": true, | |
"display_name": "Template", | |
"dynamic": false, | |
"info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.", | |
"input_types": [ | |
"Message" | |
], | |
"list": false, | |
"load_from_db": false, | |
"multiline": true, | |
"name": "template", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"tool_mode": false, | |
"trace_as_input": true, | |
"trace_as_metadata": true, | |
"type": "str", | |
"value": "{sender_name}: {text}" | |
}, | |
"tools": { | |
"_input_type": "HandleInput", | |
"advanced": false, | |
"display_name": "Tools", | |
"dynamic": false, | |
"info": "These are the tools that the agent can use to help with tasks.", | |
"input_types": [ | |
"Tool", | |
"BaseTool", | |
"StructuredTool" | |
], | |
"list": true, | |
"name": "tools", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "other", | |
"value": "" | |
}, | |
"verbose": { | |
"_input_type": "BoolInput", | |
"advanced": true, | |
"display_name": "Verbose", | |
"dynamic": false, | |
"info": "", | |
"list": false, | |
"name": "verbose", | |
"placeholder": "", | |
"required": false, | |
"show": true, | |
"title_case": false, | |
"trace_as_metadata": true, | |
"type": "bool", | |
"value": true | |
} | |
}, | |
"tool_mode": false | |
}, | |
"type": "Agent" | |
}, | |
"dragging": false, | |
"height": 650, | |
"id": "Agent-uaR2o", | |
"position": { | |
"x": -715.1798010873374, | |
"y": -1342.256094001045 | |
}, | |
"positionAbsolute": { | |
"x": -715.1798010873374, | |
"y": -1342.256094001045 | |
}, | |
"selected": false, | |
"type": "genericNode", | |
"width": 320 | |
}, | |
{ | |
"data": { | |
"id": "note-9ZCze", | |
"node": { | |
"description": "## Get your API key at [https://tavily.com](https://tavily.com)\n", | |
"display_name": "", | |
"documentation": "", | |
"template": { | |
"backgroundColor": "rose" | |
} | |
}, | |
"type": "note" | |
}, | |
"dragging": false, | |
"height": 324, | |
"id": "note-9ZCze", | |
"position": { | |
"x": -1144.3898055225054, | |
"y": -844.3506743985376 | |
}, | |
"positionAbsolute": { | |
"x": -1144.3898055225054, | |
"y": -844.3506743985376 | |
}, | |
"resizing": false, | |
"selected": false, | |
"style": { | |
"height": 324, | |
"width": 347 | |
}, | |
"type": "noteNode", | |
"width": 347 | |
}, | |
{ | |
"data": { | |
"id": "note-ynS9s", | |
"node": { | |
"description": "## Configure the agent by obtaining your OpenAI API key from [platform.openai.com](https://platform.openai.com). Under \"Model Provider\", choose:\n- OpenAI: Default, requires only API key\n- Anthropic/Azure/Groq/NVIDIA: Each requires their own API keys\n- Custom: Use your own model endpoint + authentication\n\nSelect model and input API key before running the flow.", | |
"display_name": "", | |
"documentation": "", | |
"template": { | |
"backgroundColor": "rose" | |
} | |
}, | |
"type": "note" | |
}, | |
"dragging": false, | |
"height": 324, | |
"id": "note-ynS9s", | |
"position": { | |
"x": -739.4383746675942, | |
"y": -1672.0874594411662 | |
}, | |
"positionAbsolute": { | |
"x": -739.4383746675942, | |
"y": -1672.0874594411662 | |
}, | |
"resizing": false, | |
"selected": false, | |
"style": { | |
"height": 324, | |
"width": 370 | |
}, | |
"type": "noteNode", | |
"width": 370 | |
} | |
], | |
"viewport": { | |
"x": 988.287937756906, | |
"y": 1011.1045224025538, | |
"zoom": 0.5418943314819052 | |
} | |
}, | |
"description": "starterProjects.sequentialTasks.description", | |
"endpoint_name": null, | |
"gradient": "1", | |
"icon": "ListChecks", | |
"id": "673f26a7-66f4-410a-8ccb-3e635c022023", | |
"is_component": false, | |
"last_tested_version": "1.0.19.post2", | |
"name": "starterProjects.sequentialTasks.name", | |
"tags": [ | |
"assistants", | |
"agents", | |
"web-scraping" | |
] | |
} |