openfree committed on
Commit 7e5c19b · verified · 1 Parent(s): 7b8981c

Upload workflowbuilder.py

src/backend/gradio_workflowbuilder/workflowbuilder.py ADDED
@@ -0,0 +1,492 @@
+ from __future__ import annotations
+
+ from typing import Any, Dict, List, Optional, Union, Callable
+ from pathlib import Path
+ import json
+ from gradio.components import Component
+
+ COMP_DIR = Path(__file__).resolve().parent / "templates" / "component"  # added
+
+ class WorkflowBuilder(Component):
+     """
+     Professional Workflow Builder component with support for 25+ node types,
+     inspired by n8n and Langflow, for AI agent development and MCP integration.
+     """
+
+     # Gradio 4.x: declare event names directly as strings
+     EVENTS = ["change", "input"]
+
+     def __init__(
+         self,
+         value: Optional[Dict[str, Any]] = None,
+         label: Optional[str] = None,
+         info: Optional[str] = None,
+         show_label: Optional[bool] = None,
+         container: bool = True,
+         scale: Optional[int] = None,
+         min_width: int = 160,
+         visible: bool = True,
+         elem_id: Optional[str] = None,
+         elem_classes: Optional[List[str]] = None,
+         render: bool = True,
+         **kwargs,
+     ):
+         """
+         Parameters:
+             value: Default workflow data with nodes and edges
+             label: Component label
+             info: Additional component information
+             show_label: Whether to show the label
+             container: Whether to use container styling
+             scale: Relative width scale
+             min_width: Minimum width in pixels
+             visible: Whether the component is visible
+             elem_id: HTML element ID
+             elem_classes: CSS classes
+             render: Whether to render immediately
+         """
+         # Fall back to an empty workflow when no value is given
+         if value is None:
+             value = {"nodes": [], "edges": []}
+
+         # Validate the workflow data
+         if not isinstance(value, dict):
+             raise ValueError("Workflow value must be a dictionary")
+
+         if "nodes" not in value:
+             value["nodes"] = []
+         if "edges" not in value:
+             value["edges"] = []
+
+         super().__init__(
+             label=label,
+             info=info,
+             show_label=show_label,
+             container=container,
+             scale=scale,
+             min_width=min_width,
+             visible=visible,
+             elem_id=elem_id,
+             elem_classes=elem_classes,
+             render=render,
+             value=value,
+             **kwargs,
+         )
+
+     def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Process workflow data from frontend
+         """
+         if payload is None:
+             return {"nodes": [], "edges": []}
+
+         # Validate and clean the workflow data
+         workflow = self._validate_workflow(payload)
+         return workflow
+
+     def postprocess(self, value: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Process workflow data for frontend
+         """
+         if value is None:
+             return {"nodes": [], "edges": []}
+
+         # Ensure proper structure
+         if not isinstance(value, dict):
+             return {"nodes": [], "edges": []}
+
+         return {
+             "nodes": value.get("nodes", []),
+             "edges": value.get("edges", [])
+         }
+
+     def _validate_workflow(self, workflow: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Validate workflow structure and node configurations
+         """
+         if not isinstance(workflow, dict):
+             return {"nodes": [], "edges": []}
+
+         nodes = workflow.get("nodes", [])
+         edges = workflow.get("edges", [])
+
+         # Validate each node
+         validated_nodes = []
+         for node in nodes:
+             if self._validate_node(node):
+                 validated_nodes.append(node)
+
+         # Validate each edge
+         validated_edges = []
+         node_ids = {node["id"] for node in validated_nodes}
+         for edge in edges:
+             if self._validate_edge(edge, node_ids):
+                 validated_edges.append(edge)
+
+         return {
+             "nodes": validated_nodes,
+             "edges": validated_edges
+         }
+
+     def _validate_node(self, node: Dict[str, Any]) -> bool:
+         """
+         Validate individual node structure and properties
+         """
+         required_fields = ["id", "type", "position", "data"]
+
+         # Check required fields
+         if not all(field in node for field in required_fields):
+             return False
+
+         # Validate node type
+         if not self._is_valid_node_type(node["type"]):
+             return False
+
+         # Validate position
+         position = node["position"]
+         if not isinstance(position, dict) or "x" not in position or "y" not in position:
+             return False
+
+         # Validate node data based on type
+         return self._validate_node_data(node["type"], node["data"])
+
+     def _validate_edge(self, edge: Dict[str, Any], valid_node_ids: set) -> bool:
+         """
+         Validate edge connections
+         """
+         required_fields = ["id", "source", "target"]
+
+         if not all(field in edge for field in required_fields):
+             return False
+
+         # Check if source and target nodes exist
+         return (edge["source"] in valid_node_ids and
+                 edge["target"] in valid_node_ids)
+
+     def _is_valid_node_type(self, node_type: str) -> bool:
+         """
+         Check whether a node type is supported
+         """
+         # All node types supported by the frontend
+         supported_types = {
+             # [CUSTOM] ------------------------------------------------------
+             "llmNode",   # general-purpose LLM node (AI Processing)
+             "textNode",  # simple Markdown/Text node
+             # ----------------------------------------------------------------
+
+             # Input/Output Nodes
+             "ChatInput", "ChatOutput", "Input", "Output",
+
+             # AI & Language Models
+             "OpenAIModel", "ChatModel", "Prompt", "HFTextGeneration",
+
+             # API & Web
+             "APIRequest", "WebSearch",
+
+             # Data Processing
+             "ExecutePython", "ConditionalLogic", "Wait",
+
+             # RAG & Knowledge
+             "KnowledgeBase", "RAGQuery",
+
+             # Speech & Vision
+             "HFSpeechToText", "HFTextToSpeech", "HFVisionModel",
+
+             # Image Generation
+             "HFImageGeneration", "NebiusImage",
+
+             # MCP Integration
+             "MCPConnection", "MCPAgent",
+
+             # Legacy types (for backward compatibility)
+             "textInput", "fileInput", "numberInput", "llm", "textProcessor",
+             "conditional", "textOutput", "fileOutput", "chartOutput",
+             "apiCall", "dataTransform", "webhook", "schedule", "manualTrigger",
+             "emailTrigger", "httpRequest", "googleSheets", "database", "csvFile",
+             "openaiChat", "claudeChat", "huggingFace", "textEmbedding",
+             "codeNode", "functionNode", "setNode", "jsonParse",
+             "ifCondition", "switchNode", "merge", "waitNode",
+             "email", "slack", "discord", "telegram",
+             "fileUpload", "awsS3", "googleDrive", "ftp",
+             "dateTime", "crypto", "validator", "regex"
+         }
+
+         return node_type in supported_types
+
+     def _validate_node_data(self, node_type: str, data: Dict[str, Any]) -> bool:
+         """
+         Validate node data based on the node type
+         """
+         if not isinstance(data, dict):
+             return False
+
+         # Define required fields for each node type
+         required_fields = {
+             # [CUSTOM] ------------------------------------------------------
+             "llmNode": ["template"],   # provider, model, etc. live inside the template
+             "textNode": ["template"],  # { "text": {...} }
+             # ----------------------------------------------------------------
+
+             # Input/Output Nodes
+             "ChatInput": ["display_name", "template"],
+             "ChatOutput": ["display_name", "template"],
+             "Input": ["display_name", "template"],
+             "Output": ["display_name", "template"],
+
+             # AI & Language Models
+             "OpenAIModel": ["display_name", "template"],
+             "ChatModel": ["display_name", "template"],
+             "Prompt": ["display_name", "template"],
+             "HFTextGeneration": ["display_name", "template"],
+
+             # API & Web
+             "APIRequest": ["display_name", "template"],
+             "WebSearch": ["display_name", "template"],
+
+             # Data Processing
+             "ExecutePython": ["display_name", "template"],
+             "ConditionalLogic": ["display_name", "template"],
+             "Wait": ["display_name", "template"],
+
+             # RAG & Knowledge
+             "KnowledgeBase": ["display_name", "template"],
+             "RAGQuery": ["display_name", "template"],
+
+             # Speech & Vision
+             "HFSpeechToText": ["display_name", "template"],
+             "HFTextToSpeech": ["display_name", "template"],
+             "HFVisionModel": ["display_name", "template"],
+
+             # Image Generation
+             "HFImageGeneration": ["display_name", "template"],
+             "NebiusImage": ["display_name", "template"],
+
+             # MCP Integration
+             "MCPConnection": ["display_name", "template"],
+             "MCPAgent": ["display_name", "template"],
+
+             # Legacy types
+             "webhook": ["method", "path"],
+             "httpRequest": ["method", "url"],
+             "openaiChat": ["model"],
+             "claudeChat": ["model"],
+             "codeNode": ["language", "code"],
+             "ifCondition": ["conditions"],
+             "email": ["fromEmail", "toEmail", "subject"],
+             "awsS3": ["operation", "bucketName"]
+         }
+
+         # Check required fields for this node type
+         if node_type in required_fields:
+             required = required_fields[node_type]
+             if not all(field in data for field in required):
+                 return False
+
+         return True
+
+     def api_info(self) -> Dict[str, Any]:
+         """
+         API information for the component
+         """
+         return {
+             "info": {
+                 "type": "object",
+                 "properties": {
+                     "nodes": {
+                         "type": "array",
+                         "items": {
+                             "type": "object",
+                             "properties": {
+                                 "id": {"type": "string"},
+                                 "type": {"type": "string"},
+                                 "position": {
+                                     "type": "object",
+                                     "properties": {
+                                         "x": {"type": "number"},
+                                         "y": {"type": "number"}
+                                     }
+                                 },
+                                 "data": {"type": "object"}
+                             }
+                         }
+                     },
+                     "edges": {
+                         "type": "array",
+                         "items": {
+                             "type": "object",
+                             "properties": {
+                                 "id": {"type": "string"},
+                                 "source": {"type": "string"},
+                                 "target": {"type": "string"}
+                             }
+                         }
+                     }
+                 }
+             }
+         }
+
+     def example_payload(self) -> Dict[str, Any]:
+         """
+         Example payload for the component
+         """
+         return {"nodes": [], "edges": []}  # return an empty workflow
+
+     def example_value(self) -> Dict[str, Any]:
+         """
+         Example value for the component
+         """
+         return {"nodes": [], "edges": []}  # return an empty workflow
+
+
+ # Utility class for workflow analysis and execution checks
+ class WorkflowAnalyzer:
+     """
+     Analyze workflow configurations and provide insights
+     """
+
+     @staticmethod
+     def analyze_workflow(workflow: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Provide a detailed analysis of a workflow
+         """
+         nodes = workflow.get("nodes", [])
+         edges = workflow.get("edges", [])
+
+         # Count node types
+         node_types = {}
+         for node in nodes:
+             node_type = node.get("type", "unknown")
+             node_types[node_type] = node_types.get(node_type, 0) + 1
+
+         # Analyze workflow complexity
+         complexity = "Simple"
+         if len(nodes) > 10:
+             complexity = "Complex"
+         elif len(nodes) > 5:
+             complexity = "Medium"
+
+         # Check for potential issues
+         issues = []
+
+         # Check for disconnected nodes
+         connected_nodes = set()
+         for edge in edges:
+             connected_nodes.add(edge["source"])
+             connected_nodes.add(edge["target"])
+
+         disconnected = [node["id"] for node in nodes if node["id"] not in connected_nodes]
+         if disconnected:
+             issues.append(f"Disconnected nodes: {', '.join(disconnected)}")
+
+         # Check for missing required fields and API keys
+         for node in nodes:
+             node_type = node.get("type")
+             data = node.get("data", {})
+
+             # Check for required API keys
+             if node_type == "OpenAIModel" and not data.get("template", {}).get("api_key", {}).get("value"):
+                 issues.append(f"Node {node['id']} missing OpenAI API key")
+             elif node_type == "ChatModel" and not data.get("template", {}).get("api_key", {}).get("value"):
+                 issues.append(f"Node {node['id']} missing API key")
+             elif node_type == "NebiusImage" and not data.get("template", {}).get("api_key", {}).get("value"):
+                 issues.append(f"Node {node['id']} missing Nebius API key")
+
+             # Check for required model configurations
+             if node_type in ["OpenAIModel", "ChatModel", "HFTextGeneration"] and not data.get("template", {}).get("model", {}).get("value"):
+                 issues.append(f"Node {node['id']} missing model configuration")
+
+             # Check for required templates
+             if node_type in ["Prompt", "ChatInput", "ChatOutput"] and not data.get("template"):
+                 issues.append(f"Node {node['id']} missing template configuration")
+
+         # Analyze node categories
+         input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "Input"]]
+         processing_nodes = [n for n in nodes if n.get("type") in [
+             "OpenAIModel", "ChatModel", "Prompt", "HFTextGeneration",
+             "ExecutePython", "ConditionalLogic", "Wait", "APIRequest",
+             "WebSearch", "KnowledgeBase", "RAGQuery"
+         ]]
+         output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "Output"]]
+         ai_nodes = [n for n in nodes if n.get("type") in [
+             "OpenAIModel", "ChatModel", "HFTextGeneration", "HFImageGeneration",
+             "NebiusImage", "HFSpeechToText", "HFTextToSpeech", "HFVisionModel"
+         ]]
+
+         return {
+             "total_nodes": len(nodes),
+             "total_edges": len(edges),
+             "node_types": node_types,
+             "complexity": complexity,
+             "issues": issues,
+             "is_valid": len(issues) == 0,
+             "categories": {
+                 "input_nodes": len(input_nodes),
+                 "processing_nodes": len(processing_nodes),
+                 "output_nodes": len(output_nodes),
+                 "ai_nodes": len(ai_nodes)
+             }
+         }
+
+     @staticmethod
+     def validate_for_execution(workflow: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Check whether a workflow is ready for execution
+         """
+         analysis = WorkflowAnalyzer.analyze_workflow(workflow)
+
+         # Additional execution-specific checks
+         nodes = workflow.get("nodes", [])
+
+         # Check for entry points (input nodes)
+         input_types = {"ChatInput", "Input"}
+         inputs = [n for n in nodes if n.get("type") in input_types]
+
+         if not inputs:
+             analysis["issues"].append("No input nodes found - workflow needs an entry point")
+
+         # Check for output nodes
+         output_types = {"ChatOutput", "Output"}
+         outputs = [n for n in nodes if n.get("type") in output_types]
+
+         if not outputs:
+             analysis["issues"].append("No output nodes found - workflow needs an exit point")
+
+         # Check for required environment variables
+         env_vars = set()
+         for node in nodes:
+             data = node.get("data", {})
+             template = data.get("template", {})
+             for field in template.values():
+                 if isinstance(field, dict) and field.get("type") == "SecretStr":
+                     env_var = field.get("env_var")
+                     if env_var:
+                         env_vars.add(env_var)
+
+         if env_vars:
+             analysis["required_env_vars"] = list(env_vars)
+
+         analysis["is_executable"] = len(analysis["issues"]) == 0
+
+         return analysis
+
+
+ # Export the public component and analyzer
+ __all__ = ["WorkflowBuilder", "WorkflowAnalyzer"]
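
Usage sketch (not part of this commit): the snippet below assumes the package root re-exports WorkflowBuilder and WorkflowAnalyzer under the name gradio_workflowbuilder, which is the usual layout for a Gradio custom component; the sample node and edge shapes simply follow the validation rules in _validate_node and _validate_edge above, and the event name "change" is one of the strings declared in EVENTS.

# Hedged usage sketch - import path and re-exports are assumptions, not confirmed by this commit.
import gradio as gr
from gradio_workflowbuilder import WorkflowBuilder, WorkflowAnalyzer

# Minimal workflow that passes validation: a ChatInput wired to a ChatOutput.
sample_workflow = {
    "nodes": [
        {
            "id": "chat-in-1",
            "type": "ChatInput",
            "position": {"x": 100, "y": 100},
            "data": {"display_name": "Chat Input", "template": {}},
        },
        {
            "id": "chat-out-1",
            "type": "ChatOutput",
            "position": {"x": 400, "y": 100},
            "data": {"display_name": "Chat Output", "template": {}},
        },
    ],
    "edges": [
        {"id": "edge-1", "source": "chat-in-1", "target": "chat-out-1"},
    ],
}

with gr.Blocks() as demo:
    builder = WorkflowBuilder(value=sample_workflow, label="Workflow")
    report = gr.JSON(label="Analysis")
    # Re-run the analyzer whenever the workflow is edited in the builder.
    builder.change(WorkflowAnalyzer.analyze_workflow, inputs=builder, outputs=report)

demo.launch()

The analysis dict returned by WorkflowAnalyzer.analyze_workflow reports node counts, complexity, category breakdowns, and any issues such as disconnected nodes or missing API keys, so wiring it to the change event gives live feedback while the workflow is being built.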