openfree committed
Commit 7b8981c · verified · 1 Parent(s): d782a70

Delete src/backend/gradio_workflowbuilder/workflowbuilder.py

src/backend/gradio_workflowbuilder/workflowbuilder.py DELETED
@@ -1,588 +0,0 @@
from __future__ import annotations

from typing import Any, Dict, List, Optional, Union, Callable
from pathlib import Path
import json
from gradio.components import Component

COMP_DIR = Path(__file__).resolve().parent / "templates" / "component"  # ⬅️ added

class WorkflowBuilder(Component):
    """
    Professional Workflow Builder component with support for 25+ node types,
    inspired by n8n and Langflow, for AI agent development and MCP integration.
    """

    # ▶️ Gradio 4.x: declare event names directly as strings
    EVENTS = ["change", "input"]

    def __init__(
        self,
        value: Optional[Dict[str, Any]] = None,
        label: Optional[str] = None,
        info: Optional[str] = None,
        show_label: Optional[bool] = None,
        container: bool = True,
        scale: Optional[int] = None,
        min_width: int = 160,
        visible: bool = True,
        elem_id: Optional[str] = None,
        elem_classes: Optional[List[str]] = None,
        render: bool = True,
        **kwargs,
    ):
        """
        Parameters:
            value: Default workflow data with nodes and edges
            label: Component label
            info: Additional component information
            show_label: Whether to show the label
            container: Whether to use container styling
            scale: Relative width scale
            min_width: Minimum width in pixels
            visible: Whether component is visible
            elem_id: HTML element ID
            elem_classes: CSS classes
            render: Whether to render immediately
        """

        # Initialize with empty workflow if no value provided
        if value is None:
            value = {"nodes": [], "edges": []}

        # Validate the workflow data
        if not isinstance(value, dict):
            raise ValueError("Workflow value must be a dictionary")

        if "nodes" not in value:
            value["nodes"] = []
        if "edges" not in value:
            value["edges"] = []

        super().__init__(
            label=label,
            info=info,
            show_label=show_label,
            container=container,
            scale=scale,
            min_width=min_width,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            render=render,
            value=value,
            **kwargs,
        )

    def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process workflow data from frontend
        """
        if payload is None:
            return {"nodes": [], "edges": []}

        # Validate and clean the workflow data
        workflow = self._validate_workflow(payload)
        return workflow

    def postprocess(self, value: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process workflow data for frontend
        """
        if value is None:
            return {"nodes": [], "edges": []}

        # Ensure proper structure
        if not isinstance(value, dict):
            return {"nodes": [], "edges": []}

        return {
            "nodes": value.get("nodes", []),
            "edges": value.get("edges", [])
        }

    def _validate_workflow(self, workflow: Dict[str, Any]) -> Dict[str, Any]:
        """
        Validate workflow structure and node configurations
        """
        if not isinstance(workflow, dict):
            return {"nodes": [], "edges": []}

        nodes = workflow.get("nodes", [])
        edges = workflow.get("edges", [])

        # Validate each node
        validated_nodes = []
        for node in nodes:
            if self._validate_node(node):
                validated_nodes.append(node)

        # Validate each edge
        validated_edges = []
        node_ids = {node["id"] for node in validated_nodes}
        for edge in edges:
            if self._validate_edge(edge, node_ids):
                validated_edges.append(edge)

        return {
            "nodes": validated_nodes,
            "edges": validated_edges
        }

    def _validate_node(self, node: Dict[str, Any]) -> bool:
        """
        Validate individual node structure and properties
        """
        required_fields = ["id", "type", "position", "data"]

        # Check required fields
        if not all(field in node for field in required_fields):
            return False

        # Validate node type
        if not self._is_valid_node_type(node["type"]):
            return False

        # Validate position
        position = node["position"]
        if not isinstance(position, dict) or "x" not in position or "y" not in position:
            return False

        # Validate node data based on type
        return self._validate_node_data(node["type"], node["data"])

    def _validate_edge(self, edge: Dict[str, Any], valid_node_ids: set) -> bool:
        """
        Validate edge connections
        """
        required_fields = ["id", "source", "target"]

        if not all(field in edge for field in required_fields):
            return False

        # Check if source and target nodes exist
        return (edge["source"] in valid_node_ids and
                edge["target"] in valid_node_ids)

    def _is_valid_node_type(self, node_type: str) -> bool:
        """
        Check if node type is supported
        """
        # All the node types known to the frontend
        supported_types = {
            # 🆕 [CUSTOM] --------------------------------------------------
            "llmNode",   # general-purpose LLM node (AI Processing)
            "textNode",  # simple Markdown/Text node
            # --------------------------------------------------------------

            # Input/Output Nodes
            "ChatInput", "ChatOutput", "Input", "Output",

            # AI & Language Models
            "OpenAIModel", "ChatModel", "Prompt", "HFTextGeneration",

            # API & Web
            "APIRequest", "WebSearch",

            # Data Processing
            "ExecutePython", "ConditionalLogic", "Wait",

            # RAG & Knowledge
            "KnowledgeBase", "RAGQuery",

            # Speech & Vision
            "HFSpeechToText", "HFTextToSpeech", "HFVisionModel",

            # Image Generation
            "HFImageGeneration", "NebiusImage",

            # MCP Integration
            "MCPConnection", "MCPAgent",

            # Legacy types (for backward compatibility)
            "textInput", "fileInput", "numberInput", "llm", "textProcessor",
            "conditional", "textOutput", "fileOutput", "chartOutput",
            "apiCall", "dataTransform", "webhook", "schedule", "manualTrigger",
            "emailTrigger", "httpRequest", "googleSheets", "database", "csvFile",
            "openaiChat", "claudeChat", "huggingFace", "textEmbedding",
            "codeNode", "functionNode", "setNode", "jsonParse",
            "ifCondition", "switchNode", "merge", "waitNode",
            "email", "slack", "discord", "telegram",
            "fileUpload", "awsS3", "googleDrive", "ftp",
            "dateTime", "crypto", "validator", "regex"
        }

        return node_type in supported_types

    def _validate_node_data(self, node_type: str, data: Dict[str, Any]) -> bool:
        """
        Validate node data based on node type
        """
        if not isinstance(data, dict):
            return False

        # Define required fields for each node type
        required_fields = {
            # 🆕 [CUSTOM] --------------------------------------------------
            "llmNode": ["template"],   # provider, model, etc. live inside the template
            "textNode": ["template"],  # { "text": {...} }
            # --------------------------------------------------------------

            # Input/Output Nodes
            "ChatInput": ["display_name", "template"],
            "ChatOutput": ["display_name", "template"],
            "Input": ["display_name", "template"],
            "Output": ["display_name", "template"],

            # AI & Language Models
            "OpenAIModel": ["display_name", "template"],
            "ChatModel": ["display_name", "template"],
            "Prompt": ["display_name", "template"],
            "HFTextGeneration": ["display_name", "template"],

            # API & Web
            "APIRequest": ["display_name", "template"],
            "WebSearch": ["display_name", "template"],

            # Data Processing
            "ExecutePython": ["display_name", "template"],
            "ConditionalLogic": ["display_name", "template"],
            "Wait": ["display_name", "template"],

            # RAG & Knowledge
            "KnowledgeBase": ["display_name", "template"],
            "RAGQuery": ["display_name", "template"],

            # Speech & Vision
            "HFSpeechToText": ["display_name", "template"],
            "HFTextToSpeech": ["display_name", "template"],
            "HFVisionModel": ["display_name", "template"],

            # Image Generation
            "HFImageGeneration": ["display_name", "template"],
            "NebiusImage": ["display_name", "template"],

            # MCP Integration
            "MCPConnection": ["display_name", "template"],
            "MCPAgent": ["display_name", "template"],

            # Legacy types
            "webhook": ["method", "path"],
            "httpRequest": ["method", "url"],
            "openaiChat": ["model"],
            "claudeChat": ["model"],
            "codeNode": ["language", "code"],
            "ifCondition": ["conditions"],
            "email": ["fromEmail", "toEmail", "subject"],
            "awsS3": ["operation", "bucketName"]
        }

        # Check required fields for this node type
        if node_type in required_fields:
            required = required_fields[node_type]
            if not all(field in data for field in required):
                return False

        return True

    def api_info(self) -> Dict[str, Any]:
        """
        API information for the component
        """
        return {
            "info": {
                "type": "object",
                "properties": {
                    "nodes": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "id": {"type": "string"},
                                "type": {"type": "string"},
                                "position": {
                                    "type": "object",
                                    "properties": {
                                        "x": {"type": "number"},
                                        "y": {"type": "number"}
                                    }
                                },
                                "data": {"type": "object"}
                            }
                        }
                    },
                    "edges": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "id": {"type": "string"},
                                "source": {"type": "string"},
                                "target": {"type": "string"}
                            }
                        }
                    }
                }
            }
        }

    def example_payload(self) -> Dict[str, Any]:
        """
        Example payload for the component
        """
        return {
            "nodes": [
                {
                    "id": "ChatInput-1",
                    "type": "ChatInput",
                    "position": {"x": 100, "y": 100},
                    "data": {
                        "display_name": "User's Question",
                        "template": {
                            "input_value": {
                                "display_name": "Input",
                                "type": "string",
                                "value": "What is the capital of France?",
                                "is_handle": True
                            }
                        }
                    }
                },
                {
                    "id": "Prompt-1",
                    "type": "Prompt",
                    "position": {"x": 300, "y": 100},
                    "data": {
                        "display_name": "System Prompt",
                        "template": {
                            "prompt_template": {
                                "display_name": "Template",
                                "type": "string",
                                "value": "You are a helpful geography expert. The user asked: {input_value}",
                                "is_handle": True
                            }
                        }
                    }
                },
                {
                    "id": "OpenAI-1",
                    "type": "OpenAIModel",
                    "position": {"x": 500, "y": 100},
                    "data": {
                        "display_name": "OpenAI gpt-4o-mini",
                        "template": {
                            "model": {
                                "display_name": "Model",
                                "type": "options",
                                "options": ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
                                "value": "gpt-4o-mini"
                            },
                            "api_key": {
                                "display_name": "API Key",
                                "type": "SecretStr",
                                "required": True,
                                "env_var": "OPENAI_API_KEY"
                            },
                            "prompt": {
                                "display_name": "Prompt",
                                "type": "string",
                                "is_handle": True
                            }
                        }
                    }
                },
                {
                    "id": "ChatOutput-1",
                    "type": "ChatOutput",
                    "position": {"x": 700, "y": 100},
                    "data": {
                        "display_name": "Final Answer",
                        "template": {
                            "response": {
                                "display_name": "Response",
                                "type": "string",
                                "is_handle": True
                            }
                        }
                    }
                }
            ],
            "edges": [
                {
                    "id": "e1",
                    "source": "ChatInput-1",
                    "source_handle": "input_value",
                    "target": "Prompt-1",
                    "target_handle": "prompt_template"
                },
                {
                    "id": "e2",
                    "source": "Prompt-1",
                    "source_handle": "prompt_template",
                    "target": "OpenAI-1",
                    "target_handle": "prompt"
                },
                {
                    "id": "e3",
                    "source": "OpenAI-1",
                    "source_handle": "response",
                    "target": "ChatOutput-1",
                    "target_handle": "response"
                }
            ]
        }

    def example_value(self) -> Dict[str, Any]:
        """
        Example value for the component
        """
        return self.example_payload()


# Utility functions for workflow analysis and execution
class WorkflowAnalyzer:
    """
    Analyze workflow configurations and provide insights
    """

    @staticmethod
    def analyze_workflow(workflow: Dict[str, Any]) -> Dict[str, Any]:
        """
        Provide detailed analysis of a workflow
        """
        nodes = workflow.get("nodes", [])
        edges = workflow.get("edges", [])

        # Count node types
        node_types = {}
        for node in nodes:
            node_type = node.get("type", "unknown")
            node_types[node_type] = node_types.get(node_type, 0) + 1

        # Analyze workflow complexity
        complexity = "Simple"
        if len(nodes) > 10:
            complexity = "Complex"
        elif len(nodes) > 5:
            complexity = "Medium"

        # Check for potential issues
        issues = []

        # Check for disconnected nodes
        connected_nodes = set()
        for edge in edges:
            connected_nodes.add(edge["source"])
            connected_nodes.add(edge["target"])

        disconnected = [node["id"] for node in nodes if node["id"] not in connected_nodes]
        if disconnected:
            issues.append(f"Disconnected nodes: {', '.join(disconnected)}")

        # Check for missing required fields and API keys
        for node in nodes:
            node_type = node.get("type")
            data = node.get("data", {})

            # Check for required API keys
            if node_type == "OpenAIModel" and not data.get("template", {}).get("api_key", {}).get("value"):
                issues.append(f"Node {node['id']} missing OpenAI API key")
            elif node_type == "ChatModel" and not data.get("template", {}).get("api_key", {}).get("value"):
                issues.append(f"Node {node['id']} missing API key")
            elif node_type == "NebiusImage" and not data.get("template", {}).get("api_key", {}).get("value"):
                issues.append(f"Node {node['id']} missing Nebius API key")

            # Check for required model configurations
            if node_type in ["OpenAIModel", "ChatModel", "HFTextGeneration"] and not data.get("template", {}).get("model", {}).get("value"):
                issues.append(f"Node {node['id']} missing model configuration")

            # Check for required templates
            if node_type in ["Prompt", "ChatInput", "ChatOutput"] and not data.get("template"):
                issues.append(f"Node {node['id']} missing template configuration")

        # Analyze node categories
        input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "Input"]]
        processing_nodes = [n for n in nodes if n.get("type") in [
            "OpenAIModel", "ChatModel", "Prompt", "HFTextGeneration",
            "ExecutePython", "ConditionalLogic", "Wait", "APIRequest",
            "WebSearch", "KnowledgeBase", "RAGQuery"
        ]]
        output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "Output"]]
        ai_nodes = [n for n in nodes if n.get("type") in [
            "OpenAIModel", "ChatModel", "HFTextGeneration", "HFImageGeneration",
            "NebiusImage", "HFSpeechToText", "HFTextToSpeech", "HFVisionModel"
        ]]

        return {
            "total_nodes": len(nodes),
            "total_edges": len(edges),
            "node_types": node_types,
            "complexity": complexity,
            "issues": issues,
            "is_valid": len(issues) == 0,
            "categories": {
                "input_nodes": len(input_nodes),
                "processing_nodes": len(processing_nodes),
                "output_nodes": len(output_nodes),
                "ai_nodes": len(ai_nodes)
            }
        }

    @staticmethod
    def validate_for_execution(workflow: Dict[str, Any]) -> Dict[str, Any]:
        """
        Validate if workflow is ready for execution
        """
        analysis = WorkflowAnalyzer.analyze_workflow(workflow)

        # Additional execution-specific checks
        nodes = workflow.get("nodes", [])

        # Check for entry points (input nodes)
        input_types = {"ChatInput", "Input"}
        inputs = [n for n in nodes if n.get("type") in input_types]

        if not inputs:
            analysis["issues"].append("No input nodes found - workflow needs an entry point")

        # Check for output nodes
        output_types = {"ChatOutput", "Output"}
        outputs = [n for n in nodes if n.get("type") in output_types]

        if not outputs:
            analysis["issues"].append("No output nodes found - workflow needs an exit point")

        # Check for required environment variables
        env_vars = set()
        for node in nodes:
            data = node.get("data", {})
            template = data.get("template", {})
            for field in template.values():
                if isinstance(field, dict) and field.get("type") == "SecretStr":
                    env_var = field.get("env_var")
                    if env_var:
                        env_vars.add(env_var)

        if env_vars:
            analysis["required_env_vars"] = list(env_vars)

        analysis["is_executable"] = len(analysis["issues"]) == 0

        return analysis


# Export the main component
__all__ = ["WorkflowBuilder", "WorkflowAnalyzer"]
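
For reference, a minimal sketch of how the component removed in this commit is typically wired into a Gradio app. It assumes the published gradio_workflowbuilder package still exposes WorkflowBuilder and WorkflowAnalyzer as in the listing above; the summarize helper and the demo wiring are illustrative, not part of this repository.

# Illustrative usage sketch (not part of the deleted file). Assumes
# `gradio_workflowbuilder` is installed and exposes the classes shown above.
import gradio as gr
from gradio_workflowbuilder import WorkflowBuilder, WorkflowAnalyzer


def summarize(workflow: dict) -> dict:
    # Run the static checks from WorkflowAnalyzer on the current canvas state.
    return WorkflowAnalyzer.validate_for_execution(workflow or {"nodes": [], "edges": []})


with gr.Blocks() as demo:
    builder = WorkflowBuilder(label="Workflow")   # the component defined in the deleted file
    report = gr.JSON(label="Analysis")
    builder.change(summarize, inputs=builder, outputs=report)  # "change" is declared in EVENTS

if __name__ == "__main__":
    demo.launch()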