openfree commited on
Commit
3392d08
ยท
verified ยท
1 Parent(s): c7f0d07

Upload 9 files

Browse files
concept_map_generator.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+ from graph_generator_utils import add_nodes_and_edges
6
+
7
def generate_concept_map(json_input: str, output_format: str) -> str:
    """
    Generate a concept map image from a JSON description.

    Args:
        json_input (str): JSON string with the following shape:

            {
                "central_node": "Artificial Intelligence (AI)",
                "nodes": [
                    {
                        "id": "ml_fundamental",
                        "label": "Machine Learning",
                        "relationship": "is essential for",
                        "subnodes": [
                            {
                                "id": "dl_branch",
                                "label": "Deep Learning",
                                "relationship": "for example",
                                "subnodes": [
                                    {"id": "cnn_example", "label": "CNNs", "relationship": "for example"},
                                    {"id": "rnn_example", "label": "RNNs", "relationship": "for example"}
                                ]
                            }
                        ]
                    }
                ]
            }

            "subnodes" may nest to any depth; "id", "label" and
            "relationship" are required on every node.
        output_format (str): Image format passed to Graphviz (e.g. "png", "svg").

    Returns:
        str: Filepath of the rendered image, or a message starting with
        "Error:" when the input is empty, not valid JSON, or malformed.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        if 'central_node' not in data or 'nodes' not in data:
            raise ValueError("Missing required fields: central_node or nodes")

        # Graphviz resolves fonts by name through the system font config,
        # so this font must be installed (needed so Korean labels render).
        korean_font = 'NanumGothic'

        dot = graphviz.Digraph(
            name='ConceptMap',
            format='png',
            graph_attr={
                'rankdir': 'TB',         # top-to-bottom hierarchy
                'splines': 'ortho',      # right-angled edges
                'bgcolor': 'white',
                'pad': '0.5',            # padding around the graph
                'fontname': korean_font,
                'charset': 'UTF-8',
            },
            node_attr={'fontname': korean_font},
            edge_attr={'fontname': korean_font},
        )

        base_color = '#19191a'  # darkest shade; children get lighter per depth

        # Central node: rounded dark box with larger white text.
        dot.node(
            'central',
            data['central_node'],
            shape='box',
            style='filled,rounded',
            fillcolor=base_color,
            fontcolor='white',
            fontsize='16',
            fontname=korean_font,
        )

        # Recursively add the rest of the hierarchy starting at depth 1.
        add_nodes_and_edges(dot, 'central', data.get('nodes', []), current_depth=1, base_color=base_color)

        # Render next to an unsuffixed temp path: graphviz writes the DOT
        # source to tmp.name, the image to `tmp.name.<format>`, and
        # cleanup=True removes the source.  Using no suffix here avoids the
        # doubled extension (e.g. ".png.png") the previous code produced.
        with NamedTemporaryFile(delete=False) as tmp:
            dot.render(tmp.name, format=output_format, cleanup=True)
        return f"{tmp.name}.{output_format}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
graph_generator_utils.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import os
3
+
4
def add_nodes_and_edges(dot: graphviz.Digraph, parent_id: str, nodes_list: list, current_depth: int, base_color: str):
    """
    Recursively add one level of nodes and edges to the Digraph.

    Nodes get progressively lighter fills and smaller fonts as depth grows;
    every edge is labelled with its node's 'relationship' text.

    Args:
        dot (graphviz.Digraph): Graph being built (mutated in place).
        parent_id (str): ID of the node these entries hang off.
        nodes_list (list): Node dicts with 'id', 'label', 'relationship'
            and an optional recursive 'subnodes' list.
        current_depth (int): Depth of this level (central node is 0).
        base_color (str): '#rrggbb' base colour for the deepest shade.

    Raises:
        ValueError: If a node is missing 'id', 'label' or 'relationship'.
    """
    korean_font = 'NanumGothic'  # must be installed for Korean labels

    # Each level blends the base colour 12% further towards white.
    lightening_factor = 0.12

    # Fall back to the default dark colour when base_color is not '#rrggbb'.
    if not isinstance(base_color, str) or not base_color.startswith('#') or len(base_color) != 7:
        base_color = '#19191a'

    # Blend every RGB channel towards 255, clamped at pure white.
    channels = []
    for offset in (1, 3, 5):
        channel = int(base_color[offset:offset + 2], 16)
        blended = channel + int((255 - channel) * current_depth * lightening_factor)
        channels.append(min(255, blended))
    node_fill_color = '#{:02x}{:02x}{:02x}'.format(*channels)

    # Switch to black text once the fill becomes light enough to read on.
    font_color = 'white' if current_depth * lightening_factor < 0.6 else 'black'

    edge_color = '#4a4a4a'  # dark gray connector lines
    # Shrink fonts with depth, bounded below for readability.
    font_size = max(9, 14 - (current_depth * 2))
    edge_font_size = max(7, 10 - (current_depth * 1))

    for node in nodes_list:
        node_id = node.get('id')
        label = node.get('label')
        relationship = node.get('relationship')

        # Every node must carry all three required fields.
        if not all([node_id, label, relationship]):
            raise ValueError(f"Invalid node: {node}")

        # Rounded box styled for this depth.
        dot.node(
            node_id,
            label,
            shape='box',
            style='filled,rounded',
            fillcolor=node_fill_color,
            fontcolor=font_color,
            fontsize=str(font_size),
            fontname=korean_font,
        )

        # Labelled edge from the parent down to this node.
        dot.edge(
            parent_id,
            node_id,
            label=relationship,
            color=edge_color,
            fontcolor=edge_color,
            fontsize=str(edge_font_size),
            fontname=korean_font,
        )

        # Recurse into children, one level deeper.
        if 'subnodes' in node:
            add_nodes_and_edges(dot, node_id, node['subnodes'], current_depth + 1, base_color)
packages (7).txt ADDED
@@ -0,0 +1 @@
 
 
1
+ graphviz
process_flow_generator.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+
6
def generate_process_flow_diagram(json_input: str, output_format: str) -> str:
    """
    Generate a process flow diagram (flowchart) from a JSON description.

    Args:
        json_input (str): JSON string with the following shape:

            {
                "start_node": "Start Inference Request",
                "nodes": [
                    {"id": "user_input", "label": "Receive User Input (Data)", "type": "io"},
                    {"id": "validate_data", "label": "Data Valid?", "type": "decision"},
                    {"id": "run_inference", "label": "Run AI Model Inference", "type": "process"},
                    {"id": "end_inference_process", "label": "End Inference Process", "type": "end"}
                ],
                "connections": [
                    {"from": "start_node", "to": "user_input", "label": "Request"},
                    {"from": "user_input", "to": "validate_data", "label": "Data Received"},
                    {"from": "validate_data", "to": "run_inference", "label": "Yes"},
                    {"from": "run_inference", "to": "end_inference_process", "label": "Done"}
                ]
            }

            A node's "type" selects its shape: process, decision, start,
            end, io, document; anything else falls back to a box.
        output_format (str): Image format passed to Graphviz (e.g. "png", "svg").

    Returns:
        str: Filepath of the rendered image, or a message starting with
        "Error:" when the input is empty, not valid JSON, or malformed.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        # Validate required top-level keys for a flowchart.
        if 'start_node' not in data or 'nodes' not in data or 'connections' not in data:
            raise ValueError("Missing required fields: 'start_node', 'nodes', or 'connections'")

        # Flowchart node type -> Graphviz shape.
        node_shapes = {
            "process": "box",            # rectangle for processes
            "decision": "diamond",       # diamond for decisions
            "start": "oval",             # oval for start
            "end": "oval",               # oval for end
            "io": "parallelogram",       # input/output
            "document": "note",          # document symbol
            "default": "box",            # fallback
        }

        # Graphviz resolves fonts by name through the system font config,
        # so this font must be installed (needed so Korean labels render).
        korean_font = 'NanumGothic-Regular'

        dot = graphviz.Digraph(
            name='ProcessFlowDiagram',
            format='png',
            graph_attr={
                'rankdir': 'TB',         # top-to-bottom flow
                'splines': 'ortho',      # straight lines with 90-degree bends
                'bgcolor': 'white',
                'pad': '0.5',            # padding around the graph
                'nodesep': '0.6',        # spacing between nodes on same rank
                'ranksep': '0.8',        # spacing between ranks
                'fontname': korean_font,
                'charset': 'UTF-8',
            },
            node_attr={'fontname': korean_font},
            edge_attr={'fontname': korean_font},
        )

        base_color = '#19191a'  # dark fill for regular nodes

        fill_color_for_nodes = base_color
        # Dark fills get white text for contrast (always true for the
        # hardcoded base colour; kept general in case base_color changes).
        font_color_for_nodes = 'white' if base_color.lower() in ('#000000', '#19191a') else 'black'

        # Index nodes by ID for easy lookup.
        all_defined_nodes = {node['id']: node for node in data['nodes']}

        # Start node: distinct blue oval, labelled with its own ID.
        start_node_id = data['start_node']
        dot.node(
            start_node_id,
            start_node_id,
            shape=node_shapes['start'],
            style='filled,rounded',
            fillcolor='#2196F3',
            fontcolor='white',
            fontsize='14',
            fontname=korean_font,
        )

        # Remaining nodes: distinct red for "end", base colour otherwise.
        for node_id, node_info in all_defined_nodes.items():
            if node_id == start_node_id:
                continue  # already added above

            node_type = node_info.get("type", "default")
            shape = node_shapes.get(node_type, "box")
            node_label = node_info['label']

            if node_type == 'end':
                dot.node(
                    node_id,
                    node_label,
                    shape=shape,
                    style='filled,rounded',
                    fillcolor='#F44336',  # distinct red for End
                    fontcolor='white',
                    fontsize='14',
                    fontname=korean_font,
                )
            else:
                dot.node(
                    node_id,
                    node_label,
                    shape=shape,
                    style='filled,rounded',
                    fillcolor=fill_color_for_nodes,
                    fontcolor=font_color_for_nodes,
                    fontsize='14',
                    fontname=korean_font,
                )

        # Edges; a missing 'label' defaults to an empty string.
        for connection in data['connections']:
            dot.edge(
                connection['from'],
                connection['to'],
                label=connection.get('label', ''),
                color='#4a4a4a',          # dark gray lines
                fontcolor='#4a4a4a',
                fontsize='10',
                fontname=korean_font,
            )

        # Render via an unsuffixed temp path: graphviz writes the image to
        # `tmp.name.<format>` and cleanup=True removes the DOT source.
        # Using no suffix avoids the doubled extension (".png.png") the
        # previous code produced.
        with NamedTemporaryFile(delete=False) as tmp:
            dot.render(tmp.name, format=output_format, cleanup=True)
        return f"{tmp.name}.{output_format}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
radial_diagram_generator.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+ from graph_generator_utils import add_nodes_and_edges
6
+
7
def generate_radial_diagram(json_input: str, output_format: str) -> str:
    """
    Generate a radial (center-expanded) diagram from a JSON description.

    Args:
        json_input (str): JSON string with the following shape:

            {
                "central_node": "AI Core Concepts & Domains",
                "nodes": [
                    {
                        "id": "foundational_ml",
                        "label": "Foundational ML",
                        "relationship": "builds on",
                        "subnodes": [
                            {"id": "supervised_l", "label": "Supervised Learning", "relationship": "e.g."},
                            {"id": "unsupervised_l", "label": "Unsupervised Learning", "relationship": "e.g."}
                        ]
                    }
                ]
            }

            "subnodes" may nest to any depth; "id", "label" and
            "relationship" are required on every node.
        output_format (str): Image format passed to Graphviz (e.g. "png", "svg").

    Returns:
        str: Filepath of the rendered image, or a message starting with
        "Error:" when the input is empty, not valid JSON, or malformed.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        if 'central_node' not in data or 'nodes' not in data:
            raise ValueError("Missing required fields: central_node or nodes")

        # Graphviz resolves fonts by name through the system font config,
        # so this font must be installed (needed so Korean labels render).
        korean_font = 'NanumGothic-Regular'

        dot = graphviz.Digraph(
            name='RadialDiagram',
            format='png',
            engine='neato',  # force-directed engine gives the radial layout
            graph_attr={
                'overlap': 'false',      # prevent node overlap
                'splines': 'true',       # smooth splines for edges
                'bgcolor': 'white',
                'pad': '0.5',            # padding around the graph
                'layout': 'neato',       # explicit layout engine for consistency
                'fontname': korean_font,
                'charset': 'UTF-8',
            },
            node_attr={
                'fixedsize': 'false',    # allow nodes to resize to content
                'fontname': korean_font,
            },
            edge_attr={'fontname': korean_font},
        )

        base_color = '#19191a'  # darkest shade; children get lighter per depth

        # Central node: rounded dark box with larger white text.
        dot.node(
            'central',
            data['central_node'],
            shape='box',
            style='filled,rounded',
            fillcolor=base_color,
            fontcolor='white',
            fontsize='16',
            fontname=korean_font,
        )

        # Recursively add the rest of the hierarchy starting at depth 1.
        add_nodes_and_edges(dot, 'central', data.get('nodes', []), current_depth=1, base_color=base_color)

        # Render via an unsuffixed temp path: graphviz writes the image to
        # `tmp.name.<format>` and cleanup=True removes the DOT source.
        # Using no suffix avoids the doubled extension (".png.png") the
        # previous code produced.
        with NamedTemporaryFile(delete=False) as tmp:
            dot.render(tmp.name, format=output_format, cleanup=True)
        return f"{tmp.name}.{output_format}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
ruff (3).toml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Ruff configuration.
line-length = 120
target-version = "py310"

# Lint only with the import-sorting (isort, "I") rules.
lint.select = ["I"]
sample_data.py ADDED
@@ -0,0 +1,690 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Enhanced Sample Data for AI Diagram Generator
2
+ # Modern and practical AI-related sample data
3
+
4
# Concept map sample: a four-level "Generative AI Ecosystem" hierarchy.
# Consumed by generate_concept_map(); every node carries "id", "label",
# "relationship" and an optional recursive "subnodes" list.
CONCEPT_MAP_JSON = """
{
    "central_node": "Generative AI Ecosystem",
    "nodes": [
        {
            "id": "foundation_models",
            "label": "Foundation Models",
            "relationship": "powered by",
            "subnodes": [
                {
                    "id": "llm_models",
                    "label": "Large Language Models",
                    "relationship": "includes",
                    "subnodes": [
                        {
                            "id": "gpt_family",
                            "label": "GPT Family",
                            "relationship": "such as",
                            "subnodes": [
                                {
                                    "id": "gpt4",
                                    "label": "GPT-4",
                                    "relationship": "latest version"
                                },
                                {
                                    "id": "chatgpt",
                                    "label": "ChatGPT",
                                    "relationship": "application"
                                }
                            ]
                        },
                        {
                            "id": "claude_family",
                            "label": "Claude Family",
                            "relationship": "such as",
                            "subnodes": [
                                {
                                    "id": "claude3",
                                    "label": "Claude 3 Opus",
                                    "relationship": "advanced model"
                                },
                                {
                                    "id": "claude_sonnet",
                                    "label": "Claude Sonnet",
                                    "relationship": "balanced model"
                                }
                            ]
                        },
                        {
                            "id": "opensource_llm",
                            "label": "Open Source LLMs",
                            "relationship": "alternatives",
                            "subnodes": [
                                {
                                    "id": "llama3",
                                    "label": "LLaMA 3",
                                    "relationship": "by Meta"
                                },
                                {
                                    "id": "mistral",
                                    "label": "Mistral 7B",
                                    "relationship": "efficient"
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "multimodal_models",
                    "label": "Multimodal Models",
                    "relationship": "extends to",
                    "subnodes": [
                        {
                            "id": "vision_language",
                            "label": "Vision-Language Models",
                            "relationship": "including",
                            "subnodes": [
                                {
                                    "id": "dall_e3",
                                    "label": "DALL-E 3",
                                    "relationship": "image generation"
                                },
                                {
                                    "id": "midjourney",
                                    "label": "Midjourney v6",
                                    "relationship": "artistic creation"
                                }
                            ]
                        },
                        {
                            "id": "video_models",
                            "label": "Video Generation",
                            "relationship": "emerging with",
                            "subnodes": [
                                {
                                    "id": "sora",
                                    "label": "Sora",
                                    "relationship": "by OpenAI"
                                },
                                {
                                    "id": "runway",
                                    "label": "Runway Gen-2",
                                    "relationship": "creative tools"
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "ai_applications",
            "label": "Real-World Applications",
            "relationship": "deployed in",
            "subnodes": [
                {
                    "id": "enterprise_ai",
                    "label": "Enterprise Solutions",
                    "relationship": "transforming",
                    "subnodes": [
                        {
                            "id": "ai_assistants",
                            "label": "AI Assistants",
                            "relationship": "like",
                            "subnodes": [
                                {
                                    "id": "copilot",
                                    "label": "GitHub Copilot",
                                    "relationship": "code generation"
                                },
                                {
                                    "id": "duet_ai",
                                    "label": "Google Duet AI",
                                    "relationship": "workspace integration"
                                }
                            ]
                        },
                        {
                            "id": "automation",
                            "label": "Process Automation",
                            "relationship": "through",
                            "subnodes": [
                                {
                                    "id": "rpa_ai",
                                    "label": "AI-Enhanced RPA",
                                    "relationship": "intelligent automation"
                                },
                                {
                                    "id": "document_ai",
                                    "label": "Document Intelligence",
                                    "relationship": "data extraction"
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "creative_ai",
                    "label": "Creative Industries",
                    "relationship": "revolutionizing",
                    "subnodes": [
                        {
                            "id": "content_creation",
                            "label": "Content Generation",
                            "relationship": "enabling",
                            "subnodes": [
                                {
                                    "id": "marketing_ai",
                                    "label": "Marketing Copy",
                                    "relationship": "automated creation"
                                },
                                {
                                    "id": "game_assets",
                                    "label": "Game Asset Generation",
                                    "relationship": "procedural design"
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "ai_infrastructure",
            "label": "AI Infrastructure",
            "relationship": "supported by",
            "subnodes": [
                {
                    "id": "compute_resources",
                    "label": "Compute Resources",
                    "relationship": "requiring",
                    "subnodes": [
                        {
                            "id": "gpu_clusters",
                            "label": "GPU Clusters",
                            "relationship": "powered by",
                            "subnodes": [
                                {
                                    "id": "nvidia_h100",
                                    "label": "NVIDIA H100",
                                    "relationship": "latest generation"
                                },
                                {
                                    "id": "tpu_v5",
                                    "label": "Google TPU v5",
                                    "relationship": "specialized chips"
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "mlops_platform",
                    "label": "MLOps Platforms",
                    "relationship": "managed by",
                    "subnodes": [
                        {
                            "id": "model_serving",
                            "label": "Model Serving",
                            "relationship": "via",
                            "subnodes": [
                                {
                                    "id": "huggingface",
                                    "label": "Hugging Face",
                                    "relationship": "model hub"
                                },
                                {
                                    "id": "vertex_ai",
                                    "label": "Vertex AI",
                                    "relationship": "Google Cloud"
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ]
}
"""
244
+
245
# Synoptic chart sample: a three-phase "Modern AI Development Pipeline".
# Same node schema as the concept map; leaf nodes use empty "subnodes" lists.
SYNOPTIC_CHART_JSON = """
{
    "central_node": "Modern AI Development Pipeline",
    "nodes": [
        {
            "id": "phase1",
            "label": "I. Data Engineering & Preparation",
            "relationship": "begins with",
            "subnodes": [
                {
                    "id": "data_strategy",
                    "label": "1. Data Strategy",
                    "relationship": "establishing",
                    "subnodes": [
                        {"id": "data_sources", "label": "1.1. Multi-Source Integration", "relationship": "includes", "subnodes": []},
                        {"id": "data_governance", "label": "1.2. Privacy & Compliance", "relationship": "ensures", "subnodes": []}
                    ]
                },
                {
                    "id": "data_pipeline",
                    "label": "2. Data Pipeline",
                    "relationship": "building",
                    "subnodes": [
                        {"id": "etl_process", "label": "2.1. ETL/ELT Workflows", "relationship": "implements", "subnodes": []},
                        {"id": "feature_store", "label": "2.2. Feature Store Setup", "relationship": "centralizes", "subnodes": []}
                    ]
                }
            ]
        },
        {
            "id": "phase2",
            "label": "II. Model Development & Training",
            "relationship": "continues to",
            "subnodes": [
                {
                    "id": "experimentation",
                    "label": "1. Experimentation Platform",
                    "relationship": "enabling",
                    "subnodes": [
                        {"id": "experiment_tracking", "label": "1.1. MLflow/W&B Integration", "relationship": "tracks", "subnodes": []},
                        {"id": "hyperparameter_opt", "label": "1.2. AutoML & HPO", "relationship": "optimizes", "subnodes": []}
                    ]
                },
                {
                    "id": "distributed_training",
                    "label": "2. Scalable Training",
                    "relationship": "leveraging",
                    "subnodes": [
                        {"id": "multi_gpu", "label": "2.1. Multi-GPU Training", "relationship": "accelerates", "subnodes": []},
                        {"id": "model_parallelism", "label": "2.2. Model Parallelism", "relationship": "enables", "subnodes": []}
                    ]
                }
            ]
        },
        {
            "id": "phase3",
            "label": "III. Deployment & Operations",
            "relationship": "culminates in",
            "subnodes": [
                {
                    "id": "model_deployment",
                    "label": "1. Production Deployment",
                    "relationship": "implementing",
                    "subnodes": [
                        {"id": "edge_deployment", "label": "1.1. Edge Computing", "relationship": "optimizes", "subnodes": []},
                        {"id": "api_gateway", "label": "1.2. API Management", "relationship": "exposes", "subnodes": []}
                    ]
                },
                {
                    "id": "monitoring_ops",
                    "label": "2. AI Operations",
                    "relationship": "maintaining",
                    "subnodes": [
                        {"id": "model_monitoring", "label": "2.1. Drift Detection", "relationship": "monitors", "subnodes": []},
                        {"id": "feedback_loop", "label": "2.2. Continuous Learning", "relationship": "improves", "subnodes": []}
                    ]
                }
            ]
        }
    ]
}
"""
327
+
328
# Radial diagram sample: five spokes around an "AI Innovation Hub" center.
# Consumed by generate_radial_diagram(); same node schema as the concept map.
RADIAL_DIAGRAM_JSON = """
{
    "central_node": "AI Innovation Hub",
    "nodes": [
        {
            "id": "emerging_tech",
            "label": "Emerging Technologies",
            "relationship": "advancing through",
            "subnodes": [
                {"id": "quantum_ml", "label": "Quantum Machine Learning", "relationship": "breakthrough in", "subnodes": []},
                {"id": "neuromorphic", "label": "Neuromorphic Computing", "relationship": "inspired by", "subnodes": []}
            ]
        },
        {
            "id": "ai_safety",
            "label": "AI Safety & Ethics",
            "relationship": "ensuring through",
            "subnodes": [
                {"id": "alignment", "label": "AI Alignment Research", "relationship": "focusing on", "subnodes": []},
                {"id": "explainable_ai", "label": "Explainable AI (XAI)", "relationship": "providing", "subnodes": []}
            ]
        },
        {
            "id": "industry_impact",
            "label": "Industry Transformation",
            "relationship": "disrupting",
            "subnodes": [
                {"id": "healthcare_ai", "label": "AI in Healthcare", "relationship": "revolutionizing", "subnodes": []},
                {"id": "fintech_ai", "label": "AI in Finance", "relationship": "automating", "subnodes": []}
            ]
        },
        {
            "id": "research_frontiers",
            "label": "Research Frontiers",
            "relationship": "exploring",
            "subnodes": [
                {"id": "agi_research", "label": "AGI Development", "relationship": "pursuing", "subnodes": []},
                {"id": "consciousness_ai", "label": "Machine Consciousness", "relationship": "investigating", "subnodes": []}
            ]
        },
        {
            "id": "ai_ecosystem",
            "label": "AI Ecosystem",
            "relationship": "building",
            "subnodes": [
                {"id": "open_source", "label": "Open Source Community", "relationship": "contributing to", "subnodes": []},
                {"id": "ai_startups", "label": "AI Startup Ecosystem", "relationship": "innovating in", "subnodes": []}
            ]
        }
    ]
}
"""
380
+
381
# Sample payload for the process-flow diagram generator: a "start_node" plus
# typed nodes ("process", "decision", "io", "end") wired together by labelled
# "connections", modelling an AI inference request pipeline with caching,
# quality gating, and a fallback path.
PROCESS_FLOW_JSON = """
{
    "start_node": "AI Request Initiated",
    "nodes": [
        {
            "id": "auth_check",
            "label": "Authentication & Rate Limiting",
            "type": "process"
        },
        {
            "id": "auth_valid",
            "label": "Valid Request?",
            "type": "decision"
        },
        {
            "id": "request_router",
            "label": "Request Router",
            "type": "process"
        },
        {
            "id": "cache_check",
            "label": "Check Response Cache",
            "type": "decision"
        },
        {
            "id": "model_selector",
            "label": "Model Selection Logic",
            "type": "process"
        },
        {
            "id": "load_balancer",
            "label": "Load Balancer",
            "type": "process"
        },
        {
            "id": "inference_engine",
            "label": "Run Inference",
            "type": "process"
        },
        {
            "id": "quality_check",
            "label": "Quality Assessment",
            "type": "decision"
        },
        {
            "id": "post_processor",
            "label": "Post-Processing & Filtering",
            "type": "process"
        },
        {
            "id": "response_cache",
            "label": "Update Cache",
            "type": "process"
        },
        {
            "id": "analytics_log",
            "label": "Log Analytics",
            "type": "process"
        },
        {
            "id": "deliver_response",
            "label": "Deliver Response",
            "type": "io"
        },
        {
            "id": "error_handler",
            "label": "Error Handler",
            "type": "process"
        },
        {
            "id": "fallback_model",
            "label": "Fallback Model",
            "type": "process"
        },
        {
            "id": "end_process",
            "label": "Complete",
            "type": "end"
        }
    ],
    "connections": [
        {"from": "start_node", "to": "auth_check", "label": "Begin"},
        {"from": "auth_check", "to": "auth_valid", "label": "Verify"},
        {"from": "auth_valid", "to": "request_router", "label": "Authorized"},
        {"from": "auth_valid", "to": "error_handler", "label": "Denied"},
        {"from": "request_router", "to": "cache_check", "label": "Route"},
        {"from": "cache_check", "to": "deliver_response", "label": "Cache Hit"},
        {"from": "cache_check", "to": "model_selector", "label": "Cache Miss"},
        {"from": "model_selector", "to": "load_balancer", "label": "Selected"},
        {"from": "load_balancer", "to": "inference_engine", "label": "Assigned"},
        {"from": "inference_engine", "to": "quality_check", "label": "Generated"},
        {"from": "quality_check", "to": "post_processor", "label": "Pass"},
        {"from": "quality_check", "to": "fallback_model", "label": "Fail"},
        {"from": "fallback_model", "to": "post_processor", "label": "Retry"},
        {"from": "post_processor", "to": "response_cache", "label": "Processed"},
        {"from": "response_cache", "to": "analytics_log", "label": "Cached"},
        {"from": "analytics_log", "to": "deliver_response", "label": "Logged"},
        {"from": "deliver_response", "to": "end_process", "label": "Delivered"},
        {"from": "error_handler", "to": "end_process", "label": "Handled"}
    ]
}
"""
483
+
484
# Sample payload for the WBS (Work Breakdown Structure) diagram generator:
# three project phases, each drilled down through the fixed six-level key
# chain tasks -> subtasks -> sub_subtasks -> sub_sub_subtasks ->
# final_level_tasks that the generator recurses over.
WBS_DIAGRAM_JSON = """
{
    "project_title": "Enterprise AI Platform Implementation",
    "phases": [
        {
            "id": "phase_foundation",
            "label": "Foundation & Architecture",
            "tasks": [
                {
                    "id": "task_requirements",
                    "label": "Requirements Analysis",
                    "subtasks": [
                        {
                            "id": "subtask_stakeholder",
                            "label": "Stakeholder Mapping",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_interviews",
                                    "label": "Executive Interviews",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_schedule",
                                            "label": "Schedule Sessions",
                                            "final_level_tasks": [
                                                {"id": "ft_prep_materials", "label": "Prepare Materials"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        },
                        {
                            "id": "subtask_technical",
                            "label": "Technical Assessment",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_infra",
                                    "label": "Infrastructure Audit",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_compute",
                                            "label": "Compute Resources",
                                            "final_level_tasks": [
                                                {"id": "ft_gpu_assessment", "label": "GPU Capacity Planning"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "task_architecture",
                    "label": "Solution Architecture",
                    "subtasks": [
                        {
                            "id": "subtask_design",
                            "label": "System Design",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_microservices",
                                    "label": "Microservices Architecture",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_api_design",
                                            "label": "API Design",
                                            "final_level_tasks": [
                                                {"id": "ft_api_specs", "label": "OpenAPI Specifications"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "phase_implementation",
            "label": "Core Implementation",
            "tasks": [
                {
                    "id": "task_data_platform",
                    "label": "Data Platform",
                    "subtasks": [
                        {
                            "id": "subtask_data_lake",
                            "label": "Data Lake Setup",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_storage",
                                    "label": "Storage Configuration",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_s3_setup",
                                            "label": "S3 Bucket Setup",
                                            "final_level_tasks": [
                                                {"id": "ft_lifecycle_policies", "label": "Lifecycle Policies"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        },
                        {
                            "id": "subtask_streaming",
                            "label": "Real-time Pipeline",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_kafka",
                                    "label": "Kafka Implementation",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_topics",
                                            "label": "Topic Design",
                                            "final_level_tasks": [
                                                {"id": "ft_schema_registry", "label": "Schema Registry"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "task_ml_platform",
                    "label": "ML Platform",
                    "subtasks": [
                        {
                            "id": "subtask_mlops",
                            "label": "MLOps Setup",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_ci_cd",
                                    "label": "CI/CD Pipeline",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_github_actions",
                                            "label": "GitHub Actions",
                                            "final_level_tasks": [
                                                {"id": "ft_model_registry", "label": "Model Registry Integration"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "phase_deployment",
            "label": "Deployment & Operations",
            "tasks": [
                {
                    "id": "task_production",
                    "label": "Production Deployment",
                    "subtasks": [
                        {
                            "id": "subtask_kubernetes",
                            "label": "Kubernetes Orchestration",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_helm",
                                    "label": "Helm Charts",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_autoscaling",
                                            "label": "Auto-scaling Config",
                                            "final_level_tasks": [
                                                {"id": "ft_hpa_setup", "label": "HPA Configuration"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        },
                        {
                            "id": "subtask_monitoring",
                            "label": "Observability Stack",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_prometheus",
                                    "label": "Prometheus Setup",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_grafana",
                                            "label": "Grafana Dashboards",
                                            "final_level_tasks": [
                                                {"id": "ft_alert_rules", "label": "Alert Configuration"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ]
}
"""
synoptic_chart_generator.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+ from graph_generator_utils import add_nodes_and_edges
6
+
7
def generate_synoptic_chart(json_input: str, output_format: str) -> str:
    """
    Generates a synoptic chart (horizontal flowchart) from JSON input.

    Args:
        json_input (str): A JSON string describing the synoptic chart structure.
                          It must follow the Expected JSON Format Example below.
        output_format (str): Graphviz output format for the rendered file
                             (e.g. 'png', 'svg').

    Expected JSON Format Example:
    {
        "central_node": "AI Project Lifecycle",
        "nodes": [
            {
                "id": "phase1",
                "label": "I. Problem Definition & Data Acquisition",
                "relationship": "Starts with",
                "subnodes": [
                    {
                        "id": "sub1_1",
                        "label": "1. Problem Formulation",
                        "relationship": "Involves",
                        "subnodes": [
                            {"id": "sub1_1_1", "label": "1.1. Identify Business Need", "relationship": "e.g."},
                            {"id": "sub1_1_2", "label": "1.2. Define KPIs", "relationship": "e.g."}
                        ]
                    },
                    {
                        "id": "sub1_2",
                        "label": "2. Data Collection",
                        "relationship": "Followed by",
                        "subnodes": [
                            {"id": "sub1_2_1", "label": "2.1. Source Data", "relationship": "from"},
                            {"id": "sub1_2_2", "label": "2.2. Data Cleaning", "relationship": "includes"}
                        ]
                    }
                ]
            },
            {
                "id": "phase2",
                "label": "II. Model Development",
                "relationship": "Proceeds to",
                "subnodes": [
                    {
                        "id": "sub2_1",
                        "label": "1. Feature Engineering",
                        "relationship": "Comprises",
                        "subnodes": [
                            {"id": "sub2_1_1", "label": "1.1. Feature Selection", "relationship": "e.g."},
                            {"id": "sub2_1_2", "label": "1.2. Feature Transformation", "relationship": "e.g."}
                        ]
                    },
                    {
                        "id": "sub2_2",
                        "label": "2. Model Training",
                        "relationship": "Involves",
                        "subnodes": [
                            {"id": "sub2_2_1", "label": "2.1. Algorithm Selection", "relationship": "uses"},
                            {"id": "sub2_2_2", "label": "2.2. Hyperparameter Tuning", "relationship": "optimizes"}
                        ]
                    }
                ]
            },
            {
                "id": "phase3",
                "label": "III. Evaluation & Deployment",
                "relationship": "Culminates in",
                "subnodes": [
                    {
                        "id": "sub3_1",
                        "label": "1. Model Evaluation",
                        "relationship": "Includes",
                        "subnodes": [
                            {"id": "sub3_1_1", "label": "1.1. Performance Metrics", "relationship": "measures"},
                            {"id": "sub3_1_2", "label": "1.2. Bias & Fairness Audits", "relationship": "ensures"}
                        ]
                    },
                    {
                        "id": "sub3_2",
                        "label": "2. Deployment & Monitoring",
                        "relationship": "Requires",
                        "subnodes": [
                            {"id": "sub3_2_1", "label": "2.1. API/Integration Development", "relationship": "for"},
                            {"id": "sub3_2_2", "label": "2.2. Continuous Monitoring", "relationship": "ensures"}
                        ]
                    }
                ]
            }
        ]
    }

    Returns:
        str: The filepath to the rendered image file (extension matches
             output_format), or a string starting with "Error:" on failure.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        # Both top-level keys are mandatory; the generic handler below turns
        # this into an "Error: ..." string for the caller.
        if 'central_node' not in data or 'nodes' not in data:
            raise ValueError("Missing required fields: central_node or nodes")

        # Korean-capable font. When GDFONTPATH is set, Graphviz resolves this
        # as a font file name (extension omitted).
        korean_font = 'NanumGothic-Regular'

        # NOTE: no 'format=' here — the actual format is passed to render()
        # below, so a constructor-level format would only mislead readers.
        dot = graphviz.Digraph(
            name='SynopticChart',
            graph_attr={
                'rankdir': 'LR',         # Left-to-right layout (horizontal hierarchy)
                'splines': 'ortho',      # Straight, orthogonal edges
                'bgcolor': 'white',      # White background
                'pad': '0.5',            # Padding around the graph
                'ranksep': '0.7',        # Horizontal separation between ranks (columns)
                'nodesep': '0.3',        # Vertical separation between nodes in a rank
                'fontname': korean_font, # Graph-wide Korean-capable font
                'charset': 'UTF-8'       # UTF-8 encoding
            },
            node_attr={
                'fontname': korean_font  # Default font for every node
            },
            edge_attr={
                'fontname': korean_font  # Default font for every edge label
            }
        )

        base_color = '#19191a'

        dot.node(
            'central',
            data['central_node'],
            shape='box',             # Rectangular shape
            style='filled,rounded',  # Filled with rounded corners
            fillcolor=base_color,    # Darkest color of the gradient
            fontcolor='white',       # White text on the dark background
            fontsize='16',           # Larger font for the central node
            fontname=korean_font     # Explicit Korean-capable font
        )

        # Recursively attach the node hierarchy below the central node.
        add_nodes_and_edges(dot, 'central', data.get('nodes', []), current_depth=1, base_color=base_color)

        # Render via a temporary '.gv' source stem so the final artifact has a
        # single '.<output_format>' extension. (Passing a name that already
        # ends in the format suffix would make render() produce e.g.
        # 'x.png.png'.) cleanup=True deletes the intermediate source file.
        with NamedTemporaryFile(delete=False, suffix='.gv', prefix='synoptic_') as tmp:
            output_base = tmp.name[:-3]  # strip '.gv'; render() appends '.<format>'
        return dot.render(output_base, format=output_format, cleanup=True)

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
wbs_diagram_generator.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+
6
def generate_wbs_diagram(json_input: str, output_format: str) -> str:
    """
    Generates a Work Breakdown Structure (WBS) Diagram from JSON input.

    Args:
        json_input (str): A JSON string describing the WBS structure.
                          It must follow the Expected JSON Format Example below.
        output_format (str): Graphviz output format (e.g. 'png', 'svg').

    Expected JSON Format Example:
    {
        "project_title": "AI Model Development Project",
        "phases": [
            {
                "id": "phase_prep",
                "label": "Preparation",
                "tasks": [
                    {
                        "id": "task_1_1_vision",
                        "label": "Identify Vision",
                        "subtasks": [
                            {
                                "id": "subtask_1_1_1_design_staff",
                                "label": "Design & Staffing",
                                "sub_subtasks": [
                                    {
                                        "id": "ss_task_1_1_1_1_env_setup",
                                        "label": "Environment Setup",
                                        "sub_sub_subtasks": [
                                            {
                                                "id": "sss_task_1_1_1_1_1_lib_install",
                                                "label": "Install Libraries",
                                                "final_level_tasks": [
                                                    {"id": "ft_1_1_1_1_1_1_data_access", "label": "Grant Data Access"}
                                                ]
                                            }
                                        ]
                                    }
                                ]
                            }
                        ]
                    }
                ]
            },
            {
                "id": "phase_plan",
                "label": "Planning",
                "tasks": [
                    {
                        "id": "task_2_1_cost_analysis",
                        "label": "Cost Analysis",
                        "subtasks": [
                            {
                                "id": "subtask_2_1_1_benefit_analysis",
                                "label": "Benefit Analysis",
                                "sub_subtasks": [
                                    {
                                        "id": "ss_task_2_1_1_1_risk_assess",
                                        "label": "AI Risk Assessment",
                                        "sub_sub_subtasks": [
                                            {
                                                "id": "sss_task_2_1_1_1_1_model_selection",
                                                "label": "Model Selection",
                                                "final_level_tasks": [
                                                    {"id": "ft_2_1_1_1_1_1_data_strategy", "label": "Data Strategy"}
                                                ]
                                            }
                                        ]
                                    }
                                ]
                            }
                        ]
                    }
                ]
            },
            {
                "id": "phase_dev",
                "label": "Development",
                "tasks": [
                    {
                        "id": "task_3_1_change_mgmt",
                        "label": "Data Preprocessing",
                        "subtasks": [
                            {
                                "id": "subtask_3_1_1_implementation",
                                "label": "Feature Engineering",
                                "sub_subtasks": [
                                    {
                                        "id": "ss_task_3_1_1_1_beta_testing",
                                        "label": "Model Training",
                                        "sub_sub_subtasks": [
                                            {
                                                "id": "sss_task_3_1_1_1_1_other_task",
                                                "label": "Model Evaluation",
                                                "final_level_tasks": [
                                                    {"id": "ft_3_1_1_1_1_1_hyperparam_tune", "label": "Hyperparameter Tuning"}
                                                ]
                                            }
                                        ]
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ]
    }

    Returns:
        str: The filepath to the rendered image file (extension matches
             output_format), or a string starting with "Error:" on failure.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        if 'project_title' not in data or 'phases' not in data:
            raise ValueError("Missing required fields: project_title or phases")

        # Korean-capable font. When GDFONTPATH is set, Graphviz resolves this
        # as a font file name (extension omitted).
        korean_font = 'NanumGothic-Regular'

        dot = graphviz.Digraph(
            name='WBSDiagram',
            graph_attr={
                'rankdir': 'TB',         # Top-to-bottom hierarchy
                'splines': 'polyline',   # polyline instead of ortho (more robust layout)
                'bgcolor': 'white',      # White background
                'pad': '0.5',            # Padding
                'ranksep': '0.6',        # Vertical separation between ranks
                'nodesep': '0.5',        # Horizontal separation between nodes
                'fontname': korean_font, # Graph-wide Korean-capable font
                'charset': 'UTF-8'       # UTF-8 encoding
            },
            node_attr={
                'fontname': korean_font  # Default font for every node
            },
            edge_attr={
                'fontname': korean_font  # Default font for every edge label
            }
        )

        base_color = '#19191a'  # Hardcoded base color; children are lightened from it

        # Hoisted out of normalize_id so the module lookup happens once per
        # call instead of once per node id.
        import re

        def normalize_id(id_str):
            """Convert an arbitrary node id (possibly Korean) into a Graphviz-safe id."""
            # Keep only ASCII letters, digits and underscores.
            safe_id = re.sub(r'[^a-zA-Z0-9_]', '_', str(id_str))
            # Prefix ids that start with a digit.
            if safe_id and safe_id[0].isdigit():
                safe_id = 'n_' + safe_id
            # Fall back to a hash-derived id if nothing survived.
            if not safe_id:
                safe_id = 'node_' + str(hash(id_str))
            return safe_id

        # Project title node (root of the WBS tree).
        dot.node(
            'project_root',
            data['project_title'],
            shape='box',
            style='filled,rounded',
            fillcolor=base_color,
            fontcolor='white',
            fontsize='18',
            fontname=korean_font  # Explicit Korean-capable font
        )

        # Helpers: fill color and readable font color as a function of depth.
        def get_gradient_color(depth, base_hex_color, lightening_factor=0.12):
            """Lighten base_hex_color toward white proportionally to depth."""
            base_r = int(base_hex_color[1:3], 16)
            base_g = int(base_hex_color[3:5], 16)
            base_b = int(base_hex_color[5:7], 16)

            current_r = base_r + int((255 - base_r) * depth * lightening_factor)
            current_g = base_g + int((255 - base_g) * depth * lightening_factor)
            current_b = base_b + int((255 - base_b) * depth * lightening_factor)

            return f'#{min(255, current_r):02x}{min(255, current_g):02x}{min(255, current_b):02x}'

        def get_font_color_for_background(depth, base_hex_color, lightening_factor=0.12):
            """Pick white or black text depending on the background's luminance."""
            base_r = int(base_hex_color[1:3], 16)
            base_g = int(base_hex_color[3:5], 16)
            base_b = int(base_hex_color[5:7], 16)
            current_r = base_r + (255 - base_r) * depth * lightening_factor
            current_g = base_g + (255 - base_g) * depth * lightening_factor
            current_b = base_b + (255 - base_b) * depth * lightening_factor

            # Relative luminance (ITU-R BT.709 coefficients).
            luminance = (0.2126 * current_r + 0.7152 * current_g + 0.0722 * current_b) / 255
            return 'white' if luminance < 0.5 else 'black'

        def _add_wbs_nodes_recursive(parent_id, current_level_tasks, current_depth):
            """Emit one node+edge per task and recurse into its first child-list key."""
            for task_data in current_level_tasks:
                task_id = task_data.get('id')
                task_label = task_data.get('label')

                if not all([task_id, task_label]):
                    raise ValueError(f"Invalid task data at depth {current_depth}: {task_data}")

                # Normalize the id for Graphviz.
                safe_task_id = normalize_id(task_id)

                node_fill_color = get_gradient_color(current_depth, base_color)
                node_font_color = get_font_color_for_background(current_depth, base_color)
                font_size = max(9, 14 - (current_depth * 2))  # shrink with depth, floor at 9pt

                dot.node(
                    safe_task_id,
                    task_label,
                    shape='box',
                    style='filled,rounded',
                    fillcolor=node_fill_color,
                    fontcolor=node_font_color,
                    fontsize=str(font_size),
                    fontname=korean_font  # Explicit Korean-capable font
                )
                dot.edge(parent_id, safe_task_id, color='#4a4a4a', arrowhead='none', fontname=korean_font)

                # Recurse into the next nesting level. The schema uses a different
                # key per depth ('subtasks', 'sub_subtasks', ...); only the first
                # key found is followed. (Dropped the unused enumerate index from
                # the original loop.)
                next_level_keys = ['tasks', 'subtasks', 'sub_subtasks', 'sub_sub_subtasks', 'final_level_tasks']
                for key in next_level_keys:
                    if key in task_data and isinstance(task_data[key], list):
                        _add_wbs_nodes_recursive(safe_task_id, task_data[key], current_depth + 1)
                        break  # Only process the first found sub-level key

        # Process phases (level 1 below project_root).
        phase_depth = 1
        for phase in data['phases']:
            phase_id = phase.get('id')
            phase_label = phase.get('label')

            if not all([phase_id, phase_label]):
                raise ValueError(f"Invalid phase data: {phase}")

            # Normalize the id for Graphviz.
            safe_phase_id = normalize_id(phase_id)

            phase_fill_color = get_gradient_color(phase_depth, base_color)
            phase_font_color = get_font_color_for_background(phase_depth, base_color)
            font_size_phase = max(9, 14 - (phase_depth * 2))

            dot.node(
                safe_phase_id,
                phase_label,
                shape='box',
                style='filled,rounded',
                fillcolor=phase_fill_color,
                fontcolor=phase_font_color,
                fontsize=str(font_size_phase),
                fontname=korean_font  # Explicit Korean-capable font
            )
            dot.edge('project_root', safe_phase_id, color='#4a4a4a', arrowhead='none', fontname=korean_font)

            # Start the recursion for tasks under this phase.
            if 'tasks' in phase and isinstance(phase['tasks'], list):
                _add_wbs_nodes_recursive(safe_phase_id, phase['tasks'], phase_depth + 1)

        # Render. The temp file only reserves a unique path; render() is called
        # after the 'with' block so the handle is closed first (safer on
        # platforms where open files cannot be replaced).
        try:
            with NamedTemporaryFile(delete=False, suffix='.gv', prefix='wbs_') as tmp:
                output_filename = tmp.name[:-3]  # strip '.gv'; render() appends '.<format>'
            output_path = dot.render(output_filename, format=output_format, cleanup=True)
            return output_path
        except Exception as render_error:
            # Keep the render error short (first ';'-separated fragment only).
            return f"Error: Failed to render diagram - {str(render_error).split(';')[0]}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        # Keep the error message short: first line, capped at 100 characters.
        error_msg = str(e).split('\n')[0]
        if len(error_msg) > 100:
            error_msg = error_msg[:100] + "..."
        return f"Error: {error_msg}"