File size: 15,319 Bytes
aa98b19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2de095a
aa98b19
 
2de095a
 
 
 
 
 
 
 
aa98b19
 
2de095a
aa98b19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2de095a
aa98b19
2de095a
aa98b19
 
 
 
2de095a
aa98b19
2de095a
aa98b19
 
 
 
 
2de095a
aa98b19
2de095a
aa98b19
 
 
 
2de095a
aa98b19
 
2de095a
aa98b19
 
 
 
 
 
2de095a
 
aa98b19
 
 
2de095a
aa98b19
2de095a
aa98b19
 
2de095a
 
 
aa98b19
2de095a
aa98b19
 
 
 
 
 
 
 
2de095a
aa98b19
 
 
2de095a
aa98b19
 
 
 
2de095a
aa98b19
2de095a
aa98b19
 
 
 
 
2de095a
aa98b19
 
 
2de095a
aa98b19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2de095a
aa98b19
 
 
2de095a
aa98b19
 
 
 
2de095a
aa98b19
 
 
 
 
 
 
2de095a
aa98b19
 
 
 
 
 
 
 
 
 
 
 
2de095a
aa98b19
 
2de095a
 
 
 
 
 
 
aa98b19
 
2de095a
aa98b19
 
 
 
 
 
 
2de095a
aa98b19
 
 
2de095a
aa98b19
2de095a
aa98b19
 
2de095a
 
 
aa98b19
2de095a
aa98b19
 
 
 
 
 
 
 
2de095a
aa98b19
 
 
2de095a
aa98b19
 
2de095a
aa98b19
 
2de095a
aa98b19
 
 
 
 
 
 
 
 
 
 
2de095a
aa98b19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2de095a
aa98b19
 
 
 
 
 
 
2de095a
aa98b19
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
from typing import Any, Dict, List, Callable, Optional
from langchain_core.messages import BaseMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph.state import CompiledStateGraph
import uuid


def random_uuid():
    """Return a freshly generated version-4 UUID as a string."""
    return f"{uuid.uuid4()}"


def _print_node_header(node_name: str) -> None:
    """Print the separator banner shown when streamed output switches nodes."""
    print("\n" + "=" * 50)
    print(f"πŸ”„ Node: \033[1;36m{node_name}\033[0m πŸ”„")
    print("- " * 25)


def _print_text_items(items) -> None:
    """Print the 'text' field of each dict entry in an Anthropic-style content list."""
    for item in items:
        if isinstance(item, dict) and "text" in item:
            print(item["text"], end="", flush=True)


def _print_message_content(msg) -> None:
    """Print a message object's textual content; fall back to pretty_print()."""
    if hasattr(msg, "content"):
        if isinstance(msg.content, list):
            _print_text_items(msg.content)
        else:
            print(msg.content, end="", flush=True)
    else:
        msg.pretty_print()


def _print_update_value(value) -> None:
    """Print one value from a node's state-update dict, extracting message text where possible."""
    if isinstance(value, BaseMessage):
        _print_message_content(value)
    elif isinstance(value, list):
        for entry in value:
            if isinstance(entry, BaseMessage):
                _print_message_content(entry)
            elif isinstance(entry, dict) and "text" in entry:
                print(entry["text"], end="", flush=True)
            else:
                print(entry, end="", flush=True)
    elif isinstance(value, dict) and "text" in value:
        print(value["text"], end="", flush=True)
    else:
        print(value, end="", flush=True)


def _print_node_chunk(node_chunk) -> None:
    """Print an entire node chunk produced by 'updates' streaming mode."""
    if isinstance(node_chunk, dict):
        for value in node_chunk.values():
            _print_update_value(value)
    elif node_chunk is not None:
        if hasattr(node_chunk, "__iter__") and not isinstance(node_chunk, str):
            for item in node_chunk:
                if isinstance(item, dict) and "text" in item:
                    print(item["text"], end="", flush=True)
                else:
                    print(item, end="", flush=True)
        else:
            print(node_chunk, end="", flush=True)


async def astream_graph(
    graph: CompiledStateGraph,
    inputs: dict,
    config: Optional[RunnableConfig] = None,
    node_names: Optional[List[str]] = None,
    callback: Optional[Callable] = None,
    stream_mode: str = "messages",
    include_subgraphs: bool = False,
) -> Dict[str, Any]:
    """
    Asynchronously stream the execution of a LangGraph, printing each chunk
    (or forwarding it to `callback`).

    Args:
        graph (CompiledStateGraph): The compiled LangGraph object to execute.
        inputs (dict): Input dictionary to pass to the graph.
        config (Optional[RunnableConfig]): Execution configuration (optional).
        node_names (Optional[List[str]], optional): Node names whose output
            should be processed. None (the default) or an empty list means
            "all nodes"; the None default avoids the shared-mutable-default
            pitfall while remaining backward compatible with the old `[]`.
        callback (Optional[Callable], optional): Callback for each chunk;
            receives {"node": str, "content": Any} and may be sync or async.
        stream_mode (str, optional): "messages" or "updates". Default "messages".
        include_subgraphs (bool, optional): Whether to include subgraph output
            ("updates" mode only). Default False.

    Returns:
        Dict[str, Any]: Metadata for the last chunk that was seen.

    Raises:
        ValueError: If `stream_mode` is neither "messages" nor "updates".
    """
    config = config or {}
    node_names = node_names or []
    final_result = {}
    prev_node = ""

    if stream_mode == "messages":
        async for chunk_msg, metadata in graph.astream(
            inputs, config, stream_mode=stream_mode
        ):
            curr_node = metadata["langgraph_node"]
            final_result = {
                "node": curr_node,
                "content": chunk_msg,
                "metadata": metadata,
            }

            # Process only when no filter is set or the node passes the filter.
            if not node_names or curr_node in node_names:
                if callback is not None:
                    result = callback({"node": curr_node, "content": chunk_msg})
                    # Transparently await async callbacks.
                    if hasattr(result, "__await__"):
                        await result
                else:
                    # Print a separator only when the emitting node changes.
                    if curr_node != prev_node:
                        _print_node_header(curr_node)

                    # Token chunks: extract plain text from Anthropic-style
                    # list content or plain string content; other content
                    # shapes are intentionally not printed (original behavior).
                    if hasattr(chunk_msg, "content"):
                        if isinstance(chunk_msg.content, list):
                            _print_text_items(chunk_msg.content)
                        elif isinstance(chunk_msg.content, str):
                            print(chunk_msg.content, end="", flush=True)
                    else:
                        print(chunk_msg, end="", flush=True)

                prev_node = curr_node

    elif stream_mode == "updates":
        # Some graphs (e.g. REACT agents) yield a bare dict instead of the
        # (namespace, chunk_dict) tuple, so both shapes are accepted.
        async for chunk in graph.astream(
            inputs, config, stream_mode=stream_mode, subgraphs=include_subgraphs
        ):
            if isinstance(chunk, tuple) and len(chunk) == 2:
                # Expected format: (namespace, chunk_dict).
                namespace, node_chunks = chunk
            else:
                # Bare dict: treat as root-graph output.
                namespace = []
                node_chunks = chunk

            if isinstance(node_chunks, dict):
                for node_name, node_chunk in node_chunks.items():
                    final_result = {
                        "node": node_name,
                        "content": node_chunk,
                        "namespace": namespace,
                    }

                    # Skip nodes excluded by the filter (when one is set).
                    if node_names and node_name not in node_names:
                        continue

                    if callback is not None:
                        result = callback({"node": node_name, "content": node_chunk})
                        if hasattr(result, "__await__"):
                            await result
                    else:
                        # Separator only on node change (same as messages mode).
                        if node_name != prev_node:
                            _print_node_header(node_name)
                        _print_node_chunk(node_chunk)

                    prev_node = node_name
            else:
                # Non-dict payload: dump it raw.
                print("\n" + "=" * 50)
                print("πŸ”„ Raw output πŸ”„")
                print("- " * 25)
                print(node_chunks, end="", flush=True)
                final_result = {"content": node_chunks}

    else:
        raise ValueError(
            f"Invalid stream_mode: {stream_mode}. Must be 'messages' or 'updates'."
        )

    # Return the last chunk's metadata as the final result.
    return final_result


async def ainvoke_graph(
    graph: CompiledStateGraph,
    inputs: dict,
    config: Optional[RunnableConfig] = None,
    node_names: Optional[List[str]] = None,
    callback: Optional[Callable] = None,
    include_subgraphs: bool = True,
) -> Dict[str, Any]:
    """
    Run a LangGraph to completion, streaming "updates" and printing each
    node's full output (or forwarding it to `callback`).

    Args:
        graph (CompiledStateGraph): The compiled LangGraph object to execute.
        inputs (dict): Input dictionary to pass to the graph.
        config (Optional[RunnableConfig]): Execution configuration (optional).
        node_names (Optional[List[str]], optional): Node names whose output
            should be processed. None (the default) or an empty list means
            "all nodes"; the None default avoids the shared-mutable-default
            pitfall while remaining backward compatible with the old `[]`.
        callback (Optional[Callable], optional): Callback for each chunk;
            receives {"node": str, "content": Any} and may be sync or async.
        include_subgraphs (bool, optional): Whether to include subgraph
            output. Default True.

    Returns:
        Dict[str, Any]: Final result (the last node's output).
    """
    config = config or {}
    node_names = node_names or []
    final_result = {}

    def format_namespace(namespace):
        # Namespace entries look like "<node>:<task_id>"; show only the node
        # part, or "root graph" when the namespace is empty.
        return namespace[-1].split(":")[0] if namespace else "root graph"

    # stream_mode="updates" with subgraphs yields subgraph output as well.
    async for chunk in graph.astream(
        inputs, config, stream_mode="updates", subgraphs=include_subgraphs
    ):
        # Some graphs (e.g. REACT agents) yield a bare dict instead of the
        # (namespace, chunk_dict) tuple, so both shapes are accepted.
        if isinstance(chunk, tuple) and len(chunk) == 2:
            # Expected format: (namespace, chunk_dict).
            namespace, node_chunks = chunk
        else:
            # Bare dict: treat as root-graph output.
            namespace = []
            node_chunks = chunk

        if isinstance(node_chunks, dict):
            for node_name, node_chunk in node_chunks.items():
                final_result = {
                    "node": node_name,
                    "content": node_chunk,
                    "namespace": namespace,
                }

                # Skip nodes excluded by the filter (when one is set).
                if node_names and node_name not in node_names:
                    continue

                if callback is not None:
                    result = callback({"node": node_name, "content": node_chunk})
                    # Transparently await async callbacks.
                    if hasattr(result, "__await__"):
                        await result
                else:
                    print("\n" + "=" * 50)
                    formatted_namespace = format_namespace(namespace)
                    if formatted_namespace == "root graph":
                        print(f"πŸ”„ Node: \033[1;36m{node_name}\033[0m πŸ”„")
                    else:
                        print(
                            f"πŸ”„ Node: \033[1;36m{node_name}\033[0m in [\033[1;33m{formatted_namespace}\033[0m] πŸ”„"
                        )
                    print("- " * 25)

                    # Print the node chunk's contents, pretty-printing
                    # messages and expanding lists/dicts one level deep.
                    if isinstance(node_chunk, dict):
                        for k, v in node_chunk.items():
                            if isinstance(v, BaseMessage):
                                v.pretty_print()
                            elif isinstance(v, list):
                                for list_item in v:
                                    if isinstance(list_item, BaseMessage):
                                        list_item.pretty_print()
                                    else:
                                        print(list_item)
                            elif isinstance(v, dict):
                                for node_chunk_key, node_chunk_value in v.items():
                                    print(f"{node_chunk_key}:\n{node_chunk_value}")
                            else:
                                print(f"\033[1;32m{k}\033[0m:\n{v}")
                    elif node_chunk is not None:
                        if hasattr(node_chunk, "__iter__") and not isinstance(
                            node_chunk, str
                        ):
                            for item in node_chunk:
                                print(item)
                        else:
                            print(node_chunk)
                    print("=" * 50)
        else:
            # Non-dict payload: dump it raw.
            print("\n" + "=" * 50)
            print("πŸ”„ Raw output πŸ”„")
            print("- " * 25)
            print(node_chunks)
            print("=" * 50)
            final_result = {"content": node_chunks}

    # Return the last node's output as the final result.
    return final_result