applied-ai-018 committed on
Commit
0a85c08
·
verified ·
1 Parent(s): af75000

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/asteroidal.py +170 -0
  2. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/broadcasting.py +155 -0
  3. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/chains.py +172 -0
  4. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/cycles.py +1231 -0
  5. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/d_separation.py +722 -0
  6. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/distance_measures.py +951 -0
  7. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/distance_regular.py +238 -0
  8. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/euler.py +469 -0
  9. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__init__.py +11 -0
  10. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/boykovkolmogorov.py +369 -0
  16. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/capacityscaling.py +407 -0
  17. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/dinitz_alg.py +237 -0
  18. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/edmondskarp.py +241 -0
  19. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/gomory_hu.py +177 -0
  20. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/maxflow.py +601 -0
  21. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/mincost.py +356 -0
  22. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/networksimplex.py +666 -0
  23. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/preflowpush.py +425 -0
  24. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py +300 -0
  25. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py +128 -0
  26. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow.py +560 -0
  27. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py +157 -0
  28. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_mincost.py +476 -0
  29. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py +387 -0
  30. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/utils.py +189 -0
  31. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/graph_hashing.py +322 -0
  32. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/graphical.py +483 -0
  33. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/hybrid.py +195 -0
  34. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/node_classification.py +218 -0
  35. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/planarity.py +1402 -0
  36. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/polynomials.py +305 -0
  37. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/richclub.py +138 -0
  38. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/similarity.py +1777 -0
  48. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/simple_paths.py +937 -0
  49. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/structuralholes.py +283 -0
  50. env-llmeval/lib/python3.10/site-packages/networkx/algorithms/threshold.py +979 -0
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/asteroidal.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Algorithms for asteroidal triples and asteroidal numbers in graphs.
3
+
4
+ An asteroidal triple in a graph G is a set of three non-adjacent vertices
5
+ u, v and w such that there exist a path between any two of them that avoids
6
+ closed neighborhood of the third. More formally, v_j, v_k belongs to the same
7
+ connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood
8
+ of v_i. A graph which does not contain any asteroidal triples is called
9
+ an AT-free graph. The class of AT-free graphs is a graph class for which
10
+ many NP-complete problems are solvable in polynomial time. Amongst them,
11
+ independent set and coloring.
12
+ """
13
+ import networkx as nx
14
+ from networkx.utils import not_implemented_for
15
+
16
+ __all__ = ["is_at_free", "find_asteroidal_triple"]
17
+
18
+
19
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def find_asteroidal_triple(G):
    r"""Find an asteroidal triple in the given graph.

    An asteroidal triple is a triple of non-adjacent vertices such that
    there exists a path between any two of them which avoids the closed
    neighborhood of the third. It checks all independent triples of vertices
    and whether they are an asteroidal triple or not. This is done with the
    help of a data structure called a component structure.
    A component structure encodes information about which vertices belongs to
    the same connected component when the closed neighborhood of a given vertex
    is removed from the graph. The algorithm used to check is the trivial
    one, outlined in [1]_, which has a runtime of
    :math:`O(|V||\overline{E} + |V||E|)`, where the second term is the
    creation of the component structure.

    Parameters
    ----------
    G : NetworkX Graph
        The graph to check whether is AT-free or not

    Returns
    -------
    list or None
        An asteroidal triple is returned as a list of nodes. If no asteroidal
        triple exists, i.e. the graph is AT-free, then None is returned.

    Notes
    -----
    The component structure and the algorithm is described in [1]_. The current
    implementation implements the trivial algorithm for simple graphs.

    References
    ----------
    .. [1] Ekkehard Köhler,
       "Recognizing Graphs without asteroidal triples",
       Journal of Discrete Algorithms 2, pages 439-452, 2004.
       https://www.sciencedirect.com/science/article/pii/S157086670400019X
    """
    V = set(G.nodes)

    if len(V) < 6:
        # An asteroidal triple cannot exist in a graph with 5 or less vertices.
        return None

    component_structure = create_component_structure(G)
    # Non-adjacent vertex pairs are exactly the edges of the complement graph.
    E_complement = set(nx.complement(G).edges)

    for u, v in E_complement:
        u_neighborhood = set(G[u]).union([u])
        v_neighborhood = set(G[v]).union([v])
        union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
        for w in V - union_of_neighborhoods:
            # Check for each pair of vertices whether they belong to the
            # same connected component when the closed neighborhood of the
            # third is removed.
            if (
                component_structure[u][v] == component_structure[u][w]
                and component_structure[v][u] == component_structure[v][w]
                and component_structure[w][u] == component_structure[w][v]
            ):
                return [u, v, w]
    return None
90
+
91
+
92
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def is_at_free(G):
    """Check if a graph is AT-free.

    The method uses the `find_asteroidal_triple` method to recognize
    an AT-free graph. If that search yields no asteroidal triple the
    graph is AT-free and True is returned; if it finds one, the graph
    is not AT-free and False is returned.

    Parameters
    ----------
    G : NetworkX Graph
        The graph to check whether is AT-free or not.

    Returns
    -------
    bool
        True if G is AT-free and False otherwise.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
    >>> nx.is_at_free(G)
    True

    >>> G = nx.cycle_graph(6)
    >>> nx.is_at_free(G)
    False
    """
    triple = find_asteroidal_triple(G)
    return triple is None
124
+
125
+
126
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def create_component_structure(G):
    r"""Create component structure for G.

    A *component structure* is an `nxn` array, denoted `c`, where `n` is
    the number of vertices, where each row and column corresponds to a vertex.

    .. math::
        c_{uv} = \begin{cases} 0, if v \in N[u] \\
            k, if v \in component k of G \setminus N[u] \end{cases}

    Where `k` is an arbitrary label for each component. The structure is used
    to simplify the detection of asteroidal triples.

    Parameters
    ----------
    G : NetworkX Graph
        Undirected, simple graph.

    Returns
    -------
    component_structure : dictionary
        A dictionary of dictionaries, keyed by pairs of vertices.

    """
    component_structure = {}
    for v in set(G.nodes):
        closed_neighborhood = set(G[v]) | {v}
        # Everything inside N[v] gets the reserved label 0.
        row = dict.fromkeys(closed_neighborhood, 0)

        # Label the connected components of G - N[v] with 1, 2, ...
        remainder = G.subgraph(set(G.nodes) - closed_neighborhood)
        for label, component in enumerate(nx.connected_components(remainder), start=1):
            for u in component:
                row[u] = label

        component_structure[v] = row

    return component_structure
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/broadcasting.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Routines to calculate the broadcast time of certain graphs.
2
+
3
+ Broadcasting is an information dissemination problem in which a node in a graph,
4
+ called the originator, must distribute a message to all other nodes by placing
5
+ a series of calls along the edges of the graph. Once informed, other nodes aid
6
+ the originator in distributing the message.
7
+
8
+ The broadcasting must be completed as quickly as possible subject to the
9
+ following constraints:
10
+ - Each call requires one unit of time.
11
+ - A node can only participate in one call per unit of time.
12
+ - Each call only involves two adjacent nodes: a sender and a receiver.
13
+ """
14
+
15
+ import networkx as nx
16
+ from networkx import NetworkXError
17
+ from networkx.utils import not_implemented_for
18
+
19
+ __all__ = [
20
+ "tree_broadcast_center",
21
+ "tree_broadcast_time",
22
+ ]
23
+
24
+
25
+ def _get_max_broadcast_value(G, U, v, values):
26
+ adj = sorted(set(G.neighbors(v)) & U, key=values.get, reverse=True)
27
+ return max(values[u] + i for i, u in enumerate(adj, start=1))
28
+
29
+
30
+ def _get_broadcast_centers(G, v, values, target):
31
+ adj = sorted(G.neighbors(v), key=values.get, reverse=True)
32
+ j = next(i for i, u in enumerate(adj, start=1) if values[u] + i == target)
33
+ return set([v] + adj[:j])
34
+
35
+
36
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def tree_broadcast_center(G):
    """Return the Broadcast Center of the tree `G`.

    The broadcast center of a graph G denotes the set of nodes having
    minimum broadcast time [1]_. This is a linear algorithm for determining
    the broadcast center of a tree with ``N`` nodes, as a by-product it also
    determines the broadcast time from the broadcast center.

    Parameters
    ----------
    G : undirected graph
        The graph should be an undirected tree

    Returns
    -------
    BC : (int, set) tuple
        minimum broadcast number of the tree, set of broadcast centers

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed or is a multigraph.
    NetworkXError
        If the graph is not a tree.

    References
    ----------
    .. [1] Slater, P.J., Cockayne, E.J., Hedetniemi, S.T,
       Information dissemination in trees. SIAM J.Comput. 10(4), 692–701 (1981)
    """
    # Assert that the graph G is a tree.
    # BUG FIX: the exception was previously constructed but never raised,
    # so non-tree input silently fell through into the algorithm below.
    if not nx.is_tree(G):
        raise NetworkXError("Input graph is not a tree")
    # step 0: trivial trees are handled directly.
    if G.number_of_nodes() == 2:
        return 1, set(G.nodes())
    if G.number_of_nodes() == 1:
        return 0, set(G.nodes())

    # step 1: leaves of G have broadcast value 0; peel them off.
    U = {node for node, deg in G.degree if deg == 1}
    values = {n: 0 for n in U}
    T = G.copy()
    T.remove_nodes_from(U)

    # step 2: nodes that became leaves of T start with value degree - 1.
    W = {node for node, deg in T.degree if deg == 1}
    values.update((w, G.degree[w] - 1) for w in W)

    # step 3: repeatedly contract the tree toward its broadcast center.
    while T.number_of_nodes() >= 2:
        # step 4: pick the candidate leaf with the smallest value.
        w = min(W, key=lambda n: values[n])
        v = next(T.neighbors(w))

        # step 5: move w into the processed set and drop it from T.
        U.add(w)
        W.remove(w)
        T.remove_node(w)

        # step 6: if v became a leaf, its broadcast value is now known.
        if T.degree(v) == 1:
            # update t(v)
            values.update({v: _get_max_broadcast_value(G, U, v, values)})
            W.add(v)

    # step 7: the single remaining node yields the broadcast number.
    v = nx.utils.arbitrary_element(T)
    b_T = _get_max_broadcast_value(G, U, v, values)
    return b_T, _get_broadcast_centers(G, v, values, b_T)
107
+
108
+
109
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def tree_broadcast_time(G, node=None):
    """Return the Broadcast Time of the tree `G`.

    The minimum broadcast time of a node is defined as the minimum amount
    of time required to complete broadcasting starting from the
    originator. The broadcast time of a graph is the maximum over
    all nodes of the minimum broadcast time from that node [1]_.
    This function returns the minimum broadcast time of `node`.
    If `node` is None the broadcast time for the graph is returned.

    Parameters
    ----------
    G : undirected graph
        The graph should be an undirected tree
    node: int, optional
        index of starting node. If `None`, the algorithm returns the broadcast
        time of the tree.

    Returns
    -------
    BT : int
        Broadcast Time of a node in a tree

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed or is a multigraph.

    References
    ----------
    .. [1] Harutyunyan, H. A. and Li, Z.
        "A Simple Construction of Broadcast Graphs."
        In Computing and Combinatorics. COCOON 2019
        (Ed. D. Z. Du and C. Tian.) Springer, pp. 240-253, 2019.
    """
    b_T, b_C = tree_broadcast_center(G)
    if node is not None:
        # Broadcast from `node`: reach the nearest center, then broadcast.
        return b_T + min(nx.shortest_path_length(G, node, u) for u in b_C)
    # Whole-tree broadcast time: worst case over all originators, i.e. the
    # largest distance from any node to its nearest broadcast center.
    nearest_center_dist = {v: len(G) for v in G}
    for center in b_C:
        for v, d in nx.shortest_path_length(G, center).items():
            nearest_center_dist[v] = min(nearest_center_dist[v], d)
    return b_T + max(nearest_center_dist.values())
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/chains.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for finding chains in a graph."""
2
+
3
+ import networkx as nx
4
+ from networkx.utils import not_implemented_for
5
+
6
+ __all__ = ["chain_decomposition"]
7
+
8
+
9
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def chain_decomposition(G, root=None):
    """Returns the chain decomposition of a graph.

    The *chain decomposition* of a graph with respect a depth-first
    search tree is a set of cycles or paths derived from the set of
    fundamental cycles of the tree in the following manner. Consider
    each fundamental cycle with respect to the given tree, represented
    as a list of edges beginning with the nontree edge oriented away
    from the root of the tree. For each fundamental cycle, if it
    overlaps with any previous fundamental cycle, just take the initial
    non-overlapping segment, which is a path instead of a cycle. Each
    cycle or path is called a *chain*. For more information, see [1]_.

    Parameters
    ----------
    G : undirected graph

    root : node (optional)
        A node in the graph `G`. If specified, only the chain
        decomposition for the connected component containing this node
        will be returned. This node indicates the root of the depth-first
        search tree.

    Yields
    ------
    chain : list
        A list of edges representing a chain. There is no guarantee on
        the orientation of the edges in each chain (for example, if a
        chain includes the edge joining nodes 1 and 2, the chain may
        include either (1, 2) or (2, 1)).

    Raises
    ------
    NodeNotFound
        If `root` is not in the graph `G`.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> list(nx.chain_decomposition(G))
    [[(4, 5), (5, 3), (3, 4)]]

    Notes
    -----
    The worst-case running time of this implementation is linear in the
    number of nodes and number of edges [1]_.

    References
    ----------
    .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
       and 2-edge-connectivity." *Information Processing Letters*,
       113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016>

    """

    def _dfs_cycle_forest(G, root=None):
        """Builds a directed graph composed of cycles from the given graph.

        `G` is an undirected simple graph. `root` is a node in the graph
        from which the depth-first search is started.

        This function returns both the depth-first search cycle graph
        (as a :class:`~networkx.DiGraph`) and the list of nodes in
        depth-first preorder. The depth-first search cycle graph is a
        directed graph whose edges are the edges of `G` oriented toward
        the root if the edge is a tree edge and away from the root if
        the edge is a non-tree edge. If `root` is not specified, this
        performs a depth-first search on each connected component of `G`
        and returns a directed forest instead.

        If `root` is not in the graph, this raises :exc:`KeyError`.

        """
        # Create a directed graph from the depth-first search tree with
        # root node `root` in which tree edges are directed toward the
        # root and nontree edges are directed away from the root. For
        # each node with an incident nontree edge, this creates a
        # directed cycle starting with the nontree edge and returning to
        # that node.
        #
        # The `parent` node attribute stores the parent of each node in
        # the DFS tree. The `nontree` edge attribute indicates whether
        # the edge is a tree edge or a nontree edge.
        #
        # We also store the order of the nodes found in the depth-first
        # search in the `nodes` list.
        H = nx.DiGraph()
        nodes = []
        for u, v, d in nx.dfs_labeled_edges(G, source=root):
            if d == "forward":
                # `dfs_labeled_edges()` yields (root, root, 'forward')
                # if it is beginning the search on a new connected
                # component.
                if u == v:
                    H.add_node(v, parent=None)
                    nodes.append(v)
                else:
                    H.add_node(v, parent=u)
                    H.add_edge(v, u, nontree=False)
                    nodes.append(v)
            # `dfs_labeled_edges` considers nontree edges in both
            # orientations, so we need to not add the edge if its
            # other orientation has been added.
            elif d == "nontree" and v not in H[u]:
                H.add_edge(v, u, nontree=True)
            else:
                # Do nothing on 'reverse' edges; we only care about
                # forward and nontree edges.
                pass
        return H, nodes

    def _build_chain(G, u, v, visited):
        """Generate the chain starting from the given nontree edge.

        `G` is a DFS cycle graph as constructed by
        :func:`_dfs_cycle_forest`. The edge (`u`, `v`) is a nontree edge
        that begins a chain. `visited` is a set representing the nodes
        in `G` that have already been visited.

        This function yields the edges in an initial segment of the
        fundamental cycle of `G` starting with the nontree edge (`u`,
        `v`) that includes all the edges up until the first node that
        appears in `visited`. The tree edges are given by the 'parent'
        node attribute. The `visited` set is updated to add each node in
        an edge yielded by this function.

        """
        # Follow `parent` pointers (tree edges) until hitting a node that
        # an earlier chain already covered; the closing edge is still
        # yielded so the chain ends on a visited node.
        while v not in visited:
            yield u, v
            visited.add(v)
            u, v = v, G.nodes[v]["parent"]
        yield u, v

    # Check if the root is in the graph G. If not, raise NodeNotFound
    if root is not None and root not in G:
        raise nx.NodeNotFound(f"Root node {root} is not in graph")

    # Create a directed version of H that has the DFS edges directed
    # toward the root and the nontree edges directed away from the root
    # (in each connected component).
    H, nodes = _dfs_cycle_forest(G, root)

    # Visit the nodes again in DFS order. For each node, and for each
    # nontree edge leaving that node, compute the fundamental cycle for
    # that nontree edge starting with that edge. If the fundamental
    # cycle overlaps with any visited nodes, just take the prefix of the
    # cycle up to the point of visited nodes.
    #
    # We repeat this process for each connected component (implicitly,
    # since `nodes` already has a list of the nodes grouped by connected
    # component).
    visited = set()
    for u in nodes:
        visited.add(u)
        # For each nontree edge going out of node u...
        edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d)
        for u, v in edges:
            # Create the cycle or cycle prefix starting with the
            # nontree edge.
            chain = list(_build_chain(H, u, v, visited))
            yield chain
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/cycles.py ADDED
@@ -0,0 +1,1231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ========================
3
+ Cycle finding algorithms
4
+ ========================
5
+ """
6
+
7
+ from collections import Counter, defaultdict
8
+ from itertools import combinations, product
9
+ from math import inf
10
+
11
+ import networkx as nx
12
+ from networkx.utils import not_implemented_for, pairwise
13
+
14
+ __all__ = [
15
+ "cycle_basis",
16
+ "simple_cycles",
17
+ "recursive_simple_cycles",
18
+ "find_cycle",
19
+ "minimum_cycle_basis",
20
+ "chordless_cycles",
21
+ "girth",
22
+ ]
23
+
24
+
25
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def cycle_basis(G, root=None):
    """Returns a list of cycles which form a basis for cycles of G.

    A basis for cycles of a network is a minimal collection of
    cycles such that any cycle in the network can be written
    as a sum of cycles in the basis. Here summation of cycles
    is defined as "exclusive or" of the edges. Cycle bases are
    useful, e.g. when deriving equations for electric circuits
    using Kirchhoff's Laws.

    Parameters
    ----------
    G : NetworkX Graph
    root : node, optional
        Specify starting node for basis.

    Returns
    -------
    A list of cycle lists. Each cycle list is a list of nodes
    which forms a cycle (loop) in G.

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_cycle(G, [0, 1, 2, 3])
    >>> nx.add_cycle(G, [0, 3, 4, 5])
    >>> nx.cycle_basis(G, 0)
    [[3, 4, 5, 0], [1, 2, 3, 0]]

    Notes
    -----
    This is adapted from algorithm CACM 491 [1]_.

    References
    ----------
    .. [1] Paton, K. An algorithm for finding a fundamental set of
       cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.

    See Also
    --------
    simple_cycles
    minimum_cycle_basis
    """
    gnodes = dict.fromkeys(G)  # set-like object that maintains node order
    cycles = []
    while gnodes:  # loop over connected components
        if root is None:
            root = gnodes.popitem()[0]
        stack = [root]
        # pred maps each node to its parent in the spanning tree;
        # used[n] records neighbors of n already examined from n's side.
        pred = {root: root}
        used = {root: set()}
        while stack:  # walk the spanning tree finding cycles
            z = stack.pop()  # use last-in so cycles easier to find
            zused = used[z]
            for nbr in G[z]:
                if nbr not in used:  # new node
                    pred[nbr] = z
                    stack.append(nbr)
                    used[nbr] = {z}
                elif nbr == z:  # self loops
                    cycles.append([z])
                elif nbr not in zused:  # found a cycle
                    # Walk predecessors of z up the tree until reaching a
                    # node already seen from nbr's side; that closes the cycle.
                    pn = used[nbr]
                    cycle = [nbr, z]
                    p = pred[z]
                    while p not in pn:
                        cycle.append(p)
                        p = pred[p]
                    cycle.append(p)
                    cycles.append(cycle)
                    used[nbr].add(z)
        # Remove this component's nodes before moving to the next one.
        for node in pred:
            gnodes.pop(node, None)
        root = None
    return cycles
103
+
104
+
105
+ @nx._dispatchable
106
def simple_cycles(G, length_bound=None):
    """Find simple cycles (elementary circuits) of a graph.

    A `simple cycle`, or `elementary circuit`, is a closed path where
    no node appears twice. In a directed graph, two simple cycles are distinct
    if they are not cyclic permutations of each other. In an undirected graph,
    two simple cycles are distinct if they are not cyclic permutations of each
    other nor of the other's reversal.

    Optionally, the cycles are bounded in length. In the unbounded case, we use
    a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. In
    the bounded case, we use a version of the algorithm of Gupta and
    Suzumura[2]_. There may be better algorithms for some cases [3]_ [4]_ [5]_.

    The algorithms of Johnson, and Gupta and Suzumura, are enhanced by some
    well-known preprocessing techniques. When G is directed, we restrict our
    attention to strongly connected components of G, generate all simple cycles
    containing a certain node, remove that node, and further decompose the
    remainder into strongly connected components. When G is undirected, we
    restrict our attention to biconnected components, generate all simple cycles
    containing a particular edge, remove that edge, and further decompose the
    remainder into biconnected components.

    Note that multigraphs are supported by this function -- and in undirected
    multigraphs, a pair of parallel edges is considered a cycle of length 2.
    Likewise, self-loops are considered to be cycles of length 1. We define
    cycles as sequences of nodes; so the presence of loops and parallel edges
    does not change the number of simple cycles in a graph.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph

    length_bound : int or None, optional (default=None)
        If length_bound is an int, generate all simple cycles of G with length at
        most length_bound. Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    Examples
    --------
    >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]
    >>> G = nx.DiGraph(edges)
    >>> sorted(nx.simple_cycles(G))
    [[0], [0, 1, 2], [0, 2], [1, 2], [2]]

    To filter the cycles so that they don't include certain nodes or edges,
    copy your graph and eliminate those nodes or edges before calling.
    For example, to exclude self-loops from the above example:

    >>> H = G.copy()
    >>> H.remove_edges_from(nx.selfloop_edges(G))
    >>> sorted(nx.simple_cycles(H))
    [[0, 1, 2], [0, 2], [1, 2]]

    Notes
    -----
    When length_bound is None, the time complexity is $O((n+e)(c+1))$ for $n$
    nodes, $e$ edges and $c$ simple circuits. Otherwise, when length_bound > 1,
    the time complexity is $O((c+n)(k-1)d^k)$ where $d$ is the average degree of
    the nodes of G and $k$ = length_bound.

    Raises
    ------
    ValueError
        when length_bound < 0.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007
    .. [2] Finding All Bounded-Length Simple Cycles in a Directed Graph
       A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094
    .. [3] Enumerating the cycles of a digraph: a new preprocessing strategy.
       G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.
    .. [4] A search strategy for the elementary cycles of a directed graph.
       J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
       v. 16, no. 2, 192-204, 1976.
    .. [5] Optimal Listing of Cycles and st-Paths in Undirected Graphs
       R. Ferreira and R. Grossi and A. Marino and N. Pisanti and R. Rizzi and
       G. Sacomoto https://arxiv.org/abs/1205.2766

    See Also
    --------
    cycle_basis
    chordless_cycles
    """

    # A bound of 0 admits no cycles at all; a negative bound is nonsense.
    if length_bound is not None:
        if length_bound == 0:
            return
        elif length_bound < 0:
            raise ValueError("length bound must be non-negative")

    directed = G.is_directed()
    # Self-loops are length-1 cycles by definition; emit them up front.
    yield from ([v] for v, Gv in G.adj.items() if v in Gv)

    if length_bound is not None and length_bound == 1:
        return

    # In an undirected multigraph, a pair of parallel edges forms a 2-cycle.
    # The `visited` set ensures each unordered pair {u, v} is reported once.
    if G.is_multigraph() and not directed:
        visited = set()
        for u, Gu in G.adj.items():
            multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
            yield from ([u, v] for v, m in multiplicity if m > 1)
            visited.add(u)

    # explicitly filter out loops; implicitly filter out parallel edges
    if directed:
        G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
    else:
        G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)

    # this case is not strictly necessary but improves performance
    if length_bound is not None and length_bound == 2:
        if directed:
            # A directed 2-cycle is a pair of antiparallel edges (u, v), (v, u).
            visited = set()
            for u, Gu in G.adj.items():
                yield from (
                    [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u)
                )
                visited.add(u)
        return

    # Longer cycles are handled by component-decomposing dispatchers.
    if directed:
        yield from _directed_cycle_search(G, length_bound)
    else:
        yield from _undirected_cycle_search(G, length_bound)
239
+
240
+
241
def _directed_cycle_search(G, length_bound):
    """Dispatch helper of `simple_cycles` for directed graphs.

    Cycles are enumerated by repeated binary partition: pick a node ``v``
    on some cycle, emit every cycle through ``v``, then recurse on the
    graph without ``v``.  Concretely:

    1. Compute the strongly connected components of G.
    2. Pop a component C with at least two nodes and pick a node v in it.
    3. Yield every simple cycle of G[C] that passes through v (bounded by
       ``length_bound`` when it is not None).
    4. Remove v and push the nontrivial strongly connected components of
       the remainder back onto the work list.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph; it is mutated (nodes are removed) during the search.

    length_bound : int or None
        If an int, only cycles of length at most length_bound are generated;
        otherwise all simple cycles are generated.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.
    """

    work = [comp for comp in nx.strongly_connected_components(G) if len(comp) >= 2]
    while work:
        comp = work.pop()
        sub = G.subgraph(comp)
        root = next(iter(comp))
        if length_bound is None:
            yield from _johnson_cycle_search(sub, [root])
        else:
            yield from _bounded_cycle_search(sub, [root], length_bound)
        # Remove `root` only after the search, so cycles through it are found;
        # `sub` is a view, so the deletion is reflected before re-decomposing.
        G.remove_node(root)
        work.extend(
            comp for comp in nx.strongly_connected_components(sub) if len(comp) >= 2
        )
289
+
290
+
291
def _undirected_cycle_search(G, length_bound):
    """Dispatch helper of `simple_cycles` for undirected graphs.

    Cycles are enumerated by repeated binary partition: pick an edge
    ``(u, v)`` on some cycle, emit every cycle through it, then recurse on
    the graph without that edge.  Concretely:

    1. Compute the biconnected components of G.
    2. Pop a component C with at least three nodes and pick an edge (u, v)
       of G[C].
    3. Yield every (v -> u) simple path of G[C] \\ (u, v), each of which
       closes into a cycle via (u, v) (bounded by ``length_bound`` when it
       is not None).
    4. Push the nontrivial biconnected components of the remainder back
       onto the work list.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph; it is mutated (edges are removed) during the
        search.

    length_bound : int or None
        If an int, only cycles of length at most length_bound are generated;
        otherwise all simple cycles are generated.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.
    """

    work = [comp for comp in nx.biconnected_components(G) if len(comp) >= 3]
    while work:
        comp = work.pop()
        sub = G.subgraph(comp)
        seed_edge = list(next(iter(sub.edges)))
        # Remove (u, v) before searching, so the search cannot walk straight
        # back along it and report the fake 3-cycle [u, v, u].
        G.remove_edge(*seed_edge)
        if length_bound is None:
            yield from _johnson_cycle_search(sub, seed_edge)
        else:
            yield from _bounded_cycle_search(sub, seed_edge, length_bound)
        work.extend(comp for comp in nx.biconnected_components(sub) if len(comp) >= 3)
339
+
340
+
341
+ class _NeighborhoodCache(dict):
342
+ """Very lightweight graph wrapper which caches neighborhoods as list.
343
+
344
+ This dict subclass uses the __missing__ functionality to query graphs for
345
+ their neighborhoods, and store the result as a list. This is used to avoid
346
+ the performance penalty incurred by subgraph views.
347
+ """
348
+
349
+ def __init__(self, G):
350
+ self.G = G
351
+
352
+ def __missing__(self, v):
353
+ Gv = self[v] = list(self.G[v])
354
+ return Gv
355
+
356
+
357
def _johnson_cycle_search(G, path):
    """The main loop of the cycle-enumeration algorithm of Johnson.

    Parameters
    ----------
    G : NetworkX Graph or DiGraph
        A graph

    path : list
        A cycle prefix.  All cycles generated will begin with this prefix.
        The list is mutated in place during the search and restored on exit.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007

    """

    G = _NeighborhoodCache(G)
    blocked = set(path)
    B = defaultdict(set)  # graph portions that yield no elementary circuit
    start = path[0]
    # Explicit DFS stack of neighbor iterators; `closed[i]` records whether
    # any cycle was found below depth i (Johnson's unblocking condition).
    stack = [iter(G[path[-1]])]
    closed = [False]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if w == start:
                # Back at the start node: the current path is a cycle.
                yield path[:]
                closed[-1] = True
            elif w not in blocked:
                # Descend one level, blocking w so it cannot repeat on path.
                path.append(w)
                closed.append(False)
                stack.append(iter(G[w]))
                blocked.add(w)
                break
        else:  # no more nbrs
            stack.pop()
            v = path.pop()
            if closed.pop():
                # A cycle was found through v: propagate success upward and
                # recursively unblock v and everything waiting on it in B.
                if closed:
                    closed[-1] = True
                unblock_stack = {v}
                while unblock_stack:
                    u = unblock_stack.pop()
                    if u in blocked:
                        blocked.remove(u)
                        unblock_stack.update(B[u])
                        B[u].clear()
            else:
                # No cycle through v: keep v blocked, but register it in B so
                # it gets unblocked as soon as any neighbor becomes unblocked.
                for w in G[v]:
                    B[w].add(v)
415
+
416
+
417
def _bounded_cycle_search(G, path, length_bound):
    """The main loop of the cycle-enumeration algorithm of Gupta and Suzumura.

    Parameters
    ----------
    G : NetworkX Graph or DiGraph
        A graph

    path : list
        A cycle prefix.  All cycles generated will begin with this prefix.
        The list is mutated in place during the search and restored on exit.

    length_bound: int
        A length bound.  All cycles generated will have length at most
        length_bound.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Finding All Bounded-Length Simple Cycles in a Directed Graph
       A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094

    """
    G = _NeighborhoodCache(G)
    # lock[v] is the minimum path length at which v may be re-entered;
    # nodes of the prefix are locked at 0 (never re-enterable).
    lock = {v: 0 for v in path}
    B = defaultdict(set)  # nodes to relax when a shorter route to v appears
    start = path[0]
    stack = [iter(G[path[-1]])]
    # blen[i] tracks the smallest remaining budget that led to a cycle below
    # depth i; it drives the lock-relaxation step on backtrack.
    blen = [length_bound]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if w == start:
                # Closing edge found: current path is a cycle within budget.
                yield path[:]
                blen[-1] = 1
            elif len(path) < lock.get(w, length_bound):
                # w is enterable at this depth: descend and tighten its lock.
                path.append(w)
                blen.append(length_bound)
                lock[w] = len(path)
                stack.append(iter(G[w]))
                break
        else:
            stack.pop()
            v = path.pop()
            bl = blen.pop()
            if blen:
                blen[-1] = min(blen[-1], bl)
            if bl < length_bound:
                # A cycle was found with residual budget bl: relax the locks
                # of v and (transitively) of the nodes recorded in B, so they
                # may be revisited by shorter prefixes later.
                relax_stack = [(bl, v)]
                while relax_stack:
                    bl, u = relax_stack.pop()
                    if lock.get(u, length_bound) < length_bound - bl + 1:
                        lock[u] = length_bound - bl + 1
                        relax_stack.extend((bl + 1, w) for w in B[u].difference(path))
            else:
                # No cycle below v within budget: remember v in each neighbor's
                # B-set so a future relaxation can reach it.
                for w in G[v]:
                    B[w].add(v)
476
+
477
+
478
@nx._dispatchable
def chordless_cycles(G, length_bound=None):
    """Find simple chordless cycles of a graph.

    A `simple cycle` is a closed path where no node appears twice.  In a simple
    cycle, a `chord` is an additional edge between two nodes in the cycle.  A
    `chordless cycle` is a simple cycle without chords.  Said differently, a
    chordless cycle is a cycle C in a graph G where the number of edges in the
    induced graph G[C] is equal to the length of `C`.

    Note that some care must be taken in the case that G is not a simple graph
    nor a simple digraph.  Some authors limit the definition of chordless cycles
    to have a prescribed minimum length; we do not.

    1. We interpret self-loops to be chordless cycles, except in multigraphs
       with multiple loops in parallel.  Likewise, in a chordless cycle of
       length greater than 1, there can be no nodes with self-loops.

    2. We interpret directed two-cycles to be chordless cycles, except in
       multi-digraphs when any edge in a two-cycle has a parallel copy.

    3. We interpret parallel pairs of undirected edges as two-cycles, except
       when a third (or more) parallel edge exists between the two nodes.

    4. Generalizing the above, edges with parallel clones may not occur in
       chordless cycles.

    In a directed graph, two chordless cycles are distinct if they are not
    cyclic permutations of each other.  In an undirected graph, two chordless
    cycles are distinct if they are not cyclic permutations of each other nor of
    the other's reversal.

    Optionally, the cycles are bounded in length.

    We use an algorithm strongly inspired by that of Dias et al [1]_.  It has
    been modified in the following ways:

    1. Recursion is avoided, per Python's limitations

    2. The labeling function is not necessary, because the starting paths
        are chosen (and deleted from the host graph) to prevent multiple
        occurrences of the same path

    3. The search is optionally bounded at a specified length

    4. Support for directed graphs is provided by extending cycles along
        forward edges, and blocking nodes along forward and reverse edges

    5. Support for multigraphs is provided by omitting digons from the set
        of forward edges

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph

    length_bound : int or None, optional (default=None)
        If length_bound is an int, generate all simple cycles of G with length at
        most length_bound.  Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    Examples
    --------
    >>> sorted(list(nx.chordless_cycles(nx.complete_graph(4))))
    [[1, 0, 2], [1, 0, 3], [2, 0, 3], [2, 1, 3]]

    Notes
    -----
    When length_bound is None, and the graph is simple, the time complexity is
    $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ chordless cycles.

    Raises
    ------
    ValueError
        when length_bound < 0.

    References
    ----------
    .. [1] Efficient enumeration of chordless cycles
       E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi
       https://arxiv.org/abs/1309.1051

    See Also
    --------
    simple_cycles
    """

    # A bound of 0 admits no cycles; a negative bound is an error.
    if length_bound is not None:
        if length_bound == 0:
            return
        elif length_bound < 0:
            raise ValueError("length bound must be non-negative")

    directed = G.is_directed()
    multigraph = G.is_multigraph()

    # Self-loops are chordless 1-cycles, except when a loop has a parallel
    # clone (multigraph with 2+ loops at the same node).
    if multigraph:
        yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1)
    else:
        yield from ([v] for v, Gv in G.adj.items() if v in Gv)

    if length_bound is not None and length_bound == 1:
        return

    # Nodes with loops cannot belong to longer cycles.  Let's delete them here.
    # also, we implicitly reduce the multiplicity of edges down to 1 in the case
    # of multiedges.
    if directed:
        # F holds the forward edges we may extend along; B holds blocking
        # edges (both directions) used to detect chords during the search.
        F = nx.DiGraph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
        B = F.to_undirected(as_view=False)
    else:
        F = nx.Graph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
        B = None

    # If we're given a multigraph, we have a few cases to consider with parallel
    # edges.
    #
    # 1. If we have 2 or more edges in parallel between the nodes (u, v), we
    #    must not construct longer cycles along (u, v).
    # 2. If G is not directed, then a pair of parallel edges between (u, v) is a
    #    chordless cycle unless there exists a third (or more) parallel edge.
    # 3. If G is directed, then parallel edges do not form cycles, but do
    #    preclude back-edges from forming cycles (handled in the next section),
    #    Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also
    #    present, then we remove both from F.
    #
    # In directed graphs, we need to consider both directions that edges can
    # take, so iterate over all edges (u, v) and possibly (v, u).  In undirected
    # graphs, we need to be a little careful to only consider every edge once,
    # so we use a "visited" set to emulate node-order comparisons.

    if multigraph:
        if not directed:
            B = F.copy()
        visited = set()
        for u, Gu in G.adj.items():
            if directed:
                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items())
                for v, m in multiplicity:
                    if m > 1:
                        F.remove_edges_from(((u, v), (v, u)))
            else:
                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
                for v, m in multiplicity:
                    if m == 2:
                        yield [u, v]
                    if m > 1:
                        F.remove_edge(u, v)
            visited.add(u)

    # If we're given a directed graphs, we need to think about digons.  If we
    # have two edges (u, v) and (v, u), then that's a two-cycle.  If either edge
    # was duplicated above, then we removed both from F.  So, any digons we find
    # here are chordless.  After finding digons, we remove their edges from F
    # to avoid traversing them in the search for chordless cycles.
    if directed:
        for u, Fu in F.adj.items():
            digons = [[u, v] for v in Fu if F.has_edge(v, u)]
            yield from digons
            F.remove_edges_from(digons)
            F.remove_edges_from(e[::-1] for e in digons)

    if length_bound is not None and length_bound == 2:
        return

    # Now, we prepare to search for cycles.  We have removed all cycles of
    # lengths 1 and 2, so F is a simple graph or simple digraph.  We repeatedly
    # separate digraphs into their strongly connected components, and undirected
    # graphs into their biconnected components.  For each component, we pick a
    # node v, search for chordless cycles based at each "stem" (u, v, w), and
    # then remove v from that component before separating the graph again.
    if directed:
        separate = nx.strongly_connected_components

        # Directed stems look like (u -> v -> w), so we use the product of
        # predecessors of v with successors of v.
        def stems(C, v):
            for u, w in product(C.pred[v], C.succ[v]):
                if not G.has_edge(u, w):  # omit stems with acyclic chords
                    yield [u, v, w], F.has_edge(w, u)

    else:
        separate = nx.biconnected_components

        # Undirected stems look like (u ~ v ~ w), but we must not also search
        # (w ~ v ~ u), so we use combinations of v's neighbors of length 2.
        def stems(C, v):
            yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2))

    components = [c for c in separate(F) if len(c) > 2]
    while components:
        c = components.pop()
        v = next(iter(c))
        Fc = F.subgraph(c)
        # Lazily built neighborhood caches, created only if some stem needs
        # a full search (i.e. is not already a triangle).
        Fcc = Bcc = None
        for S, is_triangle in stems(Fc, v):
            if is_triangle:
                yield S
            else:
                if Fcc is None:
                    Fcc = _NeighborhoodCache(Fc)
                    Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c))
                yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound)

        components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2)
687
+
688
+
689
def _chordless_cycle_search(F, B, path, length_bound):
    """The main loop for chordless cycle enumeration.

    This algorithm is strongly inspired by that of Dias et al [1]_.  It has been
    modified in the following ways:

    1. Recursion is avoided, per Python's limitations

    2. The labeling function is not necessary, because the starting paths
        are chosen (and deleted from the host graph) to prevent multiple
        occurrences of the same path

    3. The search is optionally bounded at a specified length

    4. Support for directed graphs is provided by extending cycles along
        forward edges, and blocking nodes along forward and reverse edges

    5. Support for multigraphs is provided by omitting digons from the set
        of forward edges

    Parameters
    ----------
    F : _NeighborhoodCache
        A graph of forward edges to follow in constructing cycles

    B : _NeighborhoodCache
        A graph of blocking edges to prevent the production of chordless cycles

    path : list
        A cycle prefix.  All cycles generated will begin with this prefix.
        It is assumed to contain at least three nodes (a "stem" u, v, w).

    length_bound : int
        A length bound.  All cycles generated will have length at most
        length_bound.


    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Efficient enumeration of chordless cycles
       E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi
       https://arxiv.org/abs/1309.1051

    """
    # blocked[v] counts how many path nodes are adjacent (in B) to v; a node
    # is extendable only when exactly its predecessor on the path blocks it,
    # i.e. blocked[v] == 1 -- otherwise the extension would create a chord.
    blocked = defaultdict(int)
    target = path[0]
    blocked[path[1]] = 1
    for w in path[1:]:
        for v in B[w]:
            blocked[v] += 1

    stack = [iter(F[path[2]])]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if blocked[w] == 1 and (length_bound is None or len(path) < length_bound):
                Fw = F[w]
                if target in Fw:
                    # w closes back to the start node: emit the cycle.
                    yield path + [w]
                else:
                    Bw = B[w]
                    if target in Bw:
                        # A blocking edge to the start would be a chord; prune.
                        continue
                    # Descend: every B-neighbor of w gains one blocker.
                    for v in Bw:
                        blocked[v] += 1
                    path.append(w)
                    stack.append(iter(Fw))
                    break
        else:
            # Backtrack: undo the blocking contributions of the popped node.
            stack.pop()
            for v in B[path.pop()]:
                blocked[v] -= 1
764
+
765
+
766
@not_implemented_for("undirected")
@nx._dispatchable(mutates_input=True)
def recursive_simple_cycles(G):
    """Find simple cycles (elementary circuits) of a directed graph.

    A `simple cycle`, or `elementary circuit`, is a closed path where
    no node appears twice. Two elementary circuits are distinct if they
    are not cyclic permutations of each other.

    This version uses a recursive algorithm to build a list of cycles.
    You should probably use the iterator version called simple_cycles().
    Warning: This recursive version uses lots of RAM!
    It appears in NetworkX for pedagogical value.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph.  Note: self-loop edges are removed from G
        in place (see `mutates_input`).

    Returns
    -------
    A list of cycles, where each cycle is represented by a list of nodes
    along the cycle.

    Example:

    >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]
    >>> G = nx.DiGraph(edges)
    >>> nx.recursive_simple_cycles(G)
    [[0], [2], [0, 1, 2], [0, 2], [1, 2]]

    Notes
    -----
    The implementation follows pp. 79-80 in [1]_.

    The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$
    elementary circuits.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007

    See Also
    --------
    simple_cycles, cycle_basis
    """

    # Jon Olav Vik, 2010-08-09
    def _unblock(thisnode):
        """Recursively unblock and remove nodes from B[thisnode]."""
        if blocked[thisnode]:
            blocked[thisnode] = False
            while B[thisnode]:
                _unblock(B[thisnode].pop())

    def circuit(thisnode, startnode, component):
        """Recursive DFS of Johnson's CIRCUIT procedure; returns True iff a
        cycle through `thisnode` back to `startnode` was recorded."""
        closed = False  # set to True if elementary path is closed
        path.append(thisnode)
        blocked[thisnode] = True
        for nextnode in component[thisnode]:  # direct successors of thisnode
            if nextnode == startnode:
                result.append(path[:])
                closed = True
            elif not blocked[nextnode]:
                if circuit(nextnode, startnode, component):
                    closed = True
        if closed:
            _unblock(thisnode)
        else:
            # No cycle found below thisnode: register it in B so it gets
            # unblocked once any successor becomes unblocked.
            for nextnode in component[thisnode]:
                if thisnode not in B[nextnode]:  # TODO: use set for speedup?
                    B[nextnode].append(thisnode)
        path.pop()  # remove thisnode from path
        return closed

    path = []  # stack of nodes in current path
    blocked = defaultdict(bool)  # vertex: blocked from search?
    B = defaultdict(list)  # graph portions that yield no elementary circuit
    result = []  # list to accumulate the circuits found

    # Johnson's algorithm exclude self cycle edges like (v, v)
    # To be backward compatible, we record those cycles in advance
    # and then remove from subG
    for v in G:
        if G.has_edge(v, v):
            result.append([v])
            G.remove_edge(v, v)

    # Johnson's algorithm requires some ordering of the nodes.
    # They might not be sortable so we assign an arbitrary ordering.
    ordering = dict(zip(G, range(len(G))))
    for s in ordering:
        # Build the subgraph induced by s and following nodes in the ordering
        subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s])
        # Find the strongly connected component in the subgraph
        # that contains the least node according to the ordering
        strongcomp = nx.strongly_connected_components(subgraph)
        mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns))
        component = G.subgraph(mincomp)
        if len(component) > 1:
            # smallest node in the component according to the ordering
            startnode = min(component, key=ordering.__getitem__)
            for node in component:
                blocked[node] = False
                B[node][:] = []
            dummy = circuit(startnode, startnode, component)
    return result
875
+
876
+
877
@nx._dispatchable
def find_cycle(G, source=None, orientation=None):
    """Returns a cycle found via depth-first traversal.

    The cycle is a list of edges indicating the cyclic path.
    Orientation of directed edges is controlled by `orientation`.

    Parameters
    ----------
    G : graph
        A directed/undirected graph/multigraph.

    source : node, list of nodes
        The node from which the traversal begins. If None, then a source
        is chosen arbitrarily and repeatedly until all edges from each node in
        the graph are searched.

    orientation : None | 'original' | 'reverse' | 'ignore' (default: None)
        For directed graphs and directed multigraphs, edge traversals need not
        respect the original orientation of the edges.
        When set to 'reverse' every edge is traversed in the reverse direction.
        When set to 'ignore', every edge is treated as undirected.
        When set to 'original', every edge is treated as directed.
        In all three cases, the yielded edge tuples add a last entry to
        indicate the direction in which that edge was traversed.
        If orientation is None, the yielded edge has no direction indicated.
        The direction is respected, but not reported.

    Returns
    -------
    edges : directed edges
        A list of directed edges indicating the path taken for the loop.
        If no cycle is found, then an exception is raised.
        For graphs, an edge is of the form `(u, v)` where `u` and `v`
        are the tail and head of the edge as determined by the traversal.
        For multigraphs, an edge is of the form `(u, v, key)`, where `key` is
        the key of the edge. When the graph is directed, then `u` and `v`
        are always in the order of the actual directed edge.
        If orientation is not None then the edge tuple is extended to include
        the direction of traversal ('forward' or 'reverse') on that edge.

    Raises
    ------
    NetworkXNoCycle
        If no cycle was found.

    Examples
    --------
    In this example, we construct a DAG and find, in the first call, that there
    are no directed cycles, and so an exception is raised. In the second call,
    we ignore edge orientations and find that there is an undirected cycle.
    Note that the second call finds a directed cycle while effectively
    traversing an undirected graph, and so, we found an "undirected cycle".
    This means that this DAG structure does not form a directed tree (which
    is also known as a polytree).

    >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    >>> nx.find_cycle(G, orientation="original")
    Traceback (most recent call last):
        ...
    networkx.exception.NetworkXNoCycle: No cycle found.
    >>> list(nx.find_cycle(G, orientation="ignore"))
    [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]

    See Also
    --------
    simple_cycles
    """
    # `tailhead` normalizes an edge tuple (possibly with a direction tag) to
    # the (tail, head) pair as seen by this traversal's orientation.
    if not G.is_directed() or orientation in (None, "original"):

        def tailhead(edge):
            return edge[:2]

    elif orientation == "reverse":

        def tailhead(edge):
            return edge[1], edge[0]

    elif orientation == "ignore":

        def tailhead(edge):
            # edge_dfs tags each edge with the direction it was traversed.
            if edge[-1] == "reverse":
                return edge[1], edge[0]
            return edge[:2]

    explored = set()
    cycle = []
    final_node = None
    for start_node in G.nbunch_iter(source):
        if start_node in explored:
            # No loop is possible.
            continue

        edges = []
        # All nodes seen in this iteration of edge_dfs
        seen = {start_node}
        # Nodes in active path.
        active_nodes = {start_node}
        previous_head = None

        for edge in nx.edge_dfs(G, start_node, orientation):
            # Determine if this edge is a continuation of the active path.
            tail, head = tailhead(edge)
            if head in explored:
                # Then we've already explored it. No loop is possible.
                continue
            if previous_head is not None and tail != previous_head:
                # This edge results from backtracking.
                # Pop until we get a node whose head equals the current tail.
                # So for example, we might have:
                #  (0, 1), (1, 2), (2, 3), (1, 4)
                # which must become:
                #  (0, 1), (1, 4)
                while True:
                    try:
                        popped_edge = edges.pop()
                    except IndexError:
                        edges = []
                        active_nodes = {tail}
                        break
                    else:
                        popped_head = tailhead(popped_edge)[1]
                        active_nodes.remove(popped_head)

                    if edges:
                        last_head = tailhead(edges[-1])[1]
                        if tail == last_head:
                            break
            edges.append(edge)

            if head in active_nodes:
                # We have a loop!
                cycle.extend(edges)
                final_node = head
                break
            else:
                seen.add(head)
                active_nodes.add(head)
                previous_head = head

        if cycle:
            break
        else:
            explored.update(seen)

    else:
        assert len(cycle) == 0
        raise nx.exception.NetworkXNoCycle("No cycle found.")

    # We now have a list of edges which ends on a cycle.
    # So we need to remove from the beginning edges that are not relevant.

    for i, edge in enumerate(cycle):
        tail, head = tailhead(edge)
        if tail == final_node:
            break

    return cycle[i:]
1035
+
1036
+
1037
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight")
def minimum_cycle_basis(G, weight=None):
    """Returns a minimum weight cycle basis for G

    Minimum weight means a cycle basis for which the total weight
    (length for unweighted graphs) of all the cycles is minimum.

    Parameters
    ----------
    G : NetworkX Graph
    weight: string
        name of the edge attribute to use for edge weights

    Returns
    -------
    A list of cycle lists.  Each cycle list is a list of nodes
    which forms a cycle (loop) in G. Note that the nodes are not
    necessarily returned in a order by which they appear in the cycle

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_cycle(G, [0, 1, 2, 3])
    >>> nx.add_cycle(G, [0, 3, 4, 5])
    >>> nx.minimum_cycle_basis(G)
    [[5, 4, 3, 0], [3, 2, 1, 0]]

    References:
        [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for
        Minimum Cycle Basis of Graphs."
        http://link.springer.com/article/10.1007/s00453-007-9064-z
        [2] de Pina, J. 1995. Applications of shortest path methods.
        Ph.D. thesis, University of Amsterdam, Netherlands

    See Also
    --------
    simple_cycles, cycle_basis
    """
    # A cycle basis of G is the union of bases of its connected components,
    # so solve each component independently and concatenate the results.
    basis = []
    for component in nx.connected_components(G):
        basis.extend(_min_cycle_basis(G.subgraph(component), weight))
    return basis
1082
+
1083
+
1084
def _min_cycle_basis(G, weight):
    """Return a minimum weight cycle basis for the connected graph G."""
    # Collect the chords: edges outside a spanning tree. Any spanning tree
    # works here, so we build one ignoring weights (weight=None), which may
    # also be faster depending on the implementation.
    spanning = list(nx.minimum_spanning_edges(G, weight=None, data=False))
    chords = G.edges - spanning - {(v, u) for u, v in spanning}

    # Maintain a list of vectors orthogonal to the cycles found so far.
    ortho_vectors = [{chord} for chord in chords]
    basis = []
    while ortho_vectors:
        pivot = ortho_vectors.pop()
        # The next basis cycle is the lightest cycle "parallel" to pivot.
        new_cycle = _min_cycle(G, pivot, weight)
        basis.append([v for u, v in new_cycle])

        # Update the remaining vectors so they become orthogonal to the
        # newly found cycle, as per [p. 336, 1]. A vector with odd overlap
        # is replaced by its symmetric difference with pivot (ignoring
        # edge orientation); vectors with even overlap are unchanged.
        updated = []
        for vec in ortho_vectors:
            overlap = sum((e in vec or e[::-1] in vec) for e in new_cycle)
            if overlap % 2:
                kept = {e for e in vec if e not in pivot and e[::-1] not in pivot}
                added = {e for e in pivot if e not in vec and e[::-1] not in vec}
                updated.append(kept | added)
            else:
                updated.append(vec)
        ortho_vectors = updated
    return basis
1112
+
1113
+
1114
def _min_cycle(G, orth, weight):
    """
    Compute the minimum weight cycle in G that is non-orthogonal to the
    vector ``orth``, as per [p. 338, 1].
    Node ``(u, 1)`` denotes the lifted copy of ``u`` (written u' in the paper).
    """
    lifted = nx.Graph()

    # Insert two copies of every edge of G into the lifted graph:
    # edges in ``orth`` cross between the two layers, all others stay
    # within their layer.
    for u, v, wt in G.edges(data=weight, default=1):
        if (u, v) in orth or (v, u) in orth:
            lifted.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt)
        else:
            lifted.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt)

    # Shortest distance from every node to its lifted copy.
    # Note: "Gi_weight" is the name of the weight attribute in the lifted graph.
    spl = nx.shortest_path_length
    lift = {n: spl(lifted, source=n, target=(n, 1), weight="Gi_weight") for n in G}

    # The cheapest such path corresponds to the wanted cycle in G.
    start = min(lift, key=lift.get)
    shortest = nx.shortest_path(
        lifted, source=start, target=(start, 1), weight="Gi_weight"
    )

    # Project lifted nodes back down onto G.
    projected = [node if node in G else node[0] for node in shortest]

    # Edges traversed an even number of times cancel out. Two passes:
    # first flag the edges of odd multiplicity, then rebuild the list
    # in traversal order.
    walk = list(pairwise(projected))
    odd_edges = set()
    for e in walk:
        if e in odd_edges:
            odd_edges.remove(e)
        elif e[::-1] in odd_edges:
            odd_edges.remove(e[::-1])
        else:
            odd_edges.add(e)

    cycle = []
    for e in walk:
        if e in odd_edges:
            cycle.append(e)
            odd_edges.remove(e)
        elif e[::-1] in odd_edges:
            cycle.append(e[::-1])
            odd_edges.remove(e[::-1])

    return cycle
1165
+
1166
+
1167
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def girth(G):
    """Returns the girth of the graph.

    The girth of a graph is the length of its shortest cycle, or infinity
    when the graph is acyclic. Following the description given on the
    Wikipedia page [1]_, a depth-limited BFS is run from every node, which
    yields an O(mn) algorithm for a graph with m edges and n nodes.

    Parameters
    ----------
    G : NetworkX Graph

    Returns
    -------
    int or math.inf

    Examples
    --------
    All examples below (except P_5) can easily be checked using Wikipedia,
    which has a page for each of these famous graphs.

    >>> nx.girth(nx.chvatal_graph())
    4
    >>> nx.girth(nx.tutte_graph())
    4
    >>> nx.girth(nx.petersen_graph())
    5
    >>> nx.girth(nx.heawood_graph())
    6
    >>> nx.girth(nx.pappus_graph())
    6
    >>> nx.girth(nx.path_graph(5))
    inf

    References
    ----------
    .. [1] `Wikipedia: Girth <https://en.wikipedia.org/wiki/Girth_(graph_theory)>`_

    """
    best = cutoff = inf
    tree_edge = nx.algorithms.traversal.breadth_first_search.TREE_EDGE
    level_edge = nx.algorithms.traversal.breadth_first_search.LEVEL_EDGE
    for source in G:
        # BFS from ``source``, tracking distances. Since only the shortest
        # cycle matters, exploration stops beyond the current cutoff.
        dist = {source: 0}
        for u, v, label in nx.bfs_labeled_edges(G, source):
            d = dist[u]
            if d > cutoff:
                break
            if label is tree_edge:
                dist[v] = d + 1
                continue
            # A non-tree edge closes a cycle through this BFS:
            # a level edge gives an odd cycle of length 2d + 1, while a
            # forward edge gives an even cycle of length 2d + 2.
            is_level = label is level_edge
            cycle_len = d + d + 2 - is_level
            if cycle_len < best:
                best = cycle_len
                cutoff = d - is_level

    return best
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/d_separation.py ADDED
@@ -0,0 +1,722 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Algorithm for testing d-separation in DAGs.
3
+
4
+ *d-separation* is a test for conditional independence in probability
5
+ distributions that can be factorized using DAGs. It is a purely
6
+ graphical test that uses the underlying graph and makes no reference
7
+ to the actual distribution parameters. See [1]_ for a formal
8
+ definition.
9
+
10
+ The implementation is based on the conceptually simple linear time
11
+ algorithm presented in [2]_. Refer to [3]_, [4]_ for a couple of
12
+ alternative algorithms.
13
+
14
+ The functional interface in NetworkX consists of three functions:
15
+
16
+ - `find_minimal_d_separator` returns a minimal d-separator set ``z``.
17
+ That is, removing any node or nodes from it makes it no longer a d-separator.
18
+ - `is_d_separator` checks if a given set is a d-separator.
19
+ - `is_minimal_d_separator` checks if a given set is a minimal d-separator.
20
+
21
+ D-separators
22
+ ------------
23
+
24
+ Here, we provide a brief overview of d-separation and related concepts that
25
+ are relevant for understanding it:
26
+
27
+ The ideas of d-separation and d-connection relate to paths being open or blocked.
28
+
29
+ - A "path" is a sequence of nodes connected in order by edges. Unlike for most
30
+ graph theory analysis, the direction of the edges is ignored. Thus the path
31
+ can be thought of as a traditional path on the undirected version of the graph.
32
+ - A "candidate d-separator" ``z`` is a set of nodes being considered as
33
+ possibly blocking all paths between two prescribed sets ``x`` and ``y`` of nodes.
34
+ We refer to each node in the candidate d-separator as "known".
35
+ - A "collider" node on a path is a node that is a successor of its two neighbor
36
+ nodes on the path. That is, ``c`` is a collider if the edge directions
37
+ along the path look like ``... u -> c <- v ...``.
38
+ - If a collider node or any of its descendants are "known", the collider
39
+ is called an "open collider". Otherwise it is a "blocking collider".
40
+ - Any path can be "blocked" in two ways. If the path contains a "known" node
41
+ that is not a collider, the path is blocked. Also, if the path contains a
42
+ collider that is not a "known" node, the path is blocked.
43
+ - A path is "open" if it is not blocked. That is, it is open if every node is
44
+ either an open collider or not a "known". Said another way, every
45
+ "known" in the path is a collider and every collider is open (has a
46
+ "known" as an inclusive descendant). The concept of "open path" is meant to
47
+ demonstrate a probabilistic conditional dependence between two nodes given
48
+ prescribed knowledge ("known" nodes).
49
+ - Two sets ``x`` and ``y`` of nodes are "d-separated" by a set of nodes ``z``
50
+ if all paths between nodes in ``x`` and nodes in ``y`` are blocked. That is,
51
+ if there are no open paths from any node in ``x`` to any node in ``y``.
52
+ Such a set ``z`` is a "d-separator" of ``x`` and ``y``.
53
+ - A "minimal d-separator" is a d-separator ``z`` for which no node or subset
54
+ of nodes can be removed with it still being a d-separator.
55
+
56
+ The d-separator blocks some paths between ``x`` and ``y`` but opens others.
57
+ Nodes in the d-separator block paths if the nodes are not colliders.
58
+ But if a collider or its descendant nodes are in the d-separation set, the
59
+ colliders are open, allowing a path through that collider.
60
+
61
+ Illustration of D-separation with examples
62
+ ------------------------------------------
63
+
64
+ A pair of two nodes, ``u`` and ``v``, are d-connected if there is a path
65
+ from ``u`` to ``v`` that is not blocked. That means, there is an open
66
+ path from ``u`` to ``v``.
67
+
68
+ For example, if the d-separating set is the empty set, then the following paths are
69
+ open between ``u`` and ``v``:
70
+
71
+ - u <- n -> v
72
+ - u -> w -> ... -> n -> v
73
+
74
+ If on the other hand, ``n`` is in the d-separating set, then ``n`` blocks
75
+ those paths between ``u`` and ``v``.
76
+
77
+ Colliders block a path if they and their descendants are not included
78
+ in the d-separating set. An example of a path that is blocked when the
79
+ d-separating set is empty is:
80
+
81
+ - u -> w -> ... -> n <- v
82
+
83
+ The node ``n`` is a collider in this path and is not in the d-separating set.
84
+ So ``n`` blocks this path. However, if ``n`` or a descendant of ``n`` is
85
+ included in the d-separating set, then the path through the collider
86
+ at ``n`` (... -> n <- ...) is "open".
87
+
88
+ D-separation is concerned with blocking all paths between nodes from ``x`` to ``y``.
89
+ A d-separating set between ``x`` and ``y`` is one where all paths are blocked.
90
+
91
+ D-separation and its applications in probability
92
+ ------------------------------------------------
93
+
94
+ D-separation is commonly used in probabilistic causal-graph models. D-separation
95
+ connects the idea of probabilistic "dependence" with separation in a graph. If
96
+ one assumes the causal Markov condition [5]_, (every node is conditionally
97
+ independent of its non-descendants, given its parents) then d-separation implies
98
+ conditional independence in probability distributions.
99
+ Symmetrically, d-connection implies dependence.
100
+
101
+ The intuition is as follows. The edges on a causal graph indicate which nodes
102
+ influence the outcome of other nodes directly. An edge from u to v
103
+ implies that the outcome of event ``u`` influences the probabilities for
104
+ the outcome of event ``v``. Certainly knowing ``u`` changes predictions for ``v``.
105
+ But also knowing ``v`` changes predictions for ``u``. The outcomes are dependent.
106
+ Furthermore, an edge from ``v`` to ``w`` would mean that ``w`` and ``v`` are dependent
107
+ and thus that ``u`` could indirectly influence ``w``.
108
+
109
+ Without any knowledge about the system (candidate d-separating set is empty)
110
+ a causal graph ``u -> v -> w`` allows all three nodes to be dependent. But
111
+ if we know the outcome of ``v``, the conditional probabilities of outcomes for
112
+ ``u`` and ``w`` are independent of each other. That is, once we know the outcome
113
+ for ``v``, the probabilities for ``w`` do not depend on the outcome for ``u``.
114
+ This is the idea behind ``v`` blocking the path if it is "known" (in the candidate
115
+ d-separating set).
116
+
117
+ The same argument works whether the direction of the edges are both
118
+ left-going and when both arrows head out from the middle. Having a "known"
119
+ node on a path blocks the collider-free path because those relationships
120
+ make the conditional probabilities independent.
121
+
122
+ The direction of the causal edges does impact dependence precisely in the
123
+ case of a collider e.g. ``u -> v <- w``. In that situation, both ``u`` and ``w``
124
+ influence ``v``. But they do not directly influence each other. So without any
125
+ knowledge of any outcomes, ``u`` and ``w`` are independent. That is the idea behind
126
+ colliders blocking the path. But, if ``v`` is known, the conditional probabilities
127
+ of ``u`` and ``w`` can be dependent. This is the heart of Berkson's Paradox [6]_.
128
+ For example, suppose ``u`` and ``w`` are boolean events (they either happen or do not)
129
+ and ``v`` represents the outcome "at least one of ``u`` and ``w`` occur". Then knowing
130
+ ``v`` is true makes the conditional probabilities of ``u`` and ``w`` dependent.
131
+ Essentially, knowing that at least one of them is true raises the probability of
132
+ each. But further knowledge that ``w`` is true (or false) changes the conditional
133
+ probability of ``u`` to either the original value or 1. So the conditional
134
+ probability of ``u`` depends on the outcome of ``w`` even though there is no
135
+ causal relationship between them. When a collider is known, dependence can
136
+ occur across paths through that collider. This is the reason open colliders
137
+ do not block paths.
138
+
139
+ Furthermore, even if ``v`` is not "known", if one of its descendants is "known"
140
+ we can use that information to know more about ``v`` which again makes
141
+ ``u`` and ``w`` potentially dependent. Suppose the chance of ``n`` occurring
142
+ is much higher when ``v`` occurs ("at least one of ``u`` and ``w`` occur").
143
+ Then if we know ``n`` occurred, it is more likely that ``v`` occurred and that
144
+ makes the chance of ``u`` and ``w`` dependent. This is the idea behind why
145
+ a collider does not block a path if any descendant of the collider is "known".
146
+
147
+ When two sets of nodes ``x`` and ``y`` are d-separated by a set ``z``,
148
+ it means that given the outcomes of the nodes in ``z``, the probabilities
149
+ of outcomes of the nodes in ``x`` are independent of the outcomes of the
150
+ nodes in ``y`` and vice versa.
151
+
152
+ Examples
153
+ --------
154
+ A Hidden Markov Model with 5 observed states and 5 hidden states
155
+ where the hidden states have causal relationships resulting in
156
+ a path results in the following causal network. We check that
157
+ early states along the path are separated from late state in
158
+ the path by the d-separator of the middle hidden state.
159
+ Thus if we condition on the middle hidden state, the early
160
+ state probabilities are independent of the late state outcomes.
161
+
162
+ >>> G = nx.DiGraph()
163
+ >>> G.add_edges_from(
164
+ ... [
165
+ ... ("H1", "H2"),
166
+ ... ("H2", "H3"),
167
+ ... ("H3", "H4"),
168
+ ... ("H4", "H5"),
169
+ ... ("H1", "O1"),
170
+ ... ("H2", "O2"),
171
+ ... ("H3", "O3"),
172
+ ... ("H4", "O4"),
173
+ ... ("H5", "O5"),
174
+ ... ]
175
+ ... )
176
+ >>> x, y, z = ({"H1", "O1"}, {"H5", "O5"}, {"H3"})
177
+ >>> nx.is_d_separator(G, x, y, z)
178
+ True
179
+ >>> nx.is_minimal_d_separator(G, x, y, z)
180
+ True
181
+ >>> nx.is_minimal_d_separator(G, x, y, z | {"O3"})
182
+ False
183
+ >>> z = nx.find_minimal_d_separator(G, x | y, {"O2", "O3", "O4"})
184
+ >>> z == {"H2", "H4"}
185
+ True
186
+
187
+ If no minimal_d_separator exists, `None` is returned
188
+
189
+ >>> other_z = nx.find_minimal_d_separator(G, x | y, {"H2", "H3"})
190
+ >>> other_z is None
191
+ True
192
+
193
+
194
+ References
195
+ ----------
196
+
197
+ .. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press.
198
+
199
+ .. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks.
200
+ Cambridge: Cambridge University Press.
201
+
202
+ .. [3] Shachter, Ross D. "Bayes-ball: The rational pastime (for
203
+ determining irrelevance and requisite information in belief networks
204
+ and influence diagrams)." In Proceedings of the Fourteenth Conference
205
+ on Uncertainty in Artificial Intelligence (UAI), (pp. 480–487). 1998.
206
+
207
+ .. [4] Koller, D., & Friedman, N. (2009).
208
+ Probabilistic graphical models: principles and techniques. The MIT Press.
209
+
210
+ .. [5] https://en.wikipedia.org/wiki/Causal_Markov_condition
211
+
212
+ .. [6] https://en.wikipedia.org/wiki/Berkson%27s_paradox
213
+
214
+ """
215
+
216
+ from collections import deque
217
+ from itertools import chain
218
+
219
+ import networkx as nx
220
+ from networkx.utils import UnionFind, not_implemented_for
221
+
222
+ __all__ = [
223
+ "is_d_separator",
224
+ "is_minimal_d_separator",
225
+ "find_minimal_d_separator",
226
+ "d_separated",
227
+ "minimal_d_separator",
228
+ ]
229
+
230
+
231
@not_implemented_for("undirected")
@nx._dispatchable
def is_d_separator(G, x, y, z):
    """Return whether node sets `x` and `y` are d-separated by `z`.

    Parameters
    ----------
    G : nx.DiGraph
        A NetworkX DAG.

    x : node or set of nodes
        First node or set of nodes in `G`.

    y : node or set of nodes
        Second node or set of nodes in `G`.

    z : node or set of nodes
        Potential separator (set of conditioning nodes in `G`). Can be empty set.

    Returns
    -------
    b : bool
        A boolean that is true if `x` is d-separated from `y` given `z` in `G`.

    Raises
    ------
    NetworkXError
        The *d-separation* test is commonly used on disjoint sets of
        nodes in acyclic directed graphs. Accordingly, the algorithm
        raises a :exc:`NetworkXError` if the node sets are not
        disjoint or if the input graph is not a DAG.

    NodeNotFound
        If any of the input nodes are not found in the graph,
        a :exc:`NodeNotFound` exception is raised

    Notes
    -----
    A d-separating set in a DAG is a set of nodes that
    blocks all paths between the two sets. Nodes in `z`
    block a path if they are part of the path and are not a collider,
    or a descendant of a collider. Also colliders that are not in `z`
    block a path. A collider structure along a path
    is ``... -> c <- ...`` where ``c`` is the collider node.

    https://en.wikipedia.org/wiki/Bayesian_network#d-separation
    """
    # Normalize single-node arguments to sets. ``x in G`` raises TypeError
    # for unhashable inputs, which is translated to NodeNotFound below.
    try:
        x = {x} if x in G else x
        y = {y} if y in G else y
        z = {z} if z in G else z

        intersection = x & y or x & z or y & z
        if intersection:
            raise nx.NetworkXError(
                f"The sets are not disjoint, with intersection {intersection}"
            )

        set_v = x | y | z
        if set_v - G.nodes:
            raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are not found in G")
    except TypeError:
        raise nx.NodeNotFound("One of x, y, or z is not a node or a set of nodes in G")

    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    # Reachability search in the style of the Bayes-ball algorithm (see the
    # module docstring references): a "ball" entering a node along an
    # outgoing edge (forward) behaves differently from one entering along
    # an incoming edge (backward), so the two frontiers are kept separate.

    # contains -> and <-> edges from starting node T
    forward_deque = deque([])
    forward_visited = set()

    # contains <- and - edges from starting node T
    backward_deque = deque(x)
    backward_visited = set()

    # Nodes in x, in z, or ancestors of x. Used below to decide whether a
    # collider reached along a forward edge lets the search pass backward.
    ancestors_or_z = set().union(*[nx.ancestors(G, node) for node in x]) | z | x

    # Alternate between the two frontiers until both are exhausted. If any
    # node of y is ever reached, an open path exists and x, y are NOT
    # d-separated.
    while forward_deque or backward_deque:
        if backward_deque:
            node = backward_deque.popleft()
            backward_visited.add(node)
            if node in y:
                return False
            if node in z:
                # A non-collider "known" node blocks this path.
                continue

            # add <- edges to backward deque
            backward_deque.extend(G.pred[node].keys() - backward_visited)
            # add -> edges to forward deque
            forward_deque.extend(G.succ[node].keys() - forward_visited)

        if forward_deque:
            node = forward_deque.popleft()
            forward_visited.add(node)
            if node in y:
                return False

            # Consider if -> node <- is opened due to ancestor of node in z
            if node in ancestors_or_z:
                # add <- edges to backward deque
                backward_deque.extend(G.pred[node].keys() - backward_visited)
            if node not in z:
                # add -> edges to forward deque
                forward_deque.extend(G.succ[node].keys() - forward_visited)

    # No node of y was reachable along an open path: d-separated.
    return True
337
+
338
+
339
@not_implemented_for("undirected")
@nx._dispatchable
def find_minimal_d_separator(G, x, y, *, included=None, restricted=None):
    """Returns a minimal d-separating set between `x` and `y` if possible

    A d-separating set in a DAG is a set of nodes blocking every path
    between the two node sets `x` and `y`. This function constructs a
    d-separating set that is "minimal": no node can be dropped from it
    without losing the d-separating property for `x` and `y`. When `x`
    and `y` admit no d-separating set at all, `None` is returned.

    Minimal d-separators are generally not unique; a DAG may contain
    several between the same two node sets. This function returns one of
    them, or `None` when none exists.

    Uses the algorithm presented in [1]_. The complexity of the algorithm
    is :math:`O(m)`, where :math:`m` stands for the number of edges in
    the subgraph of G consisting of only the ancestors of `x` and `y`.
    For full details, see [1]_.

    Parameters
    ----------
    G : graph
        A networkx DAG.
    x : set | node
        A node or set of nodes in the graph.
    y : set | node
        A node or set of nodes in the graph.
    included : set | node | None
        A node or set of nodes which must be included in the found separating set,
        default is None, which means the empty set.
    restricted : set | node | None
        Restricted node or set of nodes to consider. Only these nodes can be in
        the found separating set, default is None meaning all nodes in ``G``.

    Returns
    -------
    z : set | None
        The minimal d-separating set, if at least one d-separating set exists,
        otherwise None.

    Raises
    ------
    NetworkXError
        Raises a :exc:`NetworkXError` if the input graph is not a DAG
        or if node sets `x`, `y`, and `included` are not disjoint.

    NodeNotFound
        If any of the input nodes are not found in the graph,
        a :exc:`NodeNotFound` exception is raised.

    References
    ----------
    .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
       minimal d-separators in linear time and applications." In
       Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.
    """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    # Normalize each argument to a set of nodes. An unhashable argument
    # makes ``in G`` raise TypeError, which is reported as NodeNotFound.
    try:
        if x in G:
            x = {x}
        if y in G:
            y = {y}

        if included is None:
            included = set()
        elif included in G:
            included = {included}

        if restricted is None:
            restricted = set(G)
        elif restricted in G:
            restricted = {restricted}

        all_nodes = x | y | included | restricted
        missing = all_nodes - G.nodes
        if missing:
            raise nx.NodeNotFound(f"The node(s) {missing} are not found in G")
    except TypeError:
        raise nx.NodeNotFound(
            "One of x, y, included or restricted is not a node or set of nodes in G"
        )

    if not included <= restricted:
        raise nx.NetworkXError(
            f"Included nodes {included} must be in restricted nodes {restricted}"
        )

    overlap = x & y or x & included or y & included
    if overlap:
        raise nx.NetworkXError(
            f"The sets x, y, included are not disjoint. Overlap: {overlap}"
        )

    # Only ancestors of x, y and included can appear in a minimal separator.
    seeds = x | y | included
    ancestral = seeds.union(*[nx.ancestors(G, node) for node in seeds])

    # Initial candidate: every restricted ancestor outside x and y.
    candidate = restricted & (ancestral - (x | y))

    # If y is d-connected to x past the candidate, no separator exists.
    x_closure = _reachable(G, x, ancestral, candidate)
    if x_closure & y:
        return None

    # Shrink the candidate to nodes reachable from x (or required), then
    # keep only those also reachable from y (or required).
    candidate &= x_closure | included
    y_closure = _reachable(G, y, ancestral, candidate)
    return candidate & (y_closure | included)
444
+
445
+
446
@not_implemented_for("undirected")
@nx._dispatchable
def is_minimal_d_separator(G, x, y, z, *, included=None, restricted=None):
    """Determine if `z` is a minimal d-separator for `x` and `y`.

    A d-separator, `z`, in a DAG is a set of nodes that blocks
    all paths from nodes in set `x` to nodes in set `y`.
    A minimal d-separator is a d-separator `z` such that removing
    any subset of nodes makes it no longer a d-separator.

    Note: This function checks whether `z` is a d-separator AND is
    minimal. One can use the function `is_d_separator` to only check if
    `z` is a d-separator. See examples below.

    Parameters
    ----------
    G : nx.DiGraph
        A NetworkX DAG.
    x : node | set
        A node or set of nodes in the graph.
    y : node | set
        A node or set of nodes in the graph.
    z : node | set
        The node or set of nodes to check if it is a minimal d-separating set.
        The function :func:`is_d_separator` is called inside this function
        to verify that `z` is in fact a d-separator.
    included : set | node | None
        A node or set of nodes which must be included in the found separating set,
        default is ``None``, which means the empty set.
    restricted : set | node | None
        Restricted node or set of nodes to consider. Only these nodes can be in
        the found separating set, default is ``None`` meaning all nodes in ``G``.

    Returns
    -------
    bool
        Whether or not the set `z` is a minimal d-separator subject to
        `restricted` nodes and `included` node constraints.

    Examples
    --------
    >>> G = nx.path_graph([0, 1, 2, 3], create_using=nx.DiGraph)
    >>> G.add_node(4)
    >>> nx.is_minimal_d_separator(G, 0, 2, {1})
    True
    >>> # since {1} is the minimal d-separator, {1, 3, 4} is not minimal
    >>> nx.is_minimal_d_separator(G, 0, 2, {1, 3, 4})
    False
    >>> # alternatively, if we only want to check that {1, 3, 4} is a d-separator
    >>> nx.is_d_separator(G, 0, 2, {1, 3, 4})
    True

    Raises
    ------
    NetworkXError
        Raises a :exc:`NetworkXError` if the input graph is not a DAG.

    NodeNotFound
        If any of the input nodes are not found in the graph,
        a :exc:`NodeNotFound` exception is raised.

    References
    ----------
    .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
       minimal d-separators in linear time and applications." In
       Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.

    Notes
    -----
    This function works on verifying that a set is minimal and
    d-separating between two nodes. Uses criterion (a), (b), (c) on
    page 4 of [1]_. a) closure(`x`) and `y` are disjoint. b) `z` contains
    all nodes from `included` and is contained in the `restricted`
    nodes and in the union of ancestors of `x`, `y`, and `included`.
    c) the nodes in `z` not in `included` are contained in both
    closure(x) and closure(y). The closure of a set is the set of nodes
    connected to the set by a directed path in G.

    The complexity is :math:`O(m)`, where :math:`m` stands for the
    number of edges in the subgraph of G consisting of only the
    ancestors of `x` and `y`.

    For full details, see [1]_.
    """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    # Normalize single-node arguments to sets; unhashable arguments make
    # ``in G`` raise TypeError, which is reported as NodeNotFound.
    try:
        x = {x} if x in G else x
        y = {y} if y in G else y
        z = {z} if z in G else z

        if included is None:
            included = set()
        elif included in G:
            included = {included}

        if restricted is None:
            restricted = set(G)
        elif restricted in G:
            restricted = {restricted}

        set_y = x | y | included | restricted
        if set_y - G.nodes:
            raise nx.NodeNotFound(f"The node(s) {set_y - G.nodes} are not found in G")
    except TypeError:
        raise nx.NodeNotFound(
            "One of x, y, z, included or restricted is not a node or set of nodes in G"
        )

    if not included <= z:
        # BUG FIX: the message previously interpolated ``x`` instead of the
        # proposed separating set ``z``, producing a misleading error.
        raise nx.NetworkXError(
            f"Included nodes {included} must be in proposed separating set z {z}"
        )
    if not z <= restricted:
        raise nx.NetworkXError(
            f"Separating set {z} must be contained in restricted set {restricted}"
        )

    intersection = x.intersection(y) or x.intersection(z) or y.intersection(z)
    if intersection:
        raise nx.NetworkXError(
            f"The sets are not disjoint, with intersection {intersection}"
        )

    # Only ancestors of x, y and included are relevant for separation.
    nodeset = x | y | included
    ancestors_x_y_included = nodeset.union(*[nx.ancestors(G, n) for n in nodeset])

    # criterion (a) -- check that z is actually a separator
    x_closure = _reachable(G, x, ancestors_x_y_included, z)
    if x_closure & y:
        return False

    # criterion (b) -- basic constraint; included and restricted already checked above
    if not (z <= ancestors_x_y_included):
        return False

    # criterion (c) -- check that z is minimal: every non-required node of z
    # must be d-connected to both x and y, otherwise it could be removed.
    y_closure = _reachable(G, y, ancestors_x_y_included, z)
    if not ((z - included) <= (x_closure & y_closure)):
        return False
    return True
588
+
589
+
590
+ @not_implemented_for("undirected")
591
+ def _reachable(G, x, a, z):
592
+ """Modified Bayes-Ball algorithm for finding d-connected nodes.
593
+
594
+ Find all nodes in `a` that are d-connected to those in `x` by
595
+ those in `z`. This is an implementation of the function
596
+ `REACHABLE` in [1]_ (which is itself a modification of the
597
+ Bayes-Ball algorithm [2]_) when restricted to DAGs.
598
+
599
+ Parameters
600
+ ----------
601
+ G : nx.DiGraph
602
+ A NetworkX DAG.
603
+ x : node | set
604
+ A node in the DAG, or a set of nodes.
605
+ a : node | set
606
+ A (set of) node(s) in the DAG containing the ancestors of `x`.
607
+ z : node | set
608
+ The node or set of nodes conditioned on when checking d-connectedness.
609
+
610
+ Returns
611
+ -------
612
+ w : set
613
+ The closure of `x` in `a` with respect to d-connectedness
614
+ given `z`.
615
+
616
+ References
617
+ ----------
618
+ .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
619
+ minimal d-separators in linear time and applications." In
620
+ Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.
621
+
622
+ .. [2] Shachter, Ross D. "Bayes-ball: The rational pastime
623
+ (for determining irrelevance and requisite information in
624
+ belief networks and influence diagrams)." In Proceedings of the
625
+ Fourteenth Conference on Uncertainty in Artificial Intelligence
626
+ (UAI), (pp. 480–487). 1998.
627
+ """
628
+
629
+ def _pass(e, v, f, n):
630
+ """Whether a ball entering node `v` along edge `e` passes to `n` along `f`.
631
+
632
+ Boolean function defined on page 6 of [1]_.
633
+
634
+ Parameters
635
+ ----------
636
+ e : bool
637
+ Directed edge by which the ball got to node `v`; `True` iff directed into `v`.
638
+ v : node
639
+ Node where the ball is.
640
+ f : bool
641
+ Directed edge connecting nodes `v` and `n`; `True` iff directed `n`.
642
+ n : node
643
+ Checking whether the ball passes to this node.
644
+
645
+ Returns
646
+ -------
647
+ b : bool
648
+ Whether the ball passes or not.
649
+
650
+ References
651
+ ----------
652
+ .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
653
+ minimal d-separators in linear time and applications." In
654
+ Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.
655
+ """
656
+ is_element_of_A = n in a
657
+ # almost_definite_status = True # always true for DAGs; not so for RCGs
658
+ collider_if_in_Z = v not in z or (e and not f)
659
+ return is_element_of_A and collider_if_in_Z # and almost_definite_status
660
+
661
+ queue = deque([])
662
+ for node in x:
663
+ if bool(G.pred[node]):
664
+ queue.append((True, node))
665
+ if bool(G.succ[node]):
666
+ queue.append((False, node))
667
+ processed = queue.copy()
668
+
669
+ while any(queue):
670
+ e, v = queue.popleft()
671
+ preds = ((False, n) for n in G.pred[v])
672
+ succs = ((True, n) for n in G.succ[v])
673
+ f_n_pairs = chain(preds, succs)
674
+ for f, n in f_n_pairs:
675
+ if (f, n) not in processed and _pass(e, v, f, n):
676
+ queue.append((f, n))
677
+ processed.append((f, n))
678
+
679
+ return {w for (_, w) in processed}
680
+
681
+
682
+ # Deprecated functions:
683
def d_separated(G, x, y, z):
    """Return whether node sets ``x`` and ``y`` are d-separated by ``z``.

    .. deprecated:: 3.3

       This function is deprecated and will be removed in NetworkX v3.5.
       Please use `is_d_separator(G, x, y, z)`.

    """
    import warnings

    # NOTE: the two string literals are joined by implicit concatenation;
    # a trailing space keeps the sentences separated in the emitted warning
    # (the previous message read "...v3.5.Please use...").
    warnings.warn(
        "d_separated is deprecated and will be removed in NetworkX v3.5. "
        "Please use `is_d_separator(G, x, y, z)`.",
        category=DeprecationWarning,
        stacklevel=2,
    )
    return nx.is_d_separator(G, x, y, z)
701
+
702
+
703
def minimal_d_separator(G, u, v):
    """Returns a minimal d-separating set between `u` and `v` if possible

    .. deprecated:: 3.3

       minimal_d_separator is deprecated and will be removed in NetworkX v3.5.
       Please use `find_minimal_d_separator(G, x, y)`.

    """
    import warnings

    # The warning must point at the actual replacement,
    # `find_minimal_d_separator` (which this wrapper forwards to), not
    # `is_d_separator`; it also needs a space between the two sentences.
    warnings.warn(
        (
            "minimal_d_separator is deprecated and will be removed in NetworkX v3.5. "
            "Please use `find_minimal_d_separator(G, x, y)`."
        ),
        category=DeprecationWarning,
        stacklevel=2,
    )
    return nx.find_minimal_d_separator(G, u, v)
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/distance_measures.py ADDED
@@ -0,0 +1,951 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Graph diameter, radius, eccentricity and other properties."""
2
+
3
+ import networkx as nx
4
+ from networkx.utils import not_implemented_for
5
+
6
# Names exported by this module.
__all__ = [
    "eccentricity",
    "diameter",
    "radius",
    "periphery",
    "center",
    "barycenter",
    "resistance_distance",
    "kemeny_constant",
    "effective_graph_resistance",
]
17
+
18
+
19
def _extrema_bounding(G, compute="diameter", weight=None):
    """Compute a requested extreme distance metric of undirected graph G.

    Implements the bound-refinement scheme of Takes & Kosters: repeatedly
    compute single-source shortest paths from a carefully chosen pivot node,
    use the pivot's exact eccentricity to tighten per-node lower/upper
    eccentricity bounds via the triangle inequality, and discard nodes whose
    bounds can no longer affect the answer.  In practice this terminates
    after far fewer than ``n`` sweeps (except for border cases such as
    complete or cycle-shaped graphs).

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    compute : string denoting the requested metric
        "diameter" for the maximal eccentricity value,
        "radius" for the minimal eccentricity value,
        "periphery" for the set of nodes with eccentricity equal to the diameter,
        "center" for the set of nodes with eccentricity equal to the radius,
        "eccentricities" for the maximum distance from each node to all other nodes in G

    weight : string, function, or None
        Edge weights, interpreted exactly as by
        :func:`~networkx.shortest_path_length`.  If None, every edge has
        weight/distance/cost 1.  Weights should be positive, since they are
        distances.

    Returns
    -------
    value : value of the requested metric
        int for "diameter" and "radius",
        list of nodes for "center" and "periphery",
        dictionary of eccentricity values keyed by node for "eccentricities".

    Raises
    ------
    NetworkXError
        If the graph consists of multiple components.
    ValueError
        If `compute` is not one of "diameter", "radius", "periphery",
        "center", or "eccentricities".

    References
    ----------
    .. [1] F. W. Takes, W. A. Kosters,
       "Determining the diameter of small world networks." CIKM 2011.
       https://dl.acm.org/doi/abs/10.1145/2063576.2063748
    .. [2] F. W. Takes, W. A. Kosters,
       "Computing the Eccentricity Distribution of Large Graphs."
       Algorithms, 2013. https://www.mdpi.com/1999-4893/6/1/100
    .. [3] M. Borassi, P. Crescenzi, M. Habib, W. A. Kosters, A. Marino,
       F. W. Takes, "Fast diameter and radius BFS-based computation in
       (weakly connected) real-world graphs."
       Theoretical Computer Science, 2015.
       https://www.sciencedirect.com/science/article/pii/S0304397515001644
    """
    degrees = dict(G.degree())
    # First pivot: the highest-degree node tends to tighten bounds fastest.
    pivot_low = max(degrees, key=degrees.get)
    pivot_high = None
    N = len(degrees)

    # Alternate between the two pivot-selection rules each sweep.
    use_upper = False

    # Per-node eccentricity bounds and the set of still-undecided nodes.
    ecc_lower = dict.fromkeys(G, 0)
    ecc_upper = dict.fromkeys(G, N)
    candidates = set(G)

    # Running extremes of the bounds; these persist across sweeps.
    minlower, maxlower = N, 0
    minupper, maxupper = N, 0

    while candidates:
        # Select the pivot: largest upper bound / smallest lower bound,
        # alternately.
        current = pivot_high if use_upper else pivot_low
        use_upper = not use_upper

        # Single-source distances from the pivot give its exact eccentricity.
        dist = nx.shortest_path_length(G, source=current, weight=weight)
        if len(dist) != N:
            msg = "Cannot compute metric because graph is not connected."
            raise nx.NetworkXError(msg)
        current_ecc = max(dist.values())

        # Reset pivot choices for the next sweep.
        pivot_high = None
        pivot_low = None

        # Tighten every candidate's bounds via the triangle inequality.
        for u in candidates:
            d = dist[u]
            ecc_lower[u] = max(ecc_lower[u], max(d, current_ecc - d))
            ecc_upper[u] = min(ecc_upper[u], current_ecc + d)

            minlower = min(ecc_lower[u], minlower)
            maxlower = max(ecc_lower[u], maxlower)
            minupper = min(ecc_upper[u], minupper)
            maxupper = max(ecc_upper[u], maxupper)

        # Rule out candidates whose bounds can no longer change the answer.
        if compute == "diameter":
            ruled_out = {
                u
                for u in candidates
                if ecc_upper[u] <= maxlower and 2 * ecc_lower[u] >= maxupper
            }
        elif compute == "radius":
            ruled_out = {
                u
                for u in candidates
                if ecc_lower[u] >= minupper and ecc_upper[u] + 1 <= 2 * minlower
            }
        elif compute == "periphery":
            ruled_out = {
                u
                for u in candidates
                if ecc_upper[u] < maxlower
                and (maxlower == maxupper or ecc_lower[u] > maxupper)
            }
        elif compute == "center":
            ruled_out = {
                u
                for u in candidates
                if ecc_lower[u] > minupper
                and (minlower == minupper or ecc_upper[u] + 1 < 2 * minlower)
            }
        elif compute == "eccentricities":
            ruled_out = set()
        else:
            msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'"
            raise ValueError(msg)

        # Nodes whose bounds coincide have a settled eccentricity.
        ruled_out.update(u for u in candidates if ecc_lower[u] == ecc_upper[u])
        candidates -= ruled_out

        # Choose the pivots for the next sweep among the survivors,
        # breaking bound ties by (higher) degree.
        for u in candidates:
            if (
                pivot_low is None
                or (
                    ecc_lower[u] == ecc_lower[pivot_low]
                    and degrees[u] > degrees[pivot_low]
                )
                or (ecc_lower[u] < ecc_lower[pivot_low])
            ):
                pivot_low = u
            if (
                pivot_high is None
                or (
                    ecc_upper[u] == ecc_upper[pivot_high]
                    and degrees[u] > degrees[pivot_high]
                )
                or (ecc_upper[u] > ecc_upper[pivot_high])
            ):
                pivot_high = u

    # All candidates resolved: report the requested metric.
    if compute == "diameter":
        return maxlower
    if compute == "radius":
        return minupper
    if compute == "periphery":
        return [v for v in G if ecc_lower[v] == maxlower]
    if compute == "center":
        return [v for v in G if ecc_upper[v] == minupper]
    if compute == "eccentricities":
        return ecc_lower
    return None
238
+
239
+
240
+ @nx._dispatchable(edge_attrs="weight")
241
def eccentricity(G, v=None, sp=None, weight=None):
    """Returns the eccentricity of nodes in G.

    The eccentricity of a node v is the maximum distance from v to
    all other nodes in G.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    v : node, optional
        Return value of specified node

    sp : dict of dicts, optional
        All pairs shortest path lengths as a dictionary of dictionaries

    weight : string, function, or None (default=None)
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key.  If no such edge attribute exists,
        the weight of the edge is assumed to be one.

        If this is a function, it must accept exactly three positional
        arguments: the two endpoints of an edge and the dictionary of edge
        attributes for that edge, and return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights should be positive, since they are distances.

    Returns
    -------
    ecc : dictionary
        A dictionary of eccentricity values keyed by node.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> dict(nx.eccentricity(G))
    {1: 2, 2: 3, 3: 2, 4: 2, 5: 3}

    >>> dict(nx.eccentricity(G, v=[1, 5]))  # This returns the eccentricity of node 1 & 5
    {1: 2, 5: 3}

    """
    order = G.order()
    e = {}
    for n in G.nbunch_iter(v):
        if sp is None:
            length = nx.shortest_path_length(G, source=n, weight=weight)
            L = len(length)
        else:
            # `sp` must map each node to a dict of path lengths; anything
            # else surfaces as a TypeError which we rewrap.
            try:
                length = sp[n]
                L = len(length)
            except TypeError as err:
                raise nx.NetworkXError('Format of "sp" is invalid.') from err

        # Fewer reachable nodes than the graph order means some distances
        # are infinite, so the eccentricity is undefined.
        if L != order:
            if G.is_directed():
                msg = (
                    "Found infinite path length because the digraph is not"
                    " strongly connected"
                )
            else:
                msg = "Found infinite path length because the graph is not" " connected"
            raise nx.NetworkXError(msg)

        e[n] = max(length.values())

    # A single-node request returns the bare value rather than a dict.
    if v in G:
        return e[v]
    return e
327
+
328
+
329
+ @nx._dispatchable(edge_attrs="weight")
330
def diameter(G, e=None, usebounds=False, weight=None):
    """Returns the diameter of the graph G.

    The diameter is the maximum eccentricity.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    usebounds : bool, optional
        If True (and `e` is None and `G` is undirected), use the fast
        bound-based algorithm instead of computing all eccentricities.

    weight : string, function, or None
        Edge weights, interpreted as by :func:`eccentricity`.
        If None, every edge has weight/distance/cost 1.
        Weights should be positive, since they are distances.

    Returns
    -------
    d : integer
        Diameter of graph

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.diameter(G)
    3

    See Also
    --------
    eccentricity
    """
    if e is None:
        # Bound-based shortcut is only valid for undirected graphs.
        if usebounds is True and not G.is_directed():
            return _extrema_bounding(G, compute="diameter", weight=weight)
        e = eccentricity(G, weight=weight)
    return max(e.values())
383
+
384
+
385
+ @nx._dispatchable(edge_attrs="weight")
386
def periphery(G, e=None, usebounds=False, weight=None):
    """Returns the periphery of the graph G.

    The periphery is the set of nodes with eccentricity equal to the diameter.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    usebounds : bool, optional
        If True (and `e` is None and `G` is undirected), use the fast
        bound-based algorithm instead of computing all eccentricities.

    weight : string, function, or None
        Edge weights, interpreted as by :func:`eccentricity`.
        If None, every edge has weight/distance/cost 1.
        Weights should be positive, since they are distances.

    Returns
    -------
    p : list
        List of nodes in periphery

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.periphery(G)
    [2, 5]

    See Also
    --------
    barycenter
    center
    """
    if e is None:
        # Bound-based shortcut is only valid for undirected graphs.
        if usebounds is True and not G.is_directed():
            return _extrema_bounding(G, compute="periphery", weight=weight)
        e = eccentricity(G, weight=weight)
    diam = max(e.values())
    return [v for v, ecc in e.items() if ecc == diam]
442
+
443
+
444
+ @nx._dispatchable(edge_attrs="weight")
445
def radius(G, e=None, usebounds=False, weight=None):
    """Returns the radius of the graph G.

    The radius is the minimum eccentricity.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    usebounds : bool, optional
        If True (and `e` is None and `G` is undirected), use the fast
        bound-based algorithm instead of computing all eccentricities.

    weight : string, function, or None
        Edge weights, interpreted as by :func:`eccentricity`.
        If None, every edge has weight/distance/cost 1.
        Weights should be positive, since they are distances.

    Returns
    -------
    r : integer
        Radius of graph

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.radius(G)
    2

    """
    if e is None:
        # Bound-based shortcut is only valid for undirected graphs.
        if usebounds is True and not G.is_directed():
            return _extrema_bounding(G, compute="radius", weight=weight)
        e = eccentricity(G, weight=weight)
    return min(e.values())
495
+
496
+
497
+ @nx._dispatchable(edge_attrs="weight")
498
def center(G, e=None, usebounds=False, weight=None):
    """Returns the center of the graph G.

    The center is the set of nodes with eccentricity equal to radius.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    usebounds : bool, optional
        If True (and `e` is None and `G` is undirected), use the fast
        bound-based algorithm instead of computing all eccentricities.

    weight : string, function, or None
        Edge weights, interpreted as by :func:`eccentricity`.
        If None, every edge has weight/distance/cost 1.
        Weights should be positive, since they are distances.

    Returns
    -------
    c : list
        List of nodes in center

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> list(nx.center(G))
    [1, 3, 4]

    See Also
    --------
    barycenter
    periphery
    """
    if e is None:
        # Bound-based shortcut is only valid for undirected graphs.
        if usebounds is True and not G.is_directed():
            return _extrema_bounding(G, compute="center", weight=weight)
        e = eccentricity(G, weight=weight)
    rad = min(e.values())
    return [v for v, ecc in e.items() if ecc == rad]
554
+
555
+
556
+ @nx._dispatchable(edge_attrs="weight", mutates_input={"attr": 2})
557
def barycenter(G, weight=None, attr=None, sp=None):
    r"""Calculate barycenter of a connected graph, optionally with edge weights.

    The :dfn:`barycenter` of a
    :func:`connected <networkx.algorithms.components.is_connected>` graph
    :math:`G` is the subgraph induced by the set of its nodes :math:`v`
    minimizing the objective function

    .. math::

        \sum_{u \in V(G)} d_G(u, v),

    where :math:`d_G` is the (possibly weighted) :func:`path length
    <networkx.algorithms.shortest_paths.generic.shortest_path_length>`.
    The barycenter is also called the :dfn:`median`. See [West01]_, p. 78.

    Parameters
    ----------
    G : :class:`networkx.Graph`
        The connected graph :math:`G`.
    weight : :class:`str`, optional
        Passed through to
        :func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`.
    attr : :class:`str`, optional
        If given, write the value of the objective function to each node's
        `attr` attribute. Otherwise do not store the value.
    sp : dict of dicts, optional
        All pairs shortest path lengths as a dictionary of dictionaries

    Returns
    -------
    list
        Nodes of `G` that induce the barycenter of `G`.

    Raises
    ------
    NetworkXNoPath
        If `G` is disconnected. `G` may appear disconnected to
        :func:`barycenter` if `sp` is given but is missing shortest path
        lengths for any pairs.
    ValueError
        If `sp` and `weight` are both given.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.barycenter(G)
    [1, 3, 4]

    See Also
    --------
    center
    periphery
    """
    if sp is None:
        dist_iter = nx.shortest_path_length(G, weight=weight)
    else:
        dist_iter = sp.items()
        if weight is not None:
            raise ValueError("Cannot use both sp, weight arguments together")

    n = len(G)
    smallest = float("inf")
    barycenter_vertices = []
    for v, dists in dist_iter:
        # Fewer than n distances means some node is unreachable from v.
        if len(dists) < n:
            raise nx.NetworkXNoPath(
                f"Input graph {G} is disconnected, so every induced subgraph "
                "has infinite barycentricity."
            )
        total = sum(dists.values())
        if attr is not None:
            G.nodes[v][attr] = total
        if total < smallest:
            smallest, barycenter_vertices = total, [v]
        elif total == smallest:
            barycenter_vertices.append(v)

    if attr is not None:
        nx._clear_cache(G)
    return barycenter_vertices
635
+
636
+
637
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def resistance_distance(G, nodeA=None, nodeB=None, weight=None, invert_weight=True):
    """Returns the resistance distance between pairs of nodes in graph G.

    The resistance distance between two nodes of a graph is akin to treating
    the graph as a grid of resistors with a resistance equal to the provided
    weight [1]_, [2]_.

    If weight is not provided, then a weight of 1 is used for all edges.

    If two nodes are the same, the resistance distance is zero.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    nodeA : node or None, optional (default=None)
        A node within graph G.
        If None, compute resistance distance using all nodes as source nodes.

    nodeB : node or None, optional (default=None)
        A node within graph G.
        If None, compute resistance distance using all nodes as target nodes.

    weight : string or None, optional (default=None)
        The edge data key used to compute the resistance distance.
        If None, then each edge has weight 1.

    invert_weight : boolean (default=True)
        Proper calculation of resistance distance requires building the
        Laplacian matrix with the reciprocal of the weight. Not required
        if the weight is already inverted. Weight cannot be zero.

    Returns
    -------
    rd : dict or float
        If `nodeA` and `nodeB` are given, resistance distance between `nodeA`
        and `nodeB`. If `nodeA` or `nodeB` is unspecified (the default), a
        dictionary of nodes with resistance distances as the value.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a directed graph.

    NetworkXError
        If `G` is not connected, or contains no nodes,
        or `nodeA` is not in `G` or `nodeB` is not in `G`.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> round(nx.resistance_distance(G, 1, 3), 10)
    0.625

    Notes
    -----
    The implementation is based on Theorem A in [2]_. Self-loops are ignored.
    Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights.

    References
    ----------
    .. [1] Wikipedia
       "Resistance distance."
       https://en.wikipedia.org/wiki/Resistance_distance
    .. [2] D. J. Klein and M. Randic.
       Resistance distance.
       J. of Math. Chem. 12:81-95, 1993.
    """
    import numpy as np

    if len(G) == 0:
        raise nx.NetworkXError("Graph G must contain at least one node.")
    if not nx.is_connected(G):
        # `G` is always undirected here (directed graphs are rejected by the
        # decorator), so plain connectivity is the right notion; the old
        # message wrongly said "strongly connected".
        raise nx.NetworkXError("Graph G must be connected.")
    if nodeA is not None and nodeA not in G:
        raise nx.NetworkXError("Node A is not in graph G.")
    if nodeB is not None and nodeB not in G:
        raise nx.NetworkXError("Node B is not in graph G.")

    # Invert weights on a copy so the caller's graph is never mutated;
    # no copy is needed when nothing is going to be modified.
    if invert_weight and weight is not None:
        G = G.copy()
        if G.is_multigraph():
            for u, v, k, d in G.edges(keys=True, data=True):
                d[weight] = 1 / d[weight]
        else:
            for u, v, d in G.edges(data=True):
                d[weight] = 1 / d[weight]

    # Map each node to its Laplacian row/column index once.  The previous
    # code used list.index() inside the loops, which is O(n) per lookup and
    # made the all-pairs case O(n^3) in lookups alone.
    node_index = {n: i for i, n in enumerate(G)}

    # Compute resistance distance using the pseudo-inverse of the Laplacian.
    # Self-loops are ignored.
    L = nx.laplacian_matrix(G, weight=weight).todense()
    Linv = np.linalg.pinv(L, hermitian=True)

    def _rd(i, j):
        # Theorem A in Klein & Randic (1993):
        # R(i, j) = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i]
        return Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)

    # Return the relevant distances for the requested node combination.
    if nodeA is not None and nodeB is not None:
        return _rd(node_index[nodeA], node_index[nodeB])
    if nodeA is not None:
        i = node_index[nodeA]
        return {n: _rd(i, node_index[n]) for n in G}
    if nodeB is not None:
        j = node_index[nodeB]
        return {n: _rd(node_index[n], j) for n in G}
    return {n: {n2: _rd(node_index[n], node_index[n2]) for n2 in G} for n in G}
772
+
773
+
774
+ @not_implemented_for("directed")
775
+ @nx._dispatchable(edge_attrs="weight")
776
+ def effective_graph_resistance(G, weight=None, invert_weight=True):
777
+ """Returns the Effective graph resistance of G.
778
+
779
+ Also known as the Kirchhoff index.
780
+
781
+ The effective graph resistance is defined as the sum
782
+ of the resistance distance of every node pair in G [1]_.
783
+
784
+ If weight is not provided, then a weight of 1 is used for all edges.
785
+
786
+ The effective graph resistance of a disconnected graph is infinite.
787
+
788
+ Parameters
789
+ ----------
790
+ G : NetworkX graph
791
+ A graph
792
+
793
+ weight : string or None, optional (default=None)
794
+ The edge data key used to compute the effective graph resistance.
795
+ If None, then each edge has weight 1.
796
+
797
+ invert_weight : boolean (default=True)
798
+ Proper calculation of resistance distance requires building the
799
+ Laplacian matrix with the reciprocal of the weight. Not required
800
+ if the weight is already inverted. Weight cannot be zero.
801
+
802
+ Returns
803
+ -------
804
+ RG : float
805
+ The effective graph resistance of `G`.
806
+
807
+ Raises
808
+ ------
809
+ NetworkXNotImplemented
810
+ If `G` is a directed graph.
811
+
812
+ NetworkXError
813
+ If `G` does not contain any nodes.
814
+
815
+ Examples
816
+ --------
817
+ >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
818
+ >>> round(nx.effective_graph_resistance(G), 10)
819
+ 10.25
820
+
821
+ Notes
822
+ -----
823
+ The implementation is based on Theorem 2.2 in [2]_. Self-loops are ignored.
824
+ Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights.
825
+
826
+ References
827
+ ----------
828
+ .. [1] Wolfram
829
+ "Kirchhoff Index."
830
+ https://mathworld.wolfram.com/KirchhoffIndex.html
831
+ .. [2] W. Ellens, F. M. Spieksma, P. Van Mieghem, A. Jamakovic, R. E. Kooij.
832
+ Effective graph resistance.
833
+ Lin. Alg. Appl. 435:2491-2506, 2011.
834
+ """
835
+ import numpy as np
836
+
837
+ if len(G) == 0:
838
+ raise nx.NetworkXError("Graph G must contain at least one node.")
839
+
840
+ # Disconnected graphs have infinite Effective graph resistance
841
+ if not nx.is_connected(G):
842
+ return float("inf")
843
+
844
+ # Invert weights
845
+ G = G.copy()
846
+ if invert_weight and weight is not None:
847
+ if G.is_multigraph():
848
+ for u, v, k, d in G.edges(keys=True, data=True):
849
+ d[weight] = 1 / d[weight]
850
+ else:
851
+ for u, v, d in G.edges(data=True):
852
+ d[weight] = 1 / d[weight]
853
+
854
+ # Get Laplacian eigenvalues
855
+ mu = np.sort(nx.laplacian_spectrum(G, weight=weight))
856
+
857
+ # Compute Effective graph resistance based on spectrum of the Laplacian
858
+ # Self-loops are ignored
859
+ return float(np.sum(1 / mu[1:]) * G.number_of_nodes())
860
+
861
+
862
+ @nx.utils.not_implemented_for("directed")
863
+ @nx._dispatchable(edge_attrs="weight")
864
+ def kemeny_constant(G, *, weight=None):
865
+ """Returns the Kemeny constant of the given graph.
866
+
867
+ The *Kemeny constant* (or Kemeny's constant) of a graph `G`
868
+ can be computed by regarding the graph as a Markov chain.
869
+ The Kemeny constant is then the expected number of time steps
870
+ to transition from a starting state i to a random destination state
871
+ sampled from the Markov chain's stationary distribution.
872
+ The Kemeny constant is independent of the chosen initial state [1]_.
873
+
874
+ The Kemeny constant measures the time needed for spreading
875
+ across a graph. Low values indicate a closely connected graph
876
+ whereas high values indicate a spread-out graph.
877
+
878
+ If weight is not provided, then a weight of 1 is used for all edges.
879
+
880
+ Since `G` represents a Markov chain, the weights must be positive.
881
+
882
+ Parameters
883
+ ----------
884
+ G : NetworkX graph
885
+
886
+ weight : string or None, optional (default=None)
887
+ The edge data key used to compute the Kemeny constant.
888
+ If None, then each edge has weight 1.
889
+
890
+ Returns
891
+ -------
892
+ float
893
+ The Kemeny constant of the graph `G`.
894
+
895
+ Raises
896
+ ------
897
+ NetworkXNotImplemented
898
+ If the graph `G` is directed.
899
+
900
+ NetworkXError
901
+ If the graph `G` is not connected, or contains no nodes,
902
+ or has edges with negative weights.
903
+
904
+ Examples
905
+ --------
906
+ >>> G = nx.complete_graph(5)
907
+ >>> round(nx.kemeny_constant(G), 10)
908
+ 3.2
909
+
910
+ Notes
911
+ -----
912
+ The implementation is based on equation (3.3) in [2]_.
913
+ Self-loops are allowed and indicate a Markov chain where
914
+ the state can remain the same. Multi-edges are contracted
915
+ in one edge with weight equal to the sum of the weights.
916
+
917
+ References
918
+ ----------
919
+ .. [1] Wikipedia
920
+ "Kemeny's constant."
921
+ https://en.wikipedia.org/wiki/Kemeny%27s_constant
922
+ .. [2] Lovász L.
923
+ Random walks on graphs: A survey.
924
+ Paul Erdös is Eighty, vol. 2, Bolyai Society,
925
+ Mathematical Studies, Keszthely, Hungary (1993), pp. 1-46
926
+ """
927
+ import numpy as np
928
+ import scipy as sp
929
+
930
+ if len(G) == 0:
931
+ raise nx.NetworkXError("Graph G must contain at least one node.")
932
+ if not nx.is_connected(G):
933
+ raise nx.NetworkXError("Graph G must be connected.")
934
+ if nx.is_negatively_weighted(G, weight=weight):
935
+ raise nx.NetworkXError("The weights of graph G must be nonnegative.")
936
+
937
+ # Compute matrix H = D^-1/2 A D^-1/2
938
+ A = nx.adjacency_matrix(G, weight=weight)
939
+ n, m = A.shape
940
+ diags = A.sum(axis=1)
941
+ with np.errstate(divide="ignore"):
942
+ diags_sqrt = 1.0 / np.sqrt(diags)
943
+ diags_sqrt[np.isinf(diags_sqrt)] = 0
944
+ DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr"))
945
+ H = DH @ (A @ DH)
946
+
947
+ # Compute eigenvalues of H
948
+ eig = np.sort(sp.linalg.eigvalsh(H.todense()))
949
+
950
+ # Compute the Kemeny constant
951
+ return float(np.sum(1 / (1 - eig[:-1])))
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/distance_regular.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =======================
3
+ Distance-regular graphs
4
+ =======================
5
+ """
6
+
7
+ import networkx as nx
8
+ from networkx.utils import not_implemented_for
9
+
10
+ from .distance_measures import diameter
11
+
12
+ __all__ = [
13
+ "is_distance_regular",
14
+ "is_strongly_regular",
15
+ "intersection_array",
16
+ "global_parameters",
17
+ ]
18
+
19
+
20
+ @nx._dispatchable
21
+ def is_distance_regular(G):
22
+ """Returns True if the graph is distance regular, False otherwise.
23
+
24
+ A connected graph G is distance-regular if for any nodes x,y
25
+ and any integers i,j=0,1,...,d (where d is the graph
26
+ diameter), the number of vertices at distance i from x and
27
+ distance j from y depends only on i,j and the graph distance
28
+ between x and y, independently of the choice of x and y.
29
+
30
+ Parameters
31
+ ----------
32
+ G: Networkx graph (undirected)
33
+
34
+ Returns
35
+ -------
36
+ bool
37
+ True if the graph is Distance Regular, False otherwise
38
+
39
+ Examples
40
+ --------
41
+ >>> G = nx.hypercube_graph(6)
42
+ >>> nx.is_distance_regular(G)
43
+ True
44
+
45
+ See Also
46
+ --------
47
+ intersection_array, global_parameters
48
+
49
+ Notes
50
+ -----
51
+ For undirected and simple graphs only
52
+
53
+ References
54
+ ----------
55
+ .. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A.
56
+ Distance-Regular Graphs. New York: Springer-Verlag, 1989.
57
+ .. [2] Weisstein, Eric W. "Distance-Regular Graph."
58
+ http://mathworld.wolfram.com/Distance-RegularGraph.html
59
+
60
+ """
61
+ try:
62
+ intersection_array(G)
63
+ return True
64
+ except nx.NetworkXError:
65
+ return False
66
+
67
+
68
+ def global_parameters(b, c):
69
+ """Returns global parameters for a given intersection array.
70
+
71
+ Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d
72
+ such that for any 2 vertices x,y in G at a distance i=d(x,y), there
73
+ are exactly c_i neighbors of y at a distance of i-1 from x and b_i
74
+ neighbors of y at a distance of i+1 from x.
75
+
76
+ Thus, a distance regular graph has the global parameters,
77
+ [[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the
78
+ intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
79
+ where a_i+b_i+c_i=k , k= degree of every vertex.
80
+
81
+ Parameters
82
+ ----------
83
+ b : list
84
+
85
+ c : list
86
+
87
+ Returns
88
+ -------
89
+ iterable
90
+ An iterable over three tuples.
91
+
92
+ Examples
93
+ --------
94
+ >>> G = nx.dodecahedral_graph()
95
+ >>> b, c = nx.intersection_array(G)
96
+ >>> list(nx.global_parameters(b, c))
97
+ [(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)]
98
+
99
+ References
100
+ ----------
101
+ .. [1] Weisstein, Eric W. "Global Parameters."
102
+ From MathWorld--A Wolfram Web Resource.
103
+ http://mathworld.wolfram.com/GlobalParameters.html
104
+
105
+ See Also
106
+ --------
107
+ intersection_array
108
+ """
109
+ return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c))
110
+
111
+
112
+ @not_implemented_for("directed")
113
+ @not_implemented_for("multigraph")
114
+ @nx._dispatchable
115
+ def intersection_array(G):
116
+ """Returns the intersection array of a distance-regular graph.
117
+
118
+ Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d
119
+ such that for any 2 vertices x,y in G at a distance i=d(x,y), there
120
+ are exactly c_i neighbors of y at a distance of i-1 from x and b_i
121
+ neighbors of y at a distance of i+1 from x.
122
+
123
+ A distance regular graph's intersection array is given by,
124
+ [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
125
+
126
+ Parameters
127
+ ----------
128
+ G: Networkx graph (undirected)
129
+
130
+ Returns
131
+ -------
132
+ b,c: tuple of lists
133
+
134
+ Examples
135
+ --------
136
+ >>> G = nx.icosahedral_graph()
137
+ >>> nx.intersection_array(G)
138
+ ([5, 2, 1], [1, 2, 5])
139
+
140
+ References
141
+ ----------
142
+ .. [1] Weisstein, Eric W. "Intersection Array."
143
+ From MathWorld--A Wolfram Web Resource.
144
+ http://mathworld.wolfram.com/IntersectionArray.html
145
+
146
+ See Also
147
+ --------
148
+ global_parameters
149
+ """
150
+ # test for regular graph (all degrees must be equal)
151
+ if len(G) == 0:
152
+ raise nx.NetworkXPointlessConcept("Graph has no nodes.")
153
+ degree = iter(G.degree())
154
+ (_, k) = next(degree)
155
+ for _, knext in degree:
156
+ if knext != k:
157
+ raise nx.NetworkXError("Graph is not distance regular.")
158
+ k = knext
159
+ path_length = dict(nx.all_pairs_shortest_path_length(G))
160
+ diameter = max(max(path_length[n].values()) for n in path_length)
161
+ bint = {} # 'b' intersection array
162
+ cint = {} # 'c' intersection array
163
+ for u in G:
164
+ for v in G:
165
+ try:
166
+ i = path_length[u][v]
167
+ except KeyError as err: # graph must be connected
168
+ raise nx.NetworkXError("Graph is not distance regular.") from err
169
+ # number of neighbors of v at a distance of i-1 from u
170
+ c = len([n for n in G[v] if path_length[n][u] == i - 1])
171
+ # number of neighbors of v at a distance of i+1 from u
172
+ b = len([n for n in G[v] if path_length[n][u] == i + 1])
173
+ # b,c are independent of u and v
174
+ if cint.get(i, c) != c or bint.get(i, b) != b:
175
+ raise nx.NetworkXError("Graph is not distance regular")
176
+ bint[i] = b
177
+ cint[i] = c
178
+ return (
179
+ [bint.get(j, 0) for j in range(diameter)],
180
+ [cint.get(j + 1, 0) for j in range(diameter)],
181
+ )
182
+
183
+
184
+ # TODO There is a definition for directed strongly regular graphs.
185
+ @not_implemented_for("directed")
186
+ @not_implemented_for("multigraph")
187
+ @nx._dispatchable
188
+ def is_strongly_regular(G):
189
+ """Returns True if and only if the given graph is strongly
190
+ regular.
191
+
192
+ An undirected graph is *strongly regular* if
193
+
194
+ * it is regular,
195
+ * each pair of adjacent vertices has the same number of neighbors in
196
+ common,
197
+ * each pair of nonadjacent vertices has the same number of neighbors
198
+ in common.
199
+
200
+ Each strongly regular graph is a distance-regular graph.
201
+ Conversely, if a distance-regular graph has diameter two, then it is
202
+ a strongly regular graph. For more information on distance-regular
203
+ graphs, see :func:`is_distance_regular`.
204
+
205
+ Parameters
206
+ ----------
207
+ G : NetworkX graph
208
+ An undirected graph.
209
+
210
+ Returns
211
+ -------
212
+ bool
213
+ Whether `G` is strongly regular.
214
+
215
+ Examples
216
+ --------
217
+
218
+ The cycle graph on five vertices is strongly regular. It is
219
+ two-regular, each pair of adjacent vertices has no shared neighbors,
220
+ and each pair of nonadjacent vertices has one shared neighbor::
221
+
222
+ >>> G = nx.cycle_graph(5)
223
+ >>> nx.is_strongly_regular(G)
224
+ True
225
+
226
+ """
227
+ # Here is an alternate implementation based directly on the
228
+ # definition of strongly regular graphs:
229
+ #
230
+ # return (all_equal(G.degree().values())
231
+ # and all_equal(len(common_neighbors(G, u, v))
232
+ # for u, v in G.edges())
233
+ # and all_equal(len(common_neighbors(G, u, v))
234
+ # for u, v in non_edges(G)))
235
+ #
236
+ # We instead use the fact that a distance-regular graph of diameter
237
+ # two is strongly regular.
238
+ return is_distance_regular(G) and diameter(G) == 2
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/euler.py ADDED
@@ -0,0 +1,469 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Eulerian circuits and graphs.
3
+ """
4
+ from itertools import combinations
5
+
6
+ import networkx as nx
7
+
8
+ from ..utils import arbitrary_element, not_implemented_for
9
+
10
+ __all__ = [
11
+ "is_eulerian",
12
+ "eulerian_circuit",
13
+ "eulerize",
14
+ "is_semieulerian",
15
+ "has_eulerian_path",
16
+ "eulerian_path",
17
+ ]
18
+
19
+
20
+ @nx._dispatchable
21
+ def is_eulerian(G):
22
+ """Returns True if and only if `G` is Eulerian.
23
+
24
+ A graph is *Eulerian* if it has an Eulerian circuit. An *Eulerian
25
+ circuit* is a closed walk that includes each edge of a graph exactly
26
+ once.
27
+
28
+ Graphs with isolated vertices (i.e. vertices with zero degree) are not
29
+ considered to have Eulerian circuits. Therefore, if the graph is not
30
+ connected (or not strongly connected, for directed graphs), this function
31
+ returns False.
32
+
33
+ Parameters
34
+ ----------
35
+ G : NetworkX graph
36
+ A graph, either directed or undirected.
37
+
38
+ Examples
39
+ --------
40
+ >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]}))
41
+ True
42
+ >>> nx.is_eulerian(nx.complete_graph(5))
43
+ True
44
+ >>> nx.is_eulerian(nx.petersen_graph())
45
+ False
46
+
47
+ If you prefer to allow graphs with isolated vertices to have Eulerian circuits,
48
+ you can first remove such vertices and then call `is_eulerian` as below example shows.
49
+
50
+ >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
51
+ >>> G.add_node(3)
52
+ >>> nx.is_eulerian(G)
53
+ False
54
+
55
+ >>> G.remove_nodes_from(list(nx.isolates(G)))
56
+ >>> nx.is_eulerian(G)
57
+ True
58
+
59
+
60
+ """
61
+ if G.is_directed():
62
+ # Every node must have equal in degree and out degree and the
63
+ # graph must be strongly connected
64
+ return all(
65
+ G.in_degree(n) == G.out_degree(n) for n in G
66
+ ) and nx.is_strongly_connected(G)
67
+ # An undirected Eulerian graph has no vertices of odd degree and
68
+ # must be connected.
69
+ return all(d % 2 == 0 for v, d in G.degree()) and nx.is_connected(G)
70
+
71
+
72
+ @nx._dispatchable
73
+ def is_semieulerian(G):
74
+ """Return True iff `G` is semi-Eulerian.
75
+
76
+ G is semi-Eulerian if it has an Eulerian path but no Eulerian circuit.
77
+
78
+ See Also
79
+ --------
80
+ has_eulerian_path
81
+ is_eulerian
82
+ """
83
+ return has_eulerian_path(G) and not is_eulerian(G)
84
+
85
+
86
+ def _find_path_start(G):
87
+ """Return a suitable starting vertex for an Eulerian path.
88
+
89
+ If no path exists, return None.
90
+ """
91
+ if not has_eulerian_path(G):
92
+ return None
93
+
94
+ if is_eulerian(G):
95
+ return arbitrary_element(G)
96
+
97
+ if G.is_directed():
98
+ v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v))
99
+ # Determines which is the 'start' node (as opposed to the 'end')
100
+ if G.out_degree(v1) > G.in_degree(v1):
101
+ return v1
102
+ else:
103
+ return v2
104
+
105
+ else:
106
+ # In an undirected graph randomly choose one of the possibilities
107
+ start = [v for v in G if G.degree(v) % 2 != 0][0]
108
+ return start
109
+
110
+
111
+ def _simplegraph_eulerian_circuit(G, source):
112
+ if G.is_directed():
113
+ degree = G.out_degree
114
+ edges = G.out_edges
115
+ else:
116
+ degree = G.degree
117
+ edges = G.edges
118
+ vertex_stack = [source]
119
+ last_vertex = None
120
+ while vertex_stack:
121
+ current_vertex = vertex_stack[-1]
122
+ if degree(current_vertex) == 0:
123
+ if last_vertex is not None:
124
+ yield (last_vertex, current_vertex)
125
+ last_vertex = current_vertex
126
+ vertex_stack.pop()
127
+ else:
128
+ _, next_vertex = arbitrary_element(edges(current_vertex))
129
+ vertex_stack.append(next_vertex)
130
+ G.remove_edge(current_vertex, next_vertex)
131
+
132
+
133
+ def _multigraph_eulerian_circuit(G, source):
134
+ if G.is_directed():
135
+ degree = G.out_degree
136
+ edges = G.out_edges
137
+ else:
138
+ degree = G.degree
139
+ edges = G.edges
140
+ vertex_stack = [(source, None)]
141
+ last_vertex = None
142
+ last_key = None
143
+ while vertex_stack:
144
+ current_vertex, current_key = vertex_stack[-1]
145
+ if degree(current_vertex) == 0:
146
+ if last_vertex is not None:
147
+ yield (last_vertex, current_vertex, last_key)
148
+ last_vertex, last_key = current_vertex, current_key
149
+ vertex_stack.pop()
150
+ else:
151
+ triple = arbitrary_element(edges(current_vertex, keys=True))
152
+ _, next_vertex, next_key = triple
153
+ vertex_stack.append((next_vertex, next_key))
154
+ G.remove_edge(current_vertex, next_vertex, next_key)
155
+
156
+
157
+ @nx._dispatchable
158
+ def eulerian_circuit(G, source=None, keys=False):
159
+ """Returns an iterator over the edges of an Eulerian circuit in `G`.
160
+
161
+ An *Eulerian circuit* is a closed walk that includes each edge of a
162
+ graph exactly once.
163
+
164
+ Parameters
165
+ ----------
166
+ G : NetworkX graph
167
+ A graph, either directed or undirected.
168
+
169
+ source : node, optional
170
+ Starting node for circuit.
171
+
172
+ keys : bool
173
+ If False, edges generated by this function will be of the form
174
+ ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``.
175
+ This option is ignored unless `G` is a multigraph.
176
+
177
+ Returns
178
+ -------
179
+ edges : iterator
180
+ An iterator over edges in the Eulerian circuit.
181
+
182
+ Raises
183
+ ------
184
+ NetworkXError
185
+ If the graph is not Eulerian.
186
+
187
+ See Also
188
+ --------
189
+ is_eulerian
190
+
191
+ Notes
192
+ -----
193
+ This is a linear time implementation of an algorithm adapted from [1]_.
194
+
195
+ For general information about Euler tours, see [2]_.
196
+
197
+ References
198
+ ----------
199
+ .. [1] J. Edmonds, E. L. Johnson.
200
+ Matching, Euler tours and the Chinese postman.
201
+ Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
202
+ .. [2] https://en.wikipedia.org/wiki/Eulerian_path
203
+
204
+ Examples
205
+ --------
206
+ To get an Eulerian circuit in an undirected graph::
207
+
208
+ >>> G = nx.complete_graph(3)
209
+ >>> list(nx.eulerian_circuit(G))
210
+ [(0, 2), (2, 1), (1, 0)]
211
+ >>> list(nx.eulerian_circuit(G, source=1))
212
+ [(1, 2), (2, 0), (0, 1)]
213
+
214
+ To get the sequence of vertices in an Eulerian circuit::
215
+
216
+ >>> [u for u, v in nx.eulerian_circuit(G)]
217
+ [0, 2, 1]
218
+
219
+ """
220
+ if not is_eulerian(G):
221
+ raise nx.NetworkXError("G is not Eulerian.")
222
+ if G.is_directed():
223
+ G = G.reverse()
224
+ else:
225
+ G = G.copy()
226
+ if source is None:
227
+ source = arbitrary_element(G)
228
+ if G.is_multigraph():
229
+ for u, v, k in _multigraph_eulerian_circuit(G, source):
230
+ if keys:
231
+ yield u, v, k
232
+ else:
233
+ yield u, v
234
+ else:
235
+ yield from _simplegraph_eulerian_circuit(G, source)
236
+
237
+
238
+ @nx._dispatchable
239
+ def has_eulerian_path(G, source=None):
240
+ """Return True iff `G` has an Eulerian path.
241
+
242
+ An Eulerian path is a path in a graph which uses each edge of a graph
243
+ exactly once. If `source` is specified, then this function checks
244
+ whether an Eulerian path that starts at node `source` exists.
245
+
246
+ A directed graph has an Eulerian path iff:
247
+ - at most one vertex has out_degree - in_degree = 1,
248
+ - at most one vertex has in_degree - out_degree = 1,
249
+ - every other vertex has equal in_degree and out_degree,
250
+ - and all of its vertices belong to a single connected
251
+ component of the underlying undirected graph.
252
+
253
+ If `source` is not None, an Eulerian path starting at `source` exists if no
254
+ other node has out_degree - in_degree = 1. This is equivalent to either
255
+ there exists an Eulerian circuit or `source` has out_degree - in_degree = 1
256
+ and the conditions above hold.
257
+
258
+ An undirected graph has an Eulerian path iff:
259
+ - exactly zero or two vertices have odd degree,
260
+ - and all of its vertices belong to a single connected component.
261
+
262
+ If `source` is not None, an Eulerian path starting at `source` exists if
263
+ either there exists an Eulerian circuit or `source` has an odd degree and the
264
+ conditions above hold.
265
+
266
+ Graphs with isolated vertices (i.e. vertices with zero degree) are not considered
267
+ to have an Eulerian path. Therefore, if the graph is not connected (or not strongly
268
+ connected, for directed graphs), this function returns False.
269
+
270
+ Parameters
271
+ ----------
272
+ G : NetworkX Graph
273
+ The graph to find an euler path in.
274
+
275
+ source : node, optional
276
+ Starting node for path.
277
+
278
+ Returns
279
+ -------
280
+ Bool : True if G has an Eulerian path.
281
+
282
+ Examples
283
+ --------
284
+ If you prefer to allow graphs with isolated vertices to have Eulerian path,
285
+ you can first remove such vertices and then call `has_eulerian_path` as below example shows.
286
+
287
+ >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
288
+ >>> G.add_node(3)
289
+ >>> nx.has_eulerian_path(G)
290
+ False
291
+
292
+ >>> G.remove_nodes_from(list(nx.isolates(G)))
293
+ >>> nx.has_eulerian_path(G)
294
+ True
295
+
296
+ See Also
297
+ --------
298
+ is_eulerian
299
+ eulerian_path
300
+ """
301
+ if nx.is_eulerian(G):
302
+ return True
303
+
304
+ if G.is_directed():
305
+ ins = G.in_degree
306
+ outs = G.out_degree
307
+ # Since we know it is not eulerian, outs - ins must be 1 for source
308
+ if source is not None and outs[source] - ins[source] != 1:
309
+ return False
310
+
311
+ unbalanced_ins = 0
312
+ unbalanced_outs = 0
313
+ for v in G:
314
+ if ins[v] - outs[v] == 1:
315
+ unbalanced_ins += 1
316
+ elif outs[v] - ins[v] == 1:
317
+ unbalanced_outs += 1
318
+ elif ins[v] != outs[v]:
319
+ return False
320
+
321
+ return (
322
+ unbalanced_ins <= 1 and unbalanced_outs <= 1 and nx.is_weakly_connected(G)
323
+ )
324
+ else:
325
+ # We know it is not eulerian, so degree of source must be odd.
326
+ if source is not None and G.degree[source] % 2 != 1:
327
+ return False
328
+
329
+ # Sum is 2 since we know it is not eulerian (which implies sum is 0)
330
+ return sum(d % 2 == 1 for v, d in G.degree()) == 2 and nx.is_connected(G)
331
+
332
+
333
+ @nx._dispatchable
334
+ def eulerian_path(G, source=None, keys=False):
335
+ """Return an iterator over the edges of an Eulerian path in `G`.
336
+
337
+ Parameters
338
+ ----------
339
+ G : NetworkX Graph
340
+ The graph in which to look for an eulerian path.
341
+ source : node or None (default: None)
342
+ The node at which to start the search. None means search over all
343
+ starting nodes.
344
+ keys : Bool (default: False)
345
+ Indicates whether to yield edge 3-tuples (u, v, edge_key).
346
+ The default yields edge 2-tuples
347
+
348
+ Yields
349
+ ------
350
+ Edge tuples along the eulerian path.
351
+
352
+ Warning: If `source` provided is not the start node of an Euler path
353
+ will raise error even if an Euler Path exists.
354
+ """
355
+ if not has_eulerian_path(G, source):
356
+ raise nx.NetworkXError("Graph has no Eulerian paths.")
357
+ if G.is_directed():
358
+ G = G.reverse()
359
+ if source is None or nx.is_eulerian(G) is False:
360
+ source = _find_path_start(G)
361
+ if G.is_multigraph():
362
+ for u, v, k in _multigraph_eulerian_circuit(G, source):
363
+ if keys:
364
+ yield u, v, k
365
+ else:
366
+ yield u, v
367
+ else:
368
+ yield from _simplegraph_eulerian_circuit(G, source)
369
+ else:
370
+ G = G.copy()
371
+ if source is None:
372
+ source = _find_path_start(G)
373
+ if G.is_multigraph():
374
+ if keys:
375
+ yield from reversed(
376
+ [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)]
377
+ )
378
+ else:
379
+ yield from reversed(
380
+ [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)]
381
+ )
382
+ else:
383
+ yield from reversed(
384
+ [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)]
385
+ )
386
+
387
+
388
+ @not_implemented_for("directed")
389
+ @nx._dispatchable(returns_graph=True)
390
+ def eulerize(G):
391
+ """Transforms a graph into an Eulerian graph.
392
+
393
+ If `G` is Eulerian the result is `G` as a MultiGraph, otherwise the result is a smallest
394
+ (in terms of the number of edges) multigraph whose underlying simple graph is `G`.
395
+
396
+ Parameters
397
+ ----------
398
+ G : NetworkX graph
399
+ An undirected graph
400
+
401
+ Returns
402
+ -------
403
+ G : NetworkX multigraph
404
+
405
+ Raises
406
+ ------
407
+ NetworkXError
408
+ If the graph is not connected.
409
+
410
+ See Also
411
+ --------
412
+ is_eulerian
413
+ eulerian_circuit
414
+
415
+ References
416
+ ----------
417
+ .. [1] J. Edmonds, E. L. Johnson.
418
+ Matching, Euler tours and the Chinese postman.
419
+ Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
420
+ .. [2] https://en.wikipedia.org/wiki/Eulerian_path
421
+ .. [3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf
422
+
423
+ Examples
424
+ --------
425
+ >>> G = nx.complete_graph(10)
426
+ >>> H = nx.eulerize(G)
427
+ >>> nx.is_eulerian(H)
428
+ True
429
+
430
+ """
431
+ if G.order() == 0:
432
+ raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph")
433
+ if not nx.is_connected(G):
434
+ raise nx.NetworkXError("G is not connected")
435
+ odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1]
436
+ G = nx.MultiGraph(G)
437
+ if len(odd_degree_nodes) == 0:
438
+ return G
439
+
440
+ # get all shortest paths between vertices of odd degree
441
+ odd_deg_pairs_paths = [
442
+ (m, {n: nx.shortest_path(G, source=m, target=n)})
443
+ for m, n in combinations(odd_degree_nodes, 2)
444
+ ]
445
+
446
+ # use the number of vertices in a graph + 1 as an upper bound on
447
+ # the maximum length of a path in G
448
+ upper_bound_on_max_path_length = len(G) + 1
449
+
450
+ # use "len(G) + 1 - len(P)",
451
+ # where P is a shortest path between vertices n and m,
452
+ # as edge-weights in a new graph
453
+ # store the paths in the graph for easy indexing later
454
+ Gp = nx.Graph()
455
+ for n, Ps in odd_deg_pairs_paths:
456
+ for m, P in Ps.items():
457
+ if n != m:
458
+ Gp.add_edge(
459
+ m, n, weight=upper_bound_on_max_path_length - len(P), path=P
460
+ )
461
+
462
+ # find the minimum weight matching of edges in the weighted graph
463
+ best_matching = nx.Graph(list(nx.max_weight_matching(Gp)))
464
+
465
+ # duplicate each edge along each path in the set of paths in Gp
466
+ for m, n in best_matching.edges():
467
+ path = Gp[m][n]["path"]
468
+ G.add_edges_from(nx.utils.pairwise(path))
469
+ return G
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .maxflow import *
2
+ from .mincost import *
3
+ from .boykovkolmogorov import *
4
+ from .dinitz_alg import *
5
+ from .edmondskarp import *
6
+ from .gomory_hu import *
7
+ from .preflowpush import *
8
+ from .shortestaugmentingpath import *
9
+ from .capacityscaling import *
10
+ from .networksimplex import *
11
+ from .utils import build_flow_dict, build_residual_network
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (514 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/boykovkolmogorov.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Boykov-Kolmogorov algorithm for maximum flow problems.
3
+ """
4
+ from collections import deque
5
+ from operator import itemgetter
6
+
7
+ import networkx as nx
8
+ from networkx.algorithms.flow.utils import build_residual_network
9
+
10
+ __all__ = ["boykov_kolmogorov"]
11
+
12
+
13
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def boykov_kolmogorov(
    G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None
):
    r"""Find a maximum single-commodity flow using Boykov-Kolmogorov algorithm.

    This function returns the residual network resulting after computing
    the maximum flow. See below for details about the conventions
    NetworkX uses for defining residual networks.

    This algorithm has worse case complexity $O(n^2 m |C|)$ for $n$ nodes, $m$
    edges, and $|C|$ the cost of the minimum cut [1]_. This implementation
    uses the marking heuristic defined in [2]_ which improves its running
    time in many practical problems.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    residual : NetworkX graph
        Residual network on which the algorithm is to be executed. If None, a
        new residual network is created. Default value: None.

    value_only : bool
        If True compute only the value of the maximum flow. This parameter
        will be ignored by this algorithm because it is not applicable.

    cutoff : integer, float
        If specified, the algorithm will terminate when the flow value reaches
        or exceeds the cutoff. In this case, it may be unable to immediately
        determine a minimum cut. Default value: None.

    Returns
    -------
    R : NetworkX DiGraph
        Residual network after computing the maximum flow.

    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
    specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
    that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Examples
    --------
    >>> from networkx.algorithms.flow import boykov_kolmogorov

    The functions that implement flow algorithms and output a residual
    network, such as this one, are not imported to the base NetworkX
    namespace, so you have to explicitly import them from the flow package.

    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> R = boykov_kolmogorov(G, "x", "y")
    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
    >>> flow_value
    3.0
    >>> flow_value == R.graph["flow_value"]
    True

    A nice feature of the Boykov-Kolmogorov algorithm is that a partition
    of the nodes that defines a minimum cut can be easily computed based
    on the search trees used during the algorithm. These trees are stored
    in the graph attribute `trees` of the residual network.

    >>> source_tree, target_tree = R.graph["trees"]
    >>> partition = (set(source_tree), set(G) - set(source_tree))

    Or equivalently:

    >>> partition = (set(G) - set(target_tree), set(target_tree))

    References
    ----------
    .. [1] Boykov, Y., & Kolmogorov, V. (2004). An experimental comparison
           of min-cut/max-flow algorithms for energy minimization in vision.
           Pattern Analysis and Machine Intelligence, IEEE Transactions on,
           26(9), 1124-1137.
           https://doi.org/10.1109/TPAMI.2004.60

    .. [2] Vladimir Kolmogorov. Graph-based Algorithms for Multi-camera
           Reconstruction Problem. PhD thesis, Cornell University, CS Department,
           2003. pp. 109-114.
           https://web.archive.org/web/20170809091249/https://pub.ist.ac.at/~vnk/papers/thesis.pdf

    """
    # Delegate to the implementation, then tag the residual network with the
    # algorithm name so callers can tell which flow function produced it.
    R = boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff)
    R.graph["algorithm"] = "boykov_kolmogorov"
    nx._clear_cache(R)
    return R
161
+
162
+
163
def boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff):
    """Core Boykov-Kolmogorov max-flow computation.

    Returns the residual network ``R`` with ``R.graph['flow_value']`` set and
    ``R.graph['trees']`` holding the (source, target) search trees. The three
    stages (grow / augment / adopt) are the ones described in the BK paper.
    """
    if s not in G:
        raise nx.NetworkXError(f"node {str(s)} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {str(t)} not in graph")
    if s == t:
        raise nx.NetworkXError("source and sink are the same node")

    if residual is None:
        R = build_residual_network(G, capacity)
    else:
        R = residual

    # Initialize/reset the residual network.
    # This is way too slow
    # nx.set_edge_attributes(R, 0, 'flow')
    for u in R:
        for e in R[u].values():
            e["flow"] = 0

    # Use an arbitrary high value as infinite. It is computed
    # when building the residual network.
    INF = R.graph["inf"]

    if cutoff is None:
        cutoff = INF

    R_succ = R.succ
    R_pred = R.pred

    def grow():
        """Bidirectional breadth-first search for the growth stage.

        Returns a connecting edge, that is an edge that connects
        a node from the source search tree with a node from the
        target search tree.
        The first node in the connecting edge is always from the
        source tree and the last node from the target tree.
        """
        while active:
            u = active[0]
            # A node belongs to exactly one tree; the source tree grows along
            # successors, the target tree along predecessors.
            if u in source_tree:
                this_tree = source_tree
                other_tree = target_tree
                neighbors = R_succ
            else:
                this_tree = target_tree
                other_tree = source_tree
                neighbors = R_pred
            for v, attr in neighbors[u].items():
                if attr["capacity"] - attr["flow"] > 0:
                    if v not in this_tree:
                        if v in other_tree:
                            return (u, v) if this_tree is source_tree else (v, u)
                        this_tree[v] = u
                        dist[v] = dist[u] + 1
                        timestamp[v] = timestamp[u]
                        active.append(v)
                    elif v in this_tree and _is_closer(u, v):
                        # Re-parent v: u offers a shorter path to the root.
                        this_tree[v] = u
                        dist[v] = dist[u] + 1
                        timestamp[v] = timestamp[u]
            _ = active.popleft()
        return None, None

    def augment(u, v):
        """Augmentation stage.

        Reconstruct path and determine its residual capacity.
        We start from a connecting edge, which links a node
        from the source tree to a node from the target tree.
        The connecting edge is the output of the grow function
        and the input of this function.
        """
        attr = R_succ[u][v]
        flow = min(INF, attr["capacity"] - attr["flow"])
        path = [u]
        # Trace a path from u to s in source_tree.
        w = u
        while w != s:
            n = w
            w = source_tree[n]
            attr = R_pred[n][w]
            flow = min(flow, attr["capacity"] - attr["flow"])
            path.append(w)
        path.reverse()
        # Trace a path from v to t in target_tree.
        path.append(v)
        w = v
        while w != t:
            n = w
            w = target_tree[n]
            attr = R_succ[n][w]
            flow = min(flow, attr["capacity"] - attr["flow"])
            path.append(w)
        # Augment flow along the path and check for saturated edges.
        it = iter(path)
        u = next(it)
        these_orphans = []
        for v in it:
            R_succ[u][v]["flow"] += flow
            R_succ[v][u]["flow"] -= flow
            if R_succ[u][v]["flow"] == R_succ[u][v]["capacity"]:
                # Saturated edge disconnects a subtree; its far endpoint
                # becomes an orphan in whichever tree it belongs to.
                if v in source_tree:
                    source_tree[v] = None
                    these_orphans.append(v)
                if u in target_tree:
                    target_tree[u] = None
                    these_orphans.append(u)
            u = v
        # Process orphans closest to their root first.
        orphans.extend(sorted(these_orphans, key=dist.get))
        return flow

    def adopt():
        """Adoption stage.

        Reconstruct search trees by adopting or discarding orphans.
        During augmentation stage some edges got saturated and thus
        the source and target search trees broke down to forests, with
        orphans as roots of some of its trees. We have to reconstruct
        the search trees rooted to source and target before we can grow
        them again.
        """
        while orphans:
            u = orphans.popleft()
            if u in source_tree:
                tree = source_tree
                neighbors = R_pred
            else:
                tree = target_tree
                neighbors = R_succ
            nbrs = ((n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree)
            for v, attr, d in sorted(nbrs, key=itemgetter(2)):
                if attr["capacity"] - attr["flow"] > 0:
                    if _has_valid_root(v, tree):
                        tree[u] = v
                        dist[u] = dist[v] + 1
                        timestamp[u] = time
                        break
            else:
                # No valid parent found: discard u from the tree and orphan
                # its children; its residual neighbors become active again.
                nbrs = (
                    (n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree
                )
                for v, attr, d in sorted(nbrs, key=itemgetter(2)):
                    if attr["capacity"] - attr["flow"] > 0:
                        if v not in active:
                            active.append(v)
                    if tree[v] == u:
                        tree[v] = None
                        orphans.appendleft(v)
                if u in active:
                    active.remove(u)
                del tree[u]

    def _has_valid_root(n, tree):
        # Walk toward the root; a path is valid if it reaches s/t or a node
        # already validated in the current iteration (marking heuristic).
        path = []
        v = n
        while v is not None:
            path.append(v)
            if v in (s, t):
                base_dist = 0
                break
            elif timestamp[v] == time:
                base_dist = dist[v]
                break
            v = tree[v]
        else:
            return False
        # Refresh distances/timestamps along the validated path.
        length = len(path)
        for i, u in enumerate(path, 1):
            dist[u] = base_dist + length - i
            timestamp[u] = time
        return True

    def _is_closer(u, v):
        return timestamp[v] <= timestamp[u] and dist[v] > dist[u] + 1

    source_tree = {s: None}
    target_tree = {t: None}
    active = deque([s, t])
    orphans = deque()
    flow_value = 0
    # data structures for the marking heuristic
    time = 1
    timestamp = {s: time, t: time}
    dist = {s: 0, t: 0}
    while flow_value < cutoff:
        # Growth stage
        u, v = grow()
        if u is None:
            break
        time += 1
        # Augmentation stage
        flow_value += augment(u, v)
        # Adoption stage
        adopt()

    if flow_value * 2 > INF:
        raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")

    # Add source and target tree in a graph attribute.
    # A partition that defines a minimum cut can be directly
    # computed from the search trees as explained in the docstrings.
    R.graph["trees"] = (source_tree, target_tree)
    # Add the standard flow_value graph attribute.
    R.graph["flow_value"] = flow_value
    return R
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/capacityscaling.py ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Capacity scaling minimum cost flow algorithm.
3
+ """
4
+
5
+ __all__ = ["capacity_scaling"]
6
+
7
+ from itertools import chain
8
+ from math import log
9
+
10
+ import networkx as nx
11
+
12
+ from ...utils import BinaryHeap, arbitrary_element, not_implemented_for
13
+
14
+
15
def _detect_unboundedness(R):
    """Detect infinite-capacity negative cycles.

    Parameters
    ----------
    R : nx.MultiDiGraph
        Residual network as built by ``_build_residual_network``; the value
        simulating infinity must be stored in ``R.graph["inf"]``.

    Raises
    ------
    nx.NetworkXUnbounded
        If a negative-cost cycle consisting solely of infinite-capacity
        edges exists, in which case the min cost flow is unbounded below.
    """
    G = nx.DiGraph()
    G.add_nodes_from(R)

    # Value simulating infinity.
    inf = R.graph["inf"]
    # True infinity.
    f_inf = float("inf")
    for u in R:
        # R is a MultiDiGraph: R[u] maps each successor v to a dict of
        # parallel edges keyed by edge key.
        for v, es in R[u].items():
            # Minimum weight among the infinite-capacity (u, v) edges;
            # f_inf when no such parallel edge exists.
            w = min(
                (e["weight"] for e in es.values() if e["capacity"] == inf),
                default=f_inf,
            )
            if w != f_inf:
                G.add_edge(u, v, weight=w)

    if nx.negative_edge_cycle(G):
        raise nx.NetworkXUnbounded(
            "Negative cost cycle of infinite capacity found. "
            "Min cost flow may be unbounded below."
        )
40
+
41
@not_implemented_for("undirected")
def _build_residual_network(G, demand, capacity, weight):
    """Build a residual network and initialize a zero flow.

    The residual network is a MultiDiGraph: for every original edge it holds
    a forward edge keyed ``(k, True)`` and a reverse edge keyed ``(k, False)``
    with zero capacity and negated weight. Node data carries ``excess``
    (negated demand) and ``potential`` (initialized to 0).
    """
    if sum(G.nodes[u].get(demand, 0) for u in G) != 0:
        raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")

    R = nx.MultiDiGraph()
    R.add_nodes_from(
        (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G
    )

    # NOTE: `inf` is first the true float infinity (used as the "no capacity
    # attribute" default below) and is later rebound to a finite simulated
    # infinity.
    inf = float("inf")
    # Detect selfloops with infinite capacities and negative weights.
    for u, v, e in nx.selfloop_edges(G, data=True):
        if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
            raise nx.NetworkXUnbounded(
                "Negative cost cycle of infinite capacity found. "
                "Min cost flow may be unbounded below."
            )

    # Extract edges with positive capacities. Self loops excluded.
    if G.is_multigraph():
        edge_list = [
            (u, v, k, e)
            for u, v, k, e in G.edges(data=True, keys=True)
            if u != v and e.get(capacity, inf) > 0
        ]
    else:
        # Non-multigraph edges get the dummy key 0 for a uniform shape.
        edge_list = [
            (u, v, 0, e)
            for u, v, e in G.edges(data=True)
            if u != v and e.get(capacity, inf) > 0
        ]
    # Simulate infinity with the larger of the sum of absolute node imbalances
    # the sum of finite edge capacities or any positive value if both sums are
    # zero. This allows the infinite-capacity edges to be distinguished for
    # unboundedness detection and directly participate in residual capacity
    # calculation.
    # (Inside this expression `inf` still refers to float("inf"); the name is
    # rebound only after the expression is fully evaluated.)
    inf = (
        max(
            sum(abs(R.nodes[u]["excess"]) for u in R),
            2
            * sum(
                e[capacity]
                for u, v, k, e in edge_list
                if capacity in e and e[capacity] != inf
            ),
        )
        or 1
    )
    for u, v, k, e in edge_list:
        # Clamp infinite/oversized capacities to the simulated infinity.
        r = min(e.get(capacity, inf), inf)
        w = e.get(weight, 0)
        # Add both (u, v) and (v, u) into the residual network marked with the
        # original key. (key[1] == True) indicates the (u, v) is in the
        # original network.
        R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
        R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)

    # Record the value simulating infinity.
    R.graph["inf"] = inf

    _detect_unboundedness(R)

    return R
106
+
107
+
108
def _build_flow_dict(G, R, capacity, weight):
    """Build a flow dictionary from a residual network.

    The shape of the result mirrors ``G``: dict-of-dicts for a plain digraph,
    dict-of-dicts-of-dicts (keyed by edge key) for a multigraph. Selfloops
    with negative weight and positive capacity are reported as saturated;
    all other flows are read from the residual network's forward edges.
    """
    inf = float("inf")
    flow_dict = {}
    if G.is_multigraph():
        for u in G:
            flow_dict[u] = {}
            for v, es in G[u].items():
                flow_dict[u][v] = {
                    # Always saturate negative selfloops.
                    k: (
                        0
                        if (
                            u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
                        )
                        else e[capacity]
                    )
                    for k, e in es.items()
                }
            # Overlay positive flows from the residual network; residual keys
            # are (original_key, is_forward), so k[0] recovers the G key.
            for v, es in R[u].items():
                if v in flow_dict[u]:
                    flow_dict[u][v].update(
                        (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0
                    )
    else:
        for u in G:
            flow_dict[u] = {
                # Always saturate negative selfloops.
                v: (
                    0
                    if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0)
                    else e[capacity]
                )
                for v, e in G[u].items()
            }
            # Overlay positive flows from the residual network.
            flow_dict[u].update(
                (v, e["flow"])
                for v, es in R[u].items()
                for e in es.values()
                if e["flow"] > 0
            )
    return flow_dict
150
+
151
+
152
@nx._dispatchable(
    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
)
def capacity_scaling(
    G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap
):
    r"""Find a minimum cost flow satisfying all demands in digraph G.

    This is a capacity scaling successive shortest augmenting path algorithm.

    G is a digraph with edge costs and capacities and in which nodes
    have demand, i.e., they want to send or receive some amount of
    flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node want to receive flow. A flow on
    the digraph G satisfies all demand if the net flow into each node
    is equal to the demand of that node.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
        demands is to be found.

    demand : string
        Nodes of the graph G are expected to have an attribute demand
        that indicates how much flow a node wants to send (negative
        demand) or receive (positive demand). Note that the sum of the
        demands should be 0 otherwise the problem in not feasible. If
        this attribute is not present, a node is considered to have 0
        demand. Default value: 'demand'.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    weight : string
        Edges of the graph G are expected to have an attribute weight
        that indicates the cost incurred by sending one unit of flow on
        that edge. If not present, the weight is considered to be 0.
        Default value: 'weight'.

    heap : class
        Type of heap to be used in the algorithm. It should be a subclass of
        :class:`MinHeap` or implement a compatible interface.

        If a stock heap implementation is to be used, :class:`BinaryHeap` is
        recommended over :class:`PairingHeap` for Python implementations without
        optimized attribute accesses (e.g., CPython) despite a slower
        asymptotic running time. For Python implementations with optimized
        attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
        performance. Default value: :class:`BinaryHeap`.

    Returns
    -------
    flowCost : integer
        Cost of a minimum cost flow satisfying all demands.

    flowDict : dictionary
        If G is a digraph, a dict-of-dicts keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
        If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes
        so that flowDict[u][v][key] is the flow on edge (u, v, key).

    Raises
    ------
    NetworkXError
        This exception is raised if the input graph is not directed,
        not connected.

    NetworkXUnfeasible
        This exception is raised in the following situations:

            * The sum of the demands is not zero. Then, there is no
              flow satisfying all demands.
            * There is no flow satisfying all demand.

    NetworkXUnbounded
        This exception is raised if the digraph G has a cycle of
        negative cost and infinite capacity. Then, the cost of a flow
        satisfying all demands is unbounded below.

    Notes
    -----
    This algorithm does not work if edge weights are floating-point numbers.

    See also
    --------
    :meth:`network_simplex`

    Examples
    --------
    A simple example of a min cost flow problem.

    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowCost, flowDict = nx.capacity_scaling(G)
    >>> flowCost
    24
    >>> flowDict
    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}

    It is possible to change the name of the attributes used for the
    algorithm.

    >>> G = nx.DiGraph()
    >>> G.add_node("p", spam=-4)
    >>> G.add_node("q", spam=2)
    >>> G.add_node("a", spam=-2)
    >>> G.add_node("d", spam=-1)
    >>> G.add_node("t", spam=2)
    >>> G.add_node("w", spam=3)
    >>> G.add_edge("p", "q", cost=7, vacancies=5)
    >>> G.add_edge("p", "a", cost=1, vacancies=4)
    >>> G.add_edge("q", "d", cost=2, vacancies=3)
    >>> G.add_edge("t", "q", cost=1, vacancies=2)
    >>> G.add_edge("a", "t", cost=2, vacancies=4)
    >>> G.add_edge("d", "w", cost=3, vacancies=4)
    >>> G.add_edge("t", "w", cost=4, vacancies=1)
    >>> flowCost, flowDict = nx.capacity_scaling(
    ...     G, demand="spam", capacity="vacancies", weight="cost"
    ... )
    >>> flowCost
    37
    >>> flowDict
    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
    """
    R = _build_residual_network(G, demand, capacity, weight)

    inf = float("inf")
    # Account cost of negative selfloops.
    flow_cost = sum(
        0
        if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
        else e[capacity] * e[weight]
        for u, v, e in nx.selfloop_edges(G, data=True)
    )

    # Determine the maximum edge capacity.
    wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True))))
    if wmax == -inf:
        # Residual network has no edges.
        return flow_cost, _build_flow_dict(G, R, capacity, weight)

    R_nodes = R.nodes
    R_succ = R.succ

    # Start the scaling phase at the largest power of two <= wmax.
    delta = 2 ** int(log(wmax, 2))
    while delta >= 1:
        # Saturate Δ-residual edges with negative reduced costs to achieve
        # Δ-optimality.
        for u in R:
            p_u = R_nodes[u]["potential"]
            for v, es in R_succ[u].items():
                for k, e in es.items():
                    if e["weight"] - p_u + R_nodes[v]["potential"] < 0:
                        flow = e["capacity"] - e["flow"]
                        if flow >= delta:
                            e["flow"] += flow
                            R_succ[v][u][(k[0], not k[1])]["flow"] -= flow
                            R_nodes[u]["excess"] -= flow
                            R_nodes[v]["excess"] += flow
        # Determine the Δ-active nodes.
        S = set()
        T = set()
        S_add = S.add
        S_remove = S.remove
        T_add = T.add
        T_remove = T.remove
        for u in R:
            excess = R_nodes[u]["excess"]
            if excess >= delta:
                S_add(u)
            elif excess <= -delta:
                T_add(u)
        # Repeatedly augment flow from S to T along shortest paths until
        # Δ-feasibility is achieved.
        while S and T:
            s = arbitrary_element(S)
            t = None
            # Search for a shortest path in terms of reduced costs from s to
            # any t in T in the Δ-residual network.
            d = {}
            pred = {s: None}
            h = heap()
            h_insert = h.insert
            h_insert(s, 0)
            while h:
                u, d_u = h.pop()
                d[u] = d_u
                if u in T:
                    # Path found.
                    t = u
                    break
                p_u = R_nodes[u]["potential"]
                for v, es in R_succ[u].items():
                    if v in d:
                        continue
                    wmin = inf
                    # Find the minimum-weighted (u, v) Δ-residual edge.
                    for k, e in es.items():
                        if e["capacity"] - e["flow"] >= delta:
                            w = e["weight"]
                            if w < wmin:
                                wmin = w
                                kmin = k
                                emin = e
                    if wmin == inf:
                        continue
                    # Update the distance label of v.
                    d_v = d_u + wmin - p_u + R_nodes[v]["potential"]
                    if h_insert(v, d_v):
                        pred[v] = (u, kmin, emin)
            if t is not None:
                # Augment Δ units of flow from s to t.
                # (u == t here, left over from the search loop above.)
                while u != s:
                    v = u
                    u, k, e = pred[v]
                    e["flow"] += delta
                    R_succ[v][u][(k[0], not k[1])]["flow"] -= delta
                # Account node excess and deficit.
                R_nodes[s]["excess"] -= delta
                R_nodes[t]["excess"] += delta
                if R_nodes[s]["excess"] < delta:
                    S_remove(s)
                if R_nodes[t]["excess"] > -delta:
                    T_remove(t)
                # Update node potentials.
                d_t = d[t]
                for u, d_u in d.items():
                    R_nodes[u]["potential"] -= d_u - d_t
            else:
                # Path not found.
                S_remove(s)
        delta //= 2

    if any(R.nodes[u]["excess"] != 0 for u in R):
        raise nx.NetworkXUnfeasible("No flow satisfying all demands.")

    # Calculate the flow cost.
    for u in R:
        for v, es in R_succ[u].items():
            for e in es.values():
                flow = e["flow"]
                if flow > 0:
                    flow_cost += flow * e["weight"]

    return flow_cost, _build_flow_dict(G, R, capacity, weight)
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/dinitz_alg.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Dinitz' algorithm for maximum flow problems.
3
+ """
4
+ from collections import deque
5
+
6
+ import networkx as nx
7
+ from networkx.algorithms.flow.utils import build_residual_network
8
+ from networkx.utils import pairwise
9
+
10
+ __all__ = ["dinitz"]
11
+
12
+
13
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def dinitz(G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None):
    """Find a maximum single-commodity flow using Dinitz' algorithm.

    This function returns the residual network resulting after computing
    the maximum flow. See below for details about the conventions
    NetworkX uses for defining residual networks.

    This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$
    edges [1]_.


    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    residual : NetworkX graph
        Residual network on which the algorithm is to be executed. If None, a
        new residual network is created. Default value: None.

    value_only : bool
        If True compute only the value of the maximum flow. This parameter
        will be ignored by this algorithm because it is not applicable.

    cutoff : integer, float
        If specified, the algorithm will terminate when the flow value reaches
        or exceeds the cutoff. In this case, it may be unable to immediately
        determine a minimum cut. Default value: None.

    Returns
    -------
    R : NetworkX DiGraph
        Residual network after computing the maximum flow.

    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
    specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
    that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Examples
    --------
    >>> from networkx.algorithms.flow import dinitz

    The functions that implement flow algorithms and output a residual
    network, such as this one, are not imported to the base NetworkX
    namespace, so you have to explicitly import them from the flow package.

    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> R = dinitz(G, "x", "y")
    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
    >>> flow_value
    3.0
    >>> flow_value == R.graph["flow_value"]
    True

    References
    ----------
    .. [1] Dinitz' Algorithm: The Original Version and Even's Version.
           2006. Yefim Dinitz. In Theoretical Computer Science. Lecture
           Notes in Computer Science. Volume 3895. pp 218-240.
           https://doi.org/10.1007/11685654_10

    """
    # Delegate to the implementation, then tag the residual network with the
    # algorithm name so callers can tell which flow function produced it.
    R = dinitz_impl(G, s, t, capacity, residual, cutoff)
    R.graph["algorithm"] = "dinitz"
    nx._clear_cache(R)
    return R
140
+
141
+
142
+ def dinitz_impl(G, s, t, capacity, residual, cutoff):
143
+ if s not in G:
144
+ raise nx.NetworkXError(f"node {str(s)} not in graph")
145
+ if t not in G:
146
+ raise nx.NetworkXError(f"node {str(t)} not in graph")
147
+ if s == t:
148
+ raise nx.NetworkXError("source and sink are the same node")
149
+
150
+ if residual is None:
151
+ R = build_residual_network(G, capacity)
152
+ else:
153
+ R = residual
154
+
155
+ # Initialize/reset the residual network.
156
+ for u in R:
157
+ for e in R[u].values():
158
+ e["flow"] = 0
159
+
160
+ # Use an arbitrary high value as infinite. It is computed
161
+ # when building the residual network.
162
+ INF = R.graph["inf"]
163
+
164
+ if cutoff is None:
165
+ cutoff = INF
166
+
167
+ R_succ = R.succ
168
+ R_pred = R.pred
169
+
170
+ def breath_first_search():
171
+ parents = {}
172
+ vertex_dist = {s: 0}
173
+ queue = deque([(s, 0)])
174
+ # Record all the potential edges of shortest augmenting paths
175
+ while queue:
176
+ if t in parents:
177
+ break
178
+ u, dist = queue.popleft()
179
+ for v, attr in R_succ[u].items():
180
+ if attr["capacity"] - attr["flow"] > 0:
181
+ if v in parents:
182
+ if vertex_dist[v] == dist + 1:
183
+ parents[v].append(u)
184
+ else:
185
+ parents[v] = deque([u])
186
+ vertex_dist[v] = dist + 1
187
+ queue.append((v, dist + 1))
188
+ return parents
189
+
190
+ def depth_first_search(parents):
191
+ # DFS to find all the shortest augmenting paths
192
+ """Build a path using DFS starting from the sink"""
193
+ total_flow = 0
194
+ u = t
195
+ # path also functions as a stack
196
+ path = [u]
197
+ # The loop ends with no augmenting path left in the layered graph
198
+ while True:
199
+ if len(parents[u]) > 0:
200
+ v = parents[u][0]
201
+ path.append(v)
202
+ else:
203
+ path.pop()
204
+ if len(path) == 0:
205
+ break
206
+ v = path[-1]
207
+ parents[v].popleft()
208
+ # Augment the flow along the path found
209
+ if v == s:
210
+ flow = INF
211
+ for u, v in pairwise(path):
212
+ flow = min(flow, R_pred[u][v]["capacity"] - R_pred[u][v]["flow"])
213
+ for u, v in pairwise(reversed(path)):
214
+ R_pred[v][u]["flow"] += flow
215
+ R_pred[u][v]["flow"] -= flow
216
+ # Find the proper node to continue the search
217
+ if R_pred[v][u]["capacity"] - R_pred[v][u]["flow"] == 0:
218
+ parents[v].popleft()
219
+ while path[-1] != v:
220
+ path.pop()
221
+ total_flow += flow
222
+ v = path[-1]
223
+ u = v
224
+ return total_flow
225
+
226
+ flow_value = 0
227
+ while flow_value < cutoff:
228
+ parents = breath_first_search()
229
+ if t not in parents:
230
+ break
231
+ this_flow = depth_first_search(parents)
232
+ if this_flow * 2 > INF:
233
+ raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
234
+ flow_value += this_flow
235
+
236
+ R.graph["flow_value"] = flow_value
237
+ return R
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/edmondskarp.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Edmonds-Karp algorithm for maximum flow problems.
3
+ """
4
+
5
+ import networkx as nx
6
+ from networkx.algorithms.flow.utils import build_residual_network
7
+
8
+ __all__ = ["edmonds_karp"]
9
+
10
+
11
def edmonds_karp_core(R, s, t, cutoff):
    """Implementation of the Edmonds-Karp algorithm.

    Repeatedly finds a shortest augmenting path in the residual network
    ``R`` with a bidirectional BFS and pushes the bottleneck flow along it,
    until no augmenting path remains or the accumulated flow reaches
    ``cutoff``.  Returns the total flow pushed from ``s`` to ``t``.

    Raises ``nx.NetworkXUnbounded`` when an augmenting path of effectively
    infinite capacity is found.
    """
    # NOTE: the original code also bound ``R_nodes = R.nodes`` here, but the
    # name was never used; the dead assignment has been removed.
    R_pred = R.pred
    R_succ = R.succ

    # Finite stand-in for infinite capacity, set when R was built.
    inf = R.graph["inf"]

    def augment(path):
        """Augment flow along a path from s to t."""
        # Determine the path residual capacity.
        flow = inf
        it = iter(path)
        u = next(it)
        for v in it:
            attr = R_succ[u][v]
            flow = min(flow, attr["capacity"] - attr["flow"])
            u = v
        # A bottleneck above half of "infinity" means the whole path consists
        # of effectively infinite-capacity edges.
        if flow * 2 > inf:
            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
        # Augment flow along the path.
        it = iter(path)
        u = next(it)
        for v in it:
            R_succ[u][v]["flow"] += flow
            R_succ[v][u]["flow"] -= flow
            u = v
        return flow

    def bidirectional_bfs():
        """Bidirectional breadth-first search for an augmenting path.

        Returns ``(v, pred, succ)`` where ``v`` is a node reached from both
        endpoints, or ``(None, None, None)`` when no augmenting path exists.
        """
        pred = {s: None}
        q_s = [s]
        succ = {t: None}
        q_t = [t]
        while True:
            q = []
            # Always expand the smaller frontier to keep the search balanced.
            if len(q_s) <= len(q_t):
                for u in q_s:
                    for v, attr in R_succ[u].items():
                        if v not in pred and attr["flow"] < attr["capacity"]:
                            pred[v] = u
                            if v in succ:
                                return v, pred, succ
                            q.append(v)
                if not q:
                    return None, None, None
                q_s = q
            else:
                for u in q_t:
                    for v, attr in R_pred[u].items():
                        if v not in succ and attr["flow"] < attr["capacity"]:
                            succ[v] = u
                            if v in pred:
                                return v, pred, succ
                            q.append(v)
                if not q:
                    return None, None, None
                q_t = q

    # Look for shortest augmenting paths using breadth-first search.
    flow_value = 0
    while flow_value < cutoff:
        v, pred, succ = bidirectional_bfs()
        if pred is None:
            break
        path = [v]
        # Trace a path from s to v.
        u = v
        while u != s:
            u = pred[u]
            path.append(u)
        path.reverse()
        # Trace a path from v to t.
        u = v
        while u != t:
            u = succ[u]
            path.append(u)
        flow_value += augment(path)

    return flow_value
92
+
93
+
94
def edmonds_karp_impl(G, s, t, capacity, residual, cutoff):
    """Validate inputs, prepare the residual network and run Edmonds-Karp.

    Returns the residual network ``R`` with the computed flow stored on its
    edges and the flow value in ``R.graph["flow_value"]``.
    """
    # Reject invalid source/sink choices up front.
    if s not in G:
        raise nx.NetworkXError(f"node {str(s)} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {str(t)} not in graph")
    if s == t:
        raise nx.NetworkXError("source and sink are the same node")

    R = build_residual_network(G, capacity) if residual is None else residual

    # Start from a zero flow on every residual edge.
    for node in R:
        for edge_attr in R[node].values():
            edge_attr["flow"] = 0

    bound = float("inf") if cutoff is None else cutoff
    R.graph["flow_value"] = edmonds_karp_core(R, s, t, bound)

    return R
118
+
119
+
120
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def edmonds_karp(
    G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None
):
    """Find a maximum single-commodity flow using the Edmonds-Karp algorithm.

    Returns the residual network resulting from the computation; the flow
    value is stored in ``R.graph['flow_value']`` and the per-edge flow in
    ``R[u][v]['flow']``.  Runs in $O(n m^2)$ time for $n$ nodes and $m$
    edges.

    Parameters
    ----------
    G : NetworkX graph
        Edges are expected to carry a 'capacity' attribute; edges without
        one are treated as having infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute holding edge capacities.
        Default value: 'capacity'.

    residual : NetworkX graph
        Residual network on which the algorithm is to be executed.  When
        None, a new residual network is created.  Default value: None.

    value_only : bool
        Ignored by this algorithm; accepted for interface compatibility
        with the other flow algorithms.

    cutoff : integer, float
        When given, terminate once the flow value reaches or exceeds this
        bound; the residual network may then be unable to determine a
        minimum cut.  Default value: None.

    Returns
    -------
    R : NetworkX DiGraph
        Residual network after computing the maximum flow.

    Raises
    ------
    NetworkXError
        If the input graph is a MultiGraph or MultiDiGraph (unsupported),
        if ``s`` or ``t`` is not in ``G``, or if ``s == t``.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, so the value of a
        feasible flow is unbounded above.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network ``R`` has the same nodes as ``G`` and contains the
    edge pair ``(u, v)``/``(v, u)`` iff ``(u, v)`` is not a self-loop and
    at least one of the two exists in ``G``.  ``R[u][v]['capacity']`` is
    the capacity of ``(u, v)`` in ``G`` (zero if the edge is absent);
    infinite capacities are replaced by the large finite value
    ``R.graph['inf']``.  The identity
    ``R[u][v]['flow'] == -R[v][u]['flow']`` holds for every edge, and when
    ``cutoff`` is not specified, reachability from ``s`` over edges with
    ``R[u][v]['flow'] < R[u][v]['capacity']`` induces a minimum s-t cut.

    Examples
    --------
    >>> from networkx.algorithms.flow import edmonds_karp
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> R = edmonds_karp(G, "x", "y")
    >>> R.graph["flow_value"]
    3.0
    """
    R = edmonds_karp_impl(G, s, t, capacity, residual, cutoff)
    R.graph["algorithm"] = "edmonds_karp"
    # Invalidate any cached backend conversions now that R has been mutated.
    nx._clear_cache(R)
    return R
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/gomory_hu.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Gomory-Hu tree of undirected Graphs.
3
+ """
4
+ import networkx as nx
5
+ from networkx.utils import not_implemented_for
6
+
7
+ from .edmondskarp import edmonds_karp
8
+ from .utils import build_residual_network
9
+
10
+ default_flow_func = edmonds_karp
11
+
12
+ __all__ = ["gomory_hu_tree"]
13
+
14
+
15
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def gomory_hu_tree(G, capacity="capacity", flow_func=None):
    r"""Returns the Gomory-Hu tree of an undirected graph G.

    A Gomory-Hu tree is a weighted tree on the nodes of ``G`` that encodes
    the minimum s-t cuts of every node pair: the minimum cut value between
    any two nodes equals the smallest edge weight on the tree path joining
    them, and removing that edge splits the tree into the two sides of a
    minimum s-t cut in ``G``.  Only ``n - 1`` minimum cut computations are
    required instead of the obvious ``n (n - 1) / 2``.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph.

    capacity : string
        Name of the edge attribute holding the edge capacity; edges
        without it are treated as having infinite capacity.
        Default value: 'capacity'.

    flow_func : function
        Maximum-flow routine used for the underlying minimum cut
        computations.  Defaults to :func:`edmonds_karp`, which performs
        well on sparse graphs with right-tailed degree distributions;
        :func:`shortest_augmenting_path` tends to do better on denser
        graphs.

    Returns
    -------
    Tree : NetworkX graph
        A NetworkX graph representing the Gomory-Hu tree of ``G``.

    Raises
    ------
    NetworkXNotImplemented
        Raised if the input graph is directed.

    NetworkXError
        Raised if the input graph is an empty Graph.

    Notes
    -----
    This implementation follows Gusfield's approach [1]_, which avoids node
    contractions while keeping the same computational complexity as the
    original Gomory-Hu method.

    See also
    --------
    :func:`minimum_cut`
    :func:`maximum_flow`

    References
    ----------
    .. [1] Gusfield D: Very simple methods for all pairs network flow analysis.
           SIAM J Comput 19(1):143-155, 1990.

    """
    if flow_func is None:
        flow_func = default_flow_func

    if len(G) == 0:  # empty graph
        msg = "Empty Graph does not have a Gomory-Hu tree representation"
        raise nx.NetworkXError(msg)

    # Start with a star: every node initially hangs off an arbitrary root.
    node_iter = iter(G)
    root = next(node_iter)
    parent = {n: root for n in node_iter}
    cut_label = {}

    # Reuse a single residual network across all flow computations.
    R = build_residual_network(G, capacity)

    # One minimum cut per non-root node, i.e. n - 1 computations in total.
    for source in parent:
        # The tree neighbor of this leaf.
        target = parent[source]
        # Compute a minimum source-target cut.
        cut_value, partition = nx.minimum_cut(
            G, source, target, capacity=capacity, flow_func=flow_func, residual=R
        )
        cut_label[(source, target)] = cut_value
        # Update the tree.
        # Source will always be in partition[0] and target in partition[1]:
        # re-hang target's other children that fell on the source side.
        for node in partition[0]:
            if node != source and node in parent and parent[node] == target:
                parent[node] = source
                cut_label[node, source] = cut_label.get((node, target), cut_value)
        # When target's own parent lies on the source side of the cut,
        # splice source between target and that parent, carrying labels over.
        if target != root and parent[target] in partition[0]:
            cut_label[source, parent[target]] = cut_label[target, parent[target]]
            cut_label[target, source] = cut_value
            parent[source] = parent[target]
            parent[target] = source

    # Materialize the tree from the parent pointers and cut labels.
    T = nx.Graph()
    T.add_nodes_from(G)
    T.add_weighted_edges_from(((u, v, cut_label[u, v]) for u, v in parent.items()))
    return T
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/maxflow.py ADDED
@@ -0,0 +1,601 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Maximum flow (and minimum cut) algorithms on capacitated graphs.
3
+ """
4
+ import networkx as nx
5
+
6
+ from .boykovkolmogorov import boykov_kolmogorov
7
+ from .dinitz_alg import dinitz
8
+ from .edmondskarp import edmonds_karp
9
+ from .preflowpush import preflow_push
10
+ from .shortestaugmentingpath import shortest_augmenting_path
11
+ from .utils import build_flow_dict
12
+
13
+ # Define the default flow function for computing maximum flow.
14
+ default_flow_func = preflow_push
15
+
16
+ __all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"]
17
+
18
+
19
@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Find a maximum single-commodity flow.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges are expected to carry a 'capacity' attribute; edges without
        one are treated as having infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute holding edge capacities.
        Default value: 'capacity'.

    flow_func : function
        A function computing the maximum flow between a pair of nodes in a
        capacitated graph.  It must accept at least a graph, a source node
        and a target node, and return a residual network following the
        NetworkX conventions (see Notes).  When None, the default
        (:meth:`preflow_push`) is used; the choice of default may change
        between versions and should not be relied upon.
        Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    flow_value : integer, float
        Value of the maximum flow, i.e., net outflow from the source.

    flow_dict : dict
        A dictionary containing the value of the flow that went through
        each edge.

    Raises
    ------
    NetworkXError
        If the input graph is a MultiGraph or MultiDiGraph (unsupported),
        or if ``kwargs`` are given without an explicit ``flow_func``.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, so the value of a
        feasible flow is unbounded above.

    See also
    --------
    :meth:`maximum_flow_value`
    :meth:`minimum_cut`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function passed as ``flow_func`` has to return a residual network
    that follows the NetworkX conventions: ``R`` has the same nodes as the
    input graph and contains the edge pair ``(u, v)``/``(v, u)`` iff
    ``(u, v)`` is not a self-loop and at least one of the two exists in
    the input graph.  ``R[u][v]['capacity']`` is the capacity of
    ``(u, v)`` (zero if the edge is absent), with infinite capacities
    replaced by the large finite value ``R.graph['inf']``.
    ``R[u][v]['flow'] == -R[v][u]['flow']`` holds for every edge, the flow
    value is stored in ``R.graph['flow_value']``, and reachability from
    the source over edges with ``R[u][v]['flow'] < R[u][v]['capacity']``
    induces a minimum s-t cut.  Specific algorithms may store extra data
    in ``R``.

    The function should support an optional boolean parameter
    ``value_only``; when True it may terminate as soon as the maximum flow
    value and the minimum cut can be determined.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> flow_value, flow_dict = nx.maximum_flow(G, "x", "y")
    >>> flow_value
    3.0
    >>> print(flow_dict["x"]["b"])
    1.0
    """
    if flow_func is None:
        # kwargs are forwarded to flow_func, so they make no sense without one.
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs)
    return (R.graph["flow_value"], build_flow_dict(flowG, R))
162
+
163
+
164
@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Find the value of maximum single-commodity flow.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges are expected to carry a 'capacity' attribute; edges without
        one are treated as having infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute holding edge capacities.
        Default value: 'capacity'.

    flow_func : function
        A function computing the maximum flow between a pair of nodes in a
        capacitated graph.  It must accept at least a graph, a source node
        and a target node, and return a residual network following the
        NetworkX conventions (see Notes).  When None, the default
        (:meth:`preflow_push`) is used; the choice of default may change
        between versions and should not be relied upon.
        Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    flow_value : integer, float
        Value of the maximum flow, i.e., net outflow from the source.

    Raises
    ------
    NetworkXError
        If the input graph is a MultiGraph or MultiDiGraph (unsupported),
        or if ``kwargs`` are given without an explicit ``flow_func``.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, so the value of a
        feasible flow is unbounded above.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function passed as ``flow_func`` has to return a residual network
    that follows the NetworkX conventions: ``R`` has the same nodes as the
    input graph and contains the edge pair ``(u, v)``/``(v, u)`` iff
    ``(u, v)`` is not a self-loop and at least one of the two exists in
    the input graph.  ``R[u][v]['capacity']`` is the capacity of
    ``(u, v)`` (zero if the edge is absent), with infinite capacities
    replaced by the large finite value ``R.graph['inf']``.
    ``R[u][v]['flow'] == -R[v][u]['flow']`` holds for every edge, the flow
    value is stored in ``R.graph['flow_value']``, and reachability from
    the source over edges with ``R[u][v]['flow'] < R[u][v]['capacity']``
    induces a minimum s-t cut.  Specific algorithms may store extra data
    in ``R``.

    The function should support an optional boolean parameter
    ``value_only``; when True it may terminate as soon as the maximum flow
    value and the minimum cut can be determined.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> nx.maximum_flow_value(G, "x", "y")
    3.0
    """
    if flow_func is None:
        # kwargs are forwarded to flow_func, so they make no sense without one.
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    # value_only=True lets the algorithm stop once the value is known.
    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
    return R.graph["flow_value"]
300
+
301
+
302
@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Compute the value and the node partition of a minimum (s, t)-cut.

    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
    capacity cut is equal to the flow value of a maximum flow.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. The function has to accept at least three
        parameters: a Graph or Digraph, a source node, and a target node.
        And return a residual network that follows NetworkX conventions
        (see Notes). If flow_func is None, the default maximum
        flow function (:meth:`preflow_push`) is used. The choice of the
        default function may change from version to version and should
        not be relied on. Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    cut_value : integer, float
        Value of the minimum cut.

    partition : pair of node sets
        A partitioning of the nodes that defines a minimum cut.

    Raises
    ------
    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity and the function raises a NetworkXError.

    NetworkXError
        If kwargs are passed without an explicit flow_func, if flow_func
        is not callable, or if a 'cutoff' kwarg is combined with
        :meth:`preflow_push` (which does not support early termination).

    See also
    --------
    :meth:`maximum_flow`
    :meth:`maximum_flow_value`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function used in the flow_func parameter has to return a residual
    network that follows NetworkX conventions:

    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
    only edges :samp:`(u, v)` such that
    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Specific algorithms may store extra data in :samp:`R`.

    The function should support an optional boolean parameter value_only. When
    True, it can optionally terminate the algorithm as soon as the maximum flow
    value and the minimum cut can be determined.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)

    minimum_cut computes both the value of the
    minimum cut and the node partition:

    >>> cut_value, partition = nx.minimum_cut(G, "x", "y")
    >>> reachable, non_reachable = partition

    'partition' here is a tuple with the two sets of nodes that define
    the minimum cut. You can compute the cut set of edges that induce
    the minimum cut as follows:

    >>> cutset = set()
    >>> for u, nbrs in ((n, G[n]) for n in reachable):
    ...     cutset.update((u, v) for v in nbrs if v in non_reachable)
    >>> print(sorted(cutset))
    [('c', 'y'), ('x', 'b')]
    >>> cut_value == sum(G.edges[u, v]["capacity"] for (u, v) in cutset)
    True

    You can also use alternative algorithms for computing the
    minimum cut by using the flow_func parameter.

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> cut_value == nx.minimum_cut(G, "x", "y", flow_func=shortest_augmenting_path)[0]
    True

    """
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func

    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")

    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
        raise nx.NetworkXError("cutoff should not be specified.")

    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)

    # Temporarily remove saturated edges: the residual network restricted to
    # non-saturated edges disconnects s from t exactly along a minimum cut.
    cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]]
    R.remove_edges_from(cutset)

    # Nodes that can still reach t form the sink side of the cut; everything
    # else (including s) forms the source side.
    non_reachable = set(dict(nx.shortest_path_length(R, target=_t)))
    partition = (set(flowG) - non_reachable, non_reachable)

    # Restore the removed edges so the residual network remains reusable.
    # (Fix: the original guarded this with `if cutset is not None`, but
    # `cutset` is always a list, so the check was dead code.)
    R.add_edges_from(cutset)
    return (R.graph["flow_value"], partition)
464
+
465
+
466
@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
    """Compute the value of a minimum (s, t)-cut.

    By the max-flow min-cut theorem, the capacity of a minimum capacity
    cut equals the flow value of a maximum flow, so this simply runs a
    max-flow algorithm and reports the resulting flow value.

    Parameters
    ----------
    flowG : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    _s : node
        Source node for the flow.

    _t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute holding each edge's capacity; edges
        without it are treated as having infinite capacity.
        Default value: 'capacity'.

    flow_func : function
        A maximum-flow routine accepting at least a graph, a source node
        and a target node, and returning a residual network that follows
        NetworkX conventions (a DiGraph with 'capacity' and 'flow' edge
        attributes, plus 'flow_value' and 'inf' graph attributes). If
        None, the default algorithm (:meth:`preflow_push`) is used; the
        default choice may change between versions and should not be
        relied on. Default value: None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    cut_value : integer, float
        Value of the minimum cut.

    Raises
    ------
    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity and the function raises a NetworkXError.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`maximum_flow_value`
    :meth:`minimum_cut`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network returned by `flow_func` must satisfy the usual
    NetworkX conventions; see :meth:`minimum_cut` for the full contract.
    The function should support an optional boolean parameter
    ``value_only`` allowing it to stop as soon as the maximum flow value
    and the minimum cut are known.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> cut_value = nx.minimum_cut_value(G, "x", "y")
    >>> cut_value
    3.0

    An alternative algorithm can be selected with `flow_func`:

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> cut_value == nx.minimum_cut_value(G, "x", "y", flow_func=shortest_augmenting_path)
    True

    """
    if flow_func is None:
        # kwargs only make sense for an explicitly chosen algorithm.
        if kwargs:
            raise nx.NetworkXError(
                "You have to explicitly set a flow_func if"
                " you need to pass parameters via kwargs."
            )
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")
    # preflow_push does not support early termination via cutoff.
    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
        raise nx.NetworkXError("cutoff should not be specified.")
    residual = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
    return residual.graph["flow_value"]
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/mincost.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Minimum cost flow algorithms on directed connected graphs.
3
+ """
4
+
5
+ __all__ = ["min_cost_flow_cost", "min_cost_flow", "cost_of_flow", "max_flow_min_cost"]
6
+
7
+ import networkx as nx
8
+
9
+
10
@nx._dispatchable(
    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
)
def min_cost_flow_cost(G, demand="demand", capacity="capacity", weight="weight"):
    r"""Find the cost of a minimum cost flow satisfying all demands in digraph G.

    Nodes of G may carry a demand: a negative demand means the node
    supplies flow, a positive demand means it consumes flow. A flow
    satisfies all demands when the net flow into each node equals that
    node's demand.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph on which a minimum cost flow satisfying all demands is
        to be found.

    demand : string
        Node attribute holding each node's demand; nodes without it are
        treated as having demand 0. The demands must sum to zero for the
        problem to be feasible. Default value: 'demand'.

    capacity : string
        Edge attribute holding each edge's capacity; edges without it
        are treated as having infinite capacity.
        Default value: 'capacity'.

    weight : string
        Edge attribute holding the cost of sending one unit of flow on
        the edge; absent attributes mean cost 0. Default value: 'weight'.

    Returns
    -------
    flowCost : integer, float
        Cost of a minimum cost flow satisfying all demands.

    Raises
    ------
    NetworkXError
        If the input graph is not directed or not connected.

    NetworkXUnfeasible
        If the demands do not sum to zero, or if no flow satisfies all
        demands.

    NetworkXUnbounded
        If G has a negative-cost cycle of infinite capacity, in which
        case the cost of a feasible flow is unbounded below.

    See also
    --------
    cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex

    Notes
    -----
    This algorithm is not guaranteed to work if edge weights or demands
    are floating point numbers (overflows and roundoff errors can cause
    problems). As a workaround, scale the relevant edge attributes to
    integers (e.g. multiply by 100).

    Examples
    --------
    A simple example of a min cost flow problem.

    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowCost = nx.min_cost_flow_cost(G)
    >>> flowCost
    24
    """
    # network_simplex returns (flow_cost, flow_dict); only the cost is needed.
    flow_cost, _ = nx.network_simplex(
        G, demand=demand, capacity=capacity, weight=weight
    )
    return flow_cost
100
+
101
+
102
@nx._dispatchable(
    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
)
def min_cost_flow(G, demand="demand", capacity="capacity", weight="weight"):
    r"""Returns a minimum cost flow satisfying all demands in digraph G.

    Nodes of G may carry a demand: a negative demand means the node
    supplies flow, a positive demand means it consumes flow. A flow
    satisfies all demands when the net flow into each node equals that
    node's demand.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph on which a minimum cost flow satisfying all demands is
        to be found.

    demand : string
        Node attribute holding each node's demand; nodes without it are
        treated as having demand 0. The demands must sum to zero for the
        problem to be feasible. Default value: 'demand'.

    capacity : string
        Edge attribute holding each edge's capacity; edges without it
        are treated as having infinite capacity.
        Default value: 'capacity'.

    weight : string
        Edge attribute holding the cost of sending one unit of flow on
        the edge; absent attributes mean cost 0. Default value: 'weight'.

    Returns
    -------
    flowDict : dictionary
        Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).

    Raises
    ------
    NetworkXError
        If the input graph is not directed or not connected.

    NetworkXUnfeasible
        If the demands do not sum to zero, or if no flow satisfies all
        demands.

    NetworkXUnbounded
        If G has a negative-cost cycle of infinite capacity, in which
        case the cost of a feasible flow is unbounded below.

    See also
    --------
    cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex

    Notes
    -----
    This algorithm is not guaranteed to work if edge weights or demands
    are floating point numbers (overflows and roundoff errors can cause
    problems). As a workaround, scale the relevant edge attributes to
    integers (e.g. multiply by 100).

    Examples
    --------
    A simple example of a min cost flow problem.

    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowDict = nx.min_cost_flow(G)
    >>> flowDict
    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
    """
    # network_simplex returns (flow_cost, flow_dict); only the dict is needed.
    _, flow_dict = nx.network_simplex(
        G, demand=demand, capacity=capacity, weight=weight
    )
    return flow_dict
193
+
194
+
195
@nx._dispatchable(edge_attrs={"weight": 0})
def cost_of_flow(G, flowDict, weight="weight"):
    """Compute the cost of the flow given by flowDict on graph G.

    This function does not check that ``flowDict`` is a valid flow; it
    will fail if G and the flow do not have the same edge set.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph on which a minimum cost flow satisfying all demands is
        to be found.

    weight : string
        Edge attribute holding the cost of sending one unit of flow on
        the edge; absent attributes mean cost 0. Default value: 'weight'.

    flowDict : dictionary
        Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).

    Returns
    -------
    cost : Integer, float
        The total cost of the flow: the sum over all edges of the edge's
        flow times the edge's weight.

    See also
    --------
    max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex

    Notes
    -----
    This algorithm is not guaranteed to work if edge weights or demands
    are floating point numbers (overflows and roundoff errors can cause
    problems). As a workaround, scale the relevant edge attributes to
    integers (e.g. multiply by 100).

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowDict = nx.min_cost_flow(G)
    >>> flowDict
    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
    >>> nx.cost_of_flow(G, flowDict)
    24
    """
    total = 0
    for u, v, attrs in G.edges(data=True):
        total += flowDict[u][v] * attrs.get(weight, 0)
    return total
253
+
254
+
255
@nx._dispatchable(edge_attrs={"capacity": float("inf"), "weight": 0})
def max_flow_min_cost(G, s, t, capacity="capacity", weight="weight"):
    """Returns a maximum (s, t)-flow of minimum cost.

    G is a digraph with edge costs and capacities. This function finds a
    maximum flow from source s to sink t whose total cost is minimized.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph on which a minimum cost flow satisfying all demands is
        to be found.

    s: node label
        Source of the flow.

    t: node label
        Destination of the flow.

    capacity: string
        Edge attribute holding each edge's capacity; edges without it
        are treated as having infinite capacity.
        Default value: 'capacity'.

    weight: string
        Edge attribute holding the cost of sending one unit of flow on
        the edge; absent attributes mean cost 0. Default value: 'weight'.

    Returns
    -------
    flowDict: dictionary
        Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).

    Raises
    ------
    NetworkXError
        If the input graph is not directed or not connected.

    NetworkXUnbounded
        If there is an infinite-capacity path from s to t in G (so no
        maximum flow exists), or if G has a negative-cost cycle of
        infinite capacity (so the cost is unbounded below).

    See also
    --------
    cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex

    Notes
    -----
    This algorithm is not guaranteed to work if edge weights or demands
    are floating point numbers (overflows and roundoff errors can cause
    problems). As a workaround, scale the relevant edge attributes to
    integers (e.g. multiply by 100).

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edges_from(
    ...     [
    ...         (1, 2, {"capacity": 12, "weight": 4}),
    ...         (1, 3, {"capacity": 20, "weight": 6}),
    ...         (2, 3, {"capacity": 6, "weight": -3}),
    ...         (2, 6, {"capacity": 14, "weight": 1}),
    ...         (3, 4, {"weight": 9}),
    ...         (3, 5, {"capacity": 10, "weight": 5}),
    ...         (4, 2, {"capacity": 19, "weight": 13}),
    ...         (4, 5, {"capacity": 4, "weight": 0}),
    ...         (5, 7, {"capacity": 28, "weight": 2}),
    ...         (6, 5, {"capacity": 11, "weight": 1}),
    ...         (6, 7, {"weight": 8}),
    ...         (7, 4, {"capacity": 6, "weight": 6}),
    ...     ]
    ... )
    >>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
    >>> mincost = nx.cost_of_flow(G, mincostFlow)
    >>> mincost
    373
    >>> from networkx.algorithms.flow import maximum_flow
    >>> maxFlow = maximum_flow(G, 1, 7)[1]
    >>> nx.cost_of_flow(G, maxFlow) >= mincost
    True
    >>> mincostFlowValue = sum((mincostFlow[u][7] for u in G.predecessors(7))) - sum(
    ...     (mincostFlow[7][v] for v in G.successors(7))
    ... )
    >>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7)
    True

    """
    # First determine how much flow can possibly be pushed from s to t.
    best_flow = nx.maximum_flow_value(G, s, t, capacity=capacity)
    # Recast as a min-cost flow problem: demand exactly that many units
    # out of s and into t, then minimize the cost of meeting the demand.
    H = nx.DiGraph(G)
    H.add_node(s, demand=-best_flow)
    H.add_node(t, demand=best_flow)
    return min_cost_flow(H, capacity=capacity, weight=weight)
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/networksimplex.py ADDED
@@ -0,0 +1,666 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Minimum cost flow algorithms on directed connected graphs.
3
+ """
4
+
5
+ __all__ = ["network_simplex"]
6
+
7
+ from itertools import chain, islice, repeat
8
+ from math import ceil, sqrt
9
+
10
+ import networkx as nx
11
+ from networkx.utils import not_implemented_for
12
+
13
+
14
+ class _DataEssentialsAndFunctions:
15
+ def __init__(
16
+ self, G, multigraph, demand="demand", capacity="capacity", weight="weight"
17
+ ):
18
+ # Number all nodes and edges and hereafter reference them using ONLY their numbers
19
+ self.node_list = list(G) # nodes
20
+ self.node_indices = {u: i for i, u in enumerate(self.node_list)} # node indices
21
+ self.node_demands = [
22
+ G.nodes[u].get(demand, 0) for u in self.node_list
23
+ ] # node demands
24
+
25
+ self.edge_sources = [] # edge sources
26
+ self.edge_targets = [] # edge targets
27
+ if multigraph:
28
+ self.edge_keys = [] # edge keys
29
+ self.edge_indices = {} # edge indices
30
+ self.edge_capacities = [] # edge capacities
31
+ self.edge_weights = [] # edge weights
32
+
33
+ if not multigraph:
34
+ edges = G.edges(data=True)
35
+ else:
36
+ edges = G.edges(data=True, keys=True)
37
+
38
+ inf = float("inf")
39
+ edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0)
40
+ for i, e in enumerate(edges):
41
+ self.edge_sources.append(self.node_indices[e[0]])
42
+ self.edge_targets.append(self.node_indices[e[1]])
43
+ if multigraph:
44
+ self.edge_keys.append(e[2])
45
+ self.edge_indices[e[:-1]] = i
46
+ self.edge_capacities.append(e[-1].get(capacity, inf))
47
+ self.edge_weights.append(e[-1].get(weight, 0))
48
+
49
+ # spanning tree specific data to be initialized
50
+
51
+ self.edge_count = None # number of edges
52
+ self.edge_flow = None # edge flows
53
+ self.node_potentials = None # node potentials
54
+ self.parent = None # parent nodes
55
+ self.parent_edge = None # edges to parents
56
+ self.subtree_size = None # subtree sizes
57
+ self.next_node_dft = None # next nodes in depth-first thread
58
+ self.prev_node_dft = None # previous nodes in depth-first thread
59
+ self.last_descendent_dft = None # last descendants in depth-first thread
60
+ self._spanning_tree_initialized = (
61
+ False # False until initialize_spanning_tree() is called
62
+ )
63
+
64
+ def initialize_spanning_tree(self, n, faux_inf):
65
+ self.edge_count = len(self.edge_indices) # number of edges
66
+ self.edge_flow = list(
67
+ chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands))
68
+ ) # edge flows
69
+ self.node_potentials = [
70
+ faux_inf if d <= 0 else -faux_inf for d in self.node_demands
71
+ ] # node potentials
72
+ self.parent = list(chain(repeat(-1, n), [None])) # parent nodes
73
+ self.parent_edge = list(
74
+ range(self.edge_count, self.edge_count + n)
75
+ ) # edges to parents
76
+ self.subtree_size = list(chain(repeat(1, n), [n + 1])) # subtree sizes
77
+ self.next_node_dft = list(
78
+ chain(range(1, n), [-1, 0])
79
+ ) # next nodes in depth-first thread
80
+ self.prev_node_dft = list(range(-1, n)) # previous nodes in depth-first thread
81
+ self.last_descendent_dft = list(
82
+ chain(range(n), [n - 1])
83
+ ) # last descendants in depth-first thread
84
+ self._spanning_tree_initialized = True # True only if all the assignments pass
85
+
86
+ def find_apex(self, p, q):
87
+ """
88
+ Find the lowest common ancestor of nodes p and q in the spanning tree.
89
+ """
90
+ size_p = self.subtree_size[p]
91
+ size_q = self.subtree_size[q]
92
+ while True:
93
+ while size_p < size_q:
94
+ p = self.parent[p]
95
+ size_p = self.subtree_size[p]
96
+ while size_p > size_q:
97
+ q = self.parent[q]
98
+ size_q = self.subtree_size[q]
99
+ if size_p == size_q:
100
+ if p != q:
101
+ p = self.parent[p]
102
+ size_p = self.subtree_size[p]
103
+ q = self.parent[q]
104
+ size_q = self.subtree_size[q]
105
+ else:
106
+ return p
107
+
108
+ def trace_path(self, p, w):
109
+ """
110
+ Returns the nodes and edges on the path from node p to its ancestor w.
111
+ """
112
+ Wn = [p]
113
+ We = []
114
+ while p != w:
115
+ We.append(self.parent_edge[p])
116
+ p = self.parent[p]
117
+ Wn.append(p)
118
+ return Wn, We
119
+
120
    def find_cycle(self, i, p, q):
        """
        Returns the nodes and edges on the cycle containing edge i == (p, q)
        when the latter is added to the spanning tree.

        The cycle is oriented in the direction from p to q.
        """
        w = self.find_apex(p, q)
        Wn, We = self.trace_path(p, w)
        # Reverse so the path runs from the apex w down to p.
        Wn.reverse()
        We.reverse()
        # Append the entering edge i unless it is already the whole path.
        if We != [i]:
            We.append(i)
        WnR, WeR = self.trace_path(q, w)
        # Drop the apex w from the q-side path; it already starts Wn.
        del WnR[-1]
        Wn += WnR
        We += WeR
        return Wn, We
138
+
139
+ def augment_flow(self, Wn, We, f):
140
+ """
141
+ Augment f units of flow along a cycle represented by Wn and We.
142
+ """
143
+ for i, p in zip(We, Wn):
144
+ if self.edge_sources[i] == p:
145
+ self.edge_flow[i] += f
146
+ else:
147
+ self.edge_flow[i] -= f
148
+
149
+ def trace_subtree(self, p):
150
+ """
151
+ Yield the nodes in the subtree rooted at a node p.
152
+ """
153
+ yield p
154
+ l = self.last_descendent_dft[p]
155
+ while p != l:
156
+ p = self.next_node_dft[p]
157
+ yield p
158
+
159
    def remove_edge(self, s, t):
        """
        Remove an edge (s, t) where parent[t] == s from the spanning tree.

        Detaches the subtree rooted at t, splices it out of the depth-first
        thread, and fixes the bookkeeping (subtree sizes, last descendants)
        of t's former ancestors.
        """
        size_t = self.subtree_size[t]
        prev_t = self.prev_node_dft[t]
        last_t = self.last_descendent_dft[t]
        next_last_t = self.next_node_dft[last_t]
        # Remove (s, t).
        self.parent[t] = None
        self.parent_edge[t] = None
        # Remove the subtree rooted at t from the depth-first thread.
        self.next_node_dft[prev_t] = next_last_t
        self.prev_node_dft[next_last_t] = prev_t
        # The detached subtree becomes its own circular thread.
        self.next_node_dft[last_t] = t
        self.prev_node_dft[t] = last_t
        # Update the subtree sizes and last descendants of the (old) ancestors
        # of t.
        while s is not None:
            self.subtree_size[s] -= size_t
            if self.last_descendent_dft[s] == last_t:
                self.last_descendent_dft[s] = prev_t
            s = self.parent[s]
182
+
183
    def make_root(self, q):
        """
        Make a node q the root of its containing subtree.

        Walks the ancestor chain of q from the current root downwards,
        reversing each parent/child relation and re-splicing the depth-first
        thread so q ends up as the subtree's root.
        """
        ancestors = []
        while q is not None:
            ancestors.append(q)
            q = self.parent[q]
        ancestors.reverse()  # now ordered root -> ... -> q
        # Flip each (p, q) parent/child pair along the chain.
        for p, q in zip(ancestors, islice(ancestors, 1, None)):
            size_p = self.subtree_size[p]
            last_p = self.last_descendent_dft[p]
            prev_q = self.prev_node_dft[q]
            last_q = self.last_descendent_dft[q]
            next_last_q = self.next_node_dft[last_q]
            # Make p a child of q.
            self.parent[p] = q
            self.parent[q] = None
            self.parent_edge[p] = self.parent_edge[q]
            self.parent_edge[q] = None
            self.subtree_size[p] = size_p - self.subtree_size[q]
            self.subtree_size[q] = size_p
            # Remove the subtree rooted at q from the depth-first thread.
            self.next_node_dft[prev_q] = next_last_q
            self.prev_node_dft[next_last_q] = prev_q
            self.next_node_dft[last_q] = q
            self.prev_node_dft[q] = last_q
            if last_p == last_q:
                self.last_descendent_dft[p] = prev_q
                last_p = prev_q
            # Add the remaining parts of the subtree rooted at p as a subtree
            # of q in the depth-first thread.
            self.prev_node_dft[p] = last_q
            self.next_node_dft[last_q] = p
            self.next_node_dft[last_p] = q
            self.prev_node_dft[q] = last_p
            self.last_descendent_dft[q] = last_p
220
+
221
    def add_edge(self, i, p, q):
        """
        Add an edge (p, q) to the spanning tree where q is the root of a subtree.

        Edge index i becomes the parent edge of q; the whole subtree rooted
        at q is spliced into the depth-first thread right after p's last
        descendant, and ancestor bookkeeping is updated.
        """
        last_p = self.last_descendent_dft[p]
        next_last_p = self.next_node_dft[last_p]
        size_q = self.subtree_size[q]
        last_q = self.last_descendent_dft[q]
        # Make q a child of p.
        self.parent[q] = p
        self.parent_edge[q] = i
        # Insert the subtree rooted at q into the depth-first thread.
        self.next_node_dft[last_p] = q
        self.prev_node_dft[q] = last_p
        self.prev_node_dft[next_last_p] = last_q
        self.next_node_dft[last_q] = next_last_p
        # Update the subtree sizes and last descendants of the (new) ancestors
        # of q.
        while p is not None:
            self.subtree_size[p] += size_q
            if self.last_descendent_dft[p] == last_p:
                self.last_descendent_dft[p] = last_q
            p = self.parent[p]
244
+
245
    def update_potentials(self, i, p, q):
        """
        Update the potentials of the nodes in the subtree rooted at a node
        q connected to its parent p by an edge i.

        The shift d makes edge i's reduced cost zero (the sign depends on
        i's orientation); applying the same shift to all of q's subtree
        preserves the reduced costs of every other tree edge.
        """
        if q == self.edge_targets[i]:
            d = self.node_potentials[p] - self.edge_weights[i] - self.node_potentials[q]
        else:
            d = self.node_potentials[p] + self.edge_weights[i] - self.node_potentials[q]
        for q in self.trace_subtree(q):
            self.node_potentials[q] += d
256
+
257
+ def reduced_cost(self, i):
258
+ """Returns the reduced cost of an edge i."""
259
+ c = (
260
+ self.edge_weights[i]
261
+ - self.node_potentials[self.edge_sources[i]]
262
+ + self.node_potentials[self.edge_targets[i]]
263
+ )
264
+ return c if self.edge_flow[i] == 0 else -c
265
+
266
    def find_entering_edges(self):
        """Yield entering edges until none can be found.

        Candidate edges are scanned cyclically in blocks of roughly
        sqrt(edge_count) edges; within each block the edge of most negative
        reduced cost is chosen (Dantzig's rule), while the blocks themselves
        are visited in order (Bland's rule). The generator terminates after
        a full sweep finds no eligible edge, at which point the flow is
        optimal.
        """
        if self.edge_count == 0:
            return

        # Entering edges are found by combining Dantzig's rule and Bland's
        # rule. The edges are cyclically grouped into blocks of size B. Within
        # each block, Dantzig's rule is applied to find an entering edge. The
        # blocks to search is determined following Bland's rule.
        B = int(ceil(sqrt(self.edge_count)))  # pivot block size
        M = (self.edge_count + B - 1) // B  # number of blocks needed to cover all edges
        m = 0  # number of consecutive blocks without eligible
        # entering edges
        f = 0  # first edge in block
        while m < M:
            # Determine the next block of edges.
            l = f + B
            if l <= self.edge_count:
                edges = range(f, l)
            else:
                # The block wraps around past the last edge index.
                l -= self.edge_count
                edges = chain(range(f, self.edge_count), range(l))
            f = l
            # Find the first edge with the lowest reduced cost.
            i = min(edges, key=self.reduced_cost)
            c = self.reduced_cost(i)
            if c >= 0:
                # No entering edge found in the current block.
                m += 1
            else:
                # Entering edge found.
                if self.edge_flow[i] == 0:
                    p = self.edge_sources[i]
                    q = self.edge_targets[i]
                else:
                    p = self.edge_targets[i]
                    q = self.edge_sources[i]
                yield i, p, q
                m = 0
        # All edges have nonnegative reduced costs. The current flow is
        # optimal.
307
+
308
+ def residual_capacity(self, i, p):
309
+ """Returns the residual capacity of an edge i in the direction away
310
+ from its endpoint p.
311
+ """
312
+ return (
313
+ self.edge_capacities[i] - self.edge_flow[i]
314
+ if self.edge_sources[i] == p
315
+ else self.edge_flow[i]
316
+ )
317
+
318
    def find_leaving_edge(self, Wn, We):
        """Returns the leaving edge in a cycle represented by Wn and We.

        Selects an edge of minimum residual capacity, scanning the cycle
        from its end (the leaving arc rule, which prevents cycling).
        Returns ``(edge, s, t)`` where s is the endpoint the residual
        capacity was measured from and t the opposite endpoint.
        """
        j, s = min(
            zip(reversed(We), reversed(Wn)),
            key=lambda i_p: self.residual_capacity(*i_p),
        )
        t = self.edge_targets[j] if self.edge_sources[j] == s else self.edge_sources[j]
        return j, s, t
326
+
327
+
328
@not_implemented_for("undirected")
@nx._dispatchable(
    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
)
def network_simplex(G, demand="demand", capacity="capacity", weight="weight"):
    r"""Find a minimum cost flow satisfying all demands in digraph G.

    This is a primal network simplex algorithm that uses the leaving
    arc rule to prevent cycling.

    G is a digraph with edge costs and capacities and in which nodes
    have demand, i.e., they want to send or receive some amount of
    flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node want to receive flow. A flow on
    the digraph G satisfies all demand if the net flow into each node
    is equal to the demand of that node.

    Parameters
    ----------
    G : NetworkX graph
        DiGraph on which a minimum cost flow satisfying all demands is
        to be found.

    demand : string
        Nodes of the graph G are expected to have an attribute demand
        that indicates how much flow a node wants to send (negative
        demand) or receive (positive demand). Note that the sum of the
        demands should be 0 otherwise the problem in not feasible. If
        this attribute is not present, a node is considered to have 0
        demand. Default value: 'demand'.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    weight : string
        Edges of the graph G are expected to have an attribute weight
        that indicates the cost incurred by sending one unit of flow on
        that edge. If not present, the weight is considered to be 0.
        Default value: 'weight'.

    Returns
    -------
    flowCost : integer, float
        Cost of a minimum cost flow satisfying all demands.

    flowDict : dictionary
        Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow edge (u, v).

    Raises
    ------
    NetworkXError
        This exception is raised if the input graph is not directed or
        not connected.

    NetworkXUnfeasible
        This exception is raised in the following situations:

        * The sum of the demands is not zero. Then, there is no
          flow satisfying all demands.
        * There is no flow satisfying all demand.

    NetworkXUnbounded
        This exception is raised if the digraph G has a cycle of
        negative cost and infinite capacity. Then, the cost of a flow
        satisfying all demands is unbounded below.

    Notes
    -----
    This algorithm is not guaranteed to work if edge weights or demands
    are floating point numbers (overflows and roundoff errors can
    cause problems). As a workaround you can use integer numbers by
    multiplying the relevant edge attributes by a convenient
    constant factor (eg 100).

    See also
    --------
    cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost

    Examples
    --------
    A simple example of a min cost flow problem.

    >>> G = nx.DiGraph()
    >>> G.add_node("a", demand=-5)
    >>> G.add_node("d", demand=5)
    >>> G.add_edge("a", "b", weight=3, capacity=4)
    >>> G.add_edge("a", "c", weight=6, capacity=10)
    >>> G.add_edge("b", "d", weight=1, capacity=9)
    >>> G.add_edge("c", "d", weight=2, capacity=5)
    >>> flowCost, flowDict = nx.network_simplex(G)
    >>> flowCost
    24
    >>> flowDict
    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}

    The mincost flow algorithm can also be used to solve shortest path
    problems. To find the shortest path between two nodes u and v,
    give all edges an infinite capacity, give node u a demand of -1 and
    node v a demand a 1. Then run the network simplex. The value of a
    min cost flow will be the distance between u and v and edges
    carrying positive flow will indicate the path.

    >>> G = nx.DiGraph()
    >>> G.add_weighted_edges_from(
    ...     [
    ...         ("s", "u", 10),
    ...         ("s", "x", 5),
    ...         ("u", "v", 1),
    ...         ("u", "x", 2),
    ...         ("v", "y", 1),
    ...         ("x", "u", 3),
    ...         ("x", "v", 5),
    ...         ("x", "y", 2),
    ...         ("y", "s", 7),
    ...         ("y", "v", 6),
    ...     ]
    ... )
    >>> G.add_node("s", demand=-1)
    >>> G.add_node("v", demand=1)
    >>> flowCost, flowDict = nx.network_simplex(G)
    >>> flowCost == nx.shortest_path_length(G, "s", "v", weight="weight")
    True
    >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0])
    [('s', 'x'), ('u', 'v'), ('x', 'u')]
    >>> nx.shortest_path(G, "s", "v", weight="weight")
    ['s', 'x', 'u', 'v']

    It is possible to change the name of the attributes used for the
    algorithm.

    >>> G = nx.DiGraph()
    >>> G.add_node("p", spam=-4)
    >>> G.add_node("q", spam=2)
    >>> G.add_node("a", spam=-2)
    >>> G.add_node("d", spam=-1)
    >>> G.add_node("t", spam=2)
    >>> G.add_node("w", spam=3)
    >>> G.add_edge("p", "q", cost=7, vacancies=5)
    >>> G.add_edge("p", "a", cost=1, vacancies=4)
    >>> G.add_edge("q", "d", cost=2, vacancies=3)
    >>> G.add_edge("t", "q", cost=1, vacancies=2)
    >>> G.add_edge("a", "t", cost=2, vacancies=4)
    >>> G.add_edge("d", "w", cost=3, vacancies=4)
    >>> G.add_edge("t", "w", cost=4, vacancies=1)
    >>> flowCost, flowDict = nx.network_simplex(
    ...     G, demand="spam", capacity="vacancies", weight="cost"
    ... )
    >>> flowCost
    37
    >>> flowDict
    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}

    References
    ----------
    .. [1] Z. Kiraly, P. Kovacs.
           Efficient implementation of minimum-cost flow algorithms.
           Acta Universitatis Sapientiae, Informatica 4(1):67--118. 2012.
    .. [2] R. Barr, F. Glover, D. Klingman.
           Enhancement of spanning tree labeling procedures for network
           optimization.
           INFOR 17(1):16--34. 1979.
    """
    ###########################################################################
    # Problem essentials extraction and sanity check
    ###########################################################################

    if len(G) == 0:
        raise nx.NetworkXError("graph has no nodes")

    multigraph = G.is_multigraph()

    # extracting data essential to problem
    DEAF = _DataEssentialsAndFunctions(
        G, multigraph, demand=demand, capacity=capacity, weight=weight
    )

    ###########################################################################
    # Quick Error Detection
    ###########################################################################

    inf = float("inf")
    for u, d in zip(DEAF.node_list, DEAF.node_demands):
        if abs(d) == inf:
            raise nx.NetworkXError(f"node {u!r} has infinite demand")
    for e, w in zip(DEAF.edge_indices, DEAF.edge_weights):
        if abs(w) == inf:
            raise nx.NetworkXError(f"edge {e!r} has infinite weight")
    # Self-loops are excluded from the edge lists above, so check them
    # separately for infinite weights.
    if not multigraph:
        edges = nx.selfloop_edges(G, data=True)
    else:
        edges = nx.selfloop_edges(G, data=True, keys=True)
    for e in edges:
        if abs(e[-1].get(weight, 0)) == inf:
            raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight")

    ###########################################################################
    # Quick Infeasibility Detection
    ###########################################################################

    if sum(DEAF.node_demands) != 0:
        raise nx.NetworkXUnfeasible("total node demand is not zero")
    for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities):
        if c < 0:
            raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity")
    if not multigraph:
        edges = nx.selfloop_edges(G, data=True)
    else:
        edges = nx.selfloop_edges(G, data=True, keys=True)
    for e in edges:
        if e[-1].get(capacity, inf) < 0:
            raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity")

    ###########################################################################
    # Initialization
    ###########################################################################

    # Add a dummy node -1 and connect all existing nodes to it with infinite-
    # capacity dummy edges. Node -1 will serve as the root of the
    # spanning tree of the network simplex method. The new edges will used to
    # trivially satisfy the node demands and create an initial strongly
    # feasible spanning tree.
    for i, d in enumerate(DEAF.node_demands):
        # Must be greater-than here. Zero-demand nodes must have
        # edges pointing towards the root to ensure strong feasibility.
        if d > 0:
            DEAF.edge_sources.append(-1)
            DEAF.edge_targets.append(i)
        else:
            DEAF.edge_sources.append(i)
            DEAF.edge_targets.append(-1)
    # faux_inf is a finite stand-in for infinity that is provably larger
    # than any quantity the algorithm can produce (3x the largest of total
    # finite capacity, total absolute weight, and maximum absolute demand).
    faux_inf = (
        3
        * max(
            chain(
                [
                    sum(c for c in DEAF.edge_capacities if c < inf),
                    sum(abs(w) for w in DEAF.edge_weights),
                ],
                (abs(d) for d in DEAF.node_demands),
            )
        )
        or 1
    )

    n = len(DEAF.node_list)  # number of nodes
    DEAF.edge_weights.extend(repeat(faux_inf, n))
    DEAF.edge_capacities.extend(repeat(faux_inf, n))

    # Construct the initial spanning tree.
    DEAF.initialize_spanning_tree(n, faux_inf)

    ###########################################################################
    # Pivot loop
    ###########################################################################

    for i, p, q in DEAF.find_entering_edges():
        Wn, We = DEAF.find_cycle(i, p, q)
        j, s, t = DEAF.find_leaving_edge(Wn, We)
        DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s))
        # Do nothing more if the entering edge is the same as the leaving edge.
        if i != j:
            if DEAF.parent[t] != s:
                # Ensure that s is the parent of t.
                s, t = t, s
            if We.index(i) > We.index(j):
                # Ensure that q is in the subtree rooted at t.
                p, q = q, p
            DEAF.remove_edge(s, t)
            DEAF.make_root(q)
            DEAF.add_edge(i, p, q)
            DEAF.update_potentials(i, p, q)

    ###########################################################################
    # Infeasibility and unboundedness detection
    ###########################################################################

    # Any residual flow on a dummy edge means the real network cannot
    # absorb all demands.
    if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)):
        raise nx.NetworkXUnfeasible("no flow satisfies all node demands")

    if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any(
        e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0
        for e in nx.selfloop_edges(G, data=True)
    ):
        raise nx.NetworkXUnbounded("negative cycle with infinite capacity found")

    ###########################################################################
    # Flow cost calculation and flow dict construction
    ###########################################################################

    # Discard the dummy-edge flows before computing the cost.
    del DEAF.edge_flow[DEAF.edge_count :]
    flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow))
    flow_dict = {n: {} for n in DEAF.node_list}

    def add_entry(e):
        """Add a flow dict entry."""
        d = flow_dict[e[0]]
        for k in e[1:-2]:
            try:
                d = d[k]
            except KeyError:
                t = {}
                d[k] = t
                d = t
        d[e[-2]] = e[-1]

    DEAF.edge_sources = (
        DEAF.node_list[s] for s in DEAF.edge_sources
    )  # Use original nodes.
    DEAF.edge_targets = (
        DEAF.node_list[t] for t in DEAF.edge_targets
    )  # Use original nodes.
    if not multigraph:
        for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow):
            add_entry(e)
        edges = G.edges(data=True)
    else:
        for e in zip(
            DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow
        ):
            add_entry(e)
        edges = G.edges(data=True, keys=True)
    # Self-loops were excluded from the simplex; saturate negative-weight
    # ones (cost-reducing) and leave the rest at zero flow.
    for e in edges:
        if e[0] != e[1]:
            continue
        if e[-1].get(capacity, inf) == 0:
            add_entry(e[:-1] + (0,))
        else:
            w = e[-1].get(weight, 0)
            if w >= 0:
                add_entry(e[:-1] + (0,))
            else:
                c = e[-1][capacity]
                flow_cost += w * c
                add_entry(e[:-1] + (c,))

    return flow_cost, flow_dict
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/preflowpush.py ADDED
@@ -0,0 +1,425 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Highest-label preflow-push algorithm for maximum flow problems.
3
+ """
4
+
5
+ from collections import deque
6
+ from itertools import islice
7
+
8
+ import networkx as nx
9
+
10
+ from ...utils import arbitrary_element
11
+ from .utils import (
12
+ CurrentEdge,
13
+ GlobalRelabelThreshold,
14
+ Level,
15
+ build_residual_network,
16
+ detect_unboundedness,
17
+ )
18
+
19
+ __all__ = ["preflow_push"]
20
+
21
+
22
def preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only):
    """Implementation of the highest-label preflow-push algorithm.

    Returns the residual network R with ``R.graph['flow_value']`` set to the
    maximum flow value from s to t. When ``value_only`` is true, only a
    maximum preflow is computed (phase 2 is skipped).
    """
    if s not in G:
        raise nx.NetworkXError(f"node {str(s)} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {str(t)} not in graph")
    if s == t:
        raise nx.NetworkXError("source and sink are the same node")

    if global_relabel_freq is None:
        global_relabel_freq = 0
    if global_relabel_freq < 0:
        raise nx.NetworkXError("global_relabel_freq must be nonnegative.")

    if residual is None:
        R = build_residual_network(G, capacity)
    else:
        R = residual

    detect_unboundedness(R, s, t)

    # Local aliases for hot-loop attribute access.
    R_nodes = R.nodes
    R_pred = R.pred
    R_succ = R.succ

    # Initialize/reset the residual network.
    for u in R:
        R_nodes[u]["excess"] = 0
        for e in R_succ[u].values():
            e["flow"] = 0

    def reverse_bfs(src):
        """Perform a reverse breadth-first search from src in the residual
        network.

        Returns a dict mapping each node that can reach src through
        non-saturated residual edges to its BFS distance from src.
        """
        heights = {src: 0}
        q = deque([(src, 0)])
        while q:
            u, height = q.popleft()
            height += 1
            for v, attr in R_pred[u].items():
                if v not in heights and attr["flow"] < attr["capacity"]:
                    heights[v] = height
                    q.append((v, height))
        return heights

    # Initialize heights of the nodes.
    heights = reverse_bfs(t)

    if s not in heights:
        # t is not reachable from s in the residual network. The maximum flow
        # must be zero.
        R.graph["flow_value"] = 0
        return R

    n = len(R)
    # max_height represents the height of the highest level below level n with
    # at least one active node.
    max_height = max(heights[u] for u in heights if u != s)
    heights[s] = n

    grt = GlobalRelabelThreshold(n, R.size(), global_relabel_freq)

    # Initialize heights and 'current edge' data structures of the nodes.
    for u in R:
        R_nodes[u]["height"] = heights[u] if u in heights else n + 1
        R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u])

    def push(u, v, flow):
        """Push flow units of flow from u to v."""
        R_succ[u][v]["flow"] += flow
        R_succ[v][u]["flow"] -= flow
        R_nodes[u]["excess"] -= flow
        R_nodes[v]["excess"] += flow

    # The maximum flow must be nonzero now. Initialize the preflow by
    # saturating all edges emanating from s.
    for u, attr in R_succ[s].items():
        flow = attr["capacity"]
        if flow > 0:
            push(s, u, flow)

    # Partition nodes into levels.
    levels = [Level() for i in range(2 * n)]
    for u in R:
        if u != s and u != t:
            level = levels[R_nodes[u]["height"]]
            if R_nodes[u]["excess"] > 0:
                level.active.add(u)
            else:
                level.inactive.add(u)

    def activate(v):
        """Move a node from the inactive set to the active set of its level."""
        if v != s and v != t:
            level = levels[R_nodes[v]["height"]]
            if v in level.inactive:
                level.inactive.remove(v)
                level.active.add(v)

    def relabel(u):
        """Relabel a node to create an admissible edge."""
        grt.add_work(len(R_succ[u]))
        return (
            min(
                R_nodes[v]["height"]
                for v, attr in R_succ[u].items()
                if attr["flow"] < attr["capacity"]
            )
            + 1
        )

    def discharge(u, is_phase1):
        """Discharge a node until it becomes inactive or, during phase 1 (see
        below), its height reaches at least n. The node is known to have the
        largest height among active nodes.
        """
        height = R_nodes[u]["height"]
        curr_edge = R_nodes[u]["curr_edge"]
        # next_height represents the next height to examine after discharging
        # the current node. During phase 1, it is capped to below n.
        next_height = height
        levels[height].active.remove(u)
        while True:
            v, attr = curr_edge.get()
            if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]:
                # Admissible edge: push as much excess as it can carry.
                flow = min(R_nodes[u]["excess"], attr["capacity"] - attr["flow"])
                push(u, v, flow)
                activate(v)
                if R_nodes[u]["excess"] == 0:
                    # The node has become inactive.
                    levels[height].inactive.add(u)
                    break
            try:
                curr_edge.move_to_next()
            except StopIteration:
                # We have run off the end of the adjacency list, and there can
                # be no more admissible edges. Relabel the node to create one.
                height = relabel(u)
                if is_phase1 and height >= n - 1:
                    # Although the node is still active, with a height at least
                    # n - 1, it is now known to be on the s side of the minimum
                    # s-t cut. Stop processing it until phase 2.
                    levels[height].active.add(u)
                    break
                # The first relabel operation after global relabeling may not
                # increase the height of the node since the 'current edge' data
                # structure is not rewound. Use height instead of (height - 1)
                # in case other active nodes at the same level are missed.
                next_height = height
        R_nodes[u]["height"] = height
        return next_height

    def gap_heuristic(height):
        """Apply the gap heuristic."""
        # Move all nodes at levels (height + 1) to max_height to level n + 1.
        for level in islice(levels, height + 1, max_height + 1):
            for u in level.active:
                R_nodes[u]["height"] = n + 1
            for u in level.inactive:
                R_nodes[u]["height"] = n + 1
            levels[n + 1].active.update(level.active)
            level.active.clear()
            levels[n + 1].inactive.update(level.inactive)
            level.inactive.clear()

    def global_relabel(from_sink):
        """Apply the global relabeling heuristic.

        Recomputes exact heights via a reverse BFS from the sink (phase 1)
        or the source (phase 2) and rebuilds the level sets accordingly.
        Returns the new max_height.
        """
        src = t if from_sink else s
        heights = reverse_bfs(src)
        if not from_sink:
            # s must be reachable from t. Remove t explicitly.
            del heights[t]
        max_height = max(heights.values())
        if from_sink:
            # Also mark nodes from which t is unreachable for relabeling. This
            # serves the same purpose as the gap heuristic.
            for u in R:
                if u not in heights and R_nodes[u]["height"] < n:
                    heights[u] = n + 1
        else:
            # Shift the computed heights because the height of s is n.
            for u in heights:
                heights[u] += n
            max_height += n
        del heights[src]
        for u, new_height in heights.items():
            old_height = R_nodes[u]["height"]
            if new_height != old_height:
                if u in levels[old_height].active:
                    levels[old_height].active.remove(u)
                    levels[new_height].active.add(u)
                else:
                    levels[old_height].inactive.remove(u)
                    levels[new_height].inactive.add(u)
                R_nodes[u]["height"] = new_height
        return max_height

    # Phase 1: Find the maximum preflow by pushing as much flow as possible to
    # t.

    height = max_height
    while height > 0:
        # Discharge active nodes in the current level.
        while True:
            level = levels[height]
            if not level.active:
                # All active nodes in the current level have been discharged.
                # Move to the next lower level.
                height -= 1
                break
            # Record the old height and level for the gap heuristic.
            old_height = height
            old_level = level
            u = arbitrary_element(level.active)
            height = discharge(u, True)
            if grt.is_reached():
                # Global relabeling heuristic: Recompute the exact heights of
                # all nodes.
                height = global_relabel(True)
                max_height = height
                grt.clear_work()
            elif not old_level.active and not old_level.inactive:
                # Gap heuristic: If the level at old_height is empty (a 'gap'),
                # a minimum cut has been identified. All nodes with heights
                # above old_height can have their heights set to n + 1 and not
                # be further processed before a maximum preflow is found.
                gap_heuristic(old_height)
                height = old_height - 1
                max_height = height
            else:
                # Update the height of the highest level with at least one
                # active node.
                max_height = max(max_height, height)

    # A maximum preflow has been found. The excess at t is the maximum flow
    # value.
    if value_only:
        R.graph["flow_value"] = R_nodes[t]["excess"]
        return R

    # Phase 2: Convert the maximum preflow into a maximum flow by returning the
    # excess to s.

    # Relabel all nodes so that they have accurate heights.
    height = global_relabel(False)
    grt.clear_work()

    # Continue to discharge the active nodes.
    while height > n:
        # Discharge active nodes in the current level.
        while True:
            level = levels[height]
            if not level.active:
                # All active nodes in the current level have been discharged.
                # Move to the next lower level.
                height -= 1
                break
            u = arbitrary_element(level.active)
            height = discharge(u, False)
            if grt.is_reached():
                # Global relabeling heuristic.
                height = global_relabel(False)
                grt.clear_work()

    R.graph["flow_value"] = R_nodes[t]["excess"]
    return R
289
+
290
+
291
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def preflow_push(
    G, s, t, capacity="capacity", residual=None, global_relabel_freq=1, value_only=False
):
    r"""Find a maximum single-commodity flow using the highest-label
    preflow-push algorithm.

    This function returns the residual network resulting after computing
    the maximum flow. See the Notes section for the conventions NetworkX
    uses for residual networks.

    This algorithm has a running time of $O(n^2 \sqrt{m})$ for $n$ nodes
    and $m$ edges.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute that indicates how much flow an edge
        can support. Edges without this attribute are considered to have
        infinite capacity. Default value: 'capacity'.

    residual : NetworkX graph
        Residual network on which the algorithm is to be executed. If None, a
        new residual network is created. Default value: None.

    global_relabel_freq : integer, float
        Relative frequency of applying the global relabeling heuristic to speed
        up the algorithm. If it is None, the heuristic is disabled. Default
        value: 1.

    value_only : bool
        If False, compute a maximum flow; otherwise, compute a maximum preflow
        which is enough for computing the maximum flow value. Default value:
        False.

    Returns
    -------
    R : NetworkX DiGraph
        Residual network after computing the maximum flow.

    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`edmonds_karp`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`. For each node :samp:`u` in :samp:`R`,
    :samp:`R.nodes[u]['excess']` represents the difference between flow into
    :samp:`u` and flow out of :samp:`u`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
    only edges :samp:`(u, v)` such that
    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Examples
    --------
    >>> from networkx.algorithms.flow import preflow_push

    The functions that implement flow algorithms and output a residual
    network, such as this one, are not imported to the base NetworkX
    namespace, so you have to explicitly import them from the flow package.

    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> R = preflow_push(G, "x", "y")
    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
    >>> flow_value == R.graph["flow_value"]
    True
    >>> # preflow_push also stores the maximum flow value
    >>> # in the excess attribute of the sink node t
    >>> flow_value == R.nodes["y"]["excess"]
    True
    >>> # For some problems, you might only want to compute a
    >>> # maximum preflow.
    >>> R = preflow_push(G, "x", "y", value_only=True)
    >>> flow_value == R.graph["flow_value"]
    True
    >>> flow_value == R.nodes["y"]["excess"]
    True

    """
    # Delegate the actual work, then tag the residual network so callers
    # can tell which algorithm produced it.
    R = preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only)
    R.graph["algorithm"] = "preflow_push"
    nx._clear_cache(R)
    return R
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Shortest augmenting path algorithm for maximum flow problems.
3
+ """
4
+
5
+ from collections import deque
6
+
7
+ import networkx as nx
8
+
9
+ from .edmondskarp import edmonds_karp_core
10
+ from .utils import CurrentEdge, build_residual_network
11
+
12
+ __all__ = ["shortest_augmenting_path"]
13
+
14
+
15
def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff):
    """Implementation of the shortest augmenting path algorithm."""
    if s not in G:
        raise nx.NetworkXError(f"node {str(s)} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {str(t)} not in graph")
    if s == t:
        raise nx.NetworkXError("source and sink are the same node")

    R = build_residual_network(G, capacity) if residual is None else residual

    R_nodes = R.nodes
    R_pred = R.pred
    R_succ = R.succ

    # Initialize/reset the residual network: zero flow on every edge.
    for node in R:
        for edge_attr in R_succ[node].values():
            edge_attr["flow"] = 0

    # Exact distance-to-sink labels via a reverse BFS from t over
    # non-saturated edges.
    heights = {t: 0}
    queue = deque([(t, 0)])
    while queue:
        node, dist = queue.popleft()
        dist += 1
        for pred, attr in R_pred[node].items():
            if pred not in heights and attr["flow"] < attr["capacity"]:
                heights[pred] = dist
                queue.append((pred, dist))

    if s not in heights:
        # t is not reachable from s in the residual network, so the
        # maximum flow must be zero.
        R.graph["flow_value"] = 0
        return R

    n = len(G)
    m = R.size() / 2

    # Attach height and 'current edge' bookkeeping to every node.
    for node in R:
        R_nodes[node]["height"] = heights.get(node, n)
        R_nodes[node]["curr_edge"] = CurrentEdge(R_succ[node])

    # Number of nodes at each height, used by the gap heuristic.
    counts = [0] * (2 * n - 1)
    for node in R:
        counts[R_nodes[node]["height"]] += 1

    inf = R.graph["inf"]

    def augment(path):
        """Push the bottleneck amount of flow along an s-t path."""
        edges = list(zip(path, path[1:]))
        # Bottleneck = minimum residual capacity along the path.
        flow = min(R_succ[u][v]["capacity"] - R_succ[u][v]["flow"] for u, v in edges)
        if flow * 2 > inf:
            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
        for u, v in edges:
            R_succ[u][v]["flow"] += flow
            R_succ[v][u]["flow"] -= flow
        return flow

    def relabel(u):
        """Relabel a node to create an admissible edge."""
        height = n - 1
        for v, attr in R_succ[u].items():
            if attr["flow"] < attr["capacity"]:
                height = min(height, R_nodes[v]["height"])
        return height + 1

    if cutoff is None:
        cutoff = float("inf")

    # Phase 1: look for shortest augmenting paths using depth-first search.
    flow_value = 0
    path = [s]
    u = s
    d = n if not two_phase else int(min(m**0.5, 2 * n ** (2.0 / 3)))
    done = R_nodes[s]["height"] >= d
    while not done:
        height = R_nodes[u]["height"]
        curr_edge = R_nodes[u]["curr_edge"]
        # Depth-first search for the next node on the path to t.
        while True:
            v, attr = curr_edge.get()
            if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]:
                # Advance to the next node along an admissible edge.
                path.append(v)
                u = v
                break
            try:
                curr_edge.move_to_next()
            except StopIteration:
                counts[height] -= 1
                if counts[height] == 0:
                    # Gap heuristic: an empty level certifies a minimum
                    # cut, so the algorithm can stop right away.
                    R.graph["flow_value"] = flow_value
                    return R
                height = relabel(u)
                if u == s and height >= d:
                    if not two_phase:
                        # t is disconnected from s in the residual
                        # network; no augmenting paths remain.
                        R.graph["flow_value"] = flow_value
                        return R
                    # t is at least d steps away from s: phase 1 is over.
                    done = True
                    break
                counts[height] += 1
                R_nodes[u]["height"] = height
                if u != s:
                    # After relabeling, the last path edge is no longer
                    # admissible; retreat one step.
                    path.pop()
                    u = path[-1]
                    break
        if u == t:
            # t reached: augment along the path and restart the DFS.
            flow_value += augment(path)
            if flow_value >= cutoff:
                R.graph["flow_value"] = flow_value
                return R
            path = [s]
            u = s

    # Phase 2: look for shortest augmenting paths using breadth-first search.
    flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value)

    R.graph["flow_value"] = flow_value
    return R
164
+
165
+
166
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def shortest_augmenting_path(
    G,
    s,
    t,
    capacity="capacity",
    residual=None,
    value_only=False,
    two_phase=False,
    cutoff=None,
):
    r"""Find a maximum single-commodity flow using the shortest augmenting path
    algorithm.

    This function returns the residual network resulting after computing
    the maximum flow. See the Notes section for the conventions NetworkX
    uses for residual networks.

    This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$
    edges.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Name of the edge attribute that indicates how much flow an edge
        can support. Edges without this attribute are considered to have
        infinite capacity. Default value: 'capacity'.

    residual : NetworkX graph
        Residual network on which the algorithm is to be executed. If None, a
        new residual network is created. Default value: None.

    value_only : bool
        If True compute only the value of the maximum flow. This parameter
        will be ignored by this algorithm because it is not applicable.

    two_phase : bool
        If True, a two-phase variant is used. The two-phase variant improves
        the running time on unit-capacity networks from $O(nm)$ to
        $O(\min(n^{2/3}, m^{1/2}) m)$. Default value: False.

    cutoff : integer, float
        If specified, the algorithm will terminate when the flow value reaches
        or exceeds the cutoff. In this case, it may be unable to immediately
        determine a minimum cut. Default value: None.

    Returns
    -------
    R : NetworkX DiGraph
        Residual network after computing the maximum flow.

    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`edmonds_karp`
    :meth:`preflow_push`

    Notes
    -----
    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
    specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
    that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Examples
    --------
    >>> from networkx.algorithms.flow import shortest_augmenting_path

    The functions that implement flow algorithms and output a residual
    network, such as this one, are not imported to the base NetworkX
    namespace, so you have to explicitly import them from the flow package.

    >>> G = nx.DiGraph()
    >>> G.add_edge("x", "a", capacity=3.0)
    >>> G.add_edge("x", "b", capacity=1.0)
    >>> G.add_edge("a", "c", capacity=3.0)
    >>> G.add_edge("b", "c", capacity=5.0)
    >>> G.add_edge("b", "d", capacity=4.0)
    >>> G.add_edge("d", "e", capacity=2.0)
    >>> G.add_edge("c", "y", capacity=2.0)
    >>> G.add_edge("e", "y", capacity=3.0)
    >>> R = shortest_augmenting_path(G, "x", "y")
    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
    >>> flow_value
    3.0
    >>> flow_value == R.graph["flow_value"]
    True

    """
    # Delegate the actual work, then tag the residual network so callers
    # can tell which algorithm produced it.
    R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff)
    R.graph["algorithm"] = "shortest_augmenting_path"
    nx._clear_cache(R)
    return R
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import combinations
2
+
3
+ import pytest
4
+
5
+ import networkx as nx
6
+ from networkx.algorithms.flow import (
7
+ boykov_kolmogorov,
8
+ dinitz,
9
+ edmonds_karp,
10
+ preflow_push,
11
+ shortest_augmenting_path,
12
+ )
13
+
14
+ flow_funcs = [
15
+ boykov_kolmogorov,
16
+ dinitz,
17
+ edmonds_karp,
18
+ preflow_push,
19
+ shortest_augmenting_path,
20
+ ]
21
+
22
+
23
class TestGomoryHuTree:
    """Validate Gomory-Hu trees against direct minimum-cut computations."""

    def minimum_edge_weight(self, T, u, v):
        """Return (weight, edge) of the lightest edge on the T-path u..v."""
        path = nx.shortest_path(T, u, v, weight="weight")
        candidates = [(T[a][b]["weight"], (a, b)) for a, b in zip(path, path[1:])]
        return min(candidates)

    def compute_cutset(self, G, T_orig, edge):
        """Edges of G crossing the bipartition induced by removing *edge* from T."""
        T = T_orig.copy()
        T.remove_edge(*edge)
        U, V = list(nx.connected_components(T))
        return {(x, y) for x in U for y in G[x] if y in V}

    def _assert_gomory_hu_tree(self, G, T, capacity="capacity"):
        # For every node pair, the lightest edge on the tree path must
        # equal the minimum cut value in the original graph.
        assert nx.is_tree(T)
        for u, v in combinations(G, 2):
            cut_value, _ = self.minimum_edge_weight(T, u, v)
            assert nx.minimum_cut_value(G, u, v, capacity=capacity) == cut_value

    def test_default_flow_function_karate_club_graph(self):
        G = nx.karate_club_graph()
        nx.set_edge_attributes(G, 1, "capacity")
        self._assert_gomory_hu_tree(G, nx.gomory_hu_tree(G))

    def test_karate_club_graph(self):
        G = nx.karate_club_graph()
        nx.set_edge_attributes(G, 1, "capacity")
        for flow_func in flow_funcs:
            self._assert_gomory_hu_tree(G, nx.gomory_hu_tree(G, flow_func=flow_func))

    def test_davis_southern_women_graph(self):
        G = nx.davis_southern_women_graph()
        nx.set_edge_attributes(G, 1, "capacity")
        for flow_func in flow_funcs:
            self._assert_gomory_hu_tree(G, nx.gomory_hu_tree(G, flow_func=flow_func))

    def test_florentine_families_graph(self):
        G = nx.florentine_families_graph()
        nx.set_edge_attributes(G, 1, "capacity")
        for flow_func in flow_funcs:
            self._assert_gomory_hu_tree(G, nx.gomory_hu_tree(G, flow_func=flow_func))

    @pytest.mark.slow
    def test_les_miserables_graph_cutset(self):
        G = nx.les_miserables_graph()
        nx.set_edge_attributes(G, 1, "capacity")
        for flow_func in flow_funcs:
            self._assert_gomory_hu_tree(G, nx.gomory_hu_tree(G, flow_func=flow_func))

    def test_karate_club_graph_cutset(self):
        G = nx.karate_club_graph()
        nx.set_edge_attributes(G, 1, "capacity")
        T = nx.gomory_hu_tree(G)
        assert nx.is_tree(T)
        cut_value, edge = self.minimum_edge_weight(T, 0, 33)
        # The tree edge weight must match the size of the actual cutset.
        assert cut_value == len(self.compute_cutset(G, T, edge))

    def test_wikipedia_example(self):
        # Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree
        G = nx.Graph()
        G.add_weighted_edges_from(
            [
                (0, 1, 1),
                (0, 2, 7),
                (1, 2, 1),
                (1, 3, 3),
                (1, 4, 2),
                (2, 4, 4),
                (3, 4, 1),
                (3, 5, 6),
                (4, 5, 2),
            ]
        )
        for flow_func in flow_funcs:
            T = nx.gomory_hu_tree(G, capacity="weight", flow_func=flow_func)
            self._assert_gomory_hu_tree(G, T, capacity="weight")

    def test_directed_raises(self):
        with pytest.raises(nx.NetworkXNotImplemented):
            nx.gomory_hu_tree(nx.DiGraph())

    def test_empty_raises(self):
        with pytest.raises(nx.NetworkXError):
            nx.gomory_hu_tree(nx.empty_graph())
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow.py ADDED
@@ -0,0 +1,560 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Maximum flow algorithms test suite.
2
+ """
3
+ import pytest
4
+
5
+ import networkx as nx
6
+ from networkx.algorithms.flow import (
7
+ boykov_kolmogorov,
8
+ build_flow_dict,
9
+ build_residual_network,
10
+ dinitz,
11
+ edmonds_karp,
12
+ preflow_push,
13
+ shortest_augmenting_path,
14
+ )
15
+
16
# Flow functions whose residual networks are validated by these tests.
flow_funcs = {
    boykov_kolmogorov,
    dinitz,
    edmonds_karp,
    preflow_push,
    shortest_augmenting_path,
}

max_min_funcs = {nx.maximum_flow, nx.minimum_cut}
flow_value_funcs = {nx.maximum_flow_value, nx.minimum_cut_value}
# Union, not intersection: max_min_funcs and flow_value_funcs are disjoint,
# so "&" would make interface_funcs — and therefore all_funcs — empty sets,
# silently skipping every test that iterates over them.
interface_funcs = max_min_funcs | flow_value_funcs
all_funcs = flow_funcs | interface_funcs
28
+
29
+
30
def compute_cutset(G, partition):
    """Return the set of edges of G that cross from the reachable side of
    *partition* to the non-reachable side."""
    reachable, non_reachable = partition
    return {
        (src, dst)
        for src in reachable
        for dst in G[src]
        if dst in non_reachable
    }
36
+
37
+
38
def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func):
    """Sanity-check a flow dict: structure mirrors G, flows respect
    capacities and non-negativity, and flow is conserved everywhere except
    at the source s (net out = solnValue) and sink t (net in = solnValue)."""
    errmsg = f"Assertion failed in function: {flow_func.__name__}"
    assert set(G) == set(flowDict), errmsg
    for u in G:
        assert set(G[u]) == set(flowDict[u]), errmsg
    excess = dict.fromkeys(flowDict, 0)
    for u, flows in flowDict.items():
        for v, flow in flows.items():
            if capacity in G[u][v]:
                assert flow <= G[u][v][capacity]
            assert flow >= 0, errmsg
            excess[u] -= flow
            excess[v] += flow
    for u, exc in excess.items():
        expected = -solnValue if u == s else solnValue if u == t else 0
        assert exc == expected, errmsg
58
+
59
+
60
def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func):
    """Sanity-check a minimum cut: partition nodes belong to G, the cutset
    consists of real edges whose capacities sum to solnValue, and removing
    the cutset disconnects the graph."""
    errmsg = f"Assertion failed in function: {flow_func.__name__}"
    for side in partition:
        assert all(node in G for node in side), errmsg
    cutset = compute_cutset(G, partition)
    assert all(G.has_edge(u, v) for u, v in cutset), errmsg
    assert solnValue == sum(G[u][v][capacity] for u, v in cutset), errmsg
    H = G.copy()
    H.remove_edges_from(cutset)
    still_connected = (
        nx.is_strongly_connected(H) if G.is_directed() else nx.is_connected(H)
    )
    assert not still_connected, errmsg
73
+
74
+
75
def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="capacity"):
    """Run every registered flow function on (G, s, t) and validate both the
    resulting flow and the corresponding minimum cut against the solution."""
    for flow_func in flow_funcs:
        errmsg = f"Assertion failed in function: {flow_func.__name__}"
        R = flow_func(G, s, t, capacity)
        # Test both legacy and new implementations.
        flow_dict = build_flow_dict(G, R)
        assert R.graph["flow_value"] == solnValue, errmsg
        validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func)
        # Minimum cut
        cut_value, partition = nx.minimum_cut(
            G, s, t, capacity=capacity, flow_func=flow_func
        )
        validate_cuts(G, s, t, solnValue, partition, capacity, flow_func)
89
+
90
+
91
class TestMaxflowMinCutCommon:
    """Max-flow/min-cut test cases shared by every flow function."""

    def test_graph1(self):
        # Trivial undirected graph
        G = nx.Graph()
        G.add_edge(1, 2, capacity=1.0)
        compare_flows_and_cuts(G, 1, 2, {1: {2: 1.0}, 2: {1: 1.0}}, 1.0)

    def test_graph2(self):
        # A more complex undirected graph, adapted from
        # https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One
        G = nx.Graph()
        G.add_edges_from(
            [
                ("x", "a", {"capacity": 3.0}),
                ("x", "b", {"capacity": 1.0}),
                ("a", "c", {"capacity": 3.0}),
                ("b", "c", {"capacity": 5.0}),
                ("b", "d", {"capacity": 4.0}),
                ("d", "e", {"capacity": 2.0}),
                ("c", "y", {"capacity": 2.0}),
                ("e", "y", {"capacity": 3.0}),
            ]
        )
        H = {
            "x": {"a": 3, "b": 1},
            "a": {"c": 3, "x": 3},
            "b": {"c": 1, "d": 2, "x": 1},
            "c": {"a": 3, "b": 1, "y": 2},
            "d": {"b": 2, "e": 2},
            "e": {"d": 2, "y": 2},
            "y": {"c": 2, "e": 2},
        }
        compare_flows_and_cuts(G, "x", "y", H, 4.0)

    def test_digraph1(self):
        # The classic directed graph example
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("a", "b", {"capacity": 1000.0}),
                ("a", "c", {"capacity": 1000.0}),
                ("b", "c", {"capacity": 1.0}),
                ("b", "d", {"capacity": 1000.0}),
                ("c", "d", {"capacity": 1000.0}),
            ]
        )
        H = {
            "a": {"b": 1000.0, "c": 1000.0},
            "b": {"c": 0, "d": 1000.0},
            "c": {"d": 1000.0},
            "d": {},
        }
        compare_flows_and_cuts(G, "a", "d", H, 2000.0)

    def test_digraph2(self):
        # An example in which some edges end up with zero flow.
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", "b", {"capacity": 2}),
                ("s", "c", {"capacity": 1}),
                ("c", "d", {"capacity": 1}),
                ("d", "a", {"capacity": 1}),
                ("b", "a", {"capacity": 2}),
                ("a", "t", {"capacity": 2}),
            ]
        )
        H = {
            "s": {"b": 2, "c": 0},
            "c": {"d": 0},
            "d": {"a": 0},
            "b": {"a": 2},
            "a": {"t": 2},
            "t": {},
        }
        compare_flows_and_cuts(G, "s", "t", H, 2)

    def test_digraph3(self):
        # A directed graph example from Cormen et al.
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", "v1", {"capacity": 16.0}),
                ("s", "v2", {"capacity": 13.0}),
                ("v1", "v2", {"capacity": 10.0}),
                ("v2", "v1", {"capacity": 4.0}),
                ("v1", "v3", {"capacity": 12.0}),
                ("v3", "v2", {"capacity": 9.0}),
                ("v2", "v4", {"capacity": 14.0}),
                ("v4", "v3", {"capacity": 7.0}),
                ("v3", "t", {"capacity": 20.0}),
                ("v4", "t", {"capacity": 4.0}),
            ]
        )
        H = {
            "s": {"v1": 12.0, "v2": 11.0},
            "v2": {"v1": 0, "v4": 11.0},
            "v1": {"v2": 0, "v3": 12.0},
            "v3": {"v2": 0, "t": 19.0},
            "v4": {"v3": 7.0, "t": 4.0},
            "t": {},
        }
        compare_flows_and_cuts(G, "s", "t", H, 23.0)

    def test_digraph4(self):
        # A more complex directed graph from
        # https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("x", "a", {"capacity": 3.0}),
                ("x", "b", {"capacity": 1.0}),
                ("a", "c", {"capacity": 3.0}),
                ("b", "c", {"capacity": 5.0}),
                ("b", "d", {"capacity": 4.0}),
                ("d", "e", {"capacity": 2.0}),
                ("c", "y", {"capacity": 2.0}),
                ("e", "y", {"capacity": 3.0}),
            ]
        )
        H = {
            "x": {"a": 2.0, "b": 1.0},
            "a": {"c": 2.0},
            "b": {"c": 0, "d": 1.0},
            "c": {"y": 2.0},
            "d": {"e": 1.0},
            "e": {"y": 1.0},
            "y": {},
        }
        compare_flows_and_cuts(G, "x", "y", H, 3.0)

    def test_wikipedia_dinitz_example(self):
        # Nice example from https://en.wikipedia.org/wiki/Dinic's_algorithm
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", 1, {"capacity": 10}),
                ("s", 2, {"capacity": 10}),
                (1, 3, {"capacity": 4}),
                (1, 4, {"capacity": 8}),
                (1, 2, {"capacity": 2}),
                (2, 4, {"capacity": 9}),
                (3, "t", {"capacity": 10}),
                (4, 3, {"capacity": 6}),
                (4, "t", {"capacity": 10}),
            ]
        )
        solnFlows = {
            1: {2: 0, 3: 4, 4: 6},
            2: {4: 9},
            3: {"t": 9},
            4: {3: 5, "t": 10},
            "s": {1: 10, 2: 9},
            "t": {},
        }
        compare_flows_and_cuts(G, "s", "t", solnFlows, 19)

    def test_optional_capacity(self):
        # Test optional capacity parameter.
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("x", "a", {"spam": 3.0}),
                ("x", "b", {"spam": 1.0}),
                ("a", "c", {"spam": 3.0}),
                ("b", "c", {"spam": 5.0}),
                ("b", "d", {"spam": 4.0}),
                ("d", "e", {"spam": 2.0}),
                ("c", "y", {"spam": 2.0}),
                ("e", "y", {"spam": 3.0}),
            ]
        )
        solnFlows = {
            "x": {"a": 2.0, "b": 1.0},
            "a": {"c": 2.0},
            "b": {"c": 0, "d": 1.0},
            "c": {"y": 2.0},
            "d": {"e": 1.0},
            "e": {"y": 1.0},
            "y": {},
        }
        compare_flows_and_cuts(G, "x", "y", solnFlows, 3.0, capacity="spam")

    def test_digraph_infcap_edges(self):
        # DiGraph with infinite capacity edges
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", "a"),
                ("s", "b", {"capacity": 30}),
                ("a", "c", {"capacity": 25}),
                ("b", "c", {"capacity": 12}),
                ("a", "t", {"capacity": 60}),
                ("c", "t"),
            ]
        )
        H = {
            "s": {"a": 85, "b": 12},
            "a": {"c": 25, "t": 60},
            "b": {"c": 12},
            "c": {"t": 37},
            "t": {},
        }
        compare_flows_and_cuts(G, "s", "t", H, 97)

        # DiGraph with infinite capacity digon
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", "a", {"capacity": 85}),
                ("s", "b", {"capacity": 30}),
                ("a", "c"),
                ("c", "a"),
                ("b", "c", {"capacity": 12}),
                ("a", "t", {"capacity": 60}),
                ("c", "t", {"capacity": 37}),
            ]
        )
        H = {
            "s": {"a": 85, "b": 12},
            "a": {"c": 25, "t": 60},
            "c": {"a": 0, "t": 37},
            "b": {"c": 12},
            "t": {},
        }
        compare_flows_and_cuts(G, "s", "t", H, 97)

    def test_digraph_infcap_path(self):
        # Graph with infinite capacity (s, t)-path
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", "a"),
                ("s", "b", {"capacity": 30}),
                ("a", "c"),
                ("b", "c", {"capacity": 12}),
                ("a", "t", {"capacity": 60}),
                ("c", "t"),
            ]
        )
        for flow_func in all_funcs:
            pytest.raises(nx.NetworkXUnbounded, flow_func, G, "s", "t")

    def test_graph_infcap_edges(self):
        # Undirected graph with infinite capacity edges
        G = nx.Graph()
        G.add_edges_from(
            [
                ("s", "a"),
                ("s", "b", {"capacity": 30}),
                ("a", "c", {"capacity": 25}),
                ("b", "c", {"capacity": 12}),
                ("a", "t", {"capacity": 60}),
                ("c", "t"),
            ]
        )
        H = {
            "s": {"a": 85, "b": 12},
            "a": {"c": 25, "s": 85, "t": 60},
            "b": {"c": 12, "s": 12},
            "c": {"a": 25, "b": 12, "t": 37},
            "t": {"a": 60, "c": 37},
        }
        compare_flows_and_cuts(G, "s", "t", H, 97)

    def test_digraph5(self):
        # From ticket #429 by mfrasca.
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("s", "a", {"capacity": 2}),
                ("s", "b", {"capacity": 2}),
                ("a", "b", {"capacity": 5}),
                ("a", "t", {"capacity": 1}),
                ("b", "a", {"capacity": 1}),
                ("b", "t", {"capacity": 3}),
            ]
        )
        flowSoln = {
            "a": {"b": 1, "t": 1},
            "b": {"a": 0, "t": 3},
            "s": {"a": 2, "b": 2},
            "t": {},
        }
        compare_flows_and_cuts(G, "s", "t", flowSoln, 4)

    def test_disconnected(self):
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
        G.remove_node(1)
        assert nx.maximum_flow_value(G, 0, 3) == 0
        compare_flows_and_cuts(G, 0, 3, {0: {}, 2: {3: 0}, 3: {2: 0}}, 0)

    def test_source_target_not_in_graph(self):
        G = nx.Graph()
        # Removing either endpoint must make every flow function raise.
        for missing in (0, 3):
            G.add_weighted_edges_from(
                [(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity"
            )
            G.remove_node(missing)
            for flow_func in all_funcs:
                pytest.raises(nx.NetworkXError, flow_func, G, 0, 3)

    def test_source_target_coincide(self):
        G = nx.Graph()
        G.add_node(0)
        for flow_func in all_funcs:
            pytest.raises(nx.NetworkXError, flow_func, G, 0, 0)

    def test_multigraphs_raise(self):
        G = nx.MultiGraph()
        # NOTE(review): M is never exercised below — looks like an
        # intended MultiDiGraph case that was never written; confirm.
        M = nx.MultiDiGraph()
        G.add_edges_from([(0, 1), (1, 0)], capacity=True)
        for flow_func in all_funcs:
            pytest.raises(nx.NetworkXError, flow_func, G, 0, 0)
387
+
388
+
389
class TestMaxFlowMinCutInterface:
    """Tests for the high-level maximum_flow / minimum_cut interface
    functions: flow_func dispatch, kwargs forwarding, cutoff validation,
    and residual-network reuse."""

    def setup_method(self):
        # G: a small DiGraph whose max flow from "x" to "y" is 3.0.
        G = nx.DiGraph()
        G.add_edge("x", "a", capacity=3.0)
        G.add_edge("x", "b", capacity=1.0)
        G.add_edge("a", "c", capacity=3.0)
        G.add_edge("b", "c", capacity=5.0)
        G.add_edge("b", "d", capacity=4.0)
        G.add_edge("d", "e", capacity=2.0)
        G.add_edge("c", "y", capacity=2.0)
        G.add_edge("e", "y", capacity=3.0)
        self.G = G
        # H: a trivial 0 -> 1 -> 2 path with unit capacities (flow 1.0).
        H = nx.DiGraph()
        H.add_edge(0, 1, capacity=1.0)
        H.add_edge(1, 2, capacity=1.0)
        self.H = H

    def test_flow_func_not_callable(self):
        # Non-callable flow_func arguments must be rejected.
        elements = ["this_should_be_callable", 10, {1, 2, 3}]
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
        for flow_func in interface_funcs:
            for element in elements:
                pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element)
                pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element)

    def test_flow_func_parameters(self):
        # Every concrete algorithm passed via flow_func must give the
        # same max-flow value (3.0) through every interface function.
        G = self.G
        fv = 3.0
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                errmsg = (
                    f"Assertion failed in function: {flow_func.__name__} "
                    f"in interface {interface_func.__name__}"
                )
                result = interface_func(G, "x", "y", flow_func=flow_func)
                if interface_func in max_min_funcs:
                    # These return a (value, partition/flow_dict) pair.
                    result = result[0]
                assert fv == result, errmsg

    def test_minimum_cut_no_cutoff(self):
        # preflow_push does not support cutoff for minimum-cut queries.
        G = self.G
        pytest.raises(
            nx.NetworkXError,
            nx.minimum_cut,
            G,
            "x",
            "y",
            flow_func=preflow_push,
            cutoff=1.0,
        )
        pytest.raises(
            nx.NetworkXError,
            nx.minimum_cut_value,
            G,
            "x",
            "y",
            flow_func=preflow_push,
            cutoff=1.0,
        )

    def test_kwargs(self):
        # Algorithm-specific kwargs must be forwarded to the flow_func.
        G = self.H
        fv = 1.0
        to_test = (
            (shortest_augmenting_path, {"two_phase": True}),
            (preflow_push, {"global_relabel_freq": 5}),
        )
        for interface_func in interface_funcs:
            for flow_func, kwargs in to_test:
                errmsg = (
                    f"Assertion failed in function: {flow_func.__name__} "
                    f"in interface {interface_func.__name__}"
                )
                result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
                if interface_func in max_min_funcs:
                    result = result[0]
                assert fv == result, errmsg

    def test_kwargs_default_flow_func(self):
        # Algorithm-specific kwargs without an explicit flow_func are an error.
        G = self.H
        for interface_func in interface_funcs:
            pytest.raises(
                nx.NetworkXError, interface_func, G, 0, 1, global_relabel_freq=2
            )

    def test_reusing_residual(self):
        # A pre-built residual network may be reused across repeated calls
        # without changing the computed flow value.
        G = self.G
        fv = 3.0
        s, t = "x", "y"
        R = build_residual_network(G, "capacity")
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                errmsg = (
                    f"Assertion failed in function: {flow_func.__name__} "
                    f"in interface {interface_func.__name__}"
                )
                for i in range(3):
                    result = interface_func(
                        G, "x", "y", flow_func=flow_func, residual=R
                    )
                    if interface_func in max_min_funcs:
                        result = result[0]
                    assert fv == result, errmsg
494
+
495
+ # Tests specific to one algorithm
496
def test_preflow_push_global_relabel_freq():
    """global_relabel_freq=None disables relabeling; negative values raise."""
    G = nx.DiGraph()
    G.add_edge(1, 2, capacity=1)
    residual = preflow_push(G, 1, 2, global_relabel_freq=None)
    assert residual.graph["flow_value"] == 1
    with pytest.raises(nx.NetworkXError):
        preflow_push(G, 1, 2, global_relabel_freq=-1)
503
+
504
def test_preflow_push_makes_enough_space():
    """Regression test for ticket #1542 (preflow_push with value_only=False)."""
    G = nx.DiGraph()
    # Two overlapping paths 0->1->3 and 1->2->3, all unit capacity.
    for u, v in [(0, 1), (1, 3), (1, 2), (2, 3)]:
        G.add_edge(u, v, capacity=1)
    residual = preflow_push(G, 0, 3, value_only=False)
    assert residual.graph["flow_value"] == 1
512
+
513
def test_shortest_augmenting_path_two_phase():
    """Both phases of shortest_augmenting_path find the same max flow."""
    num_paths, path_len = 5, 1000
    G = nx.DiGraph()
    # num_paths long, disjoint, unit-capacity paths from "s" to "t".
    for i in range(num_paths):
        G.add_edge("s", (i, 0), capacity=1)
        nx.add_path(G, ((i, j) for j in range(path_len)), capacity=1)
        G.add_edge((i, path_len - 1), "t", capacity=1)
    for two_phase in (True, False):
        R = shortest_augmenting_path(G, "s", "t", two_phase=two_phase)
        assert R.graph["flow_value"] == num_paths
526
+
527
class TestCutoff:
    """Tests for the ``cutoff`` parameter, which lets algorithms stop
    early once the flow value reaches the given threshold."""

    def test_cutoff(self):
        k = 5
        p = 1000
        G = nx.DiGraph()
        # k disjoint paths of length p, each of capacity 2, so the true
        # max flow is 2*k; with cutoff=k the reported value may stop
        # anywhere in [k, 2*k].
        for i in range(k):
            G.add_edge("s", (i, 0), capacity=2)
            nx.add_path(G, ((i, j) for j in range(p)), capacity=2)
            G.add_edge((i, p - 1), "t", capacity=2)
        R = shortest_augmenting_path(G, "s", "t", two_phase=True, cutoff=k)
        assert k <= R.graph["flow_value"] <= (2 * k)
        R = shortest_augmenting_path(G, "s", "t", two_phase=False, cutoff=k)
        assert k <= R.graph["flow_value"] <= (2 * k)
        R = edmonds_karp(G, "s", "t", cutoff=k)
        assert k <= R.graph["flow_value"] <= (2 * k)
        R = dinitz(G, "s", "t", cutoff=k)
        assert k <= R.graph["flow_value"] <= (2 * k)
        R = boykov_kolmogorov(G, "s", "t", cutoff=k)
        assert k <= R.graph["flow_value"] <= (2 * k)

    def test_complete_graph_cutoff(self):
        # On K5 with unit capacities every augmenting path has value 1,
        # so the cutoff is hit exactly.
        G = nx.complete_graph(5)
        nx.set_edge_attributes(G, {(u, v): 1 for u, v in G.edges()}, "capacity")
        for flow_func in [
            shortest_augmenting_path,
            edmonds_karp,
            dinitz,
            boykov_kolmogorov,
        ]:
            for cutoff in [3, 2, 1]:
                result = nx.maximum_flow_value(
                    G, 0, 4, flow_func=flow_func, cutoff=cutoff
                )
                assert cutoff == result, f"cutoff error in {flow_func.__name__}"
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Maximum flow algorithms test suite on large graphs.
2
+ """
3
+
4
+ import bz2
5
+ import importlib.resources
6
+ import os
7
+ import pickle
8
+
9
+ import pytest
10
+
11
+ import networkx as nx
12
+ from networkx.algorithms.flow import (
13
+ boykov_kolmogorov,
14
+ build_flow_dict,
15
+ build_residual_network,
16
+ dinitz,
17
+ edmonds_karp,
18
+ preflow_push,
19
+ shortest_augmenting_path,
20
+ )
21
+
22
+ flow_funcs = [
23
+ boykov_kolmogorov,
24
+ dinitz,
25
+ edmonds_karp,
26
+ preflow_push,
27
+ shortest_augmenting_path,
28
+ ]
29
+
30
+
31
def gen_pyramid(N):
    """Build a pyramid-shaped DiGraph with a unique max flow of value 1.

    Node ``(i, j)`` is the j-th node in layer i; layer i has i+1 nodes.
    Capacities are chosen so that every arc is saturated by the optimal
    flow (the exact update order of ``cap`` below is what makes the
    capacities sum correctly — do not reorder).
    """
    # This graph admits a flow of value 1 for which every arc is at
    # capacity (except the arcs incident to the sink which have
    # infinite capacity).
    G = nx.DiGraph()

    for i in range(N - 1):
        cap = 1.0 / (i + 2)
        for j in range(i + 1):
            G.add_edge((i, j), (i + 1, j), capacity=cap)
            cap = 1.0 / (i + 1) - cap
            G.add_edge((i, j), (i + 1, j + 1), capacity=cap)
            cap = 1.0 / (i + 2) - cap

    # The whole last layer drains into the sink via uncapacitated
    # (infinite-capacity) arcs.
    for j in range(N):
        G.add_edge((N - 1, j), "t")

    return G
50
+
51
def read_graph(name):
    """Load a bz2-compressed pickled test graph shipped with this package."""
    path = importlib.resources.files(
        "networkx.algorithms.flow.tests"
    ) / f"{name}.gpickle.bz2"
    # Trusted test fixture data shipped with networkx, so pickle is fine.
    with bz2.BZ2File(path, "rb") as fh:
        return pickle.load(fh)
61
+
62
def validate_flows(G, s, t, soln_value, R, flow_func):
    """Assert that residual network R encodes a legal s-t flow of value
    soln_value on G: correct value, capacity and non-negativity
    constraints, and flow conservation at every node."""
    errmsg = f"Assertion failed in function: {flow_func.__name__}"
    assert R.graph["flow_value"] == soln_value, errmsg
    flow_dict = build_flow_dict(G, R)
    # The flow dict must mirror G's node and adjacency structure exactly.
    assert set(flow_dict) == set(G), errmsg
    excess = {node: 0 for node in flow_dict}
    for u, successors in flow_dict.items():
        assert set(successors) == set(G[u]), errmsg
        for v, flow in successors.items():
            # Capacity and non-negativity constraints per edge.
            assert 0 <= flow <= G[u][v].get("capacity", float("inf")), errmsg
            excess[u] -= flow
            excess[v] += flow
    # Conservation: net outflow soln_value at s, net inflow at t,
    # balanced everywhere else.
    for u, exc in excess.items():
        if u == s:
            assert exc == -soln_value, errmsg
        elif u == t:
            assert exc == soln_value, errmsg
        else:
            assert exc == 0, errmsg
85
+
86
class TestMaxflowLargeGraph:
    """Max-flow tests on large synthetic and pickled benchmark graphs.

    The gl1/gw1/wlm3 graphs are loaded via :func:`read_graph` and have
    known optimal flow values."""

    def test_complete_graph(self):
        N = 50
        G = nx.complete_graph(N)
        nx.set_edge_attributes(G, 5, "capacity")
        R = build_residual_network(G, "capacity")
        kwargs = {"residual": R}

        for flow_func in flow_funcs:
            kwargs["flow_func"] = flow_func
            errmsg = f"Assertion failed in function: {flow_func.__name__}"
            flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs)
            # Node 1 has N-1 incident edges of capacity 5 each.
            assert flow_value == 5 * (N - 1), errmsg

    def test_pyramid(self):
        N = 10
        # N = 100 # this gives a graph with 5051 nodes
        G = gen_pyramid(N)
        R = build_residual_network(G, "capacity")
        kwargs = {"residual": R}

        for flow_func in flow_funcs:
            kwargs["flow_func"] = flow_func
            errmsg = f"Assertion failed in function: {flow_func.__name__}"
            flow_value = nx.maximum_flow_value(G, (0, 0), "t", **kwargs)
            # Floating-point capacities: compare approximately.
            assert flow_value == pytest.approx(1.0, abs=1e-7)

    def test_gl1(self):
        G = read_graph("gl1")
        s = 1
        t = len(G)
        R = build_residual_network(G, "capacity")
        kwargs = {"residual": R}

        # do one flow_func to save time
        flow_func = flow_funcs[0]
        validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), flow_func)

        # for flow_func in flow_funcs:
        #     validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs),
        #                    flow_func)

    @pytest.mark.slow
    def test_gw1(self):
        G = read_graph("gw1")
        s = 1
        t = len(G)
        R = build_residual_network(G, "capacity")
        kwargs = {"residual": R}

        for flow_func in flow_funcs:
            validate_flows(G, s, t, 1202018, flow_func(G, s, t, **kwargs), flow_func)

    def test_wlm3(self):
        G = read_graph("wlm3")
        s = 1
        t = len(G)
        R = build_residual_network(G, "capacity")
        kwargs = {"residual": R}

        # do one flow_func to save time
        flow_func = flow_funcs[0]
        validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), flow_func)

        # for flow_func in flow_funcs:
        #     validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs),
        #                    flow_func)

    def test_preflow_push_global_relabel(self):
        # Exercise periodic global relabeling on a large instance.
        G = read_graph("gw1")
        R = preflow_push(G, 1, len(G), global_relabel_freq=50)
        assert R.graph["flow_value"] == 1202018
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_mincost.py ADDED
@@ -0,0 +1,476 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import bz2
2
+ import importlib.resources
3
+ import os
4
+ import pickle
5
+
6
+ import pytest
7
+
8
+ import networkx as nx
9
+
10
+
11
class TestMinCostFlow:
    """Tests for the min-cost-flow solvers: ``network_simplex`` and
    ``capacity_scaling``, plus the convenience wrappers
    ``min_cost_flow``, ``min_cost_flow_cost``, ``max_flow_min_cost``
    and ``cost_of_flow``.  Most tests check both solvers against a
    hand-verified optimal solution."""

    def test_simple_digraph(self):
        G = nx.DiGraph()
        G.add_node("a", demand=-5)
        G.add_node("d", demand=5)
        G.add_edge("a", "b", weight=3, capacity=4)
        G.add_edge("a", "c", weight=6, capacity=10)
        G.add_edge("b", "d", weight=1, capacity=9)
        G.add_edge("c", "d", weight=2, capacity=5)
        flowCost, H = nx.network_simplex(G)
        soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}}
        assert flowCost == 24
        assert nx.min_cost_flow_cost(G) == 24
        assert H == soln
        assert nx.min_cost_flow(G) == soln
        assert nx.cost_of_flow(G, H) == 24

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 24
        assert nx.cost_of_flow(G, H) == 24
        assert H == soln

    def test_negcycle_infcap(self):
        # A negative cycle through uncapacitated arcs: network_simplex
        # reports infeasibility, capacity_scaling unboundedness.
        G = nx.DiGraph()
        G.add_node("s", demand=-5)
        G.add_node("t", demand=5)
        G.add_edge("s", "a", weight=1, capacity=3)
        G.add_edge("a", "b", weight=3)
        G.add_edge("c", "a", weight=-6)
        G.add_edge("b", "d", weight=1)
        G.add_edge("d", "c", weight=-2)
        G.add_edge("d", "t", weight=1, capacity=3)
        pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)

    def test_sum_demands_not_zero(self):
        # Demands must balance to zero for a feasible instance.
        G = nx.DiGraph()
        G.add_node("s", demand=-5)
        G.add_node("t", demand=4)
        G.add_edge("s", "a", weight=1, capacity=3)
        G.add_edge("a", "b", weight=3)
        G.add_edge("a", "c", weight=-6)
        G.add_edge("b", "d", weight=1)
        G.add_edge("c", "d", weight=-2)
        G.add_edge("d", "t", weight=1, capacity=3)
        pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)

    def test_no_flow_satisfying_demands(self):
        # Demands balance, but capacities make them unsatisfiable.
        G = nx.DiGraph()
        G.add_node("s", demand=-5)
        G.add_node("t", demand=5)
        G.add_edge("s", "a", weight=1, capacity=3)
        G.add_edge("a", "b", weight=3)
        G.add_edge("a", "c", weight=-6)
        G.add_edge("b", "d", weight=1)
        G.add_edge("c", "d", weight=-2)
        G.add_edge("d", "t", weight=1, capacity=3)
        pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)

    def test_transshipment(self):
        # Multi-source / multi-sink transshipment instance, optimal cost 41.
        G = nx.DiGraph()
        G.add_node("a", demand=1)
        G.add_node("b", demand=-2)
        G.add_node("c", demand=-2)
        G.add_node("d", demand=3)
        G.add_node("e", demand=-4)
        G.add_node("f", demand=-4)
        G.add_node("g", demand=3)
        G.add_node("h", demand=2)
        G.add_node("r", demand=3)
        G.add_edge("a", "c", weight=3)
        G.add_edge("r", "a", weight=2)
        G.add_edge("b", "a", weight=9)
        G.add_edge("r", "c", weight=0)
        G.add_edge("b", "r", weight=-6)
        G.add_edge("c", "d", weight=5)
        G.add_edge("e", "r", weight=4)
        G.add_edge("e", "f", weight=3)
        G.add_edge("h", "b", weight=4)
        G.add_edge("f", "d", weight=7)
        G.add_edge("f", "h", weight=12)
        G.add_edge("g", "d", weight=12)
        G.add_edge("f", "g", weight=-1)
        G.add_edge("h", "g", weight=-10)
        flowCost, H = nx.network_simplex(G)
        soln = {
            "a": {"c": 0},
            "b": {"a": 0, "r": 2},
            "c": {"d": 3},
            "d": {},
            "e": {"r": 3, "f": 1},
            "f": {"d": 0, "g": 3, "h": 2},
            "g": {"d": 0},
            "h": {"b": 0, "g": 0},
            "r": {"a": 1, "c": 1},
        }
        assert flowCost == 41
        assert nx.min_cost_flow_cost(G) == 41
        assert H == soln
        assert nx.min_cost_flow(G) == soln
        assert nx.cost_of_flow(G, H) == 41

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 41
        assert nx.cost_of_flow(G, H) == 41
        assert H == soln

    def test_max_flow_min_cost(self):
        # Custom capacity/weight attribute names ("bandwidth"/"cost").
        G = nx.DiGraph()
        G.add_edge("s", "a", bandwidth=6)
        G.add_edge("s", "c", bandwidth=10, cost=10)
        G.add_edge("a", "b", cost=6)
        G.add_edge("b", "d", bandwidth=8, cost=7)
        G.add_edge("c", "d", cost=10)
        G.add_edge("d", "t", bandwidth=5, cost=5)
        soln = {
            "s": {"a": 5, "c": 0},
            "a": {"b": 5},
            "b": {"d": 5},
            "c": {"d": 0},
            "d": {"t": 5},
            "t": {},
        }
        flow = nx.max_flow_min_cost(G, "s", "t", capacity="bandwidth", weight="cost")
        assert flow == soln
        assert nx.cost_of_flow(G, flow, weight="cost") == 90

        # Emulate max-flow-min-cost with capacity_scaling by adding a
        # strongly negative return arc t -> s.
        G.add_edge("t", "s", cost=-100)
        flowCost, flow = nx.capacity_scaling(G, capacity="bandwidth", weight="cost")
        G.remove_edge("t", "s")
        assert flowCost == -410
        assert flow["t"]["s"] == 5
        del flow["t"]["s"]
        assert flow == soln
        assert nx.cost_of_flow(G, flow, weight="cost") == 90

    def test_digraph1(self):
        # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied
        # Mathematical Programming. Addison-Wesley, 1977.
        G = nx.DiGraph()
        G.add_node(1, demand=-20)
        G.add_node(4, demand=5)
        G.add_node(5, demand=15)
        G.add_edges_from(
            [
                (1, 2, {"capacity": 15, "weight": 4}),
                (1, 3, {"capacity": 8, "weight": 4}),
                (2, 3, {"weight": 2}),
                (2, 4, {"capacity": 4, "weight": 2}),
                (2, 5, {"capacity": 10, "weight": 6}),
                (3, 4, {"capacity": 15, "weight": 1}),
                (3, 5, {"capacity": 5, "weight": 3}),
                (4, 5, {"weight": 2}),
                (5, 3, {"capacity": 4, "weight": 1}),
            ]
        )
        flowCost, H = nx.network_simplex(G)
        soln = {
            1: {2: 12, 3: 8},
            2: {3: 8, 4: 4, 5: 0},
            3: {4: 11, 5: 5},
            4: {5: 10},
            5: {3: 0},
        }
        assert flowCost == 150
        assert nx.min_cost_flow_cost(G) == 150
        assert H == soln
        assert nx.min_cost_flow(G) == soln
        assert nx.cost_of_flow(G, H) == 150

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 150
        assert H == soln
        assert nx.cost_of_flow(G, H) == 150

    def test_digraph2(self):
        # Example from ticket #430 from mfrasca. Original source:
        # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11.
        G = nx.DiGraph()
        G.add_edge("s", 1, capacity=12)
        G.add_edge("s", 2, capacity=6)
        G.add_edge("s", 3, capacity=14)
        G.add_edge(1, 2, capacity=11, weight=4)
        G.add_edge(2, 3, capacity=9, weight=6)
        G.add_edge(1, 4, capacity=5, weight=5)
        G.add_edge(1, 5, capacity=2, weight=12)
        G.add_edge(2, 5, capacity=4, weight=4)
        G.add_edge(2, 6, capacity=2, weight=6)
        G.add_edge(3, 6, capacity=31, weight=3)
        G.add_edge(4, 5, capacity=18, weight=4)
        G.add_edge(5, 6, capacity=9, weight=5)
        G.add_edge(4, "t", capacity=3)
        G.add_edge(5, "t", capacity=7)
        G.add_edge(6, "t", capacity=22)
        flow = nx.max_flow_min_cost(G, "s", "t")
        soln = {
            1: {2: 6, 4: 5, 5: 1},
            2: {3: 6, 5: 4, 6: 2},
            3: {6: 20},
            4: {5: 2, "t": 3},
            5: {6: 0, "t": 7},
            6: {"t": 22},
            "s": {1: 12, 2: 6, 3: 14},
            "t": {},
        }
        assert flow == soln

        # Same max-flow-min-cost via capacity_scaling and a negative
        # return arc; the return arc carries the full flow value 32.
        G.add_edge("t", "s", weight=-100)
        flowCost, flow = nx.capacity_scaling(G)
        G.remove_edge("t", "s")
        assert flow["t"]["s"] == 32
        assert flowCost == -3007
        del flow["t"]["s"]
        assert flow == soln
        assert nx.cost_of_flow(G, flow) == 193

    def test_digraph3(self):
        """Combinatorial Optimization: Algorithms and Complexity,
        Papadimitriou Steiglitz at page 140 has an example, 7.1, but that
        admits multiple solutions, so I alter it a bit. From ticket #430
        by mfrasca."""

        # Edge attributes are stored under the integer keys 0 (capacity)
        # and 1 (weight), exercising non-string attribute names.
        G = nx.DiGraph()
        G.add_edge("s", "a")
        G["s"]["a"].update({0: 2, 1: 4})
        G.add_edge("s", "b")
        G["s"]["b"].update({0: 2, 1: 1})
        G.add_edge("a", "b")
        G["a"]["b"].update({0: 5, 1: 2})
        G.add_edge("a", "t")
        G["a"]["t"].update({0: 1, 1: 5})
        G.add_edge("b", "a")
        G["b"]["a"].update({0: 1, 1: 3})
        G.add_edge("b", "t")
        G["b"]["t"].update({0: 3, 1: 2})

        "PS.ex.7.1: testing main function"
        sol = nx.max_flow_min_cost(G, "s", "t", capacity=0, weight=1)
        flow = sum(v for v in sol["s"].values())
        assert 4 == flow
        assert 23 == nx.cost_of_flow(G, sol, weight=1)
        assert sol["s"] == {"a": 2, "b": 2}
        assert sol["a"] == {"b": 1, "t": 1}
        assert sol["b"] == {"a": 0, "t": 3}
        assert sol["t"] == {}

        G.add_edge("t", "s")
        G["t"]["s"].update({1: -100})
        flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1)
        G.remove_edge("t", "s")
        flow = sum(v for v in sol["s"].values())
        assert 4 == flow
        assert sol["t"]["s"] == 4
        assert flowCost == -377
        del sol["t"]["s"]
        assert sol["s"] == {"a": 2, "b": 2}
        assert sol["a"] == {"b": 1, "t": 1}
        assert sol["b"] == {"a": 0, "t": 3}
        assert sol["t"] == {}
        assert nx.cost_of_flow(G, sol, weight=1) == 23

    def test_zero_capacity_edges(self):
        """Address issue raised in ticket #617 by arv."""
        G = nx.DiGraph()
        G.add_edges_from(
            [
                (1, 2, {"capacity": 1, "weight": 1}),
                (1, 5, {"capacity": 1, "weight": 1}),
                (2, 3, {"capacity": 0, "weight": 1}),
                (2, 5, {"capacity": 1, "weight": 1}),
                (5, 3, {"capacity": 2, "weight": 1}),
                (5, 4, {"capacity": 0, "weight": 1}),
                (3, 4, {"capacity": 2, "weight": 1}),
            ]
        )
        G.nodes[1]["demand"] = -1
        G.nodes[2]["demand"] = -1
        G.nodes[4]["demand"] = 2

        flowCost, H = nx.network_simplex(G)
        soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}}
        assert flowCost == 6
        assert nx.min_cost_flow_cost(G) == 6
        assert H == soln
        assert nx.min_cost_flow(G) == soln
        assert nx.cost_of_flow(G, H) == 6

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 6
        assert H == soln
        assert nx.cost_of_flow(G, H) == 6

    def test_digon(self):
        """Check if digons are handled properly. Taken from ticket
        #618 by arv."""
        nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})]
        edges = [
            (1, 2, {"capacity": 3, "weight": 600000}),
            (2, 1, {"capacity": 2, "weight": 0}),
            (2, 3, {"capacity": 5, "weight": 714285}),
            (3, 2, {"capacity": 2, "weight": 0}),
        ]
        G = nx.DiGraph(edges)
        G.add_nodes_from(nodes)
        flowCost, H = nx.network_simplex(G)
        soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}}
        assert flowCost == 2857140
        assert nx.min_cost_flow_cost(G) == 2857140
        assert H == soln
        assert nx.min_cost_flow(G) == soln
        assert nx.cost_of_flow(G, H) == 2857140

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 2857140
        assert H == soln
        assert nx.cost_of_flow(G, H) == 2857140

    def test_deadend(self):
        """Check if one-node cycles are handled properly. Taken from ticket
        #2906 from @sshraven."""
        G = nx.DiGraph()

        G.add_nodes_from(range(5), demand=0)
        G.nodes[4]["demand"] = -13
        G.nodes[3]["demand"] = 13

        # Node 3's demand cannot be reached from node 4: infeasible.
        G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1)
        pytest.raises(nx.NetworkXUnfeasible, nx.min_cost_flow, G)

    def test_infinite_capacity_neg_digon(self):
        """An infinite capacity negative cost digon results in an unbounded
        instance."""
        nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})]
        edges = [
            (1, 2, {"weight": -600}),
            (2, 1, {"weight": 0}),
            (2, 3, {"capacity": 5, "weight": 714285}),
            (3, 2, {"capacity": 2, "weight": 0}),
        ]
        G = nx.DiGraph(edges)
        G.add_nodes_from(nodes)
        pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)

    def test_finite_capacity_neg_digon(self):
        """The digon should receive the maximum amount of flow it can handle.
        Taken from ticket #749 by @chuongdo."""
        G = nx.DiGraph()
        G.add_edge("a", "b", capacity=1, weight=-1)
        G.add_edge("b", "a", capacity=1, weight=-1)
        min_cost = -2
        assert nx.min_cost_flow_cost(G) == min_cost

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == -2
        assert H == {"a": {"b": 1}, "b": {"a": 1}}
        assert nx.cost_of_flow(G, H) == -2

    def test_multidigraph(self):
        """Multidigraphs are acceptable."""
        G = nx.MultiDiGraph()
        G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity")
        flowCost, H = nx.network_simplex(G)
        assert flowCost == 0
        assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}}

        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 0
        assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}}

    def test_negative_selfloops(self):
        """Negative selfloops should cause an exception if uncapacitated and
        always be saturated otherwise.
        """
        G = nx.DiGraph()
        G.add_edge(1, 1, weight=-1)
        pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
        G[1][1]["capacity"] = 2
        flowCost, H = nx.network_simplex(G)
        assert flowCost == -2
        assert H == {1: {1: 2}}
        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == -2
        assert H == {1: {1: 2}}

        # Same checks with keyed parallel selfloops on a MultiDiGraph.
        G = nx.MultiDiGraph()
        G.add_edge(1, 1, "x", weight=-1)
        G.add_edge(1, 1, "y", weight=1)
        pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
        G[1][1]["x"]["capacity"] = 2
        flowCost, H = nx.network_simplex(G)
        assert flowCost == -2
        assert H == {1: {1: {"x": 2, "y": 0}}}
        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == -2
        assert H == {1: {1: {"x": 2, "y": 0}}}

    def test_bone_shaped(self):
        # From #1283
        G = nx.DiGraph()
        G.add_node(0, demand=-4)
        G.add_node(1, demand=2)
        G.add_node(2, demand=2)
        G.add_node(3, demand=4)
        G.add_node(4, demand=-2)
        G.add_node(5, demand=-2)
        G.add_edge(0, 1, capacity=4)
        G.add_edge(0, 2, capacity=4)
        G.add_edge(4, 3, capacity=4)
        G.add_edge(5, 3, capacity=4)
        G.add_edge(0, 3, capacity=0)
        flowCost, H = nx.network_simplex(G)
        assert flowCost == 0
        assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}}
        flowCost, H = nx.capacity_scaling(G)
        assert flowCost == 0
        assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}}

    def test_exceptions(self):
        # Input-validation paths; commented-out lines mark cases the
        # capacity_scaling implementation does not (yet) validate.
        G = nx.Graph()
        pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G)
        pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G)
        G = nx.MultiGraph()
        pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G)
        pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G)
        G = nx.DiGraph()
        pytest.raises(nx.NetworkXError, nx.network_simplex, G)
        # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G)
        G.add_node(0, demand=float("inf"))
        pytest.raises(nx.NetworkXError, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
        G.nodes[0]["demand"] = 0
        G.add_node(1, demand=0)
        G.add_edge(0, 1, weight=-float("inf"))
        pytest.raises(nx.NetworkXError, nx.network_simplex, G)
        pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
        G[0][1]["weight"] = 0
        G.add_edge(0, 0, weight=float("inf"))
        pytest.raises(nx.NetworkXError, nx.network_simplex, G)
        # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G)
        G[0][0]["weight"] = 0
        G[0][1]["capacity"] = -1
        pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
        # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
        G[0][1]["capacity"] = 0
        G[0][0]["capacity"] = -1
        pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
        # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)

    def test_large(self):
        # Large pickled benchmark instance with a known optimal cost.
        fname = (
            importlib.resources.files("networkx.algorithms.flow.tests")
            / "netgen-2.gpickle.bz2"
        )
        with bz2.BZ2File(fname, "rb") as f:
            G = pickle.load(f)
        flowCost, flowDict = nx.network_simplex(G)
        assert 6749969302 == flowCost
        assert 6749969302 == nx.cost_of_flow(G, flowDict)
        flowCost, flowDict = nx.capacity_scaling(G)
        assert 6749969302 == flowCost
        assert 6749969302 == nx.cost_of_flow(G, flowDict)
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import bz2
2
+ import importlib.resources
3
+ import os
4
+ import pickle
5
+
6
+ import pytest
7
+
8
+ import networkx as nx
9
+
10
+
11
@pytest.fixture
def simple_flow_graph():
    """Small DiGraph with node demands and weighted, capacitated edges."""
    G = nx.DiGraph()
    demands = {"a": 0, "b": -5, "c": 50000000, "d": -49999995}
    for node, demand in demands.items():
        G.add_node(node, demand=demand)
    edges = [  # (tail, head, weight, capacity)
        ("a", "b", 3, 4),
        ("a", "c", 6, 10),
        ("b", "d", 1, 9),
        ("c", "d", 2, 5),
    ]
    for u, v, w, cap in edges:
        G.add_edge(u, v, weight=w, capacity=cap)
    return G
23
+
24
+
25
@pytest.fixture
def simple_no_flow_graph():
    """DiGraph whose demands cannot be satisfied by any feasible flow."""
    G = nx.DiGraph()
    G.add_node("s", demand=-5)
    G.add_node("t", demand=5)
    edges = [  # (tail, head, attributes)
        ("s", "a", {"weight": 1, "capacity": 3}),
        ("a", "b", {"weight": 3}),
        ("a", "c", {"weight": -6}),
        ("b", "d", {"weight": 1}),
        ("c", "d", {"weight": -2}),
        ("d", "t", {"weight": 1, "capacity": 3}),
    ]
    for u, v, attrs in edges:
        G.add_edge(u, v, **attrs)
    return G
37
+
38
+
39
def get_flowcost_from_flowdict(G, flowDict):
    """Return the total cost of ``flowDict``: sum of flow times edge weight."""
    return sum(
        flow * G[u][v]["weight"]
        for u, targets in flowDict.items()
        for v, flow in targets.items()
    )
46
+
47
+
48
def test_infinite_demand_raise(simple_flow_graph):
    """An infinite node demand must be rejected with NetworkXError."""
    G = simple_flow_graph
    nx.set_node_attributes(G, {"a": {"demand": float("inf")}})
    with pytest.raises(nx.NetworkXError):
        nx.network_simplex(G)
53
+
54
+
55
def test_neg_infinite_demand_raise(simple_flow_graph):
    """A negatively infinite node demand must be rejected with NetworkXError."""
    G = simple_flow_graph
    nx.set_node_attributes(G, {"a": {"demand": -float("inf")}})
    with pytest.raises(nx.NetworkXError):
        nx.network_simplex(G)
60
+
61
+
62
def test_infinite_weight_raise(simple_flow_graph):
    """Infinite edge weights must be rejected with NetworkXError."""
    G = simple_flow_graph
    infinite = float("inf")
    nx.set_edge_attributes(
        G, {("a", "b"): {"weight": infinite}, ("b", "d"): {"weight": infinite}}
    )
    with pytest.raises(nx.NetworkXError):
        nx.network_simplex(G)
69
+
70
+
71
def test_nonzero_net_demand_raise(simple_flow_graph):
    """Demands that do not sum to zero make the problem infeasible."""
    G = simple_flow_graph
    nx.set_node_attributes(G, {"b": {"demand": -4}})
    with pytest.raises(nx.NetworkXUnfeasible):
        nx.network_simplex(G)
75
+
76
+
77
def test_negative_capacity_raise(simple_flow_graph):
    """A negative edge capacity makes the problem infeasible."""
    G = simple_flow_graph
    nx.set_edge_attributes(G, {("a", "b"): {"weight": 1}, ("b", "d"): {"capacity": -9}})
    with pytest.raises(nx.NetworkXUnfeasible):
        nx.network_simplex(G)
81
+
82
+
83
def test_no_flow_satisfying_demands(simple_no_flow_graph):
    """The fixture graph admits no feasible flow."""
    with pytest.raises(nx.NetworkXUnfeasible):
        nx.network_simplex(simple_no_flow_graph)
86
+
87
+
88
def test_sum_demands_not_zero(simple_no_flow_graph):
    """Perturbing one demand so the total is nonzero yields infeasibility."""
    G = simple_no_flow_graph
    nx.set_node_attributes(G, {"t": {"demand": 4}})
    with pytest.raises(nx.NetworkXUnfeasible):
        nx.network_simplex(G)
92
+
93
+
94
def test_google_or_tools_example():
    """Min-cost flow example from
    https://developers.google.com/optimization/flow/mincostflow
    """
    arcs = [  # (tail, head, capacity, unit cost) -- order matches the source
        (0, 1, 15, 4),
        (0, 2, 8, 4),
        (1, 2, 20, 2),
        (1, 3, 4, 2),
        (1, 4, 10, 6),
        (2, 3, 15, 1),
        (2, 4, 4, 3),
        (3, 4, 20, 2),
        (4, 2, 5, 3),
    ]
    supplies = [20, 0, 0, -5, -15]

    G = nx.DiGraph()
    for node, supply in enumerate(supplies):
        G.add_node(node, demand=-supply)  # demand is the negated supply
    for u, v, cap, unit_cost in arcs:
        G.add_edge(u, v, weight=unit_cost, capacity=cap)

    flowCost, flowDict = nx.network_simplex(G)
    assert flowCost == 150
    assert get_flowcost_from_flowdict(G, flowDict) == flowCost
117
+
118
+
119
def test_google_or_tools_example2():
    """Second min-cost flow example from
    https://developers.google.com/optimization/flow/mincostflow
    """
    arcs = [  # (tail, head, capacity, unit cost) -- order matches the source
        (0, 1, 15, 4),
        (0, 2, 8, 4),
        (1, 2, 20, 2),
        (1, 3, 4, 2),
        (1, 4, 10, 6),
        (2, 3, 15, 1),
        (2, 4, 4, 3),
        (3, 4, 20, 2),
        (4, 2, 5, 3),
        (3, 5, 10, 4),
    ]
    supplies = [23, 0, 0, -5, -15, -3]

    G = nx.DiGraph()
    for node, supply in enumerate(supplies):
        G.add_node(node, demand=-supply)  # demand is the negated supply
    for u, v, cap, unit_cost in arcs:
        G.add_edge(u, v, weight=unit_cost, capacity=cap)

    flowCost, flowDict = nx.network_simplex(G)
    assert flowCost == 183
    assert get_flowcost_from_flowdict(G, flowDict) == flowCost
142
+
143
+
144
def test_large():
    """network_simplex reproduces the known optimum on a large bundled instance."""
    path = (
        importlib.resources.files("networkx.algorithms.flow.tests")
        / "netgen-2.gpickle.bz2"
    )
    with bz2.BZ2File(path, "rb") as handle:
        G = pickle.load(handle)
    cost, flows = nx.network_simplex(G)
    assert cost == 6749969302
    assert nx.cost_of_flow(G, flows) == 6749969302
155
+
156
+
157
def test_simple_digraph():
    """network_simplex finds the optimal flow on a small 4-node DiGraph."""
    G = nx.DiGraph()
    G.add_node("a", demand=-5)
    G.add_node("d", demand=5)
    for u, v, w, cap in [
        ("a", "b", 3, 4),
        ("a", "c", 6, 10),
        ("b", "d", 1, 9),
        ("c", "d", 2, 5),
    ]:
        G.add_edge(u, v, weight=w, capacity=cap)
    cost, flows = nx.network_simplex(G)
    assert cost == 24
    assert nx.min_cost_flow_cost(G) == 24
    assert flows == {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}}
170
+
171
+
172
def test_negcycle_infcap():
    """Instance with a negative cycle through uncapacitated edges is infeasible."""
    G = nx.DiGraph()
    G.add_node("s", demand=-5)
    G.add_node("t", demand=5)
    for u, v, attrs in [
        ("s", "a", {"weight": 1, "capacity": 3}),
        ("a", "b", {"weight": 3}),
        ("c", "a", {"weight": -6}),
        ("b", "d", {"weight": 1}),
        ("d", "c", {"weight": -2}),
        ("d", "t", {"weight": 1, "capacity": 3}),
    ]:
        G.add_edge(u, v, **attrs)
    with pytest.raises(nx.NetworkXUnfeasible):
        nx.network_simplex(G)
183
+
184
+
185
def test_transshipment():
    """Transshipment instance with several sources and sinks."""
    demands = {  # insertion order preserved from the original construction
        "a": 1,
        "b": -2,
        "c": -2,
        "d": 3,
        "e": -4,
        "f": -4,
        "g": 3,
        "h": 2,
        "r": 3,
    }
    weighted_edges = [
        ("a", "c", 3),
        ("r", "a", 2),
        ("b", "a", 9),
        ("r", "c", 0),
        ("b", "r", -6),
        ("c", "d", 5),
        ("e", "r", 4),
        ("e", "f", 3),
        ("h", "b", 4),
        ("f", "d", 7),
        ("f", "h", 12),
        ("g", "d", 12),
        ("f", "g", -1),
        ("h", "g", -10),
    ]
    G = nx.DiGraph()
    for node, demand in demands.items():
        G.add_node(node, demand=demand)
    for u, v, w in weighted_edges:
        G.add_edge(u, v, weight=w)

    cost, flows = nx.network_simplex(G)
    assert cost == 41
    assert flows == {
        "a": {"c": 0},
        "b": {"a": 0, "r": 2},
        "c": {"d": 3},
        "d": {},
        "e": {"r": 3, "f": 1},
        "f": {"d": 0, "g": 3, "h": 2},
        "g": {"d": 0},
        "h": {"b": 0, "g": 0},
        "r": {"a": 1, "c": 1},
    }
224
+
225
+
226
def test_digraph1():
    """Instance from Bradley, Hax & Magnanti, "Applied Mathematical
    Programming" (Addison-Wesley, 1977)."""
    G = nx.DiGraph()
    G.add_node(1, demand=-20)
    G.add_node(4, demand=5)
    G.add_node(5, demand=15)
    arcs = [  # (tail, head, attributes)
        (1, 2, {"capacity": 15, "weight": 4}),
        (1, 3, {"capacity": 8, "weight": 4}),
        (2, 3, {"weight": 2}),
        (2, 4, {"capacity": 4, "weight": 2}),
        (2, 5, {"capacity": 10, "weight": 6}),
        (3, 4, {"capacity": 15, "weight": 1}),
        (3, 5, {"capacity": 5, "weight": 3}),
        (4, 5, {"weight": 2}),
        (5, 3, {"capacity": 4, "weight": 1}),
    ]
    for u, v, attrs in arcs:
        G.add_edge(u, v, **attrs)

    cost, flows = nx.network_simplex(G)
    assert cost == 150
    assert nx.min_cost_flow_cost(G) == 150
    assert flows == {
        1: {2: 12, 3: 8},
        2: {3: 8, 4: 4, 5: 0},
        3: {4: 11, 5: 5},
        4: {5: 10},
        5: {3: 0},
    }
257
+
258
+
259
def test_zero_capacity_edges():
    """Zero-capacity edges must be handled (regression test for ticket #617)."""
    G = nx.DiGraph()
    for u, v, cap in [
        (1, 2, 1),
        (1, 5, 1),
        (2, 3, 0),
        (2, 5, 1),
        (5, 3, 2),
        (5, 4, 0),
        (3, 4, 2),
    ]:
        G.add_edge(u, v, capacity=cap, weight=1)
    G.nodes[1]["demand"] = -1
    G.nodes[2]["demand"] = -1
    G.nodes[4]["demand"] = 2

    cost, flows = nx.network_simplex(G)
    assert cost == 6
    assert nx.min_cost_flow_cost(G) == 6
    assert flows == {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}}
282
+
283
+
284
def test_digon():
    """Digons (antiparallel edge pairs) are handled (regression for #618)."""
    edges = [
        (1, 2, {"capacity": 3, "weight": 600000}),
        (2, 1, {"capacity": 2, "weight": 0}),
        (2, 3, {"capacity": 5, "weight": 714285}),
        (3, 2, {"capacity": 2, "weight": 0}),
    ]
    G = nx.DiGraph(edges)
    G.add_nodes_from([(1, {}), (2, {"demand": -4}), (3, {"demand": 4})])
    cost, _ = nx.network_simplex(G)
    assert cost == 2857140
299
+
300
+
301
def test_deadend():
    """Graphs with dead-end nodes are infeasible here (regression for #2906)."""
    G = nx.DiGraph()
    G.add_nodes_from(range(5), demand=0)
    G.nodes[4]["demand"] = -13
    G.nodes[3]["demand"] = 13
    G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1)
    with pytest.raises(nx.NetworkXUnfeasible):
        nx.network_simplex(G)
312
+
313
+
314
def test_infinite_capacity_neg_digon():
    """An uncapacitated negative-cost digon makes the instance unbounded."""
    edges = [
        (1, 2, {"weight": -600}),
        (2, 1, {"weight": 0}),
        (2, 3, {"capacity": 5, "weight": 714285}),
        (3, 2, {"capacity": 2, "weight": 0}),
    ]
    G = nx.DiGraph(edges)
    G.add_nodes_from([(1, {}), (2, {"demand": -4}), (3, {"demand": 4})])
    with pytest.raises(nx.NetworkXUnbounded):
        nx.network_simplex(G)
327
+
328
+
329
def test_multidigraph():
    """MultiDiGraph inputs are accepted by network_simplex."""
    G = nx.MultiDiGraph()
    G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity")
    cost, flows = nx.network_simplex(G)
    assert cost == 0
    assert flows == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}}
336
+
337
+
338
def test_negative_selfloops():
    """Uncapacitated negative self-loops are unbounded; capacitated ones
    are saturated."""
    G = nx.DiGraph()
    G.add_edge(1, 1, weight=-1)
    with pytest.raises(nx.NetworkXUnbounded):
        nx.network_simplex(G)

    # Capping the loop makes the problem bounded; the loop is saturated.
    G[1][1]["capacity"] = 2
    cost, flows = nx.network_simplex(G)
    assert cost == -2
    assert flows == {1: {1: 2}}

    # Same story on a multigraph with two parallel self-loops.
    G = nx.MultiDiGraph()
    G.add_edge(1, 1, "x", weight=-1)
    G.add_edge(1, 1, "y", weight=1)
    with pytest.raises(nx.NetworkXUnbounded):
        nx.network_simplex(G)

    G[1][1]["x"]["capacity"] = 2
    cost, flows = nx.network_simplex(G)
    assert cost == -2
    assert flows == {1: {1: {"x": 2, "y": 0}}}
360
+
361
+
362
def test_bone_shaped():
    """Bone-shaped instance from issue #1283."""
    G = nx.DiGraph()
    for node, demand in [(0, -4), (1, 2), (2, 2), (3, 4), (4, -2), (5, -2)]:
        G.add_node(node, demand=demand)
    for u, v, cap in [(0, 1, 4), (0, 2, 4), (4, 3, 4), (5, 3, 4), (0, 3, 0)]:
        G.add_edge(u, v, capacity=cap)
    cost, flows = nx.network_simplex(G)
    assert cost == 0
    assert flows == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}}
379
+
380
+
381
def test_graphs_type_exceptions():
    """Undirected graphs are rejected; an empty DiGraph raises NetworkXError."""
    for G in (nx.Graph(), nx.MultiGraph()):
        with pytest.raises(nx.NetworkXNotImplemented):
            nx.network_simplex(G)
    with pytest.raises(nx.NetworkXError):
        nx.network_simplex(nx.DiGraph())
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/flow/utils.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility classes and functions for network flow algorithms.
3
+ """
4
+
5
+ from collections import deque
6
+
7
+ import networkx as nx
8
+
9
+ __all__ = [
10
+ "CurrentEdge",
11
+ "Level",
12
+ "GlobalRelabelThreshold",
13
+ "build_residual_network",
14
+ "detect_unboundedness",
15
+ "build_flow_dict",
16
+ ]
17
+
18
+
19
class CurrentEdge:
    """Circular cursor over the out-edges incident to a node.

    ``get`` returns the current ``(neighbor, attributes)`` pair without
    advancing; ``move_to_next`` advances and, on wraparound, rewinds to the
    first edge and raises StopIteration.
    """

    __slots__ = ("_edges", "_it", "_curr")

    def __init__(self, edges):
        self._edges = edges
        # An empty mapping leaves the cursor unset until edges appear.
        if not self._edges:
            return
        self._rewind()

    def get(self):
        """Return the current (neighbor, attributes) pair."""
        return self._curr

    def move_to_next(self):
        """Advance the cursor; rewind and re-raise StopIteration on wraparound."""
        try:
            self._curr = next(self._it)
        except StopIteration:
            # Signal wraparound to the caller, but leave the cursor usable.
            self._rewind()
            raise

    def _rewind(self):
        # Restart iteration at the first out-edge.
        self._it = iter(self._edges.items())
        self._curr = next(self._it)
44
+
45
+
46
class Level:
    """Pair of node sets (active / inactive) making up one distance level."""

    __slots__ = ("active", "inactive")

    def __init__(self):
        # Both sets start empty; the flow algorithms move nodes between them.
        self.active = set()
        self.inactive = set()
54
+
55
+
56
class GlobalRelabelThreshold:
    """Track work done since the last application of the global relabeling
    heuristic.

    The heuristic is due once the accumulated work reaches ``(n + m) / freq``;
    a falsy ``freq`` disables it by making the threshold infinite.
    """

    def __init__(self, n, m, freq):
        self._threshold = (n + m) / freq if freq else float("inf")
        self._work = 0

    def add_work(self, work):
        """Accumulate ``work`` units toward the threshold."""
        self._work += work

    def is_reached(self):
        """Return True once enough work has accrued."""
        return self._work >= self._threshold

    def clear_work(self):
        """Reset the work counter (after the heuristic has run)."""
        self._work = 0
73
+
74
+
75
@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
def build_residual_network(G, capacity):
    """Build a residual network for ``G`` and initialize a zero flow.

    The residual network ``R`` has the same nodes as ``G`` and, for every
    non-self-loop edge of ``G``, contains the antiparallel pair ``(u, v)``
    and ``(v, u)``.  ``R[u][v]['capacity']`` equals the capacity of
    ``(u, v)`` in ``G``, or zero when only the reverse edge exists in ``G``.
    Infinite capacities are replaced by a large finite sentinel recorded in
    ``R.graph['inf']`` so that unboundedness remains detectable.  Flow
    attributes, once assigned by a flow algorithm, satisfy
    ``R[u][v]['flow'] == -R[v][u]['flow']``, and the total flow into the
    sink is stored in ``R.graph['flow_value']``.
    """
    if G.is_multigraph():
        raise nx.NetworkXError("MultiGraph and MultiDiGraph not supported (yet).")

    R = nx.DiGraph()
    R.__networkx_cache__ = None  # Disable caching
    R.add_nodes_from(G)

    inf = float("inf")
    # Keep only non-self-loop edges with positive (or missing => infinite)
    # capacity.
    edge_list = [
        (u, v, attr)
        for u, v, attr in G.edges(data=True)
        if u != v and attr.get(capacity, inf) > 0
    ]

    # Simulate infinity with three times the sum of the finite capacities
    # (or 1 when that sum is zero).  An infinite-capacity edge then always
    # keeps residual capacity of at least 2/3 * inf, while finite edges stay
    # below 1/3 * inf, so pushing more than 1/3 * inf units of flow to the
    # sink proves an infinite-capacity s-t path exists.  If the maximum flow
    # is finite these sentinel edges cannot cross the minimum cut, which
    # guarantees correctness.
    finite_sum = sum(
        attr[capacity]
        for u, v, attr in edge_list
        if capacity in attr and attr[capacity] != inf
    )
    inf = 3 * finite_sum or 1

    if G.is_directed():
        for u, v, attr in edge_list:
            r = min(attr.get(capacity, inf), inf)
            if R.has_edge(u, v):
                # (u, v) was created as the zero-capacity reverse of (v, u);
                # set its real capacity now.
                R[u][v]["capacity"] = r
            else:
                # Insert the edge together with its zero-capacity reverse.
                R.add_edge(u, v, capacity=r)
                R.add_edge(v, u, capacity=0)
    else:
        # An undirected edge becomes a pair of arcs with equal capacities.
        for u, v, attr in edge_list:
            r = min(attr.get(capacity, inf), inf)
            R.add_edge(u, v, capacity=r)
            R.add_edge(v, u, capacity=r)

    # Record the sentinel value that stands in for infinity.
    R.graph["inf"] = inf

    return R
156
+
157
+
158
@nx._dispatchable(
    graphs="R",
    preserve_edge_attrs={"R": {"capacity": float("inf")}},
    preserve_graph_attrs=True,
)
def detect_unboundedness(R, s, t):
    """Raise NetworkXUnbounded if ``R`` has an infinite-capacity s-t path."""
    inf = R.graph["inf"]
    queue = deque([s])
    visited = {s}
    # BFS restricted to edges whose capacity equals the "infinity" sentinel.
    while queue:
        u = queue.popleft()
        for v, attr in R[u].items():
            if v in visited or attr["capacity"] != inf:
                continue
            if v == t:
                raise nx.NetworkXUnbounded(
                    "Infinite capacity path, flow unbounded above."
                )
            visited.add(v)
            queue.append(v)
178
+
179
+
180
@nx._dispatchable(graphs={"G": 0, "R": 1}, preserve_edge_attrs={"R": {"flow": None}})
def build_flow_dict(G, R):
    """Build a flow dictionary from a residual network."""
    flow_dict = {}
    for u in G:
        # Start every successor of u at zero flow, then overwrite with the
        # strictly positive residual flows.
        flows = dict.fromkeys(G[u], 0)
        for v, attr in R[u].items():
            if attr["flow"] > 0:
                flows[v] = attr["flow"]
        flow_dict[u] = flows
    return flow_dict
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/graph_hashing.py ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for hashing graphs to strings.
3
+ Isomorphic graphs should be assigned identical hashes.
4
+ For now, only Weisfeiler-Lehman hashing is implemented.
5
+ """
6
+
7
+ from collections import Counter, defaultdict
8
+ from hashlib import blake2b
9
+
10
+ import networkx as nx
11
+
12
+ __all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"]
13
+
14
+
15
+ def _hash_label(label, digest_size):
16
+ return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest()
17
+
18
+
19
+ def _init_node_labels(G, edge_attr, node_attr):
20
+ if node_attr:
21
+ return {u: str(dd[node_attr]) for u, dd in G.nodes(data=True)}
22
+ elif edge_attr:
23
+ return {u: "" for u in G}
24
+ else:
25
+ return {u: str(deg) for u, deg in G.degree()}
26
+
27
+
28
+ def _neighborhood_aggregate(G, node, node_labels, edge_attr=None):
29
+ """
30
+ Compute new labels for given node by aggregating
31
+ the labels of each node's neighbors.
32
+ """
33
+ label_list = []
34
+ for nbr in G.neighbors(node):
35
+ prefix = "" if edge_attr is None else str(G[node][nbr][edge_attr])
36
+ label_list.append(prefix + node_labels[nbr])
37
+ return node_labels[node] + "".join(sorted(label_list))
38
+
39
+
40
+ @nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
41
+ def weisfeiler_lehman_graph_hash(
42
+ G, edge_attr=None, node_attr=None, iterations=3, digest_size=16
43
+ ):
44
+ """Return Weisfeiler Lehman (WL) graph hash.
45
+
46
+ The function iteratively aggregates and hashes neighborhoods of each node.
47
+ After each node's neighbors are hashed to obtain updated node labels,
48
+ a hashed histogram of resulting labels is returned as the final hash.
49
+
50
+ Hashes are identical for isomorphic graphs and strong guarantees that
51
+ non-isomorphic graphs will get different hashes. See [1]_ for details.
52
+
53
+ If no node or edge attributes are provided, the degree of each node
54
+ is used as its initial label.
55
+ Otherwise, node and/or edge labels are used to compute the hash.
56
+
57
+ Parameters
58
+ ----------
59
+ G : graph
60
+ The graph to be hashed.
61
+ Can have node and/or edge attributes. Can also have no attributes.
62
+ edge_attr : string, optional (default=None)
63
+ The key in edge attribute dictionary to be used for hashing.
64
+ If None, edge labels are ignored.
65
+ node_attr: string, optional (default=None)
66
+ The key in node attribute dictionary to be used for hashing.
67
+ If None, and no edge_attr given, use the degrees of the nodes as labels.
68
+ iterations: int, optional (default=3)
69
+ Number of neighbor aggregations to perform.
70
+ Should be larger for larger graphs.
71
+ digest_size: int, optional (default=16)
72
+ Size (in bits) of blake2b hash digest to use for hashing node labels.
73
+
74
+ Returns
75
+ -------
76
+ h : string
77
+ Hexadecimal string corresponding to hash of the input graph.
78
+
79
+ Examples
80
+ --------
81
+ Two graphs with edge attributes that are isomorphic, except for
82
+ differences in the edge labels.
83
+
84
+ >>> G1 = nx.Graph()
85
+ >>> G1.add_edges_from(
86
+ ... [
87
+ ... (1, 2, {"label": "A"}),
88
+ ... (2, 3, {"label": "A"}),
89
+ ... (3, 1, {"label": "A"}),
90
+ ... (1, 4, {"label": "B"}),
91
+ ... ]
92
+ ... )
93
+ >>> G2 = nx.Graph()
94
+ >>> G2.add_edges_from(
95
+ ... [
96
+ ... (5, 6, {"label": "B"}),
97
+ ... (6, 7, {"label": "A"}),
98
+ ... (7, 5, {"label": "A"}),
99
+ ... (7, 8, {"label": "A"}),
100
+ ... ]
101
+ ... )
102
+
103
+ Omitting the `edge_attr` option, results in identical hashes.
104
+
105
+ >>> nx.weisfeiler_lehman_graph_hash(G1)
106
+ '7bc4dde9a09d0b94c5097b219891d81a'
107
+ >>> nx.weisfeiler_lehman_graph_hash(G2)
108
+ '7bc4dde9a09d0b94c5097b219891d81a'
109
+
110
+ With edge labels, the graphs are no longer assigned
111
+ the same hash digest.
112
+
113
+ >>> nx.weisfeiler_lehman_graph_hash(G1, edge_attr="label")
114
+ 'c653d85538bcf041d88c011f4f905f10'
115
+ >>> nx.weisfeiler_lehman_graph_hash(G2, edge_attr="label")
116
+ '3dcd84af1ca855d0eff3c978d88e7ec7'
117
+
118
+ Notes
119
+ -----
120
+ To return the WL hashes of each subgraph of a graph, use
121
+ `weisfeiler_lehman_subgraph_hashes`
122
+
123
+ Similarity between hashes does not imply similarity between graphs.
124
+
125
+ References
126
+ ----------
127
+ .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen,
128
+ Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman
129
+ Graph Kernels. Journal of Machine Learning Research. 2011.
130
+ http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf
131
+
132
+ See also
133
+ --------
134
+ weisfeiler_lehman_subgraph_hashes
135
+ """
136
+
137
+ def weisfeiler_lehman_step(G, labels, edge_attr=None):
138
+ """
139
+ Apply neighborhood aggregation to each node
140
+ in the graph.
141
+ Computes a dictionary with labels for each node.
142
+ """
143
+ new_labels = {}
144
+ for node in G.nodes():
145
+ label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
146
+ new_labels[node] = _hash_label(label, digest_size)
147
+ return new_labels
148
+
149
+ # set initial node labels
150
+ node_labels = _init_node_labels(G, edge_attr, node_attr)
151
+
152
+ subgraph_hash_counts = []
153
+ for _ in range(iterations):
154
+ node_labels = weisfeiler_lehman_step(G, node_labels, edge_attr=edge_attr)
155
+ counter = Counter(node_labels.values())
156
+ # sort the counter, extend total counts
157
+ subgraph_hash_counts.extend(sorted(counter.items(), key=lambda x: x[0]))
158
+
159
+ # hash the final counter
160
+ return _hash_label(str(tuple(subgraph_hash_counts)), digest_size)
161
+
162
+
163
+ @nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
164
+ def weisfeiler_lehman_subgraph_hashes(
165
+ G,
166
+ edge_attr=None,
167
+ node_attr=None,
168
+ iterations=3,
169
+ digest_size=16,
170
+ include_initial_labels=False,
171
+ ):
172
+ """
173
+ Return a dictionary of subgraph hashes by node.
174
+
175
+ Dictionary keys are nodes in `G`, and values are a list of hashes.
176
+ Each hash corresponds to a subgraph rooted at a given node u in `G`.
177
+ Lists of subgraph hashes are sorted in increasing order of depth from
178
+ their root node, with the hash at index i corresponding to a subgraph
179
+ of nodes at most i edges distance from u. Thus, each list will contain
180
+ `iterations` elements - a hash for a subgraph at each depth. If
181
+ `include_initial_labels` is set to `True`, each list will additionally
182
+ have contain a hash of the initial node label (or equivalently a
183
+ subgraph of depth 0) prepended, totalling ``iterations + 1`` elements.
184
+
185
+ The function iteratively aggregates and hashes neighborhoods of each node.
186
+ This is achieved for each step by replacing for each node its label from
187
+ the previous iteration with its hashed 1-hop neighborhood aggregate.
188
+ The new node label is then appended to a list of node labels for each
189
+ node.
190
+
191
+ To aggregate neighborhoods for a node $u$ at each step, all labels of
192
+ nodes adjacent to $u$ are concatenated. If the `edge_attr` parameter is set,
193
+ labels for each neighboring node are prefixed with the value of this attribute
194
+ along the connecting edge from this neighbor to node $u$. The resulting string
195
+ is then hashed to compress this information into a fixed digest size.
196
+
197
+ Thus, at the $i$-th iteration, nodes within $i$ hops influence any given
198
+ hashed node label. We can therefore say that at depth $i$ for node $u$
199
+ we have a hash for a subgraph induced by the $i$-hop neighborhood of $u$.
200
+
201
+ The output can be used to to create general Weisfeiler-Lehman graph kernels,
202
+ or generate features for graphs or nodes - for example to generate 'words' in
203
+ a graph as seen in the 'graph2vec' algorithm.
204
+ See [1]_ & [2]_ respectively for details.
205
+
206
+ Hashes are identical for isomorphic subgraphs and there exist strong
207
+ guarantees that non-isomorphic graphs will get different hashes.
208
+ See [1]_ for details.
209
+
210
+ If no node or edge attributes are provided, the degree of each node
211
+ is used as its initial label.
212
+ Otherwise, node and/or edge labels are used to compute the hash.
213
+
214
+ Parameters
215
+ ----------
216
+ G : graph
217
+ The graph to be hashed.
218
+ Can have node and/or edge attributes. Can also have no attributes.
219
+ edge_attr : string, optional (default=None)
220
+ The key in edge attribute dictionary to be used for hashing.
221
+ If None, edge labels are ignored.
222
+ node_attr : string, optional (default=None)
223
+ The key in node attribute dictionary to be used for hashing.
224
+ If None, and no edge_attr given, use the degrees of the nodes as labels.
225
+ If None, and edge_attr is given, each node starts with an identical label.
226
+ iterations : int, optional (default=3)
227
+ Number of neighbor aggregations to perform.
228
+ Should be larger for larger graphs.
229
+ digest_size : int, optional (default=16)
230
+ Size (in bits) of blake2b hash digest to use for hashing node labels.
231
+ The default size is 16 bits.
232
+ include_initial_labels : bool, optional (default=False)
233
+ If True, include the hashed initial node label as the first subgraph
234
+ hash for each node.
235
+
236
+ Returns
237
+ -------
238
+ node_subgraph_hashes : dict
239
+ A dictionary with each key given by a node in G, and each value given
240
+ by the subgraph hashes in order of depth from the key node.
241
+
242
+ Examples
243
+ --------
244
+ Finding similar nodes in different graphs:
245
+
246
+ >>> G1 = nx.Graph()
247
+ >>> G1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 7)])
248
+ >>> G2 = nx.Graph()
249
+ >>> G2.add_edges_from([(1, 3), (2, 3), (1, 6), (1, 5), (4, 6)])
250
+ >>> g1_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=3, digest_size=8)
251
+ >>> g2_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2, iterations=3, digest_size=8)
252
+
253
+ Even though G1 and G2 are not isomorphic (they have different numbers of edges),
254
+ the hash sequence of depth 3 for node 1 in G1 and node 5 in G2 are similar:
255
+
256
+ >>> g1_hashes[1]
257
+ ['a93b64973cfc8897', 'db1b43ae35a1878f', '57872a7d2059c1c0']
258
+ >>> g2_hashes[5]
259
+ ['a93b64973cfc8897', 'db1b43ae35a1878f', '1716d2a4012fa4bc']
260
+
261
+ The first 2 WL subgraph hashes match. From this we can conclude that it's very
262
+ likely the neighborhood of 2 hops around these nodes are isomorphic.
263
+
264
+ However the 3-hop neighborhoods of ``G1`` and ``G2`` are not isomorphic since the
265
+ 3rd hashes in the lists above are not equal.
266
+
267
+ These nodes may be candidates to be classified together since their local topology
268
+ is similar.
269
+
270
+ Notes
271
+ -----
272
+ To hash the full graph when subgraph hashes are not needed, use
273
+ `weisfeiler_lehman_graph_hash` for efficiency.
274
+
275
+ Similarity between hashes does not imply similarity between graphs.
276
+
277
+ References
278
+ ----------
279
+ .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen,
280
+ Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman
281
+ Graph Kernels. Journal of Machine Learning Research. 2011.
282
+ http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf
283
+ .. [2] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan,
284
+ Lihui Chen, Yang Liu and Shantanu Jaiswa. graph2vec: Learning
285
+ Distributed Representations of Graphs. arXiv. 2017
286
+ https://arxiv.org/pdf/1707.05005.pdf
287
+
288
+ See also
289
+ --------
290
+ weisfeiler_lehman_graph_hash
291
+ """
292
+
293
+ def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None):
294
+ """
295
+ Apply neighborhood aggregation to each node
296
+ in the graph.
297
+ Computes a dictionary with labels for each node.
298
+ Appends the new hashed label to the dictionary of subgraph hashes
299
+ originating from and indexed by each node in G
300
+ """
301
+ new_labels = {}
302
+ for node in G.nodes():
303
+ label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
304
+ hashed_label = _hash_label(label, digest_size)
305
+ new_labels[node] = hashed_label
306
+ node_subgraph_hashes[node].append(hashed_label)
307
+ return new_labels
308
+
309
+ node_labels = _init_node_labels(G, edge_attr, node_attr)
310
+ if include_initial_labels:
311
+ node_subgraph_hashes = {
312
+ k: [_hash_label(v, digest_size)] for k, v in node_labels.items()
313
+ }
314
+ else:
315
+ node_subgraph_hashes = defaultdict(list)
316
+
317
+ for _ in range(iterations):
318
+ node_labels = weisfeiler_lehman_step(
319
+ G, node_labels, node_subgraph_hashes, edge_attr
320
+ )
321
+
322
+ return dict(node_subgraph_hashes)
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/graphical.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test sequences for graphiness.
2
+ """
3
+ import heapq
4
+
5
+ import networkx as nx
6
+
7
+ __all__ = [
8
+ "is_graphical",
9
+ "is_multigraphical",
10
+ "is_pseudographical",
11
+ "is_digraphical",
12
+ "is_valid_degree_sequence_erdos_gallai",
13
+ "is_valid_degree_sequence_havel_hakimi",
14
+ ]
15
+
16
+
17
@nx._dispatchable(graphs=None)
def is_graphical(sequence, method="eg"):
    """Returns True if sequence is a valid degree sequence.

    A degree sequence is valid if some simple graph can realize it.

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    method : "eg" | "hh" (default: 'eg')
        The method used to validate the degree sequence.
        "eg" corresponds to the Erdős-Gallai algorithm
        [EG1960]_, [choudum1986]_, and
        "hh" to the Havel-Hakimi algorithm
        [havel1955]_, [hakimi1962]_, [CL1996]_.

    Returns
    -------
    valid : bool
        True if the sequence is a valid degree sequence and False if not.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> sequence = (d for n, d in G.degree())
    >>> nx.is_graphical(sequence)
    True

    To test a non-graphical sequence:
    >>> sequence_list = [d for n, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_graphical(sequence_list)
    False

    References
    ----------
    .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960.
    .. [choudum1986] S.A. Choudum. "A simple proof of the Erdős-Gallai theorem on
       graph sequences." Bulletin of the Australian Mathematical Society, 33,
       pp 67-70, 1986. https://doi.org/10.1017/S0004972700002872
    .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
       Casopis Pest. Mat. 80, 477-480, 1955.
    .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
       Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
    .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
       Chapman and Hall/CRC, 1996.
    """
    # Dispatch on the requested validation algorithm.
    validators = {
        "eg": is_valid_degree_sequence_erdos_gallai,
        "hh": is_valid_degree_sequence_havel_hakimi,
    }
    if method not in validators:
        raise nx.NetworkXException("`method` must be 'eg' or 'hh'")
    # Materialize the (possibly one-shot) iterable before validating.
    return validators[method](list(sequence))
74
+
75
+
76
def _basic_graphical_tests(deg_sequence):
    """Run cheap O(n) feasibility checks and bucket-count the degrees.

    Returns ``(dmax, dmin, dsum, n, num_degs)`` where the statistics cover
    only the non-zero degrees and ``num_degs[d]`` counts occurrences of
    degree ``d``.  Raises ``nx.NetworkXUnfeasible`` if the sequence cannot
    possibly be graphical (negative degree, degree >= length, odd sum, or
    more edge endpoints than a simple graph on ``n`` nodes allows).
    """
    deg_sequence = nx.utils.make_list_of_ints(deg_sequence)
    p = len(deg_sequence)
    num_degs = [0] * p
    dmax = 0
    dmin = p
    dsum = 0
    n = 0  # number of non-zero degrees
    for d in deg_sequence:
        # A degree must lie in [0, p); anything else is unrealizable.
        if not 0 <= d < p:
            raise nx.NetworkXUnfeasible
        # Zero degrees are isolated nodes and are ignored in the statistics.
        if d > 0:
            if d > dmax:
                dmax = d
            if d < dmin:
                dmin = d
            dsum += d
            n += 1
            num_degs[d] += 1
    # Handshake lemma: the sum must be even and at most n*(n-1).
    if dsum % 2 or dsum > n * (n - 1):
        raise nx.NetworkXUnfeasible
    return dmax, dmin, dsum, n, num_degs
94
+
95
+
96
@nx._dispatchable(graphs=None)
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
    r"""Returns True if deg_sequence can be realized by a simple graph.

    The validation proceeds using the Havel-Hakimi theorem
    [havel1955]_, [hakimi1962]_, [CL1996]_.
    Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.

    Parameters
    ----------
    deg_sequence : list
        A list of integers where each element specifies the degree of a node
        in a graph.

    Returns
    -------
    valid : bool
        True if deg_sequence is graphical and False if not.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_valid_degree_sequence_havel_hakimi(sequence)
    True

    To test a non-valid sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_valid_degree_sequence_havel_hakimi(sequence_list)
    False

    Notes
    -----
    The ZZ condition says that for the sequence d if

    .. math::
        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

    then d is graphical. This was shown in Theorem 6 in [1]_.

    References
    ----------
    .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
    .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
       Casopis Pest. Mat. 80, 477-480, 1955.
    .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
       Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
    .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
       Chapman and Hall/CRC, 1996.
    """
    try:
        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
    except nx.NetworkXUnfeasible:
        return False
    # Accept if sequence has no non-zero degrees or passes the ZZ condition
    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
        return True

    # num_degs is a bucket count of each degree value, so the Havel-Hakimi
    # reduction below operates on counts instead of a sorted list.
    modstubs = [0] * (dmax + 1)
    # Successively reduce degree sequence by removing the maximum degree
    while n > 0:
        # Retrieve the maximum degree in the sequence
        while num_degs[dmax] == 0:
            dmax -= 1
        # If there are not enough stubs to connect to, then the sequence is
        # not graphical
        if dmax > n - 1:
            return False

        # Remove largest stub in list
        num_degs[dmax], n = num_degs[dmax] - 1, n - 1
        # Reduce the next dmax largest stubs
        mslen = 0
        k = dmax
        for i in range(dmax):
            # Find the next largest remaining degree bucket.
            while num_degs[k] == 0:
                k -= 1
            num_degs[k], n = num_degs[k] - 1, n - 1
            if k > 1:
                # Degree k drops to k - 1 after losing one stub; remember it
                # so it can be re-inserted into the buckets below.
                modstubs[mslen] = k - 1
                mslen += 1
        # Add back to the list any non-zero stubs that were removed
        for i in range(mslen):
            stub = modstubs[i]
            num_degs[stub], n = num_degs[stub] + 1, n + 1
    return True
184
+
185
+
186
@nx._dispatchable(graphs=None)
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
    r"""Returns True if deg_sequence can be realized by a simple graph.

    The validation is done using the Erdős-Gallai theorem [EG1960]_.

    Parameters
    ----------
    deg_sequence : list
        A list of integers

    Returns
    -------
    valid : bool
        True if deg_sequence is graphical and False if not.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_valid_degree_sequence_erdos_gallai(sequence)
    True

    To test a non-valid sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_valid_degree_sequence_erdos_gallai(sequence_list)
    False

    Notes
    -----

    This implementation uses an equivalent form of the Erdős-Gallai criterion.
    Worst-case run time is $O(n)$ where $n$ is the length of the sequence.

    Specifically, a sequence d is graphical if and only if the
    sum of the sequence is even and for all strong indices k in the sequence,

    .. math::

       \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k)
             = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )

    A strong index k is any index where d_k >= k and the value n_j is the
    number of occurrences of j in d. The maximal strong index is called the
    Durfee index.

    This particular rearrangement comes from the proof of Theorem 3 in [2]_.

    The ZZ condition says that for the sequence d if

    .. math::
        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

    then d is graphical. This was shown in Theorem 6 in [2]_.

    References
    ----------
    .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
       Discrete Mathematics, 265, pp. 417-420 (2003).
    .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
    .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960.
    """
    try:
        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
    except nx.NetworkXUnfeasible:
        return False
    # Accept if sequence has no non-zero degrees or passes the ZZ condition
    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
        return True

    # Perform the EG checks using the reformulation of Zverovich and Zverovich.
    # num_degs[d] counts occurrences of degree d, so the sequence is scanned
    # in decreasing degree order without an explicit sort.
    k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
    for dk in range(dmax, dmin - 1, -1):
        if dk < k + 1:  # Check if already past Durfee index
            return True
        if num_degs[dk] > 0:
            run_size = num_degs[dk]  # Process a run of identical-valued degrees
            if dk < k + run_size:  # Check if end of run is past Durfee index
                run_size = dk - k  # Adjust back to Durfee index
            sum_deg += run_size * dk
            for v in range(run_size):
                sum_nj += num_degs[k + v]
                sum_jnj += (k + v) * num_degs[k + v]
            k += run_size
            # One Erdős-Gallai inequality check per processed run.
            if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj:
                return False
    return True
275
+
276
+
277
@nx._dispatchable(graphs=None)
def is_multigraphical(sequence):
    """Returns True if some multigraph can realize the sequence.

    Parameters
    ----------
    sequence : list
        A list of integers

    Returns
    -------
    valid : bool
        True if deg_sequence is a multigraphic degree sequence and False if not.

    Examples
    --------
    >>> G = nx.MultiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_multigraphical(sequence)
    True

    To test a non-multigraphical sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_multigraphical(sequence_list)
    False

    Notes
    -----
    The worst-case run time is $O(n)$ where $n$ is the length of the sequence.

    References
    ----------
    .. [1] S. L. Hakimi. "On the realizability of a set of integers as
       degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
       (1962).
    """
    try:
        degrees = nx.utils.make_list_of_ints(sequence)
    except nx.NetworkXError:
        # Non-integer entries cannot form a degree sequence.
        return False
    if any(d < 0 for d in degrees):
        return False
    total = sum(degrees)
    largest = max(degrees, default=0)
    # Hakimi's criterion for multigraphs: even sum, and the largest degree
    # cannot exceed the sum of all the others.
    return total % 2 == 0 and total >= 2 * largest
326
+
327
+
328
@nx._dispatchable(graphs=None)
def is_pseudographical(sequence):
    """Returns True if some pseudograph can realize the sequence.

    Every nonnegative integer sequence with an even sum is pseudographical
    (see [1]_).

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    Returns
    -------
    valid : bool
        True if the sequence is a pseudographic degree sequence and False if not.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_pseudographical(sequence)
    True

    To test a non-pseudographical sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_pseudographical(sequence_list)
    False

    Notes
    -----
    The worst-case run time is $O(n)$ where n is the length of the sequence.

    References
    ----------
    .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
       and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
       pp. 778-782 (1976).
    """
    try:
        deg_sequence = nx.utils.make_list_of_ints(sequence)
    except nx.NetworkXError:
        return False
    # Use ``all`` rather than ``min(deg_sequence) >= 0``: ``min`` raises
    # ValueError on an empty sequence, whereas the empty sequence is realized
    # by the null pseudograph (and the sibling ``is_multigraphical`` likewise
    # accepts it).
    return sum(deg_sequence) % 2 == 0 and all(d >= 0 for d in deg_sequence)
373
+
374
+
375
@nx._dispatchable(graphs=None)
def is_digraphical(in_sequence, out_sequence):
    r"""Returns True if some directed graph can realize the in- and out-degree
    sequences.

    Parameters
    ----------
    in_sequence : list or iterable container
        A sequence of integer node in-degrees

    out_sequence : list or iterable container
        A sequence of integer node out-degrees

    Returns
    -------
    valid : bool
        True if in and out-sequences are digraphic False if not.

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> in_seq = (d for n, d in G.in_degree())
    >>> out_seq = (d for n, d in G.out_degree())
    >>> nx.is_digraphical(in_seq, out_seq)
    True

    To test a non-digraphical scenario:
    >>> in_seq_list = [d for n, d in G.in_degree()]
    >>> in_seq_list[-1] += 1
    >>> nx.is_digraphical(in_seq_list, out_seq)
    False

    Notes
    -----
    This algorithm is from Kleitman and Wang [1]_.
    The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the
    sum and length of the sequences respectively.

    References
    ----------
    .. [1] D.J. Kleitman and D.L. Wang
       Algorithms for Constructing Graphs and Digraphs with Given Valences
       and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
    """
    try:
        in_deg_sequence = nx.utils.make_list_of_ints(in_sequence)
        out_deg_sequence = nx.utils.make_list_of_ints(out_sequence)
    except nx.NetworkXError:
        return False
    # Process the sequences and form two heaps to store degree pairs with
    # either zero or non-zero out degrees.
    # NOTE: values are negated before being pushed so that Python's min-heaps
    # behave as max-heaps throughout this function.
    sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
    maxn = max(nin, nout)
    maxin = 0
    if maxn == 0:
        return True
    stubheap, zeroheap = [], []
    for n in range(maxn):
        # Sequences of unequal length are padded with zero degrees.
        in_deg, out_deg = 0, 0
        if n < nout:
            out_deg = out_deg_sequence[n]
        if n < nin:
            in_deg = in_deg_sequence[n]
        if in_deg < 0 or out_deg < 0:
            return False
        sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
        if in_deg > 0:
            stubheap.append((-1 * out_deg, -1 * in_deg))
        elif out_deg > 0:
            zeroheap.append(-1 * out_deg)
    # Every arc contributes one in-stub and one out-stub, so totals must match.
    if sumin != sumout:
        return False
    heapq.heapify(stubheap)
    heapq.heapify(zeroheap)

    modstubs = [(0, 0)] * (maxin + 1)
    # Successively reduce degree sequence by removing the maximum out degree
    while stubheap:
        # Take the first value in the sequence with non-zero in degree
        (freeout, freein) = heapq.heappop(stubheap)
        freein *= -1
        if freein > len(stubheap) + len(zeroheap):
            # Not enough other nodes to absorb this node's in-stubs.
            return False

        # Attach out stubs to the nodes with the most in stubs
        mslen = 0
        for i in range(freein):
            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
                stubout = heapq.heappop(zeroheap)
                stubin = 0
            else:
                (stubout, stubin) = heapq.heappop(stubheap)
                if stubout == 0:
                    return False
            # Check if target is now totally connected
            if stubout + 1 < 0 or stubin < 0:
                modstubs[mslen] = (stubout + 1, stubin)
                mslen += 1

        # Add back the nodes to the heap that still have available stubs
        for i in range(mslen):
            stub = modstubs[i]
            if stub[1] < 0:
                heapq.heappush(stubheap, stub)
            else:
                heapq.heappush(zeroheap, stub[0])
        if freeout < 0:
            heapq.heappush(zeroheap, freeout)
    return True
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/hybrid.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provides functions for finding and testing for locally `(k, l)`-connected
3
+ graphs.
4
+
5
+ """
6
+ import copy
7
+
8
+ import networkx as nx
9
+
10
+ __all__ = ["kl_connected_subgraph", "is_kl_connected"]
11
+
12
+
13
@nx._dispatchable(returns_graph=True)
def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False):
    """Returns the maximum locally `(k, l)`-connected subgraph of `G`.

    A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
    graph there are at least `l` edge-disjoint paths of length at most `k`
    joining `u` to `v`.

    Parameters
    ----------
    G : NetworkX graph
        The graph in which to find a maximum locally `(k, l)`-connected
        subgraph.

    k : integer
        The maximum length of paths to consider. A higher number means a looser
        connectivity requirement.

    l : integer
        The number of edge-disjoint paths. A higher number means a stricter
        connectivity requirement.

    low_memory : bool
        If this is True, this function uses an algorithm that uses slightly
        more time but less memory.

    same_as_graph : bool
        If True then return a tuple of the form `(H, is_same)`,
        where `H` is the maximum locally `(k, l)`-connected subgraph and
        `is_same` is a Boolean representing whether `G` is locally `(k,
        l)`-connected (and hence, whether `H` is simply a copy of the input
        graph `G`).

    Returns
    -------
    NetworkX graph or two-tuple
        If `same_as_graph` is True, then this function returns a
        two-tuple as described above. Otherwise, it returns only the maximum
        locally `(k, l)`-connected subgraph.

    See also
    --------
    is_kl_connected

    References
    ----------
    .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
       Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
       2004. 89--104.

    """
    H = copy.deepcopy(G)  # subgraph we construct by removing from G

    graphOK = True  # stays True iff no edge is ever removed from H
    deleted_some = True  # hack to start off the while loop
    # Repeat until a full pass over H removes no edges: removing one edge can
    # invalidate paths that previously justified keeping another edge.
    while deleted_some:
        deleted_some = False
        # We use `for edge in list(H.edges()):` instead of
        # `for edge in H.edges():` because we edit the graph `H` in
        # the loop. Hence using an iterator will result in
        # `RuntimeError: dictionary changed size during iteration`
        for edge in list(H.edges()):
            (u, v) = edge
            # Get copy of graph needed for this search
            if low_memory:
                # Restrict the search graph to the k-step neighborhood of
                # {u, v}; any u-v path of length at most k lies inside it.
                verts = {u, v}
                for i in range(k):
                    for w in verts.copy():
                        verts.update(G[w])
                G2 = G.subgraph(verts).copy()
            else:
                G2 = copy.deepcopy(G)
            ###
            path = [u, v]  # seed: the edge itself counts as the first path
            cnt = 0  # number of edge-disjoint u-v paths found so far
            accept = 0  # set to 1 once l paths have been found
            while path:
                cnt += 1  # Found a path
                if cnt >= l:
                    accept = 1
                    break
                # Remove the edges of the found path from G2 so the next
                # shortest path is edge-disjoint from all previous ones.
                prev = u
                for w in path:
                    if prev != w:
                        G2.remove_edge(prev, w)
                    prev = w
                # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1?
                try:
                    path = nx.shortest_path(G2, u, v)  # ??? should "Cutoff" be k+1?
                except nx.NetworkXNoPath:
                    path = False
            # No Other Paths
            if accept == 0:
                H.remove_edge(u, v)
                deleted_some = True
                if graphOK:
                    graphOK = False
    # We looked through all edges and removed none of them.
    # So, H is the maximal (k,l)-connected subgraph of G
    if same_as_graph:
        return (H, graphOK)
    return H
116
+
117
+
118
@nx._dispatchable
def is_kl_connected(G, k, l, low_memory=False):
    """Returns True if and only if `G` is locally `(k, l)`-connected.

    A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
    graph there are at least `l` edge-disjoint paths of length at most `k`
    joining `u` to `v`.

    Parameters
    ----------
    G : NetworkX graph
        The graph to test for local `(k, l)`-connectedness.

    k : integer
        The maximum length of paths to consider. A higher number means a looser
        connectivity requirement.

    l : integer
        The number of edge-disjoint paths. A higher number means a stricter
        connectivity requirement.

    low_memory : bool
        If this is True, this function uses an algorithm that uses slightly
        more time but less memory.

    Returns
    -------
    bool
        Whether the graph is locally `(k, l)`-connected subgraph.

    See also
    --------
    kl_connected_subgraph

    References
    ----------
    .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
       Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
       2004. 89--104.

    """
    for u, v in G.edges():
        # Build the search graph for this edge.
        if low_memory:
            # Restrict the search to the k-step neighborhood of {u, v}; any
            # u-v path of length at most k lies inside this vertex set.
            verts = {u, v}
            for _ in range(k):
                for w in verts.copy():
                    verts.update(G[w])
            # ``G.subgraph`` returns a read-only (frozen) view, but edges of
            # found paths are removed from G2 below, so a mutable copy is
            # required.  (Using the view directly raised NetworkXError on the
            # first ``remove_edge`` call whenever l >= 2; this now matches
            # the low_memory branch of ``kl_connected_subgraph``.)
            G2 = G.subgraph(verts).copy()
        else:
            G2 = copy.deepcopy(G)
        path = [u, v]  # seed: the edge itself counts as the first path
        cnt = 0  # number of edge-disjoint u-v paths found so far
        accept = 0  # set to 1 once l paths have been found
        while path:
            cnt += 1  # Found a path
            if cnt >= l:
                accept = 1
                break
            # Remove the edges of the found path from G2 so the next
            # shortest path is edge-disjoint from all previous ones.
            prev = u
            for w in path:
                if w != prev:
                    G2.remove_edge(prev, w)
                prev = w
            try:
                path = nx.shortest_path(G2, u, v)  # ??? should "Cutoff" be k+1?
            except nx.NetworkXNoPath:
                path = False
        # Fewer than l edge-disjoint paths exist for this edge.
        if accept == 0:
            return False
    return True
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/node_classification.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This module provides the functions for node classification problem.
2
+
3
+ The functions in this module are not imported
4
+ into the top level `networkx` namespace.
5
+ You can access these functions by importing
6
+ the `networkx.algorithms.node_classification` modules,
7
+ then accessing the functions as attributes of `node_classification`.
8
+ For example:
9
+
10
+ >>> from networkx.algorithms import node_classification
11
+ >>> G = nx.path_graph(4)
12
+ >>> G.edges()
13
+ EdgeView([(0, 1), (1, 2), (2, 3)])
14
+ >>> G.nodes[0]["label"] = "A"
15
+ >>> G.nodes[3]["label"] = "B"
16
+ >>> node_classification.harmonic_function(G)
17
+ ['A', 'A', 'B', 'B']
18
+
19
+ References
20
+ ----------
21
+ Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
22
+ Semi-supervised learning using gaussian fields and harmonic functions.
23
+ In ICML (Vol. 3, pp. 912-919).
24
+ """
25
+ import networkx as nx
26
+
27
+ __all__ = ["harmonic_function", "local_and_global_consistency"]
28
+
29
+
30
+ @nx.utils.not_implemented_for("directed")
31
+ @nx._dispatchable(node_attrs="label_name")
32
+ def harmonic_function(G, max_iter=30, label_name="label"):
33
+ """Node classification by Harmonic function
34
+
35
+ Function for computing Harmonic function algorithm by Zhu et al.
36
+
37
+ Parameters
38
+ ----------
39
+ G : NetworkX Graph
40
+ max_iter : int
41
+ maximum number of iterations allowed
42
+ label_name : string
43
+ name of target labels to predict
44
+
45
+ Returns
46
+ -------
47
+ predicted : list
48
+ List of length ``len(G)`` with the predicted labels for each node.
49
+
50
+ Raises
51
+ ------
52
+ NetworkXError
53
+ If no nodes in `G` have attribute `label_name`.
54
+
55
+ Examples
56
+ --------
57
+ >>> from networkx.algorithms import node_classification
58
+ >>> G = nx.path_graph(4)
59
+ >>> G.nodes[0]["label"] = "A"
60
+ >>> G.nodes[3]["label"] = "B"
61
+ >>> G.nodes(data=True)
62
+ NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}})
63
+ >>> G.edges()
64
+ EdgeView([(0, 1), (1, 2), (2, 3)])
65
+ >>> predicted = node_classification.harmonic_function(G)
66
+ >>> predicted
67
+ ['A', 'A', 'B', 'B']
68
+
69
+ References
70
+ ----------
71
+ Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
72
+ Semi-supervised learning using gaussian fields and harmonic functions.
73
+ In ICML (Vol. 3, pp. 912-919).
74
+ """
75
+ import numpy as np
76
+ import scipy as sp
77
+
78
+ X = nx.to_scipy_sparse_array(G) # adjacency matrix
79
+ labels, label_dict = _get_label_info(G, label_name)
80
+
81
+ if labels.shape[0] == 0:
82
+ raise nx.NetworkXError(
83
+ f"No node on the input graph is labeled by '{label_name}'."
84
+ )
85
+
86
+ n_samples = X.shape[0]
87
+ n_classes = label_dict.shape[0]
88
+ F = np.zeros((n_samples, n_classes))
89
+
90
+ # Build propagation matrix
91
+ degrees = X.sum(axis=0)
92
+ degrees[degrees == 0] = 1 # Avoid division by 0
93
+ # TODO: csr_array
94
+ D = sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0))
95
+ P = (D @ X).tolil()
96
+ P[labels[:, 0]] = 0 # labels[:, 0] indicates IDs of labeled nodes
97
+ # Build base matrix
98
+ B = np.zeros((n_samples, n_classes))
99
+ B[labels[:, 0], labels[:, 1]] = 1
100
+
101
+ for _ in range(max_iter):
102
+ F = (P @ F) + B
103
+
104
+ return label_dict[np.argmax(F, axis=1)].tolist()
105
+
106
+
107
+ @nx.utils.not_implemented_for("directed")
108
+ @nx._dispatchable(node_attrs="label_name")
109
+ def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"):
110
+ """Node classification by Local and Global Consistency
111
+
112
+ Function for computing Local and global consistency algorithm by Zhou et al.
113
+
114
+ Parameters
115
+ ----------
116
+ G : NetworkX Graph
117
+ alpha : float
118
+ Clamping factor
119
+ max_iter : int
120
+ Maximum number of iterations allowed
121
+ label_name : string
122
+ Name of target labels to predict
123
+
124
+ Returns
125
+ -------
126
+ predicted : list
127
+ List of length ``len(G)`` with the predicted labels for each node.
128
+
129
+ Raises
130
+ ------
131
+ NetworkXError
132
+ If no nodes in `G` have attribute `label_name`.
133
+
134
+ Examples
135
+ --------
136
+ >>> from networkx.algorithms import node_classification
137
+ >>> G = nx.path_graph(4)
138
+ >>> G.nodes[0]["label"] = "A"
139
+ >>> G.nodes[3]["label"] = "B"
140
+ >>> G.nodes(data=True)
141
+ NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}})
142
+ >>> G.edges()
143
+ EdgeView([(0, 1), (1, 2), (2, 3)])
144
+ >>> predicted = node_classification.local_and_global_consistency(G)
145
+ >>> predicted
146
+ ['A', 'A', 'B', 'B']
147
+
148
+ References
149
+ ----------
150
+ Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004).
151
+ Learning with local and global consistency.
152
+ Advances in neural information processing systems, 16(16), 321-328.
153
+ """
154
+ import numpy as np
155
+ import scipy as sp
156
+
157
+ X = nx.to_scipy_sparse_array(G) # adjacency matrix
158
+ labels, label_dict = _get_label_info(G, label_name)
159
+
160
+ if labels.shape[0] == 0:
161
+ raise nx.NetworkXError(
162
+ f"No node on the input graph is labeled by '{label_name}'."
163
+ )
164
+
165
+ n_samples = X.shape[0]
166
+ n_classes = label_dict.shape[0]
167
+ F = np.zeros((n_samples, n_classes))
168
+
169
+ # Build propagation matrix
170
+ degrees = X.sum(axis=0)
171
+ degrees[degrees == 0] = 1 # Avoid division by 0
172
+ # TODO: csr_array
173
+ D2 = np.sqrt(sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)))
174
+ P = alpha * ((D2 @ X) @ D2)
175
+ # Build base matrix
176
+ B = np.zeros((n_samples, n_classes))
177
+ B[labels[:, 0], labels[:, 1]] = 1 - alpha
178
+
179
+ for _ in range(max_iter):
180
+ F = (P @ F) + B
181
+
182
+ return label_dict[np.argmax(F, axis=1)].tolist()
183
+
184
+
185
+ def _get_label_info(G, label_name):
186
+ """Get and return information of labels from the input graph
187
+
188
+ Parameters
189
+ ----------
190
+ G : Network X graph
191
+ label_name : string
192
+ Name of the target label
193
+
194
+ Returns
195
+ -------
196
+ labels : numpy array, shape = [n_labeled_samples, 2]
197
+ Array of pairs of labeled node ID and label ID
198
+ label_dict : numpy array, shape = [n_classes]
199
+ Array of labels
200
+ i-th element contains the label corresponding label ID `i`
201
+ """
202
+ import numpy as np
203
+
204
+ labels = []
205
+ label_to_id = {}
206
+ lid = 0
207
+ for i, n in enumerate(G.nodes(data=True)):
208
+ if label_name in n[1]:
209
+ label = n[1][label_name]
210
+ if label not in label_to_id:
211
+ label_to_id[label] = lid
212
+ lid += 1
213
+ labels.append([i, label_to_id[label]])
214
+ labels = np.array(labels)
215
+ label_dict = np.array(
216
+ [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])]
217
+ )
218
+ return (labels, label_dict)
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/planarity.py ADDED
@@ -0,0 +1,1402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+
3
+ import networkx as nx
4
+
5
+ __all__ = ["check_planarity", "is_planar", "PlanarEmbedding"]
6
+
7
+
8
+ @nx._dispatchable
9
+ def is_planar(G):
10
+ """Returns True if and only if `G` is planar.
11
+
12
+ A graph is *planar* iff it can be drawn in a plane without
13
+ any edge intersections.
14
+
15
+ Parameters
16
+ ----------
17
+ G : NetworkX graph
18
+
19
+ Returns
20
+ -------
21
+ bool
22
+ Whether the graph is planar.
23
+
24
+ Examples
25
+ --------
26
+ >>> G = nx.Graph([(0, 1), (0, 2)])
27
+ >>> nx.is_planar(G)
28
+ True
29
+ >>> nx.is_planar(nx.complete_graph(5))
30
+ False
31
+
32
+ See Also
33
+ --------
34
+ check_planarity :
35
+ Check if graph is planar *and* return a `PlanarEmbedding` instance if True.
36
+ """
37
+
38
+ return check_planarity(G, counterexample=False)[0]
39
+
40
+
41
@nx._dispatchable(returns_graph=True)
def check_planarity(G, counterexample=False):
    """Check if a graph is planar and return a counterexample or an embedding.

    A graph is planar iff it can be drawn in a plane without
    any edge intersections.

    Parameters
    ----------
    G : NetworkX graph
    counterexample : bool
        A Kuratowski subgraph (to proof non planarity) is only returned if set
        to true.

    Returns
    -------
    (is_planar, certificate) : (bool, NetworkX graph) tuple
        is_planar is true if the graph is planar.
        If the graph is planar `certificate` is a PlanarEmbedding
        otherwise it is a Kuratowski subgraph.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2)])
    >>> is_planar, P = nx.check_planarity(G)
    >>> print(is_planar)
    True

    When `G` is planar, a `PlanarEmbedding` instance is returned:

    >>> P.get_data()
    {0: [1, 2], 1: [0], 2: [0]}

    Notes
    -----
    A (combinatorial) embedding consists of cyclic orderings of the incident
    edges at each vertex. Given such an embedding there are multiple approaches
    discussed in literature to drawing the graph (subject to various
    constraints, e.g. integer coordinates), see e.g. [2].

    The planarity check algorithm and extraction of the combinatorial embedding
    is based on the Left-Right Planarity Test [1].

    A counterexample is only generated if the corresponding parameter is set,
    because the complexity of the counterexample generation is higher.

    See also
    --------
    is_planar :
        Check for planarity without creating a `PlanarEmbedding` or counterexample.

    References
    ----------
    .. [1] Ulrik Brandes:
        The Left-Right Planarity Test
        2009
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208
    .. [2] Takao Nishizeki, Md Saidur Rahman:
        Planar graph drawing
        Lecture Notes Series on Computing: Volume 12
        2004
    """
    # Run the Left-Right planarity test; it yields an embedding on success
    # and None on failure.
    embedding = LRPlanarity(G).lr_planarity()

    if embedding is not None:
        # graph is planar
        return True, embedding

    # graph is not planar; extract a Kuratowski subgraph only on request,
    # since that extraction is considerably more expensive than the test
    if counterexample:
        return False, get_counterexample(G)
    return False, None
115
+
116
+
117
@nx._dispatchable(returns_graph=True)
def check_planarity_recursive(G, counterexample=False):
    """Recursive version of :meth:`check_planarity`."""
    # Same structure as check_planarity, but using the recursive variants
    # of the LR test and the counterexample extraction.
    embedding = LRPlanarity(G).lr_planarity_recursive()

    if embedding is not None:
        # graph is planar
        return True, embedding

    # graph is not planar; build the (expensive) certificate only on request
    if counterexample:
        return False, get_counterexample_recursive(G)
    return False, None
131
+
132
+
133
@nx._dispatchable(returns_graph=True)
def get_counterexample(G):
    """Obtains a Kuratowski subgraph.

    Raises nx.NetworkXException if G is planar.

    The function removes edges such that the graph is still not planar.
    At some point the removal of any edge would make the graph planar.
    This subgraph must be a Kuratowski subgraph.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    subgraph : NetworkX graph
        A Kuratowski subgraph that proves that G is not planar.

    """
    # work on a throwaway copy so the caller's graph stays untouched
    work = nx.Graph(G)

    if check_planarity(work)[0]:
        raise nx.NetworkXException("G is planar - no counter example.")

    # Greedily delete edges whose removal keeps the graph non-planar.
    # Every surviving edge is essential for non-planarity, so the kept
    # edges form a Kuratowski subgraph.
    kuratowski = nx.Graph()
    for node in work:
        for nbr in list(work[node]):
            work.remove_edge(node, nbr)
            if check_planarity(work)[0]:
                # removal made the graph planar: the edge is essential
                work.add_edge(node, nbr)
                kuratowski.add_edge(node, nbr)

    return kuratowski
170
+
171
+
172
@nx._dispatchable(returns_graph=True)
def get_counterexample_recursive(G):
    """Recursive version of :meth:`get_counterexample`."""
    # work on a throwaway copy so the caller's graph stays untouched
    work = nx.Graph(G)

    if check_planarity_recursive(work)[0]:
        raise nx.NetworkXException("G is planar - no counter example.")

    # Greedily delete edges whose removal keeps the graph non-planar;
    # the surviving edges form a Kuratowski subgraph.
    kuratowski = nx.Graph()
    for node in work:
        for nbr in list(work[node]):
            work.remove_edge(node, nbr)
            if check_planarity_recursive(work)[0]:
                # removal made the graph planar: the edge is essential
                work.add_edge(node, nbr)
                kuratowski.add_edge(node, nbr)

    return kuratowski
193
+
194
+
195
class Interval:
    """A set of return edges that share one orientation constraint.

    Every return edge stored in an interval imposes the same constraint on
    the contained edges: either all of them must be oriented to the left,
    or all of them must be oriented to the right.
    """

    def __init__(self, low=None, high=None):
        self.low = low
        self.high = high

    def empty(self):
        """Return True when no return edge is stored in this interval."""
        return self.low is None and self.high is None

    def copy(self):
        """Return a new interval holding the same endpoints."""
        return Interval(self.low, self.high)

    def conflicting(self, b, planarity_state):
        """Return True if this interval conflicts with the edge ``b``."""
        if self.empty():
            # an empty interval imposes no constraint, hence no conflict
            return False
        return planarity_state.lowpt[self.high] > planarity_state.lowpt[b]
221
+
222
+
223
class ConflictPair:
    """Represents a different constraint between two intervals.

    The edges in the left interval must have a different orientation than
    the one in the right interval.
    """

    def __init__(self, left=None, right=None):
        # Bug fix: the previous signature used mutable default arguments
        # (``left=Interval(), right=Interval()``), so every ConflictPair
        # constructed with defaults shared the *same* two Interval objects.
        # Mutating one such pair's interval would silently corrupt all
        # others.  Create a fresh empty Interval per instance instead;
        # callers that pass explicit intervals are unaffected.
        self.left = Interval() if left is None else left
        self.right = Interval() if right is None else right

    def swap(self):
        """Swap the left and right intervals of this pair."""
        self.left, self.right = self.right, self.left

    def lowest(self, planarity_state):
        """Return the lowest lowpoint of this conflict pair.

        An empty interval contributes nothing, so the lowpoint of the
        other (non-empty) interval is returned in that case.
        """
        if self.left.empty():
            return planarity_state.lowpt[self.right.low]
        if self.right.empty():
            return planarity_state.lowpt[self.left.low]
        return min(
            planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low]
        )
249
+
250
+
251
def top_of_stack(l):
    """Return the topmost element of the stack `l`, or None if empty.

    The stack itself is left unmodified (no pop).
    """
    return l[-1] if l else None
256
+
257
+
258
class LRPlanarity:
    """A class to maintain the state during planarity check.

    Implements the Left-Right Planarity Test: a DFS orientation pass,
    a constraint-checking ("testing") pass over a stack of conflict
    pairs, and a final pass that assembles a ``PlanarEmbedding``.
    Both iterative and recursive variants of each pass are provided.
    """

    # __slots__ keeps the per-instance footprint small and guards
    # against accidental attribute creation during the algorithm.
    __slots__ = [
        "G",
        "roots",
        "height",
        "lowpt",
        "lowpt2",
        "nesting_depth",
        "parent_edge",
        "DG",
        "adjs",
        "ordered_adjs",
        "ref",
        "side",
        "S",
        "stack_bottom",
        "lowpt_edge",
        "left_ref",
        "right_ref",
        "embedding",
    ]

    def __init__(self, G):
        # copy G without adding self-loops (self-loops are irrelevant
        # for planarity and would break the algorithm's edge handling)
        self.G = nx.Graph()
        self.G.add_nodes_from(G.nodes)
        for e in G.edges:
            if e[0] != e[1]:
                self.G.add_edge(e[0], e[1])

        # one DFS root per connected component
        self.roots = []

        # distance from tree root; None marks an unvisited node
        self.height = defaultdict(lambda: None)

        self.lowpt = {}  # height of lowest return point of an edge
        self.lowpt2 = {}  # height of second lowest return point
        self.nesting_depth = {}  # for nesting order

        # None -> missing edge
        self.parent_edge = defaultdict(lambda: None)

        # oriented DFS graph
        self.DG = nx.DiGraph()
        self.DG.add_nodes_from(G.nodes)

        self.adjs = {}
        self.ordered_adjs = {}

        # ref[e]: reference edge used to resolve relative sides; side[e]
        # starts at +1 and may flip to -1 during testing
        self.ref = defaultdict(lambda: None)
        self.side = defaultdict(lambda: 1)

        # stack of conflict pairs
        self.S = []
        self.stack_bottom = {}
        self.lowpt_edge = {}

        self.left_ref = {}
        self.right_ref = {}

        self.embedding = PlanarEmbedding()

    def lr_planarity(self):
        """Execute the LR planarity test.

        Returns
        -------
        embedding : PlanarEmbedding or None
            If the graph is planar an embedding is returned. Otherwise None.
        """
        # Euler bound: a simple planar graph has at most 3n - 6 edges,
        # so denser graphs can be rejected immediately.
        if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
            # graph is not planar
            return None

        # make adjacency lists for dfs
        for v in self.G:
            self.adjs[v] = list(self.G[v])

        # orientation of the graph by depth first search traversal
        for v in self.G:
            if self.height[v] is None:
                self.height[v] = 0
                self.roots.append(v)
                self.dfs_orientation(v)

        # Free no longer used variables
        self.G = None
        self.lowpt2 = None
        self.adjs = None

        # testing
        for v in self.DG:  # sort the adjacency lists by nesting depth
            # note: this sorting leads to non linear time
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
            )
        for v in self.roots:
            if not self.dfs_testing(v):
                return None

        # Free no longer used variables
        self.height = None
        self.lowpt = None
        self.S = None
        self.stack_bottom = None
        self.lowpt_edge = None

        # resolve relative sides into absolute signs for the final ordering
        for e in self.DG.edges:
            self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e]

        self.embedding.add_nodes_from(self.DG.nodes)
        for v in self.DG:
            # sort the adjacency lists again
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
            )
            # initialize the embedding
            previous_node = None
            for w in self.ordered_adjs[v]:
                self.embedding.add_half_edge(v, w, ccw=previous_node)
                previous_node = w

        # Free no longer used variables
        self.DG = None
        self.nesting_depth = None
        self.ref = None

        # compute the complete embedding
        for v in self.roots:
            self.dfs_embedding(v)

        # Free no longer used variables
        self.roots = None
        self.parent_edge = None
        self.ordered_adjs = None
        self.left_ref = None
        self.right_ref = None
        self.side = None

        return self.embedding

    def lr_planarity_recursive(self):
        """Recursive version of :meth:`lr_planarity`."""
        # Euler bound: reject graphs with more than 3n - 6 edges outright.
        if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
            # graph is not planar
            return None

        # orientation of the graph by depth first search traversal
        for v in self.G:
            if self.height[v] is None:
                self.height[v] = 0
                self.roots.append(v)
                self.dfs_orientation_recursive(v)

        # Free no longer used variable
        self.G = None

        # testing
        for v in self.DG:  # sort the adjacency lists by nesting depth
            # note: this sorting leads to non linear time
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
            )
        for v in self.roots:
            if not self.dfs_testing_recursive(v):
                return None

        for e in self.DG.edges:
            self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e]

        self.embedding.add_nodes_from(self.DG.nodes)
        for v in self.DG:
            # sort the adjacency lists again
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
            )
            # initialize the embedding
            previous_node = None
            for w in self.ordered_adjs[v]:
                self.embedding.add_half_edge(v, w, ccw=previous_node)
                previous_node = w

        # compute the complete embedding
        for v in self.roots:
            self.dfs_embedding_recursive(v)

        return self.embedding

    def dfs_orientation(self, v):
        """Orient the graph by DFS, compute lowpoints and nesting order.

        Iterative DFS using an explicit stack; ``ind`` and ``skip_init``
        emulate the per-edge program counter of the recursive version.
        """
        # the recursion stack
        dfs_stack = [v]
        # index of next edge to handle in adjacency list of each node
        ind = defaultdict(lambda: 0)
        # boolean to indicate whether to skip the initial work for an edge
        skip_init = defaultdict(lambda: False)

        while dfs_stack:
            v = dfs_stack.pop()
            e = self.parent_edge[v]

            for w in self.adjs[v][ind[v] :]:
                vw = (v, w)

                if not skip_init[vw]:
                    if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
                        ind[v] += 1
                        continue  # the edge was already oriented

                    self.DG.add_edge(v, w)  # orient the edge

                    self.lowpt[vw] = self.height[v]
                    self.lowpt2[vw] = self.height[v]
                    if self.height[w] is None:  # (v, w) is a tree edge
                        self.parent_edge[w] = vw
                        self.height[w] = self.height[v] + 1

                        dfs_stack.append(v)  # revisit v after finishing w
                        dfs_stack.append(w)  # visit w next
                        skip_init[vw] = True  # don't redo this block
                        break  # handle next node in dfs_stack (i.e. w)
                    else:  # (v, w) is a back edge
                        self.lowpt[vw] = self.height[w]

                # determine nesting graph
                self.nesting_depth[vw] = 2 * self.lowpt[vw]
                if self.lowpt2[vw] < self.height[v]:  # chordal
                    self.nesting_depth[vw] += 1

                # update lowpoints of parent edge e
                if e is not None:
                    if self.lowpt[vw] < self.lowpt[e]:
                        self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
                        self.lowpt[e] = self.lowpt[vw]
                    elif self.lowpt[vw] > self.lowpt[e]:
                        self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
                    else:
                        self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])

                ind[v] += 1

    def dfs_orientation_recursive(self, v):
        """Recursive version of :meth:`dfs_orientation`."""
        e = self.parent_edge[v]
        for w in self.G[v]:
            if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
                continue  # the edge was already oriented
            vw = (v, w)
            self.DG.add_edge(v, w)  # orient the edge

            self.lowpt[vw] = self.height[v]
            self.lowpt2[vw] = self.height[v]
            if self.height[w] is None:  # (v, w) is a tree edge
                self.parent_edge[w] = vw
                self.height[w] = self.height[v] + 1
                self.dfs_orientation_recursive(w)
            else:  # (v, w) is a back edge
                self.lowpt[vw] = self.height[w]

            # determine nesting graph
            self.nesting_depth[vw] = 2 * self.lowpt[vw]
            if self.lowpt2[vw] < self.height[v]:  # chordal
                self.nesting_depth[vw] += 1

            # update lowpoints of parent edge e
            if e is not None:
                if self.lowpt[vw] < self.lowpt[e]:
                    self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
                    self.lowpt[e] = self.lowpt[vw]
                elif self.lowpt[vw] > self.lowpt[e]:
                    self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
                else:
                    self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])

    def dfs_testing(self, v):
        """Test for LR partition.

        Returns False as soon as an unresolvable orientation conflict is
        found (i.e. the graph is not planar), True otherwise.
        """
        # the recursion stack
        dfs_stack = [v]
        # index of next edge to handle in adjacency list of each node
        ind = defaultdict(lambda: 0)
        # boolean to indicate whether to skip the initial work for an edge
        skip_init = defaultdict(lambda: False)

        while dfs_stack:
            v = dfs_stack.pop()
            e = self.parent_edge[v]
            # to indicate whether to skip the final block after the for loop
            skip_final = False

            for w in self.ordered_adjs[v][ind[v] :]:
                ei = (v, w)

                if not skip_init[ei]:
                    self.stack_bottom[ei] = top_of_stack(self.S)

                    if ei == self.parent_edge[w]:  # tree edge
                        dfs_stack.append(v)  # revisit v after finishing w
                        dfs_stack.append(w)  # visit w next
                        skip_init[ei] = True  # don't redo this block
                        skip_final = True  # skip final work after breaking
                        break  # handle next node in dfs_stack (i.e. w)
                    else:  # back edge
                        self.lowpt_edge[ei] = ei
                        self.S.append(ConflictPair(right=Interval(ei, ei)))

                # integrate new return edges
                if self.lowpt[ei] < self.height[v]:
                    if w == self.ordered_adjs[v][0]:  # e_i has return edge
                        self.lowpt_edge[e] = self.lowpt_edge[ei]
                    else:  # add constraints of e_i
                        if not self.add_constraints(ei, e):
                            # graph is not planar
                            return False

                ind[v] += 1

            if not skip_final:
                # remove back edges returning to parent
                if e is not None:  # v isn't root
                    self.remove_back_edges(e)

        return True

    def dfs_testing_recursive(self, v):
        """Recursive version of :meth:`dfs_testing`."""
        e = self.parent_edge[v]
        for w in self.ordered_adjs[v]:
            ei = (v, w)
            self.stack_bottom[ei] = top_of_stack(self.S)
            if ei == self.parent_edge[w]:  # tree edge
                if not self.dfs_testing_recursive(w):
                    return False
            else:  # back edge
                self.lowpt_edge[ei] = ei
                self.S.append(ConflictPair(right=Interval(ei, ei)))

            # integrate new return edges
            if self.lowpt[ei] < self.height[v]:
                if w == self.ordered_adjs[v][0]:  # e_i has return edge
                    self.lowpt_edge[e] = self.lowpt_edge[ei]
                else:  # add constraints of e_i
                    if not self.add_constraints(ei, e):
                        # graph is not planar
                        return False

        # remove back edges returning to parent
        if e is not None:  # v isn't root
            self.remove_back_edges(e)
        return True

    def add_constraints(self, ei, e):
        """Merge the constraints induced by edge ``ei`` into the stack.

        Pops conflict pairs above ``stack_bottom[ei]`` off ``self.S``,
        merges their intervals into a new pair ``P`` and pushes ``P``
        back (if non-empty).  Returns False if two constraints cannot be
        satisfied simultaneously, i.e. the graph is not planar.
        """
        P = ConflictPair()
        # merge return edges of e_i into P.right
        while True:
            Q = self.S.pop()
            if not Q.left.empty():
                Q.swap()
            if not Q.left.empty():  # not planar
                return False
            if self.lowpt[Q.right.low] > self.lowpt[e]:
                # merge intervals
                if P.right.empty():  # topmost interval
                    P.right = Q.right.copy()
                else:
                    self.ref[P.right.low] = Q.right.high
                P.right.low = Q.right.low
            else:  # align
                self.ref[Q.right.low] = self.lowpt_edge[e]
            if top_of_stack(self.S) == self.stack_bottom[ei]:
                break
        # merge conflicting return edges of e_1,...,e_i-1 into P.L
        while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack(
            self.S
        ).right.conflicting(ei, self):
            Q = self.S.pop()
            if Q.right.conflicting(ei, self):
                Q.swap()
            if Q.right.conflicting(ei, self):  # not planar
                return False
            # merge interval below lowpt(e_i) into P.R
            self.ref[P.right.low] = Q.right.high
            if Q.right.low is not None:
                P.right.low = Q.right.low

            if P.left.empty():  # topmost interval
                P.left = Q.left.copy()
            else:
                self.ref[P.left.low] = Q.left.high
            P.left.low = Q.left.low

        if not (P.left.empty() and P.right.empty()):
            self.S.append(P)
        return True

    def remove_back_edges(self, e):
        """Drop and trim conflict pairs for back edges ending at e's tail.

        Called when the DFS returns over tree edge ``e``; discards the
        constraints that ended at the parent ``u = e[0]`` and records the
        side of ``e`` based on its highest remaining return edge.
        """
        u = e[0]
        # trim back edges ending at parent u
        # drop entire conflict pairs
        while self.S and top_of_stack(self.S).lowest(self) == self.height[u]:
            P = self.S.pop()
            if P.left.low is not None:
                self.side[P.left.low] = -1

        if self.S:  # one more conflict pair to consider
            P = self.S.pop()
            # trim left interval
            while P.left.high is not None and P.left.high[1] == u:
                P.left.high = self.ref[P.left.high]
            if P.left.high is None and P.left.low is not None:
                # just emptied
                self.ref[P.left.low] = P.right.low
                self.side[P.left.low] = -1
                P.left.low = None
            # trim right interval
            while P.right.high is not None and P.right.high[1] == u:
                P.right.high = self.ref[P.right.high]
            if P.right.high is None and P.right.low is not None:
                # just emptied
                self.ref[P.right.low] = P.left.low
                self.side[P.right.low] = -1
                P.right.low = None
            self.S.append(P)

        # side of e is side of a highest return edge
        if self.lowpt[e] < self.height[u]:  # e has return edge
            hl = top_of_stack(self.S).left.high
            hr = top_of_stack(self.S).right.high

            if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]):
                self.ref[e] = hl
            else:
                self.ref[e] = hr

    def dfs_embedding(self, v):
        """Completes the embedding.

        Iterative DFS that inserts back edges into the half-edge lists of
        the embedding, using ``left_ref``/``right_ref`` as insertion
        anchors depending on the resolved side of each edge.
        """
        # the recursion stack
        dfs_stack = [v]
        # index of next edge to handle in adjacency list of each node
        ind = defaultdict(lambda: 0)

        while dfs_stack:
            v = dfs_stack.pop()

            for w in self.ordered_adjs[v][ind[v] :]:
                ind[v] += 1
                ei = (v, w)

                if ei == self.parent_edge[w]:  # tree edge
                    self.embedding.add_half_edge_first(w, v)
                    self.left_ref[v] = w
                    self.right_ref[v] = w

                    dfs_stack.append(v)  # revisit v after finishing w
                    dfs_stack.append(w)  # visit w next
                    break  # handle next node in dfs_stack (i.e. w)
                else:  # back edge
                    if self.side[ei] == 1:
                        self.embedding.add_half_edge(w, v, ccw=self.right_ref[w])
                    else:
                        self.embedding.add_half_edge(w, v, cw=self.left_ref[w])
                        self.left_ref[w] = v

    def dfs_embedding_recursive(self, v):
        """Recursive version of :meth:`dfs_embedding`."""
        for w in self.ordered_adjs[v]:
            ei = (v, w)
            if ei == self.parent_edge[w]:  # tree edge
                self.embedding.add_half_edge_first(w, v)
                self.left_ref[v] = w
                self.right_ref[v] = w
                self.dfs_embedding_recursive(w)
            else:  # back edge
                if self.side[ei] == 1:
                    # place v directly after right_ref[w] in embed. list of w
                    self.embedding.add_half_edge(w, v, ccw=self.right_ref[w])
                else:
                    # place v directly before left_ref[w] in embed. list of w
                    self.embedding.add_half_edge(w, v, cw=self.left_ref[w])
                    self.left_ref[w] = v

    def sign(self, e):
        """Resolve the relative side of an edge to the absolute side.

        Iterative unrolling of the reference chain ``self.ref``; each
        edge's side is multiplied by the resolved side of its reference.
        """
        # the recursion stack
        dfs_stack = [e]
        # dict to remember reference edges
        old_ref = defaultdict(lambda: None)

        while dfs_stack:
            e = dfs_stack.pop()

            if self.ref[e] is not None:
                dfs_stack.append(e)  # revisit e after finishing self.ref[e]
                dfs_stack.append(self.ref[e])  # visit self.ref[e] next
                old_ref[e] = self.ref[e]  # remember value of self.ref[e]
                self.ref[e] = None
            else:
                self.side[e] *= self.side[old_ref[e]]

        return self.side[e]

    def sign_recursive(self, e):
        """Recursive version of :meth:`sign`."""
        if self.ref[e] is not None:
            self.side[e] = self.side[e] * self.sign_recursive(self.ref[e])
            self.ref[e] = None
        return self.side[e]
766
+
767
+
768
+ class PlanarEmbedding(nx.DiGraph):
769
+ """Represents a planar graph with its planar embedding.
770
+
771
+ The planar embedding is given by a `combinatorial embedding
772
+ <https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_.
773
+
774
+ .. note:: `check_planarity` is the preferred way to check if a graph is planar.
775
+
776
+ **Neighbor ordering:**
777
+
778
+ In comparison to a usual graph structure, the embedding also stores the
779
+ order of all neighbors for every vertex.
780
+ The order of the neighbors can be given in clockwise (cw) direction or
781
+ counterclockwise (ccw) direction. This order is stored as edge attributes
782
+ in the underlying directed graph. For the edge (u, v) the edge attribute
783
+ 'cw' is set to the neighbor of u that follows immediately after v in
784
+ clockwise direction.
785
+
786
+ In order for a PlanarEmbedding to be valid it must fulfill multiple
787
+ conditions. It is possible to check if these conditions are fulfilled with
788
+ the method :meth:`check_structure`.
789
+ The conditions are:
790
+
791
+ * Edges must go in both directions (because the edge attributes differ)
792
+ * Every edge must have a 'cw' and 'ccw' attribute which corresponds to a
793
+ correct planar embedding.
794
+
795
+ As long as a PlanarEmbedding is invalid only the following methods should
796
+ be called:
797
+
798
+ * :meth:`add_half_edge`
799
+ * :meth:`connect_components`
800
+
801
+ Even though the graph is a subclass of nx.DiGraph, it can still be used
802
+ for algorithms that require undirected graphs, because the method
803
+ :meth:`is_directed` is overridden. This is possible, because a valid
804
+ PlanarGraph must have edges in both directions.
805
+
806
+ **Half edges:**
807
+
808
+ In methods like `add_half_edge` the term "half-edge" is used, which is
809
+ a term that is used in `doubly connected edge lists
810
+ <https://en.wikipedia.org/wiki/Doubly_connected_edge_list>`_. It is used
811
+ to emphasize that the edge is only in one direction and there exists
812
+ another half-edge in the opposite direction.
813
+ While conventional edges always have two faces (including outer face) next
814
+ to them, it is possible to assign each half-edge *exactly one* face.
815
+ For a half-edge (u, v) that is oriented such that u is below v then the
816
+ face that belongs to (u, v) is to the right of this half-edge.
817
+
818
+ See Also
819
+ --------
820
+ is_planar :
821
+ Preferred way to check if an existing graph is planar.
822
+
823
+ check_planarity :
824
+ A convenient way to create a `PlanarEmbedding`. If not planar,
825
+ it returns a subgraph that shows this.
826
+
827
+ Examples
828
+ --------
829
+
830
+ Create an embedding of a star graph (compare `nx.star_graph(3)`):
831
+
832
+ >>> G = nx.PlanarEmbedding()
833
+ >>> G.add_half_edge(0, 1)
834
+ >>> G.add_half_edge(0, 2, ccw=1)
835
+ >>> G.add_half_edge(0, 3, ccw=2)
836
+ >>> G.add_half_edge(1, 0)
837
+ >>> G.add_half_edge(2, 0)
838
+ >>> G.add_half_edge(3, 0)
839
+
840
+ Alternatively the same embedding can also be defined in counterclockwise
841
+ orientation. The following results in exactly the same PlanarEmbedding:
842
+
843
+ >>> G = nx.PlanarEmbedding()
844
+ >>> G.add_half_edge(0, 1)
845
+ >>> G.add_half_edge(0, 3, cw=1)
846
+ >>> G.add_half_edge(0, 2, cw=3)
847
+ >>> G.add_half_edge(1, 0)
848
+ >>> G.add_half_edge(2, 0)
849
+ >>> G.add_half_edge(3, 0)
850
+
851
+ After creating a graph, it is possible to validate that the PlanarEmbedding
852
+ object is correct:
853
+
854
+ >>> G.check_structure()
855
+
856
+ """
857
+
858
    def __init__(self, incoming_graph_data=None, **attr):
        super().__init__(incoming_graph_data=incoming_graph_data, **attr)
        # Shadow DiGraph's generic edge-insertion methods on the instance:
        # adding edges without maintaining the cw/ccw attributes would
        # corrupt the embedding, so all insertions must go through
        # `add_half_edge` (the shadowed methods raise NotImplementedError).
        self.add_edge = self.__forbidden
        self.add_edges_from = self.__forbidden
        self.add_weighted_edges_from = self.__forbidden
863
+
864
+ def __forbidden(self, *args, **kwargs):
865
+ """Forbidden operation
866
+
867
+ Any edge additions to a PlanarEmbedding should be done using
868
+ method `add_half_edge`.
869
+ """
870
+ raise NotImplementedError(
871
+ "Use `add_half_edge` method to add edges to a PlanarEmbedding."
872
+ )
873
+
874
+ def get_data(self):
875
+ """Converts the adjacency structure into a better readable structure.
876
+
877
+ Returns
878
+ -------
879
+ embedding : dict
880
+ A dict mapping all nodes to a list of neighbors sorted in
881
+ clockwise order.
882
+
883
+ See Also
884
+ --------
885
+ set_data
886
+
887
+ """
888
+ embedding = {}
889
+ for v in self:
890
+ embedding[v] = list(self.neighbors_cw_order(v))
891
+ return embedding
892
+
893
+ def set_data(self, data):
894
+ """Inserts edges according to given sorted neighbor list.
895
+
896
+ The input format is the same as the output format of get_data().
897
+
898
+ Parameters
899
+ ----------
900
+ data : dict
901
+ A dict mapping all nodes to a list of neighbors sorted in
902
+ clockwise order.
903
+
904
+ See Also
905
+ --------
906
+ get_data
907
+
908
+ """
909
+ for v in data:
910
+ ref = None
911
+ for w in reversed(data[v]):
912
+ self.add_half_edge(v, w, cw=ref)
913
+ ref = w
914
+
915
    def remove_node(self, n):
        """Remove node n.

        Removes the node n and all adjacent edges, updating the
        PlanarEmbedding to account for any resulting edge removal.
        Attempting to remove a non-existent node will raise an exception.

        Parameters
        ----------
        n : node
            A node in the graph

        Raises
        ------
        NetworkXError
            If n is not in the graph.

        See Also
        --------
        remove_nodes_from

        """
        try:
            # For every neighbor u of n, delete the half-edges (u, n) and
            # splice n out of u's circular cw/ccw neighbor order.
            for u in self._pred[n]:
                succs_u = self._succ[u]
                un_cw = succs_u[n]["cw"]
                un_ccw = succs_u[n]["ccw"]
                del succs_u[n]
                del self._pred[u][n]
                if n != un_cw:
                    # n was not u's only neighbor: relink its cw/ccw
                    # neighbors to each other to close the cycle
                    succs_u[un_cw]["ccw"] = un_ccw
                    succs_u[un_ccw]["cw"] = un_cw
            # finally drop n itself from the graph's node/edge stores
            del self._node[n]
            del self._succ[n]
            del self._pred[n]
        except KeyError as err:  # NetworkXError if n not in self
            raise nx.NetworkXError(
                f"The node {n} is not in the planar embedding."
            ) from err
        nx._clear_cache(self)
955
+
956
+ def remove_nodes_from(self, nodes):
957
+ """Remove multiple nodes.
958
+
959
+ Parameters
960
+ ----------
961
+ nodes : iterable container
962
+ A container of nodes (list, dict, set, etc.). If a node
963
+ in the container is not in the graph it is silently ignored.
964
+
965
+ See Also
966
+ --------
967
+ remove_node
968
+
969
+ Notes
970
+ -----
971
+ When removing nodes from an iterator over the graph you are changing,
972
+ a `RuntimeError` will be raised with message:
973
+ `RuntimeError: dictionary changed size during iteration`. This
974
+ happens when the graph's underlying dictionary is modified during
975
+ iteration. To avoid this error, evaluate the iterator into a separate
976
+ object, e.g. by using `list(iterator_of_nodes)`, and pass this
977
+ object to `G.remove_nodes_from`.
978
+
979
+ """
980
+ for n in nodes:
981
+ if n in self._node:
982
+ self.remove_node(n)
983
+ # silently skip non-existing nodes
984
+
985
+ def neighbors_cw_order(self, v):
986
+ """Generator for the neighbors of v in clockwise order.
987
+
988
+ Parameters
989
+ ----------
990
+ v : node
991
+
992
+ Yields
993
+ ------
994
+ node
995
+
996
+ """
997
+ succs = self._succ[v]
998
+ if not succs:
999
+ # v has no neighbors
1000
+ return
1001
+ start_node = next(reversed(succs))
1002
+ yield start_node
1003
+ current_node = succs[start_node]["cw"]
1004
+ while start_node != current_node:
1005
+ yield current_node
1006
+ current_node = succs[current_node]["cw"]
1007
+
1008
    def add_half_edge(self, start_node, end_node, *, cw=None, ccw=None):
        """Adds a half-edge from `start_node` to `end_node`.

        If the half-edge is not the first one out of `start_node`, a reference
        node must be provided either in the clockwise (parameter `cw`) or in
        the counterclockwise (parameter `ccw`) direction. Only one of `cw`/`ccw`
        can be specified (or neither in the case of the first edge).
        Note that specifying a reference in the clockwise (`cw`) direction means
        inserting the new edge in the first counterclockwise position with
        respect to the reference (and vice-versa).

        Parameters
        ----------
        start_node : node
            Start node of inserted edge.
        end_node : node
            End node of inserted edge.
        cw, ccw: node
            End node of reference edge.
            Omit or pass `None` if adding the first out-half-edge of `start_node`.


        Raises
        ------
        NetworkXException
            If the `cw` or `ccw` node is not a successor of `start_node`.
            If `start_node` has successors, but neither `cw` or `ccw` is provided.
            If both `cw` and `ccw` are specified.

        See Also
        --------
        connect_components
        """

        succs = self._succ.get(start_node)
        if succs:
            # there is already some edge out of start_node
            leftmost_nbr = next(reversed(self._succ[start_node]))
            if cw is not None:
                if cw not in succs:
                    raise nx.NetworkXError("Invalid clockwise reference node.")
                if ccw is not None:
                    raise nx.NetworkXError("Only one of cw/ccw can be specified.")
                # Insert end_node between cw and its former ccw neighbor.
                ref_ccw = succs[cw]["ccw"]
                super().add_edge(start_node, end_node, cw=cw, ccw=ref_ccw)
                succs[ref_ccw]["cw"] = end_node
                succs[cw]["ccw"] = end_node
                # when (cw == leftmost_nbr), the newly added neighbor is
                # already at the end of dict self._succ[start_node] and
                # takes the place of the former leftmost_nbr
                move_leftmost_nbr_to_end = cw != leftmost_nbr
            elif ccw is not None:
                if ccw not in succs:
                    raise nx.NetworkXError("Invalid counterclockwise reference node.")
                # Insert end_node between ccw and its former cw neighbor.
                ref_cw = succs[ccw]["cw"]
                super().add_edge(start_node, end_node, cw=ref_cw, ccw=ccw)
                succs[ref_cw]["ccw"] = end_node
                succs[ccw]["cw"] = end_node
                move_leftmost_nbr_to_end = True
            else:
                raise nx.NetworkXError(
                    "Node already has out-half-edge(s), either cw or ccw reference node required."
                )
            if move_leftmost_nbr_to_end:
                # LRPlanarity (via self.add_half_edge_first()) requires that
                # we keep track of the leftmost neighbor, which we accomplish
                # by keeping it as the last key in dict self._succ[start_node]
                succs[leftmost_nbr] = succs.pop(leftmost_nbr)

        else:
            if cw is not None or ccw is not None:
                raise nx.NetworkXError("Invalid reference node.")
            # adding the first edge out of start_node; a single half-edge is
            # its own cw and ccw neighbor in the rotation.
            super().add_edge(start_node, end_node, ccw=end_node, cw=end_node)
1082
+
1083
    def check_structure(self):
        """Runs without exceptions if this object is valid.

        Checks that the following properties are fulfilled:

        * Edges go in both directions (because the edge attributes differ).
        * Every edge has a 'cw' and 'ccw' attribute which corresponds to a
          correct planar embedding.

        Running this method verifies that the underlying Graph must be planar.

        Raises
        ------
        NetworkXException
            This exception is raised with a short explanation if the
            PlanarEmbedding is invalid.
        """
        # Check fundamental structure
        for v in self:
            try:
                sorted_nbrs = set(self.neighbors_cw_order(v))
            except KeyError as err:
                msg = f"Bad embedding. Missing orientation for a neighbor of {v}"
                raise nx.NetworkXException(msg) from err

            # Walking the cw pointers must visit exactly the adjacency set.
            unsorted_nbrs = set(self[v])
            if sorted_nbrs != unsorted_nbrs:
                msg = "Bad embedding. Edge orientations not set correctly."
                raise nx.NetworkXException(msg)
            for w in self[v]:
                # Check if opposite half-edge exists
                if not self.has_edge(w, v):
                    msg = "Bad embedding. Opposite half-edge is missing."
                    raise nx.NetworkXException(msg)

        # Check planarity: count faces per component and verify Euler's
        # formula (V - E + F = 2) holds for each connected component.
        counted_half_edges = set()
        for component in nx.connected_components(self):
            if len(component) == 1:
                # Don't need to check single node component
                continue
            num_nodes = len(component)
            num_half_edges = 0
            num_faces = 0
            for v in component:
                for w in self.neighbors_cw_order(v):
                    num_half_edges += 1
                    if (v, w) not in counted_half_edges:
                        # We encountered a new face
                        num_faces += 1
                        # Mark all half-edges belonging to this face
                        self.traverse_face(v, w, counted_half_edges)
            num_edges = num_half_edges // 2  # num_half_edges is even
            if num_nodes - num_edges + num_faces != 2:
                # The result does not match Euler's formula
                msg = "Bad embedding. The graph does not match Euler's formula"
                raise nx.NetworkXException(msg)
1140
+
1141
+ def add_half_edge_ccw(self, start_node, end_node, reference_neighbor):
1142
+ """Adds a half-edge from start_node to end_node.
1143
+
1144
+ The half-edge is added counter clockwise next to the existing half-edge
1145
+ (start_node, reference_neighbor).
1146
+
1147
+ Parameters
1148
+ ----------
1149
+ start_node : node
1150
+ Start node of inserted edge.
1151
+ end_node : node
1152
+ End node of inserted edge.
1153
+ reference_neighbor: node
1154
+ End node of reference edge.
1155
+
1156
+ Raises
1157
+ ------
1158
+ NetworkXException
1159
+ If the reference_neighbor does not exist.
1160
+
1161
+ See Also
1162
+ --------
1163
+ add_half_edge
1164
+ add_half_edge_cw
1165
+ connect_components
1166
+
1167
+ """
1168
+ self.add_half_edge(start_node, end_node, cw=reference_neighbor)
1169
+
1170
+ def add_half_edge_cw(self, start_node, end_node, reference_neighbor):
1171
+ """Adds a half-edge from start_node to end_node.
1172
+
1173
+ The half-edge is added clockwise next to the existing half-edge
1174
+ (start_node, reference_neighbor).
1175
+
1176
+ Parameters
1177
+ ----------
1178
+ start_node : node
1179
+ Start node of inserted edge.
1180
+ end_node : node
1181
+ End node of inserted edge.
1182
+ reference_neighbor: node
1183
+ End node of reference edge.
1184
+
1185
+ Raises
1186
+ ------
1187
+ NetworkXException
1188
+ If the reference_neighbor does not exist.
1189
+
1190
+ See Also
1191
+ --------
1192
+ add_half_edge
1193
+ add_half_edge_ccw
1194
+ connect_components
1195
+ """
1196
+ self.add_half_edge(start_node, end_node, ccw=reference_neighbor)
1197
+
1198
    def remove_edge(self, u, v):
        """Remove the edge between u and v.

        Parameters
        ----------
        u, v : nodes
            Remove the half-edges (u, v) and (v, u) and update the
            edge ordering around the removed edge.

        Raises
        ------
        NetworkXError
            If there is not an edge between u and v.

        See Also
        --------
        remove_edges_from : remove a collection of edges
        """
        try:
            succs_u = self._succ[u]
            succs_v = self._succ[v]
            # Record the rotation neighbors of both half-edges before deletion.
            uv_cw = succs_u[v]["cw"]
            uv_ccw = succs_u[v]["ccw"]
            vu_cw = succs_v[u]["cw"]
            vu_ccw = succs_v[u]["ccw"]
            del succs_u[v]
            del self._pred[v][u]
            del succs_v[u]
            del self._pred[u][v]
            if v != uv_cw:
                # v was not u's only neighbor: close the gap in u's rotation.
                succs_u[uv_cw]["ccw"] = uv_ccw
                succs_u[uv_ccw]["cw"] = uv_cw
            if u != vu_cw:
                # Likewise close the gap in v's rotation.
                succs_v[vu_cw]["ccw"] = vu_ccw
                succs_v[vu_ccw]["cw"] = vu_cw
        except KeyError as err:
            raise nx.NetworkXError(
                f"The edge {u}-{v} is not in the planar embedding."
            ) from err
        nx._clear_cache(self)
1238
+
1239
+ def remove_edges_from(self, ebunch):
1240
+ """Remove all edges specified in ebunch.
1241
+
1242
+ Parameters
1243
+ ----------
1244
+ ebunch: list or container of edge tuples
1245
+ Each pair of half-edges between the nodes given in the tuples
1246
+ will be removed from the graph. The nodes can be passed as:
1247
+
1248
+ - 2-tuples (u, v) half-edges (u, v) and (v, u).
1249
+ - 3-tuples (u, v, k) where k is ignored.
1250
+
1251
+ See Also
1252
+ --------
1253
+ remove_edge : remove a single edge
1254
+
1255
+ Notes
1256
+ -----
1257
+ Will fail silently if an edge in ebunch is not in the graph.
1258
+
1259
+ Examples
1260
+ --------
1261
+ >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
1262
+ >>> ebunch = [(1, 2), (2, 3)]
1263
+ >>> G.remove_edges_from(ebunch)
1264
+ """
1265
+ for e in ebunch:
1266
+ u, v = e[:2] # ignore edge data
1267
+ # assuming that the PlanarEmbedding is valid, if the half_edge
1268
+ # (u, v) is in the graph, then so is half_edge (v, u)
1269
+ if u in self._succ and v in self._succ[u]:
1270
+ self.remove_edge(u, v)
1271
+
1272
+ def connect_components(self, v, w):
1273
+ """Adds half-edges for (v, w) and (w, v) at some position.
1274
+
1275
+ This method should only be called if v and w are in different
1276
+ components, or it might break the embedding.
1277
+ This especially means that if `connect_components(v, w)`
1278
+ is called it is not allowed to call `connect_components(w, v)`
1279
+ afterwards. The neighbor orientations in both directions are
1280
+ all set correctly after the first call.
1281
+
1282
+ Parameters
1283
+ ----------
1284
+ v : node
1285
+ w : node
1286
+
1287
+ See Also
1288
+ --------
1289
+ add_half_edge
1290
+ """
1291
+ if v in self._succ and self._succ[v]:
1292
+ ref = next(reversed(self._succ[v]))
1293
+ else:
1294
+ ref = None
1295
+ self.add_half_edge(v, w, cw=ref)
1296
+ if w in self._succ and self._succ[w]:
1297
+ ref = next(reversed(self._succ[w]))
1298
+ else:
1299
+ ref = None
1300
+ self.add_half_edge(w, v, cw=ref)
1301
+
1302
+ def add_half_edge_first(self, start_node, end_node):
1303
+ """Add a half-edge and set end_node as start_node's leftmost neighbor.
1304
+
1305
+ The new edge is inserted counterclockwise with respect to the current
1306
+ leftmost neighbor, if there is one.
1307
+
1308
+ Parameters
1309
+ ----------
1310
+ start_node : node
1311
+ end_node : node
1312
+
1313
+ See Also
1314
+ --------
1315
+ add_half_edge
1316
+ connect_components
1317
+ """
1318
+ succs = self._succ.get(start_node)
1319
+ # the leftmost neighbor is the last entry in the
1320
+ # self._succ[start_node] dict
1321
+ leftmost_nbr = next(reversed(succs)) if succs else None
1322
+ self.add_half_edge(start_node, end_node, cw=leftmost_nbr)
1323
+
1324
+ def next_face_half_edge(self, v, w):
1325
+ """Returns the following half-edge left of a face.
1326
+
1327
+ Parameters
1328
+ ----------
1329
+ v : node
1330
+ w : node
1331
+
1332
+ Returns
1333
+ -------
1334
+ half-edge : tuple
1335
+ """
1336
+ new_node = self[w][v]["ccw"]
1337
+ return w, new_node
1338
+
1339
    def traverse_face(self, v, w, mark_half_edges=None):
        """Returns nodes on the face that belong to the half-edge (v, w).

        The face that is traversed lies to the right of the half-edge (in an
        orientation where v is below w).

        Optionally it is possible to pass a set to which all encountered half
        edges are added. Before calling this method, this set must not include
        any half-edges that belong to the face.

        Parameters
        ----------
        v : node
            Start node of half-edge.
        w : node
            End node of half-edge.
        mark_half_edges: set, optional
            Set to which all encountered half-edges are added.

        Returns
        -------
        face : list
            A list of nodes that lie on this face.
        """
        if mark_half_edges is None:
            mark_half_edges = set()

        face_nodes = [v]
        mark_half_edges.add((v, w))
        prev_node = v
        cur_node = w
        # Last half-edge is (incoming_node, v)
        incoming_node = self[v][w]["cw"]

        # Walk along successive face half-edges until the closing half-edge
        # (incoming_node, v) is reached again.
        while cur_node != v or prev_node != incoming_node:
            face_nodes.append(cur_node)
            prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node)
            if (prev_node, cur_node) in mark_half_edges:
                # Revisiting a half-edge means the rotation system is corrupt.
                raise nx.NetworkXException("Bad planar embedding. Impossible face.")
            mark_half_edges.add((prev_node, cur_node))

        return face_nodes
1381
+
1382
+ def is_directed(self):
1383
+ """A valid PlanarEmbedding is undirected.
1384
+
1385
+ All reverse edges are contained, i.e. for every existing
1386
+ half-edge (v, w) the half-edge in the opposite direction (w, v) is also
1387
+ contained.
1388
+ """
1389
+ return False
1390
+
1391
    def copy(self, as_view=False):
        """Return a copy of this PlanarEmbedding.

        Parameters
        ----------
        as_view : bool (default False)
            If True, return a read-only graph view of this embedding
            instead of an independent copy.

        Returns
        -------
        PlanarEmbedding
        """
        if as_view is True:
            return nx.graphviews.generic_graph_view(self)
        G = self.__class__()
        G.graph.update(self.graph)
        G.add_nodes_from((n, d.copy()) for n, d in self._node.items())
        # Bypass this class's own add_edges_from so the stored cw/ccw edge
        # attributes are copied verbatim instead of being recomputed.
        super(self.__class__, G).add_edges_from(
            (u, v, datadict.copy())
            for u, nbrs in self._adj.items()
            for v, datadict in nbrs.items()
        )
        return G
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/polynomials.py ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Provides algorithms supporting the computation of graph polynomials.
2
+
3
+ Graph polynomials are polynomial-valued graph invariants that encode a wide
4
+ variety of structural information. Examples include the Tutte polynomial,
5
+ chromatic polynomial, characteristic polynomial, and matching polynomial. An
6
+ extensive treatment is provided in [1]_.
7
+
8
+ For a simple example, the `~sympy.matrices.matrices.MatrixDeterminant.charpoly`
9
+ method can be used to compute the characteristic polynomial from the adjacency
10
+ matrix of a graph. Consider the complete graph ``K_4``:
11
+
12
+ >>> import sympy
13
+ >>> x = sympy.Symbol("x")
14
+ >>> G = nx.complete_graph(4)
15
+ >>> A = nx.adjacency_matrix(G)
16
+ >>> M = sympy.SparseMatrix(A.todense())
17
+ >>> M.charpoly(x).as_expr()
18
+ x**4 - 6*x**2 - 8*x - 3
19
+
20
+
21
+ .. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman,
22
+ "Graph Polynomials"
23
+ """
24
+ from collections import deque
25
+
26
+ import networkx as nx
27
+ from networkx.utils import not_implemented_for
28
+
29
+ __all__ = ["tutte_polynomial", "chromatic_polynomial"]
30
+
31
+
32
@not_implemented_for("directed")
@nx._dispatchable
def tutte_polynomial(G):
    r"""Returns the Tutte polynomial of `G`

    This function computes the Tutte polynomial via an iterative version of
    the deletion-contraction algorithm.

    The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in
    two variables. It encodes a wide array of information related to the
    edge-connectivity of a graph; "Many problems about graphs can be reduced to
    problems of finding and evaluating the Tutte polynomial at certain values" [1]_.
    In fact, every deletion-contraction-expressible feature of a graph is a
    specialization of the Tutte polynomial [2]_ (see Notes for examples).

    There are several equivalent definitions; here are three:

    Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the
    number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of
    `G`, and `c(A)` the number of connected components of the graph with vertex
    set `V` and edge set `A` [3]_:

    .. math::

        T_G(x, y) = \sum_{A \in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}

    Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning
    tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict
    linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of
    $E \setminus T \cup {e}$. An edge `e` is internally active with respect to
    `T` and `L` if `e` is the least edge in `B_e` according to the linear order
    `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges
    in $E \setminus T$ that are internally active with respect to `T` and `L`.
    Let `P_e` be the unique path in $T \cup {e}$ whose source and target vertex
    are the same. An edge `e` is externally active with respect to `T` and `L`
    if `e` is the least edge in `P_e` according to the linear order `L`. The
    external activity of `T` (denoted `e(T)`) is the number of edges in
    $E \setminus T$ that are externally active with respect to `T` and `L`.
    Then [4]_ [5]_:

    .. math::

        T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)}

    Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`
    the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained
    from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`,
    and `l(G)` the number of self-loops of `G`:

    .. math::
        T_G(x, y) = \begin{cases}
            x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\
            T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
        \end{cases}

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the Tutte polynomial for `G`.

    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.tutte_polynomial(C)
    x**4 + x**3 + x**2 + x + y

    >>> D = nx.diamond_graph()
    >>> nx.tutte_polynomial(D)
    x**3 + 2*x**2 + 2*x*y + x + y**2 + y

    Notes
    -----
    Some specializations of the Tutte polynomial:

    - `T_G(1, 1)` counts the number of spanning trees of `G`
    - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`
    - `T_G(2, 1)` counts the number of spanning forests in `G`
    - `T_G(0, 2)` counts the number of strong orientations of `G`
    - `T_G(2, 0)` counts the number of acyclic orientations of `G`

    Edge contraction is defined and deletion-contraction is introduced in [6]_.
    Combinatorial meaning of the coefficients is introduced in [7]_.
    Universality, properties, and applications are discussed in [8]_.

    Practically, up-front computation of the Tutte polynomial may be useful when
    users wish to repeatedly calculate edge-connectivity-related information
    about one or more graphs.

    References
    ----------
    .. [1] M. Brandt,
       "The Tutte Polynomial."
       Talking About Combinatorial Objects Seminar, 2015
       https://math.berkeley.edu/~brandtm/talks/tutte.pdf
    .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto,
       "Computing the Tutte polynomial in vertex-exponential time"
       49th Annual IEEE Symposium on Foundations of Computer Science, 2008
       https://ieeexplore.ieee.org/abstract/document/4691000
    .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman,
       "Graph Polynomials," p. 14
    .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman,
       "Graph Polynomials," p. 46
    .. [5] A. Nešetril, J. Goodall,
       "Graph invariants, homomorphisms, and the Tutte polynomial"
       https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf
    .. [6] D. B. West,
       "Introduction to Graph Theory," p. 84
    .. [7] G. Coutinho,
       "A brief introduction to the Tutte polynomial"
       Structural Analysis of Complex Networks, 2011
       https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf
    .. [8] J. A. Ellis-Monaghan, C. Merino,
       "Graph polynomials and their applications I: The Tutte polynomial"
       Structural Analysis of Complex Networks, 2011
       https://arxiv.org/pdf/0803.3079.pdf
    """
    import sympy

    x = sympy.Symbol("x")
    y = sympy.Symbol("y")
    # Explicit stack replaces the naive recursion of Def 3.
    stack = deque()
    stack.append(nx.MultiGraph(G))

    polynomial = 0
    while stack:
        G = stack.pop()
        bridges = set(nx.bridges(G))

        # Pick an arbitrary edge that is neither a cut-edge nor a self-loop.
        e = None
        for i in G.edges:
            if (i[0], i[1]) not in bridges and i[0] != i[1]:
                e = i
                break
        if not e:
            # Base case: all remaining edges are bridges or self-loops,
            # contributing x^{#bridges} * y^{#loops}.
            loops = list(nx.selfloop_edges(G, keys=True))
            polynomial += x ** len(bridges) * y ** len(loops)
        else:
            # deletion-contraction
            C = nx.contracted_edge(G, e, self_loops=True)
            # Drop the self-loop created by contracting e itself.
            C.remove_edge(e[0], e[0])
            G.remove_edge(*e)
            stack.append(G)
            stack.append(C)
    return sympy.simplify(polynomial)
180
+
181
+
182
@not_implemented_for("directed")
@nx._dispatchable
def chromatic_polynomial(G):
    r"""Returns the chromatic polynomial of `G`

    This function computes the chromatic polynomial via an iterative version of
    the deletion-contraction algorithm.

    The chromatic polynomial `X_G(x)` is a fundamental graph polynomial
    invariant in one variable. Evaluating `X_G(k)` for an natural number `k`
    enumerates the proper k-colorings of `G`.

    There are several equivalent definitions; here are three:

    Def 1 (explicit formula):
    For `G` an undirected graph, `c(G)` the number of connected components of
    `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with
    edge set `S` [1]_:

    .. math::

        X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))}


    Def 2 (interpolating polynomial):
    For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`,
    and `k_i` the number of distinct ways to color the vertices of `G` with `i`
    unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the
    unique Lagrange interpolating polynomial of degree `n(G)` through the points
    `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_.


    Def 3 (chromatic recurrence):
    For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting
    edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)`
    the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:

    .. math::
        X_G(x) = \begin{cases}
            x^{n(G)}, & \text{if $e(G)=0$} \\
            X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$}
        \end{cases}

    This formulation is also known as the Fundamental Reduction Theorem [4]_.


    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the chromatic polynomial for `G`.

    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.chromatic_polynomial(C)
    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x

    >>> G = nx.complete_graph(4)
    >>> nx.chromatic_polynomial(G)
    x**4 - 6*x**3 + 11*x**2 - 6*x

    Notes
    -----
    Interpretation of the coefficients is discussed in [5]_. Several special
    cases are listed in [2]_.

    The chromatic polynomial is a specialization of the Tutte polynomial; in
    particular, ``X_G(x) = T_G(x, 0)`` [6]_.

    The chromatic polynomial may take negative arguments, though evaluations
    may not have chromatic interpretations. For instance, ``X_G(-1)`` enumerates
    the acyclic orientations of `G` [7]_.

    References
    ----------
    .. [1] D. B. West,
       "Introduction to Graph Theory," p. 222
    .. [2] E. W. Weisstein
       "Chromatic Polynomial"
       MathWorld--A Wolfram Web Resource
       https://mathworld.wolfram.com/ChromaticPolynomial.html
    .. [3] D. B. West,
       "Introduction to Graph Theory," p. 221
    .. [4] J. Zhang, J. Goodall,
       "An Introduction to Chromatic Polynomials"
       https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf
    .. [5] R. C. Read,
       "An Introduction to Chromatic Polynomials"
       Journal of Combinatorial Theory, 1968
       https://math.berkeley.edu/~mrklug/ReadChromatic.pdf
    .. [6] W. T. Tutte,
       "Graph-polynomials"
       Advances in Applied Mathematics, 2004
       https://www.sciencedirect.com/science/article/pii/S0196885803000411
    .. [7] R. P. Stanley,
       "Acyclic orientations of graphs"
       Discrete Mathematics, 2006
       https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf
    """
    import sympy

    x = sympy.Symbol("x")
    # Explicit stack replaces the naive recursion of Def 3; the sign
    # (-1)^{contraction_idx} accounts for the subtraction in the recurrence.
    stack = deque()
    stack.append(nx.MultiGraph(G, contraction_idx=0))

    polynomial = 0
    while stack:
        G = stack.pop()
        edges = list(G.edges)
        if not edges:
            # Base case: edgeless graph on |G| vertices contributes x^{n(G)}.
            polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G)
        else:
            e = edges[0]
            C = nx.contracted_edge(G, e, self_loops=True)
            C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1
            # Drop the self-loop created by contracting e itself.
            C.remove_edge(e[0], e[0])
            G.remove_edge(*e)
            stack.append(G)
            stack.append(C)
    return polynomial
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/richclub.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for computing rich-club coefficients."""
2
+
3
+ from itertools import accumulate
4
+
5
+ import networkx as nx
6
+ from networkx.utils import not_implemented_for
7
+
8
+ __all__ = ["rich_club_coefficient"]
9
+
10
+
11
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def rich_club_coefficient(G, normalized=True, Q=100, seed=None):
    r"""Returns the rich-club coefficient of the graph `G`.

    For each degree *k*, the *rich-club coefficient* is the ratio of the
    number of actual to the number of potential edges for nodes with
    degree greater than *k*:

    .. math::

        \phi(k) = \frac{2 E_k}{N_k (N_k - 1)}

    where `N_k` is the number of nodes with degree larger than *k*, and
    `E_k` is the number of edges among those nodes.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph with neither parallel edges nor self-loops.
    normalized : bool (optional)
        Normalize using randomized network as in [1]_
    Q : float (optional, default=100)
        If `normalized` is True, perform `Q * m` double-edge
        swaps, where `m` is the number of edges in `G`, to use as a
        null-model for normalization.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    rc : dictionary
        A dictionary, keyed by degree, with rich-club coefficient values.

    Raises
    ------
    NetworkXError
        If `G` has fewer than four nodes and ``normalized=True``.
        A randomly sampled graph for normalization cannot be generated in this case.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
    >>> rc = nx.rich_club_coefficient(G, normalized=False, seed=42)
    >>> rc[0]
    0.4

    Notes
    -----
    The rich club definition and algorithm are found in [1]_. This
    algorithm ignores any edge weights and is not defined for directed
    graphs or graphs with parallel edges or self loops.

    Normalization is done by computing the rich club coefficient for a randomly
    sampled graph with the same degree distribution as `G` by
    repeatedly swapping the endpoints of existing edges. For graphs with fewer than 4
    nodes, it is not possible to generate a random graph with a prescribed
    degree distribution, as the degree distribution fully determines the graph
    (hence making the coefficients trivially normalized to 1).
    This function raises an exception in this case.

    Estimates for appropriate values of `Q` are found in [2]_.

    References
    ----------
    .. [1] Julian J. McAuley, Luciano da Fontoura Costa,
       and Tibério S. Caetano,
       "The rich-club phenomenon across complex network hierarchies",
       Applied Physics Letters Vol 91 Issue 8, August 2007.
       https://arxiv.org/abs/physics/0701290
    .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon,
       "Uniform generation of random graphs with arbitrary degree
       sequences", 2006. https://arxiv.org/abs/cond-mat/0312028
    """
    # NOTE(review): a plain Exception (not NetworkXError) is raised here;
    # kept as-is so callers catching Exception specifically are unaffected.
    if nx.number_of_selfloops(G) > 0:
        raise Exception(
            "rich_club_coefficient is not implemented for graphs with self loops."
        )
    rc = _compute_rc(G)
    if normalized:
        # make R a copy of G, randomize with Q*|E| double edge swaps
        # and use rich_club coefficient of R to normalize
        R = G.copy()
        E = R.number_of_edges()
        nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed)
        rcran = _compute_rc(R)
        rc = {k: v / rcran[k] for k, v in rc.items()}
    return rc
101
+
102
+
103
def _compute_rc(G):
    """Returns the rich-club coefficient for each degree in the graph
    `G`.

    `G` is an undirected graph without multiedges.

    Returns a dictionary mapping degree to rich-club coefficient for
    that degree.

    """
    deghist = nx.degree_histogram(G)
    total = sum(deghist)
    # Compute the number of nodes with degree greater than `k`, for each
    # degree `k` (omitting the last entry, which is zero).
    nks = (total - cs for cs in accumulate(deghist) if total - cs > 1)
    # Create a sorted list of pairs of edge endpoint degrees.
    #
    # The list is sorted in reverse order so that we can pop from the
    # right side of the list later, instead of popping from the left
    # side of the list, which would have a linear time cost.
    edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True)
    ek = G.number_of_edges()
    if ek == 0:
        # No edges: no rich-club coefficients are defined.
        return {}

    # (k1, k2) is the current edge's (smaller, larger) endpoint degree pair.
    k1, k2 = edge_degrees.pop()
    rc = {}
    for d, nk in enumerate(nks):
        # Discard edges whose smaller endpoint degree is <= d, so that `ek`
        # counts only edges among nodes of degree greater than d.
        while k1 <= d:
            if len(edge_degrees) == 0:
                ek = 0
                break
            k1, k2 = edge_degrees.pop()
            ek -= 1
        rc[d] = 2 * ek / (nk * (nk - 1))
    return rc
+ return rc
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (474 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-310.pyc ADDED
Binary file (7.43 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-310.pyc ADDED
Binary file (7.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-310.pyc ADDED
Binary file (74.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-310.pyc ADDED
Binary file (9.71 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-310.pyc ADDED
Binary file (4.85 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-310.pyc ADDED
Binary file (2.86 kB). View file
 
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/similarity.py ADDED
@@ -0,0 +1,1777 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Functions measuring similarity using graph edit distance.
2
+
3
+ The graph edit distance is the number of edge/node changes needed
4
+ to make two graphs isomorphic.
5
+
6
+ The default algorithm/implementation is sub-optimal for some graphs.
7
+ The problem of finding the exact Graph Edit Distance (GED) is NP-hard
8
+ so it is often slow. If the simple interface `graph_edit_distance`
9
+ takes too long for your graph, try `optimize_graph_edit_distance`
10
+ and/or `optimize_edit_paths`.
11
+
12
+ At the same time, I encourage capable people to investigate
13
+ alternative GED algorithms, in order to improve the choices available.
14
+ """
15
+
16
+ import math
17
+ import time
18
+ import warnings
19
+ from dataclasses import dataclass
20
+ from itertools import product
21
+
22
+ import networkx as nx
23
+ from networkx.utils import np_random_state
24
+
25
+ __all__ = [
26
+ "graph_edit_distance",
27
+ "optimal_edit_paths",
28
+ "optimize_graph_edit_distance",
29
+ "optimize_edit_paths",
30
+ "simrank_similarity",
31
+ "panther_similarity",
32
+ "generate_random_paths",
33
+ ]
34
+
35
+
36
def debug_print(*args, **kwargs):
    """Forward all positional and keyword arguments to :func:`print`.

    Kept as a named indirection so debugging output can be silenced or
    redirected in a single place.
    """
    print(*args, **kwargs)
38
+
39
+
40
@nx._dispatchable(
    graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
)
def graph_edit_distance(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    roots=None,
    upper_bound=None,
    timeout=None,
):
    """Return the GED (graph edit distance) between graphs G1 and G2.

    Graph edit distance is the graph analogue of Levenshtein distance on
    strings: the minimum total cost of a sequence of node and edge edit
    operations (substitutions, deletions, insertions) transforming G1
    into a graph isomorphic to G2.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        Called as ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. with
        the node attribute dictionaries of n1 and n2; returns True if the
        two nodes should be considered equal during matching.

        Ignored if node_subst_cost is specified.  If neither node_match
        nor node_subst_cost are specified then node attributes are not
        considered.

    edge_match : callable
        Called as ``edge_match(G1[u1][v1], G2[u2][v2])``, i.e. with the
        attribute dictionaries of the edges under consideration; returns
        True if the two edges should be considered equal during matching.

        Ignored if edge_subst_cost is specified.  If neither edge_match
        nor edge_subst_cost are specified then edge attributes are not
        considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Cost functions for node substitution, deletion and insertion,
        called with node attribute dictionaries:

            node_subst_cost(G1.nodes[n1], G2.nodes[n2]),
            node_del_cost(G1.nodes[n1]),
            node_ins_cost(G2.nodes[n2]).

        Each must return a positive numeric value.  node_subst_cost
        overrides node_match if specified; when neither is given the
        default substitution cost is 0 (node attributes ignored).
        Deletion and insertion each default to cost 1 when their
        function is not supplied.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Cost functions for edge substitution, deletion and insertion,
        called with edge attribute dictionaries:

            edge_subst_cost(G1[u1][v1], G2[u2][v2]),
            edge_del_cost(G1[u1][v1]),
            edge_ins_cost(G2[u2][v2]).

        Each must return a positive numeric value.  edge_subst_cost
        overrides edge_match if specified; when neither is given the
        default substitution cost is 0 (edge attributes ignored).
        Deletion and insertion each default to cost 1 when their
        function is not supplied.

    roots : 2-tuple
        Tuple whose first element is a node in G1 and whose second is a
        node in G2.  These nodes are forced to be matched, which allows
        comparison between rooted graphs.

    upper_bound : numeric
        Maximum edit distance to consider.  Return None if no edit
        distance under or equal to upper_bound exists.

    timeout : numeric
        Maximum number of seconds to execute.  After the timeout is met,
        the current best GED is returned.

    Examples
    --------
    >>> G1 = nx.cycle_graph(6)
    >>> G2 = nx.wheel_graph(7)
    >>> nx.graph_edit_distance(G1, G2)
    7.0

    >>> G1 = nx.star_graph(5)
    >>> G2 = nx.star_graph(5)
    >>> nx.graph_edit_distance(G1, G2, roots=(0, 0))
    0.0
    >>> nx.graph_edit_distance(G1, G2, roots=(1, 0))
    8.0

    See Also
    --------
    optimal_edit_paths, optimize_graph_edit_distance,

    is_isomorphic: test for graph edit distance of 0

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816

    """
    # With strictly_decreasing=True each yielded cost improves on the
    # previous one, so the last cost seen is the best distance found.
    path_generator = optimize_edit_paths(
        G1,
        G2,
        node_match,
        edge_match,
        node_subst_cost,
        node_del_cost,
        node_ins_cost,
        edge_subst_cost,
        edge_del_cost,
        edge_ins_cost,
        upper_bound,
        True,
        roots,
        timeout,
    )
    best = None
    for _node_path, _edge_path, cost in path_generator:
        best = cost
    return best
212
+
213
+
214
@nx._dispatchable(graphs={"G1": 0, "G2": 1})
def optimal_edit_paths(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    upper_bound=None,
):
    """Return all minimum-cost edit paths transforming G1 to G2.

    A graph edit path is a sequence of node and edge edit operations
    (substitutions, deletions, insertions) transforming graph G1 to a
    graph isomorphic to G2.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        Called as ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. with
        the node attribute dictionaries of n1 and n2; returns True if the
        two nodes should be considered equal during matching.

        Ignored if node_subst_cost is specified.  If neither node_match
        nor node_subst_cost are specified then node attributes are not
        considered.

    edge_match : callable
        Called as ``edge_match(G1[u1][v1], G2[u2][v2])``, i.e. with the
        attribute dictionaries of the edges under consideration; returns
        True if the two edges should be considered equal during matching.

        Ignored if edge_subst_cost is specified.  If neither edge_match
        nor edge_subst_cost are specified then edge attributes are not
        considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Cost functions for node substitution, deletion and insertion,
        called with node attribute dictionaries:

            node_subst_cost(G1.nodes[n1], G2.nodes[n2]),
            node_del_cost(G1.nodes[n1]),
            node_ins_cost(G2.nodes[n2]).

        Each must return a positive numeric value.  node_subst_cost
        overrides node_match if specified; when neither is given the
        default substitution cost is 0 (node attributes ignored).
        Deletion and insertion each default to cost 1 when their
        function is not supplied.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Cost functions for edge substitution, deletion and insertion,
        called with edge attribute dictionaries:

            edge_subst_cost(G1[u1][v1], G2[u2][v2]),
            edge_del_cost(G1[u1][v1]),
            edge_ins_cost(G2[u2][v2]).

        Each must return a positive numeric value.  edge_subst_cost
        overrides edge_match if specified; when neither is given the
        default substitution cost is 0 (edge attributes ignored).
        Deletion and insertion each default to cost 1 when their
        function is not supplied.

    upper_bound : numeric
        Maximum edit distance to consider.

    Returns
    -------
    edit_paths : list of tuples (node_edit_path, edge_edit_path)
        node_edit_path : list of tuples (u, v)
        edge_edit_path : list of tuples ((u1, v1), (u2, v2))

    cost : numeric
        Optimal edit path cost (graph edit distance).  When the cost
        is zero, it indicates that `G1` and `G2` are isomorphic.

    Examples
    --------
    >>> G1 = nx.cycle_graph(4)
    >>> G2 = nx.wheel_graph(5)
    >>> paths, cost = nx.optimal_edit_paths(G1, G2)
    >>> len(paths)
    40
    >>> cost
    5.0

    Notes
    -----
    To transform `G1` into a graph isomorphic to `G2`, apply the node
    and edge edits in the returned ``edit_paths``.
    In the case of isomorphic graphs, the cost is zero, and the paths
    represent different isomorphic mappings (isomorphisms). That is, the
    edits involve renaming nodes and edges to match the structure of `G2`.

    See Also
    --------
    graph_edit_distance, optimize_edit_paths

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816

    """
    best_paths = []
    best_cost = None
    for node_path, edge_path, cost in optimize_edit_paths(
        G1,
        G2,
        node_match,
        edge_match,
        node_subst_cost,
        node_del_cost,
        node_ins_cost,
        edge_subst_cost,
        edge_del_cost,
        edge_ins_cost,
        upper_bound,
        False,
    ):
        # Yielded costs never increase; a strictly lower cost makes every
        # path collected so far obsolete.
        if best_cost is not None and cost < best_cost:
            best_paths.clear()
        best_paths.append((node_path, edge_path))
        best_cost = cost
    return best_paths, best_cost
384
+
385
+
386
@nx._dispatchable(graphs={"G1": 0, "G2": 1})
def optimize_graph_edit_distance(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    upper_bound=None,
):
    """Yield consecutive approximations of GED (graph edit distance)
    between graphs G1 and G2.

    Graph edit distance is the graph analogue of Levenshtein distance on
    strings: the minimum total cost of a sequence of node and edge edit
    operations (substitutions, deletions, insertions) transforming G1
    into a graph isomorphic to G2.

    Parameters
    ----------
    G1, G2: graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        Called as ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. with
        the node attribute dictionaries of n1 and n2; returns True if the
        two nodes should be considered equal during matching.

        Ignored if node_subst_cost is specified.  If neither node_match
        nor node_subst_cost are specified then node attributes are not
        considered.

    edge_match : callable
        Called as ``edge_match(G1[u1][v1], G2[u2][v2])``, i.e. with the
        attribute dictionaries of the edges under consideration; returns
        True if the two edges should be considered equal during matching.

        Ignored if edge_subst_cost is specified.  If neither edge_match
        nor edge_subst_cost are specified then edge attributes are not
        considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Cost functions for node substitution, deletion and insertion,
        called with node attribute dictionaries:

            node_subst_cost(G1.nodes[n1], G2.nodes[n2]),
            node_del_cost(G1.nodes[n1]),
            node_ins_cost(G2.nodes[n2]).

        Each must return a positive numeric value.  node_subst_cost
        overrides node_match if specified; when neither is given the
        default substitution cost is 0 (node attributes ignored).
        Deletion and insertion each default to cost 1 when their
        function is not supplied.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Cost functions for edge substitution, deletion and insertion,
        called with edge attribute dictionaries:

            edge_subst_cost(G1[u1][v1], G2[u2][v2]),
            edge_del_cost(G1[u1][v1]),
            edge_ins_cost(G2[u2][v2]).

        Each must return a positive numeric value.  edge_subst_cost
        overrides edge_match if specified; when neither is given the
        default substitution cost is 0 (edge attributes ignored).
        Deletion and insertion each default to cost 1 when their
        function is not supplied.

    upper_bound : numeric
        Maximum edit distance to consider.

    Returns
    -------
    Generator of consecutive approximations of graph edit distance.

    Examples
    --------
    >>> G1 = nx.cycle_graph(6)
    >>> G2 = nx.wheel_graph(7)
    >>> for v in nx.optimize_graph_edit_distance(G1, G2):
    ...     minv = v
    >>> minv
    7.0

    See Also
    --------
    graph_edit_distance, optimize_edit_paths

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816
    """
    # Forward only the cost component of each strictly improving edit path.
    yield from (
        cost
        for _node_path, _edge_path, cost in optimize_edit_paths(
            G1,
            G2,
            node_match,
            edge_match,
            node_subst_cost,
            node_del_cost,
            node_ins_cost,
            edge_subst_cost,
            edge_del_cost,
            edge_ins_cost,
            upper_bound,
            True,
        )
    )
535
+
536
+
537
+ @nx._dispatchable(
538
+ graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
539
+ )
540
+ def optimize_edit_paths(
541
+ G1,
542
+ G2,
543
+ node_match=None,
544
+ edge_match=None,
545
+ node_subst_cost=None,
546
+ node_del_cost=None,
547
+ node_ins_cost=None,
548
+ edge_subst_cost=None,
549
+ edge_del_cost=None,
550
+ edge_ins_cost=None,
551
+ upper_bound=None,
552
+ strictly_decreasing=True,
553
+ roots=None,
554
+ timeout=None,
555
+ ):
556
+ """GED (graph edit distance) calculation: advanced interface.
557
+
558
+ Graph edit path is a sequence of node and edge edit operations
559
+ transforming graph G1 to graph isomorphic to G2. Edit operations
560
+ include substitutions, deletions, and insertions.
561
+
562
+ Graph edit distance is defined as minimum cost of edit path.
563
+
564
+ Parameters
565
+ ----------
566
+ G1, G2: graphs
567
+ The two graphs G1 and G2 must be of the same type.
568
+
569
+ node_match : callable
570
+ A function that returns True if node n1 in G1 and n2 in G2
571
+ should be considered equal during matching.
572
+
573
+ The function will be called like
574
+
575
+ node_match(G1.nodes[n1], G2.nodes[n2]).
576
+
577
+ That is, the function will receive the node attribute
578
+ dictionaries for n1 and n2 as inputs.
579
+
580
+ Ignored if node_subst_cost is specified. If neither
581
+ node_match nor node_subst_cost are specified then node
582
+ attributes are not considered.
583
+
584
+ edge_match : callable
585
+ A function that returns True if the edge attribute dictionaries
586
+ for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
587
+ be considered equal during matching.
588
+
589
+ The function will be called like
590
+
591
+ edge_match(G1[u1][v1], G2[u2][v2]).
592
+
593
+ That is, the function will receive the edge attribute
594
+ dictionaries of the edges under consideration.
595
+
596
+ Ignored if edge_subst_cost is specified. If neither
597
+ edge_match nor edge_subst_cost are specified then edge
598
+ attributes are not considered.
599
+
600
+ node_subst_cost, node_del_cost, node_ins_cost : callable
601
+ Functions that return the costs of node substitution, node
602
+ deletion, and node insertion, respectively.
603
+
604
+ The functions will be called like
605
+
606
+ node_subst_cost(G1.nodes[n1], G2.nodes[n2]),
607
+ node_del_cost(G1.nodes[n1]),
608
+ node_ins_cost(G2.nodes[n2]).
609
+
610
+ That is, the functions will receive the node attribute
611
+ dictionaries as inputs. The functions are expected to return
612
+ positive numeric values.
613
+
614
+ Function node_subst_cost overrides node_match if specified.
615
+ If neither node_match nor node_subst_cost are specified then
616
+ default node substitution cost of 0 is used (node attributes
617
+ are not considered during matching).
618
+
619
+ If node_del_cost is not specified then default node deletion
620
+ cost of 1 is used. If node_ins_cost is not specified then
621
+ default node insertion cost of 1 is used.
622
+
623
+ edge_subst_cost, edge_del_cost, edge_ins_cost : callable
624
+ Functions that return the costs of edge substitution, edge
625
+ deletion, and edge insertion, respectively.
626
+
627
+ The functions will be called like
628
+
629
+ edge_subst_cost(G1[u1][v1], G2[u2][v2]),
630
+ edge_del_cost(G1[u1][v1]),
631
+ edge_ins_cost(G2[u2][v2]).
632
+
633
+ That is, the functions will receive the edge attribute
634
+ dictionaries as inputs. The functions are expected to return
635
+ positive numeric values.
636
+
637
+ Function edge_subst_cost overrides edge_match if specified.
638
+ If neither edge_match nor edge_subst_cost are specified then
639
+ default edge substitution cost of 0 is used (edge attributes
640
+ are not considered during matching).
641
+
642
+ If edge_del_cost is not specified then default edge deletion
643
+ cost of 1 is used. If edge_ins_cost is not specified then
644
+ default edge insertion cost of 1 is used.
645
+
646
+ upper_bound : numeric
647
+ Maximum edit distance to consider.
648
+
649
+ strictly_decreasing : bool
650
+ If True, return consecutive approximations of strictly
651
+ decreasing cost. Otherwise, return all edit paths of cost
652
+ less than or equal to the previous minimum cost.
653
+
654
+ roots : 2-tuple
655
+ Tuple where first element is a node in G1 and the second
656
+ is a node in G2.
657
+ These nodes are forced to be matched in the comparison to
658
+ allow comparison between rooted graphs.
659
+
660
+ timeout : numeric
661
+ Maximum number of seconds to execute.
662
+ After timeout is met, the current best GED is returned.
663
+
664
+ Returns
665
+ -------
666
+ Generator of tuples (node_edit_path, edge_edit_path, cost)
667
+ node_edit_path : list of tuples (u, v)
668
+ edge_edit_path : list of tuples ((u1, v1), (u2, v2))
669
+ cost : numeric
670
+
671
+ See Also
672
+ --------
673
+ graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths
674
+
675
+ References
676
+ ----------
677
+ .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
678
+ Martineau. An Exact Graph Edit Distance Algorithm for Solving
679
+ Pattern Recognition Problems. 4th International Conference on
680
+ Pattern Recognition Applications and Methods 2015, Jan 2015,
681
+ Lisbon, Portugal. 2015,
682
+ <10.5220/0005209202710278>. <hal-01168816>
683
+ https://hal.archives-ouvertes.fr/hal-01168816
684
+
685
+ """
686
+ # TODO: support DiGraph
687
+
688
+ import numpy as np
689
+ import scipy as sp
690
+
691
+ @dataclass
692
+ class CostMatrix:
693
+ C: ...
694
+ lsa_row_ind: ...
695
+ lsa_col_ind: ...
696
+ ls: ...
697
+
698
+ def make_CostMatrix(C, m, n):
699
+ # assert(C.shape == (m + n, m + n))
700
+ lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C)
701
+
702
+ # Fixup dummy assignments:
703
+ # each substitution i<->j should have dummy assignment m+j<->n+i
704
+ # NOTE: fast reduce of Cv relies on it
705
+ # assert len(lsa_row_ind) == len(lsa_col_ind)
706
+ indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
707
+ subst_ind = [k for k, i, j in indexes if i < m and j < n]
708
+ indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
709
+ dummy_ind = [k for k, i, j in indexes if i >= m and j >= n]
710
+ # assert len(subst_ind) == len(dummy_ind)
711
+ lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m
712
+ lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n
713
+
714
+ return CostMatrix(
715
+ C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum()
716
+ )
717
+
718
+ def extract_C(C, i, j, m, n):
719
+ # assert(C.shape == (m + n, m + n))
720
+ row_ind = [k in i or k - m in j for k in range(m + n)]
721
+ col_ind = [k in j or k - n in i for k in range(m + n)]
722
+ return C[row_ind, :][:, col_ind]
723
+
724
+ def reduce_C(C, i, j, m, n):
725
+ # assert(C.shape == (m + n, m + n))
726
+ row_ind = [k not in i and k - m not in j for k in range(m + n)]
727
+ col_ind = [k not in j and k - n not in i for k in range(m + n)]
728
+ return C[row_ind, :][:, col_ind]
729
+
730
+ def reduce_ind(ind, i):
731
+ # assert set(ind) == set(range(len(ind)))
732
+ rind = ind[[k not in i for k in ind]]
733
+ for k in set(i):
734
+ rind[rind >= k] -= 1
735
+ return rind
736
+
737
+ def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None):
738
+ """
739
+ Parameters:
740
+ u, v: matched vertices, u=None or v=None for
741
+ deletion/insertion
742
+ pending_g, pending_h: lists of edges not yet mapped
743
+ Ce: CostMatrix of pending edge mappings
744
+ matched_uv: partial vertex edit path
745
+ list of tuples (u, v) of previously matched vertex
746
+ mappings u<->v, u=None or v=None for
747
+ deletion/insertion
748
+
749
+ Returns:
750
+ list of (i, j): indices of edge mappings g<->h
751
+ localCe: local CostMatrix of edge mappings
752
+ (basically submatrix of Ce at cross of rows i, cols j)
753
+ """
754
+ M = len(pending_g)
755
+ N = len(pending_h)
756
+ # assert Ce.C.shape == (M + N, M + N)
757
+
758
+ # only attempt to match edges after one node match has been made
759
+ # this will stop self-edges on the first node being automatically deleted
760
+ # even when a substitution is the better option
761
+ if matched_uv is None or len(matched_uv) == 0:
762
+ g_ind = []
763
+ h_ind = []
764
+ else:
765
+ g_ind = [
766
+ i
767
+ for i in range(M)
768
+ if pending_g[i][:2] == (u, u)
769
+ or any(
770
+ pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv
771
+ )
772
+ ]
773
+ h_ind = [
774
+ j
775
+ for j in range(N)
776
+ if pending_h[j][:2] == (v, v)
777
+ or any(
778
+ pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv
779
+ )
780
+ ]
781
+
782
+ m = len(g_ind)
783
+ n = len(h_ind)
784
+
785
+ if m or n:
786
+ C = extract_C(Ce.C, g_ind, h_ind, M, N)
787
+ # assert C.shape == (m + n, m + n)
788
+
789
+ # Forbid structurally invalid matches
790
+ # NOTE: inf remembered from Ce construction
791
+ for k, i in enumerate(g_ind):
792
+ g = pending_g[i][:2]
793
+ for l, j in enumerate(h_ind):
794
+ h = pending_h[j][:2]
795
+ if nx.is_directed(G1) or nx.is_directed(G2):
796
+ if any(
797
+ g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q)
798
+ for p, q in matched_uv
799
+ ):
800
+ continue
801
+ else:
802
+ if any(
803
+ g in ((p, u), (u, p)) and h in ((q, v), (v, q))
804
+ for p, q in matched_uv
805
+ ):
806
+ continue
807
+ if g == (u, u) or any(g == (p, p) for p, q in matched_uv):
808
+ continue
809
+ if h == (v, v) or any(h == (q, q) for p, q in matched_uv):
810
+ continue
811
+ C[k, l] = inf
812
+
813
+ localCe = make_CostMatrix(C, m, n)
814
+ ij = [
815
+ (
816
+ g_ind[k] if k < m else M + h_ind[l],
817
+ h_ind[l] if l < n else N + g_ind[k],
818
+ )
819
+ for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind)
820
+ if k < m or l < n
821
+ ]
822
+
823
+ else:
824
+ ij = []
825
+ localCe = CostMatrix(np.empty((0, 0)), [], [], 0)
826
+
827
+ return ij, localCe
828
+
829
+ def reduce_Ce(Ce, ij, m, n):
830
+ if len(ij):
831
+ i, j = zip(*ij)
832
+ m_i = m - sum(1 for t in i if t < m)
833
+ n_j = n - sum(1 for t in j if t < n)
834
+ return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j)
835
+ return Ce
836
+
837
    def get_edit_ops(
        matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost
    ):
        """Generate candidate vertex-mapping edit operations, most promising first.

        Parameters:
            matched_uv: partial vertex edit path
                list of tuples (u, v) of vertex mappings u<->v,
                u=None or v=None for deletion/insertion
            pending_u, pending_v: lists of vertices not yet mapped
            Cv: CostMatrix of pending vertex mappings
            pending_g, pending_h: lists of edges not yet mapped
            Ce: CostMatrix of pending edge mappings
            matched_cost: cost of partial edit path

        Returns:
            sequence of
            (i, j): indices of vertex mapping u<->v
            Cv_ij: reduced CostMatrix of pending vertex mappings
                (basically Cv with row i, col j removed)
            list of (x, y): indices of edge mappings g<->h
            Ce_xy: reduced CostMatrix of pending edge mappings
                (basically Ce with rows x, cols y removed)
            cost: total cost of edit operation
            NOTE: most promising ops first
        """
        m = len(pending_u)
        n = len(pending_v)
        # assert Cv.C.shape == (m + n, m + n)

        # 1) a vertex mapping from optimal linear sum assignment
        # (min picks the lexicographically smallest valid LSA pair)
        i, j = min(
            (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n
        )
        # Edge mappings induced by mapping vertex i to vertex j.
        xy, localCe = match_edges(
            pending_u[i] if i < m else None,
            pending_v[j] if j < n else None,
            pending_g,
            pending_h,
            Ce,
            matched_uv,
        )
        Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
        # assert Ce.ls <= localCe.ls + Ce_xy.ls
        if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls):
            pass
        else:
            # get reduced Cv efficiently (reuse the existing LSA solution
            # instead of re-solving from scratch)
            Cv_ij = CostMatrix(
                reduce_C(Cv.C, (i,), (j,), m, n),
                reduce_ind(Cv.lsa_row_ind, (i, m + j)),
                reduce_ind(Cv.lsa_col_ind, (j, n + i)),
                Cv.ls - Cv.C[i, j],
            )
            yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls

        # 2) other candidates, sorted by lower-bound cost estimate
        other = []
        fixed_i, fixed_j = i, j
        # Branch over the shorter dimension to keep the candidate set small.
        if m <= n:
            candidates = (
                (t, fixed_j)
                for t in range(m + n)
                if t != fixed_i and (t < m or t == m + fixed_j)
            )
        else:
            candidates = (
                (fixed_i, t)
                for t in range(m + n)
                if t != fixed_j and (t < n or t == n + fixed_i)
            )
        for i, j in candidates:
            # Successive prune() calls use increasingly tight (and
            # increasingly expensive) lower bounds.
            if prune(matched_cost + Cv.C[i, j] + Ce.ls):
                continue
            Cv_ij = make_CostMatrix(
                reduce_C(Cv.C, (i,), (j,), m, n),
                m - 1 if i < m else m,
                n - 1 if j < n else n,
            )
            # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls
            if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls):
                continue
            xy, localCe = match_edges(
                pending_u[i] if i < m else None,
                pending_v[j] if j < n else None,
                pending_g,
                pending_h,
                Ce,
                matched_uv,
            )
            if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls):
                continue
            Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
            # assert Ce.ls <= localCe.ls + Ce_xy.ls
            if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls):
                continue
            other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls))

        # Emit remaining candidates ordered by their lower-bound total cost.
        yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls)
935
+
936
    def get_edit_paths(
        matched_uv,
        pending_u,
        pending_v,
        Cv,
        matched_gh,
        pending_g,
        pending_h,
        Ce,
        matched_cost,
    ):
        """Recursively extend the partial edit path, yielding complete paths.

        Parameters:
            matched_uv: partial vertex edit path
                list of tuples (u, v) of vertex mappings u<->v,
                u=None or v=None for deletion/insertion
            pending_u, pending_v: lists of vertices not yet mapped
            Cv: CostMatrix of pending vertex mappings
            matched_gh: partial edge edit path
                list of tuples (g, h) of edge mappings g<->h,
                g=None or h=None for deletion/insertion
            pending_g, pending_h: lists of edges not yet mapped
            Ce: CostMatrix of pending edge mappings
            matched_cost: cost of partial edit path

        Returns:
            sequence of (vertex_path, edge_path, cost)
            vertex_path: complete vertex edit path
                list of tuples (u, v) of vertex mappings u<->v,
                u=None or v=None for deletion/insertion
            edge_path: complete edge edit path
                list of tuples (g, h) of edge mappings g<->h,
                g=None or h=None for deletion/insertion
            cost: total cost of edit path
            NOTE: path costs are non-increasing
        """
        # debug_print('matched-uv:', matched_uv)
        # debug_print('matched-gh:', matched_gh)
        # debug_print('matched-cost:', matched_cost)
        # debug_print('pending-u:', pending_u)
        # debug_print('pending-v:', pending_v)
        # debug_print(Cv.C)
        # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u))
        # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v))
        # debug_print('pending-g:', pending_g)
        # debug_print('pending-h:', pending_h)
        # debug_print(Ce.C)
        # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g))
        # assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h))
        # debug_print()

        # Lower bound on any completion of this partial path.
        if prune(matched_cost + Cv.ls + Ce.ls):
            return

        if not max(len(pending_u), len(pending_v)):
            # assert not len(pending_g)
            # assert not len(pending_h)
            # path completed!
            # assert matched_cost <= maxcost_value
            # Tighten the global bound used by prune() for later branches.
            nonlocal maxcost_value
            maxcost_value = min(maxcost_value, matched_cost)
            yield matched_uv, matched_gh, matched_cost

        else:
            edit_ops = get_edit_ops(
                matched_uv,
                pending_u,
                pending_v,
                Cv,
                pending_g,
                pending_h,
                Ce,
                matched_cost,
            )
            for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops:
                i, j = ij
                # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost
                if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls):
                    continue

                # dive deeper: apply the vertex op (index >= list length
                # encodes a deletion/insertion, mapped to None) ...
                u = pending_u.pop(i) if i < len(pending_u) else None
                v = pending_v.pop(j) if j < len(pending_v) else None
                matched_uv.append((u, v))
                # ... and the induced edge ops.
                for x, y in xy:
                    len_g = len(pending_g)
                    len_h = len(pending_h)
                    matched_gh.append(
                        (
                            pending_g[x] if x < len_g else None,
                            pending_h[y] if y < len_h else None,
                        )
                    )
                sortedx = sorted(x for x, y in xy)
                sortedy = sorted(y for x, y in xy)
                # Pop in descending index order so earlier indices stay valid.
                G = [
                    (pending_g.pop(x) if x < len(pending_g) else None)
                    for x in reversed(sortedx)
                ]
                H = [
                    (pending_h.pop(y) if y < len(pending_h) else None)
                    for y in reversed(sortedy)
                ]

                yield from get_edit_paths(
                    matched_uv,
                    pending_u,
                    pending_v,
                    Cv_ij,
                    matched_gh,
                    pending_g,
                    pending_h,
                    Ce_xy,
                    matched_cost + edit_cost,
                )

                # backtrack: restore pending lists and partial paths exactly
                # as they were before this op (inserts in ascending order).
                if u is not None:
                    pending_u.insert(i, u)
                if v is not None:
                    pending_v.insert(j, v)
                matched_uv.pop()
                for x, g in zip(sortedx, reversed(G)):
                    if g is not None:
                        pending_g.insert(x, g)
                for y, h in zip(sortedy, reversed(H)):
                    if h is not None:
                        pending_h.insert(y, h)
                for _ in xy:
                    matched_gh.pop()
1066
+
1067
    # Initialization

    pending_u = list(G1.nodes)
    pending_v = list(G2.nodes)

    initial_cost = 0
    if roots:
        root_u, root_v = roots
        if root_u not in pending_u or root_v not in pending_v:
            raise nx.NodeNotFound("Root node not in graph.")

        # remove roots from pending: their mapping is fixed up front.
        pending_u.remove(root_u)
        pending_v.remove(root_v)

    # cost matrix of vertex mappings, laid out as:
    #   [0:m, 0:n]      substitution costs
    #   [0:m, n:n+m]    deletion costs on the diagonal, inf elsewhere
    #   [m:m+n, 0:n]    insertion costs on the diagonal, inf elsewhere
    m = len(pending_u)
    n = len(pending_v)
    C = np.zeros((m + n, m + n))
    if node_subst_cost:
        C[0:m, 0:n] = np.array(
            [
                node_subst_cost(G1.nodes[u], G2.nodes[v])
                for u in pending_u
                for v in pending_v
            ]
        ).reshape(m, n)
        if roots:
            initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v])
    elif node_match:
        # Boolean match is converted to a 0/1 substitution cost.
        C[0:m, 0:n] = np.array(
            [
                1 - int(node_match(G1.nodes[u], G2.nodes[v]))
                for u in pending_u
                for v in pending_v
            ]
        ).reshape(m, n)
        if roots:
            initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v])
    else:
        # all zeroes
        pass
    # assert not min(m, n) or C[0:m, 0:n].min() >= 0
    if node_del_cost:
        del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u]
    else:
        del_costs = [1] * len(pending_u)
    # assert not m or min(del_costs) >= 0
    if node_ins_cost:
        ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v]
    else:
        ins_costs = [1] * len(pending_v)
    # assert not n or min(ins_costs) >= 0
    # ``inf`` is a finite sentinel strictly larger than any feasible total
    # cost, marking forbidden assignments.
    inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
    C[0:m, n : n + m] = np.array(
        [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
    ).reshape(m, m)
    C[m : m + n, 0:n] = np.array(
        [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
    ).reshape(n, n)
    Cv = make_CostMatrix(C, m, n)
    # debug_print(f"Cv: {m} x {n}")
    # debug_print(Cv.C)

    pending_g = list(G1.edges)
    pending_h = list(G2.edges)

    # cost matrix of edge mappings (same layout as the vertex matrix above)
    m = len(pending_g)
    n = len(pending_h)
    C = np.zeros((m + n, m + n))
    if edge_subst_cost:
        C[0:m, 0:n] = np.array(
            [
                edge_subst_cost(G1.edges[g], G2.edges[h])
                for g in pending_g
                for h in pending_h
            ]
        ).reshape(m, n)
    elif edge_match:
        C[0:m, 0:n] = np.array(
            [
                1 - int(edge_match(G1.edges[g], G2.edges[h]))
                for g in pending_g
                for h in pending_h
            ]
        ).reshape(m, n)
    else:
        # all zeroes
        pass
    # assert not min(m, n) or C[0:m, 0:n].min() >= 0
    if edge_del_cost:
        del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g]
    else:
        del_costs = [1] * len(pending_g)
    # assert not m or min(del_costs) >= 0
    if edge_ins_cost:
        ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h]
    else:
        ins_costs = [1] * len(pending_h)
    # assert not n or min(ins_costs) >= 0
    inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
    C[0:m, n : n + m] = np.array(
        [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
    ).reshape(m, m)
    C[m : m + n, 0:n] = np.array(
        [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
    ).reshape(n, n)
    Ce = make_CostMatrix(C, m, n)
    # debug_print(f'Ce: {m} x {n}')
    # debug_print(Ce.C)
    # debug_print()

    # Initial upper bound on the best complete path cost, tightened by
    # get_edit_paths() as solutions are found.
    maxcost_value = Cv.C.sum() + Ce.C.sum() + 1

    if timeout is not None:
        if timeout <= 0:
            raise nx.NetworkXError("Timeout value must be greater than 0")
        start = time.perf_counter()
1186
+
1187
+ def prune(cost):
1188
+ if timeout is not None:
1189
+ if time.perf_counter() - start > timeout:
1190
+ return True
1191
+ if upper_bound is not None:
1192
+ if cost > upper_bound:
1193
+ return True
1194
+ if cost > maxcost_value:
1195
+ return True
1196
+ if strictly_decreasing and cost >= maxcost_value:
1197
+ return True
1198
+ return False
1199
+
1200
    # Now go!

    # The root mapping (if any) is pre-seeded into the partial vertex path.
    done_uv = [] if roots is None else [roots]

    for vertex_path, edge_path, cost in get_edit_paths(
        done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost
    ):
        # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None)
        # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None)
        # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None)
        # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None)
        # print(vertex_path, edge_path, cost, file = sys.stderr)
        # assert cost == maxcost_value
        # Copy the paths before yielding: the search mutates these lists
        # in place while backtracking.
        yield list(vertex_path), list(edge_path), float(cost)
1214
+
1215
+
1216
@nx._dispatchable
def simrank_similarity(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Returns the SimRank similarity of nodes in the graph ``G``.

    SimRank rests on the intuition that "two objects are considered
    to be similar if they are referenced by similar objects" [1]_.
    The algorithm itself is defined in [2]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph

    source : node
        If given, the returned dictionary maps each node ``v`` in the
        graph to the similarity between ``source`` and ``v``.

    target : node
        If both ``source`` and ``target`` are given, only the
        similarity between them is returned.  ``target`` without
        ``source`` is ignored.

    importance_factor : float
        The relative importance of indirect neighbors with respect to
        direct neighbors.

    max_iterations : integer
        Maximum number of iterations.

    tolerance : float
        Error tolerance used to check convergence.  The iteration stops
        once no similarity value changes by more than this amount.

    Returns
    -------
    similarity : dictionary or float
        A dict of dicts keyed by node pairs when neither ``source`` nor
        ``target`` is given; a dict keyed by node when only ``source``
        is given; a single float when both are given.

    Raises
    ------
    ExceededMaxIterations
        If the algorithm does not converge within ``max_iterations``.

    NodeNotFound
        If either ``source`` or ``target`` is not in `G`.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.simrank_similarity(G)
    {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}}
    >>> nx.simrank_similarity(G, source=0)
    {0: 1.0, 1: 0.0}
    >>> nx.simrank_similarity(G, source=0, target=0)
    1.0

    The result can be converted to a numpy array using the node order
    of the graph for rows and columns:

    >>> import numpy as np
    >>> sim = nx.simrank_similarity(G)
    >>> np.array([[sim[u][v] for v in G] for u in G])
    array([[1., 0.],
           [0., 1.]])

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/SimRank
    .. [2] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    import numpy as np

    nodes = list(G)

    # Translate node labels into positional indices for the numpy
    # backend, validating membership along the way.
    s_indx = None
    if source is not None:
        if source not in nodes:
            raise nx.NodeNotFound(f"Source node {source} not in G")
        s_indx = nodes.index(source)

    t_indx = None
    if target is not None:
        if target not in nodes:
            raise nx.NodeNotFound(f"Target node {target} not in G")
        t_indx = nodes.index(target)

    result = _simrank_similarity_numpy(
        G, s_indx, t_indx, importance_factor, max_iterations, tolerance
    )

    # Convert the numpy result back into plain Python containers.
    if not isinstance(result, np.ndarray):
        return float(result)
    if result.ndim == 1:
        return dict(zip(G, result.tolist()))
    return {u: dict(zip(G, row)) for u, row in zip(G, result.tolist())}
1358
+
1359
+
1360
+ def _simrank_similarity_python(
1361
+ G,
1362
+ source=None,
1363
+ target=None,
1364
+ importance_factor=0.9,
1365
+ max_iterations=1000,
1366
+ tolerance=1e-4,
1367
+ ):
1368
+ """Returns the SimRank similarity of nodes in the graph ``G``.
1369
+
1370
+ This pure Python version is provided for pedagogical purposes.
1371
+
1372
+ Examples
1373
+ --------
1374
+ >>> G = nx.cycle_graph(2)
1375
+ >>> nx.similarity._simrank_similarity_python(G)
1376
+ {0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}}
1377
+ >>> nx.similarity._simrank_similarity_python(G, source=0)
1378
+ {0: 1, 1: 0.0}
1379
+ >>> nx.similarity._simrank_similarity_python(G, source=0, target=0)
1380
+ 1
1381
+ """
1382
+ # build up our similarity adjacency dictionary output
1383
+ newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G}
1384
+
1385
+ # These functions compute the update to the similarity value of the nodes
1386
+ # `u` and `v` with respect to the previous similarity values.
1387
+ def avg_sim(s):
1388
+ return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0
1389
+
1390
+ Gadj = G.pred if G.is_directed() else G.adj
1391
+
1392
+ def sim(u, v):
1393
+ return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v])))
1394
+
1395
+ for its in range(max_iterations):
1396
+ oldsim = newsim
1397
+ newsim = {u: {v: sim(u, v) if u != v else 1 for v in G} for u in G}
1398
+ is_close = all(
1399
+ all(
1400
+ abs(newsim[u][v] - old) <= tolerance * (1 + abs(old))
1401
+ for v, old in nbrs.items()
1402
+ )
1403
+ for u, nbrs in oldsim.items()
1404
+ )
1405
+ if is_close:
1406
+ break
1407
+
1408
+ if its + 1 == max_iterations:
1409
+ raise nx.ExceededMaxIterations(
1410
+ f"simrank did not converge after {max_iterations} iterations."
1411
+ )
1412
+
1413
+ if source is not None and target is not None:
1414
+ return newsim[source][target]
1415
+ if source is not None:
1416
+ return newsim[source]
1417
+ return newsim
1418
+
1419
+
1420
def _simrank_similarity_numpy(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``.

    Iterates the fixed point

        S = max{C * (A.T @ S @ A), I}

    where ``C`` is the importance factor, ``A`` the column-normalized
    adjacency matrix and ``I`` the identity, as defined in [1]_.

    Parameters are as in ``simrank_similarity``; ``source`` and
    ``target`` are positional node indices here, not labels.

    Returns
    -------
    similarity : numpy array or float
        The full similarity matrix when neither ``source`` nor
        ``target`` is given; a row of it when only ``source`` is given;
        a single float when both are given.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.similarity._simrank_similarity_numpy(G)
    array([[1., 0.],
           [0., 1.]])
    >>> nx.similarity._simrank_similarity_numpy(G, source=0)
    array([1., 0.])
    >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0)
    1.0

    References
    ----------
    .. [1] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    import numpy as np

    A = nx.to_numpy_array(G)

    # Column-normalize the adjacency matrix; all-zero columns are scaled
    # by 1 to avoid dividing by zero.
    col_sums = np.array(A.sum(axis=0))
    col_sums[col_sums == 0] = 1
    A /= col_sums

    sim = np.eye(len(G), dtype=np.float64)
    for step in range(max_iterations):
        previous = sim.copy()
        sim = importance_factor * ((A.T @ previous) @ A)
        # Self-similarity is pinned to 1 each round.
        np.fill_diagonal(sim, 1.0)

        if np.allclose(previous, sim, atol=tolerance):
            break

    if step + 1 == max_iterations:
        raise nx.ExceededMaxIterations(
            f"simrank did not converge after {max_iterations} iterations."
        )

    if source is not None and target is not None:
        return float(sim[source, target])
    if source is not None:
        return sim[source]
    return sim
1527
+
1528
+
1529
+ @nx._dispatchable(edge_attrs="weight")
1530
+ def panther_similarity(
1531
+ G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight"
1532
+ ):
1533
+ r"""Returns the Panther similarity of nodes in the graph `G` to node ``v``.
1534
+
1535
+ Panther is a similarity metric that says "two objects are considered
1536
+ to be similar if they frequently appear on the same paths." [1]_.
1537
+
1538
+ Parameters
1539
+ ----------
1540
+ G : NetworkX graph
1541
+ A NetworkX graph
1542
+ source : node
1543
+ Source node for which to find the top `k` similar other nodes
1544
+ k : int (default = 5)
1545
+ The number of most similar nodes to return.
1546
+ path_length : int (default = 5)
1547
+ How long the randomly generated paths should be (``T`` in [1]_)
1548
+ c : float (default = 0.5)
1549
+ A universal positive constant used to scale the number
1550
+ of sample random paths to generate.
1551
+ delta : float (default = 0.1)
1552
+ The probability that the similarity $S$ is not an epsilon-approximation to (R, phi),
1553
+ where $R$ is the number of random paths and $\phi$ is the probability
1554
+ that an element sampled from a set $A \subseteq D$, where $D$ is the domain.
1555
+ eps : float or None (default = None)
1556
+ The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,
1557
+ if no value is provided, the recommended computed value will be used.
1558
+ weight : string or None, optional (default="weight")
1559
+ The name of an edge attribute that holds the numerical value
1560
+ used as a weight. If None then each edge has weight 1.
1561
+
1562
+ Returns
1563
+ -------
1564
+ similarity : dictionary
1565
+ Dictionary of nodes to similarity scores (as floats). Note:
1566
+ the self-similarity (i.e., ``v``) will not be included in
1567
+ the returned dictionary. So, for ``k = 5``, a dictionary of
1568
+ top 4 nodes and their similarity scores will be returned.
1569
+
1570
+ Raises
1571
+ ------
1572
+ NetworkXUnfeasible
1573
+ If `source` is an isolated node.
1574
+
1575
+ NodeNotFound
1576
+ If `source` is not in `G`.
1577
+
1578
+ Notes
1579
+ -----
1580
+ The isolated nodes in `G` are ignored.
1581
+
1582
+ Examples
1583
+ --------
1584
+ >>> G = nx.star_graph(10)
1585
+ >>> sim = nx.panther_similarity(G, 0)
1586
+
1587
+ References
1588
+ ----------
1589
+ .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
1590
+ Panther: Fast top-k similarity search on large networks.
1591
+ In Proceedings of the ACM SIGKDD International Conference
1592
+ on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
1593
+ Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
1594
+ """
1595
+ import numpy as np
1596
+
1597
+ if source not in G:
1598
+ raise nx.NodeNotFound(f"Source node {source} not in G")
1599
+
1600
+ isolates = set(nx.isolates(G))
1601
+
1602
+ if source in isolates:
1603
+ raise nx.NetworkXUnfeasible(
1604
+ f"Panther similarity is not defined for the isolated source node {source}."
1605
+ )
1606
+
1607
+ G = G.subgraph([node for node in G.nodes if node not in isolates]).copy()
1608
+
1609
+ num_nodes = G.number_of_nodes()
1610
+ if num_nodes < k:
1611
+ warnings.warn(
1612
+ f"Number of nodes is {num_nodes}, but requested k is {k}. "
1613
+ "Setting k to number of nodes."
1614
+ )
1615
+ k = num_nodes
1616
+ # According to [1], they empirically determined
1617
+ # a good value for ``eps`` to be sqrt( 1 / |E| )
1618
+ if eps is None:
1619
+ eps = np.sqrt(1.0 / G.number_of_edges())
1620
+
1621
+ inv_node_map = {name: index for index, name in enumerate(G.nodes)}
1622
+ node_map = np.array(G)
1623
+
1624
+ # Calculate the sample size ``R`` for how many paths
1625
+ # to randomly generate
1626
+ t_choose_2 = math.comb(path_length, 2)
1627
+ sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
1628
+ index_map = {}
1629
+ _ = list(
1630
+ generate_random_paths(
1631
+ G, sample_size, path_length=path_length, index_map=index_map, weight=weight
1632
+ )
1633
+ )
1634
+ S = np.zeros(num_nodes)
1635
+
1636
+ inv_sample_size = 1 / sample_size
1637
+
1638
+ source_paths = set(index_map[source])
1639
+
1640
+ # Calculate the path similarities
1641
+ # between ``source`` (v) and ``node`` (v_j)
1642
+ # using our inverted index mapping of
1643
+ # vertices to paths
1644
+ for node, paths in index_map.items():
1645
+ # Only consider paths where both
1646
+ # ``node`` and ``source`` are present
1647
+ common_paths = source_paths.intersection(paths)
1648
+ S[inv_node_map[node]] = len(common_paths) * inv_sample_size
1649
+
1650
+ # Retrieve top ``k`` similar
1651
+ # Note: the below performed anywhere from 4-10x faster
1652
+ # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
1653
+ top_k_unsorted = np.argpartition(S, -k)[-k:]
1654
+ top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]
1655
+
1656
+ # Add back the similarity scores
1657
+ top_k_with_val = dict(
1658
+ zip(node_map[top_k_sorted].tolist(), S[top_k_sorted].tolist())
1659
+ )
1660
+
1661
+ # Remove the self-similarity
1662
+ top_k_with_val.pop(source, None)
1663
+ return top_k_with_val
1664
+
1665
+
1666
@np_random_state(5)
@nx._dispatchable(edge_attrs="weight")
def generate_random_paths(
    G, sample_size, path_length=5, index_map=None, weight="weight", seed=None
):
    """Randomly generate `sample_size` paths of length `path_length`.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph
    sample_size : integer
        The number of paths to generate.  This is ``R`` in [1]_.
    path_length : integer (default = 5)
        The maximum size of the path to randomly generate.
        This is ``T`` in [1]_.  According to the paper, ``T >= 5`` is
        recommended.
    index_map : dictionary, optional
        If provided, this will be populated with the inverted index of
        nodes mapped to the set of generated random path indices.
    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight.  If None then each edge has weight 1.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    paths : generator of lists
        Generator of `sample_size` paths each with length `path_length`.

    Examples
    --------
    >>> G = nx.star_graph(3)
    >>> random_paths = list(nx.generate_random_paths(G, 2))

    Passing a dictionary as `index_map` builds an inverted index from
    each node to the indices of the paths containing it:

    >>> G = nx.star_graph(3)
    >>> index_map = {}
    >>> random_paths = list(nx.generate_random_paths(G, 3, index_map=index_map))
    >>> paths_containing_node_0 = [
    ...     random_paths[path_idx] for path_idx in index_map.get(0, [])
    ... ]

    References
    ----------
    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
           Panther: Fast top-k similarity search on large networks.
           In Proceedings of the ACM SIGKDD International Conference
           on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
           Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
    """
    import numpy as np

    # ``seed`` may be a legacy RandomState or a modern Generator; they
    # expose different uniform-integer methods.
    draw_start = (
        seed.integers if isinstance(seed, np.random.Generator) else seed.randint
    )

    # Row-normalized (weighted) adjacency gives the transition
    # probabilities between every pair of vertices, Eq. (3) in [1].
    weights = nx.to_numpy_array(G, weight=weight)
    row_scale = np.reciprocal(weights.sum(axis=1)).reshape(-1, 1)
    transition = weights * row_scale

    labels = list(G)
    num_nodes = G.number_of_nodes()

    def record(node, path_index):
        # Maintain the inverted index P_v: node -> set of path indices.
        if index_map is not None:
            index_map.setdefault(node, set()).add(path_index)

    for path_index in range(sample_size):
        # Start vertex v = v_i chosen uniformly at random.
        current = draw_start(num_nodes)
        path = [labels[current]]
        record(labels[current], path_index)

        for _ in range(path_length):
            # Step to a neighbor v_j according to the transition
            # probabilities out of the current vertex.
            current = seed.choice(num_nodes, p=transition[current])
            step_node = labels[current]
            path.append(step_node)
            record(step_node, path_index)

        yield path
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/simple_paths.py ADDED
@@ -0,0 +1,937 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from heapq import heappop, heappush
2
+ from itertools import count
3
+
4
+ import networkx as nx
5
+ from networkx.algorithms.shortest_paths.weighted import _weight_function
6
+ from networkx.utils import not_implemented_for, pairwise
7
+
8
+ __all__ = [
9
+ "all_simple_paths",
10
+ "is_simple_path",
11
+ "shortest_simple_paths",
12
+ "all_simple_edge_paths",
13
+ ]
14
+
15
+
16
@nx._dispatchable
def is_simple_path(G, nodes):
    """Returns True if and only if `nodes` form a simple path in `G`.

    A *simple path* in a graph is a nonempty sequence of nodes in which
    no node appears more than once in the sequence, and each adjacent
    pair of nodes in the sequence is adjacent in the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    nodes : list
        A list of one or more nodes in the graph `G`.

    Returns
    -------
    bool
        Whether the given list of nodes represents a simple path in `G`.

    Notes
    -----
    An empty list of nodes is not a path but a list of one node is a
    path. Here's an explanation why.

    This function operates on *node paths*. One could also consider
    *edge paths*. There is a bijection between node paths and edge
    paths.

    The *length of a path* is the number of edges in the path, so a list
    of nodes of length *n* corresponds to a path of length *n* - 1.
    Thus the smallest edge path would be a list of zero edges, the empty
    path. This corresponds to a list of one node.

    To convert between a node path and an edge path, you can use code
    like the following::

        >>> from networkx.utils import pairwise
        >>> nodes = [0, 1, 2, 3]
        >>> edges = list(pairwise(nodes))
        >>> edges
        [(0, 1), (1, 2), (2, 3)]
        >>> nodes = [edges[0][0]] + [v for u, v in edges]
        >>> nodes
        [0, 1, 2, 3]

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> nx.is_simple_path(G, [2, 3, 0])
    True
    >>> nx.is_simple_path(G, [0, 2])
    False

    """
    # An empty sequence of nodes is not a path at all. (Raising
    # NetworkXPointlessConcept would be another defensible choice.)
    if not nodes:
        return False

    # A one-node sequence is the trivial path; it is valid exactly when
    # that node belongs to the graph.
    if len(nodes) == 1:
        return nodes[0] in G

    # Every node in the sequence must exist in the graph.
    if any(node not in G for node in nodes):
        return False

    # "Simple" forbids repeated nodes.
    if len(nodes) != len(set(nodes)):
        return False

    # Finally, each consecutive pair must be joined by an edge.
    return all(v in G[u] for u, v in pairwise(nodes))
92
+
93
+
94
@nx._dispatchable
def all_simple_paths(G, source, target, cutoff=None):
    """Generate all simple paths in the graph G from source to target.

    A simple path is a path with no repeated nodes.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node for path

    target : nodes
       Single node or iterable of nodes at which to end path

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    path_generator: generator
       A generator that produces lists of simple paths.  If there are no paths
       between the source and target within the given cutoff the generator
       produces no output. If it is possible to traverse the same sequence of
       nodes in multiple ways, namely through parallel edges, then it will be
       returned multiple times (once for each viable edge combination).

    Examples
    --------
    This iterator generates lists of nodes::

        >>> G = nx.complete_graph(4)
        >>> for path in nx.all_simple_paths(G, source=0, target=3):
        ...     print(path)
        ...
        [0, 1, 2, 3]
        [0, 1, 3]
        [0, 2, 1, 3]
        [0, 2, 3]
        [0, 3]

    You can generate only those paths that are shorter than a certain
    length by using the `cutoff` keyword argument::

        >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2)
        >>> print(list(paths))
        [[0, 1, 3], [0, 2, 3], [0, 3]]

    To get each path as the corresponding list of edges, you can use the
    :func:`networkx.utils.pairwise` helper function::

        >>> paths = nx.all_simple_paths(G, source=0, target=3)
        >>> for path in map(nx.utils.pairwise, paths):
        ...     print(list(path))
        [(0, 1), (1, 2), (2, 3)]
        [(0, 1), (1, 3)]
        [(0, 2), (2, 1), (1, 3)]
        [(0, 2), (2, 3)]
        [(0, 3)]

    Pass an iterable of nodes as target to generate all paths ending in any of several nodes::

        >>> G = nx.complete_graph(4)
        >>> for path in nx.all_simple_paths(G, source=0, target=[3, 2]):
        ...     print(path)
        ...
        [0, 1, 2]
        [0, 1, 2, 3]
        [0, 1, 3]
        [0, 1, 3, 2]
        [0, 2]
        [0, 2, 1, 3]
        [0, 2, 3]
        [0, 3]
        [0, 3, 1, 2]
        [0, 3, 2]

    The singleton path from ``source`` to itself is considered a simple path and is
    included in the results:

        >>> G = nx.empty_graph(5)
        >>> list(nx.all_simple_paths(G, source=0, target=0))
        [[0]]

        >>> G = nx.path_graph(3)
        >>> list(nx.all_simple_paths(G, source=0, target={0, 1, 2}))
        [[0], [0, 1], [0, 1, 2]]

    Iterate over each path from the root nodes to the leaf nodes in a
    directed acyclic graph::

        >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)])
        >>> roots = (v for v, d in G.in_degree() if d == 0)
        >>> leaves = (v for v, d in G.out_degree() if d == 0)
        >>> all_paths = []
        >>> for root in roots:
        ...     for leaf in leaves:
        ...         paths = nx.all_simple_paths(G, root, leaf)
        ...         all_paths.extend(paths)
        >>> all_paths
        [[0, 1, 2], [0, 3, 2]]

    Pass all leaves together to avoid unnecessary compute::

        >>> G = nx.DiGraph([(0, 1), (2, 1), (1, 3), (1, 4)])
        >>> roots = (v for v, d in G.in_degree() if d == 0)
        >>> leaves = [v for v, d in G.out_degree() if d == 0]
        >>> all_paths = []
        >>> for root in roots:
        ...     paths = nx.all_simple_paths(G, root, leaves)
        ...     all_paths.extend(paths)
        >>> all_paths
        [[0, 1, 3], [0, 1, 4], [2, 1, 3], [2, 1, 4]]

    If parallel edges offer multiple ways to traverse a given sequence of
    nodes, this sequence of nodes will be returned multiple times:

        >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 2)])
        >>> list(nx.all_simple_paths(G, 0, 2))
        [[0, 1, 2], [0, 1, 2]]

    Notes
    -----
    This algorithm uses a modified depth-first search to generate the
    paths [1]_.  A single path can be found in $O(V+E)$ time but the
    number of simple paths in a graph can be very large, e.g. $O(n!)$ in
    the complete graph of order $n$.

    This function does not check that a path exists between `source` and
    `target`. For large graphs, this may result in very long runtimes.
    Consider using `has_path` to check that a path exists between `source` and
    `target` before calling this function on large graphs.

    References
    ----------
    .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
       Addison Wesley Professional, 3rd ed., 2001.

    See Also
    --------
    all_shortest_paths, shortest_path, has_path

    """
    # Delegate the search to the edge-path generator and project each
    # edge path back onto its node sequence: the source node followed by
    # the head of every traversed edge. Edges may be (u, v) or, for
    # multigraphs, (u, v, key); the starred unpacking absorbs the key.
    for edge_path in all_simple_edge_paths(G, source, target, cutoff):
        yield [source] + [v for _, v, *_ in edge_path]
258
+
259
+
260
@nx._dispatchable
def all_simple_edge_paths(G, source, target, cutoff=None):
    """Generate lists of edges for all simple paths in G from source to target.

    A simple path is a path with no repeated nodes.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node for path

    target : nodes
       Single node or iterable of nodes at which to end path

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    path_generator: generator
       A generator that produces lists of simple paths.  If there are no paths
       between the source and target within the given cutoff the generator
       produces no output.
       For multigraphs, the list of edges have elements of the form `(u,v,k)`.
       Where `k` corresponds to the edge key.

    Examples
    --------

    Print the simple path edges of a Graph::

        >>> g = nx.Graph([(1, 2), (2, 4), (1, 3), (3, 4)])
        >>> for path in sorted(nx.all_simple_edge_paths(g, 1, 4)):
        ...     print(path)
        [(1, 2), (2, 4)]
        [(1, 3), (3, 4)]

    Print the simple path edges of a MultiGraph. Returned edges come with
    their associated keys::

        >>> mg = nx.MultiGraph()
        >>> mg.add_edge(1, 2, key="k0")
        'k0'
        >>> mg.add_edge(1, 2, key="k1")
        'k1'
        >>> mg.add_edge(2, 3, key="k0")
        'k0'
        >>> for path in sorted(nx.all_simple_edge_paths(mg, 1, 3)):
        ...     print(path)
        [(1, 2, 'k0'), (2, 3, 'k0')]
        [(1, 2, 'k1'), (2, 3, 'k0')]

    When ``source`` is one of the targets, the empty path starting and ending at
    ``source`` without traversing any edge is considered a valid simple edge path
    and is included in the results:

        >>> G = nx.Graph()
        >>> G.add_node(0)
        >>> paths = list(nx.all_simple_edge_paths(G, 0, 0))
        >>> for path in paths:
        ...     print(path)
        []
        >>> len(paths)
        1


    Notes
    -----
    This algorithm uses a modified depth-first search to generate the
    paths [1]_.  A single path can be found in $O(V+E)$ time but the
    number of simple paths in a graph can be very large, e.g. $O(n!)$ in
    the complete graph of order $n$.

    References
    ----------
    .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
       Addison Wesley Professional, 3rd ed., 2001.

    See Also
    --------
    all_shortest_paths, shortest_path, all_simple_paths

    """
    if source not in G:
        raise nx.NodeNotFound(f"source node {source} not in graph")

    # `target` may be a single node or an iterable of nodes; normalize
    # it to a set of target nodes either way.
    if target in G:
        targets = {target}
    else:
        try:
            targets = set(target)
        except TypeError as err:
            raise nx.NodeNotFound(f"target node {target} not in graph") from err

    # By default allow paths up to the longest possible simple path,
    # which traverses at most |V| - 1 edges.
    if cutoff is None:
        cutoff = len(G) - 1

    # A negative cutoff or an empty target set can never be satisfied.
    if targets and cutoff >= 0:
        yield from _all_simple_edge_paths(G, source, targets, cutoff)
360
+
361
+
362
+ def _all_simple_edge_paths(G, source, targets, cutoff):
363
+ # We simulate recursion with a stack, keeping the current path being explored
364
+ # and the outgoing edge iterators at each point in the stack.
365
+ # To avoid unnecessary checks, the loop is structured in a way such that a path
366
+ # is considered for yielding only after a new node/edge is added.
367
+ # We bootstrap the search by adding a dummy iterator to the stack that only yields
368
+ # a dummy edge to source (so that the trivial path has a chance of being included).
369
+
370
+ get_edges = (
371
+ (lambda node: G.edges(node, keys=True))
372
+ if G.is_multigraph()
373
+ else (lambda node: G.edges(node))
374
+ )
375
+
376
+ # The current_path is a dictionary that maps nodes in the path to the edge that was
377
+ # used to enter that node (instead of a list of edges) because we want both a fast
378
+ # membership test for nodes in the path and the preservation of insertion order.
379
+ current_path = {None: None}
380
+ stack = [iter([(None, source)])]
381
+
382
+ while stack:
383
+ # 1. Try to extend the current path.
384
+ next_edge = next((e for e in stack[-1] if e[1] not in current_path), None)
385
+ if next_edge is None:
386
+ # All edges of the last node in the current path have been explored.
387
+ stack.pop()
388
+ current_path.popitem()
389
+ continue
390
+ previous_node, next_node, *_ = next_edge
391
+
392
+ # 2. Check if we've reached a target.
393
+ if next_node in targets:
394
+ yield (list(current_path.values()) + [next_edge])[2:] # remove dummy edge
395
+
396
+ # 3. Only expand the search through the next node if it makes sense.
397
+ if len(current_path) - 1 < cutoff and (
398
+ targets - current_path.keys() - {next_node}
399
+ ):
400
+ current_path[next_node] = next_edge
401
+ stack.append(iter(get_edges(next_node)))
402
+
403
+
404
+ @not_implemented_for("multigraph")
405
+ @nx._dispatchable(edge_attrs="weight")
406
+ def shortest_simple_paths(G, source, target, weight=None):
407
+ """Generate all simple paths in the graph G from source to target,
408
+ starting from shortest ones.
409
+
410
+ A simple path is a path with no repeated nodes.
411
+
412
+ If a weighted shortest path search is to be used, no negative weights
413
+ are allowed.
414
+
415
+ Parameters
416
+ ----------
417
+ G : NetworkX graph
418
+
419
+ source : node
420
+ Starting node for path
421
+
422
+ target : node
423
+ Ending node for path
424
+
425
+ weight : string or function
426
+ If it is a string, it is the name of the edge attribute to be
427
+ used as a weight.
428
+
429
+ If it is a function, the weight of an edge is the value returned
430
+ by the function. The function must accept exactly three positional
431
+ arguments: the two endpoints of an edge and the dictionary of edge
432
+ attributes for that edge. The function must return a number.
433
+
434
+ If None all edges are considered to have unit weight. Default
435
+ value None.
436
+
437
+ Returns
438
+ -------
439
+ path_generator: generator
440
+ A generator that produces lists of simple paths, in order from
441
+ shortest to longest.
442
+
443
+ Raises
444
+ ------
445
+ NetworkXNoPath
446
+ If no path exists between source and target.
447
+
448
+ NetworkXError
449
+ If source or target nodes are not in the input graph.
450
+
451
+ NetworkXNotImplemented
452
+ If the input graph is a Multi[Di]Graph.
453
+
454
+ Examples
455
+ --------
456
+
457
+ >>> G = nx.cycle_graph(7)
458
+ >>> paths = list(nx.shortest_simple_paths(G, 0, 3))
459
+ >>> print(paths)
460
+ [[0, 1, 2, 3], [0, 6, 5, 4, 3]]
461
+
462
+ You can use this function to efficiently compute the k shortest/best
463
+ paths between two nodes.
464
+
465
+ >>> from itertools import islice
466
+ >>> def k_shortest_paths(G, source, target, k, weight=None):
467
+ ... return list(
468
+ ... islice(nx.shortest_simple_paths(G, source, target, weight=weight), k)
469
+ ... )
470
+ >>> for path in k_shortest_paths(G, 0, 3, 2):
471
+ ... print(path)
472
+ [0, 1, 2, 3]
473
+ [0, 6, 5, 4, 3]
474
+
475
+ Notes
476
+ -----
477
+ This procedure is based on algorithm by Jin Y. Yen [1]_. Finding
478
+ the first $K$ paths requires $O(KN^3)$ operations.
479
+
480
+ See Also
481
+ --------
482
+ all_shortest_paths
483
+ shortest_path
484
+ all_simple_paths
485
+
486
+ References
487
+ ----------
488
+ .. [1] Jin Y. Yen, "Finding the K Shortest Loopless Paths in a
489
+ Network", Management Science, Vol. 17, No. 11, Theory Series
490
+ (Jul., 1971), pp. 712-716.
491
+
492
+ """
493
+ if source not in G:
494
+ raise nx.NodeNotFound(f"source node {source} not in graph")
495
+
496
+ if target not in G:
497
+ raise nx.NodeNotFound(f"target node {target} not in graph")
498
+
499
+ if weight is None:
500
+ length_func = len
501
+ shortest_path_func = _bidirectional_shortest_path
502
+ else:
503
+ wt = _weight_function(G, weight)
504
+
505
+ def length_func(path):
506
+ return sum(
507
+ wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:])
508
+ )
509
+
510
+ shortest_path_func = _bidirectional_dijkstra
511
+
512
+ listA = []
513
+ listB = PathBuffer()
514
+ prev_path = None
515
+ while True:
516
+ if not prev_path:
517
+ length, path = shortest_path_func(G, source, target, weight=weight)
518
+ listB.push(length, path)
519
+ else:
520
+ ignore_nodes = set()
521
+ ignore_edges = set()
522
+ for i in range(1, len(prev_path)):
523
+ root = prev_path[:i]
524
+ root_length = length_func(root)
525
+ for path in listA:
526
+ if path[:i] == root:
527
+ ignore_edges.add((path[i - 1], path[i]))
528
+ try:
529
+ length, spur = shortest_path_func(
530
+ G,
531
+ root[-1],
532
+ target,
533
+ ignore_nodes=ignore_nodes,
534
+ ignore_edges=ignore_edges,
535
+ weight=weight,
536
+ )
537
+ path = root[:-1] + spur
538
+ listB.push(root_length + length, path)
539
+ except nx.NetworkXNoPath:
540
+ pass
541
+ ignore_nodes.add(root[-1])
542
+
543
+ if listB:
544
+ path = listB.pop()
545
+ yield path
546
+ listA.append(path)
547
+ prev_path = path
548
+ else:
549
+ break
550
+
551
+
552
class PathBuffer:
    """Min-heap of (cost, path) pairs that silently drops duplicate paths.

    Ties in cost are broken by insertion order via a monotonically
    increasing counter, which also keeps unorderable paths out of heap
    comparisons.
    """

    def __init__(self):
        self.paths = set()  # tuple-ized paths currently buffered
        self.sortedpaths = []  # heap of (cost, tiebreak, path)
        self.counter = count()

    def __len__(self):
        return len(self.sortedpaths)

    def push(self, cost, path):
        """Insert `path` with priority `cost` unless it is already buffered."""
        hashable_path = tuple(path)
        if hashable_path in self.paths:
            return
        heappush(self.sortedpaths, (cost, next(self.counter), path))
        self.paths.add(hashable_path)

    def pop(self):
        """Remove and return the cheapest buffered path."""
        _, _, path = heappop(self.sortedpaths)
        self.paths.discard(tuple(path))
        return path
572
+
573
+
574
def _bidirectional_shortest_path(
    G, source, target, ignore_nodes=None, ignore_edges=None, weight=None
):
    """Returns the shortest path between source and target ignoring
       nodes and edges in the containers ignore_nodes and ignore_edges.

    This is a custom modification of the standard bidirectional shortest
    path implementation at networkx.algorithms.unweighted

    Parameters
    ----------
    G : NetworkX graph

    source : node
       starting node for path

    target : node
       ending node for path

    ignore_nodes : container of nodes
       nodes to ignore, optional

    ignore_edges : container of edges
       edges to ignore, optional

    weight : None
       This function accepts a weight argument for convenience of
       shortest_simple_paths function. It will be ignored.

    Returns
    -------
    path: list
       List of nodes in a path from source to target.

    Raises
    ------
    NetworkXNoPath
       If no path exists between source and target.

    See Also
    --------
    shortest_path

    """
    # The helper returns predecessor/successor maps plus the node `w`
    # where the two BFS frontiers met.
    pred, succ, w = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges)

    # Walk forward from the meeting node to the target.
    suffix = []
    while w is not None:
        suffix.append(w)
        w = succ[w]

    # Walk backward from the meeting node to the source, then reverse.
    prefix = []
    w = pred[suffix[0]]
    while w is not None:
        prefix.append(w)
        w = pred[w]
    prefix.reverse()

    path = prefix + suffix
    # NOTE: the reported "length" is the node count, matching the
    # original convention relied on by shortest_simple_paths.
    return len(path), path
635
+
636
+
637
+ def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None):
638
+ """Bidirectional shortest path helper.
639
+ Returns (pred,succ,w) where
640
+ pred is a dictionary of predecessors from w to the source, and
641
+ succ is a dictionary of successors from w to the target.
642
+ """
643
+ # does BFS from both source and target and meets in the middle
644
+ if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
645
+ raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
646
+ if target == source:
647
+ return ({target: None}, {source: None}, source)
648
+
649
+ # handle either directed or undirected
650
+ if G.is_directed():
651
+ Gpred = G.predecessors
652
+ Gsucc = G.successors
653
+ else:
654
+ Gpred = G.neighbors
655
+ Gsucc = G.neighbors
656
+
657
+ # support optional nodes filter
658
+ if ignore_nodes:
659
+
660
+ def filter_iter(nodes):
661
+ def iterate(v):
662
+ for w in nodes(v):
663
+ if w not in ignore_nodes:
664
+ yield w
665
+
666
+ return iterate
667
+
668
+ Gpred = filter_iter(Gpred)
669
+ Gsucc = filter_iter(Gsucc)
670
+
671
+ # support optional edges filter
672
+ if ignore_edges:
673
+ if G.is_directed():
674
+
675
+ def filter_pred_iter(pred_iter):
676
+ def iterate(v):
677
+ for w in pred_iter(v):
678
+ if (w, v) not in ignore_edges:
679
+ yield w
680
+
681
+ return iterate
682
+
683
+ def filter_succ_iter(succ_iter):
684
+ def iterate(v):
685
+ for w in succ_iter(v):
686
+ if (v, w) not in ignore_edges:
687
+ yield w
688
+
689
+ return iterate
690
+
691
+ Gpred = filter_pred_iter(Gpred)
692
+ Gsucc = filter_succ_iter(Gsucc)
693
+
694
+ else:
695
+
696
+ def filter_iter(nodes):
697
+ def iterate(v):
698
+ for w in nodes(v):
699
+ if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
700
+ yield w
701
+
702
+ return iterate
703
+
704
+ Gpred = filter_iter(Gpred)
705
+ Gsucc = filter_iter(Gsucc)
706
+
707
+ # predecessor and successors in search
708
+ pred = {source: None}
709
+ succ = {target: None}
710
+
711
+ # initialize fringes, start with forward
712
+ forward_fringe = [source]
713
+ reverse_fringe = [target]
714
+
715
+ while forward_fringe and reverse_fringe:
716
+ if len(forward_fringe) <= len(reverse_fringe):
717
+ this_level = forward_fringe
718
+ forward_fringe = []
719
+ for v in this_level:
720
+ for w in Gsucc(v):
721
+ if w not in pred:
722
+ forward_fringe.append(w)
723
+ pred[w] = v
724
+ if w in succ:
725
+ # found path
726
+ return pred, succ, w
727
+ else:
728
+ this_level = reverse_fringe
729
+ reverse_fringe = []
730
+ for v in this_level:
731
+ for w in Gpred(v):
732
+ if w not in succ:
733
+ succ[w] = v
734
+ reverse_fringe.append(w)
735
+ if w in pred:
736
+ # found path
737
+ return pred, succ, w
738
+
739
+ raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
740
+
741
+
742
def _bidirectional_dijkstra(
    G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None
):
    """Dijkstra's algorithm for shortest paths using bidirectional search.

    This function returns the shortest path between source and target
    ignoring nodes and edges in the containers ignore_nodes and
    ignore_edges.

    This is a custom modification of the standard Dijkstra bidirectional
    shortest path implementation at networkx.algorithms.weighted

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node.

    target : node
       Ending node.

    weight: string, function, optional (default='weight')
       Edge data key or weight function corresponding to the edge weight

    ignore_nodes : container of nodes
       nodes to ignore, optional

    ignore_edges : container of edges
       edges to ignore, optional

    Returns
    -------
    length : number
        Shortest path length.

    Returns a tuple of two dictionaries keyed by node.
    The first dictionary stores distance from the source.
    The second stores the path from the source to that node.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    In practice  bidirectional Dijkstra is much more than twice as fast as
    ordinary Dijkstra.

    Ordinary Dijkstra expands nodes in a sphere-like manner from the
    source. The radius of this sphere will eventually be the length
    of the shortest path. Bidirectional Dijkstra will expand nodes
    from both the source and the target, making two spheres of half
    this radius. Volume of the first sphere is pi*r*r while the
    others are 2*pi*r/2*r/2, making up half the volume.

    This algorithm is not guaranteed to work if edge weights
    are negative or are floating point numbers
    (overflows and roundoff errors can cause problems).

    See Also
    --------
    shortest_path
    shortest_path_length
    """
    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
    if source == target:
        if source not in G:
            raise nx.NodeNotFound(f"Node {source} not in graph")
        return (0, [source])

    # Directed graphs search backwards along predecessors; undirected
    # graphs use the same neighbor iterator in both directions.
    if G.is_directed():
        Gpred = G.predecessors
        Gsucc = G.successors
    else:
        Gpred = G.neighbors
        Gsucc = G.neighbors

    # Optionally wrap the neighbor iterators to skip ignored nodes.
    if ignore_nodes:

        def filter_iter(nodes):
            def iterate(v):
                for w in nodes(v):
                    if w not in ignore_nodes:
                        yield w

            return iterate

        Gpred = filter_iter(Gpred)
        Gsucc = filter_iter(Gsucc)

    # Optionally wrap them again to skip ignored edges; edge direction
    # matters only for directed graphs.
    if ignore_edges:
        if G.is_directed():

            def filter_pred_iter(pred_iter):
                def iterate(v):
                    for w in pred_iter(v):
                        if (w, v) not in ignore_edges:
                            yield w

                return iterate

            def filter_succ_iter(succ_iter):
                def iterate(v):
                    for w in succ_iter(v):
                        if (v, w) not in ignore_edges:
                            yield w

                return iterate

            Gpred = filter_pred_iter(Gpred)
            Gsucc = filter_succ_iter(Gsucc)

        else:

            def filter_iter(nodes):
                def iterate(v):
                    for w in nodes(v):
                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
                            yield w

                return iterate

            Gpred = filter_iter(Gpred)
            Gsucc = filter_iter(Gsucc)

    push = heappush
    pop = heappop
    # Init:   Forward             Backward
    dists = [{}, {}]  # dictionary of final distances
    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
    fringe = [[], []]  # heap of (distance, counter, node) tuples for
    # extracting next node to expand
    seen = [{source: 0}, {target: 0}]  # dictionary of distances to
    # nodes seen
    c = count()
    # initialize fringe heap
    push(fringe[0], (0, next(c), source))
    push(fringe[1], (0, next(c), target))
    # neighs for extracting correct neighbor information
    neighs = [Gsucc, Gpred]
    # variables to hold shortest discovered path
    # finaldist = 1e30000
    finalpath = []
    # The weight function depends only on G and `weight`, both fixed for
    # the whole search, so resolve it once instead of on every iteration
    # of the main loop (hoisted loop-invariant).
    wt = _weight_function(G, weight)
    # direction == 0 is the forward search, direction == 1 the backward
    # one; the searches alternate each iteration.  (Named `direction`
    # rather than `dir` to avoid shadowing the builtin.)
    direction = 1
    while fringe[0] and fringe[1]:
        # choose direction for this round
        direction = 1 - direction
        # extract closest node to expand
        (dist, _, v) = pop(fringe[direction])
        if v in dists[direction]:
            # Shortest path to v has already been found
            continue
        # update distance
        dists[direction][v] = dist  # equal to seen[direction][v]
        if v in dists[1 - direction]:
            # if we have scanned v in both directions we are done
            # we have now discovered the shortest path
            return (finaldist, finalpath)

        for w in neighs[direction](v):
            if direction == 0:  # forward
                minweight = wt(v, w, G.get_edge_data(v, w))
            else:  # back, must remember to change v,w->w,v
                minweight = wt(w, v, G.get_edge_data(w, v))
            vwLength = dists[direction][v] + minweight

            if w in dists[direction]:
                if vwLength < dists[direction][w]:
                    raise ValueError("Contradictory paths found: negative weights?")
            elif w not in seen[direction] or vwLength < seen[direction][w]:
                # relaxing
                seen[direction][w] = vwLength
                push(fringe[direction], (vwLength, next(c), w))
                paths[direction][w] = paths[direction][v] + [w]
                if w in seen[0] and w in seen[1]:
                    # see if this path is better than the already
                    # discovered shortest path
                    totaldist = seen[0][w] + seen[1][w]
                    if finalpath == [] or finaldist > totaldist:
                        finaldist = totaldist
                        revpath = paths[1][w][:]
                        revpath.reverse()
                        finalpath = paths[0][w] + revpath[1:]
    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/structuralholes.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for computing measures of structural holes."""
2
+
3
+ import networkx as nx
4
+
5
+ __all__ = ["constraint", "local_constraint", "effective_size"]
6
+
7
+
8
@nx._dispatchable(edge_attrs="weight")
def mutual_weight(G, u, v, weight=None):
    """Return the combined weight of the edges ``u -> v`` and ``v -> u``.

    `weight` is the edge-data key that holds the weight; a missing key
    (or ``weight=None``) counts as weight 1, and a missing edge as 0.

    Pre-conditions: `u` and `v` must both be in `G`.
    """
    total = 0
    for a, b in ((u, v), (v, u)):
        try:
            total += G[a][b].get(weight, 1)
        except KeyError:
            # No edge in this direction: contributes nothing.
            pass
    return total
29
+
30
+
31
@nx._dispatchable(edge_attrs="weight")
def normalized_mutual_weight(G, u, v, norm=sum, weight=None):
    """Return the mutual weight of ``u`` and ``v`` normalized over `u`'s
    neighborhood.

    `norm` is a function of one iterable argument (the mutual weights of
    ``u`` with each of its in- and out-neighbors) returning the
    normalization factor; common choices are ``sum`` and ``max``.

    `weight` is either ``None`` (all edges weigh 1) or the name of the
    edge attribute holding the weight.
    """
    neighbor_weights = (
        mutual_weight(G, u, w, weight) for w in set(nx.all_neighbors(G, u))
    )
    scale = norm(neighbor_weights)
    if scale == 0:
        return 0
    return mutual_weight(G, u, v, weight) / scale
50
+
51
+
52
@nx._dispatchable(edge_attrs="weight")
def effective_size(G, nodes=None, weight=None):
    r"""Returns the effective size of all nodes in the graph ``G``.

    The *effective size* of a node's ego network is the nonredundant
    part of its relationships [1]_. Formally, for a node $u$,

    .. math::

       e(u) = \sum_{v \in N(u) \setminus \{u\}}
       \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right)

    where $N(u)$ is the set of neighbors of $u$, $p_{uw}$ is the
    normalized mutual weight of the edges joining $u$ and $w$, and
    $m_{vw}$ is the mutual weight of $v$ and $w$ divided by $v$'s
    highest mutual weight with any neighbor. The *mutual weight* of two
    nodes is the sum of the weights of the edges joining them (1 per
    edge if unweighted).

    For unweighted, undirected graphs Borgatti's simplified formula is
    used instead [2]_: ``e(u) = n - 2t/n``, with `n` the number of
    non-ego nodes in the ego network and `t` the number of ties among
    them.

    Parameters
    ----------
    G : NetworkX graph
        The graph containing ``v``. Directed graphs are treated like
        undirected graphs when computing neighbors of ``v``.

    nodes : container, optional
        Nodes of ``G`` to compute the effective size for; all nodes if
        None.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    dict
        Maps each requested node to its effective size. Isolated nodes
        map to NaN (effective size is undefined for them).

    See also
    --------
    constraint

    References
    ----------
    .. [1] Burt, Ronald S.
           *Structural Holes: The Social Structure of Competition.*
           Cambridge: Harvard University Press, 1995.

    .. [2] Borgatti, S.
           "Structural Holes: Unpacking Burt's Redundancy Measures"
           CONNECTIONS 20(1):35-38.
           http://www.analytictech.com/connections/v20(1)/holes.htm
    """

    def redundancy(G, u, v, weight=None):
        # One minus the redundancy term of the general formula above.
        nmw = normalized_mutual_weight
        overlap = 0
        for w in set(nx.all_neighbors(G, u)):
            overlap += nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight)
        return 1 - overlap

    if nodes is None:
        nodes = G
    # Borgatti's shortcut is valid only for unweighted undirected graphs.
    use_borgatti = not G.is_directed() and weight is None
    sizes = {}
    for v in nodes:
        if len(G[v]) == 0:
            # Effective size is not defined for isolated nodes.
            sizes[v] = float("nan")
            continue
        if use_borgatti:
            E = nx.ego_graph(G, v, center=False, undirected=True)
            sizes[v] = len(E) - (2 * E.size()) / len(E)
        else:
            sizes[v] = sum(
                redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v))
            )
    return sizes
163
+
164
+
165
@nx._dispatchable(edge_attrs="weight")
def constraint(G, nodes=None, weight=None):
    r"""Returns the constraint on all nodes in the graph ``G``.

    The *constraint* measures how heavily a node *v* is invested in
    neighbors that are themselves invested in *v*'s other neighbors.
    Formally,

    .. math::

       c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w)

    where $N(v)$ is the set of predecessors/successors of `v` and
    $\ell(v, w)$ is the local constraint of `v` with respect to `w`
    (see :func:`local_constraint`) [1]_.

    Parameters
    ----------
    G : NetworkX graph
        Directed or undirected graph containing the nodes.

    nodes : container, optional
        Nodes to compute the constraint for; all nodes of ``G`` if None.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    dict
        Maps each requested node to its constraint. Isolated nodes map
        to NaN (constraint is undefined for them).

    See also
    --------
    local_constraint

    References
    ----------
    .. [1] Burt, Ronald S.
           "Structural holes and good ideas".
           American Journal of Sociology (110): 349–399.
    """
    if nodes is None:
        nodes = G
    result = {}
    for v in nodes:
        if len(G[v]) == 0:
            # Constraint is not defined for isolated nodes.
            result[v] = float("nan")
            continue
        total = 0
        for n in set(nx.all_neighbors(G, v)):
            total += local_constraint(G, v, n, weight)
        result[v] = total
    return result
224
+
225
+
226
@nx._dispatchable(edge_attrs="weight")
def local_constraint(G, u, v, weight=None):
    r"""Returns the local constraint on ``u`` with respect to ``v``.

    Formally,

    .. math::

       \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2,

    where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the
    normalized mutual weight of the (directed or undirected) edges
    joining $u$ and $v$ [1]_. The *mutual weight* of two nodes is the
    sum of the weights of the edges joining them (1 per edge if the
    graph is unweighted).

    Parameters
    ----------
    G : NetworkX graph
        Directed or undirected graph containing ``u`` and ``v``.

    u : node
        A node in the graph ``G``.

    v : node
        A node in the graph ``G``.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    float
        The local constraint of ``u`` with respect to ``v``.

    See also
    --------
    constraint

    References
    ----------
    .. [1] Burt, Ronald S.
           "Structural holes and good ideas".
           American Journal of Sociology (110): 349–399.
    """
    nmw = normalized_mutual_weight
    direct = nmw(G, u, v, weight=weight)
    indirect = 0
    for w in set(nx.all_neighbors(G, u)):
        indirect += nmw(G, u, w, weight=weight) * nmw(G, w, v, weight=weight)
    return (direct + indirect) ** 2
env-llmeval/lib/python3.10/site-packages/networkx/algorithms/threshold.py ADDED
@@ -0,0 +1,979 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Threshold Graphs - Creation, manipulation and identification.
3
+ """
4
+ from math import sqrt
5
+
6
+ import networkx as nx
7
+ from networkx.utils import py_random_state
8
+
9
+ __all__ = ["is_threshold_graph", "find_threshold_graph"]
10
+
11
+
12
@nx._dispatchable
def is_threshold_graph(G):
    """
    Returns `True` if `G` is a threshold graph.

    A graph is a threshold graph exactly when its degree sequence is a
    threshold sequence, so the check is delegated to
    :func:`is_threshold_sequence`.

    Parameters
    ----------
    G : NetworkX graph instance
        An instance of `Graph`, `DiGraph`, `MultiGraph` or `MultiDiGraph`

    Returns
    -------
    bool
        `True` if `G` is a threshold graph, `False` otherwise.

    Examples
    --------
    >>> from networkx.algorithms.threshold import is_threshold_graph
    >>> is_threshold_graph(nx.path_graph(3))
    True
    >>> is_threshold_graph(nx.barbell_graph(3, 3))
    False

    References
    ----------
    .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
    """
    degrees = [deg for _node, deg in G.degree()]
    return is_threshold_sequence(degrees)
42
+
43
+
44
def is_threshold_sequence(degree_sequence):
    """
    Returns True if the sequence is a threshold degree sequence.

    A threshold graph can always be dismantled by repeatedly removing
    either an isolated node (degree 0) or a dominating node (adjacent
    to every other remaining node).  This peels the sorted sequence
    accordingly; if the peeling ever gets stuck, the sequence is not a
    threshold sequence.
    """
    remaining = sorted(degree_sequence)  # work on a copy, smallest first
    while remaining:
        if remaining[0] == 0:
            # Peel an isolated vertex.
            del remaining[0]
        elif remaining[-1] == len(remaining) - 1:
            # Peel the dominating vertex and drop its incident edges.
            remaining.pop()
            remaining = [d - 1 for d in remaining]
        else:
            # Neither an isolated nor a dominating vertex exists.
            return False
    return True
65
+
66
+
67
def creation_sequence(degree_sequence, with_labels=False, compact=False):
    """
    Determines the creation sequence for the given threshold degree sequence.

    The creation sequence is a list of single characters 'd'
    or 'i': 'd' for dominating or 'i' for isolated vertices.
    A dominating vertex is connected to all vertices present when it is
    added; by convention the first vertex added is 'd'.

    If with_labels==True:
        Returns a list of 2-tuples containing the vertex number
        and a character 'd' or 'i' which describes the type of vertex.

    If compact==True:
        Returns the compact form (alternating run lengths of 'd's and
        'i's, starting with 'd') -- see :func:`make_compact`.

    with_labels and compact cannot both be True.

    Returns None if the sequence is not a threshold sequence.
    """
    if with_labels and compact:
        raise ValueError("compact sequences cannot be labeled")

    # Pair each degree with its label (dict key or positional index).
    if isinstance(degree_sequence, dict):  # labeled degree sequence
        ds = sorted([d, label] for label, d in degree_sequence.items())
    else:
        ds = sorted([d, idx] for idx, d in enumerate(degree_sequence))

    # Peel vertices off (isolated from the front, dominating from the
    # back), recording them in reverse construction order.
    peeled = []
    while ds:
        if ds[0][0] == 0:  # isolated vertex
            _deg, vert = ds.pop(0)
            # The very last vertex peeled must be labeled 'd' so the
            # construction starts with a dominating vertex.
            peeled.append((vert, "i" if ds else "d"))
        else:
            if ds[-1][0] != len(ds) - 1:  # largest vertex not dominating
                return None  # not a threshold degree sequence
            _deg, vert = ds.pop()
            peeled.append((vert, "d"))
            ds = [[d - 1, label] for d, label in ds]  # drop its edges
    peeled.reverse()  # construction order

    if with_labels:
        return peeled
    if compact:
        return make_compact(peeled)
    return [entry[1] for entry in peeled]  # unlabeled
124
+
125
+
126
def make_compact(creation_sequence):
    """
    Returns the creation sequence in a compact form:
    the alternating run lengths of 'd's and 'i's.

    Examples
    --------
    >>> from networkx.algorithms.threshold import make_compact
    >>> make_compact(["d", "i", "i", "d", "d", "i", "i", "i"])
    [1, 2, 2, 3]
    >>> make_compact(["d", "d", "d", "i", "d", "d"])
    [3, 1, 2]

    The first run length always counts 'd's, since a creation sequence
    starts with a dominating vertex.  Labeled creation sequences lose
    their labels in the compact representation.

    >>> make_compact([3, 1, 2])
    [3, 1, 2]
    """
    head = creation_sequence[0]
    if isinstance(head, str):  # plain creation sequence
        symbols = list(creation_sequence)
    elif isinstance(head, tuple):  # labeled creation sequence
        symbols = [pair[1] for pair in creation_sequence]
    elif isinstance(head, int):  # already compact
        return creation_sequence
    else:
        raise TypeError("Not a valid creation sequence type")

    # Run-length encode the symbol list.
    runs = []
    run_len = 1
    for prev, cur in zip(symbols, symbols[1:]):
        if cur == prev:
            run_len += 1
        else:
            runs.append(run_len)
            run_len = 1
    runs.append(run_len)  # close the final run
    return runs
168
+
169
+
170
def uncompact(creation_sequence):
    """
    Converts a compact creation sequence for a threshold
    graph to a standard creation sequence (unlabeled).
    If the creation_sequence is already standard (or labeled),
    it is returned unchanged.  See creation_sequence.
    """
    head = creation_sequence[0]
    if isinstance(head, (str, tuple)):
        # Already a standard or labeled creation sequence.
        return creation_sequence
    if not isinstance(head, int):
        raise TypeError("Not a valid creation sequence type")
    # Compact entries alternate d-runs and i-runs, starting with 'd'.
    expanded = []
    for pos, count in enumerate(creation_sequence):
        symbol = "d" if pos % 2 == 0 else "i"
        expanded.extend([symbol] * count)
    return expanded
192
+
193
+
194
def creation_sequence_to_weights(creation_sequence):
    """
    Returns a list of node weights which create the threshold
    graph designated by the creation sequence.  The weights
    are scaled so that the threshold is 1.0.  The order of the
    nodes is the same as that in the creation sequence.
    """
    # Turn input sequence into a labeled creation sequence
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        if isinstance(creation_sequence, list):
            wseq = creation_sequence[:]
        else:
            wseq = list(creation_sequence)  # string like 'ddidid'
    elif isinstance(first, tuple):  # labeled creation sequence
        wseq = [v[1] for v in creation_sequence]
    elif isinstance(first, int):  # compact creation sequence
        wseq = uncompact(creation_sequence)
    else:
        raise TypeError("Not a valid creation sequence type")
    # pass through twice--first backwards
    # Backward pass: replace every 'i' entry with w, where w counts the
    # i->d run transitions seen so far (i.e. the number of d-runs that
    # occur after that position in the original order).
    wseq.reverse()
    w = 0
    prev = "i"
    for j, s in enumerate(wseq):
        if s == "i":
            wseq[j] = w
            prev = s
        elif prev == "i":
            prev = s
            w += 1
    wseq.reverse()  # now pass through forwards
    # Forward pass: replace every remaining 'd' entry analogously,
    # continuing the same counter so that the integer levels of d's and
    # i's interleave consistently.  (Note: replaced entries are now ints,
    # so `prev == "d"` only fires right after a run of d's.)
    for j, s in enumerate(wseq):
        if s == "d":
            wseq[j] = w
            prev = s
        elif prev == "d":
            prev = s
            w += 1
    # Now scale weights
    # If the sequence ended in a run of d's, bump w once more so the
    # largest assigned level stays strictly below the threshold of 1.0.
    if prev == "d":
        w += 1
    wscale = 1 / w
    return [ww * wscale for ww in wseq]
    # return wseq
239
+
240
+
241
def weights_to_creation_sequence(
    weights, threshold=1, with_labels=False, compact=False
):
    """
    Returns a creation sequence for a threshold graph
    determined by the weights and threshold given as input.
    If the sum of two node weights is greater than the
    threshold value, an edge is created between these nodes.

    The creation sequence is a list of single characters 'd'
    or 'i': 'd' for dominating or 'i' for isolated vertices.
    Dominating vertices are connected to all vertices present
    when it is added.  The first node added is by convention 'd'.

    If with_labels==True:
        Returns a list of 2-tuples containing the vertex number
        and a character 'd' or 'i' which describes the type of vertex.

    If compact==True:
        Returns the creation sequence in a compact form that is the number
        of 'i's and 'd's alternating.
    Examples:
        [1,2,2,3] represents d,i,i,d,d,i,i,i
        [3,1,2] represents d,d,d,i,d,d

    Notice that the first number is the first vertex to be used for
    construction and so is always 'd'.

    with_labels and compact cannot both be True.
    """
    if with_labels and compact:
        raise ValueError("compact sequences cannot be labeled")

    # make an indexed copy
    if isinstance(weights, dict):  # labeled weights
        wseq = [[w, label] for (label, w) in weights.items()]
    else:
        wseq = [[w, i] for i, w in enumerate(weights)]
    wseq.sort()
    cs = []  # creation sequence
    # A node lighter than `cutoff` cannot reach the threshold even when
    # paired with the current heaviest node, so it must be isolated.
    cutoff = threshold - wseq[-1][0]
    while wseq:
        if wseq[0][0] < cutoff:  # isolated node
            (w, label) = wseq.pop(0)
            cs.append((label, "i"))
        else:
            # Heaviest node dominates everything still present.
            (w, label) = wseq.pop()
            cs.append((label, "d"))
            # NOTE(review): if this pop empties wseq (only possible for a
            # single-element input), the next line raises IndexError --
            # confirm single-node input is out of scope for this helper.
            cutoff = threshold - wseq[-1][0]
        if len(wseq) == 1:  # make sure we start with a d
            (w, label) = wseq.pop()
            cs.append((label, "d"))
    # put in correct order
    cs.reverse()

    if with_labels:
        return cs
    if compact:
        return make_compact(cs)
    return [v[1] for v in cs]  # not labeled
301
+
302
+
303
+ # Manipulating NetworkX.Graphs in context of threshold graphs
304
@nx._dispatchable(graphs=None, returns_graph=True)
def threshold_graph(creation_sequence, create_using=None):
    """
    Create a threshold graph from the creation sequence or compact
    creation_sequence.

    The input sequence can be a

    creation sequence (e.g. ['d','i','d','d','d','i'])
    labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')])
    compact creation sequence (e.g. [2,1,1,2,0])

    Use cs=creation_sequence(degree_sequence,labeled=True)
    to convert a degree sequence to a creation sequence.

    Returns None if the sequence is not valid
    """
    # Normalize every accepted input form to (label, type) pairs.
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        labeled = list(enumerate(creation_sequence))
    elif isinstance(first, tuple):  # already labeled
        labeled = creation_sequence[:]
    elif isinstance(first, int):  # compact creation sequence
        labeled = list(enumerate(uncompact(creation_sequence)))
    else:
        print("not a valid creation sequence type")
        return None

    G = nx.empty_graph(0, create_using)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    G.name = "Threshold Graph"

    # 'i' vertices are simply added; 'd' vertices also connect to every
    # vertex already present.
    for v, node_type in labeled:
        if node_type == "d":
            # Snapshot the node list: we mutate G while adding edges, and
            # iterating G directly would raise
            # "dictionary changed size during iteration".
            for existing in list(G):
                G.add_edge(v, existing)
        G.add_node(v)
    return G
354
+
355
+
356
@nx._dispatchable
def find_alternating_4_cycle(G):
    """
    Returns False if there aren't any alternating 4 cycles.
    Otherwise returns the cycle as [a,b,c,d] where (a,b)
    and (c,d) are edges and (a,c) and (b,d) are not.
    """
    for u, v in G.edges():
        for w in G.nodes():
            # w must be a non-neighbor of u (and distinct from it).
            if w == u or G.has_edge(u, w):
                continue
            for x in G.neighbors(w):
                # x must be a non-neighbor of v (and distinct from it).
                if x != v and not G.has_edge(v, x):
                    return [u, v, w, x]
    return False
370
+
371
+
372
@nx._dispatchable(returns_graph=True)
def find_threshold_graph(G, create_using=None):
    """
    Returns a threshold subgraph that is close to largest in `G`.

    The threshold graph will contain the largest degree node in G.

    Parameters
    ----------
    G : NetworkX graph instance
        An instance of `Graph`, or `MultiDiGraph`
    create_using : NetworkX graph class or `None` (default), optional
        Type of graph to use when constructing the threshold graph.
        If `None`, infer the appropriate graph type from the input.

    Returns
    -------
    graph :
        A graph instance representing the threshold graph

    Examples
    --------
    >>> from networkx.algorithms.threshold import find_threshold_graph
    >>> G = nx.barbell_graph(3, 3)
    >>> T = find_threshold_graph(G)
    >>> T.nodes  # may vary
    NodeView((7, 8, 5, 6))

    References
    ----------
    .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
    """
    # Greedily extract a creation sequence from G, then realize it.
    cs = find_creation_sequence(G)
    return threshold_graph(cs, create_using)
405
+
406
+
407
@nx._dispatchable
def find_creation_sequence(G):
    """
    Find a threshold subgraph that is close to largest in G.
    Returns the labeled creation sequence of that threshold graph.

    The subgraph is built greedily: isolated nodes are recorded as 'i',
    the current maximum-degree node is recorded as a dominating 'd',
    and the search then restricts to that node's neighborhood.
    """
    cs = []
    # get a local pointer to the working part of the graph
    H = G
    while H.order() > 0:
        # get new degree sequence on subgraph
        dsdict = dict(H.degree())
        ds = [(d, v) for v, d in dsdict.items()]
        ds.sort()
        # Update threshold graph nodes
        if ds[-1][0] == 0:  # all are isolated
            # Record all remaining nodes as 'i', except one marked 'd'
            # so the (reversed) sequence starts with a dominating node.
            cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"]))
            break  # Done!
        # pull off isolated nodes
        while ds[0][0] == 0:
            (d, iso) = ds.pop(0)
            cs.append((iso, "i"))
        # find new biggest node
        (d, bigv) = ds.pop()
        # add edges of star to t_g
        cs.append((bigv, "d"))
        # form subgraph of neighbors of big node
        # NOTE(review): subgraph() returns a view, so each iteration
        # narrows H to the intersection of all neighborhoods chosen so far.
        H = H.subgraph(H.neighbors(bigv))
    # Entries were recorded in removal order; creation order is the reverse.
    cs.reverse()
    return cs
437
+
438
+
439
+ # Properties of Threshold Graphs
440
def triangles(creation_sequence):
    """
    Compute the number of triangles in the threshold graph with the
    given creation sequence.

    Fix: use exact integer arithmetic (``//``) instead of true division.
    Both C(dr, 3) and C(dr, 2) are exact integers, so ``/`` only
    introduced float rounding for large graphs; ``//`` also makes the
    return type consistent with :func:`triangle_sequence`.
    """
    # Shortcut algorithm that doesn't require computing the number of
    # triangles at each node.
    cs = creation_sequence  # alias
    dr = cs.count("d")  # number of d's in sequence
    # The d's form a clique: C(dr, 3) triangles among them.
    ntri = dr * (dr - 1) * (dr - 2) // 6
    # Every 'i' adds C(dr, 2) triangles, where dr is the number of d's
    # to its right (each pair of later d's closes a triangle with it).
    for typ in cs:
        if typ == "i":
            ntri += dr * (dr - 1) // 2
        else:
            dr -= 1
    return ntri
458
+
459
+
460
def triangle_sequence(creation_sequence):
    """
    Return triangle sequence for the given threshold graph creation sequence.

    The i-th entry is the number of triangles through vertex i.

    Fix: ``prevsym`` was referenced before assignment whenever the
    sequence began with "i" (raising ``NameError``); it is now
    initialized before the loop.
    """
    cs = creation_sequence
    seq = []
    dr = cs.count("d")  # number of d's to the right of the current pos
    dcur = (dr - 1) * (dr - 2) // 2  # triangles through a node of clique dr
    irun = 0  # number of i's in the last run
    drun = 0  # number of d's in the last run
    prevsym = None  # fix: was unbound when cs started with "i"
    for sym in cs:
        if sym == "d":
            drun += 1
            tri = dcur + (dr - 1) * irun  # new triangles at this d
        else:  # sym == "i"
            if prevsym == "d":  # new string of i's
                dcur += (dr - 1) * irun  # accumulate shared triangles
                irun = 0  # reset i run counter
                dr -= drun  # reduce number of d's to right
                drun = 0  # reset d run counter
            irun += 1
            tri = dr * (dr - 1) // 2  # new triangles at this i
        seq.append(tri)
        prevsym = sym
    return seq
486
+
487
+
488
def cluster_sequence(creation_sequence):
    """
    Return cluster sequence for the given threshold graph creation sequence.

    The i-th entry is the clustering coefficient of vertex i: its
    triangle count divided by the number of possible triangles C(deg, 2).
    """
    tri_counts = triangle_sequence(creation_sequence)
    degrees = degree_sequence(creation_sequence)
    cseq = []
    for tri, deg in zip(tri_counts, degrees):
        if deg <= 1:
            # Isolated vertex or single pair gets clustering 0.
            cseq.append(0)
        else:
            cseq.append(tri / ((deg * (deg - 1)) // 2))
    return cseq
503
+
504
+
505
def degree_sequence(creation_sequence):
    """
    Return degree sequence for the threshold graph with the given
    creation sequence.
    """
    remaining_d = creation_sequence.count("d")  # d's at or after current pos
    seq = []
    for pos, sym in enumerate(creation_sequence):
        if sym == "d":
            remaining_d -= 1
            # A 'd' vertex links to all `pos` earlier vertices plus
            # every later 'd'.
            seq.append(remaining_d + pos)
        else:
            # An 'i' vertex links only to the d's added after it.
            seq.append(remaining_d)
    return seq
520
+
521
+
522
def density(creation_sequence):
    """
    Return the density of the graph with this creation_sequence.
    The density is the fraction of possible edges present.

    Fix: a creation sequence with fewer than two nodes previously raised
    ``ZeroDivisionError`` (``N * (N - 1) == 0``); such graphs now return
    0.0, matching the convention of ``networkx.density``.
    """
    N = len(creation_sequence)
    if N < 2:
        # No edge is possible with 0 or 1 nodes; define density as 0.
        return 0.0
    # Sum of degrees counts each edge twice, as does N*(N-1).
    two_size = sum(degree_sequence(creation_sequence))
    two_possible = N * (N - 1)
    return two_size / two_possible
532
+
533
+
534
def degree_correlation(creation_sequence):
    """
    Return the degree-degree correlation over all edges.

    This is the Pearson correlation coefficient of the degrees at the
    two endpoints of each edge (degree assortativity) of the threshold
    graph described by `creation_sequence`.

    Fix: removed the unused local ``rd = cs.count("d")``.
    """
    cs = creation_sequence
    s1 = 0  # sum of deg_i * deg_j over edges
    s2 = 0  # sum of deg_i^2 + deg_j^2 over edges
    s3 = 0  # sum of deg_i + deg_j over edges
    m = 0  # number of edges
    rdi = [i for i, sym in enumerate(cs) if sym == "d"]  # index of "d"s
    ds = degree_sequence(cs)
    for i, sym in enumerate(cs):
        if sym == "d":
            # Sanity check: the d-indices must be consumed in order.
            if i != rdi[0]:
                print("Logic error in degree_correlation", i, rdi)
                raise ValueError
            rdi.pop(0)
        # Every vertex is adjacent to exactly the 'd' vertices added
        # after it, so pairing i with the remaining indices in rdi
        # enumerates each edge exactly once.
        degi = ds[i]
        for dj in rdi:
            degj = ds[dj]
            s1 += degj * degi
            s2 += degi**2 + degj**2
            s3 += degi + degj
            m += 1
    denom = 2 * m * s2 - s3 * s3
    numer = 4 * m * s1 - s3 * s3
    if denom == 0:
        # Degenerate variance (all endpoint degrees equal): correlation 1.
        if numer == 0:
            return 1
        raise ValueError(f"Zero Denominator but Numerator is {numer}")
    return numer / denom
566
+
567
+
568
def shortest_path(creation_sequence, u, v):
    """
    Find the shortest path between u and v in a
    threshold graph G with the given creation_sequence.

    For an unlabeled creation_sequence, the vertices
    u and v must be integers in (0,len(sequence)) referring
    to the position of the desired vertices in the sequence.

    For a labeled creation_sequence, u and v are labels of vertices.

    Use cs=creation_sequence(degree_sequence,with_labels=True)
    to convert a degree sequence to a creation sequence.

    Returns a list of vertices from u to v (e.g. [u, v] if they are
    neighbors), or -1 if v is unreachable from u.
    """
    # Normalize every accepted input form to (label, type) pairs.
    head = creation_sequence[0]
    if isinstance(head, str):  # creation sequence
        labeled = list(enumerate(creation_sequence))
    elif isinstance(head, tuple):  # labeled creation sequence
        labeled = creation_sequence[:]
    elif isinstance(head, int):  # compact creation sequence
        labeled = list(enumerate(uncompact(creation_sequence)))
    else:
        raise TypeError("Not a valid creation sequence type")

    verts = [pair[0] for pair in labeled]
    if v not in verts:
        raise ValueError(f"Vertex {v} not in graph from creation_sequence")
    if u not in verts:
        raise ValueError(f"Vertex {u} not in graph from creation_sequence")
    # Done checking
    if u == v:
        return [u]

    bigind = max(verts.index(u), verts.index(v))
    if labeled[bigind][1] == "d":
        # The later-added endpoint dominates, so u and v are adjacent.
        return [u, v]
    # The later endpoint is 'i': any 'd' added afterwards is adjacent to
    # both endpoints, giving a path of length two.
    for vert, kind in reversed(labeled[bigind:]):
        if kind == "d":
            return [u, vert, v]
    # No dominating vertex after bigind: v is unreachable.
    return -1
619
+
620
+
621
def shortest_path_length(creation_sequence, i):
    """
    Return the shortest path length from indicated node to
    every other node for the threshold graph with the given
    creation sequence.
    Node is indicated by index i in creation_sequence unless
    creation_sequence is labeled in which case, i is taken to
    be the label of the node.

    Paths lengths in threshold graphs are at most 2.
    Length to unreachable nodes is set to -1.
    """
    # Turn input sequence into an unlabeled creation sequence and,
    # for labeled input, translate the label into a position index.
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        if isinstance(creation_sequence, list):
            cs = creation_sequence[:]
        else:
            cs = list(creation_sequence)
    elif isinstance(first, tuple):  # labeled creation sequence
        cs = [v[1] for v in creation_sequence]
        i = [v[0] for v in creation_sequence].index(i)
    elif isinstance(first, int):  # compact creation sequence
        cs = uncompact(creation_sequence)
    else:
        raise TypeError("Not a valid creation sequence type")

    # Compute
    N = len(cs)
    spl = [2] * N  # default: every node reachable in 2 steps
    spl[i] = 0  # except the source itself
    # Any 'd' added after node i is adjacent to it: distance 1.
    for j in range(i + 1, N):
        if cs[j] == "d":
            spl[j] = 1
    if cs[i] == "d":  # a 'd' node neighbors every earlier node
        for j in range(i):
            spl[j] = 1
    # Trailing 'i' nodes (no 'd' after them) are isolated: -1.
    for j in range(N - 1, 0, -1):
        if cs[j] == "d":
            break
        spl[j] = -1
    # Bug fix: if the source itself is a trailing 'i', it is isolated,
    # so every other node is unreachable from it (previously the left
    # nodes kept the bogus default of 2 and spl[i] was left at -1).
    if spl[i] == -1:
        spl = [-1] * N
    spl[i] = 0  # distance to self is always 0
    return spl
665
+
666
+
667
def betweenness_sequence(creation_sequence, normalized=True):
    """
    Return the betweenness centrality of each node of the threshold
    graph with the given creation sequence.

    If `normalized` is True (the default) the raw values are divided
    by (n-1)*(n-2), the number of ordered pairs of other nodes;
    otherwise the unscaled values are returned.

    NOTE(review): with ``normalized=True`` a sequence of length <= 2
    raises ZeroDivisionError; callers appear to be expected to pass
    longer sequences — confirm.
    """
    cs = creation_sequence
    seq = []  # betweenness value for each node, in sequence order
    lastchar = "d"  # first node is always a 'd'
    dr = float(cs.count("d"))  # number of d's to the right of current pos
    irun = 0  # number of i's in the last run
    drun = 0  # number of d's in the last run
    dlast = 0.0  # betweenness of last d
    for i, c in enumerate(cs):
        if c == "d":  # cs[i]=="d":
            # betweenness = amt shared with earlier d's and i's
            # + new isolated nodes covered
            # + new paths to all previous nodes
            b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr
            drun += 1  # update counter
        else:  # cs[i]="i":
            if lastchar == "d":  # if this is a new run of i's
                dlast = b  # accumulate betweenness
                dr -= drun  # update number of d's to the right
                drun = 0  # reset d counter
                irun = 0  # reset i counter
            b = 0  # isolated nodes have zero betweenness
            irun += 1  # add another i to the run
        seq.append(float(b))
        lastchar = c

    # normalize by the number of possible shortest paths
    if normalized:
        order = len(cs)
        scale = 1.0 / ((order - 1) * (order - 2))
        seq = [s * scale for s in seq]

    return seq
705
+
706
+
707
def eigenvectors(creation_sequence):
    """
    Return a 2-tuple of Laplacian eigenvalues and eigenvectors
    for the threshold network with creation_sequence.
    The first value is a list of eigenvalues.
    The second value is a list of eigenvectors.
    The lists are in the same order so corresponding eigenvectors
    and eigenvalues are in the same position in the two lists.

    Notice that the order of the eigenvalues returned by eigenvalues(cs)
    may not correspond to the order of these eigenvectors.
    """
    # Work on the compact form: a list of run lengths, where even
    # positions count 'd' runs (see dr below) — presumably the runs
    # alternate d/i starting with 'd'; confirm against make_compact.
    ccs = make_compact(creation_sequence)
    N = sum(ccs)  # total number of nodes
    vec = [0] * N  # eigenvectors, filled in position by position
    val = vec[:]  # matching eigenvalues
    # get number of type d nodes to the right (all for first node)
    dr = sum(ccs[::2])

    nn = ccs[0]  # length of the first run
    vec[0] = [1.0 / sqrt(N)] * N  # constant vector; val[0] = 0 below
    val[0] = 0
    e = dr  # eigenvalue shared by vectors within the first run
    dr -= nn
    type_d = True  # parity flag: True while the current run is 'd'
    i = 1  # next free slot in vec/val and current node index
    dd = 1
    # Vectors supported inside the first run: -scale on the first i
    # entries, dd*scale on entry i, zero elsewhere (normalized).
    while dd < nn:
        scale = 1.0 / sqrt(dd * dd + i)
        vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1)
        val[i] = e
        i += 1
        dd += 1
    if len(ccs) == 1:  # single run: done
        return (val, vec)
    for nn in ccs[1:]:  # remaining runs, alternating type
        # Vector separating this whole run from all earlier nodes.
        scale = 1.0 / sqrt(nn * i * (i + nn))
        vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn)
        # find eigenvalue
        type_d = not type_d
        if type_d:
            e = i + dr
            dr -= nn
        else:
            e = dr
        val[i] = e
        st = i  # start index of this run
        i += 1
        dd = 1
        # Vectors supported inside this run, analogous to the first-run
        # loop but offset by st leading zeros.
        while dd < nn:
            scale = 1.0 / sqrt(i - st + dd * dd)
            vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1)
            val[i] = e
            i += 1
            dd += 1
    return (val, vec)
763
+
764
+
765
def spectral_projection(u, eigenpairs):
    """
    Returns the coefficients of each eigenvector
    in a projection of the vector u onto the normalized
    eigenvectors which are contained in eigenpairs.

    eigenpairs should be a list of two objects. The
    first is a list of eigenvalues and the second a list
    of eigenvectors. The eigenvectors should be lists.

    There's not a lot of error checking on lengths of
    arrays, etc. so be careful.
    """
    evecs = eigenpairs[1]
    # Each coefficient is the dot product of u with one eigenvector.
    return [sum(ev_i * u_i for ev_i, u_i in zip(ev, u)) for ev in evecs]
784
+
785
+
786
def eigenvalues(creation_sequence):
    """
    Return sequence of eigenvalues of the Laplacian of the threshold
    graph for the given creation_sequence.

    Based on the Ferrer's diagram method. The spectrum is integral
    and is the conjugate of the degree sequence.

    See::

      @Article{degree-merris-1994,
       author = 	 {Russel Merris},
       title = 	 {Degree maximal graphs are Laplacian integral},
       journal = 	 {Linear Algebra Appl.},
       year = 	 {1994},
       volume = 	 {199},
       pages = 	 {381--389},
      }

    """
    # Walk the Ferrers diagram of the sorted degree sequence: `eig`
    # grows while the current largest degree still spans the remaining
    # rows; otherwise the current column height is emitted as an
    # eigenvalue and one row is consumed.
    degrees = sorted(degree_sequence(creation_sequence))
    rows = len(degrees)
    top = degrees.pop()  # current largest remaining degree
    eig = 0
    eiglist = []  # zero is always one eigenvalue
    while rows:
        if top >= rows:
            eig += 1
            top = degrees.pop() if degrees else 0
        else:
            eiglist.append(eig)
            rows -= 1
    return eiglist
823
+
824
+
825
+ # Threshold graph creation routines
826
+
827
+
828
@py_random_state(2)
def random_threshold_sequence(n, p, seed=None):
    """
    Create a random threshold sequence of size n.
    A creation sequence is built by randomly choosing d's with
    probability p and i's with probability 1-p.

    s=nx.random_threshold_sequence(10,0.5)

    returns a threshold sequence of length 10 with equal
    probably of an i or a d at each position.

    A "random" threshold graph can be built with

    G=nx.threshold_graph(s)

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    """
    if not (0 <= p <= 1):
        raise ValueError("p must be in [0,1]")

    # The first node of a threshold sequence is always a 'd'; each of
    # the remaining n-1 nodes is 'd' with probability p, else 'i'.
    return ["d"] + ["d" if seed.random() < p else "i" for _ in range(1, n)]
858
+
859
+
860
+ # maybe *_d_threshold_sequence routines should
861
+ # be (or be called from) a single routine with a more descriptive name
862
+ # and a keyword parameter?
863
def right_d_threshold_sequence(n, m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m).

    The routine returns an unlabeled creation sequence
    for the threshold graph.

    Algorithm: start from one 'd' followed by n-1 isolated ('i')
    nodes.  If m < n a single 'd' at position m yields exactly m
    edges (a 'd' at position k links to all k earlier nodes).
    Otherwise 'i's are converted to 'd's from the right end until
    the running edge count reaches m, and one final 'd' is placed
    to make up the exact remainder — concentrating the dominating
    nodes at the late (right) end of the sequence.

    Raises ValueError if m exceeds n*(n-1)/2, the complete graph.
    """
    cs = ["d"] + ["i"] * (n - 1)  # sequence with n-1 isolated nodes

    # m < n : not enough edges for a connected graph; one 'd' at
    # position m contributes exactly m edges.
    if m < n:
        cs[m] = "d"
        return cs

    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")

    # connected case m > n-1: flip 'i's to 'd's from the right;
    # `total` tracks the accumulated edge count (renamed from `sum`,
    # which shadowed the builtin).
    ind = n - 1
    total = n - 1
    while total < m:
        cs[ind] = "d"
        ind -= 1
        total += ind
    # place the last 'd' so the edge count is exactly m
    ind = m - (total - ind)
    cs[ind] = "d"
    return cs
895
+
896
+
897
def left_d_threshold_sequence(n, m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m).

    The routine returns an unlabeled creation sequence
    for the threshold graph.

    Algorithm: start from one 'd' followed by n-1 isolated ('i')
    nodes.  If m < n a single 'd' at position m yields exactly m
    edges (a 'd' at position k links to all k earlier nodes).
    Otherwise the last node is fixed as a 'd' (n-1 edges, making
    the graph connected) and further 'd's are turned on from
    position 1 rightward until at least m edges exist; if the
    count overshoots, one 'd' is flipped back to 'i' to land on
    exactly m — concentrating dominating nodes at the left end.

    Raises ValueError if m exceeds n*(n-1)/2, the complete graph.
    """
    cs = ["d"] + ["i"] * (n - 1)  # sequence with n-1 isolated nodes

    # m < n : not enough edges for a connected graph; one 'd' at
    # position m contributes exactly m edges.
    if m < n:
        cs[m] = "d"
        return cs

    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")

    # Connected case when m > n-1; `total` tracks the accumulated
    # edge count (renamed from `sum`, which shadowed the builtin).
    cs[n - 1] = "d"
    total = n - 1
    ind = 1
    while total < m:
        cs[ind] = "d"
        total += ind
        ind += 1
    if total > m:  # be sure not to change the first vertex
        cs[total - m] = "i"
    return cs
930
+
931
+
932
@py_random_state(3)
def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
    """
    Perform a "swap" operation on a threshold sequence.

    The swap preserves the number of nodes and edges
    in the graph for the given sequence.
    The resulting sequence is still a threshold sequence.

    Perform one split and one combine operation on the
    'd's of a creation sequence for a threshold graph.
    This operation maintains the number of nodes and edges
    in the graph, but shifts the edges from node to node
    maintaining the threshold quality of the graph.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    """
    # preprocess the creation sequence: candidate 'd' positions,
    # excluding the first and last entries of cs.
    # NOTE(review): enumerate runs over cs[1:-1], so these indices are
    # shifted down by one relative to positions in cs (cs[1] appears
    # as index 0), yet they are later used to index cs directly —
    # confirm this offset is intended before changing it.
    dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"]
    # split: with probability p_split, replace one 'd' at `choice` by
    # two 'd's at positions summing to `choice`.
    if seed.random() < p_split:
        choice = seed.choice(dlist)
        split_to = seed.choice(range(choice))
        flip_side = choice - split_to
        # only split onto two distinct positions that are currently 'i'
        if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i":
            cs[choice] = "i"
            cs[split_to] = "d"
            cs[flip_side] = "d"
            dlist.remove(choice)
            # don't add or combine may reverse this action
            # dlist.extend([split_to,flip_side])
            # print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side)
    # combine: with probability p_combine, merge two 'd's into a single
    # 'd' at the summed position.
    if seed.random() < p_combine and dlist:
        first_choice = seed.choice(dlist)
        second_choice = seed.choice(dlist)
        target = first_choice + second_choice
        # abort if the merge target is out of range, already a 'd',
        # or the two chosen positions coincide
        if target >= len(cs) or cs[target] == "d" or first_choice == second_choice:
            return cs
        # OK to combine
        cs[first_choice] = "i"
        cs[second_choice] = "i"
        cs[target] = "d"
        # print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target)

    return cs