Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/certifi/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/certifi/__pycache__/__main__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/certifi/__pycache__/core.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/conftest.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/convert.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/convert_matrix.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/exception.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/lazy_imports.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/relabel.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/asteroidal.py +170 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/boundary.py +167 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/chains.py +172 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/chordal.py +442 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/clique.py +754 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/cluster.py +609 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/communicability_alg.py +162 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/core.py +648 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/covering.py +142 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/cycles.py +1231 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/d_separation.py +722 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/distance_measures.py +951 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/distance_regular.py +238 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/dominance.py +135 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/dominating.py +94 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/efficiency_measures.py +168 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/euler.py +469 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/graph_hashing.py +322 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/graphical.py +483 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/hierarchy.py +48 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/isolate.py +107 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/link_prediction.py +688 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/lowest_common_ancestors.py +268 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/matching.py +1151 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/mis.py +77 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/moral.py +59 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/node_classification.py +218 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/planar_drawing.py +464 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/polynomials.py +305 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/regular.py +214 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/richclub.py +138 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/similarity.py +1777 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/simple_paths.py +937 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/smetric.py +60 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/sparsifiers.py +295 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/structuralholes.py +283 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/summarization.py +563 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/swap.py +407 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/threshold.py +979 -0
- llmeval-env/lib/python3.10/site-packages/networkx/algorithms/time_dependent.py +142 -0
llmeval-env/lib/python3.10/site-packages/certifi/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (293 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/certifi/__pycache__/__main__.cpython-310.pyc
ADDED
Binary file (431 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/certifi/__pycache__/core.cpython-310.pyc
ADDED
Binary file (2.12 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.23 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (6.12 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/convert.cpython-310.pyc
ADDED
Binary file (13.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/convert_matrix.cpython-310.pyc
ADDED
Binary file (37.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/exception.cpython-310.pyc
ADDED
Binary file (4.52 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/lazy_imports.cpython-310.pyc
ADDED
Binary file (5.87 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/__pycache__/relabel.cpython-310.pyc
ADDED
Binary file (10.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/asteroidal.py
ADDED
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Algorithms for asteroidal triples and asteroidal numbers in graphs.
|
3 |
+
|
4 |
+
An asteroidal triple in a graph G is a set of three non-adjacent vertices
|
5 |
+
u, v and w such that there exist a path between any two of them that avoids
|
6 |
+
closed neighborhood of the third. More formally, v_j, v_k belongs to the same
|
7 |
+
connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood
|
8 |
+
of v_i. A graph which does not contain any asteroidal triples is called
|
9 |
+
an AT-free graph. The class of AT-free graphs is a graph class for which
|
10 |
+
many NP-complete problems are solvable in polynomial time. Amongst them,
|
11 |
+
independent set and coloring.
|
12 |
+
"""
|
13 |
+
import networkx as nx
|
14 |
+
from networkx.utils import not_implemented_for
|
15 |
+
|
16 |
+
__all__ = ["is_at_free", "find_asteroidal_triple"]
|
17 |
+
|
18 |
+
|
19 |
+
@not_implemented_for("directed")
|
20 |
+
@not_implemented_for("multigraph")
|
21 |
+
@nx._dispatchable
|
22 |
+
def find_asteroidal_triple(G):
|
23 |
+
r"""Find an asteroidal triple in the given graph.
|
24 |
+
|
25 |
+
An asteroidal triple is a triple of non-adjacent vertices such that
|
26 |
+
there exists a path between any two of them which avoids the closed
|
27 |
+
neighborhood of the third. It checks all independent triples of vertices
|
28 |
+
and whether they are an asteroidal triple or not. This is done with the
|
29 |
+
help of a data structure called a component structure.
|
30 |
+
A component structure encodes information about which vertices belongs to
|
31 |
+
the same connected component when the closed neighborhood of a given vertex
|
32 |
+
is removed from the graph. The algorithm used to check is the trivial
|
33 |
+
one, outlined in [1]_, which has a runtime of
|
34 |
+
:math:`O(|V||\overline{E} + |V||E|)`, where the second term is the
|
35 |
+
creation of the component structure.
|
36 |
+
|
37 |
+
Parameters
|
38 |
+
----------
|
39 |
+
G : NetworkX Graph
|
40 |
+
The graph to check whether is AT-free or not
|
41 |
+
|
42 |
+
Returns
|
43 |
+
-------
|
44 |
+
list or None
|
45 |
+
An asteroidal triple is returned as a list of nodes. If no asteroidal
|
46 |
+
triple exists, i.e. the graph is AT-free, then None is returned.
|
47 |
+
The returned value depends on the certificate parameter. The default
|
48 |
+
option is a bool which is True if the graph is AT-free, i.e. the
|
49 |
+
given graph contains no asteroidal triples, and False otherwise, i.e.
|
50 |
+
if the graph contains at least one asteroidal triple.
|
51 |
+
|
52 |
+
Notes
|
53 |
+
-----
|
54 |
+
The component structure and the algorithm is described in [1]_. The current
|
55 |
+
implementation implements the trivial algorithm for simple graphs.
|
56 |
+
|
57 |
+
References
|
58 |
+
----------
|
59 |
+
.. [1] Ekkehard Köhler,
|
60 |
+
"Recognizing Graphs without asteroidal triples",
|
61 |
+
Journal of Discrete Algorithms 2, pages 439-452, 2004.
|
62 |
+
https://www.sciencedirect.com/science/article/pii/S157086670400019X
|
63 |
+
"""
|
64 |
+
V = set(G.nodes)
|
65 |
+
|
66 |
+
if len(V) < 6:
|
67 |
+
# An asteroidal triple cannot exist in a graph with 5 or less vertices.
|
68 |
+
return None
|
69 |
+
|
70 |
+
component_structure = create_component_structure(G)
|
71 |
+
E_complement = set(nx.complement(G).edges)
|
72 |
+
|
73 |
+
for e in E_complement:
|
74 |
+
u = e[0]
|
75 |
+
v = e[1]
|
76 |
+
u_neighborhood = set(G[u]).union([u])
|
77 |
+
v_neighborhood = set(G[v]).union([v])
|
78 |
+
union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
|
79 |
+
for w in V - union_of_neighborhoods:
|
80 |
+
# Check for each pair of vertices whether they belong to the
|
81 |
+
# same connected component when the closed neighborhood of the
|
82 |
+
# third is removed.
|
83 |
+
if (
|
84 |
+
component_structure[u][v] == component_structure[u][w]
|
85 |
+
and component_structure[v][u] == component_structure[v][w]
|
86 |
+
and component_structure[w][u] == component_structure[w][v]
|
87 |
+
):
|
88 |
+
return [u, v, w]
|
89 |
+
return None
|
90 |
+
|
91 |
+
|
92 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def is_at_free(G):
    """Check if a graph is AT-free.

    A graph is AT-free when it contains no asteroidal triple. This
    delegates the search to `find_asteroidal_triple`; the graph is
    AT-free exactly when that search comes back empty.

    Parameters
    ----------
    G : NetworkX Graph
        The graph to check whether is AT-free or not.

    Returns
    -------
    bool
        True if G is AT-free and False otherwise.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
    >>> nx.is_at_free(G)
    True

    >>> G = nx.cycle_graph(6)
    >>> nx.is_at_free(G)
    False
    """
    triple = find_asteroidal_triple(G)
    return triple is None
|
124 |
+
|
125 |
+
|
126 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def create_component_structure(G):
    r"""Create component structure for G.

    A *component structure* is an `nxn` array, denoted `c`, where `n` is
    the number of vertices, where each row and column corresponds to a vertex.

    .. math::
        c_{uv} = \begin{cases} 0, if v \in N[u] \\
            k, if v \in component k of G \setminus N[u] \end{cases}

    Where `k` is an arbitrary label for each component. The structure is used
    to simplify the detection of asteroidal triples.

    Parameters
    ----------
    G : NetworkX Graph
        Undirected, simple graph.

    Returns
    -------
    component_structure : dictionary
        A dictionary of dictionaries, keyed by pairs of vertices.
    """
    all_nodes = set(G.nodes)
    component_structure = {}
    for vertex in all_nodes:
        closed_nbhd = set(G[vertex]) | {vertex}
        # Every vertex inside the closed neighborhood gets label 0.
        row = dict.fromkeys(closed_nbhd, 0)
        # Label the connected components of G - N[vertex] starting at 1.
        remainder = G.subgraph(all_nodes - closed_nbhd)
        for label, component in enumerate(nx.connected_components(remainder), start=1):
            for member in component:
                row[member] = label
        component_structure[vertex] = row

    return component_structure
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/boundary.py
ADDED
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Routines to find the boundary of a set of nodes.
|
2 |
+
|
3 |
+
An edge boundary is a set of edges, each of which has exactly one
|
4 |
+
endpoint in a given set of nodes (or, in the case of directed graphs,
|
5 |
+
the set of edges whose source node is in the set).
|
6 |
+
|
7 |
+
A node boundary of a set *S* of nodes is the set of (out-)neighbors of
|
8 |
+
nodes in *S* that are outside *S*.
|
9 |
+
|
10 |
+
"""
|
11 |
+
from itertools import chain
|
12 |
+
|
13 |
+
import networkx as nx
|
14 |
+
|
15 |
+
__all__ = ["edge_boundary", "node_boundary"]
|
16 |
+
|
17 |
+
|
18 |
+
@nx._dispatchable(edge_attrs={"data": "default"}, preserve_edge_attrs="data")
def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None):
    """Returns the edge boundary of `nbunch1`.

    The *edge boundary* of a set *S* with respect to a set *T* is the
    set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*.
    If *T* is not specified, it is assumed to be the set of all nodes
    not in *S*.

    Parameters
    ----------
    G : NetworkX graph

    nbunch1 : iterable
        Iterable of nodes in the graph representing the set of nodes
        whose edge boundary will be returned. (This is the set *S* from
        the definition above.)

    nbunch2 : iterable
        Iterable of nodes representing the target (or "exterior") set of
        nodes. (This is the set *T* from the definition above.) If not
        specified, this is assumed to be the set of all nodes in `G`
        not in `nbunch1`.

    keys : bool
        This parameter has the same meaning as in :meth:`MultiGraph.edges`.

    data : bool or object
        This parameter has the same meaning as in :meth:`MultiGraph.edges`.

    default : object
        This parameter has the same meaning as in :meth:`MultiGraph.edges`.

    Returns
    -------
    iterator
        An iterator over the edges in the boundary of `nbunch1` with
        respect to `nbunch2`. If `keys`, `data`, or `default`
        are specified and `G` is a multigraph, then edges are returned
        with keys and/or data, as in :meth:`MultiGraph.edges`.

    Examples
    --------
    >>> G = nx.wheel_graph(6)

    When nbunch2=None:

    >>> list(nx.edge_boundary(G, (1, 3)))
    [(1, 0), (1, 2), (1, 5), (3, 0), (3, 2), (3, 4)]

    When nbunch2 is given:

    >>> list(nx.edge_boundary(G, (1, 3), (2, 0)))
    [(1, 0), (1, 2), (3, 0), (3, 2)]

    Notes
    -----
    Any element of `nbunch` that is not in the graph `G` will be ignored.

    `nbunch1` and `nbunch2` are usually meant to be disjoint, but in
    the interest of speed and generality, that is not required here.
    """
    # Restrict the interior set to nodes actually present in G.
    interior = {node for node in nbunch1 if node in G}
    # Edges incident to the interior set. `Graph.edges()` gives no
    # guarantee on edge orientation, so the tests below must accept
    # either (u, v) or (v, u).
    if G.is_multigraph():
        candidate_edges = G.edges(interior, data=data, keys=keys, default=default)
    else:
        candidate_edges = G.edges(interior, data=data, default=default)

    if nbunch2 is None:
        # Exterior defaults to the complement of the interior: keep edges
        # with exactly one endpoint inside (boolean inequality == XOR).
        return (
            edge
            for edge in candidate_edges
            if (edge[0] in interior) != (edge[1] in interior)
        )

    exterior = set(nbunch2)

    def _crosses(edge):
        # True when one endpoint is interior and the other exterior,
        # in either orientation.
        first, second = edge[0], edge[1]
        return (first in interior and second in exterior) or (
            second in interior and first in exterior
        )

    return filter(_crosses, candidate_edges)
|
107 |
+
|
108 |
+
|
109 |
+
@nx._dispatchable
def node_boundary(G, nbunch1, nbunch2=None):
    """Returns the node boundary of `nbunch1`.

    The *node boundary* of a set *S* with respect to a set *T* is the
    set of nodes *v* in *T* such that for some *u* in *S*, there is an
    edge joining *u* to *v*. If *T* is not specified, it is assumed to
    be the set of all nodes not in *S*.

    Parameters
    ----------
    G : NetworkX graph

    nbunch1 : iterable
        Iterable of nodes in the graph representing the set of nodes
        whose node boundary will be returned. (This is the set *S* from
        the definition above.)

    nbunch2 : iterable
        Iterable of nodes representing the target (or "exterior") set of
        nodes. (This is the set *T* from the definition above.) If not
        specified, this is assumed to be the set of all nodes in `G`
        not in `nbunch1`.

    Returns
    -------
    set
        The node boundary of `nbunch1` with respect to `nbunch2`.

    Examples
    --------
    >>> G = nx.wheel_graph(6)

    When nbunch2=None:

    >>> list(nx.node_boundary(G, (3, 4)))
    [0, 2, 5]

    When nbunch2 is given:

    >>> list(nx.node_boundary(G, (3, 4), (0, 1, 5)))
    [0, 5]

    Notes
    -----
    Any element of `nbunch` that is not in the graph `G` will be ignored.

    `nbunch1` and `nbunch2` are usually meant to be disjoint, but in
    the interest of speed and generality, that is not required here.
    """
    # Only consider nodes that exist in G.
    interior = {node for node in nbunch1 if node in G}
    # Collect every neighbor of the interior, then drop the interior
    # itself to leave only outside neighbors.
    boundary = set()
    for node in interior:
        boundary.update(G[node])
    boundary -= interior
    # When an explicit exterior is given, restrict the boundary to it;
    # otherwise the exterior is implicitly the complement of `interior`.
    if nbunch2 is not None:
        boundary.intersection_update(nbunch2)
    return boundary
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/chains.py
ADDED
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for finding chains in a graph."""
|
2 |
+
|
3 |
+
import networkx as nx
|
4 |
+
from networkx.utils import not_implemented_for
|
5 |
+
|
6 |
+
__all__ = ["chain_decomposition"]
|
7 |
+
|
8 |
+
|
9 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def chain_decomposition(G, root=None):
    """Returns the chain decomposition of a graph.

    The *chain decomposition* of a graph with respect a depth-first
    search tree is a set of cycles or paths derived from the set of
    fundamental cycles of the tree in the following manner. Consider
    each fundamental cycle with respect to the given tree, represented
    as a list of edges beginning with the nontree edge oriented away
    from the root of the tree. For each fundamental cycle, if it
    overlaps with any previous fundamental cycle, just take the initial
    non-overlapping segment, which is a path instead of a cycle. Each
    cycle or path is called a *chain*. For more information, see [1]_.

    Parameters
    ----------
    G : undirected graph

    root : node (optional)
       A node in the graph `G`. If specified, only the chain
       decomposition for the connected component containing this node
       will be returned. This node indicates the root of the depth-first
       search tree.

    Yields
    ------
    chain : list
       A list of edges representing a chain. There is no guarantee on
       the orientation of the edges in each chain (for example, if a
       chain includes the edge joining nodes 1 and 2, the chain may
       include either (1, 2) or (2, 1)).

    Raises
    ------
    NodeNotFound
       If `root` is not in the graph `G`.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> list(nx.chain_decomposition(G))
    [[(4, 5), (5, 3), (3, 4)]]

    Notes
    -----
    The worst-case running time of this implementation is linear in the
    number of nodes and number of edges [1]_.

    References
    ----------
    .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
       and 2-edge-connectivity." *Information Processing Letters*,
       113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016>

    """

    def _dfs_cycle_forest(G, root=None):
        """Builds a directed graph composed of cycles from the given graph.

        `G` is an undirected simple graph. `root` is a node in the graph
        from which the depth-first search is started.

        This function returns both the depth-first search cycle graph
        (as a :class:`~networkx.DiGraph`) and the list of nodes in
        depth-first preorder. The depth-first search cycle graph is a
        directed graph whose edges are the edges of `G` oriented toward
        the root if the edge is a tree edge and away from the root if
        the edge is a non-tree edge. If `root` is not specified, this
        performs a depth-first search on each connected component of `G`
        and returns a directed forest instead.

        If `root` is not in the graph, this raises :exc:`KeyError`.

        """
        # Create a directed graph from the depth-first search tree with
        # root node `root` in which tree edges are directed toward the
        # root and nontree edges are directed away from the root. For
        # each node with an incident nontree edge, this creates a
        # directed cycle starting with the nontree edge and returning to
        # that node.
        #
        # The `parent` node attribute stores the parent of each node in
        # the DFS tree. The `nontree` edge attribute indicates whether
        # the edge is a tree edge or a nontree edge.
        #
        # We also store the order of the nodes found in the depth-first
        # search in the `nodes` list.
        H = nx.DiGraph()
        nodes = []
        for u, v, d in nx.dfs_labeled_edges(G, source=root):
            if d == "forward":
                # `dfs_labeled_edges()` yields (root, root, 'forward')
                # if it is beginning the search on a new connected
                # component.
                if u == v:
                    H.add_node(v, parent=None)
                    nodes.append(v)
                else:
                    H.add_node(v, parent=u)
                    # Tree edge: directed toward the root.
                    H.add_edge(v, u, nontree=False)
                    nodes.append(v)
            # `dfs_labeled_edges` considers nontree edges in both
            # orientations, so we need to not add the edge if it its
            # other orientation has been added.
            elif d == "nontree" and v not in H[u]:
                H.add_edge(v, u, nontree=True)
            else:
                # Do nothing on 'reverse' edges; we only care about
                # forward and nontree edges.
                pass
        return H, nodes

    def _build_chain(G, u, v, visited):
        """Generate the chain starting from the given nontree edge.

        `G` is a DFS cycle graph as constructed by
        :func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge
        that begins a chain. `visited` is a set representing the nodes
        in `G` that have already been visited.

        This function yields the edges in an initial segment of the
        fundamental cycle of `G` starting with the nontree edge (`u`,
        `v`) that includes all the edges up until the first node that
        appears in `visited`. The tree edges are given by the 'parent'
        node attribute. The `visited` set is updated to add each node in
        an edge yielded by this function.

        """
        # Walk tree edges toward the root until hitting a node that a
        # previous chain already claimed; that node closes this chain.
        while v not in visited:
            yield u, v
            visited.add(v)
            u, v = v, G.nodes[v]["parent"]
        yield u, v

    # Check if the root is in the graph G. If not, raise NodeNotFound
    if root is not None and root not in G:
        raise nx.NodeNotFound(f"Root node {root} is not in graph")

    # Create a directed version of H that has the DFS edges directed
    # toward the root and the nontree edges directed away from the root
    # (in each connected component).
    H, nodes = _dfs_cycle_forest(G, root)

    # Visit the nodes again in DFS order. For each node, and for each
    # nontree edge leaving that node, compute the fundamental cycle for
    # that nontree edge starting with that edge. If the fundamental
    # cycle overlaps with any visited nodes, just take the prefix of the
    # cycle up to the point of visited nodes.
    #
    # We repeat this process for each connected component (implicitly,
    # since `nodes` already has a list of the nodes grouped by connected
    # component).
    visited = set()
    for u in nodes:
        visited.add(u)
        # For each nontree edge going out of node u...
        edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d)
        for u, v in edges:
            # Create the cycle or cycle prefix starting with the
            # nontree edge.
            chain = list(_build_chain(H, u, v, visited))
            yield chain
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/chordal.py
ADDED
@@ -0,0 +1,442 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Algorithms for chordal graphs.
|
3 |
+
|
4 |
+
A graph is chordal if every cycle of length at least 4 has a chord
|
5 |
+
(an edge joining two nodes not adjacent in the cycle).
|
6 |
+
https://en.wikipedia.org/wiki/Chordal_graph
|
7 |
+
"""
|
8 |
+
import sys
|
9 |
+
|
10 |
+
import networkx as nx
|
11 |
+
from networkx.algorithms.components import connected_components
|
12 |
+
from networkx.utils import arbitrary_element, not_implemented_for
|
13 |
+
|
14 |
+
__all__ = [
|
15 |
+
"is_chordal",
|
16 |
+
"find_induced_nodes",
|
17 |
+
"chordal_graph_cliques",
|
18 |
+
"chordal_graph_treewidth",
|
19 |
+
"NetworkXTreewidthBoundExceeded",
|
20 |
+
"complete_to_chordal_graph",
|
21 |
+
]
|
22 |
+
|
23 |
+
|
24 |
+
class NetworkXTreewidthBoundExceeded(nx.NetworkXException):
    """Exception raised when a treewidth bound has been provided and it has
    been exceeded."""
|
27 |
+
|
28 |
+
|
29 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def is_chordal(G):
    """Checks whether G is a chordal graph.

    A graph is chordal if every cycle of length at least 4 has a chord
    (an edge joining two nodes not adjacent in the cycle).

    Parameters
    ----------
    G : graph
      A NetworkX graph.

    Returns
    -------
    chordal : bool
      True if G is a chordal graph and False otherwise.

    Raises
    ------
    NetworkXNotImplemented
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.

    Examples
    --------
    >>> e = [
    ...     (1, 2),
    ...     (1, 3),
    ...     (2, 3),
    ...     (2, 4),
    ...     (3, 4),
    ...     (3, 5),
    ...     (3, 6),
    ...     (4, 5),
    ...     (4, 6),
    ...     (5, 6),
    ... ]
    >>> G = nx.Graph(e)
    >>> nx.is_chordal(G)
    True

    Notes
    -----
    The routine tries to go through every node following maximum cardinality
    search. It returns False when it finds that the separator for any node
    is not a clique. Based on the algorithms in [1]_.

    Self loops are ignored.

    References
    ----------
    .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms
       to test chordality of graphs, test acyclicity of hypergraphs, and
       selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984),
       pp. 566–579.
    """
    # Any graph on at most three nodes is trivially chordal: no cycle of
    # length four or more can exist.
    if len(G.nodes) <= 3:
        return True
    # The graph is chordal exactly when no chordality breaker is found.
    breaker = _find_chordality_breaker(G)
    return len(breaker) == 0
|
89 |
+
|
90 |
+
|
91 |
+
@nx._dispatchable
def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
    """Returns the set of induced nodes in the path from s to t.

    Parameters
    ----------
    G : graph
        A chordal NetworkX graph
    s : node
        Source node to look for induced nodes
    t : node
        Destination node to look for induced nodes
    treewidth_bound: float
        Maximum treewidth acceptable for the graph H. The search
        for induced nodes will end as soon as the treewidth_bound is exceeded.

    Returns
    -------
    induced_nodes : Set of nodes
        The set of induced nodes in the path from s to t in G

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
        The algorithm can only be applied to chordal graphs. If the input
        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.

    Examples
    --------
    >>> G = nx.generators.classic.path_graph(10)
    >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
    >>> sorted(induced_nodes)
    [1, 2, 3, 4, 5, 6, 7, 8, 9]

    Notes
    -----
    G must be a chordal graph and (s,t) an edge that is not in G.

    If a treewidth_bound is provided, the search for induced nodes will end
    as soon as the treewidth_bound is exceeded.

    The algorithm is inspired by Algorithm 4 in [1]_.
    A formal definition of induced node can also be found on that reference.

    Self Loops are ignored

    References
    ----------
    .. [1] Learning Bounded Treewidth Bayesian Networks.
       Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
       http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
    """
    if not is_chordal(G):
        raise nx.NetworkXError("Input graph is not chordal.")

    # Work on a copy so the temporary chords added below never mutate G.
    H = nx.Graph(G)
    H.add_edge(s, t)
    induced_nodes = set()
    triplet = _find_chordality_breaker(H, s, treewidth_bound)
    while triplet:
        # Every node of the chordality-breaking triple lies on an induced
        # path from s to t; record it and chord it to s so the next search
        # digs deeper into the path.
        # (Removed the dead unpacking `(u, v, w) = triplet`: the three
        # locals were never used.)
        induced_nodes.update(triplet)
        for n in triplet:
            if n != s:
                H.add_edge(s, n)
        triplet = _find_chordality_breaker(H, s, treewidth_bound)
    if induced_nodes:
        # Add t and the second node in the induced path from s to t.
        induced_nodes.add(t)
        for u in G[s]:
            if len(induced_nodes & set(G[u])) == 2:
                induced_nodes.add(u)
                break
    return induced_nodes
|
169 |
+
|
170 |
+
|
171 |
+
@nx._dispatchable
def chordal_graph_cliques(G):
    """Returns all maximal cliques of a chordal graph.

    The algorithm breaks the graph in connected components and performs a
    maximum cardinality search in each component to get the cliques.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Yields
    ------
    frozenset of nodes
        Maximal cliques, each of which is a frozenset of
        nodes in `G`. The order of cliques is arbitrary.

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
        The algorithm can only be applied to chordal graphs. If the input
        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.

    Examples
    --------
    >>> e = [
    ...     (1, 2),
    ...     (1, 3),
    ...     (2, 3),
    ...     (2, 4),
    ...     (3, 4),
    ...     (3, 5),
    ...     (3, 6),
    ...     (4, 5),
    ...     (4, 6),
    ...     (5, 6),
    ...     (7, 8),
    ... ]
    >>> G = nx.Graph(e)
    >>> G.add_node(9)
    >>> cliques = [c for c in chordal_graph_cliques(G)]
    >>> cliques[0]
    frozenset({1, 2, 3})
    """
    for C in (G.subgraph(c).copy() for c in connected_components(G)):
        if C.number_of_nodes() == 1:
            # An isolated node is its own trivial maximal clique — unless it
            # carries a self loop, which chordal graphs may not contain here.
            if nx.number_of_selfloops(C) > 0:
                raise nx.NetworkXError("Input graph is not chordal.")
            yield frozenset(C.nodes())
        else:
            # Maximum cardinality search over the component: repeatedly
            # number the node with the most already-numbered neighbors.
            unnumbered = set(C.nodes())
            v = arbitrary_element(C)
            unnumbered.remove(v)
            numbered = {v}
            clique_wanna_be = {v}
            while unnumbered:
                v = _max_cardinality_node(C, unnumbered, numbered)
                unnumbered.remove(v)
                numbered.add(v)
                # Numbered neighbors of v form the next clique candidate.
                new_clique_wanna_be = set(C.neighbors(v)) & numbered
                sg = C.subgraph(clique_wanna_be)
                if _is_complete_graph(sg):
                    new_clique_wanna_be.add(v)
                    # Emit the previous candidate only when it is not
                    # contained in the new one (i.e. it was maximal).
                    if not new_clique_wanna_be >= clique_wanna_be:
                        yield frozenset(clique_wanna_be)
                    clique_wanna_be = new_clique_wanna_be
                else:
                    # In a chordal graph every MCS neighborhood is a clique.
                    raise nx.NetworkXError("Input graph is not chordal.")
            yield frozenset(clique_wanna_be)
|
242 |
+
|
243 |
+
|
244 |
+
@nx._dispatchable
def chordal_graph_treewidth(G):
    """Returns the treewidth of the chordal graph G.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    treewidth : int
        The size of the largest clique in the graph minus one.

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
        The algorithm can only be applied to chordal graphs. If the input
        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.

    Examples
    --------
    >>> e = [
    ...     (1, 2),
    ...     (1, 3),
    ...     (2, 3),
    ...     (2, 4),
    ...     (3, 4),
    ...     (3, 5),
    ...     (3, 6),
    ...     (4, 5),
    ...     (4, 6),
    ...     (5, 6),
    ...     (7, 8),
    ... ]
    >>> G = nx.Graph(e)
    >>> G.add_node(9)
    >>> nx.chordal_graph_treewidth(G)
    3

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth
    """
    if not is_chordal(G):
        raise nx.NetworkXError("Input graph is not chordal.")

    # For a chordal graph the treewidth is the maximum clique size minus
    # one; `default=-1` reproduces the node-less graph result.
    largest = max((len(clique) for clique in nx.chordal_graph_cliques(G)), default=-1)
    return largest - 1
|
296 |
+
|
297 |
+
|
298 |
+
def _is_complete_graph(G):
    """Returns True if G is a complete graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph without self loops.

    Raises
    ------
    NetworkXError
        If ``G`` contains a self loop.
    """
    if nx.number_of_selfloops(G) > 0:
        raise nx.NetworkXError("Self loop found in _is_complete_graph()")
    n = G.number_of_nodes()
    if n < 2:
        return True
    e = G.number_of_edges()
    # A simple graph on n nodes is complete iff it has n*(n-1)/2 edges.
    # Integer (floor) division keeps the comparison exact: `/ 2` produced a
    # float, which loses precision once n*(n-1) exceeds 2**53.
    max_edges = n * (n - 1) // 2
    return e == max_edges
|
308 |
+
|
309 |
+
|
310 |
+
def _find_missing_edge(G):
|
311 |
+
"""Given a non-complete graph G, returns a missing edge."""
|
312 |
+
nodes = set(G)
|
313 |
+
for u in G:
|
314 |
+
missing = nodes - set(list(G[u].keys()) + [u])
|
315 |
+
if missing:
|
316 |
+
return (u, missing.pop())
|
317 |
+
|
318 |
+
|
319 |
+
def _max_cardinality_node(G, choices, wanna_connect):
|
320 |
+
"""Returns a the node in choices that has more connections in G
|
321 |
+
to nodes in wanna_connect.
|
322 |
+
"""
|
323 |
+
max_number = -1
|
324 |
+
for x in choices:
|
325 |
+
number = len([y for y in G[x] if y in wanna_connect])
|
326 |
+
if number > max_number:
|
327 |
+
max_number = number
|
328 |
+
max_cardinality_node = x
|
329 |
+
return max_cardinality_node
|
330 |
+
|
331 |
+
|
332 |
+
def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize):
    """Given a graph G, starts a max cardinality search
    (starting from s if s is given and from an arbitrary node otherwise)
    trying to find a non-chordal cycle.

    If it does find one, it returns (u,v,w) where u,v,w are the three
    nodes that together with s are involved in the cycle.

    It ignores any self loops.

    Returns an empty tuple when the search completes without finding a
    chordality breaker (i.e. G is chordal).

    Raises
    ------
    NetworkXPointlessConcept
        If `G` has no nodes.
    NetworkXTreewidthBoundExceeded
        If the running treewidth estimate exceeds `treewidth_bound`.
    """
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
    unnumbered = set(G)
    if s is None:
        s = arbitrary_element(G)
    unnumbered.remove(s)
    numbered = {s}
    # Largest clique-neighborhood seen so far; a lower bound on treewidth+1.
    current_treewidth = -1
    while unnumbered:  # and current_treewidth <= treewidth_bound:
        # Number next the node with the most already-numbered neighbors.
        v = _max_cardinality_node(G, unnumbered, numbered)
        unnumbered.remove(v)
        numbered.add(v)
        # In a chordal graph the numbered neighborhood of v must be a clique.
        clique_wanna_be = set(G[v]) & numbered
        sg = G.subgraph(clique_wanna_be)
        if _is_complete_graph(sg):
            # The graph seems to be chordal by now. We update the treewidth
            current_treewidth = max(current_treewidth, len(clique_wanna_be))
            if current_treewidth > treewidth_bound:
                raise nx.NetworkXTreewidthBoundExceeded(
                    f"treewidth_bound exceeded: {current_treewidth}"
                )
        else:
            # sg is not a clique,
            # look for an edge that is not included in sg
            (u, w) = _find_missing_edge(sg)
            return (u, v, w)
    return ()
|
369 |
+
|
370 |
+
|
371 |
+
@not_implemented_for("directed")
@nx._dispatchable(returns_graph=True)
def complete_to_chordal_graph(G):
    """Return a copy of G completed to a chordal graph

    Adds edges to a copy of G to create a chordal graph. A graph G=(V,E) is
    called chordal if for each cycle with length bigger than 3, there exist
    two non-adjacent nodes connected by an edge (called a chord).

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    H : NetworkX graph
        The chordal enhancement of G
    alpha : Dictionary
        The elimination ordering of nodes of G

    Notes
    -----
    There are different approaches to calculate the chordal
    enhancement of a graph. The algorithm used here is called
    MCS-M and gives at least minimal (local) triangulation of graph. Note
    that this triangulation is not necessarily a global minimum.

    https://en.wikipedia.org/wiki/Chordal_graph

    References
    ----------
    .. [1] Berry, Anne & Blair, Jean & Heggernes, Pinar & Peyton, Barry. (2004)
       Maximum Cardinality Search for Computing Minimal Triangulations of
       Graphs. Algorithmica. 39. 287-298. 10.1007/s00453-004-1084-3.

    Examples
    --------
    >>> from networkx.algorithms.chordal import complete_to_chordal_graph
    >>> G = nx.wheel_graph(10)
    >>> H, alpha = complete_to_chordal_graph(G)
    """
    H = G.copy()
    alpha = {node: 0 for node in H}
    # Already chordal: nothing to add; all elimination labels stay 0.
    if nx.is_chordal(H):
        return H, alpha
    chords = set()
    weight = {node: 0 for node in H.nodes()}
    unnumbered_nodes = list(H.nodes())
    # MCS-M: assign elimination numbers n..1, always picking the node with
    # the largest weight.
    for i in range(len(H.nodes()), 0, -1):
        # get the node in unnumbered_nodes with the maximum weight
        z = max(unnumbered_nodes, key=lambda node: weight[node])
        unnumbered_nodes.remove(z)
        alpha[z] = i
        update_nodes = []
        for y in unnumbered_nodes:
            if G.has_edge(y, z):
                update_nodes.append(y)
            else:
                # y_weight will be bigger than node weights between y and z
                y_weight = weight[y]
                lower_nodes = [
                    node for node in unnumbered_nodes if weight[node] < y_weight
                ]
                # A y-z path through strictly lighter nodes means the fill
                # edge (z, y) is required for a minimal triangulation.
                if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z):
                    update_nodes.append(y)
                    chords.add((z, y))
        # during calculation of paths the weights should not be updated
        for node in update_nodes:
            weight[node] += 1
    H.add_edges_from(chords)
    return H, alpha
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/clique.py
ADDED
@@ -0,0 +1,754 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for finding and manipulating cliques.
|
2 |
+
|
3 |
+
Finding the largest clique in a graph is NP-complete problem, so most of
|
4 |
+
these algorithms have an exponential running time; for more information,
|
5 |
+
see the Wikipedia article on the clique problem [1]_.
|
6 |
+
|
7 |
+
.. [1] clique problem:: https://en.wikipedia.org/wiki/Clique_problem
|
8 |
+
|
9 |
+
"""
|
10 |
+
from collections import defaultdict, deque
|
11 |
+
from itertools import chain, combinations, islice
|
12 |
+
|
13 |
+
import networkx as nx
|
14 |
+
from networkx.utils import not_implemented_for
|
15 |
+
|
16 |
+
__all__ = [
|
17 |
+
"find_cliques",
|
18 |
+
"find_cliques_recursive",
|
19 |
+
"make_max_clique_graph",
|
20 |
+
"make_clique_bipartite",
|
21 |
+
"node_clique_number",
|
22 |
+
"number_of_cliques",
|
23 |
+
"enumerate_all_cliques",
|
24 |
+
"max_weight_clique",
|
25 |
+
]
|
26 |
+
|
27 |
+
|
28 |
+
@not_implemented_for("directed")
@nx._dispatchable
def enumerate_all_cliques(G):
    """Returns all cliques in an undirected graph.

    This function returns an iterator over cliques, each of which is a
    list of nodes. The iteration is ordered by cardinality of the
    cliques: first all cliques of size one, then all cliques of size
    two, etc.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    iterator
        An iterator over cliques, each of which is a list of nodes in
        `G`. The cliques are ordered according to size.

    Notes
    -----
    To obtain a list of all cliques, use
    `list(enumerate_all_cliques(G))`. However, be aware that in the
    worst-case, the length of this list can be exponential in the number
    of nodes in the graph (for example, when the graph is the complete
    graph). This function avoids storing all cliques in memory by only
    keeping current candidate node lists in memory during its search.

    The implementation is adapted from the algorithm by Zhang, et
    al. (2005) [1]_ to output all cliques discovered.

    This algorithm ignores self-loops and parallel edges, since cliques
    are not conventionally defined with such edges.

    References
    ----------
    .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J.,
       Langston, M.A., Samatova, N.F.,
       "Genome-Scale Computational Approaches to Memory-Intensive
       Applications in Systems Biology".
       *Supercomputing*, 2005. Proceedings of the ACM/IEEE SC 2005
       Conference, pp. 12, 12--18 Nov. 2005.
       <https://doi.org/10.1109/SC.2005.29>.

    """
    # index[u]: position of u in the iteration order of G.
    index = {}
    nbrs = {}
    for u in G:
        index[u] = len(index)
        # Neighbors of u that appear after u in the iteration order of G.
        # (u itself is already in `index`, so self-loops are dropped.)
        nbrs[u] = {v for v in G[u] if v not in index}

    # BFS over (clique, extension candidates) pairs, seeded with singletons.
    queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in G)
    # Loop invariants:
    # 1. len(base) is nondecreasing.
    # 2. (base + cnbrs) is sorted with respect to the iteration order of G.
    # 3. cnbrs is a set of common neighbors of nodes in base.
    while queue:
        base, cnbrs = map(list, queue.popleft())
        yield base
        for i, u in enumerate(cnbrs):
            # Use generators to reduce memory consumption.
            queue.append(
                (
                    chain(base, [u]),
                    filter(nbrs[u].__contains__, islice(cnbrs, i + 1, None)),
                )
            )
|
98 |
+
|
99 |
+
|
100 |
+
@not_implemented_for("directed")
@nx._dispatchable
def find_cliques(G, nodes=None):
    """Returns all maximal cliques in an undirected graph.

    For each node *n*, a *maximal clique for n* is a largest complete
    subgraph containing *n*. The largest maximal clique is sometimes
    called the *maximum clique*.

    This function returns an iterator over cliques, each of which is a
    list of nodes. It is an iterative implementation, so should not
    suffer from recursion depth issues.

    This function accepts a list of `nodes` and only the maximal cliques
    containing all of these `nodes` are returned. It can considerably speed up
    the running time if some specific cliques are desired.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nodes : list, optional (default=None)
        If provided, only yield *maximal cliques* containing all nodes in `nodes`.
        If `nodes` isn't a clique itself, a ValueError is raised.

    Returns
    -------
    iterator
        An iterator over maximal cliques, each of which is a list of
        nodes in `G`. If `nodes` is provided, only the maximal cliques
        containing all the nodes in `nodes` are returned. The order of
        cliques is arbitrary.

    Raises
    ------
    ValueError
        If `nodes` is not a clique.

    Examples
    --------
    >>> from pprint import pprint  # For nice dict formatting
    >>> G = nx.karate_club_graph()
    >>> sum(1 for c in nx.find_cliques(G))  # The number of maximal cliques in G
    36
    >>> max(nx.find_cliques(G), key=len)  # The largest maximal clique in G
    [0, 1, 2, 3, 13]

    The size of the largest maximal clique is known as the *clique number* of
    the graph, which can be found directly with:

    >>> max(len(c) for c in nx.find_cliques(G))
    5

    One can also compute the number of maximal cliques in `G` that contain a given
    node. The following produces a dictionary keyed by node whose
    values are the number of maximal cliques in `G` that contain the node:

    >>> pprint({n: sum(1 for c in nx.find_cliques(G) if n in c) for n in G})
    {0: 13,
     1: 6,
     2: 7,
     3: 3,
     4: 2,
     5: 3,
     6: 3,
     7: 1,
     8: 3,
     9: 2,
     10: 2,
     11: 1,
     12: 1,
     13: 2,
     14: 1,
     15: 1,
     16: 1,
     17: 1,
     18: 1,
     19: 2,
     20: 1,
     21: 1,
     22: 1,
     23: 3,
     24: 2,
     25: 2,
     26: 1,
     27: 3,
     28: 2,
     29: 2,
     30: 2,
     31: 4,
     32: 9,
     33: 14}

    Or, similarly, the maximal cliques in `G` that contain a given node.
    For example, the 4 maximal cliques that contain node 31:

    >>> [c for c in nx.find_cliques(G) if 31 in c]
    [[0, 31], [33, 32, 31], [33, 28, 31], [24, 25, 31]]

    See Also
    --------
    find_cliques_recursive
        A recursive version of the same algorithm.

    Notes
    -----
    To obtain a list of all maximal cliques, use
    `list(find_cliques(G))`. However, be aware that in the worst-case,
    the length of this list can be exponential in the number of nodes in
    the graph. This function avoids storing all cliques in memory by
    only keeping current candidate node lists in memory during its search.

    This implementation is based on the algorithm published by Bron and
    Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi
    (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. It
    essentially unrolls the recursion used in the references to avoid
    issues of recursion stack depth (for a recursive implementation, see
    :func:`find_cliques_recursive`).

    This algorithm ignores self-loops and parallel edges, since cliques
    are not conventionally defined with such edges.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J.
       "Algorithm 457: finding all cliques of an undirected graph".
       *Communications of the ACM* 16, 9 (Sep. 1973), 575--577.
       <http://portal.acm.org/citation.cfm?doid=362342.362367>

    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       "The worst-case time complexity for generating all maximal
       cliques and computational experiments",
       *Theoretical Computer Science*, Volume 363, Issue 1,
       Computing and Combinatorics,
       10th Annual International Conference on
       Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42
       <https://doi.org/10.1016/j.tcs.2006.06.015>

    .. [3] F. Cazals, C. Karande,
       "A note on the problem of reporting maximal cliques",
       *Theoretical Computer Science*,
       Volume 407, Issues 1--3, 6 November 2008, Pages 564--568,
       <https://doi.org/10.1016/j.tcs.2008.05.010>

    """
    if len(G) == 0:
        return

    # Adjacency sets with self-loops removed.
    adj = {u: {v for v in G[u] if v != u} for u in G}

    # Initialize Q with the given nodes and subg, cand with their nbrs
    Q = nodes[:] if nodes is not None else []
    cand = set(G)
    for node in Q:
        if node not in cand:
            raise ValueError(f"The given `nodes` {nodes} do not form a clique")
        cand &= adj[node]

    if not cand:
        # The required nodes already form a maximal clique.
        yield Q[:]
        return

    subg = cand.copy()
    stack = []
    # Placeholder slot in Q for the node chosen at the current depth.
    Q.append(None)

    # Pivot: the node covering the most candidates, which minimizes the
    # branches explored at this level (Tomita et al. pivoting).
    u = max(subg, key=lambda u: len(cand & adj[u]))
    ext_u = cand - adj[u]

    try:
        while True:
            if ext_u:
                q = ext_u.pop()
                cand.remove(q)
                Q[-1] = q
                adj_q = adj[q]
                subg_q = subg & adj_q
                if not subg_q:
                    # No common neighbors remain: Q is maximal.
                    yield Q[:]
                else:
                    cand_q = cand & adj_q
                    if cand_q:
                        # Descend: save this level's state, then recompute
                        # the pivot for the reduced subproblem.
                        stack.append((subg, cand, ext_u))
                        Q.append(None)
                        subg = subg_q
                        cand = cand_q
                        u = max(subg, key=lambda u: len(cand & adj[u]))
                        ext_u = cand - adj[u]
            else:
                # Backtrack one level; stack.pop() on the empty stack raises
                # IndexError, terminating the search.
                Q.pop()
                subg, cand, ext_u = stack.pop()
    except IndexError:
        pass
|
294 |
+
|
295 |
+
|
296 |
+
# TODO Should this also be not implemented for directed graphs?
|
297 |
+
@nx._dispatchable
def find_cliques_recursive(G, nodes=None):
    """Returns all maximal cliques in a graph.

    For each node *v*, a *maximal clique for v* is a largest complete
    subgraph containing *v*. This recursive implementation of the
    Bron--Kerbosch algorithm with pivoting ([1]_, as adapted in [2]_ and
    discussed in [3]_) yields each maximal clique as a list of nodes. It
    may hit the interpreter's recursion limit on deep searches; for an
    iterative version, see :func:`find_cliques`.

    Parameters
    ----------
    G : NetworkX graph

    nodes : list, optional (default=None)
        If provided, only the maximal cliques containing all nodes in
        `nodes` are yielded, which can considerably speed up the search.
        A ValueError is raised when `nodes` is not itself a clique.

    Returns
    -------
    iterator
        An iterator over maximal cliques, each a list of nodes in `G`.
        The order of cliques is arbitrary.

    Raises
    ------
    ValueError
        If `nodes` is not a clique.

    See Also
    --------
    find_cliques
        An iterative version of the same algorithm. See docstring for examples.

    Notes
    -----
    Self-loops and parallel edges are ignored, since cliques are not
    conventionally defined with such edges.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J.
       "Algorithm 457: finding all cliques of an undirected graph".
       *Communications of the ACM* 16, 9 (Sep. 1973), 575--577.
       <http://portal.acm.org/citation.cfm?doid=362342.362367>
    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       "The worst-case time complexity for generating all maximal
       cliques and computational experiments",
       *Theoretical Computer Science*, Volume 363, Issue 1, 2006,
       Pages 28--42. <https://doi.org/10.1016/j.tcs.2006.06.015>
    .. [3] F. Cazals, C. Karande,
       "A note on the problem of reporting maximal cliques",
       *Theoretical Computer Science*, Volume 407, Issues 1--3, 2008,
       Pages 564--568. <https://doi.org/10.1016/j.tcs.2008.05.010>
    """
    if len(G) == 0:
        return iter([])

    # Adjacency sets with self-loops stripped.
    neighbors = {node: {nbr for nbr in G[node] if nbr != node} for node in G}

    # Seed the growing clique with the required `nodes` and restrict the
    # candidate set to their common neighborhood.
    clique = nodes[:] if nodes is not None else []
    candidates = set(G)
    for member in clique:
        if member not in candidates:
            raise ValueError(f"The given `nodes` {nodes} do not form a clique")
        candidates &= neighbors[member]

    if not candidates:
        # The required nodes already form a maximal clique.
        return iter([clique])

    def expand(subgraph, cand):
        # Pivot on the node covering the most candidates to cut branching.
        pivot = max(subgraph, key=lambda node: len(cand & neighbors[node]))
        for node in cand - neighbors[pivot]:
            cand.remove(node)
            clique.append(node)
            nbrs = neighbors[node]
            reduced_subgraph = subgraph & nbrs
            if reduced_subgraph:
                reduced_cand = cand & nbrs
                if reduced_cand:
                    yield from expand(reduced_subgraph, reduced_cand)
            else:
                # No common neighbors remain: the current clique is maximal.
                yield clique[:]
            clique.pop()

    return expand(candidates.copy(), candidates)
|
413 |
+
|
414 |
+
|
415 |
+
@nx._dispatchable(returns_graph=True)
|
416 |
+
def make_max_clique_graph(G, create_using=None):
    """Returns the maximal clique graph of the given graph.

    Each node of the result stands for one maximal clique of `G`; two
    such nodes are joined by an edge exactly when the corresponding
    cliques share at least one node of `G`.

    Parameters
    ----------
    G : NetworkX graph

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    NetworkX graph
        A graph whose nodes are the cliques of `G` and whose edges
        join two cliques if they are not disjoint.

    Notes
    -----
    This function behaves like the following code::

        import networkx as nx

        G = nx.make_clique_bipartite(G)
        cliques = [v for v in G.nodes() if G.nodes[v]["bipartite"] == 0]
        G = nx.bipartite.projected_graph(G, cliques)
        G = nx.relabel_nodes(G, {-v: v - 1 for v in G})

    It should be faster, though, since it skips all the intermediate
    steps.

    """
    H = G.__class__() if create_using is None else nx.empty_graph(0, create_using)
    # Materialize each maximal clique as a set, paired with its index.
    numbered = [(idx, set(members)) for idx, members in enumerate(find_cliques(G))]
    # One node per maximal clique, labelled by its index.
    H.add_nodes_from(idx for idx, _ in numbered)
    # Connect two clique-nodes whenever the underlying cliques overlap.
    H.add_edges_from(
        (a, b)
        for (a, ca), (b, cb) in combinations(numbered, 2)
        if not ca.isdisjoint(cb)
    )
    return H
|
461 |
+
|
462 |
+
|
463 |
+
@nx._dispatchable(returns_graph=True)
|
464 |
+
def make_clique_bipartite(G, fpos=None, create_using=None, name=None):
    """Returns the bipartite clique graph corresponding to `G`.

    In the returned bipartite graph, the "bottom" nodes are the nodes of
    `G` and the "top" nodes represent the maximal cliques of `G`.
    There is an edge from node *v* to clique *C* in the returned graph
    if and only if *v* is an element of *C*.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    fpos : bool
        If True or not None, the returned graph will have an
        additional attribute, `pos`, a dictionary mapping node to
        position in the Euclidean plane.
        NOTE(review): this implementation never reads `fpos` — confirm
        against callers before relying on the documented behavior.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    NetworkX graph
        A bipartite graph whose "bottom" set is the nodes of the graph
        `G`, whose "top" set is the cliques of `G`, and whose edges
        join nodes of `G` to the cliques that contain them.

        The nodes of the graph `G` have the node attribute
        'bipartite' set to 1 and the nodes representing cliques
        have the node attribute 'bipartite' set to 0, as is the
        convention for bipartite graphs in NetworkX.

    """
    B = nx.empty_graph(0, create_using)
    B.clear()
    # Bottom side of the bipartition: the original graph's nodes.
    B.add_nodes_from(G, bipartite=1)
    # Top side: one node per maximal clique, labelled -1, -2, -3, ...
    # so the labels cannot collide with integer node labels of G >= 0.
    for idx, clique in enumerate(find_cliques(G)):
        name = -idx - 1
        B.add_node(name, bipartite=0)
        B.add_edges_from((member, name) for member in clique)
    return B
|
510 |
+
|
511 |
+
|
512 |
+
@nx._dispatchable
|
513 |
+
def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False):
    """Returns the size of the largest maximal clique containing each given node.

    Returns a single int or a dict depending on `nodes`.
    An optional list of cliques can be input if already computed.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    cliques : list, optional (default=None)
        A list of cliques, each of which is itself a list of nodes.
        If not specified, the list of all cliques will be computed
        using :func:`find_cliques`.

    Returns
    -------
    int or dict
        If `nodes` is a single node, returns the size of the
        largest maximal clique in `G` containing that node.
        Otherwise return a dict keyed by node to the size
        of the largest maximal clique containing that node.

    See Also
    --------
    find_cliques
        find_cliques yields the maximal cliques of G.
        It accepts a `nodes` argument which restricts consideration to
        maximal cliques containing all the given `nodes`.
        The search for the cliques is optimized for `nodes`.
    """
    if cliques is None:
        if nodes is not None:
            # Restrict the search to each node's ego graph: every maximal
            # clique containing n lies entirely inside it, so this is much
            # cheaper than enumerating all cliques of G.
            if nodes in G:
                return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes)))
            return {
                n: max(len(c) for c in find_cliques(nx.ego_graph(G, n)))
                for n in nodes
            }
        cliques = list(find_cliques(G))

    # A precomputed clique list was supplied (or just built).
    if nodes in G:
        # Single node: scan only the cliques that contain it.
        return max(len(c) for c in cliques if nodes in c)

    # Many (or all) nodes: one pass over the cliques beats a per-node scan.
    best = defaultdict(int)
    for clique in cliques:
        width = len(clique)
        for member in clique:
            best[member] = max(best[member], width)
    return best if nodes is None else {n: best[n] for n in nodes}
|
574 |
+
|
575 |
+
|
576 |
+
def number_of_cliques(G, nodes=None, cliques=None):
    """Returns the number of maximal cliques for each node.

    Returns a single int (for one node) or a dict keyed by node.
    An optional list of cliques can be input if already computed.
    """
    if cliques is None:
        cliques = list(find_cliques(G))

    if nodes is None:
        # No nodes given: report a count for every node of the graph.
        nodes = list(G.nodes())

    if isinstance(nodes, list):
        return {v: sum(1 for c in cliques if v in c) for v in nodes}
    # Anything that is not a list is treated as a single node.
    return sum(1 for c in cliques if nodes in c)
|
597 |
+
|
598 |
+
|
599 |
+
class MaxWeightClique:
    """A class for the maximum weight clique algorithm.

    This class is a helper for the `max_weight_clique` function.  The class
    should not normally be used directly.

    Parameters
    ----------
    G : NetworkX graph
        The undirected graph for which a maximum weight clique is sought
    weight : string or None, optional (default='weight')
        The node attribute that holds the integer value used as a weight.
        If None, then each node has weight 1.

    Attributes
    ----------
    G : NetworkX graph
        The undirected graph for which a maximum weight clique is sought
    node_weights: dict
        The weight of each node
    incumbent_nodes : list
        The nodes of the incumbent clique (the best clique found so far)
    incumbent_weight: int
        The weight of the incumbent clique
    """

    def __init__(self, G, weight):
        self.G = G
        self.incumbent_nodes = []
        self.incumbent_weight = 0

        if weight is None:
            # Unweighted case: maximum weight clique == maximum clique.
            self.node_weights = {v: 1 for v in G.nodes()}
        else:
            # Validate up front so the search never hits a missing or
            # non-integer weight mid-recursion.
            for v in G.nodes():
                if weight not in G.nodes[v]:
                    errmsg = f"Node {v!r} does not have the requested weight field."
                    raise KeyError(errmsg)
                if not isinstance(G.nodes[v][weight], int):
                    errmsg = f"The {weight!r} field of node {v!r} is not an integer."
                    raise ValueError(errmsg)
            self.node_weights = {v: G.nodes[v][weight] for v in G.nodes()}

    def update_incumbent_if_improved(self, C, C_weight):
        """Update the incumbent if the node set C has greater weight.

        C is assumed to be a clique.
        """
        if C_weight > self.incumbent_weight:
            # Copy C so later in-place mutation of the search state
            # cannot corrupt the stored best solution.
            self.incumbent_nodes = C[:]
            self.incumbent_weight = C_weight

    def greedily_find_independent_set(self, P):
        """Greedily find an independent set of nodes from a set of
        nodes P."""
        independent_set = []
        P = P[:]
        while P:
            # Take the first remaining node, then drop it and all of its
            # neighbors from further consideration.
            v = P[0]
            independent_set.append(v)
            P = [w for w in P if v != w and not self.G.has_edge(v, w)]
        return independent_set

    def find_branching_nodes(self, P, target):
        """Find a set of nodes to branch on."""
        # Weighted independent-set cover bound: nodes of an independent set
        # contribute at most one clique member each, so covering P by
        # independent classes gives an upper bound on achievable weight.
        residual_wt = {v: self.node_weights[v] for v in P}
        total_wt = 0
        P = P[:]
        while P:
            independent_set = self.greedily_find_independent_set(P)
            min_wt_in_class = min(residual_wt[v] for v in independent_set)
            total_wt += min_wt_in_class
            if total_wt > target:
                # Bound exceeded the target: the nodes still in P cannot be
                # pruned and must be branched on.
                break
            for v in independent_set:
                residual_wt[v] -= min_wt_in_class
            # Keep only nodes with weight not yet fully covered.
            P = [v for v in P if residual_wt[v] != 0]
        return P

    def expand(self, C, C_weight, P):
        """Look for the best clique that contains all the nodes in C and zero or
        more of the nodes in P, backtracking if it can be shown that no such
        clique has greater weight than the incumbent.
        """
        self.update_incumbent_if_improved(C, C_weight)
        branching_nodes = self.find_branching_nodes(P, self.incumbent_weight - C_weight)
        while branching_nodes:
            v = branching_nodes.pop()
            # Remove v from P as well so it is not revisited by siblings.
            P.remove(v)
            new_C = C + [v]
            new_C_weight = C_weight + self.node_weights[v]
            # Only nodes adjacent to v can extend the clique C + [v].
            new_P = [w for w in P if self.G.has_edge(v, w)]
            self.expand(new_C, new_C_weight, new_P)

    def find_max_weight_clique(self):
        """Find a maximum weight clique."""
        # Sort nodes in reverse order of degree for speed
        nodes = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True)
        # Non-positive-weight nodes can never improve a clique's weight.
        nodes = [v for v in nodes if self.node_weights[v] > 0]
        self.expand([], 0, nodes)
|
699 |
+
|
700 |
+
|
701 |
+
@not_implemented_for("directed")
|
702 |
+
@nx._dispatchable(node_attrs="weight")
|
703 |
+
def max_weight_clique(G, weight="weight"):
    """Find a maximum weight clique in G.

    A *clique* in a graph is a set of nodes such that every two distinct nodes
    are adjacent.  The *weight* of a clique is the sum of the weights of its
    nodes, and a *maximum weight clique* is one whose weight is not exceeded
    by any other clique of G.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph
    weight : string or None, optional (default='weight')
        The node attribute that holds the integer value used as a weight.
        If None, then each node has weight 1.

    Returns
    -------
    clique : list
        the nodes of a maximum weight clique
    weight : int
        the weight of a maximum weight clique

    Notes
    -----
    The implementation is recursive, and therefore it may run into recursion
    depth issues if G contains a clique whose number of nodes is close to the
    recursion depth limit.

    At each search node, the algorithm greedily constructs a weighted
    independent set cover of part of the graph in order to find a small set of
    nodes on which to branch.  The algorithm closely follows Tavares et
    al. [1]_, except that this version does not use bitsets; see also
    Algorithm B of Warren and Hicks [2]_ for the history of this family of
    branch-and-bound methods.

    References
    ----------
    .. [1] Tavares, W.A., Neto, M.B.C., Rodrigues, C.D., Michelon, P.: Um
           algoritmo de branch and bound para o problema da clique máxima
           ponderada. Proceedings of XLVII SBPO 1 (2015).

    .. [2] Warren, Jeffrey S, Hicks, Illya V.: Combinatorial Branch-and-Bound
           for the Maximum Weight Independent Set Problem. Technical Report,
           Texas A&M University (2016).
    """
    # All of the work happens in the helper class; we only unpack its result.
    solver = MaxWeightClique(G, weight)
    solver.find_max_weight_clique()
    return solver.incumbent_nodes, solver.incumbent_weight
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/cluster.py
ADDED
@@ -0,0 +1,609 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Algorithms to characterize the number of triangles in a graph."""
|
2 |
+
|
3 |
+
from collections import Counter
|
4 |
+
from itertools import chain, combinations
|
5 |
+
|
6 |
+
import networkx as nx
|
7 |
+
from networkx.utils import not_implemented_for
|
8 |
+
|
9 |
+
__all__ = [
|
10 |
+
"triangles",
|
11 |
+
"average_clustering",
|
12 |
+
"clustering",
|
13 |
+
"transitivity",
|
14 |
+
"square_clustering",
|
15 |
+
"generalized_degree",
|
16 |
+
]
|
17 |
+
|
18 |
+
|
19 |
+
@not_implemented_for("directed")
|
20 |
+
@nx._dispatchable
|
21 |
+
def triangles(G, nodes=None):
    """Compute the number of triangles.

    Finds the number of triangles that include a node as one vertex.

    Parameters
    ----------
    G : graph
        A networkx graph

    nodes : node, iterable of nodes, or None (default=None)
        If a singleton node, return the number of triangles for that node.
        If an iterable, compute the number of triangles for each of those nodes.
        If `None` (the default) compute the number of triangles for all nodes in `G`.

    Returns
    -------
    out : dict or int
        If `nodes` is a container of nodes, returns number of triangles keyed by node (dict).
        If `nodes` is a specific node, returns number of triangles for the node (int).

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(nx.triangles(G, 0))
    6
    >>> print(nx.triangles(G))
    {0: 6, 1: 6, 2: 6, 3: 6, 4: 6}
    >>> print(list(nx.triangles(G, [0, 1]).values()))
    [6, 6]

    Notes
    -----
    Self loops are ignored.

    """
    if nodes is not None:
        # The helper double-counts each triangle, hence the // 2.
        if nodes in G:
            return next(_triangles_and_degree_iter(G, nodes))[2] // 2
        return {v: t // 2 for v, _, t, _ in _triangles_and_degree_iter(G, nodes)}

    # Whole-graph case.  Orient every edge toward the endpoint that is
    # visited later so that each triangle is discovered exactly once.
    later = {}
    for u, nbrs in G.adjacency():
        later[u] = {w for w in nbrs if w not in later and w != u}

    # Seed the counter with every node so isolated nodes report 0.
    counts = Counter(dict.fromkeys(G, 0))
    for u, u_later in later.items():
        for v in u_later:
            # Nodes later than both u and v that close a triangle u-v-w.
            closing = u_later & later[v]
            k = len(closing)
            counts[u] += k
            counts[v] += k
            counts.update(closing)

    return dict(counts)
|
88 |
+
|
89 |
+
|
90 |
+
@not_implemented_for("multigraph")
|
91 |
+
def _triangles_and_degree_iter(G, nodes=None):
|
92 |
+
"""Return an iterator of (node, degree, triangles, generalized degree).
|
93 |
+
|
94 |
+
This double counts triangles so you may want to divide by 2.
|
95 |
+
See degree(), triangles() and generalized_degree() for definitions
|
96 |
+
and details.
|
97 |
+
|
98 |
+
"""
|
99 |
+
if nodes is None:
|
100 |
+
nodes_nbrs = G.adj.items()
|
101 |
+
else:
|
102 |
+
nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))
|
103 |
+
|
104 |
+
for v, v_nbrs in nodes_nbrs:
|
105 |
+
vs = set(v_nbrs) - {v}
|
106 |
+
gen_degree = Counter(len(vs & (set(G[w]) - {w})) for w in vs)
|
107 |
+
ntriangles = sum(k * val for k, val in gen_degree.items())
|
108 |
+
yield (v, len(vs), ntriangles, gen_degree)
|
109 |
+
|
110 |
+
|
111 |
+
@not_implemented_for("multigraph")
def _weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"):
    """Return an iterator of (node, degree, weighted_triangles).

    Used for weighted clustering.
    Note: this returns the geometric average weight of edges in the triangle.
    Also, each triangle is counted twice (each direction).
    So you may want to divide by 2.

    """
    # Imported lazily so numpy is only required when this helper runs.
    import numpy as np

    # Normalize all edge weights by the maximum weight in the graph; with
    # weight=None (or no edges) the divisor is 1, i.e. no normalization.
    if weight is None or G.number_of_edges() == 0:
        max_weight = 1
    else:
        max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True))
    if nodes is None:
        nodes_nbrs = G.adj.items()
    else:
        nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))

    def wt(u, v):
        # Normalized weight of edge (u, v); a missing attribute counts as 1.
        return G[u][v].get(weight, 1) / max_weight

    for i, nbrs in nodes_nbrs:
        inbrs = set(nbrs) - {i}  # self-loops are ignored
        weighted_triangles = 0
        seen = set()
        for j in inbrs:
            seen.add(j)
            # This avoids counting twice -- we double at the end.
            jnbrs = set(G[j]) - seen
            # Only compute the edge weight once, before the inner inner
            # loop.
            wij = wt(i, j)
            # Cube root of the weight product = geometric mean of the three
            # edge weights, summed over every common neighbor k of i and j.
            weighted_triangles += np.cbrt(
                [(wij * wt(j, k) * wt(k, i)) for k in inbrs & jnbrs]
            ).sum()
        # Double to match the "each triangle counted twice" convention of
        # the unweighted helper.
        yield (i, len(inbrs), 2 * float(weighted_triangles))
|
150 |
+
|
151 |
+
|
152 |
+
@not_implemented_for("multigraph")
|
153 |
+
def _directed_triangles_and_degree_iter(G, nodes=None):
|
154 |
+
"""Return an iterator of
|
155 |
+
(node, total_degree, reciprocal_degree, directed_triangles).
|
156 |
+
|
157 |
+
Used for directed clustering.
|
158 |
+
Note that unlike `_triangles_and_degree_iter()`, this function counts
|
159 |
+
directed triangles so does not count triangles twice.
|
160 |
+
|
161 |
+
"""
|
162 |
+
nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes))
|
163 |
+
|
164 |
+
for i, preds, succs in nodes_nbrs:
|
165 |
+
ipreds = set(preds) - {i}
|
166 |
+
isuccs = set(succs) - {i}
|
167 |
+
|
168 |
+
directed_triangles = 0
|
169 |
+
for j in chain(ipreds, isuccs):
|
170 |
+
jpreds = set(G._pred[j]) - {j}
|
171 |
+
jsuccs = set(G._succ[j]) - {j}
|
172 |
+
directed_triangles += sum(
|
173 |
+
1
|
174 |
+
for k in chain(
|
175 |
+
(ipreds & jpreds),
|
176 |
+
(ipreds & jsuccs),
|
177 |
+
(isuccs & jpreds),
|
178 |
+
(isuccs & jsuccs),
|
179 |
+
)
|
180 |
+
)
|
181 |
+
dtotal = len(ipreds) + len(isuccs)
|
182 |
+
dbidirectional = len(ipreds & isuccs)
|
183 |
+
yield (i, dtotal, dbidirectional, directed_triangles)
|
184 |
+
|
185 |
+
|
186 |
+
@not_implemented_for("multigraph")
def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"):
    """Return an iterator of
    (node, total_degree, reciprocal_degree, directed_weighted_triangles).

    Used for directed weighted clustering.
    Note that unlike `_weighted_triangles_and_degree_iter()`, this function counts
    directed triangles so does not count triangles twice.

    """
    # Imported lazily so numpy is only required when this helper runs.
    import numpy as np

    # Normalize all edge weights by the maximum weight in the graph; with
    # weight=None (or no edges) the divisor is 1, i.e. no normalization.
    if weight is None or G.number_of_edges() == 0:
        max_weight = 1
    else:
        max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True))

    nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes))

    def wt(u, v):
        # Normalized weight of edge (u, v); a missing attribute counts as 1.
        return G[u][v].get(weight, 1) / max_weight

    for i, preds, succs in nodes_nbrs:
        ipreds = set(preds) - {i}  # predecessors of i, self-loops dropped
        isuccs = set(succs) - {i}  # successors of i, self-loops dropped

        # Each np.cbrt(...).sum() term adds the geometric mean of the three
        # edge weights for one of the eight possible edge orientations of a
        # directed triangle through i, j, and a third node k.
        directed_triangles = 0
        for j in ipreds:
            jpreds = set(G._pred[j]) - {j}
            jsuccs = set(G._succ[j]) - {j}
            directed_triangles += np.cbrt(
                [(wt(j, i) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]
            ).sum()
            directed_triangles += np.cbrt(
                [(wt(j, i) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]
            ).sum()
            directed_triangles += np.cbrt(
                [(wt(j, i) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]
            ).sum()
            directed_triangles += np.cbrt(
                [(wt(j, i) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]
            ).sum()

        for j in isuccs:
            jpreds = set(G._pred[j]) - {j}
            jsuccs = set(G._succ[j]) - {j}
            directed_triangles += np.cbrt(
                [(wt(i, j) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]
            ).sum()
            directed_triangles += np.cbrt(
                [(wt(i, j) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]
            ).sum()
            directed_triangles += np.cbrt(
                [(wt(i, j) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]
            ).sum()
            directed_triangles += np.cbrt(
                [(wt(i, j) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]
            ).sum()

        dtotal = len(ipreds) + len(isuccs)            # total degree
        dbidirectional = len(ipreds & isuccs)         # reciprocal degree
        yield (i, dtotal, dbidirectional, float(directed_triangles))
|
248 |
+
|
249 |
+
|
250 |
+
@nx._dispatchable(edge_attrs="weight")
|
251 |
+
def average_clustering(G, nodes=None, weight=None, count_zeros=True):
    r"""Compute the average clustering coefficient for the graph G.

    The clustering coefficient for the graph is the average,

    .. math::

       C = \frac{1}{n}\sum_{v \in G} c_v,

    where :math:`n` is the number of nodes in `G`.

    Parameters
    ----------
    G : graph

    nodes : container of nodes, optional (default=all nodes in G)
        Compute average clustering for nodes in this container.

    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used as a weight.
        If None, then each edge has weight 1.

    count_zeros : bool
        If False include only the nodes with nonzero clustering in the average.

    Returns
    -------
    avg : float
        Average clustering

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(nx.average_clustering(G))
    1.0

    Notes
    -----
    This is a space saving routine; it might be faster
    to use the clustering function to get a list and then take the average.

    Self loops are ignored.

    References
    ----------
    .. [1] Generalizations of the clustering coefficient to weighted
       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
       http://jponnela.com/web_documents/a9.pdf
    .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated
       nodes and leafs on clustering measures for small-world networks.
       https://arxiv.org/abs/0802.2512
    """
    coeffs = list(clustering(G, nodes, weight=weight).values())
    if not count_zeros:
        # Keep only nodes whose clustering coefficient is nonzero.
        coeffs = [v for v in coeffs if abs(v) > 0]
    return sum(coeffs) / len(coeffs)
|
308 |
+
|
309 |
+
|
310 |
+
@nx._dispatchable(edge_attrs="weight")
|
311 |
+
def clustering(G, nodes=None, weight=None):
    r"""Compute the clustering coefficient for nodes.

    For unweighted graphs, the clustering of a node :math:`u`
    is the fraction of possible triangles through that node that exist,

    .. math::

      c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)},

    where :math:`T(u)` is the number of triangles through node :math:`u` and
    :math:`deg(u)` is the degree of :math:`u`.

    For weighted graphs, there are several ways to define clustering [1]_.
    the one used here is defined
    as the geometric average of the subgraph edge weights [2]_,

    .. math::

       c_u = \frac{1}{deg(u)(deg(u)-1))}
             \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}.

    The edge weights :math:`\hat{w}_{uv}` are normalized by the maximum weight
    in the network :math:`\hat{w}_{uv} = w_{uv}/\max(w)`.

    The value of :math:`c_u` is assigned to 0 if :math:`deg(u) < 2`.

    Additionally, this weighted definition has been generalized to support negative edge weights [3]_.

    For directed graphs, the clustering is similarly defined as the fraction
    of all possible directed triangles or geometric average of the subgraph
    edge weights for unweighted and weighted directed graph respectively [4]_.

    .. math::

       c_u = \frac{T(u)}{2(deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\leftrightarrow}(u))},

    where :math:`T(u)` is the number of directed triangles through node
    :math:`u`, :math:`deg^{tot}(u)` is the sum of in degree and out degree of
    :math:`u` and :math:`deg^{\leftrightarrow}(u)` is the reciprocal degree of
    :math:`u`.

    Parameters
    ----------
    G : graph

    nodes : node, iterable of nodes, or None (default=None)
        If a singleton node, return the clustering coefficient for that node.
        If an iterable, compute the clustering coefficient for each of those nodes.
        If `None` (the default) compute the clustering for all nodes in `G`.

    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used as a weight.
        If None, then each edge has weight 1.

    Returns
    -------
    out : float, or dictionary
        Clustering coefficient at specified nodes

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(nx.clustering(G, 0))
    1.0
    >>> print(nx.clustering(G))
    {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

    Notes
    -----
    Self loops are ignored.

    References
    ----------
    .. [1] Generalizations of the clustering coefficient to weighted
       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
       http://jponnela.com/web_documents/a9.pdf
    .. [2] Intensity and coherence of motifs in weighted complex
       networks by J. P. Onnela, J. Saramäki, J. Kertész, and K. Kaski,
       Physical Review E, 71(6), 065103 (2005).
    .. [3] Generalization of Clustering Coefficients to Signed Correlation Networks
       by G. Costantini and M. Perugini, PloS one, 9(2), e88669 (2014).
    .. [4] Clustering in complex directed networks by G. Fagiolo,
       Physical Review E, 76(2), 026107 (2007).
    """
    if G.is_directed():
        # Both directed helpers yield (v, total_deg, reciprocal_deg, tri),
        # so a single comprehension handles the weighted and unweighted cases.
        if weight is not None:
            it = _directed_weighted_triangles_and_degree_iter(G, nodes, weight)
        else:
            it = _directed_triangles_and_degree_iter(G, nodes)
        clusterc = {
            v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2)
            for v, dt, db, t in it
        }
    else:
        # The docs' 2*T/(d*(d-1)) appears here as t/(d*(d-1)) because the
        # undirected helpers already report t == 2*T.
        if weight is not None:
            it = _weighted_triangles_and_degree_iter(G, nodes, weight)
            clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in it}
        else:
            it = _triangles_and_degree_iter(G, nodes)
            clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t, _ in it}
    if nodes in G:
        # A single node was requested: unwrap the one-entry dictionary.
        return clusterc[nodes]
    return clusterc
|
423 |
+
|
424 |
+
|
425 |
+
@nx._dispatchable
|
426 |
+
def transitivity(G):
    r"""Compute graph transitivity, the fraction of all possible triangles
    present in G.

    Possible triangles are identified by the number of "triads": paths of
    two edges that share a middle vertex.  The transitivity is

    .. math::

        T = 3\frac{\#triangles}{\#triads}.

    Parameters
    ----------
    G : graph

    Returns
    -------
    out : float
        Transitivity of the graph (0 for an empty graph or a graph with
        no triangles).

    Notes
    -----
    Self loops are ignored.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(nx.transitivity(G))
    1.0
    """
    # _triangles_and_degree_iter yields (node, degree, 2*T, gen_degree);
    # t already counts each triangle twice per node, so t summed over all
    # nodes is 6*T while d*(d-1) summed is 2*#triads -- the factor of 3
    # in the formula cancels.
    triangle_total = 0
    triad_total = 0
    saw_node = False
    for _node, deg, tri, _ in _triangles_and_degree_iter(G):
        saw_node = True
        triangle_total += tri
        triad_total += deg * (deg - 1)
    # An empty graph has no nodes at all; define its transitivity as 0.
    if not saw_node or triangle_total == 0:
        return 0
    return triangle_total / triad_total
|
466 |
+
|
467 |
+
|
468 |
+
@nx._dispatchable
|
469 |
+
def square_clustering(G, nodes=None):
    r"""Compute the squares clustering coefficient for nodes.

    For each node return the fraction of possible squares that exist at
    the node [1]_

    .. math::
       C_4(v) = \frac{ \sum_{u=1}^{k_v}
       \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v}
       \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]},

    where :math:`q_v(u,w)` is the number of common neighbors of :math:`u`
    and :math:`w` other than :math:`v` (i.e. squares), and
    :math:`a_v(u,w) = (k_u - (1+q_v(u,w)+\theta_{uv})) +
    (k_w - (1+q_v(u,w)+\theta_{uw}))`, with :math:`\theta_{uw} = 1` if
    :math:`u` and :math:`w` are connected and 0 otherwise. [2]_

    Parameters
    ----------
    G : graph

    nodes : container of nodes, optional (default=all nodes in G)
        Compute clustering for nodes in this container.

    Returns
    -------
    c4 : dictionary
        A dictionary keyed by node with the square clustering coefficient
        value.  If `nodes` is a single node, the coefficient itself is
        returned.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(nx.square_clustering(G, 0))
    1.0
    >>> print(nx.square_clustering(G))
    {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

    Notes
    -----
    While :math:`C_3(v)` (triangle clustering) gives the probability that
    two neighbors of node v are connected with each other, :math:`C_4(v)`
    is the probability that two neighbors of node v share a common
    neighbor different from v.  This algorithm can be applied to both
    bipartite and unipartite networks.

    References
    ----------
    .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005
        Cycles and clustering in bipartite networks.
        Physical Review E (72) 056127.
    .. [2] Zhang, Peng et al. Clustering Coefficient and Community Structure of
        Bipartite Networks. Physica A: Statistical Mechanics and its Applications 387.27 (2008): 6869–6875.
        https://arxiv.org/abs/0710.0117v1
    """
    node_iter = G if nodes is None else G.nbunch_iter(nodes)
    clustering = {}
    for v in node_iter:
        squares_at_v = 0
        potential = 0
        for u, w in combinations(G[v], 2):
            # Common neighbors of u and w, excluding v itself: each one
            # closes a square through v.
            n_shared = len((set(G[u]) & set(G[w])) - {v})
            squares_at_v += n_shared
            # degm counts neighbors of u (resp. w) that can never extend
            # a new square: v, the shared neighbors, and w itself if the
            # pair is directly connected.
            degm = n_shared + 1
            if w in G[u]:
                degm += 1
            potential += (len(G[u]) - degm) + (len(G[w]) - degm) + n_shared
        clustering[v] = squares_at_v / potential if potential > 0 else 0
    if nodes in G:
        # A single node was requested: unwrap the sole entry.
        return clustering[nodes]
    return clustering
|
544 |
+
|
545 |
+
|
546 |
+
@not_implemented_for("directed")
|
547 |
+
@nx._dispatchable
|
548 |
+
def generalized_degree(G, nodes=None):
    r"""Compute the generalized degree for nodes.

    For each node, the generalized degree shows how many edges of given
    triangle multiplicity the node is connected to.  The triangle
    multiplicity of an edge is the number of triangles the edge
    participates in.  The generalized degree of node :math:`i` can be
    written as a vector :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc,
    k_i^{(N-2)})` where :math:`k_i^{(j)}` is the number of edges attached
    to node :math:`i` that participate in :math:`j` triangles.

    Parameters
    ----------
    G : graph

    nodes : container of nodes, optional (default=all nodes in G)
        Compute the generalized degree for nodes in this container.

    Returns
    -------
    out : Counter, or dictionary of Counters
        Generalized degree of specified nodes.  The Counter is keyed by
        edge triangle multiplicity.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(nx.generalized_degree(G, 0))
    Counter({3: 4})
    >>> print(nx.generalized_degree(G))
    {0: Counter({3: 4}), 1: Counter({3: 4}), 2: Counter({3: 4}), 3: Counter({3: 4}), 4: Counter({3: 4})}

    To recover the number of triangles attached to a node:

    >>> k1 = nx.generalized_degree(G, 0)
    >>> sum([k * v for k, v in k1.items()]) / 2 == nx.triangles(G, 0)
    True

    Notes
    -----
    Self loops are ignored.

    In a network of N nodes, the highest triangle multiplicity an edge can
    have is N-2.

    The return value does not include a `zero` entry if no edges of a
    particular triangle multiplicity are present.

    The number of triangles node :math:`i` is attached to can be recovered
    from the generalized degree :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc,
    k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\dotsc
    +(N-2)k_i^{(N-2)})/2`.

    References
    ----------
    .. [1] Networks with arbitrary edge multiplicities by V. Zlatić,
        D. Garlaschelli and G. Caldarelli, EPL (Europhysics Letters),
        Volume 97, Number 2 (2012).
        https://iopscience.iop.org/article/10.1209/0295-5075/97/28005
    """
    if nodes in G:
        # Single node: the generalized-degree Counter is the fourth item
        # yielded by the shared triangle/degree iterator.
        return next(_triangles_and_degree_iter(G, nodes))[3]
    return {
        node: counter
        for node, _deg, _tri, counter in _triangles_and_degree_iter(G, nodes)
    }
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/communicability_alg.py
ADDED
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Communicability.
|
3 |
+
"""
|
4 |
+
import networkx as nx
|
5 |
+
from networkx.utils import not_implemented_for
|
6 |
+
|
7 |
+
__all__ = ["communicability", "communicability_exp"]
|
8 |
+
|
9 |
+
|
10 |
+
@not_implemented_for("directed")
|
11 |
+
@not_implemented_for("multigraph")
|
12 |
+
@nx._dispatchable
|
13 |
+
def communicability(G):
    r"""Returns communicability between all pairs of nodes in G.

    The communicability between pairs of nodes in G is the sum of
    walks of different lengths starting at node u and ending at node v.

    Parameters
    ----------
    G: graph

    Returns
    -------
    comm: dictionary of dictionaries
        Dictionary of dictionaries keyed by nodes with communicability
        as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    communicability_exp:
        Communicability between all pairs of nodes in G using spectral
        decomposition.
    communicability_betweenness_centrality:
        Communicability betweenness centrality for each node in G.

    Notes
    -----
    This algorithm uses a spectral decomposition of the adjacency matrix.
    Let G=(V,E) be a simple undirected graph.  Using the connection
    between the powers of the adjacency matrix and the number of walks
    in the graph, the communicability between nodes `u` and `v` based on
    the graph spectrum is [1]_

    .. math::
        C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}},

    where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}`
    orthonormal eigenvector of the adjacency matrix associated with the
    eigenvalue `\lambda_{j}`.

    References
    ----------
    .. [1] Ernesto Estrada, Naomichi Hatano,
       "Communicability in complex networks",
       Phys. Rev. E 77, 036111 (2008).
       https://arxiv.org/abs/0707.0756

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
    >>> c = nx.communicability(G)
    """
    import numpy as np

    nodelist = list(G)  # fixes the node ordering used in the matrix
    adjacency = nx.to_numpy_array(G, nodelist)
    # Binarize: edge weights are ignored, only adjacency matters.
    adjacency[adjacency != 0.0] = 1
    # eigh is valid because the graph is undirected (symmetric matrix);
    # eigenvectors come back orthonormal, as the formula requires.
    eigvals, eigvecs = np.linalg.eigh(adjacency)
    exp_eigvals = np.exp(eigvals)
    index = {node: i for i, node in enumerate(nodelist)}
    n = len(nodelist)
    comm = {}
    for u in G:
        row = {}
        p = index[u]
        for v in G:
            q = index[v]
            total = 0
            # Accumulate sum_j phi_j(u) * phi_j(v) * e^{lambda_j}.
            for j in range(n):
                total += eigvecs[p, j] * eigvecs[q, j] * exp_eigvals[j]
            row[v] = float(total)
        comm[u] = row
    return comm
|
90 |
+
|
91 |
+
|
92 |
+
@not_implemented_for("directed")
|
93 |
+
@not_implemented_for("multigraph")
|
94 |
+
@nx._dispatchable
|
95 |
+
def communicability_exp(G):
    r"""Returns communicability between all pairs of nodes in G.

    Communicability between pair of node (u,v) of node in G is the sum of
    walks of different lengths starting at node u and ending at node v.

    Parameters
    ----------
    G: graph

    Returns
    -------
    comm: dictionary of dictionaries
        Dictionary of dictionaries keyed by nodes with communicability
        as the value.

    Raises
    ------
    NetworkXError
        If the graph is not undirected and simple.

    See Also
    --------
    communicability:
        Communicability between pairs of nodes in G.
    communicability_betweenness_centrality:
        Communicability betweenness centrality for each node in G.

    Notes
    -----
    This algorithm uses matrix exponentiation of the adjacency matrix.

    Let G=(V,E) be a simple undirected graph.  Using the connection
    between the powers of the adjacency matrix and the number of walks in
    the graph, the communicability between nodes u and v is [1]_,

    .. math::
        C(u,v) = (e^A)_{uv},

    where `A` is the adjacency matrix of G.

    References
    ----------
    .. [1] Ernesto Estrada, Naomichi Hatano,
       "Communicability in complex networks",
       Phys. Rev. E 77, 036111 (2008).
       https://arxiv.org/abs/0707.0756

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
    >>> c = nx.communicability_exp(G)
    """
    import scipy as sp

    nodelist = list(G)  # fixes the node ordering used in the matrix
    adjacency = nx.to_numpy_array(G, nodelist)
    # Binarize: edge weights are ignored, only adjacency matters.
    adjacency[adjacency != 0.0] = 1
    # The communicability matrix is the matrix exponential of A.
    expA = sp.linalg.expm(adjacency)
    index = {node: i for i, node in enumerate(nodelist)}
    return {u: {v: float(expA[index[u], index[v]]) for v in G} for u in G}
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/core.py
ADDED
@@ -0,0 +1,648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Find the k-cores of a graph.
|
3 |
+
|
4 |
+
The k-core is found by recursively pruning nodes with degrees less than k.
|
5 |
+
|
6 |
+
See the following references for details:
|
7 |
+
|
8 |
+
An O(m) Algorithm for Cores Decomposition of Networks
|
9 |
+
Vladimir Batagelj and Matjaz Zaversnik, 2003.
|
10 |
+
https://arxiv.org/abs/cs.DS/0310049
|
11 |
+
|
12 |
+
Generalized Cores
|
13 |
+
Vladimir Batagelj and Matjaz Zaversnik, 2002.
|
14 |
+
https://arxiv.org/pdf/cs/0202039
|
15 |
+
|
16 |
+
For directed graphs a more general notion is that of D-cores which
|
17 |
+
looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core
|
18 |
+
is the k-core.
|
19 |
+
|
20 |
+
D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy
|
21 |
+
Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011.
|
22 |
+
http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf
|
23 |
+
|
24 |
+
Multi-scale structure and topological anomaly detection via a new network \
|
25 |
+
statistic: The onion decomposition
|
26 |
+
L. Hébert-Dufresne, J. A. Grochow, and A. Allard
|
27 |
+
Scientific Reports 6, 31708 (2016)
|
28 |
+
http://doi.org/10.1038/srep31708
|
29 |
+
|
30 |
+
"""
|
31 |
+
import networkx as nx
|
32 |
+
|
33 |
+
__all__ = [
|
34 |
+
"core_number",
|
35 |
+
"k_core",
|
36 |
+
"k_shell",
|
37 |
+
"k_crust",
|
38 |
+
"k_corona",
|
39 |
+
"k_truss",
|
40 |
+
"onion_layers",
|
41 |
+
]
|
42 |
+
|
43 |
+
|
44 |
+
@nx.utils.not_implemented_for("multigraph")
|
45 |
+
@nx._dispatchable
|
46 |
+
def core_number(G):
    """Returns the core number for each node.

    A k-core is a maximal subgraph that contains nodes of degree k or more.

    The core number of a node is the largest value k of a k-core containing
    that node.

    Parameters
    ----------
    G : NetworkX graph
       An undirected or directed graph

    Returns
    -------
    core_number : dictionary
       A dictionary keyed by node to the core number.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a multigraph or contains self loops.

    Notes
    -----
    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    This implements the O(m) bucket-sort algorithm of Batagelj and
    Zaversnik [1]_: nodes are kept sorted by their current core estimate
    and repeatedly "demoted" in place, so no priority queue is needed.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> nx.core_number(H)
    {0: 1, 1: 2, 2: 2, 3: 2, 4: 1, 5: 2, 6: 0}
    >>> G = nx.DiGraph()
    >>> G.add_edges_from([(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)])
    >>> nx.core_number(G)
    {1: 2, 2: 2, 3: 2, 4: 2}

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks
       Vladimir Batagelj and Matjaz Zaversnik, 2003.
       https://arxiv.org/abs/cs.DS/0310049
    """
    if nx.number_of_selfloops(G) > 0:
        msg = (
            "Input graph has self loops which is not permitted; "
            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
        )
        raise nx.NetworkXNotImplemented(msg)
    degrees = dict(G.degree())
    # Sort nodes by degree.
    nodes = sorted(degrees, key=degrees.get)
    # bin_boundaries[d] is the index in `nodes` where the run of nodes
    # with (current) value d begins; together the boundaries partition
    # the sorted `nodes` list into degree buckets.
    bin_boundaries = [0]
    curr_degree = 0
    for i, v in enumerate(nodes):
        if degrees[v] > curr_degree:
            # Repeat the boundary for any skipped degree values so that
            # bin_boundaries can be indexed directly by degree.
            bin_boundaries.extend([i] * (degrees[v] - curr_degree))
            curr_degree = degrees[v]
    # node_pos[v] is v's current index in `nodes`; maintained so a node
    # can be moved between buckets in O(1).
    node_pos = {v: pos for pos, v in enumerate(nodes)}
    # The initial guess for the core number of a node is its degree.
    core = degrees
    nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
    # Process nodes in nondecreasing order of current core estimate.
    # When v is reached, core[v] is final; each neighbor u with a larger
    # estimate loses one supporting neighbor and is demoted by one.
    for v in nodes:
        for u in nbrs[v]:
            if core[u] > core[v]:
                # v no longer supports u's current core estimate.
                nbrs[u].remove(v)
                # Move u to the front of its bucket by swapping it with
                # the node currently at the bucket boundary...
                pos = node_pos[u]
                bin_start = bin_boundaries[core[u]]
                node_pos[u] = bin_start
                node_pos[nodes[bin_start]] = pos
                nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
                # ...then shrink that bucket by one, which transfers u
                # into the bucket below, matching its decremented value.
                bin_boundaries[core[u]] += 1
                core[u] -= 1
    return core
|
122 |
+
|
123 |
+
|
124 |
+
def _core_subgraph(G, k_filter, k=None, core=None):
    """Returns the subgraph induced by nodes passing filter `k_filter`.

    Shared driver for the public k-core/k-shell/k-corona functions.

    Parameters
    ----------
    G : NetworkX graph
        The graph or directed graph to process
    k_filter : filter function
        This function filters the nodes chosen.  It takes three inputs:
        A node of G, the filter's cutoff, and the core dict of the graph.
        The function should return a Boolean value.
    k : int, optional
        The order of the core.  If not specified use the max core number.
        This value is used as the cutoff for the filter.
    core : dict, optional
        Precomputed core numbers keyed by node for the graph `G`.
        If not specified, the core numbers will be computed from `G`.

    """
    if core is None:
        core = core_number(G)
    if k is None:
        # Default cutoff: the largest core number present in G.
        k = max(core.values())
    selected = [node for node in core if k_filter(node, k, core)]
    return G.subgraph(selected).copy()
|
149 |
+
|
150 |
+
|
151 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
|
152 |
+
def k_core(G, k=None, core_number=None):
    """Returns the k-core of G.

    A k-core is a maximal subgraph that contains nodes of degree `k` or more.

    .. deprecated:: 3.3
       `k_core` will not accept `MultiGraph` objects in version 3.5.

    Parameters
    ----------
    G : NetworkX graph
      A graph or directed graph
    k : int, optional
      The order of the core.  If not specified return the main core.
    core_number : dictionary, optional
      Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
      The k-core subgraph

    Raises
    ------
    NetworkXNotImplemented
      The k-core is not defined for multigraphs or graphs with self loops.

    Notes
    -----
    The main core is the core with `k` as the largest core_number.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> H.degree
    DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0})
    >>> nx.k_core(H).nodes
    NodeView((1, 2, 3, 5))

    See Also
    --------
    core_number

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks
       Vladimir Batagelj and Matjaz Zaversnik, 2003.
       https://arxiv.org/abs/cs.DS/0310049
    """

    import warnings

    if G.is_multigraph():
        warnings.warn(
            (
                "\n\n`k_core` will not accept `MultiGraph` objects in version 3.5.\n"
                "Convert it to an undirected graph instead, using::\n\n"
                "\tG = nx.Graph(G)\n"
            ),
            category=DeprecationWarning,
            stacklevel=5,
        )

    # A node belongs to the k-core iff its core number is at least k.
    return _core_subgraph(G, lambda v, k, c: c[v] >= k, k, core_number)
|
225 |
+
|
226 |
+
|
227 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
|
228 |
+
def k_shell(G, k=None, core_number=None):
    """Returns the k-shell of G.

    The k-shell is the subgraph induced by nodes with core number k.
    That is, nodes in the k-core that are not in the (k+1)-core.

    .. deprecated:: 3.3
       `k_shell` will not accept `MultiGraph` objects in version 3.5.

    Parameters
    ----------
    G : NetworkX graph
      A graph or directed graph.
    k : int, optional
      The order of the shell.  If not specified return the outer shell.
    core_number : dictionary, optional
      Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
       The k-shell subgraph

    Raises
    ------
    NetworkXNotImplemented
        The k-shell is not implemented for multigraphs or graphs with self loops.

    Notes
    -----
    This is similar to k_corona but in that case only neighbors in the
    k-core are considered.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> H.degree
    DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0})
    >>> nx.k_shell(H, k=1).nodes
    NodeView((0, 4))

    See Also
    --------
    core_number
    k_corona

    References
    ----------
    .. [1] A model of Internet topology using k-shell decomposition
       Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,
       and Eran Shir, PNAS  July 3, 2007   vol. 104  no. 27  11150-11154
       http://www.pnas.org/content/104/27/11150.full
    """

    import warnings

    if G.is_multigraph():
        warnings.warn(
            (
                "\n\n`k_shell` will not accept `MultiGraph` objects in version 3.5.\n"
                "Convert it to an undirected graph instead, using::\n\n"
                "\tG = nx.Graph(G)\n"
            ),
            category=DeprecationWarning,
            stacklevel=5,
        )

    # A node belongs to the k-shell iff its core number is exactly k.
    return _core_subgraph(G, lambda v, k, c: c[v] == k, k, core_number)
|
307 |
+
|
308 |
+
|
309 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
|
310 |
+
def k_crust(G, k=None, core_number=None):
    """Returns the k-crust of G.

    The k-crust is the graph G with the edges of the k-core removed
    and isolated nodes found after the removal of edges are also removed.

    .. deprecated:: 3.3
       `k_crust` will not accept `MultiGraph` objects in version 3.5.

    Parameters
    ----------
    G : NetworkX graph
       A graph or directed graph.
    k : int, optional
      The order of the shell.  If not specified return the main crust.
    core_number : dictionary, optional
      Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
       The k-crust subgraph

    Raises
    ------
    NetworkXNotImplemented
        The k-crust is not implemented for multigraphs or graphs with self loops.

    Notes
    -----
    This definition of k-crust is different than the definition in [1]_.
    The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm.

    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> H.degree
    DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0})
    >>> nx.k_crust(H, k=1).nodes
    NodeView((0, 4, 6))

    See Also
    --------
    core_number

    References
    ----------
    .. [1] A model of Internet topology using k-shell decomposition
       Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,
       and Eran Shir, PNAS  July 3, 2007   vol. 104  no. 27  11150-11154
       http://www.pnas.org/content/104/27/11150.full
    """

    import warnings

    if G.is_multigraph():
        warnings.warn(
            (
                "\n\n`k_crust` will not accept `MultiGraph` objects in version 3.5.\n"
                "Convert it to an undirected graph instead, using::\n\n"
                "\tG = nx.Graph(G)\n"
            ),
            category=DeprecationWarning,
            stacklevel=5,
        )

    # The crust's default cutoff is one below _core_subgraph's default
    # (max core number - 1), so the selection is inlined here rather than
    # routed through the shared helper.  Filter: core number <= cutoff.
    if core_number is None:
        core_number = nx.core_number(G)
    cutoff = k if k is not None else max(core_number.values()) - 1
    keep = (v for v in core_number if core_number[v] <= cutoff)
    return G.subgraph(keep).copy()
|
390 |
+
|
391 |
+
|
392 |
+
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
|
393 |
+
def k_corona(G, k, core_number=None):
    """Returns the k-corona of G.

    The k-corona is the subgraph of nodes in the k-core which have
    exactly k neighbors in the k-core.

    .. deprecated:: 3.3
       `k_corona` will not accept `MultiGraph` objects in version 3.5.

    Parameters
    ----------
    G : NetworkX graph
       A graph or directed graph
    k : int
       The order of the corona.
    core_number : dictionary, optional
       Precomputed core numbers for the graph G.

    Returns
    -------
    G : NetworkX graph
       The k-corona subgraph

    Raises
    ------
    NetworkXNotImplemented
        The k-corona is not defined for multigraphs or graphs with self loops.

    Notes
    -----
    For directed graphs the node degree is defined to be the
    in-degree + out-degree.

    Graph, node, and edge attributes are copied to the subgraph.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> H.degree
    DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0})
    >>> nx.k_corona(H, k=2).nodes
    NodeView((1, 2, 3, 5))

    See Also
    --------
    core_number

    References
    ----------
    .. [1]  k -core (bootstrap) percolation on complex networks:
       Critical phenomena and nonlocal effects,
       A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes,
       Phys. Rev. E 73, 056101 (2006)
       http://link.aps.org/doi/10.1103/PhysRevE.73.056101
    """

    import warnings

    if G.is_multigraph():
        warnings.warn(
            (
                "\n\n`k_corona` will not accept `MultiGraph` objects in version 3.5.\n"
                "Convert it to an undirected graph instead, using::\n\n"
                "\tG = nx.Graph(G)\n"
            ),
            category=DeprecationWarning,
            stacklevel=5,
        )

    def in_corona(v, k, c):
        # Node must have core number exactly k, with exactly k of its
        # neighbors also lying in the k-core.
        if c[v] != k:
            return False
        return k == sum(1 for w in G[v] if c[w] >= k)

    return _core_subgraph(G, in_corona, k, core_number)
|
467 |
+
|
468 |
+
|
469 |
+
@nx.utils.not_implemented_for("directed")
@nx.utils.not_implemented_for("multigraph")
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
def k_truss(G, k):
    """Returns the k-truss of `G`.

    The k-truss is the maximal induced subgraph of `G` which contains at least
    three vertices where every edge is incident to at least `k-2` triangles.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph
    k : int
        The order of the truss

    Returns
    -------
    H : NetworkX graph
        The k-truss subgraph

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a multigraph or directed graph or if it contains self loops.

    Notes
    -----
    A k-clique is a (k-2)-truss and a k-truss is a (k+1)-core.

    Graph, node, and edge attributes are copied to the subgraph.

    K-trusses were originally defined in [2] which states that the k-truss
    is the maximal induced subgraph where each edge belongs to at least
    `k-2` triangles. A more recent paper, [1], uses a slightly different
    definition requiring that each edge belong to at least `k` triangles.
    This implementation uses the original definition of `k-2` triangles.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> H.degree
    DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0})
    >>> nx.k_truss(H, k=2).nodes
    NodeView((0, 1, 2, 3, 4, 5))

    References
    ----------
    .. [1] Bounds and Algorithms for k-truss. Paul Burkhardt, Vance Faber,
       David G. Harris, 2018. https://arxiv.org/abs/1806.05523v2
    .. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan
       Cohen, 2005.
    """
    if nx.number_of_selfloops(G) > 0:
        msg = (
            "Input graph has self loops which is not permitted; "
            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
        )
        raise nx.NetworkXNotImplemented(msg)

    H = G.copy()

    # Iteratively peel away edges supported by fewer than k - 2 triangles;
    # removing an edge can invalidate others, so repeat until a full sweep
    # removes nothing.
    while True:
        weak_edges = []
        visited = set()
        for u in H:
            u_nbrs = set(H[u])
            visited.add(u)
            # Examine each edge only once, from its first-visited endpoint.
            for v in (w for w in u_nbrs if w not in visited):
                # |common neighbors of u and v| == triangles on edge (u, v).
                if len(u_nbrs & set(H[v])) < (k - 2):
                    weak_edges.append((u, v))
        H.remove_edges_from(weak_edges)
        H.remove_nodes_from(list(nx.isolates(H)))
        if not weak_edges:
            break

    return H
|
549 |
+
|
550 |
+
|
551 |
+
@nx.utils.not_implemented_for("multigraph")
@nx.utils.not_implemented_for("directed")
@nx._dispatchable
def onion_layers(G):
    """Returns the layer of each vertex in an onion decomposition of the graph.

    The onion decomposition refines the k-core decomposition by providing
    information on the internal organization of each k-shell. It is usually
    used alongside the `core numbers`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph without self loops.

    Returns
    -------
    od_layers : dictionary
        A dictionary keyed by node to the onion layer. The layers are
        contiguous integers starting at 1.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a multigraph or directed graph or if it contains self loops.

    Examples
    --------
    >>> degrees = [0, 1, 2, 2, 2, 2, 3]
    >>> H = nx.havel_hakimi_graph(degrees)
    >>> H.degree
    DegreeView({0: 1, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 0})
    >>> nx.onion_layers(H)
    {6: 1, 0: 2, 4: 3, 1: 4, 2: 4, 3: 4, 5: 4}

    See Also
    --------
    core_number

    References
    ----------
    .. [1] Multi-scale structure and topological anomaly detection via a new
       network statistic: The onion decomposition
       L. Hébert-Dufresne, J. A. Grochow, and A. Allard
       Scientific Reports 6, 31708 (2016)
       http://doi.org/10.1038/srep31708
    .. [2] Percolation and the effective structure of complex networks
       A. Allard and L. Hébert-Dufresne
       Physical Review X 9, 011023 (2019)
       http://doi.org/10.1103/PhysRevX.9.011023
    """
    if nx.number_of_selfloops(G) > 0:
        msg = (
            "Input graph contains self loops which is not permitted; "
            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
        )
        raise nx.NetworkXNotImplemented(msg)
    # Dictionaries to register the k-core/onion decompositions.
    od_layers = {}
    # Adjacency list (mutable copy: neighbors are deleted as nodes are peeled).
    neighbors = {v: list(nx.all_neighbors(G, v)) for v in G}
    # Effective degree of nodes (degree within the not-yet-peeled subgraph).
    degrees = dict(G.degree())
    # Performs the onion decomposition.
    current_core = 1
    current_layer = 1
    # Sets vertices of degree 0 to layer 1, if any.
    isolated_nodes = list(nx.isolates(G))
    if len(isolated_nodes) > 0:
        for v in isolated_nodes:
            od_layers[v] = current_layer
            degrees.pop(v)
        current_layer = 2
    # Finds the layer for the remaining nodes.
    while len(degrees) > 0:
        # Sets the order for looking at nodes.
        # Re-sorted each pass because peeling changes effective degrees.
        nodes = sorted(degrees, key=degrees.get)
        # Sets properly the current core.
        # The core number only ever grows as nodes are peeled away.
        min_degree = degrees[nodes[0]]
        if min_degree > current_core:
            current_core = min_degree
        # Identifies vertices in the current layer.
        # All nodes whose effective degree is <= current_core are peeled
        # together as one onion layer.
        this_layer = []
        for n in nodes:
            if degrees[n] > current_core:
                break
            this_layer.append(n)
        # Identifies the core/layer of the vertices in the current layer.
        for v in this_layer:
            od_layers[v] = current_layer
            for n in neighbors[v]:
                # Remove v from the remaining graph: update both the
                # adjacency lists and the effective degrees of its neighbors.
                neighbors[n].remove(v)
                degrees[n] = degrees[n] - 1
            degrees.pop(v)
        # Updates the layer count.
        current_layer = current_layer + 1
    # Returns the dictionaries containing the onion layer of each vertices.
    return od_layers
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/covering.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" Functions related to graph covers."""
|
2 |
+
|
3 |
+
from functools import partial
|
4 |
+
from itertools import chain
|
5 |
+
|
6 |
+
import networkx as nx
|
7 |
+
from networkx.utils import arbitrary_element, not_implemented_for
|
8 |
+
|
9 |
+
__all__ = ["min_edge_cover", "is_edge_cover"]
|
10 |
+
|
11 |
+
|
12 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def min_edge_cover(G, matching_algorithm=None):
    """Returns the min cardinality edge cover of the graph as a set of edges.

    A smallest edge cover can be found in polynomial time by finding
    a maximum matching and extending it greedily so that all nodes
    are covered. This function follows that process. A maximum matching
    algorithm can be specified for the first step of the algorithm.
    The resulting set may return a set with one 2-tuple for each edge,
    (the usual case) or with both 2-tuples `(u, v)` and `(v, u)` for
    each edge. The latter is only done when a bipartite matching algorithm
    is specified as `matching_algorithm`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    matching_algorithm : function
        A function that returns a maximum cardinality matching for `G`.
        The function must take one input, the graph `G`, and return
        either a set of edges (with only one direction for the pair of nodes)
        or a dictionary mapping each node to its mate. If not specified,
        :func:`~networkx.algorithms.matching.max_weight_matching` is used.
        Common bipartite matching functions include
        :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
        or
        :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`.

    Returns
    -------
    min_cover : set

        A set of the edges in a minimum edge cover in the form of tuples.
        It contains only one of the equivalent 2-tuples `(u, v)` and `(v, u)`
        for each edge. If a bipartite method is used to compute the matching,
        the returned set contains both the 2-tuples `(u, v)` and `(v, u)`
        for each edge of a minimum edge cover.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> sorted(nx.min_edge_cover(G))
    [(2, 1), (3, 0)]

    Notes
    -----
    An edge cover of a graph is a set of edges such that every node of
    the graph is incident to at least one edge of the set.
    The minimum edge cover is an edge covering of smallest cardinality.

    Due to its implementation, the worst-case running time of this algorithm
    is bounded by the worst-case running time of the function
    ``matching_algorithm``.

    Minimum edge cover for `G` can also be found using the `min_edge_covering`
    function in :mod:`networkx.algorithms.bipartite.covering` which is
    simply this function with a default matching algorithm of
    :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
    """
    if len(G) == 0:
        return set()
    if nx.number_of_isolates(G) > 0:
        # ``min_cover`` does not exist as there is an isolated node
        raise nx.NetworkXException(
            "Graph has a node with no edge incident on it, so no edge cover exists."
        )
    if matching_algorithm is None:
        # maxcardinality=True: cover as many nodes as possible in the matching
        # so that fewer greedy extension edges are needed below.
        matching_algorithm = partial(nx.max_weight_matching, maxcardinality=True)
    maximum_matching = matching_algorithm(G)
    # ``min_cover`` is superset of ``maximum_matching``
    try:
        # bipartite matching algs return dict so convert if needed
        min_cover = set(maximum_matching.items())
        bipartite_cover = True
    except AttributeError:
        min_cover = maximum_matching
        bipartite_cover = False
    # iterate for uncovered nodes
    uncovered_nodes = set(G) - {v for u, v in min_cover} - {u for u, v in min_cover}
    for v in uncovered_nodes:
        # Since `v` is uncovered, each edge incident to `v` will join it
        # with a covered node (otherwise, if there were an edge joining
        # uncovered nodes `u` and `v`, the maximum matching algorithm
        # would have found it), so we can choose an arbitrary edge
        # incident to `v`. (This applies only in a simple graph, not a
        # multigraph.)
        u = arbitrary_element(G[v])
        min_cover.add((u, v))
        if bipartite_cover:
            # Keep the dict-derived cover symmetric: store both orientations.
            min_cover.add((v, u))
    return min_cover
|
106 |
+
|
107 |
+
|
108 |
+
@not_implemented_for("directed")
@nx._dispatchable
def is_edge_cover(G, cover):
    """Decides whether a set of edges is a valid edge cover of the graph.

    A set of edges covers the graph exactly when every node of the graph
    is an endpoint of at least one edge in the set, so the check reduces
    to a subset test between the graph's node set and the set of all
    endpoints appearing in `cover`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected bipartite graph.

    cover : set
        Set of edges to be checked.

    Returns
    -------
    bool
        Whether the set of edges is a valid edge cover of the graph.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> cover = {(2, 1), (3, 0)}
    >>> nx.is_edge_cover(G, cover)
    True

    Notes
    -----
    An edge cover of a graph is a set of edges such that every node of
    the graph is incident to at least one edge of the set.
    """
    endpoints = {node for edge in cover for node in edge}
    return set(G) <= endpoints
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/cycles.py
ADDED
@@ -0,0 +1,1231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
========================
|
3 |
+
Cycle finding algorithms
|
4 |
+
========================
|
5 |
+
"""
|
6 |
+
|
7 |
+
from collections import Counter, defaultdict
|
8 |
+
from itertools import combinations, product
|
9 |
+
from math import inf
|
10 |
+
|
11 |
+
import networkx as nx
|
12 |
+
from networkx.utils import not_implemented_for, pairwise
|
13 |
+
|
14 |
+
__all__ = [
|
15 |
+
"cycle_basis",
|
16 |
+
"simple_cycles",
|
17 |
+
"recursive_simple_cycles",
|
18 |
+
"find_cycle",
|
19 |
+
"minimum_cycle_basis",
|
20 |
+
"chordless_cycles",
|
21 |
+
"girth",
|
22 |
+
]
|
23 |
+
|
24 |
+
|
25 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def cycle_basis(G, root=None):
    """Returns a list of cycles which form a basis for cycles of G.

    A basis for cycles of a network is a minimal collection of
    cycles such that any cycle in the network can be written
    as a sum of cycles in the basis. Here summation of cycles
    is defined as "exclusive or" of the edges. Cycle bases are
    useful, e.g. when deriving equations for electric circuits
    using Kirchhoff's Laws.

    Parameters
    ----------
    G : NetworkX Graph
    root : node, optional
        Specify starting node for basis.

    Returns
    -------
    A list of cycle lists. Each cycle list is a list of nodes
    which forms a cycle (loop) in G.

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_cycle(G, [0, 1, 2, 3])
    >>> nx.add_cycle(G, [0, 3, 4, 5])
    >>> nx.cycle_basis(G, 0)
    [[3, 4, 5, 0], [1, 2, 3, 0]]

    Notes
    -----
    This is adapted from algorithm CACM 491 [1]_.

    References
    ----------
    .. [1] Paton, K. An algorithm for finding a fundamental set of
       cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.

    See Also
    --------
    simple_cycles
    minimum_cycle_basis
    """
    gnodes = dict.fromkeys(G)  # set-like object that maintains node order
    cycles = []
    while gnodes:  # loop over connected components
        if root is None:
            root = gnodes.popitem()[0]
        stack = [root]
        pred = {root: root}  # spanning-tree predecessor of each visited node
        used = {root: set()}  # used[v]: neighbors of v already linked to it
        while stack:  # walk the spanning tree finding cycles
            z = stack.pop()  # use last-in so cycles easier to find
            zused = used[z]
            for nbr in G[z]:
                if nbr not in used:  # new node
                    pred[nbr] = z
                    stack.append(nbr)
                    used[nbr] = {z}
                elif nbr == z:  # self loops
                    cycles.append([z])
                elif nbr not in zused:  # found a cycle
                    pn = used[nbr]
                    cycle = [nbr, z]
                    # Walk back through tree predecessors of z until reaching
                    # a node already linked to nbr; that closes the cycle.
                    p = pred[z]
                    while p not in pn:
                        cycle.append(p)
                        p = pred[p]
                    cycle.append(p)
                    cycles.append(cycle)
                    used[nbr].add(z)
        # All nodes reached from this root belong to the same component;
        # drop them so the outer loop moves on to the next component.
        for node in pred:
            gnodes.pop(node, None)
        root = None
    return cycles
|
103 |
+
|
104 |
+
|
105 |
+
@nx._dispatchable
def simple_cycles(G, length_bound=None):
    """Find simple cycles (elementary circuits) of a graph.

    A `simple cycle`, or `elementary circuit`, is a closed path where
    no node appears twice. In a directed graph, two simple cycles are distinct
    if they are not cyclic permutations of each other. In an undirected graph,
    two simple cycles are distinct if they are not cyclic permutations of each
    other nor of the other's reversal.

    Optionally, the cycles are bounded in length. In the unbounded case, we use
    a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. In
    the bounded case, we use a version of the algorithm of Gupta and
    Suzumura[2]_. There may be better algorithms for some cases [3]_ [4]_ [5]_.

    The algorithms of Johnson, and Gupta and Suzumura, are enhanced by some
    well-known preprocessing techniques. When G is directed, we restrict our
    attention to strongly connected components of G, generate all simple cycles
    containing a certain node, remove that node, and further decompose the
    remainder into strongly connected components. When G is undirected, we
    restrict our attention to biconnected components, generate all simple cycles
    containing a particular edge, remove that edge, and further decompose the
    remainder into biconnected components.

    Note that multigraphs are supported by this function -- and in undirected
    multigraphs, a pair of parallel edges is considered a cycle of length 2.
    Likewise, self-loops are considered to be cycles of length 1. We define
    cycles as sequences of nodes; so the presence of loops and parallel edges
    does not change the number of simple cycles in a graph.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph

    length_bound : int or None, optional (default=None)
        If length_bound is an int, generate all simple cycles of G with length at
        most length_bound. Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    Examples
    --------
    >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]
    >>> G = nx.DiGraph(edges)
    >>> sorted(nx.simple_cycles(G))
    [[0], [0, 1, 2], [0, 2], [1, 2], [2]]

    To filter the cycles so that they don't include certain nodes or edges,
    copy your graph and eliminate those nodes or edges before calling.
    For example, to exclude self-loops from the above example:

    >>> H = G.copy()
    >>> H.remove_edges_from(nx.selfloop_edges(G))
    >>> sorted(nx.simple_cycles(H))
    [[0, 1, 2], [0, 2], [1, 2]]

    Notes
    -----
    When length_bound is None, the time complexity is $O((n+e)(c+1))$ for $n$
    nodes, $e$ edges and $c$ simple circuits. Otherwise, when length_bound > 1,
    the time complexity is $O((c+n)(k-1)d^k)$ where $d$ is the average degree of
    the nodes of G and $k$ = length_bound.

    Raises
    ------
    ValueError
        when length_bound < 0.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007
    .. [2] Finding All Bounded-Length Simple Cycles in a Directed Graph
       A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094
    .. [3] Enumerating the cycles of a digraph: a new preprocessing strategy.
       G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.
    .. [4] A search strategy for the elementary cycles of a directed graph.
       J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
       v. 16, no. 2, 192-204, 1976.
    .. [5] Optimal Listing of Cycles and st-Paths in Undirected Graphs
       R. Ferreira and R. Grossi and A. Marino and N. Pisanti and R. Rizzi and
       G. Sacomoto https://arxiv.org/abs/1205.2766

    See Also
    --------
    cycle_basis
    chordless_cycles
    """

    if length_bound is not None:
        if length_bound == 0:
            return
        elif length_bound < 0:
            raise ValueError("length bound must be non-negative")

    directed = G.is_directed()
    # Self-loops are length-1 cycles; yield them first for every graph type.
    yield from ([v] for v, Gv in G.adj.items() if v in Gv)

    if length_bound is not None and length_bound == 1:
        return

    if G.is_multigraph() and not directed:
        # In an undirected multigraph, a pair of parallel edges is a 2-cycle.
        visited = set()
        for u, Gu in G.adj.items():
            multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
            yield from ([u, v] for v, m in multiplicity if m > 1)
            visited.add(u)

    # explicitly filter out loops; implicitly filter out parallel edges
    if directed:
        G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
    else:
        G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)

    # this case is not strictly necessary but improves performance
    if length_bound is not None and length_bound == 2:
        if directed:
            # Directed 2-cycles are exactly the antiparallel edge pairs.
            visited = set()
            for u, Gu in G.adj.items():
                yield from (
                    [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u)
                )
                visited.add(u)
        return

    if directed:
        yield from _directed_cycle_search(G, length_bound)
    else:
        yield from _undirected_cycle_search(G, length_bound)
|
239 |
+
|
240 |
+
|
241 |
+
def _directed_cycle_search(G, length_bound):
    """A dispatch function for `simple_cycles` for directed graphs.

    Generates every simple cycle of `G` by binary partition: repeatedly pop a
    nontrivial strongly connected component, emit all cycles passing through
    one anchor node of that component, delete the anchor, and push the
    surviving nontrivial strongly connected components back on the work list.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph; nodes are removed from it during the search.

    length_bound : int or None
        If length_bound is an int, generate all simple cycles of G with length
        at most length_bound. Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.
    """

    pending = [comp for comp in nx.strongly_connected_components(G) if len(comp) > 1]
    while pending:
        comp = pending.pop()
        sub = G.subgraph(comp)
        anchor = next(iter(comp))
        if length_bound is None:
            yield from _johnson_cycle_search(sub, [anchor])
        else:
            yield from _bounded_cycle_search(sub, [anchor], length_bound)
        # Remove the anchor only after searching, so cycles through it exist.
        G.remove_node(anchor)
        # `sub` is a live view, so it reflects the anchor's removal here.
        pending.extend(
            comp for comp in nx.strongly_connected_components(sub) if len(comp) > 1
        )
|
289 |
+
|
290 |
+
|
291 |
+
def _undirected_cycle_search(G, length_bound):
    """A dispatch function for `simple_cycles` for undirected graphs.

    Generates every simple cycle of `G` by binary partition: repeatedly pop a
    nontrivial biconnected component, pick one of its edges (u, v), emit all
    cycles containing that edge, delete the edge, and push the surviving
    nontrivial biconnected components back on the work list.

    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph; edges are removed from it during the search.

    length_bound : int or None
        If length_bound is an int, generate all simple cycles of G with length
        at most length_bound. Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.
    """

    pending = [comp for comp in nx.biconnected_components(G) if len(comp) > 2]
    while pending:
        comp = pending.pop()
        sub = G.subgraph(comp)
        seed = list(next(iter(sub.edges)))
        # Remove the seed edge *before* searching, so the search cannot emit
        # the spurious 3-cycle [u, v, u] formed by the edge and its reversal.
        G.remove_edge(*seed)
        if length_bound is None:
            yield from _johnson_cycle_search(sub, seed)
        else:
            yield from _bounded_cycle_search(sub, seed, length_bound)
        # `sub` is a live view, so it reflects the seed edge's removal here.
        pending.extend(comp for comp in nx.biconnected_components(sub) if len(comp) > 2)
|
339 |
+
|
340 |
+
|
341 |
+
class _NeighborhoodCache(dict):
|
342 |
+
"""Very lightweight graph wrapper which caches neighborhoods as list.
|
343 |
+
|
344 |
+
This dict subclass uses the __missing__ functionality to query graphs for
|
345 |
+
their neighborhoods, and store the result as a list. This is used to avoid
|
346 |
+
the performance penalty incurred by subgraph views.
|
347 |
+
"""
|
348 |
+
|
349 |
+
def __init__(self, G):
|
350 |
+
self.G = G
|
351 |
+
|
352 |
+
def __missing__(self, v):
|
353 |
+
Gv = self[v] = list(self.G[v])
|
354 |
+
return Gv
|
355 |
+
|
356 |
+
|
357 |
+
def _johnson_cycle_search(G, path):
    """The main loop of the cycle-enumeration algorithm of Johnson.

    Iterative (non-recursive) variant of Johnson's blocked-set search.
    The search extends ``path`` in place; cycles are reported when the
    walk returns to ``path[0]``.

    Parameters
    ----------
    G : NetworkX Graph or DiGraph
        A graph

    path : list
        A cycle prefix.  All cycles generated will begin with this prefix.
        NOTE(review): mutated in place during the search.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007

    """

    # Cache neighborhoods as lists to avoid repeated graph-view lookups.
    G = _NeighborhoodCache(G)
    blocked = set(path)
    B = defaultdict(set)  # graph portions that yield no elementary circuit
    start = path[0]
    # Explicit DFS stack of neighbor iterators, rooted at the end of the prefix.
    stack = [iter(G[path[-1]])]
    # closed[i] records whether a cycle was found at or below stack depth i;
    # it decides between unblocking and B-list recording when backtracking.
    closed = [False]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if w == start:
                # Walk returned to the root: the current path is a cycle.
                yield path[:]
                closed[-1] = True
            elif w not in blocked:
                # Descend: push w onto the path and continue the DFS from it.
                path.append(w)
                closed.append(False)
                stack.append(iter(G[w]))
                blocked.add(w)
                break
        else:  # no more nbrs
            stack.pop()
            v = path.pop()
            if closed.pop():
                # A cycle passed through v: propagate the flag to the parent
                # frame and recursively unblock v (Johnson's UNBLOCK).
                if closed:
                    closed[-1] = True
                unblock_stack = {v}
                while unblock_stack:
                    u = unblock_stack.pop()
                    if u in blocked:
                        blocked.remove(u)
                        unblock_stack.update(B[u])
                        B[u].clear()
            else:
                # No cycle through v: keep v blocked, but arrange for it to be
                # unblocked as soon as any successor of v is unblocked.
                for w in G[v]:
                    B[w].add(v)
def _bounded_cycle_search(G, path, length_bound):
    """The main loop of the cycle-enumeration algorithm of Gupta and Suzumura.

    Length-bounded analogue of Johnson's search: a per-node ``lock`` depth
    replaces the blocked set, so a node may be revisited later on a shorter
    prefix.

    Parameters
    ----------
    G : NetworkX Graph or DiGraph
        A graph

    path : list
        A cycle prefix.  All cycles generated will begin with this prefix.
        NOTE(review): mutated in place during the search.

    length_bound: int
        A length bound.  All cycles generated will have length at most length_bound.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Finding All Bounded-Length Simple Cycles in a Directed Graph
       A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094

    """
    # Cache neighborhoods as lists to avoid repeated graph-view lookups.
    G = _NeighborhoodCache(G)
    # lock[v] is the shallowest depth at which v may be re-entered; 0 for the
    # prefix nodes means they are never re-entered.
    lock = {v: 0 for v in path}
    B = defaultdict(set)
    start = path[0]
    stack = [iter(G[path[-1]])]
    # blen[i] tracks the smallest remaining budget at which a cycle was closed
    # at or below depth i; it drives the relaxation step on backtrack.
    blen = [length_bound]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if w == start:
                # Walk returned to the root: the current path is a cycle.
                yield path[:]
                blen[-1] = 1
            elif len(path) < lock.get(w, length_bound):
                # w is enterable at this depth; descend and lock it here.
                path.append(w)
                blen.append(length_bound)
                lock[w] = len(path)
                stack.append(iter(G[w]))
                break
        else:
            stack.pop()
            v = path.pop()
            bl = blen.pop()
            if blen:
                blen[-1] = min(blen[-1], bl)
            if bl < length_bound:
                # A cycle was found below v: relax locks so nodes recorded in
                # B can be re-entered on prefixes short enough to close a
                # cycle within the budget.
                relax_stack = [(bl, v)]
                while relax_stack:
                    bl, u = relax_stack.pop()
                    if lock.get(u, length_bound) < length_bound - bl + 1:
                        lock[u] = length_bound - bl + 1
                        relax_stack.extend((bl + 1, w) for w in B[u].difference(path))
            else:
                # No cycle through v within the budget: record v in B so a
                # later relaxation of a successor can re-open it.
                for w in G[v]:
                    B[w].add(v)
@nx._dispatchable
def chordless_cycles(G, length_bound=None):
    """Find simple chordless cycles of a graph.

    A `simple cycle` is a closed path where no node appears twice.  In a simple
    cycle, a `chord` is an additional edge between two nodes in the cycle.  A
    `chordless cycle` is a simple cycle without chords.  Said differently, a
    chordless cycle is a cycle C in a graph G where the number of edges in the
    induced graph G[C] is equal to the length of `C`.

    Note that some care must be taken in the case that G is not a simple graph
    nor a simple digraph.  Some authors limit the definition of chordless cycles
    to have a prescribed minimum length; we do not.

    1. We interpret self-loops to be chordless cycles, except in multigraphs
       with multiple loops in parallel.  Likewise, in a chordless cycle of
       length greater than 1, there can be no nodes with self-loops.

    2. We interpret directed two-cycles to be chordless cycles, except in
       multi-digraphs when any edge in a two-cycle has a parallel copy.

    3. We interpret parallel pairs of undirected edges as two-cycles, except
       when a third (or more) parallel edge exists between the two nodes.

    4. Generalizing the above, edges with parallel clones may not occur in
       chordless cycles.

    In a directed graph, two chordless cycles are distinct if they are not
    cyclic permutations of each other.  In an undirected graph, two chordless
    cycles are distinct if they are not cyclic permutations of each other nor of
    the other's reversal.

    Optionally, the cycles are bounded in length.

    We use an algorithm strongly inspired by that of Dias et al [1]_.  It has
    been modified in the following ways:

    1. Recursion is avoided, per Python's limitations

    2. The labeling function is not necessary, because the starting paths
        are chosen (and deleted from the host graph) to prevent multiple
        occurrences of the same path

    3. The search is optionally bounded at a specified length

    4. Support for directed graphs is provided by extending cycles along
        forward edges, and blocking nodes along forward and reverse edges

    5. Support for multigraphs is provided by omitting digons from the set
        of forward edges

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph

    length_bound : int or None, optional (default=None)
        If length_bound is an int, generate all simple cycles of G with length at
        most length_bound.  Otherwise, generate all simple cycles of G.

    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    Examples
    --------
    >>> sorted(list(nx.chordless_cycles(nx.complete_graph(4))))
    [[1, 0, 2], [1, 0, 3], [2, 0, 3], [2, 1, 3]]

    Notes
    -----
    When length_bound is None, and the graph is simple, the time complexity is
    $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ chordless cycles.

    Raises
    ------
    ValueError
        when length_bound < 0.

    References
    ----------
    .. [1] Efficient enumeration of chordless cycles
       E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi
       https://arxiv.org/abs/1309.1051

    See Also
    --------
    simple_cycles
    """

    if length_bound is not None:
        if length_bound == 0:
            return
        elif length_bound < 0:
            raise ValueError("length bound must be non-negative")

    directed = G.is_directed()
    multigraph = G.is_multigraph()

    # Phase 1: yield self-loops as length-1 chordless cycles.  In a
    # multigraph, a loop with a parallel copy is chorded, hence excluded.
    if multigraph:
        yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1)
    else:
        yield from ([v] for v, Gv in G.adj.items() if v in Gv)

    if length_bound is not None and length_bound == 1:
        return

    # Nodes with loops cannot belong to longer cycles.  Let's delete them here.
    # also, we implicitly reduce the multiplicity of edges down to 1 in the case
    # of multiedges.
    if directed:
        F = nx.DiGraph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
        B = F.to_undirected(as_view=False)
    else:
        F = nx.Graph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
        B = None

    # If we're given a multigraph, we have a few cases to consider with parallel
    # edges.
    #
    # 1. If we have 2 or more edges in parallel between the nodes (u, v), we
    #    must not construct longer cycles along (u, v).
    # 2. If G is not directed, then a pair of parallel edges between (u, v) is a
    #    chordless cycle unless there exists a third (or more) parallel edge.
    # 3. If G is directed, then parallel edges do not form cycles, but do
    #    preclude back-edges from forming cycles (handled in the next section),
    #    Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also
    #    present, then we remove both from F.
    #
    # In directed graphs, we need to consider both directions that edges can
    # take, so iterate over all edges (u, v) and possibly (v, u).  In undirected
    # graphs, we need to be a little careful to only consider every edge once,
    # so we use a "visited" set to emulate node-order comparisons.

    if multigraph:
        if not directed:
            B = F.copy()
            visited = set()
        for u, Gu in G.adj.items():
            if directed:
                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items())
                for v, m in multiplicity:
                    if m > 1:
                        F.remove_edges_from(((u, v), (v, u)))
            else:
                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
                for v, m in multiplicity:
                    if m == 2:
                        yield [u, v]
                    if m > 1:
                        F.remove_edge(u, v)
                visited.add(u)

    # If we're given a directed graphs, we need to think about digons.  If we
    # have two edges (u, v) and (v, u), then that's a two-cycle.  If either edge
    # was duplicated above, then we removed both from F.  So, any digons we find
    # here are chordless.  After finding digons, we remove their edges from F
    # to avoid traversing them in the search for chordless cycles.
    if directed:
        for u, Fu in F.adj.items():
            digons = [[u, v] for v in Fu if F.has_edge(v, u)]
            yield from digons
            F.remove_edges_from(digons)
            F.remove_edges_from(e[::-1] for e in digons)

    if length_bound is not None and length_bound == 2:
        return

    # Now, we prepare to search for cycles.  We have removed all cycles of
    # lengths 1 and 2, so F is a simple graph or simple digraph.  We repeatedly
    # separate digraphs into their strongly connected components, and undirected
    # graphs into their biconnected components.  For each component, we pick a
    # node v, search for chordless cycles based at each "stem" (u, v, w), and
    # then remove v from that component before separating the graph again.
    if directed:
        separate = nx.strongly_connected_components

        # Directed stems look like (u -> v -> w), so we use the product of
        # predecessors of v with successors of v.
        def stems(C, v):
            for u, w in product(C.pred[v], C.succ[v]):
                if not G.has_edge(u, w):  # omit stems with acyclic chords
                    yield [u, v, w], F.has_edge(w, u)

    else:
        separate = nx.biconnected_components

        # Undirected stems look like (u ~ v ~ w), but we must not also search
        # (w ~ v ~ u), so we use combinations of v's neighbors of length 2.
        def stems(C, v):
            yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2))

    components = [c for c in separate(F) if len(c) > 2]
    while components:
        c = components.pop()
        v = next(iter(c))
        Fc = F.subgraph(c)
        # Neighborhood caches are built lazily, only when a non-triangle stem
        # actually needs a full search.
        Fcc = Bcc = None
        for S, is_triangle in stems(Fc, v):
            if is_triangle:
                yield S
            else:
                if Fcc is None:
                    Fcc = _NeighborhoodCache(Fc)
                    Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c))
                yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound)

        # Remove v and re-separate what remains of this component.
        components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2)
def _chordless_cycle_search(F, B, path, length_bound):
    """The main loop for chordless cycle enumeration.

    This algorithm is strongly inspired by that of Dias et al [1]_.  It has been
    modified in the following ways:

    1. Recursion is avoided, per Python's limitations

    2. The labeling function is not necessary, because the starting paths
        are chosen (and deleted from the host graph) to prevent multiple
        occurrences of the same path

    3. The search is optionally bounded at a specified length

    4. Support for directed graphs is provided by extending cycles along
        forward edges, and blocking nodes along forward and reverse edges

    5. Support for multigraphs is provided by omitting digons from the set
        of forward edges

    Parameters
    ----------
    F : _NeighborhoodCache
        A graph of forward edges to follow in constructing cycles

    B : _NeighborhoodCache
        A graph of blocking edges to prevent the production of chordless cycles

    path : list
        A cycle prefix.  All cycles generated will begin with this prefix.
        NOTE(review): assumed to hold at least 3 nodes (a stem [u, v, w]);
        mutated in place during the search.

    length_bound : int
        A length bound.  All cycles generated will have length at most length_bound.


    Yields
    ------
    list of nodes
        Each cycle is represented by a list of nodes along the cycle.

    References
    ----------
    .. [1] Efficient enumeration of chordless cycles
       E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi
       https://arxiv.org/abs/1309.1051

    """
    # blocked[v] counts how many path nodes "see" v via blocking edges; a node
    # is enterable only when exactly its predecessor on the path blocks it
    # (count == 1), which is what keeps the constructed cycle chordless.
    blocked = defaultdict(int)
    target = path[0]
    blocked[path[1]] = 1
    for w in path[1:]:
        for v in B[w]:
            blocked[v] += 1

    # DFS from the last node of the stem.
    stack = [iter(F[path[2]])]
    while stack:
        nbrs = stack[-1]
        for w in nbrs:
            if blocked[w] == 1 and (length_bound is None or len(path) < length_bound):
                Fw = F[w]
                if target in Fw:
                    # Closing forward edge exists: report the chordless cycle.
                    yield path + [w]
                else:
                    Bw = B[w]
                    if target in Bw:
                        # A blocking edge back to the target would be a chord.
                        continue
                    # Descend: mark everything w blocks, then extend the path.
                    for v in Bw:
                        blocked[v] += 1
                    path.append(w)
                    stack.append(iter(Fw))
                    break
        else:
            # Backtrack: undo the block counts contributed by the popped node.
            stack.pop()
            for v in B[path.pop()]:
                blocked[v] -= 1
@not_implemented_for("undirected")
@nx._dispatchable(mutates_input=True)
def recursive_simple_cycles(G):
    """Find simple cycles (elementary circuits) of a directed graph.

    A `simple cycle`, or `elementary circuit`, is a closed path where
    no node appears twice.  Two elementary circuits are distinct if they
    are not cyclic permutations of each other.

    This version uses a recursive algorithm to build a list of cycles.
    You should probably use the iterator version called simple_cycles().
    Warning: This recursive version uses lots of RAM!
    It appears in NetworkX for pedagogical value.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph.  NOTE: mutated — self-loop edges are removed
        (see ``mutates_input=True`` on the dispatcher).

    Returns
    -------
    A list of cycles, where each cycle is represented by a list of nodes
    along the cycle.

    Example:

    >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)]
    >>> G = nx.DiGraph(edges)
    >>> nx.recursive_simple_cycles(G)
    [[0], [2], [0, 1, 2], [0, 2], [1, 2]]

    Notes
    -----
    The implementation follows pp. 79-80 in [1]_.

    The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$
    elementary circuits.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       https://doi.org/10.1137/0204007

    See Also
    --------
    simple_cycles, cycle_basis
    """

    # Jon Olav Vik, 2010-08-09
    def _unblock(thisnode):
        """Recursively unblock and remove nodes from B[thisnode]."""
        if blocked[thisnode]:
            blocked[thisnode] = False
            while B[thisnode]:
                _unblock(B[thisnode].pop())

    def circuit(thisnode, startnode, component):
        # Johnson's CIRCUIT: extend the path from thisnode, returning True if
        # any elementary circuit through thisnode was closed.
        closed = False  # set to True if elementary path is closed
        path.append(thisnode)
        blocked[thisnode] = True
        for nextnode in component[thisnode]:  # direct successors of thisnode
            if nextnode == startnode:
                result.append(path[:])
                closed = True
            elif not blocked[nextnode]:
                if circuit(nextnode, startnode, component):
                    closed = True
        if closed:
            _unblock(thisnode)
        else:
            # No circuit through thisnode: defer its unblocking until a
            # successor is unblocked.
            for nextnode in component[thisnode]:
                if thisnode not in B[nextnode]:  # TODO: use set for speedup?
                    B[nextnode].append(thisnode)
        path.pop()  # remove thisnode from path
        return closed

    path = []  # stack of nodes in current path
    blocked = defaultdict(bool)  # vertex: blocked from search?
    B = defaultdict(list)  # graph portions that yield no elementary circuit
    result = []  # list to accumulate the circuits found

    # Johnson's algorithm exclude self cycle edges like (v, v)
    # To be backward compatible, we record those cycles in advance
    # and then remove from subG
    for v in G:
        if G.has_edge(v, v):
            result.append([v])
            G.remove_edge(v, v)

    # Johnson's algorithm requires some ordering of the nodes.
    # They might not be sortable so we assign an arbitrary ordering.
    ordering = dict(zip(G, range(len(G))))
    for s in ordering:
        # Build the subgraph induced by s and following nodes in the ordering
        subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s])
        # Find the strongly connected component in the subgraph
        # that contains the least node according to the ordering
        strongcomp = nx.strongly_connected_components(subgraph)
        mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns))
        component = G.subgraph(mincomp)
        if len(component) > 1:
            # smallest node in the component according to the ordering
            startnode = min(component, key=ordering.__getitem__)
            # Reset search state for this component before recursing.
            for node in component:
                blocked[node] = False
                B[node][:] = []
            dummy = circuit(startnode, startnode, component)
    return result
@nx._dispatchable
def find_cycle(G, source=None, orientation=None):
    """Returns a cycle found via depth-first traversal.

    The cycle is a list of edges indicating the cyclic path.
    Orientation of directed edges is controlled by `orientation`.

    Parameters
    ----------
    G : graph
        A directed/undirected graph/multigraph.

    source : node, list of nodes
        The node from which the traversal begins.  If None, then a source
        is chosen arbitrarily and repeatedly until all edges from each node in
        the graph are searched.

    orientation : None | 'original' | 'reverse' | 'ignore' (default: None)
        For directed graphs and directed multigraphs, edge traversals need not
        respect the original orientation of the edges.
        When set to 'reverse' every edge is traversed in the reverse direction.
        When set to 'ignore', every edge is treated as undirected.
        When set to 'original', every edge is treated as directed.
        In all three cases, the yielded edge tuples add a last entry to
        indicate the direction in which that edge was traversed.
        If orientation is None, the yielded edge has no direction indicated.
        The direction is respected, but not reported.

    Returns
    -------
    edges : directed edges
        A list of directed edges indicating the path taken for the loop.
        If no cycle is found, then an exception is raised.
        For graphs, an edge is of the form `(u, v)` where `u` and `v`
        are the tail and head of the edge as determined by the traversal.
        For multigraphs, an edge is of the form `(u, v, key)`, where `key` is
        the key of the edge. When the graph is directed, then `u` and `v`
        are always in the order of the actual directed edge.
        If orientation is not None then the edge tuple is extended to include
        the direction of traversal ('forward' or 'reverse') on that edge.

    Raises
    ------
    NetworkXNoCycle
        If no cycle was found.

    Examples
    --------
    In this example, we construct a DAG and find, in the first call, that there
    are no directed cycles, and so an exception is raised. In the second call,
    we ignore edge orientations and find that there is an undirected cycle.
    Note that the second call finds a directed cycle while effectively
    traversing an undirected graph, and so, we found an "undirected cycle".
    This means that this DAG structure does not form a directed tree (which
    is also known as a polytree).

    >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    >>> nx.find_cycle(G, orientation="original")
    Traceback (most recent call last):
        ...
    networkx.exception.NetworkXNoCycle: No cycle found.
    >>> list(nx.find_cycle(G, orientation="ignore"))
    [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]

    See Also
    --------
    simple_cycles
    """
    # tailhead maps an edge tuple from edge_dfs to its effective (tail, head)
    # under the requested orientation.
    if not G.is_directed() or orientation in (None, "original"):

        def tailhead(edge):
            return edge[:2]

    elif orientation == "reverse":

        def tailhead(edge):
            return edge[1], edge[0]

    elif orientation == "ignore":

        def tailhead(edge):
            # edge_dfs appends the traversal direction as the last entry.
            if edge[-1] == "reverse":
                return edge[1], edge[0]
            return edge[:2]

    explored = set()
    cycle = []
    final_node = None
    for start_node in G.nbunch_iter(source):
        if start_node in explored:
            # No loop is possible.
            continue

        edges = []
        # All nodes seen in this iteration of edge_dfs
        seen = {start_node}
        # Nodes in active path.
        active_nodes = {start_node}
        previous_head = None

        for edge in nx.edge_dfs(G, start_node, orientation):
            # Determine if this edge is a continuation of the active path.
            tail, head = tailhead(edge)
            if head in explored:
                # Then we've already explored it. No loop is possible.
                continue
            if previous_head is not None and tail != previous_head:
                # This edge results from backtracking.
                # Pop until we get a node whose head equals the current tail.
                # So for example, we might have:
                #  (0, 1), (1, 2), (2, 3), (1, 4)
                # which must become:
                #  (0, 1), (1, 4)
                while True:
                    try:
                        popped_edge = edges.pop()
                    except IndexError:
                        # Popped everything: restart the active path at tail.
                        edges = []
                        active_nodes = {tail}
                        break
                    else:
                        popped_head = tailhead(popped_edge)[1]
                        active_nodes.remove(popped_head)

                    if edges:
                        last_head = tailhead(edges[-1])[1]
                        if tail == last_head:
                            break
            edges.append(edge)

            if head in active_nodes:
                # We have a loop!
                cycle.extend(edges)
                final_node = head
                break
            else:
                seen.add(head)
                active_nodes.add(head)
                previous_head = head

        if cycle:
            break
        else:
            explored.update(seen)

    else:
        # for/else: the loop over start nodes exhausted without break, so no
        # cycle exists anywhere in the searched graph.
        assert len(cycle) == 0
        raise nx.exception.NetworkXNoCycle("No cycle found.")

    # We now have a list of edges which ends on a cycle.
    # So we need to remove from the beginning edges that are not relevant.

    for i, edge in enumerate(cycle):
        tail, head = tailhead(edge)
        if tail == final_node:
            break

    return cycle[i:]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight")
def minimum_cycle_basis(G, weight=None):
    """Returns a minimum weight cycle basis for G

    Minimum weight means a cycle basis for which the total weight
    (length for unweighted graphs) of all the cycles is minimum.

    Parameters
    ----------
    G : NetworkX Graph
    weight: string
        name of the edge attribute to use for edge weights

    Returns
    -------
    A list of cycle lists.  Each cycle list is a list of nodes
    which forms a cycle (loop) in G. Note that the nodes are not
    necessarily returned in a order by which they appear in the cycle

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_cycle(G, [0, 1, 2, 3])
    >>> nx.add_cycle(G, [0, 3, 4, 5])
    >>> nx.minimum_cycle_basis(G)
    [[5, 4, 3, 0], [3, 2, 1, 0]]

    References:
        [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for
        Minimum Cycle Basis of Graphs."
        http://link.springer.com/article/10.1007/s00453-007-9064-z
        [2] de Pina, J. 1995. Applications of shortest path methods.
        Ph.D. thesis, University of Amsterdam, Netherlands

    See Also
    --------
    simple_cycles, cycle_basis
    """
    # A cycle basis of G is the union of bases of its connected components,
    # so solve each component independently and concatenate the results.
    basis = []
    for component_nodes in nx.connected_components(G):
        basis.extend(_min_cycle_basis(G.subgraph(component_nodes), weight))
    return basis
def _min_cycle_basis(G, weight):
    # de Pina's method: maintain a basis of "witness" edge-sets (set_orth),
    # each orthogonal (over GF(2)) to all cycles found so far; repeatedly pull
    # one witness and find the minimum-weight cycle not orthogonal to it.
    cb = []
    # We extract the edges not in a spanning tree. We do not really need a
    # *minimum* spanning tree. That is why we call the next function with
    # weight=None. Depending on implementation, it may be faster as well
    tree_edges = list(nx.minimum_spanning_edges(G, weight=None, data=False))
    # Chords in either orientation are excluded from the tree edge set.
    chords = G.edges - tree_edges - {(v, u) for u, v in tree_edges}

    # We maintain a set of vectors orthogonal to sofar found cycles
    set_orth = [{edge} for edge in chords]
    while set_orth:
        base = set_orth.pop()
        # kth cycle is "parallel" to kth vector in set_orth
        cycle_edges = _min_cycle(G, base, weight)
        cb.append([v for u, v in cycle_edges])

        # now update set_orth so that k+1,k+2... th elements are
        # orthogonal to the newly found cycle, as per [p. 336, 1]
        # (symmetric difference with `base` whenever the GF(2) inner product
        # of `orth` with the new cycle is odd; edges may appear reversed).
        set_orth = [
            (
                {e for e in orth if e not in base if e[::-1] not in base}
                | {e for e in base if e not in orth if e[::-1] not in orth}
            )
            if sum((e in orth or e[::-1] in orth) for e in cycle_edges) % 2
            else orth
            for orth in set_orth
        ]
    return cb
def _min_cycle(G, orth, weight):
    """
    Computes the minimum weight cycle in G,
    orthogonal to the vector orth as per [p. 338, 1]
    Use (u, 1) to indicate the lifted copy of u (denoted u' in paper).

    Returns the cycle as a list of (u, v) edge tuples.
    """
    # Build the "lifted" double cover Gi: two copies of every node; edges in
    # `orth` cross between the copies, all others stay within a copy.  A
    # shortest n -> (n, 1) path in Gi projects to a cycle in G with an odd
    # number of `orth` edges (i.e. not orthogonal to `orth`).
    Gi = nx.Graph()

    # Add 2 copies of each edge in G to Gi.
    # If edge is in orth, add cross edge; otherwise in-plane edge
    for u, v, wt in G.edges(data=weight, default=1):
        if (u, v) in orth or (v, u) in orth:
            Gi.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt)
        else:
            Gi.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt)

    # find the shortest length in Gi between n and (n, 1) for each n
    # Note: Use "Gi_weight" for name of weight attribute
    spl = nx.shortest_path_length
    lift = {n: spl(Gi, source=n, target=(n, 1), weight="Gi_weight") for n in G}

    # Now compute that short path in Gi, which translates to a cycle in G
    start = min(lift, key=lift.get)
    end = (start, 1)
    min_path_i = nx.shortest_path(Gi, source=start, target=end, weight="Gi_weight")

    # Now we obtain the actual path, re-map nodes in Gi to those in G
    min_path = [n if n in G else n[0] for n in min_path_i]

    # Now remove the edges that occur two times
    # two passes: flag which edges get kept, then build it
    edgelist = list(pairwise(min_path))
    edgeset = set()
    for e in edgelist:
        if e in edgeset:
            edgeset.remove(e)
        elif e[::-1] in edgeset:
            edgeset.remove(e[::-1])
        else:
            edgeset.add(e)

    # Second pass preserves traversal order while dropping cancelled pairs.
    min_edgelist = []
    for e in edgelist:
        if e in edgeset:
            min_edgelist.append(e)
            edgeset.remove(e)
        elif e[::-1] in edgeset:
            min_edgelist.append(e[::-1])
            edgeset.remove(e[::-1])

    return min_edgelist
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def girth(G):
    """Return the girth (length of the shortest cycle) of the graph.

    The girth of an acyclic graph is defined to be infinity. The
    implementation follows the BFS-based approach described on the
    Wikipedia page [1]_ and runs in O(mn) time on a graph with m edges
    and n nodes.

    Parameters
    ----------
    G : NetworkX Graph

    Returns
    -------
    int or math.inf

    Examples
    --------
    >>> nx.girth(nx.chvatal_graph())
    4
    >>> nx.girth(nx.petersen_graph())
    5
    >>> nx.girth(nx.heawood_graph())
    6
    >>> nx.girth(nx.pappus_graph())
    6
    >>> nx.girth(nx.path_graph(5))
    inf

    References
    ----------
    .. [1] `Wikipedia: Girth <https://en.wikipedia.org/wiki/Girth_(graph_theory)>`_

    """
    best = limit = inf
    TREE = nx.algorithms.traversal.breadth_first_search.TREE_EDGE
    LEVEL = nx.algorithms.traversal.breadth_first_search.LEVEL_EDGE
    for source in G:
        # BFS from `source`, recording distances; exploration stops once we
        # are deeper than could possibly improve on the current best cycle.
        dist = {source: 0}
        for u, v, kind in nx.bfs_labeled_edges(G, source):
            d = dist[u]
            if d > limit:
                break
            if kind is TREE:
                dist[v] = d + 1
                continue
            # A level edge (u, v) closes an odd cycle of length 2d + 1;
            # a forward edge closes an even cycle of length 2d + 2.
            odd = kind is LEVEL
            cycle_len = 2 * d + 2 - odd
            if cycle_len < best:
                best = cycle_len
                limit = d - odd

    return best
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/d_separation.py
ADDED
@@ -0,0 +1,722 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Algorithm for testing d-separation in DAGs.
|
3 |
+
|
4 |
+
*d-separation* is a test for conditional independence in probability
|
5 |
+
distributions that can be factorized using DAGs. It is a purely
|
6 |
+
graphical test that uses the underlying graph and makes no reference
|
7 |
+
to the actual distribution parameters. See [1]_ for a formal
|
8 |
+
definition.
|
9 |
+
|
10 |
+
The implementation is based on the conceptually simple linear time
|
11 |
+
algorithm presented in [2]_. Refer to [3]_, [4]_ for a couple of
|
12 |
+
alternative algorithms.
|
13 |
+
|
14 |
+
The functional interface in NetworkX consists of three functions:
|
15 |
+
|
16 |
+
- `find_minimal_d_separator` returns a minimal d-separator set ``z``.
|
17 |
+
That is, removing any node or nodes from it makes it no longer a d-separator.
|
18 |
+
- `is_d_separator` checks if a given set is a d-separator.
|
19 |
+
- `is_minimal_d_separator` checks if a given set is a minimal d-separator.
|
20 |
+
|
21 |
+
D-separators
|
22 |
+
------------
|
23 |
+
|
24 |
+
Here, we provide a brief overview of d-separation and related concepts that
|
25 |
+
are relevant for understanding it:
|
26 |
+
|
27 |
+
The ideas of d-separation and d-connection relate to paths being open or blocked.
|
28 |
+
|
29 |
+
- A "path" is a sequence of nodes connected in order by edges. Unlike for most
|
30 |
+
graph theory analysis, the direction of the edges is ignored. Thus the path
|
31 |
+
can be thought of as a traditional path on the undirected version of the graph.
|
32 |
+
- A "candidate d-separator" ``z`` is a set of nodes being considered as
|
33 |
+
possibly blocking all paths between two prescribed sets ``x`` and ``y`` of nodes.
|
34 |
+
We refer to each node in the candidate d-separator as "known".
|
35 |
+
- A "collider" node on a path is a node that is a successor of its two neighbor
|
36 |
+
nodes on the path. That is, ``c`` is a collider if the edge directions
|
37 |
+
along the path look like ``... u -> c <- v ...``.
|
38 |
+
- If a collider node or any of its descendants are "known", the collider
|
39 |
+
is called an "open collider". Otherwise it is a "blocking collider".
|
40 |
+
- Any path can be "blocked" in two ways. If the path contains a "known" node
|
41 |
+
that is not a collider, the path is blocked. Also, if the path contains a
|
42 |
+
collider that is not a "known" node, the path is blocked.
|
43 |
+
- A path is "open" if it is not blocked. That is, it is open if every node is
|
44 |
+
either an open collider or not a "known". Said another way, every
|
45 |
+
"known" in the path is a collider and every collider is open (has a
|
46 |
+
"known" as an inclusive descendant). The concept of "open path" is meant to
|
47 |
+
demonstrate a probabilistic conditional dependence between two nodes given
|
48 |
+
prescribed knowledge ("known" nodes).
|
49 |
+
- Two sets ``x`` and ``y`` of nodes are "d-separated" by a set of nodes ``z``
|
50 |
+
if all paths between nodes in ``x`` and nodes in ``y`` are blocked. That is,
|
51 |
+
if there are no open paths from any node in ``x`` to any node in ``y``.
|
52 |
+
Such a set ``z`` is a "d-separator" of ``x`` and ``y``.
|
53 |
+
- A "minimal d-separator" is a d-separator ``z`` for which no node or subset
|
54 |
+
of nodes can be removed with it still being a d-separator.
|
55 |
+
|
56 |
+
The d-separator blocks some paths between ``x`` and ``y`` but opens others.
|
57 |
+
Nodes in the d-separator block paths if the nodes are not colliders.
|
58 |
+
But if a collider or its descendant nodes are in the d-separation set, the
|
59 |
+
colliders are open, allowing a path through that collider.
|
60 |
+
|
61 |
+
Illustration of D-separation with examples
|
62 |
+
------------------------------------------
|
63 |
+
|
64 |
+
A pair of two nodes, ``u`` and ``v``, are d-connected if there is a path
|
65 |
+
from ``u`` to ``v`` that is not blocked. That means, there is an open
|
66 |
+
path from ``u`` to ``v``.
|
67 |
+
|
68 |
+
For example, if the d-separating set is the empty set, then the following paths are
|
69 |
+
open between ``u`` and ``v``:
|
70 |
+
|
71 |
+
- u <- n -> v
|
72 |
+
- u -> w -> ... -> n -> v
|
73 |
+
|
74 |
+
If on the other hand, ``n`` is in the d-separating set, then ``n`` blocks
|
75 |
+
those paths between ``u`` and ``v``.
|
76 |
+
|
77 |
+
Colliders block a path if they and their descendants are not included
|
78 |
+
in the d-separating set. An example of a path that is blocked when the
|
79 |
+
d-separating set is empty is:
|
80 |
+
|
81 |
+
- u -> w -> ... -> n <- v
|
82 |
+
|
83 |
+
The node ``n`` is a collider in this path and is not in the d-separating set.
|
84 |
+
So ``n`` blocks this path. However, if ``n`` or a descendant of ``n`` is
|
85 |
+
included in the d-separating set, then the path through the collider
|
86 |
+
at ``n`` (... -> n <- ...) is "open".
|
87 |
+
|
88 |
+
D-separation is concerned with blocking all paths between nodes from ``x`` to ``y``.
|
89 |
+
A d-separating set between ``x`` and ``y`` is one where all paths are blocked.
|
90 |
+
|
91 |
+
D-separation and its applications in probability
|
92 |
+
------------------------------------------------
|
93 |
+
|
94 |
+
D-separation is commonly used in probabilistic causal-graph models. D-separation
|
95 |
+
connects the idea of probabilistic "dependence" with separation in a graph. If
|
96 |
+
one assumes the causal Markov condition [5]_, (every node is conditionally
|
97 |
+
independent of its non-descendants, given its parents) then d-separation implies
|
98 |
+
conditional independence in probability distributions.
|
99 |
+
Symmetrically, d-connection implies dependence.
|
100 |
+
|
101 |
+
The intuition is as follows. The edges on a causal graph indicate which nodes
|
102 |
+
influence the outcome of other nodes directly. An edge from u to v
|
103 |
+
implies that the outcome of event ``u`` influences the probabilities for
|
104 |
+
the outcome of event ``v``. Certainly knowing ``u`` changes predictions for ``v``.
|
105 |
+
But also knowing ``v`` changes predictions for ``u``. The outcomes are dependent.
|
106 |
+
Furthermore, an edge from ``v`` to ``w`` would mean that ``w`` and ``v`` are dependent
|
107 |
+
and thus that ``u`` could indirectly influence ``w``.
|
108 |
+
|
109 |
+
Without any knowledge about the system (candidate d-separating set is empty)
|
110 |
+
a causal graph ``u -> v -> w`` allows all three nodes to be dependent. But
|
111 |
+
if we know the outcome of ``v``, the conditional probabilities of outcomes for
|
112 |
+
``u`` and ``w`` are independent of each other. That is, once we know the outcome
|
113 |
+
for ``v``, the probabilities for ``w`` do not depend on the outcome for ``u``.
|
114 |
+
This is the idea behind ``v`` blocking the path if it is "known" (in the candidate
|
115 |
+
d-separating set).
|
116 |
+
|
117 |
+
The same argument works whether the direction of the edges are both
|
118 |
+
left-going and when both arrows head out from the middle. Having a "known"
|
119 |
+
node on a path blocks the collider-free path because those relationships
|
120 |
+
make the conditional probabilities independent.
|
121 |
+
|
122 |
+
The direction of the causal edges does impact dependence precisely in the
|
123 |
+
case of a collider e.g. ``u -> v <- w``. In that situation, both ``u`` and ``w``
|
124 |
+
influence ``v``. But they do not directly influence each other. So without any
|
125 |
+
knowledge of any outcomes, ``u`` and ``w`` are independent. That is the idea behind
|
126 |
+
colliders blocking the path. But, if ``v`` is known, the conditional probabilities
|
127 |
+
of ``u`` and ``w`` can be dependent. This is the heart of Berkson's Paradox [6]_.
|
128 |
+
For example, suppose ``u`` and ``w`` are boolean events (they either happen or do not)
|
129 |
+
and ``v`` represents the outcome "at least one of ``u`` and ``w`` occur". Then knowing
|
130 |
+
``v`` is true makes the conditional probabilities of ``u`` and ``w`` dependent.
|
131 |
+
Essentially, knowing that at least one of them is true raises the probability of
|
132 |
+
each. But further knowledge that ``w`` is true (or false) changes the conditional
|
133 |
+
probability of ``u`` to either the original value or 1. So the conditional
|
134 |
+
probability of ``u`` depends on the outcome of ``w`` even though there is no
|
135 |
+
causal relationship between them. When a collider is known, dependence can
|
136 |
+
occur across paths through that collider. This is the reason open colliders
|
137 |
+
do not block paths.
|
138 |
+
|
139 |
+
Furthermore, even if ``v`` is not "known", if one of its descendants is "known"
|
140 |
+
we can use that information to know more about ``v`` which again makes
|
141 |
+
``u`` and ``w`` potentially dependent. Suppose the chance of ``n`` occurring
|
142 |
+
is much higher when ``v`` occurs ("at least one of ``u`` and ``w`` occur").
|
143 |
+
Then if we know ``n`` occurred, it is more likely that ``v`` occurred and that
|
144 |
+
makes the chance of ``u`` and ``w`` dependent. This is the idea behind why
|
145 |
+
a collider does not block a path if any descendant of the collider is "known".
|
146 |
+
|
147 |
+
When two sets of nodes ``x`` and ``y`` are d-separated by a set ``z``,
|
148 |
+
it means that given the outcomes of the nodes in ``z``, the probabilities
|
149 |
+
of outcomes of the nodes in ``x`` are independent of the outcomes of the
|
150 |
+
nodes in ``y`` and vice versa.
|
151 |
+
|
152 |
+
Examples
|
153 |
+
--------
|
154 |
+
A Hidden Markov Model with 5 observed states and 5 hidden states
|
155 |
+
where the hidden states have causal relationships forming
|
156 |
+
a path results in the following causal network. We check that
|
157 |
+
early states along the path are separated from late states in
|
158 |
+
the path by the d-separator of the middle hidden state.
|
159 |
+
Thus if we condition on the middle hidden state, the early
|
160 |
+
state probabilities are independent of the late state outcomes.
|
161 |
+
|
162 |
+
>>> G = nx.DiGraph()
|
163 |
+
>>> G.add_edges_from(
|
164 |
+
... [
|
165 |
+
... ("H1", "H2"),
|
166 |
+
... ("H2", "H3"),
|
167 |
+
... ("H3", "H4"),
|
168 |
+
... ("H4", "H5"),
|
169 |
+
... ("H1", "O1"),
|
170 |
+
... ("H2", "O2"),
|
171 |
+
... ("H3", "O3"),
|
172 |
+
... ("H4", "O4"),
|
173 |
+
... ("H5", "O5"),
|
174 |
+
... ]
|
175 |
+
... )
|
176 |
+
>>> x, y, z = ({"H1", "O1"}, {"H5", "O5"}, {"H3"})
|
177 |
+
>>> nx.is_d_separator(G, x, y, z)
|
178 |
+
True
|
179 |
+
>>> nx.is_minimal_d_separator(G, x, y, z)
|
180 |
+
True
|
181 |
+
>>> nx.is_minimal_d_separator(G, x, y, z | {"O3"})
|
182 |
+
False
|
183 |
+
>>> z = nx.find_minimal_d_separator(G, x | y, {"O2", "O3", "O4"})
|
184 |
+
>>> z == {"H2", "H4"}
|
185 |
+
True
|
186 |
+
|
187 |
+
If no minimal_d_separator exists, `None` is returned
|
188 |
+
|
189 |
+
>>> other_z = nx.find_minimal_d_separator(G, x | y, {"H2", "H3"})
|
190 |
+
>>> other_z is None
|
191 |
+
True
|
192 |
+
|
193 |
+
|
194 |
+
References
|
195 |
+
----------
|
196 |
+
|
197 |
+
.. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press.
|
198 |
+
|
199 |
+
.. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks.
|
200 |
+
Cambridge: Cambridge University Press.
|
201 |
+
|
202 |
+
.. [3] Shachter, Ross D. "Bayes-ball: The rational pastime (for
|
203 |
+
determining irrelevance and requisite information in belief networks
|
204 |
+
and influence diagrams)." In Proceedings of the Fourteenth Conference
|
205 |
+
on Uncertainty in Artificial Intelligence (UAI), (pp. 480–487). 1998.
|
206 |
+
|
207 |
+
.. [4] Koller, D., & Friedman, N. (2009).
|
208 |
+
Probabilistic graphical models: principles and techniques. The MIT Press.
|
209 |
+
|
210 |
+
.. [5] https://en.wikipedia.org/wiki/Causal_Markov_condition
|
211 |
+
|
212 |
+
.. [6] https://en.wikipedia.org/wiki/Berkson%27s_paradox
|
213 |
+
|
214 |
+
"""
|
215 |
+
|
216 |
+
from collections import deque
|
217 |
+
from itertools import chain
|
218 |
+
|
219 |
+
import networkx as nx
|
220 |
+
from networkx.utils import UnionFind, not_implemented_for
|
221 |
+
|
222 |
+
# Public API of this module.
# NOTE(review): ``d_separated`` and ``minimal_d_separator`` are not defined in
# the visible portion of this file -- presumably legacy aliases defined further
# down; verify before removing them from this list.
__all__ = [
    "is_d_separator",
    "is_minimal_d_separator",
    "find_minimal_d_separator",
    "d_separated",
    "minimal_d_separator",
]
|
229 |
+
|
230 |
+
|
231 |
+
@not_implemented_for("undirected")
@nx._dispatchable
def is_d_separator(G, x, y, z):
    """Return whether node sets `x` and `y` are d-separated by `z`.

    Parameters
    ----------
    G : nx.DiGraph
        A NetworkX DAG.

    x : node or set of nodes
        First node or set of nodes in `G`.

    y : node or set of nodes
        Second node or set of nodes in `G`.

    z : node or set of nodes
        Potential separator (set of conditioning nodes in `G`). Can be empty set.

    Returns
    -------
    b : bool
        A boolean that is true if `x` is d-separated from `y` given `z` in `G`.

    Raises
    ------
    NetworkXError
        The *d-separation* test is commonly used on disjoint sets of
        nodes in acyclic directed graphs. Accordingly, the algorithm
        raises a :exc:`NetworkXError` if the node sets are not
        disjoint or if the input graph is not a DAG.

    NodeNotFound
        If any of the input nodes are not found in the graph,
        a :exc:`NodeNotFound` exception is raised.

    Notes
    -----
    A d-separating set blocks every path between the two sets. A node of
    `z` on a path blocks that path unless the node is a collider
    (``... -> c <- ...``); a collider blocks a path unless it, or one of
    its descendants, belongs to `z`.

    https://en.wikipedia.org/wiki/Bayesian_network#d-separation
    """
    try:
        if x in G:
            x = {x}
        if y in G:
            y = {y}
        if z in G:
            z = {z}

        overlap = x & y or x & z or y & z
        if overlap:
            raise nx.NetworkXError(
                f"The sets are not disjoint, with intersection {overlap}"
            )

        missing = (x | y | z) - G.nodes
        if missing:
            raise nx.NodeNotFound(f"The node(s) {missing} are not found in G")
    except TypeError:
        raise nx.NodeNotFound("One of x, y, or z is not a node or a set of nodes in G")

    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    # Bayes-ball style walk with two queues: one for nodes entered along a
    # forward (->) edge, one for nodes entered along a backward (<-) edge.
    down_queue = deque()
    down_seen = set()

    # The source nodes behave as if entered along backward edges.
    up_queue = deque(x)
    up_seen = set()

    # Union of z, x and every ancestor of a node of x -- used to decide
    # whether a collider ``-> cur <-`` is opened by something in z.
    an_x_or_z = z | x
    for source in x:
        an_x_or_z |= nx.ancestors(G, source)

    while down_queue or up_queue:
        if up_queue:
            cur = up_queue.popleft()
            up_seen.add(cur)
            if cur in y:
                return False
            if cur in z:
                continue

            # leave against <- edges
            up_queue.extend(G.pred[cur].keys() - up_seen)
            # leave along -> edges
            down_queue.extend(G.succ[cur].keys() - down_seen)

        if down_queue:
            cur = down_queue.popleft()
            down_seen.add(cur)
            if cur in y:
                return False

            # Consider whether -> cur <- is opened due to an ancestor of cur in z
            if cur in an_x_or_z:
                # leave against <- edges
                up_queue.extend(G.pred[cur].keys() - up_seen)
                if cur not in z:
                    # leave along -> edges
                    down_queue.extend(G.succ[cur].keys() - down_seen)

    return True
|
337 |
+
|
338 |
+
|
339 |
+
@not_implemented_for("undirected")
@nx._dispatchable
def find_minimal_d_separator(G, x, y, *, included=None, restricted=None):
    """Returns a minimal d-separating set between `x` and `y` if possible

    A d-separating set blocks all paths between the node sets `x` and `y`.
    The returned set is "minimal": removing any node from it destroys the
    d-separating property. If no d-separator of `x` and `y` exists (subject
    to the `included`/`restricted` constraints), ``None`` is returned.
    Minimal d-separators are not necessarily unique; one is returned.

    Uses the algorithm presented in [1]_. The complexity of the algorithm
    is :math:`O(m)`, where :math:`m` stands for the number of edges in
    the subgraph of G consisting of only the ancestors of `x` and `y`.
    For full details, see [1]_.

    Parameters
    ----------
    G : graph
        A networkx DAG.
    x : set | node
        A node or set of nodes in the graph.
    y : set | node
        A node or set of nodes in the graph.
    included : set | node | None
        A node or set of nodes which must be included in the found separating set,
        default is None, which means the empty set.
    restricted : set | node | None
        Restricted node or set of nodes to consider. Only these nodes can be in
        the found separating set, default is None meaning all nodes in ``G``.

    Returns
    -------
    z : set | None
        The minimal d-separating set, if at least one d-separating set exists,
        otherwise None.

    Raises
    ------
    NetworkXError
        Raises a :exc:`NetworkXError` if the input graph is not a DAG
        or if node sets `x`, `y`, and `included` are not disjoint.

    NodeNotFound
        If any of the input nodes are not found in the graph,
        a :exc:`NodeNotFound` exception is raised.

    References
    ----------
    .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
       minimal d-separators in linear time and applications." In
       Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.
    """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    try:
        if x in G:
            x = {x}
        if y in G:
            y = {y}

        if included is None:
            included = set()
        elif included in G:
            included = {included}

        if restricted is None:
            restricted = set(G)
        elif restricted in G:
            restricted = {restricted}

        missing = (x | y | included | restricted) - G.nodes
        if missing:
            raise nx.NodeNotFound(f"The node(s) {missing} are not found in G")
    except TypeError:
        raise nx.NodeNotFound(
            "One of x, y, included or restricted is not a node or set of nodes in G"
        )

    if not included <= restricted:
        raise nx.NetworkXError(
            f"Included nodes {included} must be in restricted nodes {restricted}"
        )

    overlap = x & y or x & included or y & included
    if overlap:
        raise nx.NetworkXError(
            f"The sets x, y, included are not disjoint. Overlap: {overlap}"
        )

    # Only the ancestral closure of x, y and included matters for
    # d-separation between x and y.
    core = x | y | included
    ancestral = core.union(*[nx.ancestors(G, n) for n in core])

    # Largest candidate separator permitted by `restricted`.
    candidate = restricted & (ancestral - (x | y))

    reach_x = _reachable(G, x, ancestral, candidate)
    if reach_x & y:
        # Even the maximal candidate fails to block x from y: no separator.
        return None

    # Prune to nodes reachable from x, then to nodes reachable from y,
    # always keeping the `included` nodes.
    pruned = candidate & (reach_x | included)
    reach_y = _reachable(G, y, ancestral, pruned)
    return pruned & (reach_y | included)
|
444 |
+
|
445 |
+
|
446 |
+
@not_implemented_for("undirected")
@nx._dispatchable
def is_minimal_d_separator(G, x, y, z, *, included=None, restricted=None):
    """Determine if `z` is a minimal d-separator for `x` and `y`.

    A d-separator, `z`, in a DAG is a set of nodes that blocks
    all paths from nodes in set `x` to nodes in set `y`.
    A minimal d-separator is a d-separator `z` such that removing
    any subset of nodes makes it no longer a d-separator.

    Note: This function checks whether `z` is a d-separator AND is
    minimal. One can use the function `is_d_separator` to only check if
    `z` is a d-separator. See examples below.

    Parameters
    ----------
    G : nx.DiGraph
        A NetworkX DAG.
    x : node | set
        A node or set of nodes in the graph.
    y : node | set
        A node or set of nodes in the graph.
    z : node | set
        The node or set of nodes to check if it is a minimal d-separating set.
    included : set | node | None
        A node or set of nodes which must be included in the found separating set,
        default is ``None``, which means the empty set.
    restricted : set | node | None
        Restricted node or set of nodes to consider. Only these nodes can be in
        the found separating set, default is ``None`` meaning all nodes in ``G``.

    Returns
    -------
    bool
        Whether or not the set `z` is a minimal d-separator subject to
        `restricted` nodes and `included` node constraints.

    Examples
    --------
    >>> G = nx.path_graph([0, 1, 2, 3], create_using=nx.DiGraph)
    >>> G.add_node(4)
    >>> nx.is_minimal_d_separator(G, 0, 2, {1})
    True
    >>> # since {1} is the minimal d-separator, {1, 3, 4} is not minimal
    >>> nx.is_minimal_d_separator(G, 0, 2, {1, 3, 4})
    False
    >>> # alternatively, if we only want to check that {1, 3, 4} is a d-separator
    >>> nx.is_d_separator(G, 0, 2, {1, 3, 4})
    True

    Raises
    ------
    NetworkXError
        Raises a :exc:`NetworkXError` if the input graph is not a DAG.

    NodeNotFound
        If any of the input nodes are not found in the graph,
        a :exc:`NodeNotFound` exception is raised.

    References
    ----------
    .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
       minimal d-separators in linear time and applications." In
       Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.

    Notes
    -----
    This function works on verifying that a set is minimal and
    d-separating between two nodes. Uses criterion (a), (b), (c) on
    page 4 of [1]_. a) closure(`x`) and `y` are disjoint. b) `z` contains
    all nodes from `included` and is contained in the `restricted`
    nodes and in the union of ancestors of `x`, `y`, and `included`.
    c) the nodes in `z` not in `included` are contained in both
    closure(x) and closure(y). The closure of a set is the set of nodes
    connected to the set by a directed path in G.

    The complexity is :math:`O(m)`, where :math:`m` stands for the
    number of edges in the subgraph of G consisting of only the
    ancestors of `x` and `y`.

    For full details, see [1]_.
    """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    try:
        x = {x} if x in G else x
        y = {y} if y in G else y
        z = {z} if z in G else z

        if included is None:
            included = set()
        elif included in G:
            included = {included}

        if restricted is None:
            restricted = set(G)
        elif restricted in G:
            restricted = {restricted}

        # BUGFIX: `z` was previously omitted here, so unknown nodes in `z`
        # surfaced as a NetworkXError (via the `z <= restricted` check below)
        # instead of the documented NodeNotFound.
        set_v = x | y | z | included | restricted
        if set_v - G.nodes:
            raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are not found in G")
    except TypeError:
        raise nx.NodeNotFound(
            "One of x, y, z, included or restricted is not a node or set of nodes in G"
        )

    if not included <= z:
        # BUGFIX: the message previously interpolated `x` instead of `z`.
        raise nx.NetworkXError(
            f"Included nodes {included} must be in proposed separating set z {z}"
        )
    if not z <= restricted:
        raise nx.NetworkXError(
            f"Separating set {z} must be contained in restricted set {restricted}"
        )

    # Use `&` for consistency with the sibling d-separation functions.
    intersection = x & y or x & z or y & z
    if intersection:
        raise nx.NetworkXError(
            f"The sets are not disjoint, with intersection {intersection}"
        )

    nodeset = x | y | included
    ancestors_x_y_included = nodeset.union(*[nx.ancestors(G, n) for n in nodeset])

    # criterion (a) -- check that z is actually a separator
    x_closure = _reachable(G, x, ancestors_x_y_included, z)
    if x_closure & y:
        return False

    # criterion (b) -- z must lie in the ancestral closure of x, y, included
    # (the `included`/`restricted` containment was already validated above)
    if not (z <= ancestors_x_y_included):
        return False

    # criterion (c) -- minimality: every non-required node of z must be
    # reachable from both x and y, otherwise it could be dropped
    y_closure = _reachable(G, y, ancestors_x_y_included, z)
    return (z - included) <= (x_closure & y_closure)
|
588 |
+
|
589 |
+
|
590 |
+
@not_implemented_for("undirected")
def _reachable(G, x, a, z):
    """Modified Bayes-Ball algorithm for finding d-connected nodes.

    Find all nodes in `a` that are d-connected to those in `x` by
    those in `z`. This is an implementation of the function
    `REACHABLE` in [1]_ (which is itself a modification of the
    Bayes-Ball algorithm [2]_) when restricted to DAGs.

    Parameters
    ----------
    G : nx.DiGraph
        A NetworkX DAG.
    x : node | set
        A node in the DAG, or a set of nodes.
    a : node | set
        A (set of) node(s) in the DAG containing the ancestors of `x`.
    z : node | set
        The node or set of nodes conditioned on when checking d-connectedness.

    Returns
    -------
    w : set
        The closure of `x` in `a` with respect to d-connectedness
        given `z`.

    References
    ----------
    .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
       minimal d-separators in linear time and applications." In
       Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.

    .. [2] Shachter, Ross D. "Bayes-ball: The rational pastime
       (for determining irrelevance and requisite information in
       belief networks and influence diagrams)." In Proceedings of the
       Fourteenth Conference on Uncertainty in Artificial Intelligence
       (UAI), (pp. 480–487). 1998.
    """

    def _pass(e, v, f, n):
        """Whether a ball entering node `v` along edge `e` passes to `n` along `f`.

        Boolean function defined on page 6 of [1]_: the ball continues to
        `n` iff `n` lies in the ancestor set `a` and `v` does not block it
        (i.e. `v` is unconditioned, or `v` acts as a collider on the walk).

        Parameters
        ----------
        e : bool
            Directed edge by which the ball got to node `v`; `True` iff directed into `v`.
        v : node
            Node where the ball is.
        f : bool
            Directed edge connecting nodes `v` and `n`; `True` iff directed `n`.
        n : node
            Checking whether the ball passes to this node.

        Returns
        -------
        b : bool
            Whether the ball passes or not.
        """
        is_element_of_A = n in a
        # almost_definite_status = True  # always true for DAGs; not so for RCGs
        collider_if_in_Z = v not in z or (e and not f)
        return is_element_of_A and collider_if_in_Z  # and almost_definite_status

    # Seed the traversal: a (direction, node) pair records how the ball sits
    # on `node` -- True means it arrived along an incoming edge.
    queue = deque()
    for node in x:
        if G.pred[node]:
            queue.append((True, node))
        if G.succ[node]:
            queue.append((False, node))
    # Track visited (direction, node) states in a set so membership tests are
    # O(1); the original kept a deque, making each lookup O(n).
    processed = set(queue)

    # `while queue` replaces `while any(queue)`: the elements are non-empty
    # tuples (always truthy), so any() only re-iterated the deque needlessly.
    while queue:
        e, v = queue.popleft()
        preds = ((False, n) for n in G.pred[v])
        succs = ((True, n) for n in G.succ[v])
        for f, n in chain(preds, succs):
            if (f, n) not in processed and _pass(e, v, f, n):
                queue.append((f, n))
                processed.add((f, n))

    return {w for (_, w) in processed}
|
680 |
+
|
681 |
+
|
682 |
+
# Deprecated functions:
|
683 |
+
def d_separated(G, x, y, z):
    """Return whether nodes sets ``x`` and ``y`` are d-separated by ``z``.

    .. deprecated:: 3.3

       This function is deprecated and will be removed in NetworkX v3.5.
       Please use `is_d_separator(G, x, y, z)`.

    """
    import warnings

    # Emit the deprecation notice at the caller's frame, then delegate to
    # the replacement implementation.
    message = (
        "d_separated is deprecated and will be removed in NetworkX v3.5."
        "Please use `is_d_separator(G, x, y, z)`."
    )
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return nx.is_d_separator(G, x, y, z)
|
701 |
+
|
702 |
+
|
703 |
+
def minimal_d_separator(G, u, v):
    """Returns a minimal d-separating set between `u` and `v` if possible

    .. deprecated:: 3.3

       minimal_d_separator is deprecated and will be removed in NetworkX v3.5.
       Please use `find_minimal_d_separator(G, u, v)`.

    """
    import warnings

    # Bug fix: the warning previously told users to switch to
    # `is_d_separator(G, x, y)`, which answers a different question; this
    # function is replaced by `find_minimal_d_separator` (as the docstring
    # and the delegation below state).
    warnings.warn(
        (
            "minimal_d_separator is deprecated and will be removed in NetworkX v3.5."
            "Please use `find_minimal_d_separator(G, u, v)`."
        ),
        category=DeprecationWarning,
        stacklevel=2,
    )
    return nx.find_minimal_d_separator(G, u, v)
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/distance_measures.py
ADDED
@@ -0,0 +1,951 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Graph diameter, radius, eccentricity and other properties."""
|
2 |
+
|
3 |
+
import networkx as nx
|
4 |
+
from networkx.utils import not_implemented_for
|
5 |
+
|
6 |
+
__all__ = [
|
7 |
+
"eccentricity",
|
8 |
+
"diameter",
|
9 |
+
"radius",
|
10 |
+
"periphery",
|
11 |
+
"center",
|
12 |
+
"barycenter",
|
13 |
+
"resistance_distance",
|
14 |
+
"kemeny_constant",
|
15 |
+
"effective_graph_resistance",
|
16 |
+
]
|
17 |
+
|
18 |
+
|
19 |
+
def _extrema_bounding(G, compute="diameter", weight=None):
    """Compute requested extreme distance metric of undirected graph G

    Computation is based on smart lower and upper bounds, and in practice
    linear in the number of nodes, rather than quadratic (except for some
    border cases such as complete graphs or circle shaped graphs).

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    compute : string denoting the requesting metric
        "diameter" for the maximal eccentricity value,
        "radius" for the minimal eccentricity value,
        "periphery" for the set of nodes with eccentricity equal to the diameter,
        "center" for the set of nodes with eccentricity equal to the radius,
        "eccentricities" for the maximum distance from each node to all other nodes in G

    weight : string, function, or None
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights stored as floating point values can lead to small round-off
        errors in distances. Use integer weights to avoid this.

        Weights should be positive, since they are distances.

    Returns
    -------
    value : value of the requested metric
        int for "diameter" and "radius" or
        list of nodes for "center" and "periphery" or
        dictionary of eccentricity values keyed by node for "eccentricities"

    Raises
    ------
    NetworkXError
        If the graph consists of multiple components
    ValueError
        If `compute` is not one of "diameter", "radius", "periphery", "center", or "eccentricities".

    Notes
    -----
    This algorithm was proposed in [1]_ and discussed further in [2]_ and [3]_.

    References
    ----------
    .. [1] F. W. Takes, W. A. Kosters,
       "Determining the diameter of small world networks."
       Proceedings of the 20th ACM international conference on Information and knowledge management, 2011
       https://dl.acm.org/doi/abs/10.1145/2063576.2063748
    .. [2] F. W. Takes, W. A. Kosters,
       "Computing the Eccentricity Distribution of Large Graphs."
       Algorithms, 2013
       https://www.mdpi.com/1999-4893/6/1/100
    .. [3] M. Borassi, P. Crescenzi, M. Habib, W. A. Kosters, A. Marino, F. W. Takes,
       "Fast diameter and radius BFS-based computation in (weakly connected) real-world graphs: With an application to the six degrees of separation games. "
       Theoretical Computer Science, 2015
       https://www.sciencedirect.com/science/article/pii/S0304397515001644
    """
    # Validate `compute` up front.  Previously the check lived inside the
    # main loop, so an invalid value on an empty graph silently returned
    # None instead of raising.
    if compute not in {"diameter", "radius", "periphery", "center", "eccentricities"}:
        msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'"
        raise ValueError(msg)

    # init variables
    degrees = dict(G.degree())
    # Start with the highest-degree node: it tends to tighten bounds early.
    minlowernode = max(degrees, key=degrees.get)
    N = len(degrees)  # number of nodes
    # alternate between smallest lower and largest upper bound
    high = False
    # Per-node eccentricity bounds: ecc_lower[v] <= ecc(v) <= ecc_upper[v].
    ecc_lower = dict.fromkeys(G, 0)
    ecc_upper = dict.fromkeys(G, N)
    candidates = set(G)

    # (re)set bound extremes over all nodes
    minlower = N
    maxlower = 0
    minupper = N
    maxupper = 0

    # repeat the following until there are no more candidates
    while candidates:
        if high:
            current = maxuppernode  # select node with largest upper bound
        else:
            current = minlowernode  # select node with smallest lower bound
        high = not high

        # get distances from/to current node and derive eccentricity
        dist = nx.shortest_path_length(G, source=current, weight=weight)

        if len(dist) != N:
            msg = "Cannot compute metric because graph is not connected."
            raise nx.NetworkXError(msg)
        current_ecc = max(dist.values())

        # (re)set pivot selections for the next round
        maxuppernode = None
        minlowernode = None

        # update node bounds via the triangle inequality
        for i in candidates:
            d = dist[i]
            ecc_lower[i] = max(ecc_lower[i], max(d, current_ecc - d))
            ecc_upper[i] = min(ecc_upper[i], current_ecc + d)

            # update min/max values of lower and upper bounds
            minlower = min(ecc_lower[i], minlower)
            maxlower = max(ecc_lower[i], maxlower)
            minupper = min(ecc_upper[i], minupper)
            maxupper = max(ecc_upper[i], maxupper)

        # update candidate set: drop nodes whose bounds can no longer
        # change the requested answer
        if compute == "diameter":
            ruled_out = {
                i
                for i in candidates
                if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper
            }
        elif compute == "radius":
            ruled_out = {
                i
                for i in candidates
                if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower
            }
        elif compute == "periphery":
            ruled_out = {
                i
                for i in candidates
                if ecc_upper[i] < maxlower
                and (maxlower == maxupper or ecc_lower[i] > maxupper)
            }
        elif compute == "center":
            ruled_out = {
                i
                for i in candidates
                if ecc_lower[i] > minupper
                and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)
            }
        else:  # compute == "eccentricities" (validated above)
            ruled_out = set()

        # a node whose bounds have met has an exactly-known eccentricity
        ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i])
        candidates -= ruled_out

        # updating maxuppernode and minlowernode for selection in next round;
        # ties on the bound are broken in favor of higher degree
        for i in candidates:
            if (
                minlowernode is None
                or (
                    ecc_lower[i] == ecc_lower[minlowernode]
                    and degrees[i] > degrees[minlowernode]
                )
                or (ecc_lower[i] < ecc_lower[minlowernode])
            ):
                minlowernode = i

            if (
                maxuppernode is None
                or (
                    ecc_upper[i] == ecc_upper[maxuppernode]
                    and degrees[i] > degrees[maxuppernode]
                )
                or (ecc_upper[i] > ecc_upper[maxuppernode])
            ):
                maxuppernode = i

    # return the correct value of the requested metric
    if compute == "diameter":
        return maxlower
    if compute == "radius":
        return minupper
    if compute == "periphery":
        return [v for v in G if ecc_lower[v] == maxlower]
    if compute == "center":
        return [v for v in G if ecc_upper[v] == minupper]
    return ecc_lower  # compute == "eccentricities"
|
238 |
+
|
239 |
+
|
240 |
+
@nx._dispatchable(edge_attrs="weight")
def eccentricity(G, v=None, sp=None, weight=None):
    """Returns the eccentricity of nodes in G.

    The eccentricity of a node v is the maximum distance from v to
    all other nodes in G.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    v : node, optional
        Return value of specified node

    sp : dict of dicts, optional
        All pairs shortest path lengths as a dictionary of dictionaries

    weight : string, function, or None (default=None)
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights stored as floating point values can lead to small round-off
        errors in distances. Use integer weights to avoid this.

        Weights should be positive, since they are distances.

    Returns
    -------
    ecc : dictionary
        A dictionary of eccentricity values keyed by node.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> dict(nx.eccentricity(G))
    {1: 2, 2: 3, 3: 2, 4: 2, 5: 3}

    >>> dict(nx.eccentricity(G, v=[1, 5]))  # This returns the eccentricity of node 1 & 5
    {1: 2, 5: 3}

    """
    n_nodes = G.order()
    ecc = {}
    for node in G.nbunch_iter(v):
        if sp is None:
            length = nx.shortest_path_length(G, source=node, weight=weight)
            reached = len(length)
        else:
            # `sp` must map each node to a dict of path lengths; anything
            # else surfaces as a TypeError here.
            try:
                length = sp[node]
                reached = len(length)
            except TypeError as err:
                raise nx.NetworkXError('Format of "sp" is invalid.') from err
        if reached != n_nodes:
            # Some node is unreachable, so the eccentricity is infinite.
            if G.is_directed():
                msg = (
                    "Found infinite path length because the digraph is not"
                    " strongly connected"
                )
            else:
                msg = "Found infinite path length because the graph is not connected"
            raise nx.NetworkXError(msg)

        ecc[node] = max(length.values())

    # When `v` names a single node, return its value rather than a dict.
    return ecc[v] if v in G else ecc
|
327 |
+
|
328 |
+
|
329 |
+
@nx._dispatchable(edge_attrs="weight")
def diameter(G, e=None, usebounds=False, weight=None):
    """Returns the diameter of the graph G.

    The diameter is the maximum eccentricity.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    weight : string, function, or None
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights stored as floating point values can lead to small round-off
        errors in distances. Use integer weights to avoid this.

        Weights should be positive, since they are distances.

    Returns
    -------
    d : integer
        Diameter of graph

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.diameter(G)
    3

    See Also
    --------
    eccentricity
    """
    # The bounding heuristic applies only to undirected graphs without a
    # precomputed eccentricity dict.
    if usebounds is True and e is None and not G.is_directed():
        return _extrema_bounding(G, compute="diameter", weight=weight)
    ecc = eccentricity(G, weight=weight) if e is None else e
    return max(ecc.values())
|
383 |
+
|
384 |
+
|
385 |
+
@nx._dispatchable(edge_attrs="weight")
def periphery(G, e=None, usebounds=False, weight=None):
    """Returns the periphery of the graph G.

    The periphery is the set of nodes with eccentricity equal to the diameter.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    weight : string, function, or None
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights stored as floating point values can lead to small round-off
        errors in distances. Use integer weights to avoid this.

        Weights should be positive, since they are distances.

    Returns
    -------
    p : list
        List of nodes in periphery

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.periphery(G)
    [2, 5]

    See Also
    --------
    barycenter
    center
    """
    # The bounding heuristic applies only to undirected graphs without a
    # precomputed eccentricity dict.
    if usebounds is True and e is None and not G.is_directed():
        return _extrema_bounding(G, compute="periphery", weight=weight)
    ecc = eccentricity(G, weight=weight) if e is None else e
    max_ecc = max(ecc.values())
    return [node for node, value in ecc.items() if value == max_ecc]
|
442 |
+
|
443 |
+
|
444 |
+
@nx._dispatchable(edge_attrs="weight")
def radius(G, e=None, usebounds=False, weight=None):
    """Returns the radius of the graph G.

    The radius is the minimum eccentricity.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    weight : string, function, or None
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights stored as floating point values can lead to small round-off
        errors in distances. Use integer weights to avoid this.

        Weights should be positive, since they are distances.

    Returns
    -------
    r : integer
        Radius of graph

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.radius(G)
    2

    """
    # The bounding heuristic applies only to undirected graphs without a
    # precomputed eccentricity dict.
    if usebounds is True and e is None and not G.is_directed():
        return _extrema_bounding(G, compute="radius", weight=weight)
    ecc = eccentricity(G, weight=weight) if e is None else e
    return min(ecc.values())
|
495 |
+
|
496 |
+
|
497 |
+
@nx._dispatchable(edge_attrs="weight")
def center(G, e=None, usebounds=False, weight=None):
    """Returns the center of the graph G.

    The center is the set of nodes with eccentricity equal to radius.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    e : eccentricity dictionary, optional
        A precomputed dictionary of eccentricities.

    weight : string, function, or None
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

        If this is None, every edge has weight/distance/cost 1.

        Weights stored as floating point values can lead to small round-off
        errors in distances. Use integer weights to avoid this.

        Weights should be positive, since they are distances.

    Returns
    -------
    c : list
        List of nodes in center

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> list(nx.center(G))
    [1, 3, 4]

    See Also
    --------
    barycenter
    periphery
    """
    # The bounding heuristic applies only to undirected graphs without a
    # precomputed eccentricity dict.
    if usebounds is True and e is None and not G.is_directed():
        return _extrema_bounding(G, compute="center", weight=weight)
    ecc = eccentricity(G, weight=weight) if e is None else e
    min_ecc = min(ecc.values())
    return [node for node, value in ecc.items() if value == min_ecc]
|
554 |
+
|
555 |
+
|
556 |
+
@nx._dispatchable(edge_attrs="weight", mutates_input={"attr": 2})
def barycenter(G, weight=None, attr=None, sp=None):
    r"""Calculate barycenter of a connected graph, optionally with edge weights.

    The :dfn:`barycenter` of a
    :func:`connected <networkx.algorithms.components.is_connected>` graph
    :math:`G` is the subgraph induced by the set of its nodes :math:`v`
    minimizing the objective function

    .. math::

        \sum_{u \in V(G)} d_G(u, v),

    where :math:`d_G` is the (possibly weighted) :func:`path length
    <networkx.algorithms.shortest_paths.generic.shortest_path_length>`.
    The barycenter is also called the :dfn:`median`. See [West01]_, p. 78.

    Parameters
    ----------
    G : :class:`networkx.Graph`
        The connected graph :math:`G`.
    weight : :class:`str`, optional
        Passed through to
        :func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`.
    attr : :class:`str`, optional
        If given, write the value of the objective function to each node's
        `attr` attribute. Otherwise do not store the value.
    sp : dict of dicts, optional
        All pairs shortest path lengths as a dictionary of dictionaries

    Returns
    -------
    list
        Nodes of `G` that induce the barycenter of `G`.

    Raises
    ------
    NetworkXNoPath
        If `G` is disconnected. `G` may appear disconnected to
        :func:`barycenter` if `sp` is given but is missing shortest path
        lengths for any pairs.
    ValueError
        If `sp` and `weight` are both given.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> nx.barycenter(G)
    [1, 3, 4]

    See Also
    --------
    center
    periphery
    """
    if sp is None:
        pairs = nx.shortest_path_length(G, weight=weight)
    else:
        pairs = sp.items()
        if weight is not None:
            raise ValueError("Cannot use both sp, weight arguments together")

    best = float("inf")
    best_nodes = []
    n = len(G)
    for node, dists in pairs:
        # Fewer than n reachable nodes means the graph is disconnected,
        # so every barycentricity would be infinite.
        if len(dists) < n:
            raise nx.NetworkXNoPath(
                f"Input graph {G} is disconnected, so every induced subgraph "
                "has infinite barycentricity."
            )
        barycentricity = sum(dists.values())
        if attr is not None:
            G.nodes[node][attr] = barycentricity
        if barycentricity < best:
            best, best_nodes = barycentricity, [node]
        elif barycentricity == best:
            best_nodes.append(node)
    if attr is not None:
        nx._clear_cache(G)
    return best_nodes
|
635 |
+
|
636 |
+
|
637 |
+
@not_implemented_for("directed")
|
638 |
+
@nx._dispatchable(edge_attrs="weight")
|
639 |
+
def resistance_distance(G, nodeA=None, nodeB=None, weight=None, invert_weight=True):
|
640 |
+
"""Returns the resistance distance between pairs of nodes in graph G.
|
641 |
+
|
642 |
+
The resistance distance between two nodes of a graph is akin to treating
|
643 |
+
the graph as a grid of resistors with a resistance equal to the provided
|
644 |
+
weight [1]_, [2]_.
|
645 |
+
|
646 |
+
If weight is not provided, then a weight of 1 is used for all edges.
|
647 |
+
|
648 |
+
If two nodes are the same, the resistance distance is zero.
|
649 |
+
|
650 |
+
Parameters
|
651 |
+
----------
|
652 |
+
G : NetworkX graph
|
653 |
+
A graph
|
654 |
+
|
655 |
+
nodeA : node or None, optional (default=None)
|
656 |
+
A node within graph G.
|
657 |
+
If None, compute resistance distance using all nodes as source nodes.
|
658 |
+
|
659 |
+
nodeB : node or None, optional (default=None)
|
660 |
+
A node within graph G.
|
661 |
+
If None, compute resistance distance using all nodes as target nodes.
|
662 |
+
|
663 |
+
weight : string or None, optional (default=None)
|
664 |
+
The edge data key used to compute the resistance distance.
|
665 |
+
If None, then each edge has weight 1.
|
666 |
+
|
667 |
+
invert_weight : boolean (default=True)
|
668 |
+
Proper calculation of resistance distance requires building the
|
669 |
+
Laplacian matrix with the reciprocal of the weight. Not required
|
670 |
+
if the weight is already inverted. Weight cannot be zero.
|
671 |
+
|
672 |
+
Returns
|
673 |
+
-------
|
674 |
+
rd : dict or float
|
675 |
+
If `nodeA` and `nodeB` are given, resistance distance between `nodeA`
|
676 |
+
and `nodeB`. If `nodeA` or `nodeB` is unspecified (the default), a
|
677 |
+
dictionary of nodes with resistance distances as the value.
|
678 |
+
|
679 |
+
Raises
|
680 |
+
------
|
681 |
+
NetworkXNotImplemented
|
682 |
+
If `G` is a directed graph.
|
683 |
+
|
684 |
+
NetworkXError
|
685 |
+
If `G` is not connected, or contains no nodes,
|
686 |
+
or `nodeA` is not in `G` or `nodeB` is not in `G`.
|
687 |
+
|
688 |
+
Examples
|
689 |
+
--------
|
690 |
+
>>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
|
691 |
+
>>> round(nx.resistance_distance(G, 1, 3), 10)
|
692 |
+
0.625
|
693 |
+
|
694 |
+
Notes
|
695 |
+
-----
|
696 |
+
The implementation is based on Theorem A in [2]_. Self-loops are ignored.
|
697 |
+
Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights.
|
698 |
+
|
699 |
+
References
|
700 |
+
----------
|
701 |
+
.. [1] Wikipedia
|
702 |
+
"Resistance distance."
|
703 |
+
https://en.wikipedia.org/wiki/Resistance_distance
|
704 |
+
.. [2] D. J. Klein and M. Randic.
|
705 |
+
Resistance distance.
|
706 |
+
J. of Math. Chem. 12:81-95, 1993.
|
707 |
+
"""
|
708 |
+
import numpy as np
|
709 |
+
|
710 |
+
if len(G) == 0:
|
711 |
+
raise nx.NetworkXError("Graph G must contain at least one node.")
|
712 |
+
if not nx.is_connected(G):
|
713 |
+
raise nx.NetworkXError("Graph G must be strongly connected.")
|
714 |
+
if nodeA is not None and nodeA not in G:
|
715 |
+
raise nx.NetworkXError("Node A is not in graph G.")
|
716 |
+
if nodeB is not None and nodeB not in G:
|
717 |
+
raise nx.NetworkXError("Node B is not in graph G.")
|
718 |
+
|
719 |
+
G = G.copy()
|
720 |
+
node_list = list(G)
|
721 |
+
|
722 |
+
# Invert weights
|
723 |
+
if invert_weight and weight is not None:
|
724 |
+
if G.is_multigraph():
|
725 |
+
for u, v, k, d in G.edges(keys=True, data=True):
|
726 |
+
d[weight] = 1 / d[weight]
|
727 |
+
else:
|
728 |
+
for u, v, d in G.edges(data=True):
|
729 |
+
d[weight] = 1 / d[weight]
|
730 |
+
|
731 |
+
# Compute resistance distance using the Pseudo-inverse of the Laplacian
|
732 |
+
# Self-loops are ignored
|
733 |
+
L = nx.laplacian_matrix(G, weight=weight).todense()
|
734 |
+
Linv = np.linalg.pinv(L, hermitian=True)
|
735 |
+
|
736 |
+
# Return relevant distances
|
737 |
+
if nodeA is not None and nodeB is not None:
|
738 |
+
i = node_list.index(nodeA)
|
739 |
+
j = node_list.index(nodeB)
|
740 |
+
return Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)
|
741 |
+
|
742 |
+
elif nodeA is not None:
|
743 |
+
i = node_list.index(nodeA)
|
744 |
+
d = {}
|
745 |
+
for n in G:
|
746 |
+
j = node_list.index(n)
|
747 |
+
d[n] = Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)
|
748 |
+
return d
|
749 |
+
|
750 |
+
elif nodeB is not None:
|
751 |
+
j = node_list.index(nodeB)
|
752 |
+
d = {}
|
753 |
+
for n in G:
|
754 |
+
i = node_list.index(n)
|
755 |
+
d[n] = Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)
|
756 |
+
return d
|
757 |
+
|
758 |
+
else:
|
759 |
+
d = {}
|
760 |
+
for n in G:
|
761 |
+
i = node_list.index(n)
|
762 |
+
d[n] = {}
|
763 |
+
for n2 in G:
|
764 |
+
j = node_list.index(n2)
|
765 |
+
d[n][n2] = (
|
766 |
+
Linv.item(i, i)
|
767 |
+
+ Linv.item(j, j)
|
768 |
+
- Linv.item(i, j)
|
769 |
+
- Linv.item(j, i)
|
770 |
+
)
|
771 |
+
return d
|
772 |
+
|
773 |
+
|
774 |
+
@not_implemented_for("directed")
|
775 |
+
@nx._dispatchable(edge_attrs="weight")
|
776 |
+
def effective_graph_resistance(G, weight=None, invert_weight=True):
|
777 |
+
"""Returns the Effective graph resistance of G.
|
778 |
+
|
779 |
+
Also known as the Kirchhoff index.
|
780 |
+
|
781 |
+
The effective graph resistance is defined as the sum
|
782 |
+
of the resistance distance of every node pair in G [1]_.
|
783 |
+
|
784 |
+
If weight is not provided, then a weight of 1 is used for all edges.
|
785 |
+
|
786 |
+
The effective graph resistance of a disconnected graph is infinite.
|
787 |
+
|
788 |
+
Parameters
|
789 |
+
----------
|
790 |
+
G : NetworkX graph
|
791 |
+
A graph
|
792 |
+
|
793 |
+
weight : string or None, optional (default=None)
|
794 |
+
The edge data key used to compute the effective graph resistance.
|
795 |
+
If None, then each edge has weight 1.
|
796 |
+
|
797 |
+
invert_weight : boolean (default=True)
|
798 |
+
Proper calculation of resistance distance requires building the
|
799 |
+
Laplacian matrix with the reciprocal of the weight. Not required
|
800 |
+
if the weight is already inverted. Weight cannot be zero.
|
801 |
+
|
802 |
+
Returns
|
803 |
+
-------
|
804 |
+
RG : float
|
805 |
+
The effective graph resistance of `G`.
|
806 |
+
|
807 |
+
Raises
|
808 |
+
------
|
809 |
+
NetworkXNotImplemented
|
810 |
+
If `G` is a directed graph.
|
811 |
+
|
812 |
+
NetworkXError
|
813 |
+
If `G` does not contain any nodes.
|
814 |
+
|
815 |
+
Examples
|
816 |
+
--------
|
817 |
+
>>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
|
818 |
+
>>> round(nx.effective_graph_resistance(G), 10)
|
819 |
+
10.25
|
820 |
+
|
821 |
+
Notes
|
822 |
+
-----
|
823 |
+
The implementation is based on Theorem 2.2 in [2]_. Self-loops are ignored.
|
824 |
+
Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights.
|
825 |
+
|
826 |
+
References
|
827 |
+
----------
|
828 |
+
.. [1] Wolfram
|
829 |
+
"Kirchhoff Index."
|
830 |
+
https://mathworld.wolfram.com/KirchhoffIndex.html
|
831 |
+
.. [2] W. Ellens, F. M. Spieksma, P. Van Mieghem, A. Jamakovic, R. E. Kooij.
|
832 |
+
Effective graph resistance.
|
833 |
+
Lin. Alg. Appl. 435:2491-2506, 2011.
|
834 |
+
"""
|
835 |
+
import numpy as np
|
836 |
+
|
837 |
+
if len(G) == 0:
|
838 |
+
raise nx.NetworkXError("Graph G must contain at least one node.")
|
839 |
+
|
840 |
+
# Disconnected graphs have infinite Effective graph resistance
|
841 |
+
if not nx.is_connected(G):
|
842 |
+
return float("inf")
|
843 |
+
|
844 |
+
# Invert weights
|
845 |
+
G = G.copy()
|
846 |
+
if invert_weight and weight is not None:
|
847 |
+
if G.is_multigraph():
|
848 |
+
for u, v, k, d in G.edges(keys=True, data=True):
|
849 |
+
d[weight] = 1 / d[weight]
|
850 |
+
else:
|
851 |
+
for u, v, d in G.edges(data=True):
|
852 |
+
d[weight] = 1 / d[weight]
|
853 |
+
|
854 |
+
# Get Laplacian eigenvalues
|
855 |
+
mu = np.sort(nx.laplacian_spectrum(G, weight=weight))
|
856 |
+
|
857 |
+
# Compute Effective graph resistance based on spectrum of the Laplacian
|
858 |
+
# Self-loops are ignored
|
859 |
+
return float(np.sum(1 / mu[1:]) * G.number_of_nodes())
|
860 |
+
|
861 |
+
|
862 |
+
@nx.utils.not_implemented_for("directed")
|
863 |
+
@nx._dispatchable(edge_attrs="weight")
|
864 |
+
def kemeny_constant(G, *, weight=None):
|
865 |
+
"""Returns the Kemeny constant of the given graph.
|
866 |
+
|
867 |
+
The *Kemeny constant* (or Kemeny's constant) of a graph `G`
|
868 |
+
can be computed by regarding the graph as a Markov chain.
|
869 |
+
The Kemeny constant is then the expected number of time steps
|
870 |
+
to transition from a starting state i to a random destination state
|
871 |
+
sampled from the Markov chain's stationary distribution.
|
872 |
+
The Kemeny constant is independent of the chosen initial state [1]_.
|
873 |
+
|
874 |
+
The Kemeny constant measures the time needed for spreading
|
875 |
+
across a graph. Low values indicate a closely connected graph
|
876 |
+
whereas high values indicate a spread-out graph.
|
877 |
+
|
878 |
+
If weight is not provided, then a weight of 1 is used for all edges.
|
879 |
+
|
880 |
+
Since `G` represents a Markov chain, the weights must be positive.
|
881 |
+
|
882 |
+
Parameters
|
883 |
+
----------
|
884 |
+
G : NetworkX graph
|
885 |
+
|
886 |
+
weight : string or None, optional (default=None)
|
887 |
+
The edge data key used to compute the Kemeny constant.
|
888 |
+
If None, then each edge has weight 1.
|
889 |
+
|
890 |
+
Returns
|
891 |
+
-------
|
892 |
+
float
|
893 |
+
The Kemeny constant of the graph `G`.
|
894 |
+
|
895 |
+
Raises
|
896 |
+
------
|
897 |
+
NetworkXNotImplemented
|
898 |
+
If the graph `G` is directed.
|
899 |
+
|
900 |
+
NetworkXError
|
901 |
+
If the graph `G` is not connected, or contains no nodes,
|
902 |
+
or has edges with negative weights.
|
903 |
+
|
904 |
+
Examples
|
905 |
+
--------
|
906 |
+
>>> G = nx.complete_graph(5)
|
907 |
+
>>> round(nx.kemeny_constant(G), 10)
|
908 |
+
3.2
|
909 |
+
|
910 |
+
Notes
|
911 |
+
-----
|
912 |
+
The implementation is based on equation (3.3) in [2]_.
|
913 |
+
Self-loops are allowed and indicate a Markov chain where
|
914 |
+
the state can remain the same. Multi-edges are contracted
|
915 |
+
in one edge with weight equal to the sum of the weights.
|
916 |
+
|
917 |
+
References
|
918 |
+
----------
|
919 |
+
.. [1] Wikipedia
|
920 |
+
"Kemeny's constant."
|
921 |
+
https://en.wikipedia.org/wiki/Kemeny%27s_constant
|
922 |
+
.. [2] Lovász L.
|
923 |
+
Random walks on graphs: A survey.
|
924 |
+
Paul Erdös is Eighty, vol. 2, Bolyai Society,
|
925 |
+
Mathematical Studies, Keszthely, Hungary (1993), pp. 1-46
|
926 |
+
"""
|
927 |
+
import numpy as np
|
928 |
+
import scipy as sp
|
929 |
+
|
930 |
+
if len(G) == 0:
|
931 |
+
raise nx.NetworkXError("Graph G must contain at least one node.")
|
932 |
+
if not nx.is_connected(G):
|
933 |
+
raise nx.NetworkXError("Graph G must be connected.")
|
934 |
+
if nx.is_negatively_weighted(G, weight=weight):
|
935 |
+
raise nx.NetworkXError("The weights of graph G must be nonnegative.")
|
936 |
+
|
937 |
+
# Compute matrix H = D^-1/2 A D^-1/2
|
938 |
+
A = nx.adjacency_matrix(G, weight=weight)
|
939 |
+
n, m = A.shape
|
940 |
+
diags = A.sum(axis=1)
|
941 |
+
with np.errstate(divide="ignore"):
|
942 |
+
diags_sqrt = 1.0 / np.sqrt(diags)
|
943 |
+
diags_sqrt[np.isinf(diags_sqrt)] = 0
|
944 |
+
DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr"))
|
945 |
+
H = DH @ (A @ DH)
|
946 |
+
|
947 |
+
# Compute eigenvalues of H
|
948 |
+
eig = np.sort(sp.linalg.eigvalsh(H.todense()))
|
949 |
+
|
950 |
+
# Compute the Kemeny constant
|
951 |
+
return float(np.sum(1 / (1 - eig[:-1])))
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/distance_regular.py
ADDED
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
=======================
|
3 |
+
Distance-regular graphs
|
4 |
+
=======================
|
5 |
+
"""
|
6 |
+
|
7 |
+
import networkx as nx
|
8 |
+
from networkx.utils import not_implemented_for
|
9 |
+
|
10 |
+
from .distance_measures import diameter
|
11 |
+
|
12 |
+
__all__ = [
|
13 |
+
"is_distance_regular",
|
14 |
+
"is_strongly_regular",
|
15 |
+
"intersection_array",
|
16 |
+
"global_parameters",
|
17 |
+
]
|
18 |
+
|
19 |
+
|
20 |
+
@nx._dispatchable
|
21 |
+
def is_distance_regular(G):
|
22 |
+
"""Returns True if the graph is distance regular, False otherwise.
|
23 |
+
|
24 |
+
A connected graph G is distance-regular if for any nodes x,y
|
25 |
+
and any integers i,j=0,1,...,d (where d is the graph
|
26 |
+
diameter), the number of vertices at distance i from x and
|
27 |
+
distance j from y depends only on i,j and the graph distance
|
28 |
+
between x and y, independently of the choice of x and y.
|
29 |
+
|
30 |
+
Parameters
|
31 |
+
----------
|
32 |
+
G: Networkx graph (undirected)
|
33 |
+
|
34 |
+
Returns
|
35 |
+
-------
|
36 |
+
bool
|
37 |
+
True if the graph is Distance Regular, False otherwise
|
38 |
+
|
39 |
+
Examples
|
40 |
+
--------
|
41 |
+
>>> G = nx.hypercube_graph(6)
|
42 |
+
>>> nx.is_distance_regular(G)
|
43 |
+
True
|
44 |
+
|
45 |
+
See Also
|
46 |
+
--------
|
47 |
+
intersection_array, global_parameters
|
48 |
+
|
49 |
+
Notes
|
50 |
+
-----
|
51 |
+
For undirected and simple graphs only
|
52 |
+
|
53 |
+
References
|
54 |
+
----------
|
55 |
+
.. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A.
|
56 |
+
Distance-Regular Graphs. New York: Springer-Verlag, 1989.
|
57 |
+
.. [2] Weisstein, Eric W. "Distance-Regular Graph."
|
58 |
+
http://mathworld.wolfram.com/Distance-RegularGraph.html
|
59 |
+
|
60 |
+
"""
|
61 |
+
try:
|
62 |
+
intersection_array(G)
|
63 |
+
return True
|
64 |
+
except nx.NetworkXError:
|
65 |
+
return False
|
66 |
+
|
67 |
+
|
68 |
+
def global_parameters(b, c):
|
69 |
+
"""Returns global parameters for a given intersection array.
|
70 |
+
|
71 |
+
Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d
|
72 |
+
such that for any 2 vertices x,y in G at a distance i=d(x,y), there
|
73 |
+
are exactly c_i neighbors of y at a distance of i-1 from x and b_i
|
74 |
+
neighbors of y at a distance of i+1 from x.
|
75 |
+
|
76 |
+
Thus, a distance regular graph has the global parameters,
|
77 |
+
[[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the
|
78 |
+
intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
|
79 |
+
where a_i+b_i+c_i=k , k= degree of every vertex.
|
80 |
+
|
81 |
+
Parameters
|
82 |
+
----------
|
83 |
+
b : list
|
84 |
+
|
85 |
+
c : list
|
86 |
+
|
87 |
+
Returns
|
88 |
+
-------
|
89 |
+
iterable
|
90 |
+
An iterable over three tuples.
|
91 |
+
|
92 |
+
Examples
|
93 |
+
--------
|
94 |
+
>>> G = nx.dodecahedral_graph()
|
95 |
+
>>> b, c = nx.intersection_array(G)
|
96 |
+
>>> list(nx.global_parameters(b, c))
|
97 |
+
[(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)]
|
98 |
+
|
99 |
+
References
|
100 |
+
----------
|
101 |
+
.. [1] Weisstein, Eric W. "Global Parameters."
|
102 |
+
From MathWorld--A Wolfram Web Resource.
|
103 |
+
http://mathworld.wolfram.com/GlobalParameters.html
|
104 |
+
|
105 |
+
See Also
|
106 |
+
--------
|
107 |
+
intersection_array
|
108 |
+
"""
|
109 |
+
return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c))
|
110 |
+
|
111 |
+
|
112 |
+
@not_implemented_for("directed")
|
113 |
+
@not_implemented_for("multigraph")
|
114 |
+
@nx._dispatchable
|
115 |
+
def intersection_array(G):
|
116 |
+
"""Returns the intersection array of a distance-regular graph.
|
117 |
+
|
118 |
+
Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d
|
119 |
+
such that for any 2 vertices x,y in G at a distance i=d(x,y), there
|
120 |
+
are exactly c_i neighbors of y at a distance of i-1 from x and b_i
|
121 |
+
neighbors of y at a distance of i+1 from x.
|
122 |
+
|
123 |
+
A distance regular graph's intersection array is given by,
|
124 |
+
[b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
|
125 |
+
|
126 |
+
Parameters
|
127 |
+
----------
|
128 |
+
G: Networkx graph (undirected)
|
129 |
+
|
130 |
+
Returns
|
131 |
+
-------
|
132 |
+
b,c: tuple of lists
|
133 |
+
|
134 |
+
Examples
|
135 |
+
--------
|
136 |
+
>>> G = nx.icosahedral_graph()
|
137 |
+
>>> nx.intersection_array(G)
|
138 |
+
([5, 2, 1], [1, 2, 5])
|
139 |
+
|
140 |
+
References
|
141 |
+
----------
|
142 |
+
.. [1] Weisstein, Eric W. "Intersection Array."
|
143 |
+
From MathWorld--A Wolfram Web Resource.
|
144 |
+
http://mathworld.wolfram.com/IntersectionArray.html
|
145 |
+
|
146 |
+
See Also
|
147 |
+
--------
|
148 |
+
global_parameters
|
149 |
+
"""
|
150 |
+
# test for regular graph (all degrees must be equal)
|
151 |
+
if len(G) == 0:
|
152 |
+
raise nx.NetworkXPointlessConcept("Graph has no nodes.")
|
153 |
+
degree = iter(G.degree())
|
154 |
+
(_, k) = next(degree)
|
155 |
+
for _, knext in degree:
|
156 |
+
if knext != k:
|
157 |
+
raise nx.NetworkXError("Graph is not distance regular.")
|
158 |
+
k = knext
|
159 |
+
path_length = dict(nx.all_pairs_shortest_path_length(G))
|
160 |
+
diameter = max(max(path_length[n].values()) for n in path_length)
|
161 |
+
bint = {} # 'b' intersection array
|
162 |
+
cint = {} # 'c' intersection array
|
163 |
+
for u in G:
|
164 |
+
for v in G:
|
165 |
+
try:
|
166 |
+
i = path_length[u][v]
|
167 |
+
except KeyError as err: # graph must be connected
|
168 |
+
raise nx.NetworkXError("Graph is not distance regular.") from err
|
169 |
+
# number of neighbors of v at a distance of i-1 from u
|
170 |
+
c = len([n for n in G[v] if path_length[n][u] == i - 1])
|
171 |
+
# number of neighbors of v at a distance of i+1 from u
|
172 |
+
b = len([n for n in G[v] if path_length[n][u] == i + 1])
|
173 |
+
# b,c are independent of u and v
|
174 |
+
if cint.get(i, c) != c or bint.get(i, b) != b:
|
175 |
+
raise nx.NetworkXError("Graph is not distance regular")
|
176 |
+
bint[i] = b
|
177 |
+
cint[i] = c
|
178 |
+
return (
|
179 |
+
[bint.get(j, 0) for j in range(diameter)],
|
180 |
+
[cint.get(j + 1, 0) for j in range(diameter)],
|
181 |
+
)
|
182 |
+
|
183 |
+
|
184 |
+
# TODO There is a definition for directed strongly regular graphs.
|
185 |
+
@not_implemented_for("directed")
|
186 |
+
@not_implemented_for("multigraph")
|
187 |
+
@nx._dispatchable
|
188 |
+
def is_strongly_regular(G):
|
189 |
+
"""Returns True if and only if the given graph is strongly
|
190 |
+
regular.
|
191 |
+
|
192 |
+
An undirected graph is *strongly regular* if
|
193 |
+
|
194 |
+
* it is regular,
|
195 |
+
* each pair of adjacent vertices has the same number of neighbors in
|
196 |
+
common,
|
197 |
+
* each pair of nonadjacent vertices has the same number of neighbors
|
198 |
+
in common.
|
199 |
+
|
200 |
+
Each strongly regular graph is a distance-regular graph.
|
201 |
+
Conversely, if a distance-regular graph has diameter two, then it is
|
202 |
+
a strongly regular graph. For more information on distance-regular
|
203 |
+
graphs, see :func:`is_distance_regular`.
|
204 |
+
|
205 |
+
Parameters
|
206 |
+
----------
|
207 |
+
G : NetworkX graph
|
208 |
+
An undirected graph.
|
209 |
+
|
210 |
+
Returns
|
211 |
+
-------
|
212 |
+
bool
|
213 |
+
Whether `G` is strongly regular.
|
214 |
+
|
215 |
+
Examples
|
216 |
+
--------
|
217 |
+
|
218 |
+
The cycle graph on five vertices is strongly regular. It is
|
219 |
+
two-regular, each pair of adjacent vertices has no shared neighbors,
|
220 |
+
and each pair of nonadjacent vertices has one shared neighbor::
|
221 |
+
|
222 |
+
>>> G = nx.cycle_graph(5)
|
223 |
+
>>> nx.is_strongly_regular(G)
|
224 |
+
True
|
225 |
+
|
226 |
+
"""
|
227 |
+
# Here is an alternate implementation based directly on the
|
228 |
+
# definition of strongly regular graphs:
|
229 |
+
#
|
230 |
+
# return (all_equal(G.degree().values())
|
231 |
+
# and all_equal(len(common_neighbors(G, u, v))
|
232 |
+
# for u, v in G.edges())
|
233 |
+
# and all_equal(len(common_neighbors(G, u, v))
|
234 |
+
# for u, v in non_edges(G)))
|
235 |
+
#
|
236 |
+
# We instead use the fact that a distance-regular graph of diameter
|
237 |
+
# two is strongly regular.
|
238 |
+
return is_distance_regular(G) and diameter(G) == 2
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/dominance.py
ADDED
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Dominance algorithms.
|
3 |
+
"""
|
4 |
+
|
5 |
+
from functools import reduce
|
6 |
+
|
7 |
+
import networkx as nx
|
8 |
+
from networkx.utils import not_implemented_for
|
9 |
+
|
10 |
+
__all__ = ["immediate_dominators", "dominance_frontiers"]
|
11 |
+
|
12 |
+
|
13 |
+
@not_implemented_for("undirected")
|
14 |
+
@nx._dispatchable
|
15 |
+
def immediate_dominators(G, start):
|
16 |
+
"""Returns the immediate dominators of all nodes of a directed graph.
|
17 |
+
|
18 |
+
Parameters
|
19 |
+
----------
|
20 |
+
G : a DiGraph or MultiDiGraph
|
21 |
+
The graph where dominance is to be computed.
|
22 |
+
|
23 |
+
start : node
|
24 |
+
The start node of dominance computation.
|
25 |
+
|
26 |
+
Returns
|
27 |
+
-------
|
28 |
+
idom : dict keyed by nodes
|
29 |
+
A dict containing the immediate dominators of each node reachable from
|
30 |
+
`start`.
|
31 |
+
|
32 |
+
Raises
|
33 |
+
------
|
34 |
+
NetworkXNotImplemented
|
35 |
+
If `G` is undirected.
|
36 |
+
|
37 |
+
NetworkXError
|
38 |
+
If `start` is not in `G`.
|
39 |
+
|
40 |
+
Notes
|
41 |
+
-----
|
42 |
+
Except for `start`, the immediate dominators are the parents of their
|
43 |
+
corresponding nodes in the dominator tree.
|
44 |
+
|
45 |
+
Examples
|
46 |
+
--------
|
47 |
+
>>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
|
48 |
+
>>> sorted(nx.immediate_dominators(G, 1).items())
|
49 |
+
[(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)]
|
50 |
+
|
51 |
+
References
|
52 |
+
----------
|
53 |
+
.. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy.
|
54 |
+
A simple, fast dominance algorithm.
|
55 |
+
Software Practice & Experience, 4:110, 2001.
|
56 |
+
"""
|
57 |
+
if start not in G:
|
58 |
+
raise nx.NetworkXError("start is not in G")
|
59 |
+
|
60 |
+
idom = {start: start}
|
61 |
+
|
62 |
+
order = list(nx.dfs_postorder_nodes(G, start))
|
63 |
+
dfn = {u: i for i, u in enumerate(order)}
|
64 |
+
order.pop()
|
65 |
+
order.reverse()
|
66 |
+
|
67 |
+
def intersect(u, v):
|
68 |
+
while u != v:
|
69 |
+
while dfn[u] < dfn[v]:
|
70 |
+
u = idom[u]
|
71 |
+
while dfn[u] > dfn[v]:
|
72 |
+
v = idom[v]
|
73 |
+
return u
|
74 |
+
|
75 |
+
changed = True
|
76 |
+
while changed:
|
77 |
+
changed = False
|
78 |
+
for u in order:
|
79 |
+
new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom))
|
80 |
+
if u not in idom or idom[u] != new_idom:
|
81 |
+
idom[u] = new_idom
|
82 |
+
changed = True
|
83 |
+
|
84 |
+
return idom
|
85 |
+
|
86 |
+
|
87 |
+
@nx._dispatchable
|
88 |
+
def dominance_frontiers(G, start):
|
89 |
+
"""Returns the dominance frontiers of all nodes of a directed graph.
|
90 |
+
|
91 |
+
Parameters
|
92 |
+
----------
|
93 |
+
G : a DiGraph or MultiDiGraph
|
94 |
+
The graph where dominance is to be computed.
|
95 |
+
|
96 |
+
start : node
|
97 |
+
The start node of dominance computation.
|
98 |
+
|
99 |
+
Returns
|
100 |
+
-------
|
101 |
+
df : dict keyed by nodes
|
102 |
+
A dict containing the dominance frontiers of each node reachable from
|
103 |
+
`start` as lists.
|
104 |
+
|
105 |
+
Raises
|
106 |
+
------
|
107 |
+
NetworkXNotImplemented
|
108 |
+
If `G` is undirected.
|
109 |
+
|
110 |
+
NetworkXError
|
111 |
+
If `start` is not in `G`.
|
112 |
+
|
113 |
+
Examples
|
114 |
+
--------
|
115 |
+
>>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
|
116 |
+
>>> sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items())
|
117 |
+
[(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])]
|
118 |
+
|
119 |
+
References
|
120 |
+
----------
|
121 |
+
.. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy.
|
122 |
+
A simple, fast dominance algorithm.
|
123 |
+
Software Practice & Experience, 4:110, 2001.
|
124 |
+
"""
|
125 |
+
idom = nx.immediate_dominators(G, start)
|
126 |
+
|
127 |
+
df = {u: set() for u in idom}
|
128 |
+
for u in idom:
|
129 |
+
if len(G.pred[u]) >= 2:
|
130 |
+
for v in G.pred[u]:
|
131 |
+
if v in idom:
|
132 |
+
while v != idom[u]:
|
133 |
+
df[v].add(u)
|
134 |
+
v = idom[v]
|
135 |
+
return df
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/dominating.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for computing dominating sets in a graph."""
|
2 |
+
from itertools import chain
|
3 |
+
|
4 |
+
import networkx as nx
|
5 |
+
from networkx.utils import arbitrary_element
|
6 |
+
|
7 |
+
__all__ = ["dominating_set", "is_dominating_set"]
|
8 |
+
|
9 |
+
|
10 |
+
@nx._dispatchable
|
11 |
+
def dominating_set(G, start_with=None):
|
12 |
+
r"""Finds a dominating set for the graph G.
|
13 |
+
|
14 |
+
A *dominating set* for a graph with node set *V* is a subset *D* of
|
15 |
+
*V* such that every node not in *D* is adjacent to at least one
|
16 |
+
member of *D* [1]_.
|
17 |
+
|
18 |
+
Parameters
|
19 |
+
----------
|
20 |
+
G : NetworkX graph
|
21 |
+
|
22 |
+
start_with : node (default=None)
|
23 |
+
Node to use as a starting point for the algorithm.
|
24 |
+
|
25 |
+
Returns
|
26 |
+
-------
|
27 |
+
D : set
|
28 |
+
A dominating set for G.
|
29 |
+
|
30 |
+
Notes
|
31 |
+
-----
|
32 |
+
This function is an implementation of algorithm 7 in [2]_ which
|
33 |
+
finds some dominating set, not necessarily the smallest one.
|
34 |
+
|
35 |
+
See also
|
36 |
+
--------
|
37 |
+
is_dominating_set
|
38 |
+
|
39 |
+
References
|
40 |
+
----------
|
41 |
+
.. [1] https://en.wikipedia.org/wiki/Dominating_set
|
42 |
+
|
43 |
+
.. [2] Abdol-Hossein Esfahanian. Connectivity Algorithms.
|
44 |
+
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
|
45 |
+
|
46 |
+
"""
|
47 |
+
all_nodes = set(G)
|
48 |
+
if start_with is None:
|
49 |
+
start_with = arbitrary_element(all_nodes)
|
50 |
+
if start_with not in G:
|
51 |
+
raise nx.NetworkXError(f"node {start_with} is not in G")
|
52 |
+
dominating_set = {start_with}
|
53 |
+
dominated_nodes = set(G[start_with])
|
54 |
+
remaining_nodes = all_nodes - dominated_nodes - dominating_set
|
55 |
+
while remaining_nodes:
|
56 |
+
# Choose an arbitrary node and determine its undominated neighbors.
|
57 |
+
v = remaining_nodes.pop()
|
58 |
+
undominated_nbrs = set(G[v]) - dominating_set
|
59 |
+
# Add the node to the dominating set and the neighbors to the
|
60 |
+
# dominated set. Finally, remove all of those nodes from the set
|
61 |
+
# of remaining nodes.
|
62 |
+
dominating_set.add(v)
|
63 |
+
dominated_nodes |= undominated_nbrs
|
64 |
+
remaining_nodes -= undominated_nbrs
|
65 |
+
return dominating_set
|
66 |
+
|
67 |
+
|
68 |
+
@nx._dispatchable
|
69 |
+
def is_dominating_set(G, nbunch):
|
70 |
+
"""Checks if `nbunch` is a dominating set for `G`.
|
71 |
+
|
72 |
+
A *dominating set* for a graph with node set *V* is a subset *D* of
|
73 |
+
*V* such that every node not in *D* is adjacent to at least one
|
74 |
+
member of *D* [1]_.
|
75 |
+
|
76 |
+
Parameters
|
77 |
+
----------
|
78 |
+
G : NetworkX graph
|
79 |
+
|
80 |
+
nbunch : iterable
|
81 |
+
An iterable of nodes in the graph `G`.
|
82 |
+
|
83 |
+
See also
|
84 |
+
--------
|
85 |
+
dominating_set
|
86 |
+
|
87 |
+
References
|
88 |
+
----------
|
89 |
+
.. [1] https://en.wikipedia.org/wiki/Dominating_set
|
90 |
+
|
91 |
+
"""
|
92 |
+
testset = {n for n in nbunch if n in G}
|
93 |
+
nbrs = set(chain.from_iterable(G[n] for n in testset))
|
94 |
+
return len(set(G) - testset - nbrs) == 0
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/efficiency_measures.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Provides functions for computing the efficiency of nodes and graphs."""
|
2 |
+
|
3 |
+
import networkx as nx
|
4 |
+
from networkx.exception import NetworkXNoPath
|
5 |
+
|
6 |
+
from ..utils import not_implemented_for
|
7 |
+
|
8 |
+
__all__ = ["efficiency", "local_efficiency", "global_efficiency"]
|
9 |
+
|
10 |
+
|
11 |
+
@not_implemented_for("directed")
|
12 |
+
@nx._dispatchable
|
13 |
+
def efficiency(G, u, v):
|
14 |
+
"""Returns the efficiency of a pair of nodes in a graph.
|
15 |
+
|
16 |
+
The *efficiency* of a pair of nodes is the multiplicative inverse of the
|
17 |
+
shortest path distance between the nodes [1]_. Returns 0 if no path
|
18 |
+
between nodes.
|
19 |
+
|
20 |
+
Parameters
|
21 |
+
----------
|
22 |
+
G : :class:`networkx.Graph`
|
23 |
+
An undirected graph for which to compute the average local efficiency.
|
24 |
+
u, v : node
|
25 |
+
Nodes in the graph ``G``.
|
26 |
+
|
27 |
+
Returns
|
28 |
+
-------
|
29 |
+
float
|
30 |
+
Multiplicative inverse of the shortest path distance between the nodes.
|
31 |
+
|
32 |
+
Examples
|
33 |
+
--------
|
34 |
+
>>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
|
35 |
+
>>> nx.efficiency(G, 2, 3) # this gives efficiency for node 2 and 3
|
36 |
+
0.5
|
37 |
+
|
38 |
+
Notes
|
39 |
+
-----
|
40 |
+
Edge weights are ignored when computing the shortest path distances.
|
41 |
+
|
42 |
+
See also
|
43 |
+
--------
|
44 |
+
local_efficiency
|
45 |
+
global_efficiency
|
46 |
+
|
47 |
+
References
|
48 |
+
----------
|
49 |
+
.. [1] Latora, Vito, and Massimo Marchiori.
|
50 |
+
"Efficient behavior of small-world networks."
|
51 |
+
*Physical Review Letters* 87.19 (2001): 198701.
|
52 |
+
<https://doi.org/10.1103/PhysRevLett.87.198701>
|
53 |
+
|
54 |
+
"""
|
55 |
+
try:
|
56 |
+
eff = 1 / nx.shortest_path_length(G, u, v)
|
57 |
+
except NetworkXNoPath:
|
58 |
+
eff = 0
|
59 |
+
return eff
|
60 |
+
|
61 |
+
|
62 |
+
@not_implemented_for("directed")
@nx._dispatchable
def global_efficiency(G):
    """Return the average global efficiency of the graph.

    The *efficiency* of a pair of nodes in a graph is the reciprocal of
    the shortest-path distance between them.  The *average global
    efficiency* of a graph is the mean efficiency over all ordered node
    pairs [1]_.

    Parameters
    ----------
    G : :class:`networkx.Graph`
        An undirected graph for which to compute the average global efficiency.

    Returns
    -------
    float
        The average global efficiency of the graph.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> round(nx.global_efficiency(G), 12)
    0.916666666667

    Notes
    -----
    Edge weights are ignored when computing the shortest path distances.

    See also
    --------
    local_efficiency

    References
    ----------
    .. [1] Latora, Vito, and Massimo Marchiori.
           "Efficient behavior of small-world networks."
           *Physical Review Letters* 87.19 (2001): 198701.
           <https://doi.org/10.1103/PhysRevLett.87.198701>

    """
    node_count = len(G)
    # Number of ordered node pairs; zero for graphs with < 2 nodes.
    denom = node_count * (node_count - 1)
    if denom == 0:
        # Degenerate graph (empty or a single node): efficiency is 0.
        return 0
    # TODO This can be made more efficient by computing all pairs shortest
    # path lengths in parallel.
    pair_distances = nx.all_pairs_shortest_path_length(G)
    inverse_distance_sum = sum(
        1 / distance
        for _, targets in pair_distances
        for distance in targets.values()
        if distance > 0  # skip the zero self-distance
    )
    return inverse_distance_sum / denom
|
121 |
+
|
122 |
+
|
123 |
+
@not_implemented_for("directed")
@nx._dispatchable
def local_efficiency(G):
    """Return the average local efficiency of the graph.

    The *efficiency* of a pair of nodes in a graph is the reciprocal of
    the shortest-path distance between them.  The *local efficiency* of a
    node is the average global efficiency of the subgraph induced by its
    neighbors, and the *average local efficiency* is the mean of those
    values over all nodes [1]_.

    Parameters
    ----------
    G : :class:`networkx.Graph`
        An undirected graph for which to compute the average local efficiency.

    Returns
    -------
    float
        The average local efficiency of the graph.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.local_efficiency(G)
    0.9166666666666667

    Notes
    -----
    Edge weights are ignored when computing the shortest path distances.

    See also
    --------
    global_efficiency

    References
    ----------
    .. [1] Latora, Vito, and Massimo Marchiori.
           "Efficient behavior of small-world networks."
           *Physical Review Letters* 87.19 (2001): 198701.
           <https://doi.org/10.1103/PhysRevLett.87.198701>

    """
    # G[v] is the neighbor view of v, so G.subgraph(G[v]) is the subgraph
    # induced by v's neighborhood (excluding v itself).
    # TODO This summation can be trivially parallelized.
    return sum(global_efficiency(G.subgraph(G[v])) for v in G) / len(G)
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/euler.py
ADDED
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Eulerian circuits and graphs.
|
3 |
+
"""
|
4 |
+
from itertools import combinations
|
5 |
+
|
6 |
+
import networkx as nx
|
7 |
+
|
8 |
+
from ..utils import arbitrary_element, not_implemented_for
|
9 |
+
|
10 |
+
__all__ = [
|
11 |
+
"is_eulerian",
|
12 |
+
"eulerian_circuit",
|
13 |
+
"eulerize",
|
14 |
+
"is_semieulerian",
|
15 |
+
"has_eulerian_path",
|
16 |
+
"eulerian_path",
|
17 |
+
]
|
18 |
+
|
19 |
+
|
20 |
+
@nx._dispatchable
def is_eulerian(G):
    """Returns True if and only if `G` is Eulerian.

    A graph is *Eulerian* if it has an Eulerian circuit: a closed walk
    that traverses every edge of the graph exactly once.

    Graphs with isolated vertices (i.e. vertices with zero degree) are not
    considered to have Eulerian circuits. Therefore, if the graph is not
    connected (or not strongly connected, for directed graphs), this function
    returns False.

    Parameters
    ----------
    G : NetworkX graph
       A graph, either directed or undirected.

    Examples
    --------
    >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]}))
    True
    >>> nx.is_eulerian(nx.complete_graph(5))
    True
    >>> nx.is_eulerian(nx.petersen_graph())
    False

    If you prefer to allow graphs with isolated vertices to have Eulerian circuits,
    you can first remove such vertices and then call `is_eulerian` as below example shows.

    >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
    >>> G.add_node(3)
    >>> nx.is_eulerian(G)
    False

    >>> G.remove_nodes_from(list(nx.isolates(G)))
    >>> nx.is_eulerian(G)
    True


    """
    if G.is_directed():
        # Directed case: every node balanced (in-degree == out-degree)
        # and the graph strongly connected.
        balanced = all(G.in_degree(n) == G.out_degree(n) for n in G)
        return balanced and nx.is_strongly_connected(G)
    # Undirected case: every degree even and the graph connected.
    all_degrees_even = all(deg % 2 == 0 for _, deg in G.degree())
    return all_degrees_even and nx.is_connected(G)
|
70 |
+
|
71 |
+
|
72 |
+
@nx._dispatchable
def is_semieulerian(G):
    """Return True iff `G` is semi-Eulerian.

    A graph is semi-Eulerian when it admits an Eulerian path (an open walk
    using every edge exactly once) but no Eulerian circuit.

    See Also
    --------
    has_eulerian_path
    is_eulerian
    """
    # An Eulerian graph is excluded by definition; otherwise the question
    # reduces to whether any Eulerian path exists.
    if is_eulerian(G):
        return False
    return has_eulerian_path(G)
|
84 |
+
|
85 |
+
|
86 |
+
def _find_path_start(G):
    """Return a suitable starting vertex for an Eulerian path in `G`.

    Returns None when `G` has no Eulerian path at all.
    """
    if not has_eulerian_path(G):
        return None

    # Any vertex works when a full Eulerian circuit exists.
    if is_eulerian(G):
        return arbitrary_element(G)

    if G.is_directed():
        # Exactly two vertices are unbalanced; the path must start at the
        # one whose out-degree exceeds its in-degree.
        v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v))
        return v1 if G.out_degree(v1) > G.in_degree(v1) else v2

    # Undirected: start at the first odd-degree vertex encountered.
    return next(v for v in G if G.degree(v) % 2 != 0)
|
109 |
+
|
110 |
+
|
111 |
+
def _simplegraph_eulerian_circuit(G, source):
    """Yield the edges of an Eulerian circuit in a (non-multi) graph `G`.

    Iterative stack-based construction: walk forward consuming edges until
    stuck, then emit edges while backtracking.  NOTE: this is destructive —
    it removes every traversed edge from `G`, so callers must pass a
    disposable copy.  For directed `G` the caller is expected to pass the
    *reversed* graph so that the emitted circuit runs in the original
    edge direction.
    """
    if G.is_directed():
        degree = G.out_degree
        edges = G.out_edges
    else:
        degree = G.degree
        edges = G.edges
    vertex_stack = [source]
    last_vertex = None
    while vertex_stack:
        current_vertex = vertex_stack[-1]
        if degree(current_vertex) == 0:
            # Dead end: all incident edges consumed.  Emit the edge taken
            # to get here (skipped on the very first pop) and backtrack.
            if last_vertex is not None:
                yield (last_vertex, current_vertex)
            last_vertex = current_vertex
            vertex_stack.pop()
        else:
            # Advance along any remaining incident edge, consuming it.
            _, next_vertex = arbitrary_element(edges(current_vertex))
            vertex_stack.append(next_vertex)
            G.remove_edge(current_vertex, next_vertex)
|
131 |
+
|
132 |
+
|
133 |
+
def _multigraph_eulerian_circuit(G, source):
    """Yield the edges of an Eulerian circuit in a multigraph `G` as
    ``(u, v, key)`` triples.

    Same stack-based, edge-consuming construction as
    `_simplegraph_eulerian_circuit`, except that edge keys are tracked so
    that parallel edges are distinguished.  Destructive: removes every
    traversed edge from `G`, so callers must pass a disposable copy (the
    reversed graph, for directed input).
    """
    if G.is_directed():
        degree = G.out_degree
        edges = G.out_edges
    else:
        degree = G.degree
        edges = G.edges
    vertex_stack = [(source, None)]
    last_vertex = None
    last_key = None
    while vertex_stack:
        current_vertex, current_key = vertex_stack[-1]
        if degree(current_vertex) == 0:
            # Dead end: emit the edge (with its key) used to reach this
            # vertex, then backtrack.  Skipped on the very first pop.
            if last_vertex is not None:
                yield (last_vertex, current_vertex, last_key)
            last_vertex, last_key = current_vertex, current_key
            vertex_stack.pop()
        else:
            # Advance along any remaining incident edge, remembering its
            # key so the exact parallel edge can be removed.
            triple = arbitrary_element(edges(current_vertex, keys=True))
            _, next_vertex, next_key = triple
            vertex_stack.append((next_vertex, next_key))
            G.remove_edge(current_vertex, next_vertex, next_key)
|
155 |
+
|
156 |
+
|
157 |
+
@nx._dispatchable
def eulerian_circuit(G, source=None, keys=False):
    """Returns an iterator over the edges of an Eulerian circuit in `G`.

    An *Eulerian circuit* is a closed walk that includes each edge of a
    graph exactly once.

    Parameters
    ----------
    G : NetworkX graph
       A graph, either directed or undirected.

    source : node, optional
       Starting node for circuit.

    keys : bool
       If False, edges generated by this function will be of the form
       ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``.
       This option is ignored unless `G` is a multigraph.

    Returns
    -------
    edges : iterator
       An iterator over edges in the Eulerian circuit.

    Raises
    ------
    NetworkXError
       If the graph is not Eulerian.

    See Also
    --------
    is_eulerian

    Notes
    -----
    This is a linear time implementation of an algorithm adapted from [1]_.

    For general information about Euler tours, see [2]_.

    References
    ----------
    .. [1] J. Edmonds, E. L. Johnson.
       Matching, Euler tours and the Chinese postman.
       Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
    .. [2] https://en.wikipedia.org/wiki/Eulerian_path

    Examples
    --------
    To get an Eulerian circuit in an undirected graph::

        >>> G = nx.complete_graph(3)
        >>> list(nx.eulerian_circuit(G))
        [(0, 2), (2, 1), (1, 0)]
        >>> list(nx.eulerian_circuit(G, source=1))
        [(1, 2), (2, 0), (0, 1)]

    To get the sequence of vertices in an Eulerian circuit::

        >>> [u for u, v in nx.eulerian_circuit(G)]
        [0, 2, 1]

    """
    if not is_eulerian(G):
        raise nx.NetworkXError("G is not Eulerian.")
    # The helpers consume edges destructively, so work on a disposable
    # copy.  Directed graphs are reversed so the emitted circuit follows
    # the original edge direction.
    G = G.reverse() if G.is_directed() else G.copy()
    if source is None:
        source = arbitrary_element(G)
    if not G.is_multigraph():
        yield from _simplegraph_eulerian_circuit(G, source)
        return
    for u, v, k in _multigraph_eulerian_circuit(G, source):
        yield (u, v, k) if keys else (u, v)
|
236 |
+
|
237 |
+
|
238 |
+
@nx._dispatchable
def has_eulerian_path(G, source=None):
    """Return True iff `G` has an Eulerian path.

    An Eulerian path is a path in a graph which uses each edge of a graph
    exactly once. If `source` is specified, then this function checks
    whether an Eulerian path that starts at node `source` exists.

    A directed graph has an Eulerian path iff:
        - at most one vertex has out_degree - in_degree = 1,
        - at most one vertex has in_degree - out_degree = 1,
        - every other vertex has equal in_degree and out_degree,
        - and all of its vertices belong to a single connected
          component of the underlying undirected graph.

    If `source` is not None, an Eulerian path starting at `source` exists if no
    other node has out_degree - in_degree = 1. This is equivalent to either
    there exists an Eulerian circuit or `source` has out_degree - in_degree = 1
    and the conditions above hold.

    An undirected graph has an Eulerian path iff:
        - exactly zero or two vertices have odd degree,
        - and all of its vertices belong to a single connected component.

    If `source` is not None, an Eulerian path starting at `source` exists if
    either there exists an Eulerian circuit or `source` has an odd degree and the
    conditions above hold.

    Graphs with isolated vertices (i.e. vertices with zero degree) are not considered
    to have an Eulerian path. Therefore, if the graph is not connected (or not strongly
    connected, for directed graphs), this function returns False.

    Parameters
    ----------
    G : NetworkX Graph
        The graph to find an euler path in.

    source : node, optional
        Starting node for path.

    Returns
    -------
    Bool : True if G has an Eulerian path.

    Examples
    --------
    If you prefer to allow graphs with isolated vertices to have Eulerian path,
    you can first remove such vertices and then call `has_eulerian_path` as below example shows.

    >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)])
    >>> G.add_node(3)
    >>> nx.has_eulerian_path(G)
    False

    >>> G.remove_nodes_from(list(nx.isolates(G)))
    >>> nx.has_eulerian_path(G)
    True

    See Also
    --------
    is_eulerian
    eulerian_path
    """
    # An Eulerian circuit is a fortiori an Eulerian path from any node.
    if nx.is_eulerian(G):
        return True

    if G.is_directed():
        ins = G.in_degree
        outs = G.out_degree
        # Not Eulerian, so a valid start must have one surplus out-edge.
        if source is not None and outs[source] - ins[source] != 1:
            return False

        surplus_out = 0  # nodes with out_degree - in_degree == 1
        surplus_in = 0  # nodes with in_degree - out_degree == 1
        for node in G:
            diff = outs[node] - ins[node]
            if diff == 1:
                surplus_out += 1
            elif diff == -1:
                surplus_in += 1
            elif diff != 0:
                # Imbalance larger than one edge rules out any Euler path.
                return False

        return surplus_in <= 1 and surplus_out <= 1 and nx.is_weakly_connected(G)

    # Undirected and not Eulerian: the start node must have odd degree.
    if source is not None and G.degree[source] % 2 != 1:
        return False

    # Exactly two odd-degree vertices are required (zero would mean
    # Eulerian, which was already ruled out above).
    odd_degree_count = sum(deg % 2 == 1 for _, deg in G.degree())
    return odd_degree_count == 2 and nx.is_connected(G)
|
331 |
+
|
332 |
+
|
333 |
+
@nx._dispatchable
def eulerian_path(G, source=None, keys=False):
    """Return an iterator over the edges of an Eulerian path in `G`.

    Parameters
    ----------
    G : NetworkX Graph
        The graph in which to look for an eulerian path.
    source : node or None (default: None)
        The node at which to start the search. None means search over all
        starting nodes.
    keys : Bool (default: False)
        Indicates whether to yield edge 3-tuples (u, v, edge_key).
        The default yields edge 2-tuples

    Yields
    ------
    Edge tuples along the eulerian path.

    Raises
    ------
    NetworkXError
        If `G` has no Eulerian path (possibly none starting at `source`).

    Warning: If `source` provided is not the start node of an Euler path
    will raise error even if an Euler Path exists.
    """
    if not has_eulerian_path(G, source):
        raise nx.NetworkXError("Graph has no Eulerian paths.")
    if G.is_directed():
        # Reverse so the circuit helpers emit edges in original direction.
        G = G.reverse()
        # A user-supplied source is only honoured when the (reversed) graph
        # is Eulerian; otherwise the forced start vertex is recomputed.
        if source is None or nx.is_eulerian(G) is False:
            source = _find_path_start(G)
        if G.is_multigraph():
            for u, v, k in _multigraph_eulerian_circuit(G, source):
                if keys:
                    yield u, v, k
                else:
                    yield u, v
        else:
            yield from _simplegraph_eulerian_circuit(G, source)
    else:
        # Undirected: helpers are destructive, so operate on a copy.
        G = G.copy()
        if source is None:
            source = _find_path_start(G)
        # The helpers produce the walk back-to-front here, so each edge is
        # flipped and the whole sequence reversed before yielding.
        if G.is_multigraph():
            if keys:
                yield from reversed(
                    [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)]
                )
            else:
                yield from reversed(
                    [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)]
                )
        else:
            yield from reversed(
                [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)]
            )
|
386 |
+
|
387 |
+
|
388 |
+
@not_implemented_for("directed")
@nx._dispatchable(returns_graph=True)
def eulerize(G):
    """Transforms a graph into an Eulerian graph.

    If `G` is Eulerian the result is `G` as a MultiGraph, otherwise the result is a smallest
    (in terms of the number of edges) multigraph whose underlying simple graph is `G`.

    Parameters
    ----------
    G : NetworkX graph
       An undirected graph

    Returns
    -------
    G : NetworkX multigraph

    Raises
    ------
    NetworkXError
       If the graph is not connected.

    See Also
    --------
    is_eulerian
    eulerian_circuit

    References
    ----------
    .. [1] J. Edmonds, E. L. Johnson.
       Matching, Euler tours and the Chinese postman.
       Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
    .. [2] https://en.wikipedia.org/wiki/Eulerian_path
    .. [3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf

    Examples
    --------
        >>> G = nx.complete_graph(10)
        >>> H = nx.eulerize(G)
        >>> nx.is_eulerian(H)
        True

    """
    if G.order() == 0:
        raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph")
    if not nx.is_connected(G):
        raise nx.NetworkXError("G is not connected")
    odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1]
    G = nx.MultiGraph(G)
    # Every degree already even: the multigraph copy is already Eulerian.
    if len(odd_degree_nodes) == 0:
        return G

    # get all shortest paths between vertices of odd degree
    odd_deg_pairs_paths = [
        (m, {n: nx.shortest_path(G, source=m, target=n)})
        for m, n in combinations(odd_degree_nodes, 2)
    ]

    # use the number of vertices in a graph + 1 as an upper bound on
    # the maximum length of a path in G
    upper_bound_on_max_path_length = len(G) + 1

    # use "len(G) + 1 - len(P)",
    # where P is a shortest path between vertices n and m,
    # as edge-weights in a new graph
    # store the paths in the graph for easy indexing later
    Gp = nx.Graph()
    for n, Ps in odd_deg_pairs_paths:
        for m, P in Ps.items():
            if n != m:
                Gp.add_edge(
                    m, n, weight=upper_bound_on_max_path_length - len(P), path=P
                )

    # find the minimum weight matching of edges in the weighted graph
    # (max-weight matching under the inverted weights above pairs up the
    # odd-degree vertices along shortest connecting paths)
    best_matching = nx.Graph(list(nx.max_weight_matching(Gp)))

    # duplicate each edge along each path in the set of paths in Gp
    # so every previously-odd vertex gains one incident edge per path end
    for m, n in best_matching.edges():
        path = Gp[m][n]["path"]
        G.add_edges_from(nx.utils.pairwise(path))
    return G
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/graph_hashing.py
ADDED
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Functions for hashing graphs to strings.
|
3 |
+
Isomorphic graphs should be assigned identical hashes.
|
4 |
+
For now, only Weisfeiler-Lehman hashing is implemented.
|
5 |
+
"""
|
6 |
+
|
7 |
+
from collections import Counter, defaultdict
|
8 |
+
from hashlib import blake2b
|
9 |
+
|
10 |
+
import networkx as nx
|
11 |
+
|
12 |
+
__all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"]
|
13 |
+
|
14 |
+
|
15 |
+
def _hash_label(label, digest_size):
|
16 |
+
return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest()
|
17 |
+
|
18 |
+
|
19 |
+
def _init_node_labels(G, edge_attr, node_attr):
    """Build the initial WL label for every node of `G`.

    Uses the `node_attr` value when given; otherwise an empty label when
    only edge attributes drive the hash; otherwise the node degree.
    """
    if node_attr:
        return {node: str(data[node_attr]) for node, data in G.nodes(data=True)}
    if edge_attr:
        # Edge-labels-only mode: start every node from the empty string.
        return {node: "" for node in G}
    # Attribute-free mode: the degree is the canonical initial label.
    return {node: str(degree) for node, degree in G.degree()}
|
26 |
+
|
27 |
+
|
28 |
+
def _neighborhood_aggregate(G, node, node_labels, edge_attr=None):
    """Return the aggregated neighborhood label string for `node`.

    Concatenates the node's own label with the sorted labels of its
    neighbors, each optionally prefixed by the connecting edge's
    `edge_attr` value.
    """
    neighbor_labels = sorted(
        ("" if edge_attr is None else str(G[node][nbr][edge_attr]))
        + node_labels[nbr]
        for nbr in G.neighbors(node)
    )
    return node_labels[node] + "".join(neighbor_labels)
|
38 |
+
|
39 |
+
|
40 |
+
@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
def weisfeiler_lehman_graph_hash(
    G, edge_attr=None, node_attr=None, iterations=3, digest_size=16
):
    """Return Weisfeiler Lehman (WL) graph hash.

    The function iteratively aggregates and hashes neighborhoods of each node.
    After each node's neighbors are hashed to obtain updated node labels,
    a hashed histogram of resulting labels is returned as the final hash.

    Hashes are identical for isomorphic graphs and strong guarantees that
    non-isomorphic graphs will get different hashes. See [1]_ for details.

    If no node or edge attributes are provided, the degree of each node
    is used as its initial label.
    Otherwise, node and/or edge labels are used to compute the hash.

    Parameters
    ----------
    G : graph
        The graph to be hashed.
        Can have node and/or edge attributes. Can also have no attributes.
    edge_attr : string, optional (default=None)
        The key in edge attribute dictionary to be used for hashing.
        If None, edge labels are ignored.
    node_attr: string, optional (default=None)
        The key in node attribute dictionary to be used for hashing.
        If None, and no edge_attr given, use the degrees of the nodes as labels.
    iterations: int, optional (default=3)
        Number of neighbor aggregations to perform.
        Should be larger for larger graphs.
    digest_size: int, optional (default=16)
        Size (in bits) of blake2b hash digest to use for hashing node labels.

    Returns
    -------
    h : string
        Hexadecimal string corresponding to hash of the input graph.

    Examples
    --------
    Two graphs with edge attributes that are isomorphic, except for
    differences in the edge labels.

    >>> G1 = nx.Graph()
    >>> G1.add_edges_from(
    ...     [
    ...         (1, 2, {"label": "A"}),
    ...         (2, 3, {"label": "A"}),
    ...         (3, 1, {"label": "A"}),
    ...         (1, 4, {"label": "B"}),
    ...     ]
    ... )
    >>> G2 = nx.Graph()
    >>> G2.add_edges_from(
    ...     [
    ...         (5, 6, {"label": "B"}),
    ...         (6, 7, {"label": "A"}),
    ...         (7, 5, {"label": "A"}),
    ...         (7, 8, {"label": "A"}),
    ...     ]
    ... )

    Omitting the `edge_attr` option, results in identical hashes.

    >>> nx.weisfeiler_lehman_graph_hash(G1)
    '7bc4dde9a09d0b94c5097b219891d81a'
    >>> nx.weisfeiler_lehman_graph_hash(G2)
    '7bc4dde9a09d0b94c5097b219891d81a'

    With edge labels, the graphs are no longer assigned
    the same hash digest.

    >>> nx.weisfeiler_lehman_graph_hash(G1, edge_attr="label")
    'c653d85538bcf041d88c011f4f905f10'
    >>> nx.weisfeiler_lehman_graph_hash(G2, edge_attr="label")
    '3dcd84af1ca855d0eff3c978d88e7ec7'

    Notes
    -----
    To return the WL hashes of each subgraph of a graph, use
    `weisfeiler_lehman_subgraph_hashes`

    Similarity between hashes does not imply similarity between graphs.

    References
    ----------
    .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen,
       Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman
       Graph Kernels. Journal of Machine Learning Research. 2011.
       http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf

    See also
    --------
    weisfeiler_lehman_subgraph_hashes
    """

    def relabel_once(current_labels):
        # One WL refinement sweep: replace each node's label with the hash
        # of its aggregated 1-hop neighborhood.
        return {
            node: _hash_label(
                _neighborhood_aggregate(G, node, current_labels, edge_attr=edge_attr),
                digest_size,
            )
            for node in G.nodes()
        }

    # Initial labels come from node attributes, edge-attr mode, or degrees.
    node_labels = _init_node_labels(G, edge_attr, node_attr)

    # Accumulate a sorted label histogram after every refinement sweep;
    # the final graph hash digests the full sequence of histograms.
    label_histograms = []
    for _ in range(iterations):
        node_labels = relabel_once(node_labels)
        label_histograms.extend(sorted(Counter(node_labels.values()).items()))

    return _hash_label(str(tuple(label_histograms)), digest_size)
|
161 |
+
|
162 |
+
|
163 |
+
@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
def weisfeiler_lehman_subgraph_hashes(
    G,
    edge_attr=None,
    node_attr=None,
    iterations=3,
    digest_size=16,
    include_initial_labels=False,
):
    """
    Return a dictionary of subgraph hashes by node.

    Dictionary keys are nodes in `G`, and values are a list of hashes.
    Each hash corresponds to a subgraph rooted at a given node u in `G`.
    Lists of subgraph hashes are sorted in increasing order of depth from
    their root node, with the hash at index i corresponding to a subgraph
    of nodes at most i edges distance from u. Thus, each list will contain
    `iterations` elements - a hash for a subgraph at each depth. If
    `include_initial_labels` is set to `True`, each list will additionally
    have a hash of the initial node label (or equivalently a
    subgraph of depth 0) prepended, totalling ``iterations + 1`` elements.

    The function iteratively aggregates and hashes neighborhoods of each node.
    This is achieved for each step by replacing for each node its label from
    the previous iteration with its hashed 1-hop neighborhood aggregate.
    The new node label is then appended to a list of node labels for each
    node.

    To aggregate neighborhoods for a node $u$ at each step, all labels of
    nodes adjacent to $u$ are concatenated. If the `edge_attr` parameter is set,
    labels for each neighboring node are prefixed with the value of this attribute
    along the connecting edge from this neighbor to node $u$. The resulting string
    is then hashed to compress this information into a fixed digest size.

    Thus, at the $i$-th iteration, nodes within $i$ hops influence any given
    hashed node label. We can therefore say that at depth $i$ for node $u$
    we have a hash for a subgraph induced by the $i$-hop neighborhood of $u$.

    The output can be used to create general Weisfeiler-Lehman graph kernels,
    or generate features for graphs or nodes - for example to generate 'words' in
    a graph as seen in the 'graph2vec' algorithm.
    See [1]_ & [2]_ respectively for details.

    Hashes are identical for isomorphic subgraphs and there exist strong
    guarantees that non-isomorphic graphs will get different hashes.
    See [1]_ for details.

    If no node or edge attributes are provided, the degree of each node
    is used as its initial label.
    Otherwise, node and/or edge labels are used to compute the hash.

    Parameters
    ----------
    G : graph
        The graph to be hashed.
        Can have node and/or edge attributes. Can also have no attributes.
    edge_attr : string, optional (default=None)
        The key in edge attribute dictionary to be used for hashing.
        If None, edge labels are ignored.
    node_attr : string, optional (default=None)
        The key in node attribute dictionary to be used for hashing.
        If None, and no edge_attr given, use the degrees of the nodes as labels.
        If None, and edge_attr is given, each node starts with an identical label.
    iterations : int, optional (default=3)
        Number of neighbor aggregations to perform.
        Should be larger for larger graphs.
    digest_size : int, optional (default=16)
        Size (in bits) of blake2b hash digest to use for hashing node labels.
        The default size is 16 bits.
    include_initial_labels : bool, optional (default=False)
        If True, include the hashed initial node label as the first subgraph
        hash for each node.

    Returns
    -------
    node_subgraph_hashes : dict
        A dictionary with each key given by a node in G, and each value given
        by the subgraph hashes in order of depth from the key node.

    Examples
    --------
    Finding similar nodes in different graphs:

    >>> G1 = nx.Graph()
    >>> G1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 7)])
    >>> G2 = nx.Graph()
    >>> G2.add_edges_from([(1, 3), (2, 3), (1, 6), (1, 5), (4, 6)])
    >>> g1_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=3, digest_size=8)
    >>> g2_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2, iterations=3, digest_size=8)

    Even though G1 and G2 are not isomorphic (they have different numbers of edges),
    the hash sequence of depth 3 for node 1 in G1 and node 5 in G2 are similar:

    >>> g1_hashes[1]
    ['a93b64973cfc8897', 'db1b43ae35a1878f', '57872a7d2059c1c0']
    >>> g2_hashes[5]
    ['a93b64973cfc8897', 'db1b43ae35a1878f', '1716d2a4012fa4bc']

    The first 2 WL subgraph hashes match. From this we can conclude that it's very
    likely the neighborhood of 2 hops around these nodes are isomorphic.

    However the 3-hop neighborhoods of ``G1`` and ``G2`` are not isomorphic since the
    3rd hashes in the lists above are not equal.

    These nodes may be candidates to be classified together since their local topology
    is similar.

    Notes
    -----
    To hash the full graph when subgraph hashes are not needed, use
    `weisfeiler_lehman_graph_hash` for efficiency.

    Similarity between hashes does not imply similarity between graphs.

    References
    ----------
    .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen,
       Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman
       Graph Kernels. Journal of Machine Learning Research. 2011.
       http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf
    .. [2] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan,
       Lihui Chen, Yang Liu and Shantanu Jaiswa. graph2vec: Learning
       Distributed Representations of Graphs. arXiv. 2017
       https://arxiv.org/pdf/1707.05005.pdf

    See also
    --------
    weisfeiler_lehman_graph_hash
    """

    def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None):
        """
        Apply neighborhood aggregation to each node in the graph.

        Computes a dictionary with labels for each node.
        Appends the new hashed label to the dictionary of subgraph hashes
        originating from and indexed by each node in G.
        """
        new_labels = {}
        for node in G.nodes():
            label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
            hashed_label = _hash_label(label, digest_size)
            new_labels[node] = hashed_label
            # Record this depth's hash in the per-node subgraph-hash history.
            node_subgraph_hashes[node].append(hashed_label)
        return new_labels

    # Set initial node labels (degree-, node-attr- or edge-attr-based).
    node_labels = _init_node_labels(G, edge_attr, node_attr)
    if include_initial_labels:
        # Seed each node's list with the hash of its depth-0 label.
        node_subgraph_hashes = {
            k: [_hash_label(v, digest_size)] for k, v in node_labels.items()
        }
    else:
        node_subgraph_hashes = defaultdict(list)

    for _ in range(iterations):
        # Pass edge_attr by keyword for consistency with the aggregate helper.
        node_labels = weisfeiler_lehman_step(
            G, node_labels, node_subgraph_hashes, edge_attr=edge_attr
        )

    return dict(node_subgraph_hashes)
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/graphical.py
ADDED
@@ -0,0 +1,483 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Test sequences for graphiness.
|
2 |
+
"""
|
3 |
+
import heapq
|
4 |
+
|
5 |
+
import networkx as nx
|
6 |
+
|
7 |
+
__all__ = [
|
8 |
+
"is_graphical",
|
9 |
+
"is_multigraphical",
|
10 |
+
"is_pseudographical",
|
11 |
+
"is_digraphical",
|
12 |
+
"is_valid_degree_sequence_erdos_gallai",
|
13 |
+
"is_valid_degree_sequence_havel_hakimi",
|
14 |
+
]
|
15 |
+
|
16 |
+
|
17 |
+
@nx._dispatchable(graphs=None)
def is_graphical(sequence, method="eg"):
    """Returns True if sequence is a valid degree sequence.

    A degree sequence is valid if some graph can realize it.

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    method : "eg" | "hh" (default: 'eg')
        The validation algorithm to use: "eg" selects the Erdős-Gallai
        criterion [EG1960]_, [choudum1986]_, and "hh" selects the
        Havel-Hakimi reduction [havel1955]_, [hakimi1962]_, [CL1996]_.

    Returns
    -------
    valid : bool
        True if the sequence is a valid degree sequence and False if not.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> sequence = (d for n, d in G.degree())
    >>> nx.is_graphical(sequence)
    True

    To test a non-graphical sequence:
    >>> sequence_list = [d for n, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_graphical(sequence_list)
    False

    References
    ----------
    .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960.
    .. [choudum1986] S.A. Choudum. "A simple proof of the Erdős-Gallai theorem on
       graph sequences." Bulletin of the Australian Mathematical Society, 33,
       pp 67-70, 1986. https://doi.org/10.1017/S0004972700002872
    .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
       Casopis Pest. Mat. 80, 477-480, 1955.
    .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
       Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
    .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
       Chapman and Hall/CRC, 1996.
    """
    # Dispatch on the requested validation algorithm.
    validators = {
        "eg": is_valid_degree_sequence_erdos_gallai,
        "hh": is_valid_degree_sequence_havel_hakimi,
    }
    if method not in validators:
        raise nx.NetworkXException("`method` must be 'eg' or 'hh'")
    # Materialize the (possibly one-shot) iterable before validating.
    return validators[method](list(sequence))
|
74 |
+
|
75 |
+
|
76 |
+
def _basic_graphical_tests(deg_sequence):
    """Run cheap feasibility checks shared by the graphicality validators.

    Coerces *deg_sequence* to a list of ints and tallies the non-zero
    degrees. Returns ``(dmax, dmin, dsum, n, num_degs)`` where ``n`` and
    ``dsum`` count and sum only the non-zero degrees and ``num_degs[d]``
    is the number of nodes of degree ``d``.

    Raises ``nx.NetworkXUnfeasible`` if any degree is negative or at least
    the sequence length, or if the sum of degrees is odd or exceeds the
    simple-graph bound ``n * (n - 1)``.
    """
    degrees = nx.utils.make_list_of_ints(deg_sequence)
    p = len(degrees)
    num_degs = [0] * p
    dmax = 0
    dmin = p
    dsum = 0
    n = 0
    for d in degrees:
        # A simple graph on p nodes admits only degrees in [0, p - 1].
        if not 0 <= d < p:
            raise nx.NetworkXUnfeasible
        if d:
            # Only non-zero degrees participate in the statistics.
            dmax = max(dmax, d)
            dmin = min(dmin, d)
            dsum += d
            n += 1
            num_degs[d] += 1
    # Handshake lemma (even sum) plus the saturation bound for simple graphs.
    if dsum % 2 or dsum > n * (n - 1):
        raise nx.NetworkXUnfeasible
    return dmax, dmin, dsum, n, num_degs
|
94 |
+
|
95 |
+
|
96 |
+
@nx._dispatchable(graphs=None)
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
    r"""Returns True if deg_sequence can be realized by a simple graph.

    The validation proceeds using the Havel-Hakimi theorem
    [havel1955]_, [hakimi1962]_, [CL1996]_.
    Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.

    Parameters
    ----------
    deg_sequence : list
        A list of integers where each element specifies the degree of a node
        in a graph.

    Returns
    -------
    valid : bool
        True if deg_sequence is graphical and False if not.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_valid_degree_sequence_havel_hakimi(sequence)
    True

    To test a non-valid sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_valid_degree_sequence_havel_hakimi(sequence_list)
    False

    Notes
    -----
    The ZZ condition says that for the sequence d if

    .. math::
        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

    then d is graphical.  This was shown in Theorem 6 in [1]_.

    References
    ----------
    .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
    .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs"
       Casopis Pest. Mat. 80, 477-480, 1955.
    .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as
       Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962.
    .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
       Chapman and Hall/CRC, 1996.
    """
    try:
        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
    except nx.NetworkXUnfeasible:
        return False
    # Accept if sequence has no non-zero degrees or passes the ZZ condition
    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
        return True

    # The reduction below works on the bucket counts in num_degs rather than
    # on a sorted sequence, so each Havel-Hakimi step costs O(dmax).
    modstubs = [0] * (dmax + 1)
    # Successively reduce degree sequence by removing the maximum degree
    while n > 0:
        # Retrieve the maximum degree in the sequence
        while num_degs[dmax] == 0:
            dmax -= 1
        # If there are not enough stubs to connect to, then the sequence is
        # not graphical
        if dmax > n - 1:
            return False

        # Remove largest stub in list
        num_degs[dmax], n = num_degs[dmax] - 1, n - 1
        # Reduce the next dmax largest stubs
        mslen = 0
        k = dmax
        for i in range(dmax):
            while num_degs[k] == 0:
                k -= 1
            num_degs[k], n = num_degs[k] - 1, n - 1
            # Degrees reduced to zero drop out of the sequence entirely.
            if k > 1:
                modstubs[mslen] = k - 1
                mslen += 1
        # Add back to the list any non-zero stubs that were removed
        for i in range(mslen):
            stub = modstubs[i]
            num_degs[stub], n = num_degs[stub] + 1, n + 1
    return True
|
184 |
+
|
185 |
+
|
186 |
+
@nx._dispatchable(graphs=None)
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
    r"""Returns True if deg_sequence can be realized by a simple graph.

    The validation is done using the Erdős-Gallai theorem [EG1960]_.

    Parameters
    ----------
    deg_sequence : list
        A list of integers

    Returns
    -------
    valid : bool
        True if deg_sequence is graphical and False if not.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_valid_degree_sequence_erdos_gallai(sequence)
    True

    To test a non-valid sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_valid_degree_sequence_erdos_gallai(sequence_list)
    False

    Notes
    -----

    This implementation uses an equivalent form of the Erdős-Gallai criterion.
    Worst-case run time is $O(n)$ where $n$ is the length of the sequence.

    Specifically, a sequence d is graphical if and only if the
    sum of the sequence is even and for all strong indices k in the sequence,

    .. math::

        \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k)
             = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )

    A strong index k is any index where d_k >= k and the value n_j is the
    number of occurrences of j in d. The maximal strong index is called the
    Durfee index.

    This particular rearrangement comes from the proof of Theorem 3 in [2]_.

    The ZZ condition says that for the sequence d if

    .. math::
        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

    then d is graphical.  This was shown in Theorem 6 in [2]_.

    References
    ----------
    .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
       Discrete Mathematics, 265, pp. 417-420 (2003).
    .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
    .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960.
    """
    try:
        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
    except nx.NetworkXUnfeasible:
        return False
    # Accept if sequence has no non-zero degrees or passes the ZZ condition
    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
        return True

    # Perform the EG checks using the reformulation of Zverovich and Zverovich.
    # Degrees are walked from largest to smallest; k tracks how many of the
    # largest degrees (candidate strong indices) have been consumed so far.
    k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
    for dk in range(dmax, dmin - 1, -1):
        if dk < k + 1:  # Check if already past Durfee index
            return True
        if num_degs[dk] > 0:
            run_size = num_degs[dk]  # Process a run of identical-valued degrees
            if dk < k + run_size:  # Check if end of run is past Durfee index
                run_size = dk - k  # Adjust back to Durfee index
            sum_deg += run_size * dk
            for v in range(run_size):
                sum_nj += num_degs[k + v]
                sum_jnj += (k + v) * num_degs[k + v]
            k += run_size
            # Reject as soon as the Erdős-Gallai inequality fails at index k.
            if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj:
                return False
    return True
|
275 |
+
|
276 |
+
|
277 |
+
@nx._dispatchable(graphs=None)
def is_multigraphical(sequence):
    """Returns True if some multigraph can realize the sequence.

    Parameters
    ----------
    sequence : list
        A list of integers

    Returns
    -------
    valid : bool
        True if deg_sequence is a multigraphic degree sequence and False if not.

    Examples
    --------
    >>> G = nx.MultiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_multigraphical(sequence)
    True

    To test a non-multigraphical sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_multigraphical(sequence_list)
    False

    Notes
    -----
    A sequence is multigraphic iff all entries are non-negative, the sum is
    even, and no single degree exceeds the sum of all the others (Hakimi's
    criterion [1]_).  The worst-case run time is $O(n)$ where $n$ is the
    length of the sequence.

    References
    ----------
    .. [1] S. L. Hakimi. "On the realizability of a set of integers as
       degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
       (1962).
    """
    try:
        degrees = nx.utils.make_list_of_ints(sequence)
    except nx.NetworkXError:
        return False
    if any(d < 0 for d in degrees):
        return False
    total = sum(degrees)
    largest = max(degrees, default=0)
    # Even sum, and the largest degree must be matchable by the remainder.
    return total % 2 == 0 and total >= 2 * largest
|
326 |
+
|
327 |
+
|
328 |
+
@nx._dispatchable(graphs=None)
def is_pseudographical(sequence):
    """Returns True if some pseudograph can realize the sequence.

    Every nonnegative integer sequence with an even sum is pseudographical
    (see [1]_).

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    Returns
    -------
    valid : bool
        True if the sequence is a pseudographic degree sequence and False if not.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> sequence = (d for _, d in G.degree())
    >>> nx.is_pseudographical(sequence)
    True

    To test a non-pseudographical sequence:
    >>> sequence_list = [d for _, d in G.degree()]
    >>> sequence_list[-1] += 1
    >>> nx.is_pseudographical(sequence_list)
    False

    Notes
    -----
    The worst-case run time is $O(n)$ where n is the length of the sequence.

    References
    ----------
    .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
       and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
       pp. 778-782 (1976).
    """
    try:
        deg_sequence = nx.utils.make_list_of_ints(sequence)
    except nx.NetworkXError:
        return False
    # An empty sequence is realized by the null pseudograph; without this
    # guard, min() below would raise ValueError.  This also matches
    # is_multigraphical, which returns True for an empty sequence.
    if not deg_sequence:
        return True
    return sum(deg_sequence) % 2 == 0 and min(deg_sequence) >= 0
|
373 |
+
|
374 |
+
|
375 |
+
@nx._dispatchable(graphs=None)
def is_digraphical(in_sequence, out_sequence):
    r"""Returns True if some directed graph can realize the in- and out-degree
    sequences.

    Parameters
    ----------
    in_sequence : list or iterable container
        A sequence of integer node in-degrees

    out_sequence : list or iterable container
        A sequence of integer node out-degrees

    Returns
    -------
    valid : bool
        True if in and out-sequences are digraphic False if not.

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)])
    >>> in_seq = (d for n, d in G.in_degree())
    >>> out_seq = (d for n, d in G.out_degree())
    >>> nx.is_digraphical(in_seq, out_seq)
    True

    To test a non-digraphical scenario:
    >>> in_seq_list = [d for n, d in G.in_degree()]
    >>> in_seq_list[-1] += 1
    >>> nx.is_digraphical(in_seq_list, out_seq)
    False

    Notes
    -----
    This algorithm is from Kleitman and Wang [1]_.
    The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the
    sum and length of the sequences respectively.

    References
    ----------
    .. [1] D.J. Kleitman and D.L. Wang
       Algorithms for Constructing Graphs and Digraphs with Given Valences
       and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
    """
    try:
        in_deg_sequence = nx.utils.make_list_of_ints(in_sequence)
        out_deg_sequence = nx.utils.make_list_of_ints(out_sequence)
    except nx.NetworkXError:
        return False
    # Process the sequences and form two heaps to store degree pairs with
    # either zero or non-zero out degrees.
    # NOTE: degree values are stored negated throughout so that Python's
    # min-heaps behave as max-heaps.
    sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
    maxn = max(nin, nout)
    maxin = 0
    if maxn == 0:
        return True
    stubheap, zeroheap = [], []
    for n in range(maxn):
        in_deg, out_deg = 0, 0
        # A shorter sequence is implicitly padded with zero degrees.
        if n < nout:
            out_deg = out_deg_sequence[n]
        if n < nin:
            in_deg = in_deg_sequence[n]
        if in_deg < 0 or out_deg < 0:
            return False
        sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
        if in_deg > 0:
            stubheap.append((-1 * out_deg, -1 * in_deg))
        elif out_deg > 0:
            zeroheap.append(-1 * out_deg)
    # Every edge contributes one in-stub and one out-stub.
    if sumin != sumout:
        return False
    heapq.heapify(stubheap)
    heapq.heapify(zeroheap)

    modstubs = [(0, 0)] * (maxin + 1)
    # Successively reduce degree sequence by removing the maximum out degree
    while stubheap:
        # Take the first value in the sequence with non-zero in degree
        (freeout, freein) = heapq.heappop(stubheap)
        freein *= -1
        # Not enough other nodes remain to absorb all the in-stubs.
        if freein > len(stubheap) + len(zeroheap):
            return False

        # Attach out stubs to the nodes with the most in stubs
        mslen = 0
        for i in range(freein):
            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
                stubout = heapq.heappop(zeroheap)
                stubin = 0
            else:
                (stubout, stubin) = heapq.heappop(stubheap)
                if stubout == 0:
                    return False
            # Check if target is now totally connected
            if stubout + 1 < 0 or stubin < 0:
                modstubs[mslen] = (stubout + 1, stubin)
                mslen += 1

        # Add back the nodes to the heap that still have available stubs
        for i in range(mslen):
            stub = modstubs[i]
            if stub[1] < 0:
                heapq.heappush(stubheap, stub)
            else:
                heapq.heappush(zeroheap, stub[0])
        if freeout < 0:
            heapq.heappush(zeroheap, freeout)
    return True
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/hierarchy.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Flow Hierarchy.
|
3 |
+
"""
|
4 |
+
import networkx as nx
|
5 |
+
|
6 |
+
__all__ = ["flow_hierarchy"]
|
7 |
+
|
8 |
+
|
9 |
+
@nx._dispatchable(edge_attrs="weight")
def flow_hierarchy(G, weight=None):
    """Returns the flow hierarchy of a directed network.

    Flow hierarchy is defined as the fraction of edges not participating
    in cycles in a directed graph [1]_.

    Parameters
    ----------
    G : DiGraph or MultiDiGraph
        A directed graph

    weight : string, optional (default=None)
        Attribute to use for edge weights. If None the weight defaults to 1.

    Returns
    -------
    h : float
        Flow hierarchy value

    Raises
    ------
    NetworkXError
        If `G` is not directed, or if the (weighted) number of edges is
        zero, in which case the fraction is undefined.

    Notes
    -----
    The algorithm described in [1]_ computes the flow hierarchy through
    exponentiation of the adjacency matrix.  This function implements an
    alternative approach that finds strongly connected components.
    An edge is in a cycle if and only if it is in a strongly connected
    component, which can be found in $O(m)$ time using Tarjan's algorithm.

    References
    ----------
    .. [1] Luo, J.; Magee, C.L. (2011),
       Detecting evolving patterns of self-organizing networks by flow
       hierarchy measurement, Complexity, Volume 16 Issue 6 53-61.
       DOI: 10.1002/cplx.20368
       http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf
    """
    if not G.is_directed():
        raise nx.NetworkXError("G must be a digraph in flow_hierarchy")
    # Compute the denominator once; a zero total (no edges, or weights
    # summing to zero) would otherwise raise a bare ZeroDivisionError.
    total = G.size(weight)
    if total == 0:
        raise nx.NetworkXError("flow_hierarchy is undefined when G.size(weight) is zero")
    scc = nx.strongly_connected_components(G)
    # Edges inside a strongly connected component are exactly the cyclic edges.
    return 1 - sum(G.subgraph(c).size(weight) for c in scc) / total
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/isolate.py
ADDED
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Functions for identifying isolate (degree zero) nodes.
|
3 |
+
"""
|
4 |
+
import networkx as nx
|
5 |
+
|
6 |
+
__all__ = ["is_isolate", "isolates", "number_of_isolates"]
|
7 |
+
|
8 |
+
|
9 |
+
@nx._dispatchable
def is_isolate(G, n):
    """Determines whether a node is an isolate.

    An *isolate* is a node with no neighbors, i.e. a node of degree zero.
    In a directed graph this means it has neither in-neighbors nor
    out-neighbors.

    Parameters
    ----------
    G : NetworkX graph

    n : node
        A node in `G`.

    Returns
    -------
    is_isolate : bool
        True if and only if `n` has no neighbors.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edge(1, 2)
    >>> G.add_node(3)
    >>> nx.is_isolate(G, 2)
    False
    >>> nx.is_isolate(G, 3)
    True
    """
    # A node is isolated exactly when its (total) degree is zero.
    return not G.degree(n)
|
40 |
+
|
41 |
+
|
42 |
+
@nx._dispatchable
def isolates(G):
    """Iterator over isolates in the graph.

    An *isolate* is a node with no neighbors, i.e. a node of degree zero.
    In a directed graph this means it has neither in-neighbors nor
    out-neighbors.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    iterator
        An iterator over the isolates of `G`.

    Examples
    --------
    Materialize all isolates of a graph with the :class:`list`
    constructor::

        >>> G = nx.Graph()
        >>> G.add_edge(1, 2)
        >>> G.add_node(3)
        >>> list(nx.isolates(G))
        [3]

    Remove all isolates by collecting them first and then calling
    :meth:`Graph.remove_nodes_from`::

        >>> G.remove_nodes_from(list(nx.isolates(G)))
        >>> list(G)
        [1, 2]

    For digraphs, an isolate has zero in-degree and zero out-degree::

        >>> G = nx.DiGraph([(0, 1), (1, 2)])
        >>> G.add_node(3)
        >>> list(nx.isolates(G))
        [3]

    """
    # Lazily filter the degree view for degree-zero nodes.
    return (node for node, deg in G.degree() if not deg)
|
86 |
+
|
87 |
+
|
88 |
+
@nx._dispatchable
def number_of_isolates(G):
    """Return the number of isolates in the graph.

    An *isolate* is a node of degree zero.  For directed graphs this
    means the node has neither in-neighbors nor out-neighbors.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    int
        The number of degree zero nodes in the graph `G`.

    """
    # TODO This can be parallelized.
    count = 0
    for _ in isolates(G):
        count += 1
    return count
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/link_prediction.py
ADDED
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Link prediction algorithms.
|
3 |
+
"""
|
4 |
+
|
5 |
+
|
6 |
+
from math import log
|
7 |
+
|
8 |
+
import networkx as nx
|
9 |
+
from networkx.utils import not_implemented_for
|
10 |
+
|
11 |
+
__all__ = [
|
12 |
+
"resource_allocation_index",
|
13 |
+
"jaccard_coefficient",
|
14 |
+
"adamic_adar_index",
|
15 |
+
"preferential_attachment",
|
16 |
+
"cn_soundarajan_hopcroft",
|
17 |
+
"ra_index_soundarajan_hopcroft",
|
18 |
+
"within_inter_cluster",
|
19 |
+
"common_neighbor_centrality",
|
20 |
+
]
|
21 |
+
|
22 |
+
|
23 |
+
def _apply_prediction(G, func, ebunch=None):
|
24 |
+
"""Applies the given function to each edge in the specified iterable
|
25 |
+
of edges.
|
26 |
+
|
27 |
+
`G` is an instance of :class:`networkx.Graph`.
|
28 |
+
|
29 |
+
`func` is a function on two inputs, each of which is a node in the
|
30 |
+
graph. The function can return anything, but it should return a
|
31 |
+
value representing a prediction of the likelihood of a "link"
|
32 |
+
joining the two nodes.
|
33 |
+
|
34 |
+
`ebunch` is an iterable of pairs of nodes. If not specified, all
|
35 |
+
non-edges in the graph `G` will be used.
|
36 |
+
|
37 |
+
"""
|
38 |
+
if ebunch is None:
|
39 |
+
ebunch = nx.non_edges(G)
|
40 |
+
else:
|
41 |
+
for u, v in ebunch:
|
42 |
+
if u not in G:
|
43 |
+
raise nx.NodeNotFound(f"Node {u} not in G.")
|
44 |
+
if v not in G:
|
45 |
+
raise nx.NodeNotFound(f"Node {v} not in G.")
|
46 |
+
return ((u, v, func(u, v)) for u, v in ebunch)
|
47 |
+
|
48 |
+
|
49 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def resource_allocation_index(G, ebunch=None):
    r"""Compute the resource allocation index of all node pairs in ebunch.

    The resource allocation index of `u` and `v` is defined as

    .. math::

        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|}

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Resource allocation index will be computed for each pair of
        nodes given in the iterable. The pairs must be given as
        2-tuples (u, v) where u and v are nodes in the graph. If ebunch
        is None then all nonexistent edges in the graph will be used.
        Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their resource allocation index.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p:.8f}")
    (0, 1) -> 0.75000000
    (2, 3) -> 0.75000000

    References
    ----------
    .. [1] T. Zhou, L. Lu, Y.-C. Zhang.
       Predicting missing links via local information.
       Eur. Phys. J. B 71 (2009) 623.
       https://arxiv.org/pdf/0901.0553.pdf
    """

    def predict(u, v):
        # Each shared neighbor contributes the reciprocal of its degree.
        total = 0
        for w in nx.common_neighbors(G, u, v):
            total += 1 / G.degree(w)
        return total

    return _apply_prediction(G, predict, ebunch)
|
110 |
+
|
111 |
+
|
112 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def jaccard_coefficient(G, ebunch=None):
    r"""Compute the Jaccard coefficient of all node pairs in ebunch.

    The Jaccard coefficient of nodes `u` and `v` is defined as

    .. math::

        \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|}

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Jaccard coefficient will be computed for each pair of nodes
        given in the iterable. The pairs must be given as 2-tuples
        (u, v) where u and v are nodes in the graph. If ebunch is None
        then all nonexistent edges in the graph will be used.
        Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their Jaccard coefficient.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p:.8f}")
    (0, 1) -> 0.60000000
    (2, 3) -> 0.60000000

    References
    ----------
    .. [1] D. Liben-Nowell, J. Kleinberg.
       The Link Prediction Problem for Social Networks (2004).
       http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
    """

    def predict(u, v):
        nbrs_u = set(G[u])
        nbrs_v = set(G[v])
        union = nbrs_u | nbrs_v
        # Two nodes with no neighbors at all get a coefficient of zero
        # rather than dividing by zero.
        if not union:
            return 0
        return len(nx.common_neighbors(G, u, v)) / len(union)

    return _apply_prediction(G, predict, ebunch)
|
175 |
+
|
176 |
+
|
177 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def adamic_adar_index(G, ebunch=None):
    r"""Compute the Adamic-Adar index of all node pairs in ebunch.

    The Adamic-Adar index of `u` and `v` is defined as

    .. math::

        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}

    where $\Gamma(u)$ denotes the set of neighbors of $u$.
    This index leads to zero-division for nodes only connected via self-loops.
    It is intended to be used when no self-loops are present.

    Parameters
    ----------
    G : graph
        NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Adamic-Adar index will be computed for each pair of nodes given
        in the iterable. The pairs must be given as 2-tuples (u, v)
        where u and v are nodes in the graph. If ebunch is None then all
        nonexistent edges in the graph will be used.
        Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their Adamic-Adar index.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p:.8f}")
    (0, 1) -> 2.16404256
    (2, 3) -> 2.16404256

    References
    ----------
    .. [1] D. Liben-Nowell, J. Kleinberg.
       The Link Prediction Problem for Social Networks (2004).
       http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
    """

    def predict(u, v):
        # Each shared neighbor contributes the reciprocal of the log of
        # its degree; degree-1 common neighbors divide by log(1) == 0.
        score = 0
        for w in nx.common_neighbors(G, u, v):
            score += 1 / log(G.degree(w))
        return score

    return _apply_prediction(G, predict, ebunch)
|
239 |
+
|
240 |
+
|
241 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def common_neighbor_centrality(G, ebunch=None, alpha=0.8):
    r"""Return the CCPA score for each pair of nodes.

    Compute the Common Neighbor and Centrality based Parameterized Algorithm(CCPA)
    score of all node pairs in ebunch.

    The CCPA score of `u` and `v` is defined as

    .. math::

        \alpha \cdot (|\Gamma (u){\cap }^{}\Gamma (v)|)+(1-\alpha )\cdot \frac{N}{{d}_{uv}}

    where $\Gamma(u)$ denotes the set of neighbors of $u$, $\Gamma(v)$ denotes the
    set of neighbors of $v$, $\alpha$ is a parameter varying between [0,1], $N$ denotes
    the total number of nodes in the Graph and ${d}_{uv}$ denotes the shortest
    distance between $u$ and $v$.

    This algorithm is based on two vital properties of nodes, namely the number
    of common neighbors and their centrality. Common neighbor refers to the common
    nodes between two nodes. Centrality refers to the prestige that a node enjoys
    in a network.

    .. seealso::

        :func:`common_neighbors`

    Parameters
    ----------
    G : graph
        NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Preferential attachment score will be computed for each pair of
        nodes given in the iterable. The pairs must be given as
        2-tuples (u, v) where u and v are nodes in the graph. If ebunch
        is None then all nonexistent edges in the graph will be used.
        Default value: None.

    alpha : Parameter defined for participation of Common Neighbor
        and Centrality Algorithm share. Values for alpha should
        normally be between 0 and 1. Default value set to 0.8
        because the author found better performance at 0.8 for all the
        datasets.
        Default value: 0.8

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their Common Neighbor and Centrality based
        Parameterized Algorithm(CCPA) score.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NetworkXAlgorithmError
        If self loops exist in `ebunch` or in `G` (if `ebunch` is `None`).

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> preds = nx.common_neighbor_centrality(G, [(0, 1), (2, 3)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p}")
    (0, 1) -> 3.4000000000000004
    (2, 3) -> 3.4000000000000004

    References
    ----------
    .. [1] Ahmad, I., Akhtar, M.U., Noor, S. et al.
       Missing Link Prediction using Common Neighbor and Centrality based Parameterized Algorithm.
       Sci Rep 10, 364 (2020).
       https://doi.org/10.1038/s41598-019-57304-y
    """

    if alpha == 1:
        # With alpha == 1 the distance term vanishes, so the score is
        # just the common-neighbor count; skip the costly all-pairs
        # shortest-path computation entirely.
        def predict(u, v):
            if u == v:
                raise nx.NetworkXAlgorithmError("Self loops are not supported")

            return len(nx.common_neighbors(G, u, v))

    else:
        # Precompute shortest-path lengths once; unreachable pairs get an
        # infinite distance so their centrality term evaluates to zero.
        spl = dict(nx.shortest_path_length(G))
        inf = float("inf")

        def predict(u, v):
            if u == v:
                raise nx.NetworkXAlgorithmError("Self loops are not supported")
            duv = spl[u].get(v, inf)
            shared = len(nx.common_neighbors(G, u, v))
            return alpha * shared + (1 - alpha) * len(G) / duv

    return _apply_prediction(G, predict, ebunch)
|
347 |
+
|
348 |
+
|
349 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def preferential_attachment(G, ebunch=None):
    r"""Compute the preferential attachment score of all node pairs in ebunch.

    The preferential attachment score of `u` and `v` is defined as

    .. math::

        |\Gamma(u)| |\Gamma(v)|

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    Parameters
    ----------
    G : graph
        NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Preferential attachment score will be computed for each pair of
        nodes given in the iterable. The pairs must be given as
        2-tuples (u, v) where u and v are nodes in the graph. If ebunch
        is None then all nonexistent edges in the graph will be used.
        Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their preferential attachment score.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p}")
    (0, 1) -> 16
    (2, 3) -> 16

    References
    ----------
    .. [1] D. Liben-Nowell, J. Kleinberg.
       The Link Prediction Problem for Social Networks (2004).
       http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
    """

    def predict(u, v):
        # Score is simply the product of the two endpoint degrees.
        deg_u = G.degree(u)
        deg_v = G.degree(v)
        return deg_u * deg_v

    return _apply_prediction(G, predict, ebunch)
|
409 |
+
|
410 |
+
|
411 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(node_attrs="community")
def cn_soundarajan_hopcroft(G, ebunch=None, community="community"):
    r"""Count the number of common neighbors of all node pairs in ebunch
    using community information.

    For two nodes $u$ and $v$, this function computes the number of
    common neighbors plus a bonus of one for each common neighbor
    belonging to the same community as $u$ and $v$. Mathematically,

    .. math::

        |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w)

    where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
    neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        The score will be computed for each pair of nodes given in the
        iterable. The pairs must be given as 2-tuples (u, v) where u
        and v are nodes in the graph. If ebunch is None then all
        nonexistent edges in the graph will be used.
        Default value: None.

    community : string, optional (default = 'community')
        Nodes attribute name containing the community information.
        G[u][community] identifies which community u belongs to. Each
        node belongs to at most one community. Default value: 'community'.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their score.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NetworkXAlgorithmError
        If no community information is available for a node in `ebunch` or in `G` (if `ebunch` is `None`).

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(3)
    >>> G.nodes[0]["community"] = 0
    >>> G.nodes[1]["community"] = 0
    >>> G.nodes[2]["community"] = 0
    >>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p}")
    (0, 2) -> 2

    References
    ----------
    .. [1] Sucheta Soundarajan and John Hopcroft.
       Using community information to improve the precision of link
       prediction methods.
       In Proceedings of the 21st international conference companion on
       World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
       http://doi.acm.org/10.1145/2187980.2188150
    """

    def predict(u, v):
        comm_u = _community(G, u, community)
        comm_v = _community(G, v, community)
        shared = nx.common_neighbors(G, u, v)
        # Bonus points are only awarded when both endpoints share a
        # community; each same-community common neighbor counts once.
        bonus = 0
        if comm_u == comm_v:
            bonus = sum(1 for w in shared if _community(G, w, community) == comm_u)
        return len(shared) + bonus

    return _apply_prediction(G, predict, ebunch)
|
495 |
+
|
496 |
+
|
497 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(node_attrs="community")
def ra_index_soundarajan_hopcroft(G, ebunch=None, community="community"):
    r"""Compute the resource allocation index of all node pairs in
    ebunch using community information.

    For two nodes $u$ and $v$, this function computes the resource
    allocation index considering only common neighbors belonging to the
    same community as $u$ and $v$. Mathematically,

    .. math::

        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|}

    where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
    neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        The score will be computed for each pair of nodes given in the
        iterable. The pairs must be given as 2-tuples (u, v) where u
        and v are nodes in the graph. If ebunch is None then all
        nonexistent edges in the graph will be used.
        Default value: None.

    community : string, optional (default = 'community')
        Nodes attribute name containing the community information.
        G[u][community] identifies which community u belongs to. Each
        node belongs to at most one community. Default value: 'community'.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their score.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NetworkXAlgorithmError
        If no community information is available for a node in `ebunch` or in `G` (if `ebunch` is `None`).

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
    >>> G.nodes[0]["community"] = 0
    >>> G.nodes[1]["community"] = 0
    >>> G.nodes[2]["community"] = 1
    >>> G.nodes[3]["community"] = 0
    >>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p:.8f}")
    (0, 3) -> 0.50000000

    References
    ----------
    .. [1] Sucheta Soundarajan and John Hopcroft.
       Using community information to improve the precision of link
       prediction methods.
       In Proceedings of the 21st international conference companion on
       World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
       http://doi.acm.org/10.1145/2187980.2188150
    """

    def predict(u, v):
        comm_u = _community(G, u, community)
        comm_v = _community(G, v, community)
        # Pairs straddling two communities score zero by definition.
        if comm_u != comm_v:
            return 0
        total = 0
        for w in nx.common_neighbors(G, u, v):
            if _community(G, w, community) == comm_u:
                total += 1 / G.degree(w)
        return total

    return _apply_prediction(G, predict, ebunch)
|
582 |
+
|
583 |
+
|
584 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(node_attrs="community")
def within_inter_cluster(G, ebunch=None, delta=0.001, community="community"):
    """Compute the ratio of within- and inter-cluster common neighbors
    of all node pairs in ebunch.

    For two nodes `u` and `v`, if a common neighbor `w` belongs to the
    same community as them, `w` is considered as within-cluster common
    neighbor of `u` and `v`. Otherwise, it is considered as
    inter-cluster common neighbor of `u` and `v`. The ratio between the
    size of the set of within- and inter-cluster common neighbors is
    defined as the WIC measure. [1]_

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        The WIC measure will be computed for each pair of nodes given in
        the iterable. The pairs must be given as 2-tuples (u, v) where
        u and v are nodes in the graph. If ebunch is None then all
        nonexistent edges in the graph will be used.
        Default value: None.

    delta : float, optional (default = 0.001)
        Value to prevent division by zero in case there is no
        inter-cluster common neighbor between two nodes. See [1]_ for
        details. Default value: 0.001.

    community : string, optional (default = 'community')
        Nodes attribute name containing the community information.
        G[u][community] identifies which community u belongs to. Each
        node belongs to at most one community. Default value: 'community'.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
        pair of nodes and p is their WIC measure.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is a `DiGraph`, a `Multigraph` or a `MultiDiGraph`.

    NetworkXAlgorithmError
        - If `delta` is less than or equal to zero.
        - If no community information is available for a node in `ebunch` or in `G` (if `ebunch` is `None`).

    NodeNotFound
        If `ebunch` has a node that is not in `G`.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)])
    >>> G.nodes[0]["community"] = 0
    >>> G.nodes[1]["community"] = 1
    >>> G.nodes[2]["community"] = 0
    >>> G.nodes[3]["community"] = 0
    >>> G.nodes[4]["community"] = 0
    >>> preds = nx.within_inter_cluster(G, [(0, 4)])
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p:.8f}")
    (0, 4) -> 1.99800200
    >>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5)
    >>> for u, v, p in preds:
    ...     print(f"({u}, {v}) -> {p:.8f}")
    (0, 4) -> 1.33333333

    References
    ----------
    .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes.
       Link prediction in complex networks based on cluster information.
       In Proceedings of the 21st Brazilian conference on Advances in
       Artificial Intelligence (SBIA'12)
       https://doi.org/10.1007/978-3-642-34459-6_10
    """
    # Validate eagerly so a bad delta fails before any prediction runs.
    if delta <= 0:
        raise nx.NetworkXAlgorithmError("Delta must be greater than zero")

    def predict(u, v):
        comm_u = _community(G, u, community)
        comm_v = _community(G, v, community)
        if comm_u != comm_v:
            return 0
        shared = nx.common_neighbors(G, u, v)
        within = {w for w in shared if _community(G, w, community) == comm_u}
        inter = shared - within
        # `delta` keeps the ratio finite when there are no
        # inter-cluster common neighbors.
        return len(within) / (len(inter) + delta)

    return _apply_prediction(G, predict, ebunch)
|
678 |
+
|
679 |
+
|
680 |
+
def _community(G, u, community):
|
681 |
+
"""Get the community of the given node."""
|
682 |
+
node_u = G.nodes[u]
|
683 |
+
try:
|
684 |
+
return node_u[community]
|
685 |
+
except KeyError as err:
|
686 |
+
raise nx.NetworkXAlgorithmError(
|
687 |
+
f"No community information available for Node {u}"
|
688 |
+
) from err
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/lowest_common_ancestors.py
ADDED
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Algorithms for finding the lowest common ancestor of trees and DAGs."""
|
2 |
+
from collections import defaultdict
|
3 |
+
from collections.abc import Mapping, Set
|
4 |
+
from itertools import combinations_with_replacement
|
5 |
+
|
6 |
+
import networkx as nx
|
7 |
+
from networkx.utils import UnionFind, arbitrary_element, not_implemented_for
|
8 |
+
|
9 |
+
__all__ = [
|
10 |
+
"all_pairs_lowest_common_ancestor",
|
11 |
+
"tree_all_pairs_lowest_common_ancestor",
|
12 |
+
"lowest_common_ancestor",
|
13 |
+
]
|
14 |
+
|
15 |
+
|
16 |
+
@not_implemented_for("undirected")
@nx._dispatchable
def all_pairs_lowest_common_ancestor(G, pairs=None):
    """Return the lowest common ancestor of all pairs or the provided pairs

    Parameters
    ----------
    G : NetworkX directed graph

    pairs : iterable of pairs of nodes, optional (default: all pairs)
        The node pairs to process. When None, every pair of nodes in `G`
        is considered.

    Yields
    ------
    ((node1, node2), lca) : 2-tuple
        Where lca is least common ancestor of node1 and node2.
        Note that for the default case, the order of the node pair is not considered,
        e.g. you will not get both ``(a, b)`` and ``(b, a)``

    Raises
    ------
    NetworkXPointlessConcept
        If `G` is null.
    NetworkXError
        If `G` is not a DAG.

    Examples
    --------
    By default every combination of nodes in `G`, including self-pairings,
    is yielded:

    >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)])
    >>> dict(nx.all_pairs_lowest_common_ancestor(G))
    {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2}

    Restrict the output with the `pairs` argument:

    >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)]))
    {(1, 2): 1, (2, 3): 0}

    Notes
    -----
    Only defined on non-null directed acyclic graphs.

    See Also
    --------
    lowest_common_ancestor
    """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("LCA only defined on directed acyclic graphs.")
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")

    if pairs is None:
        pairs = combinations_with_replacement(G, 2)
    else:
        # Materialize the (possibly one-shot) iterable, dropping duplicate
        # pairs while keeping first-seen order.
        pairs = dict.fromkeys(pairs)
        # Every node mentioned in a pair must exist in G.
        nodeset = set(G)
        for pair in pairs:
            if set(pair) - nodeset:
                raise nx.NodeNotFound(
                    f"Node(s) {set(pair) - nodeset} from pair {pair} not in G."
                )

    # Input validation done; hand back a lazy generator.
    def generate_lca_from_pairs(G, pairs):
        # node -> its ancestor closure (ancestors plus the node itself)
        ancestor_cache = {}

        def closure(n):
            if n not in ancestor_cache:
                ancestor_cache[n] = nx.ancestors(G, n)
                ancestor_cache[n].add(n)
            return ancestor_cache[n]

        for v, w in pairs:
            common_ancestors = closure(v) & closure(w)
            if not common_ancestors:
                continue
            # Start from an arbitrary common ancestor and walk downward as
            # long as some successor is still a common ancestor; the node we
            # stop at is lowest.
            common_ancestor = next(iter(common_ancestors))
            while True:
                deeper = next(
                    (s for s in G.successors(common_ancestor) if s in common_ancestors),
                    None,
                )
                if deeper is None:
                    break
                common_ancestor = deeper
            yield ((v, w), common_ancestor)

    return generate_lca_from_pairs(G, pairs)
|
112 |
+
|
113 |
+
|
114 |
+
@not_implemented_for("undirected")
@nx._dispatchable
def lowest_common_ancestor(G, node1, node2, default=None):
    """Compute the lowest common ancestor of the given pair of nodes.

    Parameters
    ----------
    G : NetworkX directed graph

    node1, node2 : nodes in the graph.

    default : object
        Returned if no common ancestor between `node1` and `node2`

    Returns
    -------
    The lowest common ancestor of node1 and node2,
    or default if they have no common ancestors.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> nx.add_path(G, (0, 1, 2, 3))
    >>> nx.add_path(G, (0, 4, 3))
    >>> nx.lowest_common_ancestor(G, 2, 4)
    0

    See Also
    --------
    all_pairs_lowest_common_ancestor"""

    # Delegate to the all-pairs routine with a single pair.
    found = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
    if not found:
        return default
    # One requested pair yields at most one result.
    assert len(found) == 1
    return found[0][1]
|
150 |
+
|
151 |
+
|
152 |
+
@not_implemented_for("undirected")
@nx._dispatchable
def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):
    r"""Yield the lowest common ancestor for sets of pairs in a tree.

    Parameters
    ----------
    G : NetworkX directed graph (must be a tree)

    root : node, optional (default: None)
        The root of the subtree to operate on.
        If None, assume the entire graph has exactly one source and use that.

    pairs : iterable or iterator of pairs of nodes, optional (default: None)
        The pairs of interest. If None, Defaults to all pairs of nodes
        under `root` that have a lowest common ancestor.

    Returns
    -------
    lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes
        in `pairs` and `lca` is their lowest common ancestor.

    Examples
    --------
    >>> import pprint
    >>> G = nx.DiGraph([(1, 3), (2, 4), (1, 2)])
    >>> pprint.pprint(dict(nx.tree_all_pairs_lowest_common_ancestor(G)))
    {(1, 1): 1,
     (2, 1): 1,
     (2, 2): 2,
     (3, 1): 1,
     (3, 2): 1,
     (3, 3): 3,
     (3, 4): 1,
     (4, 1): 1,
     (4, 2): 2,
     (4, 4): 4}

    We can also use `pairs` argument to specify the pairs of nodes for which we
    want to compute lowest common ancestors. Here is an example:

    >>> dict(nx.tree_all_pairs_lowest_common_ancestor(G, pairs=[(1, 4), (2, 3)]))
    {(2, 3): 1, (1, 4): 1}

    Notes
    -----
    Only defined on non-null trees represented with directed edges from
    parents to children. Uses Tarjan's off-line lowest-common-ancestors
    algorithm. Runs in time $O(4 \times (V + E + P))$ time, where 4 is the largest
    value of the inverse Ackermann function likely to ever come up in actual
    use, and $P$ is the number of pairs requested (or $V^2$ if all are needed).

    Tarjan, R. E. (1979), "Applications of path compression on balanced trees",
    Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161.

    See Also
    --------
    all_pairs_lowest_common_ancestor: similar routine for general DAGs
    lowest_common_ancestor: just a single pair for general DAGs
    """
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")

    # Index pairs of interest for efficient lookup from either side.
    if pairs is not None:
        pair_dict = defaultdict(set)
        # See note on all_pairs_lowest_common_ancestor.
        if not isinstance(pairs, Mapping | Set):
            # Materialize one-shot iterators so `pairs` can be re-tested
            # for membership during the traversal below.
            pairs = set(pairs)
        for u, v in pairs:
            for n in (u, v):
                if n not in G:
                    msg = f"The node {str(n)} is not in the digraph."
                    raise nx.NodeNotFound(msg)
            # Record both orientations so either endpoint finds its partner.
            pair_dict[u].add(v)
            pair_dict[v].add(u)

    # If root is not specified, find the exactly one node with in degree 0 and
    # use it. Raise an error if none are found, or more than one is. Also check
    # for any nodes with in degree larger than 1, which would imply G is not a
    # tree.
    if root is None:
        for n, deg in G.in_degree:
            if deg == 0:
                if root is not None:
                    msg = "No root specified and tree has multiple sources."
                    raise nx.NetworkXError(msg)
                root = n
            # checking deg>1 is not sufficient for MultiDiGraphs
            elif deg > 1 and len(G.pred[n]) > 1:
                msg = "Tree LCA only defined on trees; use DAG routine."
                raise nx.NetworkXError(msg)
    # No in-degree-0 node found: every node has a predecessor, so the
    # digraph must contain a cycle and cannot be a tree.
    if root is None:
        raise nx.NetworkXError("Graph contains a cycle.")

    # Iterative implementation of Tarjan's offline lca algorithm
    # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition)
    # uf groups nodes into sets of already-processed subtrees; ancestors maps
    # each union-find representative to the LCA candidate for its whole set.
    uf = UnionFind()
    ancestors = {}
    for node in G:
        ancestors[node] = uf[node]

    # colors[v] is True once v's subtree has been fully processed.
    colors = defaultdict(bool)
    for node in nx.dfs_postorder_nodes(G, root):
        colors[node] = True
        # Report LCAs against every already-finished partner of `node`
        # (all nodes, when no explicit pairs were requested).
        for v in pair_dict[node] if pairs is not None else G:
            if colors[v]:
                # If the user requested both directions of a pair, give it.
                # Otherwise, just give one.
                if pairs is not None and (node, v) in pairs:
                    yield (node, v), ancestors[uf[v]]
                if pairs is None or (v, node) in pairs:
                    yield (v, node), ancestors[uf[v]]
        if node != root:
            # A tree node has exactly one predecessor: its parent. Merge the
            # finished subtree into the parent's set and mark the parent as
            # the current LCA candidate for the merged set.
            parent = arbitrary_element(G.pred[node])
            uf.union(parent, node)
            ancestors[uf[parent]] = parent
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/matching.py
ADDED
@@ -0,0 +1,1151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for computing and verifying matchings in a graph."""
|
2 |
+
from collections import Counter
|
3 |
+
from itertools import combinations, repeat
|
4 |
+
|
5 |
+
import networkx as nx
|
6 |
+
from networkx.utils import not_implemented_for
|
7 |
+
|
8 |
+
__all__ = [
|
9 |
+
"is_matching",
|
10 |
+
"is_maximal_matching",
|
11 |
+
"is_perfect_matching",
|
12 |
+
"max_weight_matching",
|
13 |
+
"min_weight_matching",
|
14 |
+
"maximal_matching",
|
15 |
+
]
|
16 |
+
|
17 |
+
|
18 |
+
@not_implemented_for("multigraph")
|
19 |
+
@not_implemented_for("directed")
|
20 |
+
@nx._dispatchable
|
21 |
+
def maximal_matching(G):
    r"""Find a maximal matching in the graph.

    A matching is a subset of edges in which no node occurs more than once.
    A maximal matching cannot add more edges and still be a matching.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    matching : set
        A maximal matching of the graph.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)])
    >>> sorted(nx.maximal_matching(G))
    [(1, 2), (3, 5)]

    Notes
    -----
    Greedy: scan the edges once, keeping each edge whose endpoints are both
    still unmatched (no superset of the result is a matching). Runs in
    $O(|E|)$ time.
    """
    chosen = set()
    saturated = set()
    for u, v in G.edges():
        # Keep the edge only if neither endpoint is matched yet; self-loops
        # can never belong to a matching.
        if u != v and u not in saturated and v not in saturated:
            chosen.add((u, v))
            saturated.update((u, v))
    return chosen
|
58 |
+
|
59 |
+
|
60 |
+
def matching_dict_to_set(matching):
    """Converts matching dict format to matching set format

    Takes a dictionary of the kind returned by :func:`max_weight_matching`
    (with mirrored entries ``u -> v`` and ``v -> u`` for each matched edge)
    and produces the set-of-edges form used by :func:`maximal_matching`,
    keeping exactly one orientation per edge.

    Self-loops are forbidden in matchings, so a key mapping to itself
    raises ``NetworkXError``.
    """
    edges = set()
    for pair in matching.items():
        a, b = pair
        # Skip the mirrored orientation (or an exact duplicate).
        if (b, a) in edges or pair in edges:
            continue
        if a == b:
            raise nx.NetworkXError(f"Selfloops cannot appear in matchings {pair}")
        edges.add(pair)
    return edges
|
83 |
+
|
84 |
+
|
85 |
+
@nx._dispatchable
|
86 |
+
def is_matching(G, matching):
    """Return True if ``matching`` is a valid matching of ``G``

    A *matching* is a set of edges no two of which share an endpoint;
    each node is incident to at most one matched edge.

    Parameters
    ----------
    G : NetworkX graph

    matching : dict or set
        Either a set of 2-tuples ``(u, v)``, or a dict with mirrored
        entries ``matching[u] == v`` and ``matching[v] == u`` for every
        matched edge ``(u, v)``.

    Returns
    -------
    bool
        Whether the given set or dictionary represents a valid matching
        in the graph.

    Raises
    ------
    NetworkXError
        If the proposed matching has an edge to a node not in G.
        Or if the matching is not a collection of 2-tuple edges.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)])
    >>> nx.is_matching(G, {1: 3, 2: 4})  # using dict to represent matching
    True

    >>> nx.is_matching(G, {(1, 3), (2, 4)})  # using set to represent matching
    True

    """
    if isinstance(matching, dict):
        matching = matching_dict_to_set(matching)

    covered = set()
    for edge in matching:
        if len(edge) != 2:
            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
        u, v = edge
        if u not in G or v not in G:
            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
        # Reject self-loops, non-edges of G, and any node matched twice.
        if u == v or not G.has_edge(u, v) or u in covered or v in covered:
            return False
        covered.update(edge)
    return True
|
144 |
+
|
145 |
+
|
146 |
+
@nx._dispatchable
|
147 |
+
def is_maximal_matching(G, matching):
    """Return True if ``matching`` is a maximal matching of ``G``

    A *maximal matching* is a matching to which no edge of ``G`` can be
    added without matching some node twice.

    Parameters
    ----------
    G : NetworkX graph

    matching : dict or set
        Either a set of 2-tuples ``(u, v)``, or a dict with mirrored
        entries ``matching[u] == v`` and ``matching[v] == u`` for every
        matched edge ``(u, v)``.

    Returns
    -------
    bool
        Whether the given set or dictionary represents a valid maximal
        matching in the graph.

    Raises
    ------
    NetworkXError
        If an edge mentions a node absent from ``G`` or an entry is not a
        2-tuple.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)])
    >>> nx.is_maximal_matching(G, {(1, 2), (3, 4)})
    True

    """
    if isinstance(matching, dict):
        matching = matching_dict_to_set(matching)
    # First verify the candidate is a matching at all, recording both
    # orientations of each matched edge plus every matched node.
    directed = set()
    matched = set()
    for edge in matching:
        if len(edge) != 2:
            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
        u, v = edge
        if u not in G or v not in G:
            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
        if u == v or not G.has_edge(u, v) or u in matched or v in matched:
            return False
        matched.update(edge)
        directed.add(edge)
        directed.add((v, u))
    # Maximality: every edge of G outside the matching (self-loops aside)
    # must touch at least one already-matched node.
    for u, v in G.edges:
        if (u, v) not in directed:
            if u != v and u not in matched and v not in matched:
                return False
    return True
|
206 |
+
|
207 |
+
|
208 |
+
@nx._dispatchable
|
209 |
+
def is_perfect_matching(G, matching):
    """Return True if ``matching`` is a perfect matching for ``G``

    A *perfect matching* saturates the graph: every node of ``G`` is
    incident to exactly one matched edge.

    Parameters
    ----------
    G : NetworkX graph

    matching : dict or set
        Either a set of 2-tuples ``(u, v)``, or a dict with mirrored
        entries ``matching[u] == v`` and ``matching[v] == u`` for every
        matched edge ``(u, v)``.

    Returns
    -------
    bool
        Whether the given set or dictionary represents a valid perfect
        matching in the graph.

    Raises
    ------
    NetworkXError
        If an edge mentions a node absent from ``G`` or an entry is not a
        2-tuple.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5), (4, 6)])
    >>> my_match = {1: 2, 3: 5, 4: 6}
    >>> nx.is_perfect_matching(G, my_match)
    True

    """
    if isinstance(matching, dict):
        matching = matching_dict_to_set(matching)

    saturated = set()
    for edge in matching:
        if len(edge) != 2:
            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
        u, v = edge
        if u not in G or v not in G:
            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
        if u == v or not G.has_edge(u, v) or u in saturated or v in saturated:
            return False
        saturated.update(edge)
    # Perfect iff the matching covers every node of the graph.
    return len(saturated) == len(G)
|
258 |
+
|
259 |
+
|
260 |
+
@not_implemented_for("multigraph")
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def min_weight_matching(G, weight="weight"):
    """Computing a minimum-weight maximal matching of G.

    A matching is a subset of edges in which no node occurs more than once;
    its weight is the sum of its edge weights. This routine reduces the
    minimization problem to :func:`max_weight_matching` by replacing each
    edge weight with::

        new_weight = (max_weight + 1) - edge_weight

    so the maximum-weight matching under the new weights is the
    minimum-weight matching under the originals. The ``+ 1`` keeps every
    transformed weight positive (and integral when the inputs are integers).
    ``maxcardinality=True`` is passed through so competing matchings have
    the same number of edges and the per-edge shift cannot change which
    matching is optimal.

    Read the documentation of `max_weight_matching` for more information.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight.
        If key not found, uses 1 as weight.

    Returns
    -------
    matching : set
        A minimal weight matching of the graph.

    See Also
    --------
    max_weight_matching
    """
    # No edges: nothing to invert, delegate directly.
    if len(G.edges) == 0:
        return max_weight_matching(G, maxcardinality=True, weight=weight)
    edge_view = G.edges(data=weight, default=1)
    shift = 1 + max(w for _, _, w in edge_view)
    mirror = nx.Graph()
    mirror.add_weighted_edges_from(
        ((u, v, shift - w) for u, v, w in edge_view), weight=weight
    )
    return max_weight_matching(mirror, maxcardinality=True, weight=weight)
|
319 |
+
|
320 |
+
|
321 |
+
@not_implemented_for("multigraph")
|
322 |
+
@not_implemented_for("directed")
|
323 |
+
@nx._dispatchable(edge_attrs="weight")
|
324 |
+
def max_weight_matching(G, maxcardinality=False, weight="weight"):
|
325 |
+
"""Compute a maximum-weighted matching of G.
|
326 |
+
|
327 |
+
A matching is a subset of edges in which no node occurs more than once.
|
328 |
+
The weight of a matching is the sum of the weights of its edges.
|
329 |
+
A maximal matching cannot add more edges and still be a matching.
|
330 |
+
The cardinality of a matching is the number of matched edges.
|
331 |
+
|
332 |
+
Parameters
|
333 |
+
----------
|
334 |
+
G : NetworkX graph
|
335 |
+
Undirected graph
|
336 |
+
|
337 |
+
maxcardinality: bool, optional (default=False)
|
338 |
+
If maxcardinality is True, compute the maximum-cardinality matching
|
339 |
+
with maximum weight among all maximum-cardinality matchings.
|
340 |
+
|
341 |
+
weight: string, optional (default='weight')
|
342 |
+
Edge data key corresponding to the edge weight.
|
343 |
+
If key not found, uses 1 as weight.
|
344 |
+
|
345 |
+
|
346 |
+
Returns
|
347 |
+
-------
|
348 |
+
matching : set
|
349 |
+
A maximal matching of the graph.
|
350 |
+
|
351 |
+
Examples
|
352 |
+
--------
|
353 |
+
>>> G = nx.Graph()
|
354 |
+
>>> edges = [(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)]
|
355 |
+
>>> G.add_weighted_edges_from(edges)
|
356 |
+
>>> sorted(nx.max_weight_matching(G))
|
357 |
+
[(2, 4), (5, 3)]
|
358 |
+
|
359 |
+
Notes
|
360 |
+
-----
|
361 |
+
If G has edges with weight attributes the edge data are used as
|
362 |
+
weight values else the weights are assumed to be 1.
|
363 |
+
|
364 |
+
This function takes time O(number_of_nodes ** 3).
|
365 |
+
|
366 |
+
If all edge weights are integers, the algorithm uses only integer
|
367 |
+
computations. If floating point weights are used, the algorithm
|
368 |
+
could return a slightly suboptimal matching due to numeric
|
369 |
+
precision errors.
|
370 |
+
|
371 |
+
This method is based on the "blossom" method for finding augmenting
|
372 |
+
paths and the "primal-dual" method for finding a matching of maximum
|
373 |
+
weight, both methods invented by Jack Edmonds [1]_.
|
374 |
+
|
375 |
+
Bipartite graphs can also be matched using the functions present in
|
376 |
+
:mod:`networkx.algorithms.bipartite.matching`.
|
377 |
+
|
378 |
+
References
|
379 |
+
----------
|
380 |
+
.. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs",
|
381 |
+
Zvi Galil, ACM Computing Surveys, 1986.
|
382 |
+
"""
|
383 |
+
#
|
384 |
+
# The algorithm is taken from "Efficient Algorithms for Finding Maximum
|
385 |
+
# Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
|
386 |
+
# It is based on the "blossom" method for finding augmenting paths and
|
387 |
+
# the "primal-dual" method for finding a matching of maximum weight, both
|
388 |
+
# methods invented by Jack Edmonds.
|
389 |
+
#
|
390 |
+
# A C program for maximum weight matching by Ed Rothberg was used
|
391 |
+
# extensively to validate this new code.
|
392 |
+
#
|
393 |
+
# Many terms used in the code comments are explained in the paper
|
394 |
+
# by Galil. You will probably need the paper to make sense of this code.
|
395 |
+
#
|
396 |
+
|
397 |
+
class NoNode:
|
398 |
+
"""Dummy value which is different from any node."""
|
399 |
+
|
400 |
+
class Blossom:
    """Representation of a non-trivial blossom or sub-blossom.

    Terminology follows Galil's survey paper; a blossom is an odd-length
    alternating cycle contracted to a single pseudo-vertex.
    """

    __slots__ = ["childs", "edges", "mybestedges"]

    # b.childs is an ordered list of b's sub-blossoms, starting with
    # the base and going round the blossom.

    # b.edges is the list of b's connecting edges, such that
    # b.edges[i] = (v, w) where v is a vertex in b.childs[i]
    # and w is a vertex in b.childs[wrap(i+1)].

    # If b is a top-level S-blossom,
    # b.mybestedges is a list of least-slack edges to neighboring
    # S-blossoms, or None if no such list has been computed yet.
    # This is used for efficient computation of delta3.

    # Generate the blossom's leaf vertices.
    def leaves(self):
        # Iterative depth-first traversal (an explicit stack avoids
        # recursion-depth limits on deeply nested blossoms).
        stack = [*self.childs]
        while stack:
            t = stack.pop()
            if isinstance(t, Blossom):
                stack.extend(t.childs)
            else:
                yield t
|
426 |
+
|
427 |
+
# Get a list of vertices.
|
428 |
+
gnodes = list(G)
|
429 |
+
if not gnodes:
|
430 |
+
return set() # don't bother with empty graphs
|
431 |
+
|
432 |
+
# Find the maximum edge weight.
|
433 |
+
maxweight = 0
|
434 |
+
allinteger = True
|
435 |
+
for i, j, d in G.edges(data=True):
|
436 |
+
wt = d.get(weight, 1)
|
437 |
+
if i != j and wt > maxweight:
|
438 |
+
maxweight = wt
|
439 |
+
allinteger = allinteger and (str(type(wt)).split("'")[1] in ("int", "long"))
|
440 |
+
|
441 |
+
# If v is a matched vertex, mate[v] is its partner vertex.
|
442 |
+
# If v is a single vertex, v does not occur as a key in mate.
|
443 |
+
# Initially all vertices are single; updated during augmentation.
|
444 |
+
mate = {}
|
445 |
+
|
446 |
+
# If b is a top-level blossom,
|
447 |
+
# label.get(b) is None if b is unlabeled (free),
|
448 |
+
# 1 if b is an S-blossom,
|
449 |
+
# 2 if b is a T-blossom.
|
450 |
+
# The label of a vertex is found by looking at the label of its top-level
|
451 |
+
# containing blossom.
|
452 |
+
# If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable
|
453 |
+
# from an S-vertex outside the blossom.
|
454 |
+
# Labels are assigned during a stage and reset after each augmentation.
|
455 |
+
label = {}
|
456 |
+
|
457 |
+
# If b is a labeled top-level blossom,
|
458 |
+
# labeledge[b] = (v, w) is the edge through which b obtained its label
|
459 |
+
# such that w is a vertex in b, or None if b's base vertex is single.
|
460 |
+
# If w is a vertex inside a T-blossom and label[w] == 2,
|
461 |
+
# labeledge[w] = (v, w) is an edge through which w is reachable from
|
462 |
+
# outside the blossom.
|
463 |
+
labeledge = {}
|
464 |
+
|
465 |
+
# If v is a vertex, inblossom[v] is the top-level blossom to which v
|
466 |
+
# belongs.
|
467 |
+
# If v is a top-level vertex, inblossom[v] == v since v is itself
|
468 |
+
# a (trivial) top-level blossom.
|
469 |
+
# Initially all vertices are top-level trivial blossoms.
|
470 |
+
inblossom = dict(zip(gnodes, gnodes))
|
471 |
+
|
472 |
+
# If b is a sub-blossom,
|
473 |
+
# blossomparent[b] is its immediate parent (sub-)blossom.
|
474 |
+
# If b is a top-level blossom, blossomparent[b] is None.
|
475 |
+
blossomparent = dict(zip(gnodes, repeat(None)))
|
476 |
+
|
477 |
+
# If b is a (sub-)blossom,
|
478 |
+
# blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
|
479 |
+
blossombase = dict(zip(gnodes, gnodes))
|
480 |
+
|
481 |
+
# If w is a free vertex (or an unreached vertex inside a T-blossom),
|
482 |
+
# bestedge[w] = (v, w) is the least-slack edge from an S-vertex,
|
483 |
+
# or None if there is no such edge.
|
484 |
+
# If b is a (possibly trivial) top-level S-blossom,
|
485 |
+
# bestedge[b] = (v, w) is the least-slack edge to a different S-blossom
|
486 |
+
# (v inside b), or None if there is no such edge.
|
487 |
+
# This is used for efficient computation of delta2 and delta3.
|
488 |
+
bestedge = {}
|
489 |
+
|
490 |
+
# If v is a vertex,
|
491 |
+
# dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual
|
492 |
+
# optimization problem (if all edge weights are integers, multiplication
|
493 |
+
# by two ensures that all values remain integers throughout the algorithm).
|
494 |
+
# Initially, u(v) = maxweight / 2.
|
495 |
+
dualvar = dict(zip(gnodes, repeat(maxweight)))
|
496 |
+
|
497 |
+
# If b is a non-trivial blossom,
|
498 |
+
# blossomdual[b] = z(b) where z(b) is b's variable in the dual
|
499 |
+
# optimization problem.
|
500 |
+
blossomdual = {}
|
501 |
+
|
502 |
+
# If (v, w) in allowedge or (w, v) in allowedg, then the edge
|
503 |
+
# (v, w) is known to have zero slack in the optimization problem;
|
504 |
+
# otherwise the edge may or may not have zero slack.
|
505 |
+
allowedge = {}
|
506 |
+
|
507 |
+
# Queue of newly discovered S-vertices.
|
508 |
+
queue = []
|
509 |
+
|
510 |
+
# Return 2 * slack of edge (v, w) (does not work inside blossoms).
# The factor of two matches the pre-doubled dual variables, keeping all
# arithmetic integral when the edge weights are integers.  Missing weight
# attributes default to 1.
def slack(v, w):
    return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1)
|
513 |
+
|
514 |
+
# Assign label t to the top-level blossom containing vertex w,
# coming through an edge from vertex v (v is None if w's blossom is the
# root of its alternating tree, i.e. its base vertex is single).
def assignLabel(w, t, v):
    b = inblossom[w]
    # Both the vertex and its top-level blossom must be unlabeled.
    assert label.get(w) is None and label.get(b) is None
    label[w] = label[b] = t
    if v is not None:
        labeledge[w] = labeledge[b] = (v, w)
    else:
        labeledge[w] = labeledge[b] = None
    # A newly labeled blossom starts with no least-slack edge recorded.
    bestedge[w] = bestedge[b] = None
    if t == 1:
        # b became an S-vertex/blossom; add it(s vertices) to the queue.
        if isinstance(b, Blossom):
            queue.extend(b.leaves())
        else:
            queue.append(b)
    elif t == 2:
        # b became a T-vertex/blossom; assign label S to its mate.
        # (If b is a non-trivial blossom, its base is the only vertex
        # with an external mate.)
        base = blossombase[b]
        assignLabel(mate[base], 1, base)
|
537 |
+
|
538 |
+
# Trace back from vertices v and w to discover either a new blossom
# or an augmenting path. Return the base vertex of the new blossom,
# or NoNode if an augmenting path was found.
def scanBlossom(v, w):
    # Trace back from v and w, placing breadcrumbs as we go.
    # A breadcrumb is encoded by setting bit 2 (value 4) of an
    # S-blossom's label: 1 | 4 == 5; checked below with `label[b] & 4`.
    path = []
    base = NoNode
    while v is not NoNode:
        # Look for a breadcrumb in v's blossom or put a new breadcrumb.
        b = inblossom[v]
        if label[b] & 4:
            # The two paths meet here: b is the new blossom's base.
            base = blossombase[b]
            break
        assert label[b] == 1
        path.append(b)
        label[b] = 5
        # Trace one step back.
        if labeledge[b] is None:
            # The base of blossom b is single; stop tracing this path.
            assert blossombase[b] not in mate
            v = NoNode
        else:
            assert labeledge[b][0] == mate[blossombase[b]]
            v = labeledge[b][0]
            b = inblossom[v]
            assert label[b] == 2
            # b is a T-blossom; trace one more step back.
            v = labeledge[b][0]
        # Swap v and w so that we alternate between both paths.
        if w is not NoNode:
            v, w = w, v
    # Remove breadcrumbs (restore plain S-labels).
    for b in path:
        label[b] = 1
    # Return base vertex, if we found one.
    return base
|
574 |
+
|
575 |
+
# Construct a new blossom with given base, through S-vertices v and w.
# Label the new blossom as S; set its dual variable to zero;
# relabel its T-vertices to S and add them to the queue.
def addBlossom(base, v, w):
    bb = inblossom[base]
    bv = inblossom[v]
    bw = inblossom[w]
    # Create blossom.
    b = Blossom()
    blossombase[b] = base
    blossomparent[b] = None
    blossomparent[bb] = b
    # Make list of sub-blossoms and their interconnecting edge endpoints.
    b.childs = path = []
    b.edges = edgs = [(v, w)]
    # Trace back from v to base.
    while bv != bb:
        # Add bv to the new blossom.
        blossomparent[bv] = b
        path.append(bv)
        edgs.append(labeledge[bv])
        assert label[bv] == 2 or (
            label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]]
        )
        # Trace one step back.
        v = labeledge[bv][0]
        bv = inblossom[v]
    # Add base sub-blossom; reverse lists.
    # (The v-side path was collected base-outward; reversing puts the
    # base first, as the Blossom invariant requires.)
    path.append(bb)
    path.reverse()
    edgs.reverse()
    # Trace back from w to base.
    while bw != bb:
        # Add bw to the new blossom.
        blossomparent[bw] = b
        path.append(bw)
        # Edge endpoints are swapped so edges keep pointing "forward"
        # around the blossom cycle.
        edgs.append((labeledge[bw][1], labeledge[bw][0]))
        assert label[bw] == 2 or (
            label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]]
        )
        # Trace one step back.
        w = labeledge[bw][0]
        bw = inblossom[w]
    # Set label to S.
    assert label[bb] == 1
    label[b] = 1
    labeledge[b] = labeledge[bb]
    # Set dual variable to zero.
    blossomdual[b] = 0
    # Relabel vertices.
    for v in b.leaves():
        if label[inblossom[v]] == 2:
            # This T-vertex now turns into an S-vertex because it becomes
            # part of an S-blossom; add it to the queue.
            queue.append(v)
        inblossom[v] = b
    # Compute b.mybestedges (least-slack edges to neighboring S-blossoms).
    bestedgeto = {}
    for bv in path:
        if isinstance(bv, Blossom):
            if bv.mybestedges is not None:
                # Walk this subblossom's least-slack edges.
                nblist = bv.mybestedges
                # The sub-blossom won't need this data again.
                bv.mybestedges = None
            else:
                # This subblossom does not have a list of least-slack
                # edges; get the information from the vertices.
                nblist = [
                    (v, w) for v in bv.leaves() for w in G.neighbors(v) if v != w
                ]
        else:
            nblist = [(bv, w) for w in G.neighbors(bv) if bv != w]
        for k in nblist:
            (i, j) = k
            # Orient the edge so that i lies inside the new blossom.
            if inblossom[j] == b:
                i, j = j, i
            bj = inblossom[j]
            if (
                bj != b
                and label.get(bj) == 1
                and ((bj not in bestedgeto) or slack(i, j) < slack(*bestedgeto[bj]))
            ):
                bestedgeto[bj] = k
        # Forget about least-slack edge of the subblossom.
        bestedge[bv] = None
    b.mybestedges = list(bestedgeto.values())
    # Select bestedge[b]: the overall least-slack edge out of b.
    mybestedge = None
    bestedge[b] = None
    for k in b.mybestedges:
        kslack = slack(*k)
        if mybestedge is None or kslack < mybestslack:
            mybestedge = k
            mybestslack = kslack
    bestedge[b] = mybestedge
|
671 |
+
|
672 |
+
# Expand the given top-level blossom.
def expandBlossom(b, endstage):
    # This is an obnoxiously complicated recursive function for the sake of
    # a stack-transformation. So, we hack around the complexity by using
    # a trampoline pattern. By yielding the arguments to each recursive
    # call, we keep the actual callstack flat.

    def _recurse(b, endstage):
        # Convert sub-blossoms into top-level blossoms.
        for s in b.childs:
            blossomparent[s] = None
            if isinstance(s, Blossom):
                if endstage and blossomdual[s] == 0:
                    # Recursively expand this sub-blossom.
                    yield s
                else:
                    for v in s.leaves():
                        inblossom[v] = s
            else:
                inblossom[s] = s
        # If we expand a T-blossom during a stage, its sub-blossoms must be
        # relabeled.
        if (not endstage) and label.get(b) == 2:
            # Start at the sub-blossom through which the expanding
            # blossom obtained its label, and relabel sub-blossoms until
            # we reach the base.
            # Figure out through which sub-blossom the expanding blossom
            # obtained its label initially.
            entrychild = inblossom[labeledge[b][1]]
            # Decide in which direction we will go round the blossom.
            j = b.childs.index(entrychild)
            if j & 1:
                # Start index is odd; go forward and wrap.
                j -= len(b.childs)
                jstep = 1
            else:
                # Start index is even; go backward.
                jstep = -1
            # Move along the blossom until we get to the base.
            v, w = labeledge[b]
            while j != 0:
                # Relabel the T-sub-blossom.
                if jstep == 1:
                    p, q = b.edges[j]
                else:
                    q, p = b.edges[j - 1]
                label[w] = None
                label[q] = None
                assignLabel(w, 2, v)
                # Step to the next S-sub-blossom and note its forward edge.
                allowedge[(p, q)] = allowedge[(q, p)] = True
                j += jstep
                if jstep == 1:
                    v, w = b.edges[j]
                else:
                    w, v = b.edges[j - 1]
                # Step to the next T-sub-blossom.
                allowedge[(v, w)] = allowedge[(w, v)] = True
                j += jstep
            # Relabel the base T-sub-blossom WITHOUT stepping through to
            # its mate (so don't call assignLabel).
            bw = b.childs[j]
            label[w] = label[bw] = 2
            labeledge[w] = labeledge[bw] = (v, w)
            bestedge[bw] = None
            # Continue along the blossom until we get back to entrychild.
            j += jstep
            while b.childs[j] != entrychild:
                # Examine the vertices of the sub-blossom to see whether
                # it is reachable from a neighboring S-vertex outside the
                # expanding blossom.
                bv = b.childs[j]
                if label.get(bv) == 1:
                    # This sub-blossom just got label S through one of its
                    # neighbors; leave it be.
                    j += jstep
                    continue
                if isinstance(bv, Blossom):
                    for v in bv.leaves():
                        if label.get(v):
                            break
                else:
                    v = bv
                # If the sub-blossom contains a reachable vertex, assign
                # label T to the sub-blossom.
                if label.get(v):
                    assert label[v] == 2
                    assert inblossom[v] == bv
                    label[v] = None
                    label[mate[blossombase[bv]]] = None
                    assignLabel(v, 2, labeledge[v][0])
                j += jstep
        # Remove the expanded blossom entirely.
        label.pop(b, None)
        labeledge.pop(b, None)
        bestedge.pop(b, None)
        del blossomparent[b]
        del blossombase[b]
        del blossomdual[b]

    # Now, we apply the trampoline pattern. We simulate a recursive
    # callstack by maintaining a stack of generators, each yielding a
    # sequence of function arguments. We grow the stack by appending a call
    # to _recurse on each argument tuple, and shrink the stack whenever a
    # generator is exhausted.
    stack = [_recurse(b, endstage)]
    while stack:
        top = stack[-1]
        for s in top:
            stack.append(_recurse(s, endstage))
            break
        else:
            stack.pop()
785 |
+
|
786 |
+
# Swap matched/unmatched edges over an alternating path through blossom b
# between vertex v and the base vertex. Keep blossom bookkeeping
# consistent.
def augmentBlossom(b, v):
    # This is an obnoxiously complicated recursive function for the sake of
    # a stack-transformation. So, we hack around the complexity by using
    # a trampoline pattern. By yielding the arguments to each recursive
    # call, we keep the actual callstack flat.

    def _recurse(b, v):
        # Bubble up through the blossom tree from vertex v to an immediate
        # sub-blossom of b.
        t = v
        while blossomparent[t] != b:
            t = blossomparent[t]
        # Recursively deal with the first sub-blossom.
        if isinstance(t, Blossom):
            yield (t, v)
        # Decide in which direction we will go round the blossom.
        i = j = b.childs.index(t)
        if i & 1:
            # Start index is odd; go forward and wrap.
            j -= len(b.childs)
            jstep = 1
        else:
            # Start index is even; go backward.
            jstep = -1
        # Move along the blossom until we get to the base.
        while j != 0:
            # Step to the next sub-blossom and augment it recursively.
            j += jstep
            t = b.childs[j]
            if jstep == 1:
                w, x = b.edges[j]
            else:
                x, w = b.edges[j - 1]
            if isinstance(t, Blossom):
                yield (t, w)
            # Step to the next sub-blossom and augment it recursively.
            j += jstep
            t = b.childs[j]
            if isinstance(t, Blossom):
                yield (t, x)
            # Match the edge connecting those sub-blossoms.
            mate[w] = x
            mate[x] = w
        # Rotate the list of sub-blossoms to put the new base at the front.
        b.childs = b.childs[i:] + b.childs[:i]
        b.edges = b.edges[i:] + b.edges[:i]
        blossombase[b] = blossombase[b.childs[0]]
        assert blossombase[b] == v

    # Now, we apply the trampoline pattern. We simulate a recursive
    # callstack by maintaining a stack of generators, each yielding a
    # sequence of function arguments. We grow the stack by appending a call
    # to _recurse on each argument tuple, and shrink the stack whenever a
    # generator is exhausted.
    stack = [_recurse(b, v)]
    while stack:
        top = stack[-1]
        for args in top:
            stack.append(_recurse(*args))
            break
        else:
            stack.pop()
|
851 |
+
|
852 |
+
# Swap matched/unmatched edges over an alternating path between two
# single vertices. The augmenting path runs through S-vertices v and w.
def augmentMatching(v, w):
    # Handle both halves of the augmenting path symmetrically.
    for s, j in ((v, w), (w, v)):
        # Match vertex s to vertex j. Then trace back from s
        # until we find a single vertex, swapping matched and unmatched
        # edges as we go.
        while 1:
            bs = inblossom[s]
            assert label[bs] == 1
            assert (labeledge[bs] is None and blossombase[bs] not in mate) or (
                labeledge[bs][0] == mate[blossombase[bs]]
            )
            # Augment through the S-blossom from s to base.
            if isinstance(bs, Blossom):
                augmentBlossom(bs, s)
            # Update mate[s]
            mate[s] = j
            # Trace one step back.
            if labeledge[bs] is None:
                # Reached single vertex; stop.
                break
            t = labeledge[bs][0]
            bt = inblossom[t]
            assert label[bt] == 2
            # Trace one more step back.
            s, j = labeledge[bt]
            # Augment through the T-blossom from j to base.
            assert blossombase[bt] == t
            if isinstance(bt, Blossom):
                augmentBlossom(bt, j)
            # Update mate[j]
            mate[j] = s
|
885 |
+
|
886 |
+
# Verify that the optimum solution has been reached, by checking the
# complementary-slackness conditions of the LP dual (only sound when all
# edge weights are integers; see the allinteger guard at the call site).
def verifyOptimum():
    if maxcardinality:
        # Vertices may have negative dual;
        # find a constant non-negative number to add to all vertex duals.
        vdualoffset = max(0, -min(dualvar.values()))
    else:
        vdualoffset = 0
    # 0. all dual variables are non-negative
    assert min(dualvar.values()) + vdualoffset >= 0
    assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0
    # 0. all edges have non-negative slack and
    # 1. all matched edges have zero slack;
    for i, j, d in G.edges(data=True):
        wt = d.get(weight, 1)
        if i == j:
            continue  # ignore self-loops
        s = dualvar[i] + dualvar[j] - 2 * wt
        iblossoms = [i]
        jblossoms = [j]
        # Collect the chains of enclosing blossoms for both endpoints.
        while blossomparent[iblossoms[-1]] is not None:
            iblossoms.append(blossomparent[iblossoms[-1]])
        while blossomparent[jblossoms[-1]] is not None:
            jblossoms.append(blossomparent[jblossoms[-1]])
        iblossoms.reverse()
        jblossoms.reverse()
        # Add the duals of every blossom containing BOTH endpoints
        # (zip stops at the first level where the chains diverge).
        for bi, bj in zip(iblossoms, jblossoms):
            if bi != bj:
                break
            s += 2 * blossomdual[bi]
        assert s >= 0
        if mate.get(i) == j or mate.get(j) == i:
            assert mate[i] == j and mate[j] == i
            assert s == 0
    # 2. all single vertices have zero dual value;
    for v in gnodes:
        assert (v in mate) or dualvar[v] + vdualoffset == 0
    # 3. all blossoms with positive dual value are full.
    for b in blossomdual:
        if blossomdual[b] > 0:
            assert len(b.edges) % 2 == 1
            for i, j in b.edges[1::2]:
                assert mate[i] == j and mate[j] == i
    # Ok.
|
930 |
+
|
931 |
+
# Main loop: continue until no further improvement is possible.
|
932 |
+
while 1:
|
933 |
+
# Each iteration of this loop is a "stage".
|
934 |
+
# A stage finds an augmenting path and uses that to improve
|
935 |
+
# the matching.
|
936 |
+
|
937 |
+
# Remove labels from top-level blossoms/vertices.
|
938 |
+
label.clear()
|
939 |
+
labeledge.clear()
|
940 |
+
|
941 |
+
# Forget all about least-slack edges.
|
942 |
+
bestedge.clear()
|
943 |
+
for b in blossomdual:
|
944 |
+
b.mybestedges = None
|
945 |
+
|
946 |
+
# Loss of labeling means that we can not be sure that currently
|
947 |
+
# allowable edges remain allowable throughout this stage.
|
948 |
+
allowedge.clear()
|
949 |
+
|
950 |
+
# Make queue empty.
|
951 |
+
queue[:] = []
|
952 |
+
|
953 |
+
# Label single blossoms/vertices with S and put them in the queue.
|
954 |
+
for v in gnodes:
|
955 |
+
if (v not in mate) and label.get(inblossom[v]) is None:
|
956 |
+
assignLabel(v, 1, None)
|
957 |
+
|
958 |
+
# Loop until we succeed in augmenting the matching.
|
959 |
+
augmented = 0
|
960 |
+
while 1:
|
961 |
+
# Each iteration of this loop is a "substage".
|
962 |
+
# A substage tries to find an augmenting path;
|
963 |
+
# if found, the path is used to improve the matching and
|
964 |
+
# the stage ends. If there is no augmenting path, the
|
965 |
+
# primal-dual method is used to pump some slack out of
|
966 |
+
# the dual variables.
|
967 |
+
|
968 |
+
# Continue labeling until all vertices which are reachable
|
969 |
+
# through an alternating path have got a label.
|
970 |
+
while queue and not augmented:
|
971 |
+
# Take an S vertex from the queue.
|
972 |
+
v = queue.pop()
|
973 |
+
assert label[inblossom[v]] == 1
|
974 |
+
|
975 |
+
# Scan its neighbors:
|
976 |
+
for w in G.neighbors(v):
|
977 |
+
if w == v:
|
978 |
+
continue # ignore self-loops
|
979 |
+
# w is a neighbor to v
|
980 |
+
bv = inblossom[v]
|
981 |
+
bw = inblossom[w]
|
982 |
+
if bv == bw:
|
983 |
+
# this edge is internal to a blossom; ignore it
|
984 |
+
continue
|
985 |
+
if (v, w) not in allowedge:
|
986 |
+
kslack = slack(v, w)
|
987 |
+
if kslack <= 0:
|
988 |
+
# edge k has zero slack => it is allowable
|
989 |
+
allowedge[(v, w)] = allowedge[(w, v)] = True
|
990 |
+
if (v, w) in allowedge:
|
991 |
+
if label.get(bw) is None:
|
992 |
+
# (C1) w is a free vertex;
|
993 |
+
# label w with T and label its mate with S (R12).
|
994 |
+
assignLabel(w, 2, v)
|
995 |
+
elif label.get(bw) == 1:
|
996 |
+
# (C2) w is an S-vertex (not in the same blossom);
|
997 |
+
# follow back-links to discover either an
|
998 |
+
# augmenting path or a new blossom.
|
999 |
+
base = scanBlossom(v, w)
|
1000 |
+
if base is not NoNode:
|
1001 |
+
# Found a new blossom; add it to the blossom
|
1002 |
+
# bookkeeping and turn it into an S-blossom.
|
1003 |
+
addBlossom(base, v, w)
|
1004 |
+
else:
|
1005 |
+
# Found an augmenting path; augment the
|
1006 |
+
# matching and end this stage.
|
1007 |
+
augmentMatching(v, w)
|
1008 |
+
augmented = 1
|
1009 |
+
break
|
1010 |
+
elif label.get(w) is None:
|
1011 |
+
# w is inside a T-blossom, but w itself has not
|
1012 |
+
# yet been reached from outside the blossom;
|
1013 |
+
# mark it as reached (we need this to relabel
|
1014 |
+
# during T-blossom expansion).
|
1015 |
+
assert label[bw] == 2
|
1016 |
+
label[w] = 2
|
1017 |
+
labeledge[w] = (v, w)
|
1018 |
+
elif label.get(bw) == 1:
|
1019 |
+
# keep track of the least-slack non-allowable edge to
|
1020 |
+
# a different S-blossom.
|
1021 |
+
if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]):
|
1022 |
+
bestedge[bv] = (v, w)
|
1023 |
+
elif label.get(w) is None:
|
1024 |
+
# w is a free vertex (or an unreached vertex inside
|
1025 |
+
# a T-blossom) but we can not reach it yet;
|
1026 |
+
# keep track of the least-slack edge that reaches w.
|
1027 |
+
if bestedge.get(w) is None or kslack < slack(*bestedge[w]):
|
1028 |
+
bestedge[w] = (v, w)
|
1029 |
+
|
1030 |
+
if augmented:
|
1031 |
+
break
|
1032 |
+
|
1033 |
+
# There is no augmenting path under these constraints;
|
1034 |
+
# compute delta and reduce slack in the optimization problem.
|
1035 |
+
# (Note that our vertex dual variables, edge slacks and delta's
|
1036 |
+
# are pre-multiplied by two.)
|
1037 |
+
deltatype = -1
|
1038 |
+
delta = deltaedge = deltablossom = None
|
1039 |
+
|
1040 |
+
# Compute delta1: the minimum value of any vertex dual.
|
1041 |
+
if not maxcardinality:
|
1042 |
+
deltatype = 1
|
1043 |
+
delta = min(dualvar.values())
|
1044 |
+
|
1045 |
+
# Compute delta2: the minimum slack on any edge between
|
1046 |
+
# an S-vertex and a free vertex.
|
1047 |
+
for v in G.nodes():
|
1048 |
+
if label.get(inblossom[v]) is None and bestedge.get(v) is not None:
|
1049 |
+
d = slack(*bestedge[v])
|
1050 |
+
if deltatype == -1 or d < delta:
|
1051 |
+
delta = d
|
1052 |
+
deltatype = 2
|
1053 |
+
deltaedge = bestedge[v]
|
1054 |
+
|
1055 |
+
# Compute delta3: half the minimum slack on any edge between
|
1056 |
+
# a pair of S-blossoms.
|
1057 |
+
for b in blossomparent:
|
1058 |
+
if (
|
1059 |
+
blossomparent[b] is None
|
1060 |
+
and label.get(b) == 1
|
1061 |
+
and bestedge.get(b) is not None
|
1062 |
+
):
|
1063 |
+
kslack = slack(*bestedge[b])
|
1064 |
+
if allinteger:
|
1065 |
+
assert (kslack % 2) == 0
|
1066 |
+
d = kslack // 2
|
1067 |
+
else:
|
1068 |
+
d = kslack / 2.0
|
1069 |
+
if deltatype == -1 or d < delta:
|
1070 |
+
delta = d
|
1071 |
+
deltatype = 3
|
1072 |
+
deltaedge = bestedge[b]
|
1073 |
+
|
1074 |
+
# Compute delta4: minimum z variable of any T-blossom.
|
1075 |
+
for b in blossomdual:
|
1076 |
+
if (
|
1077 |
+
blossomparent[b] is None
|
1078 |
+
and label.get(b) == 2
|
1079 |
+
and (deltatype == -1 or blossomdual[b] < delta)
|
1080 |
+
):
|
1081 |
+
delta = blossomdual[b]
|
1082 |
+
deltatype = 4
|
1083 |
+
deltablossom = b
|
1084 |
+
|
1085 |
+
if deltatype == -1:
|
1086 |
+
# No further improvement possible; max-cardinality optimum
|
1087 |
+
# reached. Do a final delta update to make the optimum
|
1088 |
+
# verifiable.
|
1089 |
+
assert maxcardinality
|
1090 |
+
deltatype = 1
|
1091 |
+
delta = max(0, min(dualvar.values()))
|
1092 |
+
|
1093 |
+
# Update dual variables according to delta.
|
1094 |
+
for v in gnodes:
|
1095 |
+
if label.get(inblossom[v]) == 1:
|
1096 |
+
# S-vertex: 2*u = 2*u - 2*delta
|
1097 |
+
dualvar[v] -= delta
|
1098 |
+
elif label.get(inblossom[v]) == 2:
|
1099 |
+
# T-vertex: 2*u = 2*u + 2*delta
|
1100 |
+
dualvar[v] += delta
|
1101 |
+
for b in blossomdual:
|
1102 |
+
if blossomparent[b] is None:
|
1103 |
+
if label.get(b) == 1:
|
1104 |
+
# top-level S-blossom: z = z + 2*delta
|
1105 |
+
blossomdual[b] += delta
|
1106 |
+
elif label.get(b) == 2:
|
1107 |
+
# top-level T-blossom: z = z - 2*delta
|
1108 |
+
blossomdual[b] -= delta
|
1109 |
+
|
1110 |
+
# Take action at the point where minimum delta occurred.
|
1111 |
+
if deltatype == 1:
|
1112 |
+
# No further improvement possible; optimum reached.
|
1113 |
+
break
|
1114 |
+
elif deltatype == 2:
|
1115 |
+
# Use the least-slack edge to continue the search.
|
1116 |
+
(v, w) = deltaedge
|
1117 |
+
assert label[inblossom[v]] == 1
|
1118 |
+
allowedge[(v, w)] = allowedge[(w, v)] = True
|
1119 |
+
queue.append(v)
|
1120 |
+
elif deltatype == 3:
|
1121 |
+
# Use the least-slack edge to continue the search.
|
1122 |
+
(v, w) = deltaedge
|
1123 |
+
allowedge[(v, w)] = allowedge[(w, v)] = True
|
1124 |
+
assert label[inblossom[v]] == 1
|
1125 |
+
queue.append(v)
|
1126 |
+
elif deltatype == 4:
|
1127 |
+
# Expand the least-z blossom.
|
1128 |
+
expandBlossom(deltablossom, False)
|
1129 |
+
|
1130 |
+
# End of a this substage.
|
1131 |
+
|
1132 |
+
# Paranoia check that the matching is symmetric.
|
1133 |
+
for v in mate:
|
1134 |
+
assert mate[mate[v]] == v
|
1135 |
+
|
1136 |
+
# Stop when no more augmenting path can be found.
|
1137 |
+
if not augmented:
|
1138 |
+
break
|
1139 |
+
|
1140 |
+
# End of a stage; expand all S-blossoms which have zero dual.
|
1141 |
+
for b in list(blossomdual.keys()):
|
1142 |
+
if b not in blossomdual:
|
1143 |
+
continue # already expanded
|
1144 |
+
if blossomparent[b] is None and label.get(b) == 1 and blossomdual[b] == 0:
|
1145 |
+
expandBlossom(b, True)
|
1146 |
+
|
1147 |
+
# Verify that we reached the optimum solution (only for integer weights).
|
1148 |
+
if allinteger:
|
1149 |
+
verifyOptimum()
|
1150 |
+
|
1151 |
+
return matching_dict_to_set(mate)
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/mis.py
ADDED
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Algorithm to find a maximal (not maximum) independent set.
|
3 |
+
|
4 |
+
"""
|
5 |
+
import networkx as nx
|
6 |
+
from networkx.utils import not_implemented_for, py_random_state
|
7 |
+
|
8 |
+
__all__ = ["maximal_independent_set"]
|
9 |
+
|
10 |
+
|
11 |
+
@not_implemented_for("directed")
@py_random_state(2)
@nx._dispatchable
def maximal_independent_set(G, nodes=None, seed=None):
    """Return a random maximal independent set that contains `nodes`.

    An independent set is a set of nodes such that the subgraph of G
    induced by these nodes contains no edges. A maximal independent set
    is an independent set to which no further node can be added without
    destroying independence.

    Parameters
    ----------
    G : NetworkX graph

    nodes : list or iterable
        Nodes that must be part of the independent set. This set of nodes
        must itself be independent.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    indep_nodes : list
        Nodes forming a maximal independent set.

    Raises
    ------
    NetworkXUnfeasible
        If the nodes in the provided list are not part of the graph or
        do not form an independent set.

    NetworkXNotImplemented
        If `G` is directed.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> nx.maximal_independent_set(G)  # doctest: +SKIP
    [4, 0, 2]
    >>> nx.maximal_independent_set(G, [1])  # doctest: +SKIP
    [1, 3]

    Notes
    -----
    This algorithm does not solve the (NP-hard) maximum independent set
    problem; it greedily grows the given seed set with random nodes.

    """
    # Start from the caller's seed set, or a single random node.
    if nodes:
        nodes = set(nodes)
        if not nodes.issubset(G):
            raise nx.NetworkXUnfeasible(f"{nodes} is not a subset of the nodes of G")
    else:
        nodes = {seed.choice(list(G))}
    # All vertices adjacent to the seed set; any overlap means the seed
    # set was not independent.
    neighbors = set()
    for v in nodes:
        neighbors.update(G.adj[v])
    if set.intersection(neighbors, nodes):
        raise nx.NetworkXUnfeasible(f"{nodes} is not an independent set of G")
    indep_nodes = list(nodes)
    # Candidates are vertices not in the set and not adjacent to it.
    available_nodes = set(G.nodes()).difference(neighbors.union(nodes))
    # Greedily absorb random candidates, removing each pick and its
    # neighborhood from the candidate pool.
    while available_nodes:
        node = seed.choice(list(available_nodes))
        indep_nodes.append(node)
        available_nodes.difference_update(list(G.adj[node]) + [node])
    return indep_nodes
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/moral.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
r"""Function for computing the moral graph of a directed graph."""
|
2 |
+
|
3 |
+
import itertools
|
4 |
+
|
5 |
+
import networkx as nx
|
6 |
+
from networkx.utils import not_implemented_for
|
7 |
+
|
8 |
+
__all__ = ["moral_graph"]
|
9 |
+
|
10 |
+
|
11 |
+
@not_implemented_for("undirected")
@nx._dispatchable(returns_graph=True)
def moral_graph(G):
    r"""Return the Moral Graph

    Returns the moralized graph of a given directed graph.

    Parameters
    ----------
    G : NetworkX graph
        Directed graph

    Returns
    -------
    H : NetworkX graph
        The undirected moralized graph of G

    Raises
    ------
    NetworkXNotImplemented
        If `G` is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (2, 3), (2, 5), (3, 4), (4, 3)])
    >>> G_moral = nx.moral_graph(G)
    >>> G_moral.edges()
    EdgeView([(1, 2), (2, 3), (2, 5), (2, 4), (3, 4)])

    Notes
    -----
    A moral graph is an undirected graph H = (V, E) generated from a
    directed Graph, where if a node has more than one parent node, edges
    between these parent nodes are inserted and all directed edges become
    undirected.

    https://en.wikipedia.org/wiki/Moral_graph

    References
    ----------
    .. [1] Wray L. Buntine. 1995. Chain graphs for learning.
       In Proceedings of the Eleventh conference on Uncertainty
       in artificial intelligence (UAI'95)
    """
    # Start from the undirected version of G, then "marry the parents":
    # every pair of predecessors of a common child gets an edge.
    H = G.to_undirected()
    for parents in G.pred.values():
        H.add_edges_from(itertools.combinations(parents, 2))
    return H
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/node_classification.py
ADDED
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" This module provides the functions for node classification problem.
|
2 |
+
|
3 |
+
The functions in this module are not imported
|
4 |
+
into the top level `networkx` namespace.
|
5 |
+
You can access these functions by importing
|
6 |
+
the `networkx.algorithms.node_classification` modules,
|
7 |
+
then accessing the functions as attributes of `node_classification`.
|
8 |
+
For example:
|
9 |
+
|
10 |
+
>>> from networkx.algorithms import node_classification
|
11 |
+
>>> G = nx.path_graph(4)
|
12 |
+
>>> G.edges()
|
13 |
+
EdgeView([(0, 1), (1, 2), (2, 3)])
|
14 |
+
>>> G.nodes[0]["label"] = "A"
|
15 |
+
>>> G.nodes[3]["label"] = "B"
|
16 |
+
>>> node_classification.harmonic_function(G)
|
17 |
+
['A', 'A', 'B', 'B']
|
18 |
+
|
19 |
+
References
|
20 |
+
----------
|
21 |
+
Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
|
22 |
+
Semi-supervised learning using gaussian fields and harmonic functions.
|
23 |
+
In ICML (Vol. 3, pp. 912-919).
|
24 |
+
"""
|
25 |
+
import networkx as nx
|
26 |
+
|
27 |
+
__all__ = ["harmonic_function", "local_and_global_consistency"]
|
28 |
+
|
29 |
+
|
30 |
+
@nx.utils.not_implemented_for("directed")
@nx._dispatchable(node_attrs="label_name")
def harmonic_function(G, max_iter=30, label_name="label"):
    """Node classification by Harmonic function

    Function for computing Harmonic function algorithm by Zhu et al.
    Labeled nodes are clamped to their known labels, and those labels are
    propagated through the graph for `max_iter` iterations.

    Parameters
    ----------
    G : NetworkX Graph
    max_iter : int
        maximum number of iterations allowed
    label_name : string
        name of target labels to predict

    Returns
    -------
    predicted : list
        List of length ``len(G)`` with the predicted labels for each node.

    Raises
    ------
    NetworkXError
        If no nodes in `G` have attribute `label_name`.

    Examples
    --------
    >>> from networkx.algorithms import node_classification
    >>> G = nx.path_graph(4)
    >>> G.nodes[0]["label"] = "A"
    >>> G.nodes[3]["label"] = "B"
    >>> G.nodes(data=True)
    NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}})
    >>> G.edges()
    EdgeView([(0, 1), (1, 2), (2, 3)])
    >>> predicted = node_classification.harmonic_function(G)
    >>> predicted
    ['A', 'A', 'B', 'B']

    References
    ----------
    Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
    Semi-supervised learning using gaussian fields and harmonic functions.
    In ICML (Vol. 3, pp. 912-919).
    """
    import numpy as np
    import scipy as sp

    adjacency = nx.to_scipy_sparse_array(G)  # adjacency matrix
    labels, label_dict = _get_label_info(G, label_name)

    if labels.shape[0] == 0:
        raise nx.NetworkXError(
            f"No node on the input graph is labeled by '{label_name}'."
        )

    n_nodes = adjacency.shape[0]
    n_classes = label_dict.shape[0]

    # Row-normalize the adjacency matrix to obtain the propagation matrix.
    degrees = adjacency.sum(axis=0)
    degrees[degrees == 0] = 1  # Avoid division by 0
    # TODO: csr_array
    inv_degree = sp.sparse.csr_array(sp.sparse.diags(1.0 / degrees, offsets=0))
    propagation = (inv_degree @ adjacency).tolil()
    # Zero out the rows of labeled nodes so their scores are never updated
    # by propagation (labels[:, 0] holds the IDs of labeled nodes).
    propagation[labels[:, 0]] = 0
    # One-hot matrix that re-injects the known labels at every iteration.
    clamp = np.zeros((n_nodes, n_classes))
    clamp[labels[:, 0], labels[:, 1]] = 1

    scores = np.zeros((n_nodes, n_classes))
    for _ in range(max_iter):
        scores = (propagation @ scores) + clamp

    # Each node is assigned the class with the highest final score.
    return label_dict[np.argmax(scores, axis=1)].tolist()
|
105 |
+
|
106 |
+
|
107 |
+
@nx.utils.not_implemented_for("directed")
@nx._dispatchable(node_attrs="label_name")
def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"):
    """Node classification by Local and Global Consistency

    Function for computing Local and global consistency algorithm by Zhou et al.
    Labels spread along a symmetrically normalized adjacency matrix while the
    initial labels are softly clamped with weight ``1 - alpha``.

    Parameters
    ----------
    G : NetworkX Graph
    alpha : float
        Clamping factor
    max_iter : int
        Maximum number of iterations allowed
    label_name : string
        Name of target labels to predict

    Returns
    -------
    predicted : list
        List of length ``len(G)`` with the predicted labels for each node.

    Raises
    ------
    NetworkXError
        If no nodes in `G` have attribute `label_name`.

    Examples
    --------
    >>> from networkx.algorithms import node_classification
    >>> G = nx.path_graph(4)
    >>> G.nodes[0]["label"] = "A"
    >>> G.nodes[3]["label"] = "B"
    >>> G.nodes(data=True)
    NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}})
    >>> G.edges()
    EdgeView([(0, 1), (1, 2), (2, 3)])
    >>> predicted = node_classification.local_and_global_consistency(G)
    >>> predicted
    ['A', 'A', 'B', 'B']

    References
    ----------
    Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004).
    Learning with local and global consistency.
    Advances in neural information processing systems, 16(16), 321-328.
    """
    import numpy as np
    import scipy as sp

    adjacency = nx.to_scipy_sparse_array(G)  # adjacency matrix
    labels, label_dict = _get_label_info(G, label_name)

    if labels.shape[0] == 0:
        raise nx.NetworkXError(
            f"No node on the input graph is labeled by '{label_name}'."
        )

    n_nodes = adjacency.shape[0]
    n_classes = label_dict.shape[0]

    # Symmetric normalization: S = D^{-1/2} A D^{-1/2}, scaled by alpha.
    degrees = adjacency.sum(axis=0)
    degrees[degrees == 0] = 1  # Avoid division by 0
    # TODO: csr_array
    sqrt_inv_degree = np.sqrt(
        sp.sparse.csr_array(sp.sparse.diags(1.0 / degrees, offsets=0))
    )
    propagation = alpha * ((sqrt_inv_degree @ adjacency) @ sqrt_inv_degree)
    # Initial-label matrix, weighted by the clamping factor (1 - alpha).
    base = np.zeros((n_nodes, n_classes))
    base[labels[:, 0], labels[:, 1]] = 1 - alpha

    scores = np.zeros((n_nodes, n_classes))
    for _ in range(max_iter):
        scores = (propagation @ scores) + base

    # Each node is assigned the class with the highest final score.
    return label_dict[np.argmax(scores, axis=1)].tolist()
|
183 |
+
|
184 |
+
|
185 |
+
def _get_label_info(G, label_name):
|
186 |
+
"""Get and return information of labels from the input graph
|
187 |
+
|
188 |
+
Parameters
|
189 |
+
----------
|
190 |
+
G : Network X graph
|
191 |
+
label_name : string
|
192 |
+
Name of the target label
|
193 |
+
|
194 |
+
Returns
|
195 |
+
-------
|
196 |
+
labels : numpy array, shape = [n_labeled_samples, 2]
|
197 |
+
Array of pairs of labeled node ID and label ID
|
198 |
+
label_dict : numpy array, shape = [n_classes]
|
199 |
+
Array of labels
|
200 |
+
i-th element contains the label corresponding label ID `i`
|
201 |
+
"""
|
202 |
+
import numpy as np
|
203 |
+
|
204 |
+
labels = []
|
205 |
+
label_to_id = {}
|
206 |
+
lid = 0
|
207 |
+
for i, n in enumerate(G.nodes(data=True)):
|
208 |
+
if label_name in n[1]:
|
209 |
+
label = n[1][label_name]
|
210 |
+
if label not in label_to_id:
|
211 |
+
label_to_id[label] = lid
|
212 |
+
lid += 1
|
213 |
+
labels.append([i, label_to_id[label]])
|
214 |
+
labels = np.array(labels)
|
215 |
+
label_dict = np.array(
|
216 |
+
[label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])]
|
217 |
+
)
|
218 |
+
return (labels, label_dict)
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/planar_drawing.py
ADDED
@@ -0,0 +1,464 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections import defaultdict
|
2 |
+
|
3 |
+
import networkx as nx
|
4 |
+
|
5 |
+
__all__ = ["combinatorial_embedding_to_pos"]
|
6 |
+
|
7 |
+
|
8 |
+
def combinatorial_embedding_to_pos(embedding, fully_triangulate=False):
    """Assigns every node a (x, y) position based on the given embedding

    The algorithm iteratively inserts nodes of the input graph in a certain
    order and rearranges previously inserted nodes so that the planar drawing
    stays valid. This is done efficiently by only maintaining relative
    positions during the node placements and calculating the absolute positions
    at the end. For more information see [1]_.

    Parameters
    ----------
    embedding : nx.PlanarEmbedding
        This defines the order of the edges

    fully_triangulate : bool
        If set to True the algorithm adds edges to a copy of the input
        embedding and makes it chordal.

    Returns
    -------
    pos : dict
        Maps each node to a tuple that defines the (x, y) position

    References
    ----------
    .. [1] M. Chrobak and T.H. Payne:
        A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677

    """
    if len(embedding.nodes()) < 4:
        # Fewer than 4 nodes: no canonical ordering needed, just
        # position the node in any triangle.
        default_positions = [(0, 0), (2, 0), (1, 1)]
        pos = {}
        for i, v in enumerate(embedding.nodes()):
            pos[v] = default_positions[i]
        return pos

    # Work on a triangulated copy; outer_face lists its outer boundary.
    embedding, outer_face = triangulate_embedding(embedding, fully_triangulate)

    # The following dicts map a node to another node
    # If a node is not in the key set it means that the node is not yet in G_k
    # If a node maps to None then the corresponding subtree does not exist
    left_t_child = {}
    right_t_child = {}

    # The following dicts map a node to an integer
    # delta_x holds x offsets relative to the parent in the binary tree;
    # y_coordinate holds absolute y values.
    delta_x = {}
    y_coordinate = {}

    node_list = get_canonical_ordering(embedding, outer_face)

    # 1. Phase: Compute relative positions

    # Initialization: place the first triangle v1, v2, v3.
    v1, v2, v3 = node_list[0][0], node_list[1][0], node_list[2][0]

    delta_x[v1] = 0
    y_coordinate[v1] = 0
    right_t_child[v1] = v3
    left_t_child[v1] = None

    delta_x[v2] = 1
    y_coordinate[v2] = 0
    right_t_child[v2] = None
    left_t_child[v2] = None

    delta_x[v3] = 1
    y_coordinate[v3] = 1
    right_t_child[v3] = v2
    left_t_child[v3] = None

    for k in range(3, len(node_list)):
        # contour_nbrs are the current outer-face neighbors of vk,
        # ordered from wp (leftmost) to wq (rightmost).
        vk, contour_nbrs = node_list[k]
        wp = contour_nbrs[0]
        wp1 = contour_nbrs[1]
        wq = contour_nbrs[-1]
        wq1 = contour_nbrs[-2]
        adds_mult_tri = len(contour_nbrs) > 2

        # Stretch gaps: shift the covered contour to make room for vk.
        delta_x[wp1] += 1
        delta_x[wq] += 1

        delta_x_wp_wq = sum(delta_x[x] for x in contour_nbrs[1:])

        # Adjust offsets (integer arithmetic keeps the grid integral).
        delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2
        y_coordinate[vk] = (y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2
        delta_x[wq] = delta_x_wp_wq - delta_x[vk]
        if adds_mult_tri:
            delta_x[wp1] -= delta_x[vk]

        # Install v_k into the binary tree of relative offsets:
        right_t_child[wp] = vk
        right_t_child[vk] = wq
        if adds_mult_tri:
            left_t_child[vk] = wp1
            right_t_child[wq1] = None
        else:
            left_t_child[vk] = None

    # 2. Phase: Set absolute positions by accumulating delta_x along a
    # depth-first traversal of the offset tree rooted at v1.
    pos = {}
    pos[v1] = (0, y_coordinate[v1])
    remaining_nodes = [v1]
    while remaining_nodes:
        parent_node = remaining_nodes.pop()

        # Calculate position for left child
        set_position(
            parent_node, left_t_child, remaining_nodes, delta_x, y_coordinate, pos
        )
        # Calculate position for right child
        set_position(
            parent_node, right_t_child, remaining_nodes, delta_x, y_coordinate, pos
        )
    return pos
|
126 |
+
|
127 |
+
|
128 |
+
def set_position(parent, tree, remaining_nodes, delta_x, y_coordinate, pos):
    """Helper method to calculate the absolute position of nodes."""
    child = tree[parent]
    if child is None:
        # No subtree hangs off this side of ``parent``; nothing to place.
        return
    # The child's x offset is relative to its parent; y is already absolute.
    pos[child] = (pos[parent][0] + delta_x[child], y_coordinate[child])
    # Defer positioning of the child's own subtrees.
    remaining_nodes.append(child)
|
138 |
+
|
139 |
+
|
140 |
+
def get_canonical_ordering(embedding, outer_face):
    """Returns a canonical ordering of the nodes

    The canonical ordering of nodes (v1, ..., vn) must fulfill the following
    conditions:
    (See Lemma 1 in [2]_)

    - For the subgraph G_k of the input graph induced by v1, ..., vk it holds:
        - 2-connected
        - internally triangulated
        - the edge (v1, v2) is part of the outer face
    - For a node v(k+1) the following holds:
        - The node v(k+1) is part of the outer face of G_k
        - It has at least two neighbors in G_k
        - All neighbors of v(k+1) in G_k lie consecutively on the outer face of
          G_k (excluding the edge (v1, v2)).

    The algorithm used here starts with G_n (containing all nodes). It first
    selects the nodes v1 and v2. And then tries to find the order of the other
    nodes by checking which node can be removed in order to fulfill the
    conditions mentioned above. This is done by calculating the number of
    chords of nodes on the outer face. For more information see [1]_.

    Parameters
    ----------
    embedding : nx.PlanarEmbedding
        The embedding must be triangulated
    outer_face : list
        The nodes on the outer face of the graph

    Returns
    -------
    ordering : list
        A list of tuples `(vk, wp_wq)`. Here `vk` is the node at this position
        in the canonical ordering. The element `wp_wq` is a list of nodes that
        make up the outer face of G_k.

    References
    ----------
    .. [1] Steven Chaplick.
        Canonical Orders of Planar Graphs and (some of) Their Applications 2015
        https://wuecampus2.uni-wuerzburg.de/moodle/pluginfile.php/545727/mod_resource/content/0/vg-ss15-vl03-canonical-orders-druckversion.pdf
    .. [2] M. Chrobak and T.H. Payne:
        A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677

    """
    # v1 and v2 are fixed as the first two nodes of the ordering.
    v1 = outer_face[0]
    v2 = outer_face[1]
    chords = defaultdict(int)  # Maps nodes to the number of their chords
    marked_nodes = set()  # Nodes already removed (assigned a position)
    ready_to_pick = set(outer_face)  # Outer-face nodes with zero chords

    # Initialize outer_face_ccw_nbr (do not include v1 -> v2)
    outer_face_ccw_nbr = {}
    prev_nbr = v2
    for idx in range(2, len(outer_face)):
        outer_face_ccw_nbr[prev_nbr] = outer_face[idx]
        prev_nbr = outer_face[idx]
    outer_face_ccw_nbr[prev_nbr] = v1

    # Initialize outer_face_cw_nbr (do not include v2 -> v1)
    outer_face_cw_nbr = {}
    prev_nbr = v1
    for idx in range(len(outer_face) - 1, 0, -1):
        outer_face_cw_nbr[prev_nbr] = outer_face[idx]
        prev_nbr = outer_face[idx]

    def is_outer_face_nbr(x, y):
        # True iff x and y are adjacent on the current outer face.
        if x not in outer_face_ccw_nbr:
            return outer_face_cw_nbr[x] == y
        if x not in outer_face_cw_nbr:
            return outer_face_ccw_nbr[x] == y
        return outer_face_ccw_nbr[x] == y or outer_face_cw_nbr[x] == y

    def is_on_outer_face(x):
        # v1 never appears as a key in outer_face_ccw_nbr, hence the special case.
        return x not in marked_nodes and (x in outer_face_ccw_nbr or x == v1)

    # Initialize number of chords: edges between non-adjacent outer-face nodes.
    for v in outer_face:
        for nbr in embedding.neighbors_cw_order(v):
            if is_on_outer_face(nbr) and not is_outer_face_nbr(v, nbr):
                chords[v] += 1
                ready_to_pick.discard(v)

    # Initialize canonical_ordering
    canonical_ordering = [None] * len(embedding.nodes())
    canonical_ordering[0] = (v1, [])
    canonical_ordering[1] = (v2, [])
    ready_to_pick.discard(v1)
    ready_to_pick.discard(v2)

    # Fill positions n-1 down to 2 by repeatedly removing a chord-free node.
    for k in range(len(embedding.nodes()) - 1, 1, -1):
        # 1. Pick v from ready_to_pick
        v = ready_to_pick.pop()
        marked_nodes.add(v)

        # v has exactly two neighbors on the outer face (wp and wq)
        wp = None
        wq = None
        # Iterate over neighbors of v to find wp and wq
        nbr_iterator = iter(embedding.neighbors_cw_order(v))
        while True:
            nbr = next(nbr_iterator)
            if nbr in marked_nodes:
                # Only consider nodes that are not yet removed
                continue
            if is_on_outer_face(nbr):
                # nbr is either wp or wq
                if nbr == v1:
                    wp = v1
                elif nbr == v2:
                    wq = v2
                else:
                    if outer_face_cw_nbr[nbr] == v:
                        # nbr is wp
                        wp = nbr
                    else:
                        # nbr is wq
                        wq = nbr
            if wp is not None and wq is not None:
                # We don't need to iterate any further
                break

        # Obtain new nodes on outer face (neighbors of v from wp to wq)
        wp_wq = [wp]
        nbr = wp
        while nbr != wq:
            # Get next neighbor (clockwise on the outer face)
            next_nbr = embedding[v][nbr]["ccw"]
            wp_wq.append(next_nbr)
            # Update outer face
            outer_face_cw_nbr[nbr] = next_nbr
            outer_face_ccw_nbr[next_nbr] = nbr
            # Move to next neighbor of v
            nbr = next_nbr

        if len(wp_wq) == 2:
            # There was a chord between wp and wq, decrease number of chords
            chords[wp] -= 1
            if chords[wp] == 0:
                ready_to_pick.add(wp)
            chords[wq] -= 1
            if chords[wq] == 0:
                ready_to_pick.add(wq)
        else:
            # Update all chords involving w_(p+1) to w_(q-1)
            new_face_nodes = set(wp_wq[1:-1])
            for w in new_face_nodes:
                # If we do not find a chord for w later we can pick it next
                ready_to_pick.add(w)
                for nbr in embedding.neighbors_cw_order(w):
                    if is_on_outer_face(nbr) and not is_outer_face_nbr(w, nbr):
                        # There is a chord involving w
                        chords[w] += 1
                        ready_to_pick.discard(w)
                        if nbr not in new_face_nodes:
                            # Also increase chord for the neighbor
                            # We only iterator over new_face_nodes
                            chords[nbr] += 1
                            ready_to_pick.discard(nbr)
        # Set the canonical ordering node and the list of contour neighbors
        canonical_ordering[k] = (v, wp_wq)

    return canonical_ordering
|
305 |
+
|
306 |
+
|
307 |
+
def triangulate_face(embedding, v1, v2):
    """Triangulates the face given by half edge (v, w)

    Walks around the face that lies to the left of the half-edge (v1, v2)
    and inserts diagonals until the face consists only of triangles.
    The embedding is modified in place.

    Parameters
    ----------
    embedding : nx.PlanarEmbedding
    v1 : node
        The half-edge (v1, v2) belongs to the face that gets triangulated
    v2 : node
    """
    _, v3 = embedding.next_face_half_edge(v1, v2)
    _, v4 = embedding.next_face_half_edge(v2, v3)
    if v1 in (v2, v3):
        # The component has less than 3 nodes
        return
    # Invariant: (v1, v2), (v2, v3) are consecutive half-edges on the face;
    # stop once the walk returns to v1.
    while v1 != v4:
        # Add edge if not already present on other side
        if embedding.has_edge(v1, v3):
            # Cannot triangulate at this position
            v1, v2, v3 = v2, v3, v4
        else:
            # Add edge for triangulation
            embedding.add_half_edge(v1, v3, ccw=v2)
            embedding.add_half_edge(v3, v1, cw=v2)
            v1, v2, v3 = v1, v3, v4
        # Get next node
        _, v4 = embedding.next_face_half_edge(v2, v3)
|
334 |
+
|
335 |
+
|
336 |
+
def triangulate_embedding(embedding, fully_triangulate=True):
    """Triangulates the embedding.

    Traverses faces of the embedding and adds edges to a copy of the
    embedding to triangulate it.
    The method also ensures that the resulting graph is 2-connected by adding
    edges if the same vertex is contained twice on a path around a face.

    Parameters
    ----------
    embedding : nx.PlanarEmbedding
        The input graph must contain at least 3 nodes.

    fully_triangulate : bool
        If set to False the face with the most nodes is chooses as outer face.
        This outer face does not get triangulated.

    Returns
    -------
    (embedding, outer_face) : (nx.PlanarEmbedding, list) tuple
        The element `embedding` is a new embedding containing all edges from
        the input embedding and the additional edges to triangulate the graph.
        The element `outer_face` is a list of nodes that lie on the outer face.
        If the graph is fully triangulated these are three arbitrary connected
        nodes.

    """
    if len(embedding.nodes) <= 1:
        # Trivial graphs need no triangulation.
        return embedding, list(embedding.nodes)
    # Copy so the caller's embedding is left untouched.
    embedding = nx.PlanarEmbedding(embedding)

    # Get a list with a node for each connected component
    component_nodes = [next(iter(x)) for x in nx.connected_components(embedding)]

    # 1. Make graph a single component (add edge between components)
    for i in range(len(component_nodes) - 1):
        v1 = component_nodes[i]
        v2 = component_nodes[i + 1]
        embedding.connect_components(v1, v2)

    # 2. Calculate faces, ensure 2-connectedness and determine outer face
    outer_face = []  # A face with the most number of nodes
    face_list = []
    edges_visited = set()  # Used to keep track of already visited faces
    for v in embedding.nodes():
        for w in embedding.neighbors_cw_order(v):
            new_face = make_bi_connected(embedding, v, w, edges_visited)
            if new_face:
                # Found a new face
                face_list.append(new_face)
                if len(new_face) > len(outer_face):
                    # The face is a candidate to be the outer face
                    outer_face = new_face

    # 3. Triangulate (internal) faces
    # NOTE: identity comparison (``is not``) is intentional — the outer face
    # object itself is skipped, not faces that merely compare equal.
    for face in face_list:
        if face is not outer_face or fully_triangulate:
            # Triangulate this face
            triangulate_face(embedding, face[0], face[1])

    if fully_triangulate:
        # After full triangulation any face is a triangle; report the one
        # adjacent to the half-edge (v2, v1).
        v1 = outer_face[0]
        v2 = outer_face[1]
        v3 = embedding[v2][v1]["ccw"]
        outer_face = [v1, v2, v3]

    return embedding, outer_face
|
403 |
+
|
404 |
+
|
405 |
+
def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted):
    """Triangulate a face and make it 2-connected

    This method also adds all edges on the face to `edges_counted`.

    Parameters
    ----------
    embedding: nx.PlanarEmbedding
        The embedding that defines the faces
    starting_node : node
        A node on the face
    outgoing_node : node
        A node such that the half edge (starting_node, outgoing_node) belongs
        to the face
    edges_counted: set
        Set of all half-edges that belong to a face that have been visited

    Returns
    -------
    face_nodes: list
        A list of all nodes at the border of this face
    """

    # Check if the face has already been calculated
    if (starting_node, outgoing_node) in edges_counted:
        # This face was already counted
        return []
    edges_counted.add((starting_node, outgoing_node))

    # Add all edges to edges_counted which have this face to their left
    v1 = starting_node
    v2 = outgoing_node
    face_list = [starting_node]  # List of nodes around the face
    face_set = set(face_list)  # Set for faster queries
    _, v3 = embedding.next_face_half_edge(v1, v2)

    # Move the nodes v1, v2, v3 around the face:
    while v2 != starting_node or v3 != outgoing_node:
        if v1 == v2:
            # A degenerate walk indicates a corrupt embedding.
            raise nx.NetworkXException("Invalid half-edge")
        # cycle is not completed yet
        if v2 in face_set:
            # v2 encountered twice: Add edge to ensure 2-connectedness
            embedding.add_half_edge(v1, v3, ccw=v2)
            embedding.add_half_edge(v3, v1, cw=v2)
            edges_counted.add((v2, v3))
            edges_counted.add((v3, v1))
            # Restart the step from v1, since the face boundary changed.
            v2 = v1
        else:
            face_set.add(v2)
            face_list.append(v2)

        # set next edge
        v1 = v2
        v2, v3 = embedding.next_face_half_edge(v2, v3)

        # remember that this edge has been counted
        edges_counted.add((v1, v2))

    return face_list
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/polynomials.py
ADDED
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Provides algorithms supporting the computation of graph polynomials.
|
2 |
+
|
3 |
+
Graph polynomials are polynomial-valued graph invariants that encode a wide
|
4 |
+
variety of structural information. Examples include the Tutte polynomial,
|
5 |
+
chromatic polynomial, characteristic polynomial, and matching polynomial. An
|
6 |
+
extensive treatment is provided in [1]_.
|
7 |
+
|
8 |
+
For a simple example, the `~sympy.matrices.matrices.MatrixDeterminant.charpoly`
|
9 |
+
method can be used to compute the characteristic polynomial from the adjacency
|
10 |
+
matrix of a graph. Consider the complete graph ``K_4``:
|
11 |
+
|
12 |
+
>>> import sympy
|
13 |
+
>>> x = sympy.Symbol("x")
|
14 |
+
>>> G = nx.complete_graph(4)
|
15 |
+
>>> A = nx.adjacency_matrix(G)
|
16 |
+
>>> M = sympy.SparseMatrix(A.todense())
|
17 |
+
>>> M.charpoly(x).as_expr()
|
18 |
+
x**4 - 6*x**2 - 8*x - 3
|
19 |
+
|
20 |
+
|
21 |
+
.. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman,
|
22 |
+
"Graph Polynomials"
|
23 |
+
"""
|
24 |
+
from collections import deque
|
25 |
+
|
26 |
+
import networkx as nx
|
27 |
+
from networkx.utils import not_implemented_for
|
28 |
+
|
29 |
+
__all__ = ["tutte_polynomial", "chromatic_polynomial"]
|
30 |
+
|
31 |
+
|
32 |
+
@not_implemented_for("directed")
@nx._dispatchable
def tutte_polynomial(G):
    r"""Returns the Tutte polynomial of `G`

    The Tutte polynomial `T_G(x, y)` is a fundamental two-variable graph
    polynomial invariant encoding a wide array of edge-connectivity
    information; every deletion-contraction-expressible feature of a graph
    is a specialization of it (see Notes for examples).

    This implementation evaluates the deletion-contraction recurrence
    iteratively: for `G-e` the graph obtained from `G` by deleting edge `e`,
    `G/e` the graph obtained from `G` by contracting edge `e`, `k(G)` the
    number of cut-edges of `G`, and `l(G)` the number of self-loops of `G`:

    .. math::
        T_G(x, y) = \begin{cases}
            x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\
            T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
        \end{cases}

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the Tutte polynomial for `G`.

    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.tutte_polynomial(C)
    x**4 + x**3 + x**2 + x + y

    >>> D = nx.diamond_graph()
    >>> nx.tutte_polynomial(D)
    x**3 + 2*x**2 + 2*x*y + x + y**2 + y

    Notes
    -----
    Some specializations of the Tutte polynomial:

    - `T_G(1, 1)` counts the number of spanning trees of `G`
    - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`
    - `T_G(2, 1)` counts the number of spanning forests in `G`
    - `T_G(0, 2)` counts the number of strong orientations of `G`
    - `T_G(2, 0)` counts the number of acyclic orientations of `G`

    Up-front computation of the Tutte polynomial may be useful when users
    wish to repeatedly calculate edge-connectivity-related information about
    one or more graphs.

    References
    ----------
    .. [1] M. Brandt,
       "The Tutte Polynomial."
       Talking About Combinatorial Objects Seminar, 2015
       https://math.berkeley.edu/~brandtm/talks/tutte.pdf
    .. [2] J. A. Ellis-Monaghan, C. Merino,
       "Graph polynomials and their applications I: The Tutte polynomial"
       Structural Analysis of Complex Networks, 2011
       https://arxiv.org/pdf/0803.3079.pdf
    """
    import sympy

    x = sympy.Symbol("x")
    y = sympy.Symbol("y")
    # Work on a MultiGraph copy: contraction can create parallel edges and
    # self-loops even when the input is simple.
    work = deque([nx.MultiGraph(G)])

    result = 0
    while work:
        H = work.pop()
        cut_edges = set(nx.bridges(H))

        # Pick any edge that is neither a cut-edge nor a self-loop.
        chosen = next(
            (
                edge
                for edge in H.edges
                if (edge[0], edge[1]) not in cut_edges and edge[0] != edge[1]
            ),
            None,
        )
        if chosen is None:
            # Base case of the recurrence: only cut-edges and self-loops
            # remain, contributing x^{#cut-edges} * y^{#self-loops}.
            self_loops = list(nx.selfloop_edges(H, keys=True))
            result += x ** len(cut_edges) * y ** len(self_loops)
        else:
            # Deletion-contraction: push both branches onto the work stack.
            contracted = nx.contracted_edge(H, chosen, self_loops=True)
            # Drop the self-loop that the contraction itself introduced.
            contracted.remove_edge(chosen[0], chosen[0])
            H.remove_edge(*chosen)
            work.append(H)
            work.append(contracted)
    return sympy.simplify(result)
|
180 |
+
|
181 |
+
|
182 |
+
@not_implemented_for("directed")
@nx._dispatchable
def chromatic_polynomial(G):
    r"""Returns the chromatic polynomial of `G`

    The chromatic polynomial `X_G(x)` is a fundamental one-variable graph
    polynomial invariant: evaluating `X_G(k)` for a natural number `k`
    enumerates the proper k-colorings of `G`.

    This implementation evaluates the chromatic recurrence (the Fundamental
    Reduction Theorem) iteratively: for `G-e` the graph obtained from `G` by
    deleting edge `e`, `G/e` the graph obtained from `G` by contracting edge
    `e`, `n(G)` the number of vertices of `G`, and `e(G)` the number of
    edges of `G`:

    .. math::
        X_G(x) = \begin{cases}
            x^{n(G)}, & \text{if $e(G)=0$} \\
            X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$}
        \end{cases}

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    instance of `sympy.core.add.Add`
        A Sympy expression representing the chromatic polynomial for `G`.

    Examples
    --------
    >>> C = nx.cycle_graph(5)
    >>> nx.chromatic_polynomial(C)
    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x

    >>> G = nx.complete_graph(4)
    >>> nx.chromatic_polynomial(G)
    x**4 - 6*x**3 + 11*x**2 - 6*x

    Notes
    -----
    The chromatic polynomial is a specialization of the Tutte polynomial; in
    particular, ``X_G(x) = T_G(x, 0)``.

    The chromatic polynomial may take negative arguments, though evaluations
    may not have chromatic interpretations. For instance, ``X_G(-1)``
    enumerates the acyclic orientations of `G`.

    References
    ----------
    .. [1] D. B. West,
       "Introduction to Graph Theory," p. 221
    .. [2] J. Zhang, J. Goodall,
       "An Introduction to Chromatic Polynomials"
       https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf
    """
    import sympy

    x = sympy.Symbol("x")
    # Each stacked graph carries the number of contractions performed so
    # far, which determines the sign of its base-case contribution.
    work = deque([nx.MultiGraph(G, contraction_idx=0)])

    result = 0
    while work:
        H = work.pop()
        if H.number_of_edges() == 0:
            # Edgeless base case: x^{n(H)}, with alternating sign.
            result += (-1) ** H.graph["contraction_idx"] * x ** len(H)
        else:
            edge = next(iter(H.edges))
            # Deletion-contraction: the contraction branch flips the sign.
            contracted = nx.contracted_edge(H, edge, self_loops=True)
            contracted.graph["contraction_idx"] = H.graph["contraction_idx"] + 1
            # Drop the self-loop that the contraction itself introduced.
            contracted.remove_edge(edge[0], edge[0])
            H.remove_edge(*edge)
            work.append(H)
            work.append(contracted)
    return result
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/regular.py
ADDED
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for computing and verifying regular graphs."""
|
2 |
+
import networkx as nx
|
3 |
+
from networkx.utils import not_implemented_for
|
4 |
+
|
5 |
+
__all__ = ["is_regular", "is_k_regular", "k_factor"]
|
6 |
+
|
7 |
+
|
8 |
+
@nx._dispatchable
def is_regular(G):
    """Determines whether the graph ``G`` is a regular graph.

    A regular graph is a graph where every vertex has the same degree; a
    regular digraph is one where the indegree and outdegree of each vertex
    are equal.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    bool
        Whether the given graph or digraph is regular.

    Raises
    ------
    NetworkXPointlessConcept
        If the graph has no nodes.

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 1)])
    >>> nx.is_regular(G)
    True

    """
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
    node = nx.utils.arbitrary_element(G)
    if G.is_directed():
        # Directed case: both the in-degree and out-degree sequences must
        # be constant; compare every vertex against an arbitrary reference.
        ref_in = G.in_degree(node)
        ref_out = G.out_degree(node)
        return all(deg == ref_in for _, deg in G.in_degree) and all(
            deg == ref_out for _, deg in G.out_degree
        )
    # Undirected case: a single constant degree sequence suffices.
    ref = G.degree(node)
    return all(deg == ref for _, deg in G.degree)
|
44 |
+
|
45 |
+
|
46 |
+
@not_implemented_for("directed")
@nx._dispatchable
def is_k_regular(G, k):
    """Determines whether the graph ``G`` is a k-regular graph.

    A k-regular graph is a graph where each vertex has degree ``k``.

    Parameters
    ----------
    G : NetworkX graph

    k : int
        The degree each vertex is tested against.

    Returns
    -------
    bool
        Whether the given graph is k-regular.

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)])
    >>> nx.is_k_regular(G, k=3)
    False

    """
    # Short-circuit on the first vertex whose degree differs from k.
    for _node, degree in G.degree:
        if degree != k:
            return False
    return True
|
70 |
+
|
71 |
+
|
72 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True)
def k_factor(G, k, matching_weight="weight"):
    """Compute a k-factor of G

    A k-factor of a graph is a spanning k-regular subgraph.
    A spanning k-regular subgraph of G is a subgraph that contains
    each vertex of G and a subset of the edges of G such that each
    vertex has degree k.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    k : int
        The degree of every vertex in the returned spanning subgraph.

    matching_weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight.
        Used for finding the max-weighted perfect matching.
        If key not found, uses 1 as weight.

    Returns
    -------
    G2 : NetworkX graph
        A k-factor of G

    Raises
    ------
    NetworkXUnfeasible
        If some vertex has degree less than k, or if no perfect matching
        exists in the gadget-expanded graph (i.e. G has no k-factor).

    Examples
    --------
    >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)])
    >>> G2 = nx.k_factor(G, k=1)
    >>> G2.edges()
    EdgeView([(1, 2), (3, 4)])

    References
    ----------
    .. [1] "An algorithm for computing simple k-factors.",
       Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport,
       Information processing letters, 2009.
    """

    from networkx.algorithms.matching import is_perfect_matching, max_weight_matching

    class LargeKGadget:
        # Gadget that replaces a node of degree d when k >= d / 2 (see [1]).
        def __init__(self, k, degree, node, g):
            self.original = node
            self.g = g
            self.k = k
            self.degree = degree

            # One outer vertex per incident edge; degree - k core vertices
            # force exactly k of the outer vertices to match externally.
            self.outer_vertices = [(node, x) for x in range(degree)]
            self.core_vertices = [(node, x + degree) for x in range(degree - k)]

        def replace_node(self):
            # Reroute each incident edge to its own outer vertex, then
            # connect every core vertex to every outer vertex.
            adj_view = self.g[self.original]
            neighbors = list(adj_view.keys())
            # Distinct names for the attribute list and the loop variable
            # (the original code shadowed ``edge_attrs`` with itself).
            attr_dicts = list(adj_view.values())
            for outer, neighbor, edge_attrs in zip(
                self.outer_vertices, neighbors, attr_dicts
            ):
                self.g.add_edge(outer, neighbor, **edge_attrs)
            for core in self.core_vertices:
                for outer in self.outer_vertices:
                    self.g.add_edge(core, outer)
            self.g.remove_node(self.original)

        def restore_node(self):
            # Re-create the original node, re-attach each outer vertex's
            # surviving external matching edge, then drop the gadget.
            self.g.add_node(self.original)
            for outer in self.outer_vertices:
                adj_view = self.g[outer]
                for neighbor, edge_attrs in list(adj_view.items()):
                    if neighbor not in self.core_vertices:
                        self.g.add_edge(self.original, neighbor, **edge_attrs)
                        break
            # Use self.g (not the enclosing-scope ``g``) for consistency
            # with SmallKGadget; both names refer to the same graph.
            self.g.remove_nodes_from(self.outer_vertices)
            self.g.remove_nodes_from(self.core_vertices)

    class SmallKGadget:
        # Gadget that replaces a node of degree d when k < d / 2 (see [1]).
        def __init__(self, k, degree, node, g):
            self.original = node
            self.k = k
            self.degree = degree
            self.g = g

            self.outer_vertices = [(node, x) for x in range(degree)]
            self.inner_vertices = [(node, x + degree) for x in range(degree)]
            self.core_vertices = [(node, x + 2 * degree) for x in range(k)]

        def replace_node(self):
            # Each incident edge gets an outer-inner pair; core vertices
            # connect to every inner vertex.
            adj_view = self.g[self.original]
            for outer, inner, (neighbor, edge_attrs) in zip(
                self.outer_vertices, self.inner_vertices, list(adj_view.items())
            ):
                self.g.add_edge(outer, inner)
                self.g.add_edge(outer, neighbor, **edge_attrs)
            for core in self.core_vertices:
                for inner in self.inner_vertices:
                    self.g.add_edge(core, inner)
            self.g.remove_node(self.original)

        def restore_node(self):
            # Re-create the original node, re-attach each outer vertex's
            # surviving external matching edge, then drop the gadget.
            self.g.add_node(self.original)
            for outer in self.outer_vertices:
                adj_view = self.g[outer]
                for neighbor, edge_attrs in adj_view.items():
                    if neighbor not in self.core_vertices:
                        self.g.add_edge(self.original, neighbor, **edge_attrs)
                        break
            self.g.remove_nodes_from(self.outer_vertices)
            self.g.remove_nodes_from(self.inner_vertices)
            self.g.remove_nodes_from(self.core_vertices)

    # Step 1: a k-factor requires every vertex to have degree at least k.
    if any(d < k for _, d in G.degree):
        raise nx.NetworkXUnfeasible("Graph contains a vertex with degree less than k")
    g = G.copy()

    # Step 2: replace every node by the appropriate gadget.
    gadgets = []
    for node, degree in list(g.degree):
        if k < degree / 2.0:
            gadget = SmallKGadget(k, degree, node, g)
        else:
            gadget = LargeKGadget(k, degree, node, g)
        gadget.replace_node()
        gadgets.append(gadget)

    # Step 3: a perfect matching in the expanded graph selects the edges
    # of the k-factor.
    matching = max_weight_matching(g, maxcardinality=True, weight=matching_weight)

    # Step 4
    if not is_perfect_matching(g, matching):
        raise nx.NetworkXUnfeasible(
            "Cannot find k-factor because no perfect matching exists"
        )

    # Remove all unmatched edges.  Materialize the edge list first:
    # removing edges while iterating the live edge view can raise
    # "dictionary changed size during iteration".
    for edge in list(g.edges()):
        if edge not in matching and (edge[1], edge[0]) not in matching:
            g.remove_edge(edge[0], edge[1])

    for gadget in gadgets:
        gadget.restore_node()

    return g
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/richclub.py
ADDED
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for computing rich-club coefficients."""
|
2 |
+
|
3 |
+
from itertools import accumulate
|
4 |
+
|
5 |
+
import networkx as nx
|
6 |
+
from networkx.utils import not_implemented_for
|
7 |
+
|
8 |
+
__all__ = ["rich_club_coefficient"]
|
9 |
+
|
10 |
+
|
11 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def rich_club_coefficient(G, normalized=True, Q=100, seed=None):
    r"""Returns the rich-club coefficient of the graph `G`.

    For each degree *k*, the *rich-club coefficient* is the ratio of the
    number of actual to the number of potential edges for nodes with
    degree greater than *k*:

    .. math::

        \phi(k) = \frac{2 E_k}{N_k (N_k - 1)}

    where `N_k` is the number of nodes with degree larger than *k*, and
    `E_k` is the number of edges among those nodes.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph with neither parallel edges nor self-loops.
    normalized : bool (optional)
        Normalize using randomized network as in [1]_
    Q : float (optional, default=100)
        If `normalized` is True, perform `Q * m` double-edge
        swaps, where `m` is the number of edges in `G`, to use as a
        null-model for normalization.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    rc : dictionary
        A dictionary, keyed by degree, with rich-club coefficient values.

    Raises
    ------
    NetworkXError
        If `G` has self loops, or if `G` has fewer than four nodes and
        ``normalized=True`` (a randomly sampled graph for normalization
        cannot be generated in the latter case).

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
    >>> rc = nx.rich_club_coefficient(G, normalized=False, seed=42)
    >>> rc[0]
    0.4

    Notes
    -----
    The rich club definition and algorithm are found in [1]_. This
    algorithm ignores any edge weights and is not defined for directed
    graphs or graphs with parallel edges or self loops.

    Normalization is done by computing the rich club coefficient for a randomly
    sampled graph with the same degree distribution as `G` by
    repeatedly swapping the endpoints of existing edges. For graphs with fewer than 4
    nodes, it is not possible to generate a random graph with a prescribed
    degree distribution, as the degree distribution fully determines the graph
    (hence making the coefficients trivially normalized to 1).
    This function raises an exception in this case.

    Estimates for appropriate values of `Q` are found in [2]_.

    References
    ----------
    .. [1] Julian J. McAuley, Luciano da Fontoura Costa,
       and Tibério S. Caetano,
       "The rich-club phenomenon across complex network hierarchies",
       Applied Physics Letters Vol 91 Issue 8, August 2007.
       https://arxiv.org/abs/physics/0701290
    .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon,
       "Uniform generation of random graphs with arbitrary degree
       sequences", 2006. https://arxiv.org/abs/cond-mat/0312028
    """
    if nx.number_of_selfloops(G) > 0:
        # Raise the library's exception type (a subclass of Exception, so
        # existing ``except Exception`` handlers still catch it) instead of
        # a bare Exception, consistent with the rest of networkx.
        raise nx.NetworkXError(
            "rich_club_coefficient is not implemented for graphs with self loops."
        )
    rc = _compute_rc(G)
    if normalized:
        # make R a copy of G, randomize with Q*|E| double edge swaps
        # and use rich_club coefficient of R to normalize
        R = G.copy()
        E = R.number_of_edges()
        nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed)
        rcran = _compute_rc(R)
        rc = {k: v / rcran[k] for k, v in rc.items()}
    return rc
|
101 |
+
|
102 |
+
|
103 |
+
def _compute_rc(G):
    """Returns the rich-club coefficient for each degree in the graph
    `G`.

    `G` is an undirected graph without multiedges.

    Returns a dictionary mapping degree to rich-club coefficient for
    that degree.

    """
    deghist = nx.degree_histogram(G)
    # Total number of nodes in the graph.
    total = sum(deghist)
    # Compute the number of nodes with degree greater than `k`, for each
    # degree `k` (omitting the last entry, which is zero).
    # Degrees for which fewer than two nodes remain are filtered out,
    # since the coefficient's denominator N_k*(N_k - 1) would be zero.
    nks = (total - cs for cs in accumulate(deghist) if total - cs > 1)
    # Create a sorted list of pairs of edge endpoint degrees.
    #
    # The list is sorted in reverse order so that we can pop from the
    # right side of the list later, instead of popping from the left
    # side of the list, which would have a linear time cost.
    edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True)
    # `ek` counts the edges whose endpoints both have degree > d; it
    # starts at |E| (every edge qualifies for d below the minimum degree).
    ek = G.number_of_edges()
    if ek == 0:
        return {}

    # `k1 <= k2` is the smallest remaining endpoint-degree pair; an edge
    # stops qualifying as soon as its smaller endpoint degree k1 <= d.
    k1, k2 = edge_degrees.pop()
    rc = {}
    for d, nk in enumerate(nks):
        # Discard (and un-count) every edge whose smaller endpoint degree
        # no longer exceeds the current degree threshold `d`.
        while k1 <= d:
            if len(edge_degrees) == 0:
                # No qualifying edges remain for this or any larger `d`.
                ek = 0
                break
            k1, k2 = edge_degrees.pop()
            ek -= 1
        # phi(d) = 2*E_d / (N_d * (N_d - 1))
        rc[d] = 2 * ek / (nk * (nk - 1))
    return rc
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/similarity.py
ADDED
@@ -0,0 +1,1777 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" Functions measuring similarity using graph edit distance.
|
2 |
+
|
3 |
+
The graph edit distance is the number of edge/node changes needed
|
4 |
+
to make two graphs isomorphic.
|
5 |
+
|
6 |
+
The default algorithm/implementation is sub-optimal for some graphs.
|
7 |
+
The problem of finding the exact Graph Edit Distance (GED) is NP-hard
|
8 |
+
so it is often slow. If the simple interface `graph_edit_distance`
|
9 |
+
takes too long for your graph, try `optimize_graph_edit_distance`
|
10 |
+
and/or `optimize_edit_paths`.
|
11 |
+
|
12 |
+
Contributions of alternative GED algorithms are encouraged, in order to
improve the choices available.
|
14 |
+
"""
|
15 |
+
|
16 |
+
import math
|
17 |
+
import time
|
18 |
+
import warnings
|
19 |
+
from dataclasses import dataclass
|
20 |
+
from itertools import product
|
21 |
+
|
22 |
+
import networkx as nx
|
23 |
+
from networkx.utils import np_random_state
|
24 |
+
|
25 |
+
__all__ = [
|
26 |
+
"graph_edit_distance",
|
27 |
+
"optimal_edit_paths",
|
28 |
+
"optimize_graph_edit_distance",
|
29 |
+
"optimize_edit_paths",
|
30 |
+
"simrank_similarity",
|
31 |
+
"panther_similarity",
|
32 |
+
"generate_random_paths",
|
33 |
+
]
|
34 |
+
|
35 |
+
|
36 |
+
def debug_print(*args, **kwargs):
    """Forward all positional and keyword arguments to :func:`print`.

    Internal debugging aid; behaves exactly like the built-in ``print``.
    """
    print(*args, **kwargs)
|
38 |
+
|
39 |
+
|
40 |
+
@nx._dispatchable(
    graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
)
def graph_edit_distance(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    roots=None,
    upper_bound=None,
    timeout=None,
):
    """Returns GED (graph edit distance) between graphs G1 and G2.

    Graph edit distance is a graph analogue of Levenshtein distance on
    strings: the minimum cost of an edit path (a sequence of node and
    edge substitutions, deletions and insertions) transforming ``G1``
    into a graph isomorphic to ``G2``.

    Parameters
    ----------
    G1, G2 : graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        A function that returns True if node ``n1`` in ``G1`` and ``n2``
        in ``G2`` should be considered equal during matching.  Called as
        ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. with the two
        node attribute dictionaries.  Ignored if ``node_subst_cost`` is
        specified; if neither is specified, node attributes are not
        considered.

    edge_match : callable
        A function that returns True if the edge attribute dictionaries
        for the pair of nodes ``(u1, v1)`` in ``G1`` and ``(u2, v2)`` in
        ``G2`` should be considered equal during matching.  Called as
        ``edge_match(G1[u1][v1], G2[u2][v2])``.  Ignored if
        ``edge_subst_cost`` is specified; if neither is specified, edge
        attributes are not considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Cost functions for node substitution, deletion and insertion.
        Called with node attribute dictionaries
        (``node_subst_cost(G1.nodes[n1], G2.nodes[n2])``,
        ``node_del_cost(G1.nodes[n1])``, ``node_ins_cost(G2.nodes[n2])``)
        and expected to return positive numeric values.
        ``node_subst_cost`` overrides ``node_match``; without either,
        substitution costs 0 (attributes ignored).  Unspecified deletion
        and insertion costs default to 1.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Analogous cost functions for edges, called with edge attribute
        dictionaries (``edge_subst_cost(G1[u1][v1], G2[u2][v2])``,
        ``edge_del_cost(G1[u1][v1])``, ``edge_ins_cost(G2[u2][v2])``)
        and expected to return positive numeric values.
        ``edge_subst_cost`` overrides ``edge_match``; without either,
        substitution costs 0.  Unspecified deletion and insertion costs
        default to 1.

    roots : 2-tuple
        Tuple whose first element is a node in ``G1`` and whose second
        is a node in ``G2``.  These nodes are forced to be matched,
        allowing comparison between rooted graphs.

    upper_bound : numeric
        Maximum edit distance to consider.  Return None if no edit
        distance under or equal to ``upper_bound`` exists.

    timeout : numeric
        Maximum number of seconds to execute.  After the timeout is
        met, the current best GED is returned.

    Examples
    --------
    >>> G1 = nx.cycle_graph(6)
    >>> G2 = nx.wheel_graph(7)
    >>> nx.graph_edit_distance(G1, G2)
    7.0

    >>> G1 = nx.star_graph(5)
    >>> G2 = nx.star_graph(5)
    >>> nx.graph_edit_distance(G1, G2, roots=(0, 0))
    0.0
    >>> nx.graph_edit_distance(G1, G2, roots=(1, 0))
    8.0

    See Also
    --------
    optimal_edit_paths, optimize_graph_edit_distance,

    is_isomorphic: test for graph edit distance of 0

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816

    """
    # optimize_edit_paths yields successively better approximations when
    # strictly_decreasing=True, so the last cost seen is the answer (or
    # the best found before upper_bound/timeout cut the search short).
    best = None
    for *_, cost in optimize_edit_paths(
        G1,
        G2,
        node_match=node_match,
        edge_match=edge_match,
        node_subst_cost=node_subst_cost,
        node_del_cost=node_del_cost,
        node_ins_cost=node_ins_cost,
        edge_subst_cost=edge_subst_cost,
        edge_del_cost=edge_del_cost,
        edge_ins_cost=edge_ins_cost,
        upper_bound=upper_bound,
        strictly_decreasing=True,
        roots=roots,
        timeout=timeout,
    ):
        best = cost
    return best
|
212 |
+
|
213 |
+
|
214 |
+
@nx._dispatchable(graphs={"G1": 0, "G2": 1})
def optimal_edit_paths(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    upper_bound=None,
):
    """Returns all minimum-cost edit paths transforming G1 to G2.

    A graph edit path is a sequence of node and edge edit operations
    (substitutions, deletions and insertions) transforming ``G1`` into
    a graph isomorphic to ``G2``.

    Parameters
    ----------
    G1, G2 : graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        A function that returns True if node ``n1`` in ``G1`` and ``n2``
        in ``G2`` should be considered equal during matching.  Called as
        ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. with the two
        node attribute dictionaries.  Ignored if ``node_subst_cost`` is
        specified; if neither is specified, node attributes are not
        considered.

    edge_match : callable
        A function that returns True if the edge attribute dictionaries
        for the pair of nodes ``(u1, v1)`` in ``G1`` and ``(u2, v2)`` in
        ``G2`` should be considered equal during matching.  Called as
        ``edge_match(G1[u1][v1], G2[u2][v2])``.  Ignored if
        ``edge_subst_cost`` is specified; if neither is specified, edge
        attributes are not considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Cost functions for node substitution, deletion and insertion.
        Called with node attribute dictionaries
        (``node_subst_cost(G1.nodes[n1], G2.nodes[n2])``,
        ``node_del_cost(G1.nodes[n1])``, ``node_ins_cost(G2.nodes[n2])``)
        and expected to return positive numeric values.
        ``node_subst_cost`` overrides ``node_match``; without either,
        substitution costs 0 (attributes ignored).  Unspecified deletion
        and insertion costs default to 1.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Analogous cost functions for edges, called with edge attribute
        dictionaries (``edge_subst_cost(G1[u1][v1], G2[u2][v2])``,
        ``edge_del_cost(G1[u1][v1])``, ``edge_ins_cost(G2[u2][v2])``)
        and expected to return positive numeric values.
        ``edge_subst_cost`` overrides ``edge_match``; without either,
        substitution costs 0.  Unspecified deletion and insertion costs
        default to 1.

    upper_bound : numeric
        Maximum edit distance to consider.

    Returns
    -------
    edit_paths : list of tuples (node_edit_path, edge_edit_path)
        node_edit_path : list of tuples (u, v)
        edge_edit_path : list of tuples ((u1, v1), (u2, v2))

    cost : numeric
        Optimal edit path cost (graph edit distance).  When the cost
        is zero, it indicates that `G1` and `G2` are isomorphic.

    Examples
    --------
    >>> G1 = nx.cycle_graph(4)
    >>> G2 = nx.wheel_graph(5)
    >>> paths, cost = nx.optimal_edit_paths(G1, G2)
    >>> len(paths)
    40
    >>> cost
    5.0

    Notes
    -----
    To transform `G1` into a graph isomorphic to `G2`, apply the node
    and edge edits in the returned ``edit_paths``.  In the case of
    isomorphic graphs, the cost is zero and the paths represent the
    different isomorphic mappings (isomorphisms) — renamings of nodes
    and edges matching the structure of `G2`.

    See Also
    --------
    graph_edit_distance, optimize_edit_paths

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816

    """
    # With strictly_decreasing=False the generator yields every path
    # whose cost is <= the best cost seen so far, so ties accumulate.
    all_paths = []
    best = None
    for node_path, edge_path, cost in optimize_edit_paths(
        G1,
        G2,
        node_match=node_match,
        edge_match=edge_match,
        node_subst_cost=node_subst_cost,
        node_del_cost=node_del_cost,
        node_ins_cost=node_ins_cost,
        edge_subst_cost=edge_subst_cost,
        edge_del_cost=edge_del_cost,
        edge_ins_cost=edge_ins_cost,
        upper_bound=upper_bound,
        strictly_decreasing=False,
    ):
        if best is not None and cost < best:
            # A strictly cheaper path invalidates everything collected.
            all_paths = []
        all_paths.append((node_path, edge_path))
        best = cost
    return all_paths, best
|
384 |
+
|
385 |
+
|
386 |
+
@nx._dispatchable(graphs={"G1": 0, "G2": 1})
def optimize_graph_edit_distance(
    G1,
    G2,
    node_match=None,
    edge_match=None,
    node_subst_cost=None,
    node_del_cost=None,
    node_ins_cost=None,
    edge_subst_cost=None,
    edge_del_cost=None,
    edge_ins_cost=None,
    upper_bound=None,
):
    """Returns consecutive approximations of GED (graph edit distance)
    between graphs G1 and G2.

    Graph edit distance is a graph analogue of Levenshtein distance on
    strings: the minimum cost of an edit path (a sequence of node and
    edge substitutions, deletions and insertions) transforming ``G1``
    into a graph isomorphic to ``G2``.

    Parameters
    ----------
    G1, G2 : graphs
        The two graphs G1 and G2 must be of the same type.

    node_match : callable
        A function that returns True if node ``n1`` in ``G1`` and ``n2``
        in ``G2`` should be considered equal during matching.  Called as
        ``node_match(G1.nodes[n1], G2.nodes[n2])``, i.e. with the two
        node attribute dictionaries.  Ignored if ``node_subst_cost`` is
        specified; if neither is specified, node attributes are not
        considered.

    edge_match : callable
        A function that returns True if the edge attribute dictionaries
        for the pair of nodes ``(u1, v1)`` in ``G1`` and ``(u2, v2)`` in
        ``G2`` should be considered equal during matching.  Called as
        ``edge_match(G1[u1][v1], G2[u2][v2])``.  Ignored if
        ``edge_subst_cost`` is specified; if neither is specified, edge
        attributes are not considered.

    node_subst_cost, node_del_cost, node_ins_cost : callable
        Cost functions for node substitution, deletion and insertion.
        Called with node attribute dictionaries
        (``node_subst_cost(G1.nodes[n1], G2.nodes[n2])``,
        ``node_del_cost(G1.nodes[n1])``, ``node_ins_cost(G2.nodes[n2])``)
        and expected to return positive numeric values.
        ``node_subst_cost`` overrides ``node_match``; without either,
        substitution costs 0 (attributes ignored).  Unspecified deletion
        and insertion costs default to 1.

    edge_subst_cost, edge_del_cost, edge_ins_cost : callable
        Analogous cost functions for edges, called with edge attribute
        dictionaries (``edge_subst_cost(G1[u1][v1], G2[u2][v2])``,
        ``edge_del_cost(G1[u1][v1])``, ``edge_ins_cost(G2[u2][v2])``)
        and expected to return positive numeric values.
        ``edge_subst_cost`` overrides ``edge_match``; without either,
        substitution costs 0.  Unspecified deletion and insertion costs
        default to 1.

    upper_bound : numeric
        Maximum edit distance to consider.

    Returns
    -------
    Generator of consecutive approximations of graph edit distance.

    Examples
    --------
    >>> G1 = nx.cycle_graph(6)
    >>> G2 = nx.wheel_graph(7)
    >>> for v in nx.optimize_graph_edit_distance(G1, G2):
    ...     minv = v
    >>> minv
    7.0

    See Also
    --------
    graph_edit_distance, optimize_edit_paths

    References
    ----------
    .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
       Martineau. An Exact Graph Edit Distance Algorithm for Solving
       Pattern Recognition Problems. 4th International Conference on
       Pattern Recognition Applications and Methods 2015, Jan 2015,
       Lisbon, Portugal. 2015,
       <10.5220/0005209202710278>. <hal-01168816>
       https://hal.archives-ouvertes.fr/hal-01168816
    """
    # Relay only the cost of each strictly-improving edit path found by
    # the underlying search; the edit paths themselves are discarded.
    for *_, cost in optimize_edit_paths(
        G1,
        G2,
        node_match=node_match,
        edge_match=edge_match,
        node_subst_cost=node_subst_cost,
        node_del_cost=node_del_cost,
        node_ins_cost=node_ins_cost,
        edge_subst_cost=edge_subst_cost,
        edge_del_cost=edge_del_cost,
        edge_ins_cost=edge_ins_cost,
        upper_bound=upper_bound,
        strictly_decreasing=True,
    ):
        yield cost
|
535 |
+
|
536 |
+
|
537 |
+
@nx._dispatchable(
|
538 |
+
graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
|
539 |
+
)
|
540 |
+
def optimize_edit_paths(
|
541 |
+
G1,
|
542 |
+
G2,
|
543 |
+
node_match=None,
|
544 |
+
edge_match=None,
|
545 |
+
node_subst_cost=None,
|
546 |
+
node_del_cost=None,
|
547 |
+
node_ins_cost=None,
|
548 |
+
edge_subst_cost=None,
|
549 |
+
edge_del_cost=None,
|
550 |
+
edge_ins_cost=None,
|
551 |
+
upper_bound=None,
|
552 |
+
strictly_decreasing=True,
|
553 |
+
roots=None,
|
554 |
+
timeout=None,
|
555 |
+
):
|
556 |
+
"""GED (graph edit distance) calculation: advanced interface.
|
557 |
+
|
558 |
+
Graph edit path is a sequence of node and edge edit operations
|
559 |
+
transforming graph G1 to graph isomorphic to G2. Edit operations
|
560 |
+
include substitutions, deletions, and insertions.
|
561 |
+
|
562 |
+
Graph edit distance is defined as minimum cost of edit path.
|
563 |
+
|
564 |
+
Parameters
|
565 |
+
----------
|
566 |
+
G1, G2: graphs
|
567 |
+
The two graphs G1 and G2 must be of the same type.
|
568 |
+
|
569 |
+
node_match : callable
|
570 |
+
A function that returns True if node n1 in G1 and n2 in G2
|
571 |
+
should be considered equal during matching.
|
572 |
+
|
573 |
+
The function will be called like
|
574 |
+
|
575 |
+
node_match(G1.nodes[n1], G2.nodes[n2]).
|
576 |
+
|
577 |
+
That is, the function will receive the node attribute
|
578 |
+
dictionaries for n1 and n2 as inputs.
|
579 |
+
|
580 |
+
Ignored if node_subst_cost is specified. If neither
|
581 |
+
node_match nor node_subst_cost are specified then node
|
582 |
+
attributes are not considered.
|
583 |
+
|
584 |
+
edge_match : callable
|
585 |
+
A function that returns True if the edge attribute dictionaries
|
586 |
+
for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
|
587 |
+
be considered equal during matching.
|
588 |
+
|
589 |
+
The function will be called like
|
590 |
+
|
591 |
+
edge_match(G1[u1][v1], G2[u2][v2]).
|
592 |
+
|
593 |
+
That is, the function will receive the edge attribute
|
594 |
+
dictionaries of the edges under consideration.
|
595 |
+
|
596 |
+
Ignored if edge_subst_cost is specified. If neither
|
597 |
+
edge_match nor edge_subst_cost are specified then edge
|
598 |
+
attributes are not considered.
|
599 |
+
|
600 |
+
node_subst_cost, node_del_cost, node_ins_cost : callable
|
601 |
+
Functions that return the costs of node substitution, node
|
602 |
+
deletion, and node insertion, respectively.
|
603 |
+
|
604 |
+
The functions will be called like
|
605 |
+
|
606 |
+
node_subst_cost(G1.nodes[n1], G2.nodes[n2]),
|
607 |
+
node_del_cost(G1.nodes[n1]),
|
608 |
+
node_ins_cost(G2.nodes[n2]).
|
609 |
+
|
610 |
+
That is, the functions will receive the node attribute
|
611 |
+
dictionaries as inputs. The functions are expected to return
|
612 |
+
positive numeric values.
|
613 |
+
|
614 |
+
Function node_subst_cost overrides node_match if specified.
|
615 |
+
If neither node_match nor node_subst_cost are specified then
|
616 |
+
default node substitution cost of 0 is used (node attributes
|
617 |
+
are not considered during matching).
|
618 |
+
|
619 |
+
If node_del_cost is not specified then default node deletion
|
620 |
+
cost of 1 is used. If node_ins_cost is not specified then
|
621 |
+
default node insertion cost of 1 is used.
|
622 |
+
|
623 |
+
edge_subst_cost, edge_del_cost, edge_ins_cost : callable
|
624 |
+
Functions that return the costs of edge substitution, edge
|
625 |
+
deletion, and edge insertion, respectively.
|
626 |
+
|
627 |
+
The functions will be called like
|
628 |
+
|
629 |
+
edge_subst_cost(G1[u1][v1], G2[u2][v2]),
|
630 |
+
edge_del_cost(G1[u1][v1]),
|
631 |
+
edge_ins_cost(G2[u2][v2]).
|
632 |
+
|
633 |
+
That is, the functions will receive the edge attribute
|
634 |
+
dictionaries as inputs. The functions are expected to return
|
635 |
+
positive numeric values.
|
636 |
+
|
637 |
+
Function edge_subst_cost overrides edge_match if specified.
|
638 |
+
If neither edge_match nor edge_subst_cost are specified then
|
639 |
+
default edge substitution cost of 0 is used (edge attributes
|
640 |
+
are not considered during matching).
|
641 |
+
|
642 |
+
If edge_del_cost is not specified then default edge deletion
|
643 |
+
cost of 1 is used. If edge_ins_cost is not specified then
|
644 |
+
default edge insertion cost of 1 is used.
|
645 |
+
|
646 |
+
upper_bound : numeric
|
647 |
+
Maximum edit distance to consider.
|
648 |
+
|
649 |
+
strictly_decreasing : bool
|
650 |
+
If True, return consecutive approximations of strictly
|
651 |
+
decreasing cost. Otherwise, return all edit paths of cost
|
652 |
+
less than or equal to the previous minimum cost.
|
653 |
+
|
654 |
+
roots : 2-tuple
|
655 |
+
Tuple where first element is a node in G1 and the second
|
656 |
+
is a node in G2.
|
657 |
+
These nodes are forced to be matched in the comparison to
|
658 |
+
allow comparison between rooted graphs.
|
659 |
+
|
660 |
+
timeout : numeric
|
661 |
+
Maximum number of seconds to execute.
|
662 |
+
After timeout is met, the current best GED is returned.
|
663 |
+
|
664 |
+
Returns
|
665 |
+
-------
|
666 |
+
Generator of tuples (node_edit_path, edge_edit_path, cost)
|
667 |
+
node_edit_path : list of tuples (u, v)
|
668 |
+
edge_edit_path : list of tuples ((u1, v1), (u2, v2))
|
669 |
+
cost : numeric
|
670 |
+
|
671 |
+
See Also
|
672 |
+
--------
|
673 |
+
graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths
|
674 |
+
|
675 |
+
References
|
676 |
+
----------
|
677 |
+
.. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick
|
678 |
+
Martineau. An Exact Graph Edit Distance Algorithm for Solving
|
679 |
+
Pattern Recognition Problems. 4th International Conference on
|
680 |
+
Pattern Recognition Applications and Methods 2015, Jan 2015,
|
681 |
+
Lisbon, Portugal. 2015,
|
682 |
+
<10.5220/0005209202710278>. <hal-01168816>
|
683 |
+
https://hal.archives-ouvertes.fr/hal-01168816
|
684 |
+
|
685 |
+
"""
|
686 |
+
# TODO: support DiGraph
|
687 |
+
|
688 |
+
import numpy as np
|
689 |
+
import scipy as sp
|
690 |
+
|
691 |
+
@dataclass
|
692 |
+
class CostMatrix:
|
693 |
+
C: ...
|
694 |
+
lsa_row_ind: ...
|
695 |
+
lsa_col_ind: ...
|
696 |
+
ls: ...
|
697 |
+
|
698 |
+
def make_CostMatrix(C, m, n):
|
699 |
+
# assert(C.shape == (m + n, m + n))
|
700 |
+
lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C)
|
701 |
+
|
702 |
+
# Fixup dummy assignments:
|
703 |
+
# each substitution i<->j should have dummy assignment m+j<->n+i
|
704 |
+
# NOTE: fast reduce of Cv relies on it
|
705 |
+
# assert len(lsa_row_ind) == len(lsa_col_ind)
|
706 |
+
indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
|
707 |
+
subst_ind = [k for k, i, j in indexes if i < m and j < n]
|
708 |
+
indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
|
709 |
+
dummy_ind = [k for k, i, j in indexes if i >= m and j >= n]
|
710 |
+
# assert len(subst_ind) == len(dummy_ind)
|
711 |
+
lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m
|
712 |
+
lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n
|
713 |
+
|
714 |
+
return CostMatrix(
|
715 |
+
C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum()
|
716 |
+
)
|
717 |
+
|
718 |
+
def extract_C(C, i, j, m, n):
|
719 |
+
# assert(C.shape == (m + n, m + n))
|
720 |
+
row_ind = [k in i or k - m in j for k in range(m + n)]
|
721 |
+
col_ind = [k in j or k - n in i for k in range(m + n)]
|
722 |
+
return C[row_ind, :][:, col_ind]
|
723 |
+
|
724 |
+
def reduce_C(C, i, j, m, n):
|
725 |
+
# assert(C.shape == (m + n, m + n))
|
726 |
+
row_ind = [k not in i and k - m not in j for k in range(m + n)]
|
727 |
+
col_ind = [k not in j and k - n not in i for k in range(m + n)]
|
728 |
+
return C[row_ind, :][:, col_ind]
|
729 |
+
|
730 |
+
def reduce_ind(ind, i):
|
731 |
+
# assert set(ind) == set(range(len(ind)))
|
732 |
+
rind = ind[[k not in i for k in ind]]
|
733 |
+
for k in set(i):
|
734 |
+
rind[rind >= k] -= 1
|
735 |
+
return rind
|
736 |
+
|
737 |
+
def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None):
|
738 |
+
"""
|
739 |
+
Parameters:
|
740 |
+
u, v: matched vertices, u=None or v=None for
|
741 |
+
deletion/insertion
|
742 |
+
pending_g, pending_h: lists of edges not yet mapped
|
743 |
+
Ce: CostMatrix of pending edge mappings
|
744 |
+
matched_uv: partial vertex edit path
|
745 |
+
list of tuples (u, v) of previously matched vertex
|
746 |
+
mappings u<->v, u=None or v=None for
|
747 |
+
deletion/insertion
|
748 |
+
|
749 |
+
Returns:
|
750 |
+
list of (i, j): indices of edge mappings g<->h
|
751 |
+
localCe: local CostMatrix of edge mappings
|
752 |
+
(basically submatrix of Ce at cross of rows i, cols j)
|
753 |
+
"""
|
754 |
+
M = len(pending_g)
|
755 |
+
N = len(pending_h)
|
756 |
+
# assert Ce.C.shape == (M + N, M + N)
|
757 |
+
|
758 |
+
# only attempt to match edges after one node match has been made
|
759 |
+
# this will stop self-edges on the first node being automatically deleted
|
760 |
+
# even when a substitution is the better option
|
761 |
+
if matched_uv is None or len(matched_uv) == 0:
|
762 |
+
g_ind = []
|
763 |
+
h_ind = []
|
764 |
+
else:
|
765 |
+
g_ind = [
|
766 |
+
i
|
767 |
+
for i in range(M)
|
768 |
+
if pending_g[i][:2] == (u, u)
|
769 |
+
or any(
|
770 |
+
pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv
|
771 |
+
)
|
772 |
+
]
|
773 |
+
h_ind = [
|
774 |
+
j
|
775 |
+
for j in range(N)
|
776 |
+
if pending_h[j][:2] == (v, v)
|
777 |
+
or any(
|
778 |
+
pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv
|
779 |
+
)
|
780 |
+
]
|
781 |
+
|
782 |
+
m = len(g_ind)
|
783 |
+
n = len(h_ind)
|
784 |
+
|
785 |
+
if m or n:
|
786 |
+
C = extract_C(Ce.C, g_ind, h_ind, M, N)
|
787 |
+
# assert C.shape == (m + n, m + n)
|
788 |
+
|
789 |
+
# Forbid structurally invalid matches
|
790 |
+
# NOTE: inf remembered from Ce construction
|
791 |
+
for k, i in enumerate(g_ind):
|
792 |
+
g = pending_g[i][:2]
|
793 |
+
for l, j in enumerate(h_ind):
|
794 |
+
h = pending_h[j][:2]
|
795 |
+
if nx.is_directed(G1) or nx.is_directed(G2):
|
796 |
+
if any(
|
797 |
+
g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q)
|
798 |
+
for p, q in matched_uv
|
799 |
+
):
|
800 |
+
continue
|
801 |
+
else:
|
802 |
+
if any(
|
803 |
+
g in ((p, u), (u, p)) and h in ((q, v), (v, q))
|
804 |
+
for p, q in matched_uv
|
805 |
+
):
|
806 |
+
continue
|
807 |
+
if g == (u, u) or any(g == (p, p) for p, q in matched_uv):
|
808 |
+
continue
|
809 |
+
if h == (v, v) or any(h == (q, q) for p, q in matched_uv):
|
810 |
+
continue
|
811 |
+
C[k, l] = inf
|
812 |
+
|
813 |
+
localCe = make_CostMatrix(C, m, n)
|
814 |
+
ij = [
|
815 |
+
(
|
816 |
+
g_ind[k] if k < m else M + h_ind[l],
|
817 |
+
h_ind[l] if l < n else N + g_ind[k],
|
818 |
+
)
|
819 |
+
for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind)
|
820 |
+
if k < m or l < n
|
821 |
+
]
|
822 |
+
|
823 |
+
else:
|
824 |
+
ij = []
|
825 |
+
localCe = CostMatrix(np.empty((0, 0)), [], [], 0)
|
826 |
+
|
827 |
+
return ij, localCe
|
828 |
+
|
829 |
+
def reduce_Ce(Ce, ij, m, n):
|
830 |
+
if len(ij):
|
831 |
+
i, j = zip(*ij)
|
832 |
+
m_i = m - sum(1 for t in i if t < m)
|
833 |
+
n_j = n - sum(1 for t in j if t < n)
|
834 |
+
return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j)
|
835 |
+
return Ce
|
836 |
+
|
837 |
+
def get_edit_ops(
    matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost
):
    """
    Generate candidate vertex edit operations, most promising first.

    Parameters:
        matched_uv: partial vertex edit path
            list of tuples (u, v) of vertex mappings u<->v,
            u=None or v=None for deletion/insertion
        pending_u, pending_v: lists of vertices not yet mapped
        Cv: CostMatrix of pending vertex mappings
        pending_g, pending_h: lists of edges not yet mapped
        Ce: CostMatrix of pending edge mappings
        matched_cost: cost of partial edit path

    Returns:
        sequence of
            (i, j): indices of vertex mapping u<->v
            Cv_ij: reduced CostMatrix of pending vertex mappings
                (basically Cv with row i, col j removed)
            list of (x, y): indices of edge mappings g<->h
            Ce_xy: reduced CostMatrix of pending edge mappings
                (basically Ce with rows x, cols y removed)
            cost: total cost of edit operation
        NOTE: most promising ops first
    """
    m = len(pending_u)
    n = len(pending_v)
    # assert Cv.C.shape == (m + n, m + n)

    # 1) a vertex mapping from optimal linear sum assignment
    # Pick the smallest-index (i, j) pair from the LSA solution that is not
    # a dummy<->dummy pairing (those carry no information).
    i, j = min(
        (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n
    )
    xy, localCe = match_edges(
        pending_u[i] if i < m else None,
        pending_v[j] if j < n else None,
        pending_g,
        pending_h,
        Ce,
        matched_uv,
    )
    Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
    # assert Ce.ls <= localCe.ls + Ce_xy.ls
    if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls):
        pass
    else:
        # get reduced Cv efficiently
        # Reuses the existing LSA solution instead of re-solving from scratch.
        Cv_ij = CostMatrix(
            reduce_C(Cv.C, (i,), (j,), m, n),
            reduce_ind(Cv.lsa_row_ind, (i, m + j)),
            reduce_ind(Cv.lsa_col_ind, (j, n + i)),
            Cv.ls - Cv.C[i, j],
        )
        yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls

    # 2) other candidates, sorted by lower-bound cost estimate
    other = []
    fixed_i, fixed_j = i, j
    # Branch over the smaller dimension: vary the row (resp. column) while
    # keeping the other index fixed, covering all alternative pairings.
    if m <= n:
        candidates = (
            (t, fixed_j)
            for t in range(m + n)
            if t != fixed_i and (t < m or t == m + fixed_j)
        )
    else:
        candidates = (
            (fixed_i, t)
            for t in range(m + n)
            if t != fixed_j and (t < n or t == n + fixed_i)
        )
    for i, j in candidates:
        # Progressive pruning: each prune() call below uses an increasingly
        # tight (and increasingly expensive to compute) lower bound.
        if prune(matched_cost + Cv.C[i, j] + Ce.ls):
            continue
        Cv_ij = make_CostMatrix(
            reduce_C(Cv.C, (i,), (j,), m, n),
            m - 1 if i < m else m,
            n - 1 if j < n else n,
        )
        # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls
        if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls):
            continue
        xy, localCe = match_edges(
            pending_u[i] if i < m else None,
            pending_v[j] if j < n else None,
            pending_g,
            pending_h,
            Ce,
            matched_uv,
        )
        if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls):
            continue
        Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
        # assert Ce.ls <= localCe.ls + Ce_xy.ls
        if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls):
            continue
        other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls))

    # Emit surviving candidates ordered by their lower-bound total cost.
    yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls)
|
935 |
+
|
936 |
+
def get_edit_paths(
    matched_uv,
    pending_u,
    pending_v,
    Cv,
    matched_gh,
    pending_g,
    pending_h,
    Ce,
    matched_cost,
):
    """
    Recursively extend the partial edit path to complete edit paths,
    using depth-first search with backtracking over the shared
    pending/matched lists (mutated in place, restored on return).

    Parameters:
        matched_uv: partial vertex edit path
            list of tuples (u, v) of vertex mappings u<->v,
            u=None or v=None for deletion/insertion
        pending_u, pending_v: lists of vertices not yet mapped
        Cv: CostMatrix of pending vertex mappings
        matched_gh: partial edge edit path
            list of tuples (g, h) of edge mappings g<->h,
            g=None or h=None for deletion/insertion
        pending_g, pending_h: lists of edges not yet mapped
        Ce: CostMatrix of pending edge mappings
        matched_cost: cost of partial edit path

    Returns:
        sequence of (vertex_path, edge_path, cost)
            vertex_path: complete vertex edit path
                list of tuples (u, v) of vertex mappings u<->v,
                u=None or v=None for deletion/insertion
            edge_path: complete edge edit path
                list of tuples (g, h) of edge mappings g<->h,
                g=None or h=None for deletion/insertion
            cost: total cost of edit path
        NOTE: path costs are non-increasing
    """
    # debug_print('matched-uv:', matched_uv)
    # debug_print('matched-gh:', matched_gh)
    # debug_print('matched-cost:', matched_cost)
    # debug_print('pending-u:', pending_u)
    # debug_print('pending-v:', pending_v)
    # debug_print(Cv.C)
    # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u))
    # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v))
    # debug_print('pending-g:', pending_g)
    # debug_print('pending-h:', pending_h)
    # debug_print(Ce.C)
    # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g))
    # assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h))
    # debug_print()

    # Lower bound on the total cost of any completion of this partial path.
    if prune(matched_cost + Cv.ls + Ce.ls):
        return

    if not max(len(pending_u), len(pending_v)):
        # assert not len(pending_g)
        # assert not len(pending_h)
        # path completed!
        # assert matched_cost <= maxcost_value
        # Tighten the global upper bound so later branches prune earlier.
        nonlocal maxcost_value
        maxcost_value = min(maxcost_value, matched_cost)
        yield matched_uv, matched_gh, matched_cost

    else:
        edit_ops = get_edit_ops(
            matched_uv,
            pending_u,
            pending_v,
            Cv,
            pending_g,
            pending_h,
            Ce,
            matched_cost,
        )
        for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops:
            i, j = ij
            # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost
            if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls):
                continue

            # dive deeper
            # Indices >= list length denote deletion/insertion dummies.
            u = pending_u.pop(i) if i < len(pending_u) else None
            v = pending_v.pop(j) if j < len(pending_v) else None
            matched_uv.append((u, v))
            for x, y in xy:
                len_g = len(pending_g)
                len_h = len(pending_h)
                matched_gh.append(
                    (
                        pending_g[x] if x < len_g else None,
                        pending_h[y] if y < len_h else None,
                    )
                )
            # Pop in descending index order so earlier pops do not shift
            # the positions of later ones.
            sortedx = sorted(x for x, y in xy)
            sortedy = sorted(y for x, y in xy)
            G = [
                (pending_g.pop(x) if x < len(pending_g) else None)
                for x in reversed(sortedx)
            ]
            H = [
                (pending_h.pop(y) if y < len(pending_h) else None)
                for y in reversed(sortedy)
            ]

            yield from get_edit_paths(
                matched_uv,
                pending_u,
                pending_v,
                Cv_ij,
                matched_gh,
                pending_g,
                pending_h,
                Ce_xy,
                matched_cost + edit_cost,
            )

            # backtrack
            # Re-insert in ascending index order (mirror of the pops above)
            # to restore the pending lists exactly.
            if u is not None:
                pending_u.insert(i, u)
            if v is not None:
                pending_v.insert(j, v)
            matched_uv.pop()
            for x, g in zip(sortedx, reversed(G)):
                if g is not None:
                    pending_g.insert(x, g)
            for y, h in zip(sortedy, reversed(H)):
                if h is not None:
                    pending_h.insert(y, h)
            for _ in xy:
                matched_gh.pop()
|
1066 |
+
|
1067 |
+
# Initialization
# NOTE(review): these statements are the tail of the enclosing generator
# (its `def` line is outside this view); G1, G2, roots, the *_cost/*_match
# callables, timeout, upper_bound and strictly_decreasing are presumably
# its parameters — confirm against the full file.

pending_u = list(G1.nodes)
pending_v = list(G2.nodes)

initial_cost = 0
if roots:
    root_u, root_v = roots
    if root_u not in pending_u or root_v not in pending_v:
        raise nx.NodeNotFound("Root node not in graph.")

    # remove roots from pending
    pending_u.remove(root_u)
    pending_v.remove(root_v)

# cost matrix of vertex mappings
# Layout: C[0:m, 0:n] substitutions; C[0:m, n:n+m] deletions (diagonal);
# C[m:m+n, 0:n] insertions (diagonal); off-diagonal dummies get `inf`.
m = len(pending_u)
n = len(pending_v)
C = np.zeros((m + n, m + n))
if node_subst_cost:
    C[0:m, 0:n] = np.array(
        [
            node_subst_cost(G1.nodes[u], G2.nodes[v])
            for u in pending_u
            for v in pending_v
        ]
    ).reshape(m, n)
    if roots:
        initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v])
elif node_match:
    C[0:m, 0:n] = np.array(
        [
            1 - int(node_match(G1.nodes[u], G2.nodes[v]))
            for u in pending_u
            for v in pending_v
        ]
    ).reshape(m, n)
    if roots:
        initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v])
else:
    # all zeroes
    pass
# assert not min(m, n) or C[0:m, 0:n].min() >= 0
if node_del_cost:
    del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u]
else:
    del_costs = [1] * len(pending_u)
# assert not m or min(del_costs) >= 0
if node_ins_cost:
    ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v]
else:
    ins_costs = [1] * len(pending_v)
# assert not n or min(ins_costs) >= 0
# `inf` is a finite sentinel strictly larger than any feasible total cost.
inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
C[0:m, n : n + m] = np.array(
    [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
).reshape(m, m)
C[m : m + n, 0:n] = np.array(
    [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
).reshape(n, n)
Cv = make_CostMatrix(C, m, n)
# debug_print(f"Cv: {m} x {n}")
# debug_print(Cv.C)

pending_g = list(G1.edges)
pending_h = list(G2.edges)

# cost matrix of edge mappings
# Same layout as the vertex matrix above, built from the edge callables.
m = len(pending_g)
n = len(pending_h)
C = np.zeros((m + n, m + n))
if edge_subst_cost:
    C[0:m, 0:n] = np.array(
        [
            edge_subst_cost(G1.edges[g], G2.edges[h])
            for g in pending_g
            for h in pending_h
        ]
    ).reshape(m, n)
elif edge_match:
    C[0:m, 0:n] = np.array(
        [
            1 - int(edge_match(G1.edges[g], G2.edges[h]))
            for g in pending_g
            for h in pending_h
        ]
    ).reshape(m, n)
else:
    # all zeroes
    pass
# assert not min(m, n) or C[0:m, 0:n].min() >= 0
if edge_del_cost:
    del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g]
else:
    del_costs = [1] * len(pending_g)
# assert not m or min(del_costs) >= 0
if edge_ins_cost:
    ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h]
else:
    ins_costs = [1] * len(pending_h)
# assert not n or min(ins_costs) >= 0
inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
C[0:m, n : n + m] = np.array(
    [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
).reshape(m, m)
C[m : m + n, 0:n] = np.array(
    [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
).reshape(n, n)
Ce = make_CostMatrix(C, m, n)
# debug_print(f'Ce: {m} x {n}')
# debug_print(Ce.C)
# debug_print()

# Upper bound on any edit path cost; completed paths shrink it (nonlocal).
maxcost_value = Cv.C.sum() + Ce.C.sum() + 1

if timeout is not None:
    if timeout <= 0:
        raise nx.NetworkXError("Timeout value must be greater than 0")
    start = time.perf_counter()

def prune(cost):
    # Return True if the branch with lower-bound `cost` should be abandoned
    # (timeout expired, or bound exceeds the caller-supplied or best-known cost).
    if timeout is not None:
        if time.perf_counter() - start > timeout:
            return True
    if upper_bound is not None:
        if cost > upper_bound:
            return True
    if cost > maxcost_value:
        return True
    if strictly_decreasing and cost >= maxcost_value:
        return True
    return False

# Now go!

done_uv = [] if roots is None else [roots]

for vertex_path, edge_path, cost in get_edit_paths(
    done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost
):
    # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None)
    # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None)
    # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None)
    # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None)
    # print(vertex_path, edge_path, cost, file = sys.stderr)
    # assert cost == maxcost_value
    # Copy the shared, mutated path lists before yielding to the caller.
    yield list(vertex_path), list(edge_path), float(cost)
|
1214 |
+
|
1215 |
+
|
1216 |
+
@nx._dispatchable
def simrank_similarity(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Returns the SimRank similarity of nodes in the graph ``G``.

    SimRank is a similarity metric that says "two objects are considered
    to be similar if they are referenced by similar objects." [1]_.

    The pseudo-code definition from the paper is::

        def simrank(G, u, v):
            in_neighbors_u = G.predecessors(u)
            in_neighbors_v = G.predecessors(v)
            scale = C / (len(in_neighbors_u) * len(in_neighbors_v))
            return scale * sum(
                simrank(G, w, x) for w, x in product(in_neighbors_u, in_neighbors_v)
            )

    where ``G`` is the graph, ``u`` is the source, ``v`` is the target,
    and ``C`` is a float decay or importance factor between 0 and 1.

    The SimRank algorithm for determining node similarity is defined in
    [2]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph

    source : node
        If this is specified, the returned dictionary maps each node
        ``v`` in the graph to the similarity between ``source`` and
        ``v``.

    target : node
        If both ``source`` and ``target`` are specified, the similarity
        value between ``source`` and ``target`` is returned. If
        ``target`` is specified but ``source`` is not, this argument is
        ignored.

    importance_factor : float
        The relative importance of indirect neighbors with respect to
        direct neighbors.

    max_iterations : integer
        Maximum number of iterations.

    tolerance : float
        Error tolerance used to check convergence. When an iteration of
        the algorithm finds that no similarity value changes more than
        this amount, the algorithm halts.

    Returns
    -------
    similarity : dictionary or float
        If ``source`` and ``target`` are both ``None``, this returns a
        dictionary of dictionaries, where keys are node pairs and value
        are similarity of the pair of nodes.

        If ``source`` is not ``None`` but ``target`` is, this returns a
        dictionary mapping node to the similarity of ``source`` and that
        node.

        If neither ``source`` nor ``target`` is ``None``, this returns
        the similarity value for the given pair of nodes.

    Raises
    ------
    ExceededMaxIterations
        If the algorithm does not converge within ``max_iterations``.

    NodeNotFound
        If either ``source`` or ``target`` is not in `G`.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.simrank_similarity(G)
    {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}}
    >>> nx.simrank_similarity(G, source=0)
    {0: 1.0, 1: 0.0}
    >>> nx.simrank_similarity(G, source=0, target=0)
    1.0

    The result of this function can be converted to a numpy array
    representing the SimRank matrix by using the node order of the
    graph to determine which row and column represent each node.
    Other ordering of nodes is also possible.

    >>> import numpy as np
    >>> sim = nx.simrank_similarity(G)
    >>> np.array([[sim[u][v] for v in G] for u in G])
    array([[1., 0.],
           [0., 1.]])
    >>> sim_1d = nx.simrank_similarity(G, source=0)
    >>> np.array([sim[0][v] for v in G])
    array([1., 0.])

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/SimRank
    .. [2] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    import numpy as np

    nodelist = list(G)

    def _node_position(node, label):
        # Map a node label to its positional index in `nodelist`;
        # `None` (argument not supplied) passes straight through.
        if node is None:
            return None
        if node not in nodelist:
            raise nx.NodeNotFound(f"{label} node {node} not in G")
        return nodelist.index(node)

    s_indx = _node_position(source, "Source")
    t_indx = _node_position(target, "Target")

    # Delegate the actual fixed-point iteration to the numpy backend.
    x = _simrank_similarity_numpy(
        G, s_indx, t_indx, importance_factor, max_iterations, tolerance
    )

    # Re-wrap the numpy result into plain Python containers keyed by node.
    if not isinstance(x, np.ndarray):
        return float(x)
    if x.ndim == 1:
        return dict(zip(G, x.tolist()))
    # x.ndim == 2: full node-by-node similarity matrix
    return {u: dict(zip(G, row)) for u, row in zip(G, x.tolist())}
|
1358 |
+
|
1359 |
+
|
1360 |
+
def _simrank_similarity_python(
|
1361 |
+
G,
|
1362 |
+
source=None,
|
1363 |
+
target=None,
|
1364 |
+
importance_factor=0.9,
|
1365 |
+
max_iterations=1000,
|
1366 |
+
tolerance=1e-4,
|
1367 |
+
):
|
1368 |
+
"""Returns the SimRank similarity of nodes in the graph ``G``.
|
1369 |
+
|
1370 |
+
This pure Python version is provided for pedagogical purposes.
|
1371 |
+
|
1372 |
+
Examples
|
1373 |
+
--------
|
1374 |
+
>>> G = nx.cycle_graph(2)
|
1375 |
+
>>> nx.similarity._simrank_similarity_python(G)
|
1376 |
+
{0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}}
|
1377 |
+
>>> nx.similarity._simrank_similarity_python(G, source=0)
|
1378 |
+
{0: 1, 1: 0.0}
|
1379 |
+
>>> nx.similarity._simrank_similarity_python(G, source=0, target=0)
|
1380 |
+
1
|
1381 |
+
"""
|
1382 |
+
# build up our similarity adjacency dictionary output
|
1383 |
+
newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G}
|
1384 |
+
|
1385 |
+
# These functions compute the update to the similarity value of the nodes
|
1386 |
+
# `u` and `v` with respect to the previous similarity values.
|
1387 |
+
def avg_sim(s):
|
1388 |
+
return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0
|
1389 |
+
|
1390 |
+
Gadj = G.pred if G.is_directed() else G.adj
|
1391 |
+
|
1392 |
+
def sim(u, v):
|
1393 |
+
return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v])))
|
1394 |
+
|
1395 |
+
for its in range(max_iterations):
|
1396 |
+
oldsim = newsim
|
1397 |
+
newsim = {u: {v: sim(u, v) if u != v else 1 for v in G} for u in G}
|
1398 |
+
is_close = all(
|
1399 |
+
all(
|
1400 |
+
abs(newsim[u][v] - old) <= tolerance * (1 + abs(old))
|
1401 |
+
for v, old in nbrs.items()
|
1402 |
+
)
|
1403 |
+
for u, nbrs in oldsim.items()
|
1404 |
+
)
|
1405 |
+
if is_close:
|
1406 |
+
break
|
1407 |
+
|
1408 |
+
if its + 1 == max_iterations:
|
1409 |
+
raise nx.ExceededMaxIterations(
|
1410 |
+
f"simrank did not converge after {max_iterations} iterations."
|
1411 |
+
)
|
1412 |
+
|
1413 |
+
if source is not None and target is not None:
|
1414 |
+
return newsim[source][target]
|
1415 |
+
if source is not None:
|
1416 |
+
return newsim[source]
|
1417 |
+
return newsim
|
1418 |
+
|
1419 |
+
|
1420 |
+
def _simrank_similarity_numpy(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=1000,
    tolerance=1e-4,
):
    """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``.

    The SimRank algorithm for determining node similarity is defined in
    [1]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph

    source : node
        If this is specified, the returned dictionary maps each node
        ``v`` in the graph to the similarity between ``source`` and
        ``v``.

    target : node
        If both ``source`` and ``target`` are specified, the similarity
        value between ``source`` and ``target`` is returned. If
        ``target`` is specified but ``source`` is not, this argument is
        ignored.

    importance_factor : float
        The relative importance of indirect neighbors with respect to
        direct neighbors.

    max_iterations : integer
        Maximum number of iterations.

    tolerance : float
        Error tolerance used to check convergence. When an iteration of
        the algorithm finds that no similarity value changes more than
        this amount, the algorithm halts.

    Returns
    -------
    similarity : numpy array or float
        If ``source`` and ``target`` are both ``None``, this returns a
        2D array containing SimRank scores of the nodes.

        If ``source`` is not ``None`` but ``target`` is, this returns an
        1D array containing SimRank scores of ``source`` and that
        node.

        If neither ``source`` nor ``target`` is ``None``, this returns
        the similarity value for the given pair of nodes.

    Examples
    --------
    >>> G = nx.cycle_graph(2)
    >>> nx.similarity._simrank_similarity_numpy(G)
    array([[1., 0.],
           [0., 1.]])
    >>> nx.similarity._simrank_similarity_numpy(G, source=0)
    array([1., 0.])
    >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0)
    1.0

    References
    ----------
    .. [1] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    # This algorithm follows roughly
    #
    #    S = max{C * (A.T * S * A), I}
    #
    # where C is the importance factor, A is the column normalized
    # adjacency matrix, and I is the identity matrix.
    import numpy as np

    adjacency_matrix = nx.to_numpy_array(G)

    # column-normalize the ``adjacency_matrix``
    # (zero columns are left as-is by dividing by 1 instead of 0)
    s = np.array(adjacency_matrix.sum(axis=0))
    s[s == 0] = 1
    adjacency_matrix /= s  # adjacency_matrix.sum(axis=0)

    newsim = np.eye(len(G), dtype=np.float64)
    for its in range(max_iterations):
        prevsim = newsim.copy()
        newsim = importance_factor * ((adjacency_matrix.T @ prevsim) @ adjacency_matrix)
        np.fill_diagonal(newsim, 1.0)

        if np.allclose(prevsim, newsim, atol=tolerance):
            break
    else:
        # BUGFIX: the previous `if its + 1 == max_iterations` check raised even
        # when convergence happened exactly on the final iteration, and raised
        # NameError (unbound `its`) for max_iterations == 0. The for/else only
        # triggers when the loop exhausts without hitting the `break` above.
        raise nx.ExceededMaxIterations(
            f"simrank did not converge after {max_iterations} iterations."
        )

    if source is not None and target is not None:
        return float(newsim[source, target])
    if source is not None:
        return newsim[source]
    return newsim
|
1527 |
+
|
1528 |
+
|
1529 |
+
@nx._dispatchable(edge_attrs="weight")
def panther_similarity(
    G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight"
):
    r"""Returns the Panther similarity of nodes in the graph `G` to node ``v``.

    Panther is a similarity metric that says "two objects are considered
    to be similar if they frequently appear on the same paths." [1]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph
    source : node
        Source node for which to find the top `k` similar other nodes
    k : int (default = 5)
        The number of most similar nodes to return.
    path_length : int (default = 5)
        How long the randomly generated paths should be (``T`` in [1]_)
    c : float (default = 0.5)
        A universal positive constant used to scale the number
        of sample random paths to generate.
    delta : float (default = 0.1)
        The probability that the similarity $S$ is not an epsilon-approximation to (R, phi),
        where $R$ is the number of random paths and $\phi$ is the probability
        that an element sampled from a set $A \subseteq D$, where $D$ is the domain.
    eps : float or None (default = None)
        The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,
        if no value is provided, the recommended computed value will be used.
    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight. If None then each edge has weight 1.

    Returns
    -------
    similarity : dictionary
        Dictionary of nodes to similarity scores (as floats). Note:
        the self-similarity (i.e., ``v``) will not be included in
        the returned dictionary. So, for ``k = 5``, a dictionary of
        top 4 nodes and their similarity scores will be returned.

    Raises
    ------
    NetworkXUnfeasible
        If `source` is an isolated node.

    NodeNotFound
        If `source` is not in `G`.

    Notes
    -----
    The isolated nodes in `G` are ignored.

    Examples
    --------
    >>> G = nx.star_graph(10)
    >>> sim = nx.panther_similarity(G, 0)

    References
    ----------
    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
           Panther: Fast top-k similarity search on large networks.
           In Proceedings of the ACM SIGKDD International Conference
           on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
           Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
    """
    import numpy as np

    if source not in G:
        raise nx.NodeNotFound(f"Source node {source} not in G")

    isolates = set(nx.isolates(G))

    if source in isolates:
        raise nx.NetworkXUnfeasible(
            f"Panther similarity is not defined for the isolated source node {source}."
        )

    # Work on a copy with isolated nodes removed (they can never share a path).
    G = G.subgraph([node for node in G.nodes if node not in isolates]).copy()

    num_nodes = G.number_of_nodes()
    if num_nodes < k:
        warnings.warn(
            f"Number of nodes is {num_nodes}, but requested k is {k}. "
            "Setting k to number of nodes."
        )
        k = num_nodes
    # According to [1], they empirically determined
    # a good value for ``eps`` to be sqrt( 1 / |E| )
    if eps is None:
        eps = np.sqrt(1.0 / G.number_of_edges())

    # node label -> positional index, and the inverse as an array for fancy indexing
    inv_node_map = {name: index for index, name in enumerate(G.nodes)}
    node_map = np.array(G)

    # Calculate the sample size ``R`` for how many paths
    # to randomly generate
    t_choose_2 = math.comb(path_length, 2)
    sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
    index_map = {}
    # Generated paths are consumed only for their side effect of filling
    # `index_map` (node -> indices of paths containing that node).
    _ = list(
        generate_random_paths(
            G, sample_size, path_length=path_length, index_map=index_map, weight=weight
        )
    )
    S = np.zeros(num_nodes)

    inv_sample_size = 1 / sample_size

    source_paths = set(index_map[source])

    # Calculate the path similarities
    # between ``source`` (v) and ``node`` (v_j)
    # using our inverted index mapping of
    # vertices to paths
    for node, paths in index_map.items():
        # Only consider paths where both
        # ``node`` and ``source`` are present
        common_paths = source_paths.intersection(paths)
        S[inv_node_map[node]] = len(common_paths) * inv_sample_size

    # Retrieve top ``k`` similar
    # Note: the below performed anywhere from 4-10x faster
    # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
    top_k_unsorted = np.argpartition(S, -k)[-k:]
    top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]

    # Add back the similarity scores
    top_k_with_val = dict(
        zip(node_map[top_k_sorted].tolist(), S[top_k_sorted].tolist())
    )

    # Remove the self-similarity
    top_k_with_val.pop(source, None)
    return top_k_with_val
|
1664 |
+
|
1665 |
+
|
1666 |
+
@np_random_state(5)
@nx._dispatchable(edge_attrs="weight")
def generate_random_paths(
    G, sample_size, path_length=5, index_map=None, weight="weight", seed=None
):
    """Randomly generate `sample_size` paths of length `path_length`.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph
    sample_size : integer
        The number of paths to generate. This is ``R`` in [1]_.
    path_length : integer (default = 5)
        The maximum size of the path to randomly generate.
        This is ``T`` in [1]_. According to the paper, ``T >= 5`` is
        recommended.
    index_map : dictionary, optional
        If provided, this will be populated with the inverted
        index of nodes mapped to the set of generated random path
        indices within ``paths``.
    weight : string or None, optional (default="weight")
        The name of an edge attribute that holds the numerical value
        used as a weight. If None then each edge has weight 1.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    paths : generator of lists
        Generator of `sample_size` paths each with length `path_length`.

    Examples
    --------
    Note that the return value is the list of paths:

    >>> G = nx.star_graph(3)
    >>> random_path = nx.generate_random_paths(G, 2)

    By passing a dictionary into `index_map`, it will build an
    inverted index mapping of nodes to the paths in which that node is present:

    >>> G = nx.star_graph(3)
    >>> index_map = {}
    >>> random_path = nx.generate_random_paths(G, 3, index_map=index_map)
    >>> paths_containing_node_0 = [
    ...     random_path[path_idx] for path_idx in index_map.get(0, [])
    ... ]

    References
    ----------
    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
        Panther: Fast top-k similarity search on large networks.
        In Proceedings of the ACM SIGKDD International Conference
        on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
        Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
    """
    import numpy as np

    # ``seed`` has been normalized by @np_random_state; it is either a legacy
    # ``RandomState`` (exposing ``randint``) or a modern ``Generator``
    # (exposing ``integers``) — pick the matching integer-sampling method.
    randint_fn = (
        seed.integers if isinstance(seed, np.random.Generator) else seed.randint
    )

    # Calculate transition probabilities between
    # every pair of vertices according to Eq. (3)
    adj_mat = nx.to_numpy_array(G, weight=weight)
    # NOTE(review): a row summing to 0 (isolated node / zero total weight)
    # produces inf here and an invalid probability vector below — this
    # presumably assumes every node has positive out-weight; confirm.
    inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1)
    transition_probabilities = adj_mat * inv_row_sums

    # Positional index -> node label, for translating sampled indices back
    # into graph nodes.
    node_map = list(G)
    num_nodes = G.number_of_nodes()

    for path_index in range(sample_size):
        # Sample current vertex v = v_i uniformly at random
        node_index = randint_fn(num_nodes)
        node = node_map[node_index]

        # Add v into p_r and add p_r into the path set
        # of v, i.e., P_v
        path = [node]

        # Build the inverted index (P_v) of vertices to paths
        if index_map is not None:
            if node in index_map:
                index_map[node].add(path_index)
            else:
                index_map[node] = {path_index}

        starting_index = node_index
        # Perform ``path_length`` random-walk steps from the start node.
        for _ in range(path_length):
            # Randomly sample a neighbor (v_j) according
            # to transition probabilities from ``node`` (v) to its neighbors
            nbr_index = seed.choice(
                num_nodes, p=transition_probabilities[starting_index]
            )

            # Set current vertex (v = v_j)
            starting_index = nbr_index

            # Add v into p_r
            nbr_node = node_map[nbr_index]
            path.append(nbr_node)

            # Add p_r into P_v
            if index_map is not None:
                if nbr_node in index_map:
                    index_map[nbr_node].add(path_index)
                else:
                    index_map[nbr_node] = {path_index}

        yield path
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/simple_paths.py
ADDED
@@ -0,0 +1,937 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from heapq import heappop, heappush
|
2 |
+
from itertools import count
|
3 |
+
|
4 |
+
import networkx as nx
|
5 |
+
from networkx.algorithms.shortest_paths.weighted import _weight_function
|
6 |
+
from networkx.utils import not_implemented_for, pairwise
|
7 |
+
|
8 |
+
__all__ = [
|
9 |
+
"all_simple_paths",
|
10 |
+
"is_simple_path",
|
11 |
+
"shortest_simple_paths",
|
12 |
+
"all_simple_edge_paths",
|
13 |
+
]
|
14 |
+
|
15 |
+
|
16 |
+
@nx._dispatchable
def is_simple_path(G, nodes):
    """Returns True if and only if `nodes` form a simple path in `G`.

    A *simple path* is a nonempty sequence of nodes in which no node
    repeats and every adjacent pair of nodes in the sequence is joined
    by an edge of the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    nodes : list
        A list of one or more nodes in the graph `G`.

    Returns
    -------
    bool
        Whether the given list of nodes represents a simple path in `G`.

    Notes
    -----
    An empty list of nodes is not a path, while a list of a single node
    is. The *length of a path* is counted in edges, so a list of *n*
    nodes corresponds to a path of length *n* - 1; the one-node list is
    the empty (zero-edge) path.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> nx.is_simple_path(G, [2, 3, 0])
    True
    >>> nx.is_simple_path(G, [0, 2])
    False

    """
    # The empty sequence is not a path (could also be treated as a
    # NetworkXPointlessConcept).
    if not nodes:
        return False

    # A single node is the trivial path, provided the node is in the graph.
    if len(nodes) == 1:
        return nodes[0] in G

    # Every listed node must exist in the graph ...
    for node in nodes:
        if node not in G:
            return False

    # ... and no node may appear twice.
    if len(nodes) != len(set(nodes)):
        return False

    # Finally, each consecutive pair of nodes must be adjacent in G.
    return all(v in G[u] for u, v in zip(nodes, nodes[1:]))
|
92 |
+
|
93 |
+
|
94 |
+
@nx._dispatchable
def all_simple_paths(G, source, target, cutoff=None):
    """Generate all simple paths in the graph G from source to target.

    A simple path is a path with no repeated nodes.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node for path

    target : nodes
        Single node or iterable of nodes at which to end path

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    path_generator: generator
        A generator that produces lists of simple paths (each path a list of
        nodes). If there are no paths between the source and target within
        the given cutoff the generator produces no output. If the same node
        sequence can be traversed through parallel edges, it is produced
        once per viable edge combination.

    Examples
    --------
    >>> G = nx.complete_graph(4)
    >>> sorted(nx.all_simple_paths(G, source=0, target=3))
    [[0, 1, 2, 3], [0, 1, 3], [0, 2, 1, 3], [0, 2, 3], [0, 3]]

    Restrict the search depth with `cutoff`:

    >>> sorted(nx.all_simple_paths(G, source=0, target=3, cutoff=2))
    [[0, 1, 3], [0, 2, 3], [0, 3]]

    The singleton path from ``source`` to itself counts as a simple path:

    >>> G = nx.empty_graph(5)
    >>> list(nx.all_simple_paths(G, source=0, target=0))
    [[0]]

    Notes
    -----
    This algorithm uses a modified depth-first search to generate the
    paths [1]_. A single path can be found in $O(V+E)$ time but the
    number of simple paths in a graph can be very large, e.g. $O(n!)$ in
    the complete graph of order $n$.

    This function does not check that a path exists between `source` and
    `target`; consider `has_path` first on large graphs.

    References
    ----------
    .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
       Addison Wesley Professional, 3rd ed., 2001.

    See Also
    --------
    all_shortest_paths, shortest_path, has_path

    """
    # Delegate the search to the edge-path generator and convert each
    # edge sequence back into the corresponding node sequence.
    for edges in all_simple_edge_paths(G, source, target, cutoff):
        node_path = [source]
        # Each edge is (u, v) or (u, v, key); the visited node is v.
        node_path.extend(v for _, v, *_ in edges)
        yield node_path
|
258 |
+
|
259 |
+
|
260 |
+
@nx._dispatchable
def all_simple_edge_paths(G, source, target, cutoff=None):
    """Generate lists of edges for all simple paths in G from source to target.

    A simple path is a path with no repeated nodes.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node for path

    target : nodes
        Single node or iterable of nodes at which to end path

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    path_generator: generator
        A generator that produces lists of simple paths. If there are no
        paths between the source and target within the given cutoff the
        generator produces no output.
        For multigraphs, the list of edges have elements of the form
        `(u, v, k)` where `k` corresponds to the edge key.

    Examples
    --------
    >>> g = nx.Graph([(1, 2), (2, 4), (1, 3), (3, 4)])
    >>> sorted(nx.all_simple_edge_paths(g, 1, 4))
    [[(1, 2), (2, 4)], [(1, 3), (3, 4)]]

    When ``source`` is one of the targets, the empty path (no edges
    traversed) is a valid simple edge path and is included in the results.

    Notes
    -----
    This algorithm uses a modified depth-first search to generate the
    paths [1]_. A single path can be found in $O(V+E)$ time but the
    number of simple paths in a graph can be very large, e.g. $O(n!)$ in
    the complete graph of order $n$.

    References
    ----------
    .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
       Addison Wesley Professional, 3rd ed., 2001.

    See Also
    --------
    all_shortest_paths, shortest_path, all_simple_paths

    """
    if source not in G:
        raise nx.NodeNotFound(f"source node {source} not in graph")

    # ``target`` may be a single node or an iterable of end nodes.
    if target in G:
        targets = {target}
    else:
        try:
            targets = set(target)
        except TypeError as err:
            raise nx.NodeNotFound(f"target node {target} not in graph") from err

    # Default depth limit: the longest possible simple path has |V| - 1 edges.
    if cutoff is None:
        cutoff = len(G) - 1

    # A negative cutoff or an empty target set admits no paths at all.
    if cutoff < 0 or not targets:
        return

    yield from _all_simple_edge_paths(G, source, targets, cutoff)
|
360 |
+
|
361 |
+
|
362 |
+
def _all_simple_edge_paths(G, source, targets, cutoff):
    # Depth-first enumeration of simple edge paths from ``source`` to any
    # node in ``targets`` with at most ``cutoff`` edges.
    #
    # We simulate recursion with a stack, keeping the current path being explored
    # and the outgoing edge iterators at each point in the stack.
    # To avoid unnecessary checks, the loop is structured in a way such that a path
    # is considered for yielding only after a new node/edge is added.
    # We bootstrap the search by adding a dummy iterator to the stack that only yields
    # a dummy edge to source (so that the trivial path has a chance of being included).

    # Multigraph edges carry a key, so they are requested as (u, v, k) triples.
    get_edges = (
        (lambda node: G.edges(node, keys=True))
        if G.is_multigraph()
        else (lambda node: G.edges(node))
    )

    # The current_path is a dictionary that maps nodes in the path to the edge that was
    # used to enter that node (instead of a list of edges) because we want both a fast
    # membership test for nodes in the path and the preservation of insertion order.
    # The ``{None: None}`` sentinel entry pairs with the dummy ``(None, source)``
    # edge below; together they are the two leading values stripped by ``[2:]``
    # when a path is yielded.
    current_path = {None: None}
    stack = [iter([(None, source)])]

    while stack:
        # 1. Try to extend the current path: take the next outgoing edge whose
        #    head is not already on the path (keeps the path simple).
        next_edge = next((e for e in stack[-1] if e[1] not in current_path), None)
        if next_edge is None:
            # All edges of the last node in the current path have been explored.
            # Backtrack: drop both the exhausted iterator and the last path entry.
            stack.pop()
            current_path.popitem()
            continue
        previous_node, next_node, *_ = next_edge

        # 2. Check if we've reached a target.
        if next_node in targets:
            yield (list(current_path.values()) + [next_edge])[2:]  # remove dummy edge

        # 3. Only expand the search through the next node if it makes sense:
        #    the depth limit is not exhausted (``len(current_path) - 1`` is the
        #    number of edges so far, discounting the sentinel) and at least one
        #    target remains that is neither on the path nor ``next_node`` itself.
        if len(current_path) - 1 < cutoff and (
            targets - current_path.keys() - {next_node}
        ):
            current_path[next_node] = next_edge
            stack.append(iter(get_edges(next_node)))
|
402 |
+
|
403 |
+
|
404 |
+
@not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight")
def shortest_simple_paths(G, source, target, weight=None):
    """Generate all simple paths in the graph G from source to target,
    starting from shortest ones.

    A simple path is a path with no repeated nodes.

    If a weighted shortest path search is to be used, no negative weights
    are allowed.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node for path

    target : node
        Ending node for path

    weight : string or function
        If it is a string, it is the name of the edge attribute to be
        used as a weight.

        If it is a function, the weight of an edge is the value returned
        by the function. The function must accept exactly three positional
        arguments: the two endpoints of an edge and the dictionary of edge
        attributes for that edge. The function must return a number.

        If None all edges are considered to have unit weight. Default
        value None.

    Returns
    -------
    path_generator: generator
        A generator that produces lists of simple paths, in order from
        shortest to longest.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    NetworkXError
        If source or target nodes are not in the input graph.

    NetworkXNotImplemented
        If the input graph is a Multi[Di]Graph.

    Examples
    --------

    >>> G = nx.cycle_graph(7)
    >>> paths = list(nx.shortest_simple_paths(G, 0, 3))
    >>> print(paths)
    [[0, 1, 2, 3], [0, 6, 5, 4, 3]]

    You can use this function to efficiently compute the k shortest/best
    paths between two nodes.

    >>> from itertools import islice
    >>> def k_shortest_paths(G, source, target, k, weight=None):
    ...     return list(
    ...         islice(nx.shortest_simple_paths(G, source, target, weight=weight), k)
    ...     )
    >>> for path in k_shortest_paths(G, 0, 3, 2):
    ...     print(path)
    [0, 1, 2, 3]
    [0, 6, 5, 4, 3]

    Notes
    -----
    This procedure is based on algorithm by Jin Y. Yen [1]_. Finding
    the first $K$ paths requires $O(KN^3)$ operations.

    See Also
    --------
    all_shortest_paths
    shortest_path
    all_simple_paths

    References
    ----------
    .. [1] Jin Y. Yen, "Finding the K Shortest Loopless Paths in a
       Network", Management Science, Vol. 17, No. 11, Theory Series
       (Jul., 1971), pp. 712-716.

    """
    if source not in G:
        raise nx.NodeNotFound(f"source node {source} not in graph")

    if target not in G:
        raise nx.NodeNotFound(f"target node {target} not in graph")

    # Unweighted search ranks paths by node count (len); weighted search
    # sums edge weights via ``wt`` and uses bidirectional Dijkstra.
    if weight is None:
        length_func = len
        shortest_path_func = _bidirectional_shortest_path
    else:
        wt = _weight_function(G, weight)

        def length_func(path):
            # Total weight of a node path: sum the weight of each edge.
            return sum(
                wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:])
            )

        shortest_path_func = _bidirectional_dijkstra

    # Yen's algorithm: listA holds paths already yielded (accepted),
    # listB is a cost-ordered buffer of candidate paths.
    listA = []
    listB = PathBuffer()
    prev_path = None
    while True:
        if not prev_path:
            # First iteration: seed the candidate buffer with the
            # overall shortest path.
            length, path = shortest_path_func(G, source, target, weight=weight)
            listB.push(length, path)
        else:
            # For each prefix (root) of the previously yielded path, look
            # for a "spur" that deviates from every accepted path sharing
            # that prefix.
            ignore_nodes = set()
            ignore_edges = set()
            for i in range(1, len(prev_path)):
                root = prev_path[:i]
                root_length = length_func(root)
                # Forbid the next edge of every accepted path that shares
                # this root, so the spur truly deviates here.
                for path in listA:
                    if path[:i] == root:
                        ignore_edges.add((path[i - 1], path[i]))
                try:
                    length, spur = shortest_path_func(
                        G,
                        root[-1],
                        target,
                        ignore_nodes=ignore_nodes,
                        ignore_edges=ignore_edges,
                        weight=weight,
                    )
                    # Candidate = root (minus its last node, which starts
                    # the spur) + spur.
                    path = root[:-1] + spur
                    listB.push(root_length + length, path)
                except nx.NetworkXNoPath:
                    pass
                # Nodes already on the root may not reappear in later spurs.
                ignore_nodes.add(root[-1])

        if listB:
            # Emit the cheapest remaining candidate and accept it.
            path = listB.pop()
            yield path
            listA.append(path)
            prev_path = path
        else:
            # No candidates left: all simple paths have been produced.
            break
|
550 |
+
|
551 |
+
|
552 |
+
class PathBuffer:
    """A cost-ordered buffer of candidate paths with de-duplication.

    ``pop`` returns the cheapest buffered path. Pushing a path that is
    already buffered is a no-op. A monotonically increasing counter is
    stored alongside each entry so heap comparisons never fall through
    to comparing the (possibly unorderable) path lists themselves.
    """

    def __init__(self):
        self.paths = set()  # tuple(path) for every currently buffered path
        self.sortedpaths = []  # heap of (cost, tiebreak, path)
        self.counter = count()

    def __len__(self):
        return len(self.sortedpaths)

    def push(self, cost, path):
        """Buffer ``path`` at the given ``cost`` unless already present."""
        key = tuple(path)
        if key in self.paths:
            return  # duplicate path: ignore
        self.paths.add(key)
        heappush(self.sortedpaths, (cost, next(self.counter), path))

    def pop(self):
        """Remove and return the cheapest buffered path."""
        cost, _, path = heappop(self.sortedpaths)
        self.paths.remove(tuple(path))
        return path
|
572 |
+
|
573 |
+
|
574 |
+
def _bidirectional_shortest_path(
    G, source, target, ignore_nodes=None, ignore_edges=None, weight=None
):
    """Returns the shortest path between source and target ignoring
    nodes and edges in the containers ignore_nodes and ignore_edges.

    This is a custom modification of the standard bidirectional shortest
    path implementation at networkx.algorithms.unweighted

    Parameters
    ----------
    G : NetworkX graph

    source : node
        starting node for path

    target : node
        ending node for path

    ignore_nodes : container of nodes
        nodes to ignore, optional

    ignore_edges : container of edges
        edges to ignore, optional

    weight : None
        This function accepts a weight argument for convenience of
        shortest_simple_paths function. It will be ignored.

    Returns
    -------
    path: list
        List of nodes in a path from source to target (returned as
        ``(len(path), path)``).

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    See Also
    --------
    shortest_path

    """
    # Run the bidirectional BFS; ``w`` is the node where the two
    # search frontiers met.
    pred, succ, w = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges)

    # Walk successor links forward from the meeting point to the target...
    path = []
    node = w
    while node is not None:
        path.append(node)
        node = succ[node]

    # ...then walk predecessor links back from the meeting point to the
    # source, prepending as we go.
    node = pred[w]
    while node is not None:
        path.insert(0, node)
        node = pred[node]

    return len(path), path
|
635 |
+
|
636 |
+
|
637 |
+
def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None):
    """Bidirectional shortest path helper.

    Returns (pred, succ, w) where
    pred is a dictionary of predecessors from w to the source, and
    succ is a dictionary of successors from w to the target.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node of the forward search.

    target : node
        Starting node of the reverse search.

    ignore_nodes : container of nodes, optional
        Nodes excluded from both searches.

    ignore_edges : container of edges, optional
        Edges excluded from both searches.

    Raises
    ------
    NetworkXNoPath
        If the two frontiers never meet, or if either endpoint is in
        `ignore_nodes`.
    """
    # does BFS from both source and target and meets in the middle
    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
    if target == source:
        # Trivial meeting point: both trees consist of the single shared node.
        return ({target: None}, {source: None}, source)

    # handle either directed or undirected
    if G.is_directed():
        Gpred = G.predecessors
        Gsucc = G.successors
    else:
        Gpred = G.neighbors
        Gsucc = G.neighbors

    # support optional nodes filter
    if ignore_nodes:

        def filter_iter(nodes):
            # Wrap a neighbor iterator so that ignored nodes are skipped.
            def iterate(v):
                for w in nodes(v):
                    if w not in ignore_nodes:
                        yield w

            return iterate

        Gpred = filter_iter(Gpred)
        Gsucc = filter_iter(Gsucc)

    # support optional edges filter
    if ignore_edges:
        if G.is_directed():

            def filter_pred_iter(pred_iter):
                # Skip predecessors reached through an ignored (w, v) edge.
                def iterate(v):
                    for w in pred_iter(v):
                        if (w, v) not in ignore_edges:
                            yield w

                return iterate

            def filter_succ_iter(succ_iter):
                # Skip successors reached through an ignored (v, w) edge.
                def iterate(v):
                    for w in succ_iter(v):
                        if (v, w) not in ignore_edges:
                            yield w

                return iterate

            Gpred = filter_pred_iter(Gpred)
            Gsucc = filter_succ_iter(Gsucc)

        else:

            def filter_iter(nodes):
                # Undirected: an ignored edge blocks travel in both orientations.
                def iterate(v):
                    for w in nodes(v):
                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
                            yield w

                return iterate

            Gpred = filter_iter(Gpred)
            Gsucc = filter_iter(Gsucc)

    # predecessor and successors in search
    pred = {source: None}
    succ = {target: None}

    # initialize fringes, start with forward
    forward_fringe = [source]
    reverse_fringe = [target]

    while forward_fringe and reverse_fringe:
        # Always expand the smaller frontier to keep the two searches balanced.
        if len(forward_fringe) <= len(reverse_fringe):
            this_level = forward_fringe
            forward_fringe = []
            for v in this_level:
                for w in Gsucc(v):
                    if w not in pred:
                        forward_fringe.append(w)
                        pred[w] = v
                    if w in succ:
                        # found path
                        return pred, succ, w
        else:
            this_level = reverse_fringe
            reverse_fringe = []
            for v in this_level:
                for w in Gpred(v):
                    if w not in succ:
                        succ[w] = v
                        reverse_fringe.append(w)
                    if w in pred:
                        # found path
                        return pred, succ, w

    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
|
740 |
+
|
741 |
+
|
742 |
+
def _bidirectional_dijkstra(
    G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None
):
    """Dijkstra's algorithm for shortest paths using bidirectional search.

    This function returns the shortest path between source and target
    ignoring nodes and edges in the containers ignore_nodes and
    ignore_edges.

    This is a custom modification of the standard Dijkstra bidirectional
    shortest path implementation at networkx.algorithms.weighted

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node.

    target : node
        Ending node.

    weight: string, function, optional (default='weight')
        Edge data key or weight function corresponding to the edge weight

    ignore_nodes : container of nodes
        nodes to ignore, optional

    ignore_edges : container of edges
        edges to ignore, optional

    Returns
    -------
    length : number
        Shortest path length.

    path : list
        List of nodes in a shortest path from source to target.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    In practice bidirectional Dijkstra is much more than twice as fast as
    ordinary Dijkstra.

    Ordinary Dijkstra expands nodes in a sphere-like manner from the
    source. The radius of this sphere will eventually be the length
    of the shortest path. Bidirectional Dijkstra will expand nodes
    from both the source and the target, making two spheres of half
    this radius. Volume of the first sphere is pi*r*r while the
    others are 2*pi*r/2*r/2, making up half the volume.

    This algorithm is not guaranteed to work if edge weights
    are negative or are floating point numbers
    (overflows and roundoff errors can cause problems).

    See Also
    --------
    shortest_path
    shortest_path_length
    """
    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
    if source == target:
        if source not in G:
            raise nx.NodeNotFound(f"Node {source} not in graph")
        return (0, [source])

    # handle either directed or undirected
    if G.is_directed():
        Gpred = G.predecessors
        Gsucc = G.successors
    else:
        Gpred = G.neighbors
        Gsucc = G.neighbors

    # support optional nodes filter
    if ignore_nodes:

        def filter_iter(nodes):
            # Wrap a neighbor iterator so that ignored nodes are skipped.
            def iterate(v):
                for w in nodes(v):
                    if w not in ignore_nodes:
                        yield w

            return iterate

        Gpred = filter_iter(Gpred)
        Gsucc = filter_iter(Gsucc)

    # support optional edges filter
    if ignore_edges:
        if G.is_directed():

            def filter_pred_iter(pred_iter):
                # Skip predecessors reached through an ignored (w, v) edge.
                def iterate(v):
                    for w in pred_iter(v):
                        if (w, v) not in ignore_edges:
                            yield w

                return iterate

            def filter_succ_iter(succ_iter):
                # Skip successors reached through an ignored (v, w) edge.
                def iterate(v):
                    for w in succ_iter(v):
                        if (v, w) not in ignore_edges:
                            yield w

                return iterate

            Gpred = filter_pred_iter(Gpred)
            Gsucc = filter_succ_iter(Gsucc)

        else:

            def filter_iter(nodes):
                # Undirected: an ignored edge blocks travel in both orientations.
                def iterate(v):
                    for w in nodes(v):
                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
                            yield w

                return iterate

            Gpred = filter_iter(Gpred)
            Gsucc = filter_iter(Gsucc)

    push = heappush
    pop = heappop
    # Init:   Forward   Backward
    dists = [{}, {}]  # dictionary of final distances
    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
    fringe = [[], []]  # heap of (distance, node) tuples for
    # extracting next node to expand
    seen = [{source: 0}, {target: 0}]  # dictionary of distances to
    # nodes seen
    c = count()
    # initialize fringe heap
    push(fringe[0], (0, next(c), source))
    push(fringe[1], (0, next(c), target))
    # neighs for extracting correct neighbor information
    neighs = [Gsucc, Gpred]
    # The weight function is invariant over the whole search, so build it
    # once here instead of on every iteration of the main loop.
    wt = _weight_function(G, weight)
    # variables to hold the shortest discovered path; finaldist is assigned
    # the first time the two searches touch (when finalpath becomes non-empty)
    finalpath = []
    direction = 1
    while fringe[0] and fringe[1]:
        # choose direction
        # direction == 0 is forward direction and direction == 1 is back
        direction = 1 - direction
        # extract closest to expand
        (dist, _, v) = pop(fringe[direction])
        if v in dists[direction]:
            # Shortest path to v has already been found
            continue
        # update distance
        dists[direction][v] = dist  # equal to seen[direction][v]
        if v in dists[1 - direction]:
            # if we have scanned v in both directions we are done
            # we have now discovered the shortest path
            return (finaldist, finalpath)

        for w in neighs[direction](v):
            if direction == 0:  # forward
                minweight = wt(v, w, G.get_edge_data(v, w))
                vwLength = dists[direction][v] + minweight
            else:  # back, must remember to change v,w->w,v
                minweight = wt(w, v, G.get_edge_data(w, v))
                vwLength = dists[direction][v] + minweight

            if w in dists[direction]:
                if vwLength < dists[direction][w]:
                    raise ValueError("Contradictory paths found: negative weights?")
            elif w not in seen[direction] or vwLength < seen[direction][w]:
                # relaxing
                seen[direction][w] = vwLength
                push(fringe[direction], (vwLength, next(c), w))
                paths[direction][w] = paths[direction][v] + [w]
                if w in seen[0] and w in seen[1]:
                    # see if this path is better than the already
                    # discovered shortest path
                    totaldist = seen[0][w] + seen[1][w]
                    if finalpath == [] or finaldist > totaldist:
                        finaldist = totaldist
                        revpath = paths[1][w][:]
                        revpath.reverse()
                        finalpath = paths[0][w] + revpath[1:]
    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/smetric.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import networkx as nx
|
2 |
+
|
3 |
+
__all__ = ["s_metric"]
|
4 |
+
|
5 |
+
|
6 |
+
@nx._dispatchable
|
7 |
+
def s_metric(G, **kwargs):
    """Returns the s-metric [1]_ of graph.

    The s-metric is defined as the sum of the products ``deg(u) * deg(v)``
    for every edge ``(u, v)`` in `G`.

    Parameters
    ----------
    G : graph
        The graph used to compute the s-metric.
    normalized : bool (optional)
        Normalize the value.

        .. deprecated:: 3.2

           The `normalized` keyword argument is deprecated and will be removed
           in the future

    Returns
    -------
    s : float
        The s-metric of the graph.

    References
    ----------
    .. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger,
           Towards a Theory of Scale-Free Graphs:
           Definition, Properties, and Implications (Extended Version), 2005.
           https://arxiv.org/abs/cond-mat/0501169
    """
    # Deprecation shim: `normalized` is still accepted (its value is ignored)
    # and only warns; any other keyword raises the usual unexpected-keyword
    # TypeError.  This whole block and the **kwargs in the signature can be
    # deleted once the deprecation expires.
    if kwargs:
        if "normalized" not in kwargs:
            # Typical raising behavior for Python when kwarg not recognized
            unexpected = next(iter(kwargs))
            raise TypeError(
                f"s_metric got an unexpected keyword argument '{unexpected}'"
            )
        import warnings

        warnings.warn(
            "\n\nThe `normalized` keyword is deprecated and will be removed\n"
            "in the future. To silence this warning, remove `normalized`\n"
            "when calling `s_metric`.\n\n"
            "The value of `normalized` is ignored.",
            DeprecationWarning,
            stacklevel=3,
        )

    return float(sum(G.degree(u) * G.degree(v) for u, v in G.edges()))
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/sparsifiers.py
ADDED
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for computing sparsifiers of graphs."""
|
2 |
+
import math
|
3 |
+
|
4 |
+
import networkx as nx
|
5 |
+
from networkx.utils import not_implemented_for, py_random_state
|
6 |
+
|
7 |
+
__all__ = ["spanner"]
|
8 |
+
|
9 |
+
|
10 |
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@py_random_state(3)
@nx._dispatchable(edge_attrs="weight", returns_graph=True)
def spanner(G, stretch, weight=None, seed=None):
    """Returns a spanner of the given graph with the given stretch.

    A spanner of a graph G = (V, E) with stretch t is a subgraph
    H = (V, E_S) such that E_S is a subset of E and the distance between
    any pair of nodes in H is at most t times the distance between the
    nodes in G.

    Parameters
    ----------
    G : NetworkX graph
        An undirected simple graph.

    stretch : float
        The stretch of the spanner.

    weight : object
        The edge attribute to use as distance.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    NetworkX graph
        A spanner of the given graph with the given stretch.

    Raises
    ------
    ValueError
        If a stretch less than 1 is given.

    Notes
    -----
    This function implements the spanner algorithm by Baswana and Sen,
    see [1].

    This algorithm is a randomized las vegas algorithm: The expected
    running time is O(km) where k = (stretch + 1) // 2 and m is the
    number of edges in G. The returned graph is always a spanner of the
    given graph with the specified stretch. For weighted graphs the
    number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is
    defined as above and n is the number of nodes in G. For unweighted
    graphs the number of edges is O(n^(1 + 1 / k) + kn).

    References
    ----------
    [1] S. Baswana, S. Sen. A Simple and Linear Time Randomized
    Algorithm for Computing Sparse Spanners in Weighted Graphs.
    Random Struct. Algorithms 30(4): 532-563 (2007).
    """
    if stretch < 1:
        raise ValueError("stretch must be at least 1")

    # A stretch-t spanner requires k = ceil((t + 1) / 2) rounds of clustering.
    k = (stretch + 1) // 2

    # initialize spanner H with empty edge set
    H = nx.empty_graph()
    H.add_nodes_from(G.nodes)

    # phase 1: forming the clusters
    # the residual graph has V' from the paper as its node set
    # and E' from the paper as its edge set
    residual_graph = _setup_residual_graph(G, weight)
    # clustering is a dictionary that maps nodes in a cluster to the
    # cluster center
    clustering = {v: v for v in G.nodes}
    sample_prob = math.pow(G.number_of_nodes(), -1 / k)
    # edge budget per round; exceeding it causes the round to be retried
    size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k)

    i = 0
    while i < k - 1:
        # step 1: sample centers
        # NOTE: `seed` has been normalized by @py_random_state, so
        # seed.random() is a uniform draw in [0, 1).
        sampled_centers = set()
        for center in set(clustering.values()):
            if seed.random() < sample_prob:
                sampled_centers.add(center)

        # combined loop for steps 2 and 3
        edges_to_add = set()
        edges_to_remove = set()
        new_clustering = {}
        for v in residual_graph.nodes:
            # nodes already in a sampled cluster keep their cluster as-is
            if clustering[v] in sampled_centers:
                continue

            # step 2: find neighboring (sampled) clusters and
            # lightest edges to them
            lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts(
                residual_graph, clustering, v
            )
            neighboring_sampled_centers = (
                set(lightest_edge_weight.keys()) & sampled_centers
            )

            # step 3: add edges to spanner
            if not neighboring_sampled_centers:
                # connect to each neighboring center via lightest edge
                for neighbor in lightest_edge_neighbor.values():
                    edges_to_add.add((v, neighbor))
                # remove all incident edges
                for neighbor in residual_graph.adj[v]:
                    edges_to_remove.add((v, neighbor))

            else:  # there is a neighboring sampled center
                closest_center = min(
                    neighboring_sampled_centers, key=lightest_edge_weight.get
                )
                closest_center_weight = lightest_edge_weight[closest_center]
                closest_center_neighbor = lightest_edge_neighbor[closest_center]

                edges_to_add.add((v, closest_center_neighbor))
                # v joins the closest sampled cluster
                new_clustering[v] = closest_center

                # connect to centers with edge weight less than
                # closest_center_weight
                for center, edge_weight in lightest_edge_weight.items():
                    if edge_weight < closest_center_weight:
                        neighbor = lightest_edge_neighbor[center]
                        edges_to_add.add((v, neighbor))

                # remove edges to centers with edge weight less than
                # closest_center_weight
                for neighbor in residual_graph.adj[v]:
                    nbr_cluster = clustering[neighbor]
                    nbr_weight = lightest_edge_weight[nbr_cluster]
                    if (
                        nbr_cluster == closest_center
                        or nbr_weight < closest_center_weight
                    ):
                        edges_to_remove.add((v, neighbor))

        # check whether iteration added too many edges to spanner,
        # if so repeat
        if len(edges_to_add) > size_limit:
            # an iteration is repeated O(1) times on expectation
            continue

        # iteration succeeded
        i = i + 1

        # actually add edges to spanner
        for u, v in edges_to_add:
            _add_edge_to_spanner(H, residual_graph, u, v, weight)

        # actually delete edges from residual graph
        residual_graph.remove_edges_from(edges_to_remove)

        # copy old clustering data to new_clustering
        for node, center in clustering.items():
            if center in sampled_centers:
                new_clustering[node] = center
        clustering = new_clustering

        # step 4: remove intra-cluster edges
        for u in residual_graph.nodes:
            for v in list(residual_graph.adj[u]):
                if clustering[u] == clustering[v]:
                    residual_graph.remove_edge(u, v)

        # update residual graph node set
        for v in list(residual_graph.nodes):
            if v not in clustering:
                residual_graph.remove_node(v)

    # phase 2: vertex-cluster joining
    for v in residual_graph.nodes:
        lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v)
        for neighbor in lightest_edge_neighbor.values():
            _add_edge_to_spanner(H, residual_graph, v, neighbor, weight)

    return H
|
187 |
+
|
188 |
+
|
189 |
+
def _setup_residual_graph(G, weight):
|
190 |
+
"""Setup residual graph as a copy of G with unique edges weights.
|
191 |
+
|
192 |
+
The node set of the residual graph corresponds to the set V' from
|
193 |
+
the Baswana-Sen paper and the edge set corresponds to the set E'
|
194 |
+
from the paper.
|
195 |
+
|
196 |
+
This function associates distinct weights to the edges of the
|
197 |
+
residual graph (even for unweighted input graphs), as required by
|
198 |
+
the algorithm.
|
199 |
+
|
200 |
+
Parameters
|
201 |
+
----------
|
202 |
+
G : NetworkX graph
|
203 |
+
An undirected simple graph.
|
204 |
+
|
205 |
+
weight : object
|
206 |
+
The edge attribute to use as distance.
|
207 |
+
|
208 |
+
Returns
|
209 |
+
-------
|
210 |
+
NetworkX graph
|
211 |
+
The residual graph used for the Baswana-Sen algorithm.
|
212 |
+
"""
|
213 |
+
residual_graph = G.copy()
|
214 |
+
|
215 |
+
# establish unique edge weights, even for unweighted graphs
|
216 |
+
for u, v in G.edges():
|
217 |
+
if not weight:
|
218 |
+
residual_graph[u][v]["weight"] = (id(u), id(v))
|
219 |
+
else:
|
220 |
+
residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v))
|
221 |
+
|
222 |
+
return residual_graph
|
223 |
+
|
224 |
+
|
225 |
+
def _lightest_edge_dicts(residual_graph, clustering, node):
|
226 |
+
"""Find the lightest edge to each cluster.
|
227 |
+
|
228 |
+
Searches for the minimum-weight edge to each cluster adjacent to
|
229 |
+
the given node.
|
230 |
+
|
231 |
+
Parameters
|
232 |
+
----------
|
233 |
+
residual_graph : NetworkX graph
|
234 |
+
The residual graph used by the Baswana-Sen algorithm.
|
235 |
+
|
236 |
+
clustering : dictionary
|
237 |
+
The current clustering of the nodes.
|
238 |
+
|
239 |
+
node : node
|
240 |
+
The node from which the search originates.
|
241 |
+
|
242 |
+
Returns
|
243 |
+
-------
|
244 |
+
lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary
|
245 |
+
lightest_edge_neighbor is a dictionary that maps a center C to
|
246 |
+
a node v in the corresponding cluster such that the edge from
|
247 |
+
the given node to v is the lightest edge from the given node to
|
248 |
+
any node in cluster. lightest_edge_weight maps a center C to the
|
249 |
+
weight of the aforementioned edge.
|
250 |
+
|
251 |
+
Notes
|
252 |
+
-----
|
253 |
+
If a cluster has no node that is adjacent to the given node in the
|
254 |
+
residual graph then the center of the cluster is not a key in the
|
255 |
+
returned dictionaries.
|
256 |
+
"""
|
257 |
+
lightest_edge_neighbor = {}
|
258 |
+
lightest_edge_weight = {}
|
259 |
+
for neighbor in residual_graph.adj[node]:
|
260 |
+
nbr_center = clustering[neighbor]
|
261 |
+
weight = residual_graph[node][neighbor]["weight"]
|
262 |
+
if (
|
263 |
+
nbr_center not in lightest_edge_weight
|
264 |
+
or weight < lightest_edge_weight[nbr_center]
|
265 |
+
):
|
266 |
+
lightest_edge_neighbor[nbr_center] = neighbor
|
267 |
+
lightest_edge_weight[nbr_center] = weight
|
268 |
+
return lightest_edge_neighbor, lightest_edge_weight
|
269 |
+
|
270 |
+
|
271 |
+
def _add_edge_to_spanner(H, residual_graph, u, v, weight):
|
272 |
+
"""Add the edge {u, v} to the spanner H and take weight from
|
273 |
+
the residual graph.
|
274 |
+
|
275 |
+
Parameters
|
276 |
+
----------
|
277 |
+
H : NetworkX graph
|
278 |
+
The spanner under construction.
|
279 |
+
|
280 |
+
residual_graph : NetworkX graph
|
281 |
+
The residual graph used by the Baswana-Sen algorithm. The weight
|
282 |
+
for the edge is taken from this graph.
|
283 |
+
|
284 |
+
u : node
|
285 |
+
One endpoint of the edge.
|
286 |
+
|
287 |
+
v : node
|
288 |
+
The other endpoint of the edge.
|
289 |
+
|
290 |
+
weight : object
|
291 |
+
The edge attribute to use as distance.
|
292 |
+
"""
|
293 |
+
H.add_edge(u, v)
|
294 |
+
if weight:
|
295 |
+
H[u][v][weight] = residual_graph[u][v]["weight"][0]
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/structuralholes.py
ADDED
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Functions for computing measures of structural holes."""
|
2 |
+
|
3 |
+
import networkx as nx
|
4 |
+
|
5 |
+
__all__ = ["constraint", "local_constraint", "effective_size"]
|
6 |
+
|
7 |
+
|
8 |
+
@nx._dispatchable(edge_attrs="weight")
|
9 |
+
def mutual_weight(G, u, v, weight=None):
    """Returns the sum of the weights of the edge from `u` to `v` and
    the edge from `v` to `u` in `G`.

    `weight` is the edge data key that represents the edge weight. If
    the specified key is `None` or is not in the edge data for an edge,
    that edge is assumed to have weight 1.

    Pre-conditions: `u` and `v` must both be in `G`.

    """

    def directed_weight(a, b):
        # Weight of the a -> b edge: the stored attribute if present,
        # 1 when the edge exists without it, and 0 when there is no edge.
        try:
            edge_data = G[a][b]
        except KeyError:
            return 0
        return edge_data.get(weight, 1)

    return directed_weight(u, v) + directed_weight(v, u)
|
29 |
+
|
30 |
+
|
31 |
+
@nx._dispatchable(edge_attrs="weight")
|
32 |
+
def normalized_mutual_weight(G, u, v, norm=sum, weight=None):
    """Returns normalized mutual weight of the edges from `u` to `v`
    with respect to the mutual weights of the neighbors of `u` in `G`.

    `norm` specifies how the normalization factor is computed. It must
    be a function that takes a single argument and returns a number.
    The argument will be an iterable of mutual weights
    of pairs ``(u, w)``, where ``w`` ranges over each (in- and
    out-)neighbor of ``u``. Commons values for `normalization` are
    ``sum`` and ``max``.

    `weight` can be ``None`` or a string, if None, all edge weights
    are considered equal. Otherwise holds the name of the edge
    attribute used as weight.

    """
    neighborhood = set(nx.all_neighbors(G, u))
    scale = norm(mutual_weight(G, u, w, weight) for w in neighborhood)
    if scale == 0:
        # u has no weighted ties at all; avoid dividing by zero
        return 0
    return mutual_weight(G, u, v, weight) / scale
|
50 |
+
|
51 |
+
|
52 |
+
@nx._dispatchable(edge_attrs="weight")
def effective_size(G, nodes=None, weight=None):
    r"""Returns the effective size of all nodes in the graph ``G``.

    The *effective size* of a node's ego network is based on the concept
    of redundancy. A person's ego network has redundancy to the extent
    that her contacts are connected to each other as well. The
    nonredundant part of a person's relationships is the effective
    size of her ego network [1]_. Formally, the effective size of a
    node $u$, denoted $e(u)$, is defined by

    .. math::

       e(u) = \sum_{v \in N(u) \setminus \{u\}}
       \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right)

    where $N(u)$ is the set of neighbors of $u$ and $p_{uw}$ is the
    normalized mutual weight of the (directed or undirected) edges
    joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. And $m_{vw}$
    is the mutual weight of $v$ and $w$ divided by $v$ highest mutual
    weight with any of its neighbors. The *mutual weight* of $u$ and $v$
    is the sum of the weights of edges joining them (edge weights are
    assumed to be one if the graph is unweighted).

    For the case of unweighted and undirected graphs, Borgatti proposed
    a simplified formula to compute effective size [2]_

    .. math::

       e(u) = n - \frac{2t}{n}

    where `t` is the number of ties in the ego network (not including
    ties to ego) and `n` is the number of nodes (excluding ego).

    Parameters
    ----------
    G : NetworkX graph
        The graph containing ``v``. Directed graphs are treated like
        undirected graphs when computing neighbors of ``v``.

    nodes : container, optional
        Container of nodes in the graph ``G`` to compute the effective size.
        If None, the effective size of every node is computed.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    dict
        Dictionary with nodes as keys and the effective size of the node as values.

    Notes
    -----
    Burt also defined the related concept of *efficiency* of a node's ego
    network, which is its effective size divided by the degree of that
    node [1]_. So you can easily compute efficiency:

    >>> G = nx.DiGraph()
    >>> G.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)])
    >>> esize = nx.effective_size(G)
    >>> efficiency = {n: v / G.degree(n) for n, v in esize.items()}

    See also
    --------
    constraint

    References
    ----------
    .. [1] Burt, Ronald S.
           *Structural Holes: The Social Structure of Competition.*
           Cambridge: Harvard University Press, 1995.

    .. [2] Borgatti, S.
           "Structural Holes: Unpacking Burt's Redundancy Measures"
           CONNECTIONS 20(1):35-38.
           http://www.analytictech.com/connections/v20(1)/holes.htm

    """

    def redundancy(G, u, v, weight=None):
        # 1 minus the redundancy term: sum over u's neighbors w of
        # p_uw * m_vw, matching the formula in the docstring.
        nmw = normalized_mutual_weight
        r = sum(
            nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight)
            for w in set(nx.all_neighbors(G, u))
        )
        return 1 - r

    effective_size = {}
    if nodes is None:
        nodes = G
    # Use Borgatti's simplified formula for unweighted and undirected graphs
    if not G.is_directed() and weight is None:
        for v in nodes:
            # Effective size is not defined for isolated nodes
            if len(G[v]) == 0:
                effective_size[v] = float("nan")
                continue
            E = nx.ego_graph(G, v, center=False, undirected=True)
            # n - 2t/n with n = nodes in the ego network, t = ties within it
            effective_size[v] = len(E) - (2 * E.size()) / len(E)
    else:
        # General (weighted and/or directed) case: full Burt formula
        for v in nodes:
            # Effective size is not defined for isolated nodes
            if len(G[v]) == 0:
                effective_size[v] = float("nan")
                continue
            effective_size[v] = sum(
                redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v))
            )
    return effective_size
|
163 |
+
|
164 |
+
|
165 |
+
@nx._dispatchable(edge_attrs="weight")
def constraint(G, nodes=None, weight=None):
    r"""Compute Burt's constraint for the requested nodes of ``G``.

    The *constraint* measures the extent to which a node *v* is invested
    in neighbors that are themselves invested in the other neighbors of
    *v*.  It is the sum of the local constraints over the neighborhood:

    .. math::

       c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w)

    where $N(v)$ is the subset of the neighbors of ``v`` that are either
    predecessors or successors of ``v`` and $\ell(v, w)$ is the local
    constraint on ``v`` with respect to ``w`` [1]_.  See
    :func:`local_constraint` for the definition of local constraint.

    Parameters
    ----------
    G : NetworkX graph
        The graph containing the nodes.  May be directed or undirected.

    nodes : container, optional
        Nodes of ``G`` for which the constraint is computed.  If None,
        the constraint of every node is computed.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    dict
        Dictionary with nodes as keys and the constraint on the node as values.

    See also
    --------
    local_constraint

    References
    ----------
    .. [1] Burt, Ronald S.
           "Structural holes and good ideas".
           American Journal of Sociology (110): 349–399.

    """
    targets = G if nodes is None else nodes
    result = {}
    for v in targets:
        # ``G[v]`` is empty for isolated nodes (and, in digraphs, for nodes
        # without successors); constraint is undefined in that case.
        if len(G[v]) == 0:
            result[v] = float("nan")
            continue
        neighborhood = set(nx.all_neighbors(G, v))
        result[v] = sum(local_constraint(G, v, n, weight) for n in neighborhood)
    return result
|
224 |
+
|
225 |
+
|
226 |
+
@nx._dispatchable(edge_attrs="weight")
def local_constraint(G, u, v, weight=None):
    r"""Compute the local constraint of ``u`` with respect to ``v`` in ``G``.

    The *local constraint on u with respect to v*, denoted $\ell(u, v)$,
    is defined by

    .. math::

       \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2,

    where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the
    normalized mutual weight of the (directed or undirected) edges
    joining $u$ and $v$, for each vertex $u$ and $v$ [1]_.  The *mutual
    weight* of $u$ and $v$ is the sum of the weights of edges joining
    them (edge weights are assumed to be one if the graph is unweighted).

    Parameters
    ----------
    G : NetworkX graph
        The graph containing ``u`` and ``v``.  May be directed or undirected.

    u : node
        A node in the graph ``G``.

    v : node
        A node in the graph ``G``.

    weight : None or string, optional
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    float
        The local constraint of ``u`` with respect to ``v``.

    See also
    --------
    constraint

    References
    ----------
    .. [1] Burt, Ronald S.
           "Structural holes and good ideas".
           American Journal of Sociology (110): 349–399.

    """
    # Direct investment: normalized mutual weight between u and v.
    p_uv = normalized_mutual_weight(G, u, v, weight=weight)
    # Indirect investment: two-step paths u -> w -> v over u's neighborhood.
    two_step = 0
    for w in set(nx.all_neighbors(G, u)):
        p_uw = normalized_mutual_weight(G, u, w, weight=weight)
        p_wv = normalized_mutual_weight(G, w, v, weight=weight)
        two_step += p_uw * p_wv
    return (p_uv + two_step) ** 2
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/summarization.py
ADDED
@@ -0,0 +1,563 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Graph summarization finds smaller representations of graphs resulting in faster
|
3 |
+
runtime of algorithms, reduced storage needs, and noise reduction.
|
4 |
+
Summarization has applications in areas such as visualization, pattern mining,
|
5 |
+
clustering and community detection, and more. Core graph summarization
|
6 |
+
techniques are grouping/aggregation, bit-compression,
|
7 |
+
simplification/sparsification, and influence based. Graph summarization
|
8 |
+
algorithms often produce either summary graphs in the form of supergraphs or
|
9 |
+
sparsified graphs, or a list of independent structures. Supergraphs are the
|
10 |
+
most common product, which consist of supernodes and original nodes and are
|
11 |
+
connected by edges and superedges, which represent aggregate edges between
|
12 |
+
nodes and supernodes.
|
13 |
+
|
14 |
+
Grouping/aggregation based techniques compress graphs by representing
|
15 |
+
close/connected nodes and edges in a graph by a single node/edge in a
|
16 |
+
supergraph. Nodes can be grouped together into supernodes based on their
|
17 |
+
structural similarities or proximity within a graph to reduce the total number
|
18 |
+
of nodes in a graph. Edge-grouping techniques group edges into lossy/lossless
|
19 |
+
nodes called compressor or virtual nodes to reduce the total number of edges in
|
20 |
+
a graph. Edge-grouping techniques can be lossless, meaning that they can be
|
21 |
+
used to re-create the original graph, or techniques can be lossy, requiring
|
22 |
+
less space to store the summary graph, but at the expense of lower
|
23 |
+
reconstruction accuracy of the original graph.
|
24 |
+
|
25 |
+
Bit-compression techniques minimize the amount of information needed to
|
26 |
+
describe the original graph, while revealing structural patterns in the
|
27 |
+
original graph. The two-part minimum description length (MDL) is often used to
|
28 |
+
represent the model and the original graph in terms of the model. A key
|
29 |
+
difference between graph compression and graph summarization is that graph
|
30 |
+
summarization focuses on finding structural patterns within the original graph,
|
31 |
+
whereas graph compression focuses on compressing the original graph to be as
|
32 |
+
small as possible. **NOTE**: Some bit-compression methods exist solely to
|
33 |
+
compress a graph without creating a summary graph or finding comprehensible
|
34 |
+
structural patterns.
|
35 |
+
|
36 |
+
Simplification/Sparsification techniques attempt to create a sparse
|
37 |
+
representation of a graph by removing unimportant nodes and edges from the
|
38 |
+
graph. Sparsified graphs differ from supergraphs created by
|
39 |
+
grouping/aggregation by only containing a subset of the original nodes and
|
40 |
+
edges of the original graph.
|
41 |
+
|
42 |
+
Influence based techniques aim to find a high-level description of influence
|
43 |
+
propagation in a large graph. These methods are scarce and have been mostly
|
44 |
+
applied to social graphs.
|
45 |
+
|
46 |
+
*dedensification* is a grouping/aggregation based technique to compress the
|
47 |
+
neighborhoods around high-degree nodes in unweighted graphs by adding
|
48 |
+
compressor nodes that summarize multiple edges of the same type to
|
49 |
+
high-degree nodes (nodes with a degree greater than a given threshold).
|
50 |
+
Dedensification was developed for the purpose of increasing performance of
|
51 |
+
query processing around high-degree nodes in graph databases and enables direct
|
52 |
+
operations on the compressed graph. The structural patterns surrounding
|
53 |
+
high-degree nodes in the original is preserved while using fewer edges and
|
54 |
+
adding a small number of compressor nodes. The degree of nodes present in the
|
55 |
+
original graph is also preserved. The current implementation of dedensification
|
56 |
+
supports graphs with one edge type.
|
57 |
+
|
58 |
+
For more information on graph summarization, see `Graph Summarization Methods
|
59 |
+
and Applications: A Survey <https://dl.acm.org/doi/abs/10.1145/3186727>`_
|
60 |
+
"""
|
61 |
+
from collections import Counter, defaultdict
|
62 |
+
|
63 |
+
import networkx as nx
|
64 |
+
|
65 |
+
__all__ = ["dedensify", "snap_aggregation"]
|
66 |
+
|
67 |
+
|
68 |
+
@nx._dispatchable(mutates_input={"not copy": 3}, returns_graph=True)
def dedensify(G, threshold, prefix=None, copy=True):
    """Compresses neighborhoods around high-degree nodes

    Reduces the number of edges to high-degree nodes by adding compressor nodes
    that summarize multiple edges of the same type to high-degree nodes (nodes
    with a degree greater than a given threshold).  Dedensification also has
    the added benefit of reducing the number of edges around high-degree nodes.
    The implementation currently supports graphs with a single edge type.

    Parameters
    ----------
    G: graph
       A networkx graph
    threshold: int
       Minimum degree threshold of a node to be considered a high degree node.
       The threshold must be greater than or equal to 2.
    prefix: str or None, optional (default: None)
       An optional prefix for denoting compressor nodes
    copy: bool, optional (default: True)
       Indicates if dedensification should be done inplace

    Returns
    -------
    dedensified networkx graph : (graph, set)
        2-tuple of the dedensified graph and set of compressor nodes

    Notes
    -----
    According to the algorithm in [1]_, removes edges in a graph by
    compressing/decompressing the neighborhoods around high degree nodes by
    adding compressor nodes that summarize multiple edges of the same type
    to high-degree nodes.  Dedensification will only add a compressor node when
    doing so will reduce the total number of edges in the given graph.  This
    implementation currently supports graphs with a single edge type.

    Examples
    --------
    Dedensification will only add compressor nodes when doing so would result
    in fewer edges::

        >>> original_graph = nx.DiGraph()
        >>> original_graph.add_nodes_from(
        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
        ... )
        >>> original_graph.add_edges_from(
        ...     [
        ...         ("1", "C"), ("1", "B"),
        ...         ("2", "C"), ("2", "B"), ("2", "A"),
        ...         ("3", "B"), ("3", "A"), ("3", "6"),
        ...         ("4", "C"), ("4", "B"), ("4", "A"),
        ...         ("5", "B"), ("5", "A"),
        ...         ("6", "5"),
        ...         ("A", "6")
        ...     ]
        ... )
        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
        >>> original_graph.number_of_edges()
        15
        >>> c_graph.number_of_edges()
        14

    A dedensified, directed graph can be "densified" to reconstruct the
    original graph::

        >>> original_graph = nx.DiGraph()
        >>> original_graph.add_nodes_from(
        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
        ... )
        >>> original_graph.add_edges_from(
        ...     [
        ...         ("1", "C"), ("1", "B"),
        ...         ("2", "C"), ("2", "B"), ("2", "A"),
        ...         ("3", "B"), ("3", "A"), ("3", "6"),
        ...         ("4", "C"), ("4", "B"), ("4", "A"),
        ...         ("5", "B"), ("5", "A"),
        ...         ("6", "5"),
        ...         ("A", "6")
        ...     ]
        ... )
        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
        >>> # re-densifies the compressed graph into the original graph
        >>> for c_node in c_nodes:
        ...     all_neighbors = set(nx.all_neighbors(c_graph, c_node))
        ...     out_neighbors = set(c_graph.neighbors(c_node))
        ...     for out_neighbor in out_neighbors:
        ...         c_graph.remove_edge(c_node, out_neighbor)
        ...     in_neighbors = all_neighbors - out_neighbors
        ...     for in_neighbor in in_neighbors:
        ...         c_graph.remove_edge(in_neighbor, c_node)
        ...         for out_neighbor in out_neighbors:
        ...             c_graph.add_edge(in_neighbor, out_neighbor)
        ...     c_graph.remove_node(c_node)
        ...
        >>> nx.is_isomorphic(original_graph, c_graph)
        True

    References
    ----------
    .. [1] Maccioni, A., & Abadi, D. J. (2016, August).
       Scalable pattern matching over compressed graphs via dedensification.
       In Proceedings of the 22nd ACM SIGKDD International Conference on
       Knowledge Discovery and Data Mining (pp. 1755-1764).
       http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf
    """
    if threshold < 2:
        raise nx.NetworkXError("The degree threshold must be >= 2")

    # A node is "high degree" when more than ``threshold`` edges point at it
    # (in-degree for digraphs, plain degree otherwise).
    degrees = G.in_degree if G.is_directed() else G.degree
    high_degree_nodes = {n for n, d in degrees if d > threshold}

    # Bucket every node by the exact set of high-degree nodes it points to;
    # all nodes sharing a bucket can share a single compressor node.
    auxiliary = {}
    for node in G:
        high_degree_nbrs = frozenset(high_degree_nodes & set(G[node]))
        if high_degree_nbrs:
            auxiliary.setdefault(high_degree_nbrs, set()).add(node)

    if copy:
        G = G.copy()

    compressor_nodes = set()
    for high_nbrs, sources in auxiliary.items():
        # Compress only when it strictly reduces the edge count:
        # |sources| * |high_nbrs| edges collapse to |sources| + |high_nbrs|.
        old_edges = len(high_nbrs) * len(sources)
        new_edges = len(high_nbrs) + len(sources)
        if old_edges <= new_edges:
            continue
        compression_node = "".join(str(n) for n in high_nbrs)
        if prefix:
            compression_node = str(prefix) + compression_node
        # Reroute source -> high-degree edges through the compressor node.
        for node in sources:
            for high_node in high_nbrs:
                if G.has_edge(node, high_node):
                    G.remove_edge(node, high_node)

            G.add_edge(node, compression_node)
        for high_node in high_nbrs:
            G.add_edge(compression_node, high_node)
        compressor_nodes.add(compression_node)
    return G, compressor_nodes
|
214 |
+
|
215 |
+
|
216 |
+
def _snap_build_graph(
    G,
    groups,
    node_attributes,
    edge_attributes,
    neighbor_info,
    edge_types,
    prefix,
    supernode_attribute,
    superedge_attribute,
):
    """
    Build the summary graph from the data structures produced in the SNAP aggregation algorithm

    Used in the SNAP aggregation algorithm to build the output summary graph and supernode
    lookup dictionary.  This process uses the original graph and the data structures to
    create the supernodes with the correct node attributes, and the superedges with the correct
    edge attributes

    Parameters
    ----------
    G: networkx.Graph
        the original graph to be summarized
    groups: dict
        A dictionary of unique group IDs and their corresponding node groups
    node_attributes: iterable
        An iterable of the node attributes considered in the summarization process
    edge_attributes: iterable
        An iterable of the edge attributes considered in the summarization process
    neighbor_info: dict
        A data structure indicating the number of edges a node has with the
        groups in the current summarization of each edge type
    edge_types: dict
        dictionary of edges in the graph and their corresponding attributes recognized
        in the summarization
    prefix: string
        The prefix to be added to all supernodes
    supernode_attribute: str
        The node attribute for recording the supernode groupings of nodes
    superedge_attribute: str
        The edge attribute for recording the edge types represented by superedges

    Returns
    -------
    summary graph: Networkx graph
    """
    # The summary graph has the same directed/multigraph flavor as the input.
    output = G.__class__()
    node_label_lookup = {}
    # Create one supernode per group.  By this point all members of a group
    # share identical values for ``node_attributes``, so any member (here the
    # first yielded by the set iterator) can supply those attribute values.
    for index, group_id in enumerate(groups):
        group_set = groups[group_id]
        supernode = f"{prefix}{index}"
        node_label_lookup[group_id] = supernode
        supernode_attributes = {
            attr: G.nodes[next(iter(group_set))][attr] for attr in node_attributes
        }
        supernode_attributes[supernode_attribute] = group_set
        output.add_node(supernode, **supernode_attributes)

    # Add superedges.  Groups are homogeneous, so the neighbor info of any
    # single member describes the whole group's connectivity.
    for group_id in groups:
        group_set = groups[group_id]
        source_supernode = node_label_lookup[group_id]
        for other_group, group_edge_types in neighbor_info[
            next(iter(group_set))
        ].items():
            if group_edge_types:
                target_supernode = node_label_lookup[other_group]
                summary_graph_edge = (source_supernode, target_supernode)

                # NOTE(review): this rebinding shadows the ``edge_types``
                # parameter; the parameter is not read after this point.
                edge_types = [
                    dict(zip(edge_attributes, edge_type))
                    for edge_type in group_edge_types
                ]

                has_edge = output.has_edge(*summary_graph_edge)
                if output.is_multigraph():
                    # Multigraph: one parallel edge per edge type, added on the
                    # first visit of this supernode pair only ...
                    if not has_edge:
                        for edge_type in edge_types:
                            output.add_edge(*summary_graph_edge, **edge_type)
                    # ... except that in the undirected case the reverse visit
                    # may contribute edge types not yet recorded on the edge.
                    elif not output.is_directed():
                        existing_edge_data = output.get_edge_data(*summary_graph_edge)
                        for edge_type in edge_types:
                            if edge_type not in existing_edge_data.values():
                                output.add_edge(*summary_graph_edge, **edge_type)
                else:
                    # Simple graph: a single superedge carrying the list of
                    # represented edge types under ``superedge_attribute``.
                    superedge_attributes = {superedge_attribute: edge_types}
                    output.add_edge(*summary_graph_edge, **superedge_attributes)

    return output
|
304 |
+
|
305 |
+
|
306 |
+
def _snap_eligible_group(G, groups, group_lookup, edge_types):
|
307 |
+
"""
|
308 |
+
Determines if a group is eligible to be split.
|
309 |
+
|
310 |
+
A group is eligible to be split if all nodes in the group have edges of the same type(s)
|
311 |
+
with the same other groups.
|
312 |
+
|
313 |
+
Parameters
|
314 |
+
----------
|
315 |
+
G: graph
|
316 |
+
graph to be summarized
|
317 |
+
groups: dict
|
318 |
+
A dictionary of unique group IDs and their corresponding node groups
|
319 |
+
group_lookup: dict
|
320 |
+
dictionary of nodes and their current corresponding group ID
|
321 |
+
edge_types: dict
|
322 |
+
dictionary of edges in the graph and their corresponding attributes recognized
|
323 |
+
in the summarization
|
324 |
+
|
325 |
+
Returns
|
326 |
+
-------
|
327 |
+
tuple: group ID to split, and neighbor-groups participation_counts data structure
|
328 |
+
"""
|
329 |
+
nbr_info = {node: {gid: Counter() for gid in groups} for node in group_lookup}
|
330 |
+
for group_id in groups:
|
331 |
+
current_group = groups[group_id]
|
332 |
+
|
333 |
+
# build nbr_info for nodes in group
|
334 |
+
for node in current_group:
|
335 |
+
nbr_info[node] = {group_id: Counter() for group_id in groups}
|
336 |
+
edges = G.edges(node, keys=True) if G.is_multigraph() else G.edges(node)
|
337 |
+
for edge in edges:
|
338 |
+
neighbor = edge[1]
|
339 |
+
edge_type = edge_types[edge]
|
340 |
+
neighbor_group_id = group_lookup[neighbor]
|
341 |
+
nbr_info[node][neighbor_group_id][edge_type] += 1
|
342 |
+
|
343 |
+
# check if group_id is eligible to be split
|
344 |
+
group_size = len(current_group)
|
345 |
+
for other_group_id in groups:
|
346 |
+
edge_counts = Counter()
|
347 |
+
for node in current_group:
|
348 |
+
edge_counts.update(nbr_info[node][other_group_id].keys())
|
349 |
+
|
350 |
+
if not all(count == group_size for count in edge_counts.values()):
|
351 |
+
# only the nbr_info of the returned group_id is required for handling group splits
|
352 |
+
return group_id, nbr_info
|
353 |
+
|
354 |
+
# if no eligible groups, complete nbr_info is calculated
|
355 |
+
return None, nbr_info
|
356 |
+
|
357 |
+
|
358 |
+
def _snap_split(groups, neighbor_info, group_lookup, group_id):
|
359 |
+
"""
|
360 |
+
Splits a group based on edge types and updates the groups accordingly
|
361 |
+
|
362 |
+
Splits the group with the given group_id based on the edge types
|
363 |
+
of the nodes so that each new grouping will all have the same
|
364 |
+
edges with other nodes.
|
365 |
+
|
366 |
+
Parameters
|
367 |
+
----------
|
368 |
+
groups: dict
|
369 |
+
A dictionary of unique group IDs and their corresponding node groups
|
370 |
+
neighbor_info: dict
|
371 |
+
A data structure indicating the number of edges a node has with the
|
372 |
+
groups in the current summarization of each edge type
|
373 |
+
edge_types: dict
|
374 |
+
dictionary of edges in the graph and their corresponding attributes recognized
|
375 |
+
in the summarization
|
376 |
+
group_lookup: dict
|
377 |
+
dictionary of nodes and their current corresponding group ID
|
378 |
+
group_id: object
|
379 |
+
ID of group to be split
|
380 |
+
|
381 |
+
Returns
|
382 |
+
-------
|
383 |
+
dict
|
384 |
+
The updated groups based on the split
|
385 |
+
"""
|
386 |
+
new_group_mappings = defaultdict(set)
|
387 |
+
for node in groups[group_id]:
|
388 |
+
signature = tuple(
|
389 |
+
frozenset(edge_types) for edge_types in neighbor_info[node].values()
|
390 |
+
)
|
391 |
+
new_group_mappings[signature].add(node)
|
392 |
+
|
393 |
+
# leave the biggest new_group as the original group
|
394 |
+
new_groups = sorted(new_group_mappings.values(), key=len)
|
395 |
+
for new_group in new_groups[:-1]:
|
396 |
+
# Assign unused integer as the new_group_id
|
397 |
+
# ids are tuples, so will not interact with the original group_ids
|
398 |
+
new_group_id = len(groups)
|
399 |
+
groups[new_group_id] = new_group
|
400 |
+
groups[group_id] -= new_group
|
401 |
+
for node in new_group:
|
402 |
+
group_lookup[node] = new_group_id
|
403 |
+
|
404 |
+
return groups
|
405 |
+
|
406 |
+
|
407 |
+
@nx._dispatchable(
    node_attrs="[node_attributes]", edge_attrs="[edge_attributes]", returns_graph=True
)
def snap_aggregation(
    G,
    node_attributes,
    edge_attributes=(),
    prefix="Supernode-",
    supernode_attribute="group",
    superedge_attribute="types",
):
    """Creates a summary graph based on attributes and connectivity.

    This function uses the Summarization by Grouping Nodes on Attributes
    and Pairwise edges (SNAP) algorithm for summarizing a given
    graph by grouping nodes by node attributes and their edge attributes
    into supernodes in a summary graph.  This name SNAP should not be
    confused with the Stanford Network Analysis Project (SNAP).

    Here is a high-level view of how this algorithm works:

    1) Group nodes by node attribute values.

    2) Iteratively split groups until all nodes in each group have edges
    to nodes in the same groups. That is, until all the groups are homogeneous
    in their member nodes' edges to other groups.  For example,
    if all the nodes in group A only have edge to nodes in group B, then the
    group is homogeneous and does not need to be split. If all nodes in group B
    have edges with nodes in groups {A, C}, but some also have edges with other
    nodes in B, then group B is not homogeneous and needs to be split into
    groups of nodes having edges with {A, C} and a group of nodes having
    edges with {A, B, C}. This way, viewers of the summary graph can
    assume that all nodes in the group have the exact same node attributes and
    the exact same edges.

    3) Build the output summary graph, where the groups are represented by
    super-nodes. Edges represent the edges shared between all the nodes in each
    respective groups.

    A SNAP summary graph can be used to visualize graphs that are too large to display
    or visually analyze, or to efficiently identify sets of similar nodes with similar connectivity
    patterns to other sets of similar nodes based on specified node and/or edge attributes in a graph.

    Parameters
    ----------
    G: graph
        Networkx Graph to be summarized
    node_attributes: iterable, required
        An iterable of the node attributes used to group nodes in the summarization process. Nodes
        with the same values for these attributes will be grouped together in the summary graph.
    edge_attributes: iterable, optional
        An iterable of the edge attributes considered in the summarization process.  If provided, unique
        combinations of the attribute values found in the graph are used to
        determine the edge types in the graph.  If not provided, all edges
        are considered to be of the same type.
    prefix: str
        The prefix used to denote supernodes in the summary graph. Defaults to 'Supernode-'.
    supernode_attribute: str
        The node attribute for recording the supernode groupings of nodes. Defaults to 'group'.
    superedge_attribute: str
        The edge attribute for recording the edge types of multiple edges. Defaults to 'types'.

    Returns
    -------
    networkx.Graph: summary graph

    Examples
    --------
    SNAP aggregation takes a graph and summarizes it in the context of user-provided
    node and edge attributes such that a viewer can more easily extract and
    analyze the information represented by the graph

    >>> nodes = {
    ...     "A": dict(color="Red"),
    ...     "B": dict(color="Red"),
    ...     "C": dict(color="Red"),
    ...     "D": dict(color="Red"),
    ...     "E": dict(color="Blue"),
    ...     "F": dict(color="Blue"),
    ... }
    >>> edges = [
    ...     ("A", "E", "Strong"),
    ...     ("B", "F", "Strong"),
    ...     ("C", "E", "Weak"),
    ...     ("D", "F", "Weak"),
    ... ]
    >>> G = nx.Graph()
    >>> for node in nodes:
    ...     attributes = nodes[node]
    ...     G.add_node(node, **attributes)
    >>> for source, target, type in edges:
    ...     G.add_edge(source, target, type=type)
    >>> node_attributes = ("color",)
    >>> edge_attributes = ("type",)
    >>> summary_graph = nx.snap_aggregation(
    ...     G, node_attributes=node_attributes, edge_attributes=edge_attributes
    ... )

    Notes
    -----
    The summary graph produced is called a maximum Attribute-edge
    compatible (AR-compatible) grouping.  According to [1]_, an
    AR-compatible grouping means that all nodes in each group have the same
    exact node attribute values and the same exact edges and
    edge types to one or more nodes in the same groups.  The maximal
    AR-compatible grouping is the grouping with the minimal cardinality.

    The AR-compatible grouping is the most detailed grouping provided by
    any of the SNAP algorithms.

    References
    ----------
    .. [1] Y. Tian, R. A. Hankins, and J. M. Patel. Efficient aggregation
       for graph summarization. In Proc. 2008 ACM-SIGMOD Int. Conf.
       Management of Data (SIGMOD’08), pages 567–580, Vancouver, Canada,
       June 2008.
    """
    # Map every edge to the tuple of its (possibly missing) attribute values;
    # this tuple acts as the edge's "type" for the rest of the algorithm.
    edge_types = {
        edge: tuple(attrs.get(attr) for attr in edge_attributes)
        for edge, attrs in G.edges.items()
    }
    # For undirected graphs, register each edge under both orientations so the
    # type lookup succeeds regardless of traversal direction.
    if not G.is_directed():
        if G.is_multigraph():
            # list is needed to avoid mutating while iterating
            edges = [((v, u, k), etype) for (u, v, k), etype in edge_types.items()]
        else:
            # list is needed to avoid mutating while iterating
            edges = [((v, u), etype) for (u, v), etype in edge_types.items()]
        edge_types.update(edges)

    # Initial grouping: nodes bucketed by their node-attribute value tuple.
    group_lookup = {
        node: tuple(attrs[attr] for attr in node_attributes)
        for node, attrs in G.nodes.items()
    }
    groups = defaultdict(set)
    for node, node_type in group_lookup.items():
        groups[node_type].add(node)

    # Repeatedly split groups until every group is homogeneous in its
    # members' edge types toward other groups.
    eligible_group_id, nbr_info = _snap_eligible_group(
        G, groups, group_lookup, edge_types
    )
    while eligible_group_id:
        groups = _snap_split(groups, nbr_info, group_lookup, eligible_group_id)
        eligible_group_id, nbr_info = _snap_eligible_group(
            G, groups, group_lookup, edge_types
        )
    return _snap_build_graph(
        G,
        groups,
        node_attributes,
        edge_attributes,
        nbr_info,
        edge_types,
        prefix,
        supernode_attribute,
        superedge_attribute,
    )
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/swap.py
ADDED
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Swap edges in a graph.
|
2 |
+
"""
|
3 |
+
|
4 |
+
import math
|
5 |
+
|
6 |
+
import networkx as nx
|
7 |
+
from networkx.utils import py_random_state
|
8 |
+
|
9 |
+
__all__ = ["double_edge_swap", "connected_double_edge_swap", "directed_edge_swap"]
|
10 |
+
|
11 |
+
|
12 |
+
@nx.utils.not_implemented_for("undirected")
@py_random_state(3)
@nx._dispatchable(mutates_input=True, returns_graph=True)
def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None):
    """Swap three edges in a directed graph while keeping the node degrees fixed.

    A directed edge swap swaps three edges such that a -> b -> c -> d becomes
    a -> c -> b -> d. This pattern of swapping allows all possible states with the
    same in- and out-degree distribution in a directed graph to be reached.

    If the swap would create parallel edges (e.g. if a -> c already existed in the
    previous example), another attempt is made to find a suitable trio of edges.

    Parameters
    ----------
    G : DiGraph
       A directed graph

    nswap : integer (optional, default=1)
       Number of three-edge (directed) swaps to perform

    max_tries : integer (optional, default=100)
       Maximum number of attempts to swap edges

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : DiGraph
       The graph after the edges are swapped.

    Raises
    ------
    NetworkXError
        If `G` is not directed, or
        If nswap > max_tries, or
        If there are fewer than 4 nodes or 3 edges in `G`.
    NetworkXAlgorithmError
        If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made

    Notes
    -----
    Does not enforce any connectivity constraints.

    The graph G is modified in place.

    A later swap is allowed to undo a previous swap.

    References
    ----------
    .. [1] Erdős, Péter L., et al. “A Simple Havel-Hakimi Type Algorithm to Realize
           Graphical Degree Sequences of Directed Graphs.” ArXiv:0905.4913 [Math],
           Jan. 2010. https://doi.org/10.48550/arXiv.0905.4913.
           Published  2010 in Elec. J. Combinatorics (17(1)). R66.
           http://www.combinatorics.org/Volume_17/PDF/v17i1r66.pdf
    .. [2] “Combinatorics - Reaching All Possible Simple Directed Graphs with a given
           Degree Sequence with 2-Edge Swaps.” Mathematics Stack Exchange,
           https://math.stackexchange.com/questions/22272/. Accessed 30 May 2022.
    """
    if nswap > max_tries:
        raise nx.NetworkXError("Number of swaps > number of tries allowed.")
    if len(G) < 4:
        raise nx.NetworkXError("DiGraph has fewer than four nodes.")
    if len(G.edges) < 3:
        raise nx.NetworkXError("DiGraph has fewer than 3 edges")

    # Instead of choosing uniformly at random from a generated edge list,
    # this algorithm chooses nonuniformly from the set of nodes with
    # probability weighted by degree.
    tries = 0  # total number of swap attempts made so far
    swapcount = 0  # number of successful swaps made so far
    keys, degrees = zip(*G.degree())  # keys, degree
    cdf = nx.utils.cumulative_distribution(degrees)  # cdf of degree
    discrete_sequence = nx.utils.discrete_sequence

    while swapcount < nswap:
        # choose source node index from discrete distribution
        start_index = discrete_sequence(1, cdistribution=cdf, seed=seed)[0]
        start = keys[start_index]
        tries += 1

        if tries > max_tries:
            msg = f"Maximum number of swap attempts ({tries}) exceeded before desired swaps achieved ({nswap})."
            raise nx.NetworkXAlgorithmError(msg)

        # If the given node doesn't have any out edges, then there isn't anything to swap
        if G.out_degree(start) == 0:
            continue
        # Walk a random directed path start -> second -> third -> fourth,
        # rejecting self-loops at every step.
        second = seed.choice(list(G.succ[start]))
        if start == second:
            continue

        if G.out_degree(second) == 0:
            continue
        third = seed.choice(list(G.succ[second]))
        if second == third:
            continue

        if G.out_degree(third) == 0:
            continue
        fourth = seed.choice(list(G.succ[third]))
        if third == fourth:
            continue

        # Only swap when none of the three replacement edges already exists,
        # so no parallel edges are created in the simple digraph.
        if (
            third not in G.succ[start]
            and fourth not in G.succ[second]
            and second not in G.succ[third]
        ):
            # Swap nodes
            G.add_edge(start, third)
            G.add_edge(third, second)
            G.add_edge(second, fourth)
            G.remove_edge(start, second)
            G.remove_edge(second, third)
            G.remove_edge(third, fourth)
            swapcount += 1

    return G
|
133 |
+
|
134 |
+
|
135 |
+
@py_random_state(3)
@nx._dispatchable(mutates_input=True, returns_graph=True)
def double_edge_swap(G, nswap=1, max_tries=100, seed=None):
    """Swap two edges in the graph while keeping the node degrees fixed.

    A double-edge swap removes two randomly chosen edges u-v and x-y
    and creates the new edges u-x and v-y::

     u--v            u  v
            becomes  |  |
     x--y            x  y

    If either the edge u-x or v-y already exist no swap is performed
    and another attempt is made to find a suitable edge pair.

    Parameters
    ----------
    G : graph
       An undirected graph

    nswap : integer (optional, default=1)
       Number of double-edge swaps to perform

    max_tries : integer (optional)
       Maximum number of attempts to swap edges

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : graph
       The graph after double edge swaps.

    Raises
    ------
    NetworkXError
        If `G` is directed, or
        If `nswap` > `max_tries`, or
        If there are fewer than 4 nodes or 2 edges in `G`.
    NetworkXAlgorithmError
        If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made

    Notes
    -----
    Does not enforce any connectivity constraints.

    The graph G is modified in place.
    """
    if G.is_directed():
        raise nx.NetworkXError(
            "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead."
        )
    if nswap > max_tries:
        raise nx.NetworkXError("Number of swaps > number of tries allowed.")
    if len(G) < 4:
        raise nx.NetworkXError("Graph has fewer than four nodes.")
    if len(G.edges) < 2:
        raise nx.NetworkXError("Graph has fewer than 2 edges")
    # Instead of choosing uniformly at random from a generated edge list,
    # this algorithm chooses nonuniformly from the set of nodes with
    # probability weighted by degree.
    n = 0  # number of swap attempts made so far
    swapcount = 0  # number of successful swaps made so far
    keys, degrees = zip(*G.degree())  # keys, degree
    cdf = nx.utils.cumulative_distribution(degrees)  # cdf of degree
    discrete_sequence = nx.utils.discrete_sequence
    while swapcount < nswap:
        #        if random.random() < 0.5: continue # trick to avoid periodicities?
        # pick two random edges without creating edge list
        # choose source node indices from discrete distribution
        (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
        if ui == xi:
            continue  # same source, skip
        u = keys[ui]  # convert index to label
        x = keys[xi]
        # choose target uniformly from neighbors
        v = seed.choice(list(G[u]))
        y = seed.choice(list(G[x]))
        if v == y:
            continue  # same target, skip
        if (x not in G[u]) and (y not in G[v]):  # don't create parallel edges
            G.add_edge(u, x)
            G.add_edge(v, y)
            G.remove_edge(u, v)
            G.remove_edge(x, y)
            swapcount += 1
        if n >= max_tries:
            e = (
                f"Maximum number of swap attempts ({n}) exceeded "
                f"before desired swaps achieved ({nswap})."
            )
            raise nx.NetworkXAlgorithmError(e)
        n += 1
    return G
|
231 |
+
|
232 |
+
|
233 |
+
@py_random_state(3)
@nx._dispatchable(mutates_input=True)
def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
    """Attempts the specified number of double-edge swaps in the graph `G`.

    A double-edge swap removes two randomly chosen edges `(u, v)` and `(x,
    y)` and creates the new edges `(u, x)` and `(v, y)`::

     u--v            u  v
            becomes  |  |
     x--y            x  y

    If either `(u, x)` or `(v, y)` already exist, then no swap is performed
    so the actual number of swapped edges is always *at most* `nswap`.

    Parameters
    ----------
    G : graph
       An undirected graph

    nswap : integer (optional, default=1)
       Number of double-edge swaps to perform

    _window_threshold : integer

       The window size below which connectedness of the graph will be checked
       after each swap.

       The "window" in this function is a dynamically updated integer that
       represents the number of swap attempts to make before checking if the
       graph remains connected. It is an optimization used to decrease the
       running time of the algorithm in exchange for increased complexity of
       implementation.

       If the window size is below this threshold, then the algorithm checks
       after each swap if the graph remains connected by checking if there is a
       path joining the two nodes whose edge was just removed. If the window
       size is above this threshold, then the algorithm performs do all the
       swaps in the window and only then check if the graph is still connected.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    int
       The number of successful swaps

    Raises
    ------

    NetworkXError

       If the input graph is not connected, or if the graph has fewer than four
       nodes.

    Notes
    -----

    The initial graph `G` must be connected, and the resulting graph is
    connected. The graph `G` is modified in place.

    References
    ----------
    .. [1] C. Gkantsidis and M. Mihail and E. Zegura,
           The Markov chain simulation method for generating connected
           power law random graphs, 2003.
           http://citeseer.ist.psu.edu/gkantsidis03markov.html
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected")
    if len(G) < 4:
        raise nx.NetworkXError("Graph has fewer than four nodes.")
    n = 0  # number of swap attempts made so far
    swapcount = 0  # number of swaps currently applied to the graph
    deg = G.degree()
    # Label key for nodes
    dk = [n for n, d in G.degree()]
    cdf = nx.utils.cumulative_distribution([d for n, d in G.degree()])
    discrete_sequence = nx.utils.discrete_sequence
    window = 1  # number of swaps to attempt before re-checking connectivity
    while n < nswap:
        wcount = 0  # successful swaps within the current window
        swapped = []  # (u, v, x, y) records, kept so swaps can be undone
        # If the window is small, we just check each time whether the graph is
        # connected by checking if the nodes that were just separated are still
        # connected.
        if window < _window_threshold:
            # This Boolean keeps track of whether there was a failure or not.
            fail = False
            while wcount < window and n < nswap:
                # Pick two random edges without creating the edge list. Choose
                # source nodes from the discrete degree distribution.
                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
                # If the source nodes are the same, skip this pair.
                if ui == xi:
                    continue
                # Convert an index to a node label.
                u = dk[ui]
                x = dk[xi]
                # Choose targets uniformly from neighbors.
                v = seed.choice(list(G.neighbors(u)))
                y = seed.choice(list(G.neighbors(x)))
                # If the target nodes are the same, skip this pair.
                if v == y:
                    continue
                if x not in G[u] and y not in G[v]:
                    G.remove_edge(u, v)
                    G.remove_edge(x, y)
                    G.add_edge(u, x)
                    G.add_edge(v, y)
                    swapped.append((u, v, x, y))
                    swapcount += 1
                n += 1
                # If G remains connected...
                if nx.has_path(G, u, v):
                    wcount += 1
                # Otherwise, undo the changes.
                else:
                    G.add_edge(u, v)
                    G.add_edge(x, y)
                    G.remove_edge(u, x)
                    G.remove_edge(v, y)
                    swapcount -= 1
                    fail = True
            # If one of the swaps failed, reduce the window size.
            if fail:
                window = math.ceil(window / 2)
            else:
                window += 1
        # If the window is large, then there is a good chance that a bunch of
        # swaps will work. It's quicker to do all those swaps first and then
        # check if the graph remains connected.
        else:
            while wcount < window and n < nswap:
                # Pick two random edges without creating the edge list. Choose
                # source nodes from the discrete degree distribution.
                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
                # If the source nodes are the same, skip this pair.
                if ui == xi:
                    continue
                # Convert an index to a node label.
                u = dk[ui]
                x = dk[xi]
                # Choose targets uniformly from neighbors.
                v = seed.choice(list(G.neighbors(u)))
                y = seed.choice(list(G.neighbors(x)))
                # If the target nodes are the same, skip this pair.
                if v == y:
                    continue
                if x not in G[u] and y not in G[v]:
                    G.remove_edge(u, v)
                    G.remove_edge(x, y)
                    G.add_edge(u, x)
                    G.add_edge(v, y)
                    swapped.append((u, v, x, y))
                    swapcount += 1
                n += 1
                wcount += 1
            # If the graph remains connected, increase the window size.
            if nx.is_connected(G):
                window += 1
            # Otherwise, undo the changes from the previous window and decrease
            # the window size.
            else:
                while swapped:
                    (u, v, x, y) = swapped.pop()
                    G.add_edge(u, v)
                    G.add_edge(x, y)
                    G.remove_edge(u, x)
                    G.remove_edge(v, y)
                    swapcount -= 1
                window = math.ceil(window / 2)
    return swapcount
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/threshold.py
ADDED
@@ -0,0 +1,979 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Threshold Graphs - Creation, manipulation and identification.
|
3 |
+
"""
|
4 |
+
from math import sqrt
|
5 |
+
|
6 |
+
import networkx as nx
|
7 |
+
from networkx.utils import py_random_state
|
8 |
+
|
9 |
+
__all__ = ["is_threshold_graph", "find_threshold_graph"]
|
10 |
+
|
11 |
+
|
12 |
+
@nx._dispatchable
def is_threshold_graph(G):
    """
    Returns `True` if `G` is a threshold graph.

    A graph is a threshold graph exactly when its degree sequence is a
    threshold sequence, so the check is delegated to
    `is_threshold_sequence`.

    Parameters
    ----------
    G : NetworkX graph instance
        An instance of `Graph`, `DiGraph`, `MultiGraph` or `MultiDiGraph`

    Returns
    -------
    bool
        `True` if `G` is a threshold graph, `False` otherwise.

    Examples
    --------
    >>> from networkx.algorithms.threshold import is_threshold_graph
    >>> G = nx.path_graph(3)
    >>> is_threshold_graph(G)
    True
    >>> G = nx.barbell_graph(3, 3)
    >>> is_threshold_graph(G)
    False

    References
    ----------
    .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
    """
    degrees = [degree for _node, degree in G.degree()]
    return is_threshold_sequence(degrees)
|
42 |
+
|
43 |
+
|
44 |
+
def is_threshold_sequence(degree_sequence):
    """
    Returns True if the sequence is a threshold degree sequence.

    A threshold graph is built by repeatedly adding either an isolated
    node or a dominating node, so it can be deconstructed in reverse:
    repeatedly strip a degree-zero node, or strip a node adjacent to all
    remaining nodes (decrementing every other degree). The sequence is a
    threshold sequence exactly when this deconstruction consumes it
    completely.
    """
    remaining = sorted(degree_sequence)  # work on a sorted copy; input untouched
    while remaining:
        if remaining[0] == 0:
            # An isolated node can always be peeled off.
            del remaining[0]
        elif remaining[-1] == len(remaining) - 1:
            # The largest-degree node dominates all others: remove it and
            # decrement the remaining degrees to account for its edges.
            remaining.pop()
            remaining = [degree - 1 for degree in remaining]
        else:
            # Neither isolated nor dominating node exists -> not threshold.
            return False
    return True
|
65 |
+
|
66 |
+
|
67 |
+
def creation_sequence(degree_sequence, with_labels=False, compact=False):
    """
    Determines the creation sequence for the given threshold degree sequence.

    The creation sequence is a list of single characters 'd'
    or 'i': 'd' for dominating or 'i' for isolated vertices.
    Dominating vertices are connected to all vertices present when it
    is added. The first node added is by convention 'd'.
    This list can be converted to a string if desired using "".join(cs)

    If with_labels==True:
        Returns a list of 2-tuples containing the vertex number
        and a character 'd' or 'i' which describes the type of vertex.

    If compact==True:
        Returns the creation sequence in a compact form that is the number
        of 'i's and 'd's alternating.
        Examples:
        [1,2,2,3] represents d,i,i,d,d,i,i,i
        [3,1,2] represents d,d,d,i,d,d

        Notice that the first number is the first vertex to be used for
        construction and so is always 'd'.

    with_labels and compact cannot both be True.

    Returns None if the sequence is not a threshold sequence
    """
    if with_labels and compact:
        raise ValueError("compact sequences cannot be labeled")

    # Build a sortable, indexed working copy as [degree, label] pairs.
    if isinstance(degree_sequence, dict):  # labeled degree sequence
        remaining = sorted([deg, label] for label, deg in degree_sequence.items())
    else:
        remaining = sorted([deg, label] for label, deg in enumerate(degree_sequence))

    cs = []  # creation sequence, built back-to-front
    while remaining:
        if remaining[0][0] == 0:
            # Peel off an isolated node. It is labeled 'i' unless it is the
            # very last node standing, which by convention must be 'd'.
            _deg, label = remaining.pop(0)
            cs.insert(0, (label, "i" if remaining else "d"))
        elif remaining[-1][0] == len(remaining) - 1:
            # Peel off a dominating node and decrement all other degrees.
            _deg, label = remaining.pop()
            cs.insert(0, (label, "d"))
            remaining = [[deg - 1, lbl] for deg, lbl in remaining]
        else:
            # Deconstruction failed: not a threshold degree sequence.
            return None

    if with_labels:
        return cs
    if compact:
        return make_compact(cs)
    return [entry[1] for entry in cs]  # unlabeled sequence of 'd'/'i'
|
124 |
+
|
125 |
+
|
126 |
+
def make_compact(creation_sequence):
    """
    Returns the creation sequence in a compact form
    that is the number of 'i's and 'd's alternating.

    Examples
    --------
    >>> from networkx.algorithms.threshold import make_compact
    >>> make_compact(["d", "i", "i", "d", "d", "i", "i", "i"])
    [1, 2, 2, 3]
    >>> make_compact(["d", "d", "d", "i", "d", "d"])
    [3, 1, 2]

    Notice that the first number is the first vertex
    to be used for construction and so is always 'd'.

    Labeled creation sequences lose their labels in the
    compact representation.

    >>> make_compact([3, 1, 2])
    [3, 1, 2]
    """
    head = creation_sequence[0]
    if isinstance(head, str):  # plain creation sequence
        symbols = list(creation_sequence)
    elif isinstance(head, tuple):  # labeled creation sequence: drop labels
        symbols = [entry[1] for entry in creation_sequence]
    elif isinstance(head, int):  # already compact
        return creation_sequence
    else:
        raise TypeError("Not a valid creation sequence type")

    # Run-length encode the symbol list: one count per maximal run.
    runs = []
    run_length = 1
    for previous, current in zip(symbols, symbols[1:]):
        if current == previous:
            run_length += 1
        else:
            runs.append(run_length)
            run_length = 1
    runs.append(run_length)  # flush the final run
    return runs
|
168 |
+
|
169 |
+
|
170 |
+
def uncompact(creation_sequence):
    """
    Converts a compact creation sequence for a threshold
    graph to a standard creation sequence (unlabeled).
    If the creation_sequence is already standard, return it.
    See creation_sequence.
    """
    head = creation_sequence[0]
    if isinstance(head, (str, tuple)):
        # Already a standard or labeled creation sequence; hand it back.
        return creation_sequence
    if not isinstance(head, int):
        raise TypeError("Not a valid creation sequence type")
    # Compact form: run lengths alternate 'd', 'i', 'd', 'i', ...
    expanded = []
    for position, run_length in enumerate(creation_sequence):
        expanded.extend(run_length * ["d" if position % 2 == 0 else "i"])
    return expanded
|
192 |
+
|
193 |
+
|
194 |
+
def creation_sequence_to_weights(creation_sequence):
    """
    Returns a list of node weights which create the threshold
    graph designated by the creation sequence.  The weights
    are scaled so that the threshold is 1.0.  The order of the
    nodes is the same as that in the creation sequence.
    """
    # Turn input sequence into a labeled creation sequence
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        if isinstance(creation_sequence, list):
            wseq = creation_sequence[:]
        else:
            wseq = list(creation_sequence)  # string like 'ddidid'
    elif isinstance(first, tuple):  # labeled creation sequence
        wseq = [v[1] for v in creation_sequence]
    elif isinstance(first, int):  # compact creation sequence
        wseq = uncompact(creation_sequence)
    else:
        raise TypeError("Not a valid creation sequence type")
    # pass through twice--first backwards
    # Backward pass: assign an integer weight level to every 'i' entry,
    # bumping the level once at the first 'd' that follows a run of 'i's.
    wseq.reverse()
    w = 0
    prev = "i"
    for j, s in enumerate(wseq):
        if s == "i":
            wseq[j] = w
            prev = s
        elif prev == "i":
            prev = s
            w += 1
    # Forward pass: assign levels to the 'd' entries the same way.
    # NOTE: `prev` and `w` deliberately carry over from the backward pass.
    wseq.reverse()  # now pass through forwards
    for j, s in enumerate(wseq):
        if s == "d":
            wseq[j] = w
            prev = s
        elif prev == "d":
            prev = s
            w += 1
    # Now scale weights so that the edge threshold becomes 1.0
    if prev == "d":
        w += 1
    wscale = 1 / w
    return [ww * wscale for ww in wseq]
|
239 |
+
|
240 |
+
|
241 |
+
def weights_to_creation_sequence(
    weights, threshold=1, with_labels=False, compact=False
):
    """
    Returns a creation sequence for a threshold graph
    determined by the weights and threshold given as input.
    If the sum of two node weights is greater than the
    threshold value, an edge is created between these nodes.

    The creation sequence is a list of single characters 'd'
    or 'i': 'd' for dominating or 'i' for isolated vertices.
    Dominating vertices are connected to all vertices present
    when it is added.  The first node added is by convention 'd'.

    If with_labels==True:
        Returns a list of 2-tuples containing the vertex number
        and a character 'd' or 'i' which describes the type of vertex.

    If compact==True:
        Returns the creation sequence in a compact form that is the number
        of 'i's and 'd's alternating.
        Examples:
        [1,2,2,3] represents d,i,i,d,d,i,i,i
        [3,1,2] represents d,d,d,i,d,d

        Notice that the first number is the first vertex to be used for
        construction and so is always 'd'.

    with_labels and compact cannot both be True.

    Raises
    ------
    ValueError
        If both `with_labels` and `compact` are True.
    """
    if with_labels and compact:
        raise ValueError("compact sequences cannot be labeled")

    # make an indexed copy
    if isinstance(weights, dict):  # labeled weights
        wseq = [[w, label] for (label, w) in weights.items()]
    else:
        wseq = [[w, i] for i, w in enumerate(weights)]
    wseq.sort()
    if not wseq:  # no nodes -> empty creation sequence
        return []
    cs = []  # creation sequence, built in reverse removal order
    # A node is isolated iff its weight plus the largest remaining weight
    # stays below the threshold, i.e. weight < threshold - max_remaining.
    cutoff = threshold - wseq[-1][0]
    while wseq:
        if wseq[0][0] < cutoff:  # isolated node
            (w, label) = wseq.pop(0)
            cs.append((label, "i"))
        else:  # dominating node: connects to everything remaining
            (w, label) = wseq.pop()
            cs.append((label, "d"))
            # Guard: popping the last node leaves nothing to compare against.
            # Without this, wseq[-1] raised IndexError for inputs like [1].
            if wseq:
                cutoff = threshold - wseq[-1][0]
        if len(wseq) == 1:  # make sure we start with a d
            (w, label) = wseq.pop()
            cs.append((label, "d"))
    # put in correct order
    cs.reverse()

    if with_labels:
        return cs
    if compact:
        return make_compact(cs)
    return [v[1] for v in cs]  # not labeled
|
301 |
+
|
302 |
+
|
303 |
+
# Manipulating NetworkX.Graphs in context of threshold graphs
|
304 |
+
@nx._dispatchable(graphs=None, returns_graph=True)
def threshold_graph(creation_sequence, create_using=None):
    """
    Create a threshold graph from the creation sequence or compact
    creation_sequence.

    The input sequence can be a

    creation sequence (e.g. ['d','i','d','d','d','i'])
    labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')])
    compact creation sequence (e.g. [2,1,1,2,0])

    Use cs=creation_sequence(degree_sequence,labeled=True)
    to convert a degree sequence to a creation sequence.

    Returns None if the sequence is not valid
    """
    # Turn input sequence into a labeled creation sequence.
    # The type of the first element decides which of the three
    # accepted formats was passed in.
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        ci = list(enumerate(creation_sequence))
    elif isinstance(first, tuple):  # labeled creation sequence
        ci = creation_sequence[:]
    elif isinstance(first, int):  # compact creation sequence
        cs = uncompact(creation_sequence)
        ci = list(enumerate(cs))
    else:
        # NOTE(review): prints instead of raising; documented behavior is
        # "Returns None if the sequence is not valid", so kept as-is.
        print("not a valid creation sequence type")
        return None

    G = nx.empty_graph(0, create_using)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    G.name = "Threshold Graph"

    # add nodes and edges
    # if type is 'i' just add node
    # if type is a 'd' connect to everything previous
    while ci:
        (v, node_type) = ci.pop(0)
        if node_type == "d":  # dominating type, connect to all existing nodes
            # We use `for u in list(G):` instead of
            # `for u in G:` because we edit the graph `G` in
            # the loop. Hence using an iterator will result in
            # `RuntimeError: dictionary changed size during iteration`
            for u in list(G):
                G.add_edge(v, u)
        G.add_node(v)
    return G
|
354 |
+
|
355 |
+
|
356 |
+
@nx._dispatchable
def find_alternating_4_cycle(G):
    """
    Returns False if there aren't any alternating 4 cycles.
    Otherwise returns the cycle as [a,b,c,d] where (a,b)
    and (c,d) are edges and (a,c) and (b,d) are not.
    """
    # For each edge (u, v), look for a vertex w not adjacent to u and a
    # neighbor x of w that is not adjacent to v: that is an alternating
    # 4-cycle (edge, non-edge, edge, non-edge).
    for u, v in G.edges():
        for w in G.nodes():
            if w == u or G.has_edge(u, w):
                continue
            for x in G.neighbors(w):
                if x != v and not G.has_edge(v, x):
                    return [u, v, w, x]
    return False
|
370 |
+
|
371 |
+
|
372 |
+
@nx._dispatchable(returns_graph=True)
def find_threshold_graph(G, create_using=None):
    """
    Returns a threshold subgraph that is close to largest in `G`.

    The threshold graph will contain the largest degree node in G.

    Parameters
    ----------
    G : NetworkX graph instance
        An instance of `Graph`, or `MultiDiGraph`
    create_using : NetworkX graph class or `None` (default), optional
        Type of graph to use when constructing the threshold graph.
        If `None`, infer the appropriate graph type from the input.

    Returns
    -------
    graph :
        A graph instance representing the threshold graph

    Examples
    --------
    >>> from networkx.algorithms.threshold import find_threshold_graph
    >>> G = nx.barbell_graph(3, 3)
    >>> T = find_threshold_graph(G)
    >>> T.nodes # may vary
    NodeView((7, 8, 5, 6))

    References
    ----------
    .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
    """
    # Extract a labeled creation sequence first, then materialize it
    # as an actual graph object.
    labeled_cs = find_creation_sequence(G)
    return threshold_graph(labeled_cs, create_using)
|
405 |
+
|
406 |
+
|
407 |
+
@nx._dispatchable
def find_creation_sequence(G):
    """
    Find a threshold subgraph that is close to largest in G.
    Returns the labeled creation sequence of that threshold graph.

    Greedy heuristic: repeatedly peel off currently-isolated nodes as
    type 'i' and the highest-degree node as type 'd', then restrict to
    that node's neighborhood and repeat.
    """
    cs = []
    # get a local pointer to the working part of the graph
    H = G
    while H.order() > 0:
        # get new degree sequence on subgraph
        dsdict = dict(H.degree())
        ds = [(d, v) for v, d in dsdict.items()]
        ds.sort()
        # Update threshold graph nodes
        if ds[-1][0] == 0:  # all are isolated
            # all remaining nodes become 'i' except one which becomes
            # the conventional leading 'd' (sequence is reversed below)
            cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"]))
            break  # Done!
        # pull off isolated nodes
        while ds[0][0] == 0:
            (d, iso) = ds.pop(0)
            cs.append((iso, "i"))
        # find new biggest node
        (d, bigv) = ds.pop()
        # add edges of star to t_g
        cs.append((bigv, "d"))
        # form subgraph of neighbors of big node
        H = H.subgraph(H.neighbors(bigv))
    # sequence was built from the outside in; reverse into creation order
    cs.reverse()
    return cs
|
437 |
+
|
438 |
+
|
439 |
+
# Properties of Threshold Graphs
|
440 |
+
def triangles(creation_sequence):
    """
    Compute the number of triangles in the threshold graph with the
    given creation sequence.

    Parameters
    ----------
    creation_sequence : list
        Unlabeled creation sequence, e.g. ``['d', 'i', 'i', 'd']``.

    Returns
    -------
    int
        Number of triangles in the graph.
    """
    # Shortcut algorithm that doesn't require computing the number
    # of triangles at each node.
    cs = creation_sequence  # alias
    dr = cs.count("d")  # number of d's in sequence
    # The 'd' nodes form a clique of size dr: C(dr, 3) triangles.
    # Integer division is exact since dr*(dr-1)*(dr-2) is divisible by 6.
    ntri = dr * (dr - 1) * (dr - 2) // 6
    # Each 'i' adds C(dr, 2) triangles, where dr is the number of d's to
    # its right (exactly its neighbors, which are pairwise adjacent).
    for typ in cs:
        if typ == "i":
            ntri += dr * (dr - 1) // 2
        else:
            dr -= 1
    return ntri
|
458 |
+
|
459 |
+
|
460 |
+
def triangle_sequence(creation_sequence):
    """
    Return triangle sequence for the given threshold graph creation sequence.

    The i-th entry is the number of triangles containing the i-th node.
    """
    cs = creation_sequence
    seq = []
    dr = cs.count("d")  # number of d's to the right of the current pos
    dcur = (dr - 1) * (dr - 2) // 2  # number of triangles through a node of clique dr
    irun = 0  # number of i's in the last run
    drun = 0  # number of d's in the last run
    # Initialize so a (non-conventional) sequence starting with 'i'
    # cannot raise NameError on the first iteration.
    prevsym = "d"
    for sym in cs:
        if sym == "d":
            drun += 1
            tri = dcur + (dr - 1) * irun  # new triangles at this d
        else:  # sym == "i"
            if prevsym == "d":  # new string of i's
                dcur += (dr - 1) * irun  # accumulate shared shortest paths
                irun = 0  # reset i run counter
                dr -= drun  # reduce number of d's to right
                drun = 0  # reset d run counter
            irun += 1
            tri = dr * (dr - 1) // 2  # new triangles at this i
        seq.append(tri)
        prevsym = sym
    return seq
|
486 |
+
|
487 |
+
|
488 |
+
def cluster_sequence(creation_sequence):
    """
    Return cluster sequence for the given threshold graph creation sequence.
    """
    tri_counts = triangle_sequence(creation_sequence)
    degrees = degree_sequence(creation_sequence)
    clustering = []
    for deg, tri in zip(degrees, tri_counts):
        if deg <= 1:
            # an isolated vertex or a single pair gets clustering 0
            clustering.append(0)
        else:
            possible = (deg * (deg - 1)) // 2
            clustering.append(tri / possible)
    return clustering
|
503 |
+
|
504 |
+
|
505 |
+
def degree_sequence(creation_sequence):
    """
    Return degree sequence for the threshold graph with the given
    creation sequence
    """
    remaining_d = creation_sequence.count("d")  # d's not yet passed
    degrees = []
    for pos, sym in enumerate(creation_sequence):
        if sym == "d":
            # a dominating node links to every earlier node (pos of them)
            # plus every 'd' added after it
            remaining_d -= 1
            degrees.append(remaining_d + pos)
        else:
            # an isolated node links only to the 'd' nodes added later
            degrees.append(remaining_d)
    return degrees
|
520 |
+
|
521 |
+
|
522 |
+
def density(creation_sequence):
    """
    Return the density of the graph with this creation_sequence.
    The density is the fraction of possible edges present.
    """
    n = len(creation_sequence)
    # the degree sum counts every edge twice; n*(n-1) is twice the
    # number of possible edges
    twice_edges = sum(degree_sequence(creation_sequence))
    return twice_edges / (n * (n - 1))
|
532 |
+
|
533 |
+
|
534 |
+
def degree_correlation(creation_sequence):
    """
    Return the degree-degree correlation over all edges.

    Returns 1 for degenerate cases where both the numerator and the
    denominator of the correlation are zero.

    Raises
    ------
    ValueError
        If the denominator is zero while the numerator is not, or if an
        internal consistency check fails.
    """
    cs = creation_sequence
    s1 = 0  # deg_i*deg_j
    s2 = 0  # deg_i^2+deg_j^2
    s3 = 0  # deg_i+deg_j
    m = 0  # number of edges
    rdi = [i for i, sym in enumerate(cs) if sym == "d"]  # index of "d"s
    ds = degree_sequence(cs)
    for i, sym in enumerate(cs):
        if sym == "d":
            if i != rdi[0]:
                # internal consistency check; should never trigger.
                # Raise with the diagnostic in the message instead of
                # printing it and raising a message-less ValueError.
                raise ValueError(f"Logic error in degree_correlation {i} {rdi}")
            rdi.pop(0)
        degi = ds[i]
        # every remaining 'd' index is adjacent to node i
        for dj in rdi:
            degj = ds[dj]
            s1 += degj * degi
            s2 += degi**2 + degj**2
            s3 += degi + degj
            m += 1
    denom = 2 * m * s2 - s3 * s3
    numer = 4 * m * s1 - s3 * s3
    if denom == 0:
        if numer == 0:
            return 1
        raise ValueError(f"Zero Denominator but Numerator is {numer}")
    return numer / denom
|
566 |
+
|
567 |
+
|
568 |
+
def shortest_path(creation_sequence, u, v):
    """
    Find the shortest path between u and v in a
    threshold graph G with the given creation_sequence.

    For an unlabeled creation_sequence, the vertices
    u and v must be integers in (0,len(sequence)) referring
    to the position of the desired vertices in the sequence.

    For a labeled creation_sequence, u and v are labels of vertices.

    Use cs=creation_sequence(degree_sequence,with_labels=True)
    to convert a degree sequence to a creation sequence.

    Returns a list of vertices from u to v.
    Example: if they are neighbors, it returns [u,v]

    Returns -1 when there is no path (paths in a threshold graph
    have length at most 2).
    """
    # Turn input sequence into a labeled creation sequence
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        cs = [(i, creation_sequence[i]) for i in range(len(creation_sequence))]
    elif isinstance(first, tuple):  # labeled creation sequence
        cs = creation_sequence[:]
    elif isinstance(first, int):  # compact creation sequence
        ci = uncompact(creation_sequence)
        cs = [(i, ci[i]) for i in range(len(ci))]
    else:
        raise TypeError("Not a valid creation sequence type")

    verts = [s[0] for s in cs]
    if v not in verts:
        raise ValueError(f"Vertex {v} not in graph from creation_sequence")
    if u not in verts:
        raise ValueError(f"Vertex {u} not in graph from creation_sequence")
    # Done checking
    if u == v:
        return [u]

    uindex = verts.index(u)
    vindex = verts.index(v)
    bigind = max(uindex, vindex)
    # a 'd' node is adjacent to every node added before it, so if the
    # later endpoint is a 'd', u and v are directly connected
    if cs[bigind][1] == "d":
        return [u, v]
    # must be that cs[bigind][1]=='i'
    # any 'd' added after both endpoints is adjacent to both and can
    # serve as the middle vertex of a length-2 path; scan from the end
    cs = cs[bigind:]
    while cs:
        vert = cs.pop()
        if vert[1] == "d":
            return [u, vert[0], v]
    # All after u are type 'i' so no connection
    return -1
|
619 |
+
|
620 |
+
|
621 |
+
def shortest_path_length(creation_sequence, i):
    """
    Return the shortest path length from indicated node to
    every other node for the threshold graph with the given
    creation sequence.
    Node is indicated by index i in creation_sequence unless
    creation_sequence is labeled in which case, i is taken to
    be the label of the node.

    Paths lengths in threshold graphs are at most 2.
    Length to unreachable nodes is set to -1.
    """
    # Normalize the input into an unlabeled creation sequence and a
    # positional index i.
    first = creation_sequence[0]
    if isinstance(first, str):  # creation sequence
        if isinstance(creation_sequence, list):
            cs = creation_sequence[:]
        else:
            cs = list(creation_sequence)
    elif isinstance(first, tuple):  # labeled creation sequence
        cs = [v[1] for v in creation_sequence]
        # translate the label into its position in the sequence
        i = [v[0] for v in creation_sequence].index(i)
    elif isinstance(first, int):  # compact creation sequence
        cs = uncompact(creation_sequence)
    else:
        raise TypeError("Not a valid creation sequence type")

    # Compute
    N = len(cs)
    spl = [2] * N  # length 2 to every node
    spl[i] = 0  # except self which is 0
    # 1 for all d's to the right (a 'd' connects to every earlier node)
    for j in range(i + 1, N):
        if cs[j] == "d":
            spl[j] = 1
    if cs[i] == "d":  # 1 for all nodes to the left
        for j in range(i):
            spl[j] = 1
    # and -1 for any trailing i to indicate unreachable
    # NOTE(review): if node i is itself a trailing 'i' (isolated), this
    # loop overwrites spl[i] (self distance) with -1 while earlier
    # entries stay at 2 -- looks wrong for isolated source nodes;
    # confirm intended behavior before relying on it.
    for j in range(N - 1, 0, -1):
        if cs[j] == "d":
            break
        spl[j] = -1
    return spl
|
665 |
+
|
666 |
+
|
667 |
+
def betweenness_sequence(creation_sequence, normalized=True):
    """
    Return betweenness for the threshold graph with the given creation
    sequence.

    If `normalized` is True (the default), values are scaled by
    ``1/((n-1)*(n-2))`` where ``n = len(creation_sequence)``; otherwise
    the raw (unscaled) betweenness values are returned.
    """
    cs = creation_sequence
    seq = []  # betweenness
    lastchar = "d"  # first node is always a 'd'
    dr = float(cs.count("d"))  # number of d's to the right of current pos
    irun = 0  # number of i's in the last run
    drun = 0  # number of d's in the last run
    dlast = 0.0  # betweenness of last d
    for i, c in enumerate(cs):
        if c == "d":  # cs[i]=="d":
            # betweenness = amt shared with earlier d's and i's
            #             + new isolated nodes covered
            #             + new paths to all previous nodes
            b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr
            drun += 1  # update counter
        else:  # cs[i]=="i":
            if lastchar == "d":  # if this is a new run of i's
                dlast = b  # accumulate betweenness
                dr -= drun  # update number of d's to the right
                drun = 0  # reset d counter
                irun = 0  # reset i counter
            b = 0  # isolated nodes have zero betweenness
            irun += 1  # add another i to the run
        seq.append(float(b))
        lastchar = c

    # normalize by the number of possible shortest paths
    # NOTE(review): this divides by zero when len(cs) < 3; pass
    # normalized=False for graphs with fewer than 3 nodes
    if normalized:
        order = len(cs)
        scale = 1.0 / ((order - 1) * (order - 2))
        seq = [s * scale for s in seq]

    return seq
|
705 |
+
|
706 |
+
|
707 |
+
def eigenvectors(creation_sequence):
    """
    Return a 2-tuple of Laplacian eigenvalues and eigenvectors
    for the threshold network with creation_sequence.
    The first value is a list of eigenvalues.
    The second value is a list of eigenvectors.
    The lists are in the same order so corresponding eigenvectors
    and eigenvalues are in the same position in the two lists.

    Notice that the order of the eigenvalues returned by eigenvalues(cs)
    may not correspond to the order of these eigenvectors.
    """
    # Work on the compact run-length form; runs alternate d,i,d,i,...
    ccs = make_compact(creation_sequence)
    N = sum(ccs)
    vec = [0] * N  # eigenvectors, filled in below
    val = vec[:]  # matching eigenvalues
    # get number of type d nodes to the right (all for first node)
    dr = sum(ccs[::2])

    nn = ccs[0]  # length of the first run ('d' by convention)
    # the normalized all-ones vector always has Laplacian eigenvalue 0
    vec[0] = [1.0 / sqrt(N)] * N
    val[0] = 0
    e = dr
    dr -= nn
    type_d = True
    i = 1
    dd = 1
    # difference vectors within the first run share eigenvalue e
    while dd < nn:
        scale = 1.0 / sqrt(dd * dd + i)
        vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1)
        val[i] = e
        i += 1
        dd += 1
    if len(ccs) == 1:
        return (val, vec)
    # one block of eigenvectors per remaining run, alternating i/d type
    for nn in ccs[1:]:
        # vector separating this run from all earlier nodes
        scale = 1.0 / sqrt(nn * i * (i + nn))
        vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn)
        # find eigenvalue
        type_d = not type_d
        if type_d:
            e = i + dr
            dr -= nn
        else:
            e = dr
        val[i] = e
        st = i  # start index of this run's within-run vectors
        i += 1
        dd = 1
        # within-run difference vectors all share the same eigenvalue
        while dd < nn:
            scale = 1.0 / sqrt(i - st + dd * dd)
            vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1)
            val[i] = e
            i += 1
            dd += 1
    return (val, vec)
|
763 |
+
|
764 |
+
|
765 |
+
def spectral_projection(u, eigenpairs):
    """
    Returns the coefficients of each eigenvector
    in a projection of the vector u onto the normalized
    eigenvectors which are contained in eigenpairs.

    eigenpairs should be a list of two objects.  The
    first is a list of eigenvalues and the second a list
    of eigenvectors.  The eigenvectors should be lists.

    There's not a lot of error checking on lengths of
    arrays, etc. so be careful.
    """
    # coefficient of each eigenvector = its dot product with u
    return [sum(a * b for a, b in zip(ev, u)) for ev in eigenpairs[1]]
|
784 |
+
|
785 |
+
|
786 |
+
def eigenvalues(creation_sequence):
    """
    Return sequence of eigenvalues of the Laplacian of the threshold
    graph for the given creation_sequence.

    Based on the Ferrer's diagram method.  The spectrum is integral
    and is the conjugate of the degree sequence.

    See::

      @Article{degree-merris-1994,
       author = {Russel Merris},
       title = {Degree maximal graphs are Laplacian integral},
       journal = {Linear Algebra Appl.},
       year = {1994},
       volume = {199},
       pages = {381--389},
      }

    """
    degseq = degree_sequence(creation_sequence)
    degseq.sort()
    eiglist = []  # zero is always one eigenvalue
    eig = 0
    row = len(degseq)
    bigdeg = degseq.pop()
    # Sweep the Ferrers diagram of the sorted degree sequence: each
    # emitted `eig` is a column height of the conjugate partition,
    # which gives the Laplacian spectrum.
    while row:
        if bigdeg < row:
            eiglist.append(eig)
            row -= 1
        else:
            eig += 1
            if degseq:
                bigdeg = degseq.pop()
            else:
                bigdeg = 0
    return eiglist
|
823 |
+
|
824 |
+
|
825 |
+
# Threshold graph creation routines
|
826 |
+
|
827 |
+
|
828 |
+
@py_random_state(2)
def random_threshold_sequence(n, p, seed=None):
    """
    Create a random threshold sequence of size n.
    A creation sequence is built by randomly choosing d's with
    probability p and i's with probability 1-p.

    s=nx.random_threshold_sequence(10,0.5)

    returns a threshold sequence of length 10 with equal
    probably of an i or a d at each position.

    A "random" threshold graph can be built with

    G=nx.threshold_graph(s)

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    """
    if not (0 <= p <= 1):
        raise ValueError("p must be in [0,1]")

    # first symbol is always 'd'; each later one is an independent
    # Bernoulli(p) draw
    cs = ["d"]
    cs.extend("d" if seed.random() < p else "i" for _ in range(1, n))
    return cs
|
858 |
+
|
859 |
+
|
860 |
+
# maybe *_d_threshold_sequence routines should
|
861 |
+
# be (or be called from) a single routine with a more descriptive name
|
862 |
+
# and a keyword parameter?
|
863 |
+
def right_d_threshold_sequence(n, m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m).

    The 'd' entries are pushed toward the right end of the sequence.
    The routine returns an unlabeled creation sequence
    for the threshold graph.

    Raises
    ------
    ValueError
        If m exceeds n*(n-1)/2, the maximum number of edges.
    """
    cs = ["d"] + ["i"] * (n - 1)  # sequence with n - 1 isolated nodes

    # m < n : not enough edges for a connected graph, make disconnected
    if m < n:
        cs[m] = "d"
        return cs

    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")

    # connected case m >= n-1: greedily flip rightmost 'i's to 'd'
    # (local renamed from `sum` to avoid shadowing the builtin)
    ind = n - 1
    edge_count = n - 1
    while edge_count < m:
        cs[ind] = "d"
        ind -= 1
        edge_count += ind
    # place the final 'd' so the edge total lands exactly on m
    ind = m - (edge_count - ind)
    cs[ind] = "d"
    return cs
|
895 |
+
|
896 |
+
|
897 |
+
def left_d_threshold_sequence(n, m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m).

    The 'd' entries are pushed toward the left end of the sequence.
    The routine returns an unlabeled creation sequence
    for the threshold graph.

    Raises
    ------
    ValueError
        If m exceeds n*(n-1)/2, the maximum number of edges.
    """
    cs = ["d"] + ["i"] * (n - 1)  # sequence with n - 1 isolated nodes

    # m < n : not enough edges for a connected graph, make disconnected
    if m < n:
        cs[m] = "d"
        return cs

    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")

    # connected case when m >= n - 1: flip leftmost 'i's to 'd'
    # (local renamed from `sum` to avoid shadowing the builtin)
    cs[n - 1] = "d"
    edge_count = n - 1
    ind = 1
    while edge_count < m:
        cs[ind] = "d"
        edge_count += ind
        ind += 1
    if edge_count > m:  # be sure not to change the first vertex
        # overshoot correction: turn one 'd' back into an 'i'
        cs[edge_count - m] = "i"
    return cs
|
930 |
+
|
931 |
+
|
932 |
+
@py_random_state(3)
def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
    """
    Perform a "swap" operation on a threshold sequence.

    The swap preserves the number of nodes and edges
    in the graph for the given sequence.
    The resulting sequence is still a threshold sequence.

    Perform one split and one combine operation on the
    'd's of a creation sequence for a threshold graph.
    This operation maintains the number of nodes and edges
    in the graph, but shifts the edges from node to node
    maintaining the threshold quality of the graph.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Note: `cs` is modified in place and also returned.
    """
    # preprocess the creation sequence
    # NOTE(review): indices come from enumerate(cs[1:-1]) so they are
    # shifted left by one relative to cs -- confirm this offset is
    # intended before modifying this routine
    dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"]
    # split: replace one 'd' by two 'd's at positions summing to it
    if seed.random() < p_split:
        choice = seed.choice(dlist)
        split_to = seed.choice(range(choice))
        flip_side = choice - split_to
        # only split onto two distinct positions currently holding 'i'
        if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i":
            cs[choice] = "i"
            cs[split_to] = "d"
            cs[flip_side] = "d"
            dlist.remove(choice)
            # don't add or combine may reverse this action
            # dlist.extend([split_to,flip_side])
    # combine: replace two 'd's by one 'd' at the summed position
    if seed.random() < p_combine and dlist:
        first_choice = seed.choice(dlist)
        second_choice = seed.choice(dlist)
        target = first_choice + second_choice
        # reject out-of-range targets, occupied targets, and self-pairs
        if target >= len(cs) or cs[target] == "d" or first_choice == second_choice:
            return cs
        # OK to combine
        cs[first_choice] = "i"
        cs[second_choice] = "i"
        cs[target] = "d"

    return cs
|
llmeval-env/lib/python3.10/site-packages/networkx/algorithms/time_dependent.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Time dependent algorithms."""
|
2 |
+
|
3 |
+
import networkx as nx
|
4 |
+
from networkx.utils import not_implemented_for
|
5 |
+
|
6 |
+
__all__ = ["cd_index"]
|
7 |
+
|
8 |
+
|
9 |
+
@not_implemented_for("undirected")
@not_implemented_for("multigraph")
@nx._dispatchable(node_attrs={"time": None, "weight": 1})
def cd_index(G, node, time_delta, *, time="time", weight=None):
    r"""Compute the CD index for `node` within the graph `G`.

    Calculates the CD index for the given node of the graph,
    considering only its predecessors who have the `time` attribute
    smaller than or equal to the `time` attribute of the `node`
    plus `time_delta`.

    Parameters
    ----------
    G : graph
       A directed networkx graph whose nodes have `time` attributes and optionally
       `weight` attributes (if a weight is not given, it is considered 1).
    node : node
       The node for which the CD index is calculated.
    time_delta : numeric or timedelta
       Amount of time after the `time` attribute of the `node`. The value of
       `time_delta` must support comparison with the `time` node attribute. For
       example, if the `time` attribute of the nodes are `datetime.datetime`
       objects, then `time_delta` should be a `datetime.timedelta` object.
    time : string (Optional, default is "time")
        The name of the node attribute that will be used for the calculations.
    weight : string (Optional, default is None)
        The name of the node attribute used as weight.

    Returns
    -------
    float
       The CD index calculated for the node `node` within the graph `G`.

    Raises
    ------
    NetworkXError
       If not all nodes have a `time` attribute or
       `time_delta` and `time` attribute types are not compatible or
       `n` equals 0.

    NetworkXNotImplemented
        If `G` is a non-directed graph or a multigraph.

    Examples
    --------
    >>> from datetime import datetime, timedelta
    >>> G = nx.DiGraph()
    >>> nodes = {
    ...     1: {"time": datetime(2015, 1, 1)},
    ...     2: {"time": datetime(2012, 1, 1), "weight": 4},
    ...     3: {"time": datetime(2010, 1, 1)},
    ...     4: {"time": datetime(2008, 1, 1)},
    ...     5: {"time": datetime(2014, 1, 1)},
    ... }
    >>> G.add_nodes_from([(n, nodes[n]) for n in nodes])
    >>> edges = [(1, 3), (1, 4), (2, 3), (3, 4), (3, 5)]
    >>> G.add_edges_from(edges)
    >>> delta = timedelta(days=5 * 365)
    >>> nx.cd_index(G, 3, time_delta=delta, time="time")
    0.5
    >>> nx.cd_index(G, 3, time_delta=delta, time="time", weight="weight")
    0.12

    Integers can also be used for the time values:
    >>> node_times = {1: 2015, 2: 2012, 3: 2010, 4: 2008, 5: 2014}
    >>> nx.set_node_attributes(G, node_times, "new_time")
    >>> nx.cd_index(G, 3, time_delta=4, time="new_time")
    0.5
    >>> nx.cd_index(G, 3, time_delta=4, time="new_time", weight="weight")
    0.12

    Notes
    -----
    This method implements the algorithm for calculating the CD index,
    as described in the paper by Funk and Owen-Smith [1]_. The CD index
    is used in order to check how consolidating or destabilizing a patent
    is, hence the nodes of the graph represent patents and the edges show
    the citations between these patents. The mathematical model is given
    below:

    .. math::
        CD_{t}=\frac{1}{n_{t}}\sum_{i=1}^{n}\frac{-2f_{it}b_{it}+f_{it}}{w_{it}},

    where `f_{it}` equals 1 if `i` cites the focal patent else 0, `b_{it}` equals
    1 if `i` cites any of the focal patents successors else 0, `n_{t}` is the number
    of forward citations in `i` and `w_{it}` is a matrix of weight for patent `i`
    at time `t`.

    The `datetime.timedelta` package can lead to off-by-one issues when converting
    from years to days. In the example above `timedelta(days=5 * 365)` looks like
    5 years, but it isn't because of leap year days. So it gives the same result
    as `timedelta(days=4 * 365)`. But using `timedelta(days=5 * 365 + 1)` gives
    a 5 year delta **for this choice of years** but may not if the 5 year gap has
    more than 1 leap year. To avoid these issues, use integers to represent years,
    or be very careful when you convert units of time.

    References
    ----------
    .. [1] Funk, Russell J., and Jason Owen-Smith.
       "A dynamic network measure of technological change."
       Management science 63, no. 3 (2017): 791-817.
       http://russellfunk.org/cdindex/static/papers/funk_ms_2017.pdf

    """
    if not all(time in G.nodes[n] for n in G):
        raise nx.NetworkXError("Not all nodes have a 'time' attribute.")

    try:
        # get target_date
        target_date = G.nodes[node][time] + time_delta
        # keep the predecessors that existed before the target date
        pred = {i for i in G.pred[node] if G.nodes[i][time] <= target_date}
    except TypeError as err:
        # Narrowed from a bare `except:` -- incompatible "+" or "<="
        # between the node 'time' values and `time_delta` raises
        # TypeError; a bare except would also swallow
        # KeyboardInterrupt/SystemExit and hide unrelated bugs.
        raise nx.NetworkXError(
            "Addition and comparison are not supported between 'time_delta' "
            "and 'time' types."
        ) from err

    # -1 if any edge between node's predecessors and node's successors, else 1
    b = [-1 if any(j in G[i] for j in G[node]) else 1 for i in pred]

    # n is size of the union of the focal node's predecessors and its successors' predecessors
    n = len(pred.union(*(G.pred[s].keys() - {node} for s in G[node])))
    if n == 0:
        raise nx.NetworkXError("The cd index cannot be defined.")

    # calculate cd index
    if weight is None:
        return round(sum(b) / n, 2)
    else:
        # If a node has the specified weight attribute, its weight is used
        # in the calculation; otherwise, a weight of 1 is assumed for it.
        weights = [G.nodes[i].get(weight, 1) for i in pred]
        return round(sum(bi / wt for bi, wt in zip(b, weights)) / n, 2)
|