diff --git a/ckpts/universal/global_step80/zero/15.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step80/zero/15.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..38849cb0179870492638dfb0d4ace532bd01d082 --- /dev/null +++ b/ckpts/universal/global_step80/zero/15.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a78742781f162ff89bf7d1fae2fcf225c794c423b9613c852ec8384ca8b46871 +size 9293 diff --git a/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..b328412346557f11a760f28b78a413fb094f817f --- /dev/null +++ b/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0675b13c77c2f6db2c239e8e09d173d8c8b8bc8793dd23f5be569617b2763d64 +size 33555627 diff --git a/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..6c614338e605c0d8f30d6dd113f16629d2b2e5a8 --- /dev/null +++ b/ckpts/universal/global_step80/zero/24.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b343ff4efa93c020821c466aa5f4e83dbe206cfff7c7d1424935196c68aeb1ff +size 33555533 diff --git a/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..0379f1c6b6e8032595bd9a43cac83d35d57df2dc --- /dev/null +++ b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1489f8bf237e2f571ccf5baab7f629ea3fd3b0f17a19fa10f72315026af7e906 +size 33555612 diff --git a/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..0278fc777a54a707f1de44e19d3bad7523c36510 --- /dev/null +++ b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e133114747f59f6cb148f4ed5bda17900cc4b97e1636d44f1e1a8e6d808649e +size 33555627 diff --git a/ckpts/universal/global_step80/zero/5.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/5.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..861863b4cbb0ac2156702946c3f470f81b096f60 --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eea12b6da32f05e353108a0fb7feabe5f22855e56e57fd51c1a21379e1727d5b +size 50332828 diff --git a/ckpts/universal/global_step80/zero/5.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/5.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..baa0604d40088b2233f4bf1f53beea15dfeb3940 --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2a87f789664eb4f9d87f13318194365f6a888e195752c96d9520a23c67564dd0 +size 50332843 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d9888609cbc43d4ba2121fcd0feda0985d1aebd --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.assortativity.connectivity import * +from networkx.algorithms.assortativity.correlation import * +from networkx.algorithms.assortativity.mixing import * +from networkx.algorithms.assortativity.neighbor_degree import * +from networkx.algorithms.assortativity.pairs import * diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e3b72d560b3f28c6b666fdc71961a521edd36ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68d0a0651400e3b08970b42dc88848bf12ef7c61 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c91fbbbb91fb2b0171e30dced4568c08293d5d7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9159c4323f4050f3345269fea5ad943e647cdf8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..589b4ac12cd8037434d74b583dc4702a58e750a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6bc461ea989367e64a34bad7b24aa300d482eca9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/connectivity.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3fde0da68a1990da29ced6996620d709c52c13d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/connectivity.py
@@ -0,0 +1,122 @@
+from collections import defaultdict
+
+import networkx as nx
+
+__all__ = ["average_degree_connectivity"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def average_degree_connectivity(
+    G, source="in+out", target="in+out", nodes=None, weight=None
+):
+    r"""Compute the average degree connectivity of graph.
+
+    The average degree connectivity is the average nearest neighbor degree of
+    nodes with degree k. For weighted graphs, an analogous measure can
+    be computed using the weighted average neighbors degree defined in
+    [1]_, for a node `i`, as
+
+    .. math::
+
+        k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
+
+    where `s_i` is the weighted degree of node `i`,
+    `w_{ij}` is the weight of the edge that links `i` and `j`,
+    and `N(i)` are the neighbors of node `i`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : "in"|"out"|"in+out" (default:"in+out")
+        Directed graphs only. Use "in"- or "out"-degree for source node.
+
+    target : "in"|"out"|"in+out" (default:"in+out")
+        Directed graphs only. Use "in"- or "out"-degree for target node.
+
+    nodes : list or iterable (optional)
+        Compute neighbor connectivity for these nodes. The default is all
+        nodes.
+
+    weight : string or None, optional (default=None)
+        The edge attribute that holds the numerical value used as a weight.
+        If None, then each edge has weight 1.
+
+    Returns
+    -------
+    d : dict
+        A dictionary keyed by degree k with the value of average connectivity.
+
+    Raises
+    ------
+    NetworkXError
+        If either `source` or `target` are not one of 'in',
+        'out', or 'in+out'.
+        If either `source` or `target` is passed for an undirected graph.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> G.edges[1, 2]["weight"] = 3
+    >>> nx.average_degree_connectivity(G)
+    {1: 2.0, 2: 1.5}
+    >>> nx.average_degree_connectivity(G, weight="weight")
+    {1: 2.0, 2: 1.75}
+
+    See Also
+    --------
+    average_neighbor_degree
+
+    References
+    ----------
+    .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
+       "The architecture of complex weighted networks".
+       PNAS 101 (11): 3747–3752 (2004).
+    """
+    # First, determine the type of neighbors and the type of degree to use.
+    if G.is_directed():
+        if source not in ("in", "out", "in+out"):
+            raise nx.NetworkXError('source must be one of "in", "out", or "in+out"')
+        if target not in ("in", "out", "in+out"):
+            raise nx.NetworkXError('target must be one of "in", "out", or "in+out"')
+        direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree}
+        neighbor_funcs = {
+            "out": G.successors,
+            "in": G.predecessors,
+            "in+out": G.neighbors,
+        }
+        source_degree = direction[source]
+        target_degree = direction[target]
+        neighbors = neighbor_funcs[source]
+        # `reverse` indicates whether to look at the in-edge when
+        # computing the weight of an edge.
+        reverse = source == "in"
+    else:
+        if source != "in+out" or target != "in+out":
+            raise nx.NetworkXError(
+                "source and target arguments are only supported for directed graphs"
+            )
+        source_degree = G.degree
+        target_degree = G.degree
+        neighbors = G.neighbors
+        reverse = False
+    dsum = defaultdict(int)
+    dnorm = defaultdict(int)
+    # Check if `source_nodes` is actually a single node in the graph.
+    source_nodes = source_degree(nodes)
+    if nodes in G:
+        source_nodes = [(nodes, source_degree(nodes))]
+    for n, k in source_nodes:
+        nbrdeg = target_degree(neighbors(n))
+        if weight is None:
+            s = sum(d for n, d in nbrdeg)
+        else:  # weight nbr degree by weight of (n,nbr) edge
+            if reverse:
+                s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg)
+            else:
+                s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg)
+        dnorm[k] += source_degree(n, weight=weight)
+        dsum[k] += s
+
+    # normalize
+    return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()}
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/correlation.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/correlation.py
new file mode 100644
index 0000000000000000000000000000000000000000..170d219a5d4ba92d2c1d9933768f547e4750e4ba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/correlation.py
@@ -0,0 +1,302 @@
+"""Node assortativity coefficients and correlation measures.
+"""
+import networkx as nx
+from networkx.algorithms.assortativity.mixing import (
+    attribute_mixing_matrix,
+    degree_mixing_matrix,
+)
+from networkx.algorithms.assortativity.pairs import node_degree_xy
+
+__all__ = [
+    "degree_pearson_correlation_coefficient",
+    "degree_assortativity_coefficient",
+    "attribute_assortativity_coefficient",
+    "numeric_assortativity_coefficient",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None):
+    """Compute degree assortativity of graph.
+
+    Assortativity measures the similarity of connections
+    in the graph with respect to the node degree.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    x: string ('in','out')
+       The degree type for source node (directed graphs only).
+
+    y: string ('in','out')
+       The degree type for target node (directed graphs only).
+
+    weight: string or None, optional (default=None)
+       The edge attribute that holds the numerical value used
+       as a weight. If None, then each edge has weight 1.
+       The degree is the sum of the edge weights adjacent to the node.
+
+    nodes: list or iterable (optional)
+        Compute degree assortativity only for nodes in container.
+        The default is all nodes.
+
+    Returns
+    -------
+    r : float
+       Assortativity of graph by degree.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> r = nx.degree_assortativity_coefficient(G)
+    >>> print(f"{r:3.1f}")
+    -0.5
+
+    See Also
+    --------
+    attribute_assortativity_coefficient
+    numeric_assortativity_coefficient
+    degree_mixing_dict
+    degree_mixing_matrix
+
+    Notes
+    -----
+    This computes Eq. (21) in Ref. [1]_ , where e is the joint
+    probability distribution (mixing matrix) of the degrees. If G is
+    directed then the matrix e is the joint probability of the
+    user-specified degree type for the source and target.
+
+    References
+    ----------
+    .. [1] M. E. J. Newman, Mixing patterns in networks,
+       Physical Review E, 67 026126, 2003
+    .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + if nodes is None: + nodes = G.nodes + + degrees = None + + if G.is_directed(): + indeg = ( + {d for _, d in G.in_degree(nodes, weight=weight)} + if "in" in (x, y) + else set() + ) + outdeg = ( + {d for _, d in G.out_degree(nodes, weight=weight)} + if "out" in (x, y) + else set() + ) + degrees = set.union(indeg, outdeg) + else: + degrees = {d for _, d in G.degree(nodes, weight=weight)} + + mapping = {d: i for i, d in enumerate(degrees)} + M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping) + + return _numeric_ac(M, mapping=mapping) + + +@nx._dispatchable(edge_attrs="weight") +def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + This is the same as degree_assortativity_coefficient but uses the + potentially faster scipy.stats.pearsonr function. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute pearson correlation of degrees only for specified nodes. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_pearson_correlation_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + Notes + ----- + This calls scipy.stats.pearsonr. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + import scipy as sp + + xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + x, y = zip(*xy) + return float(sp.stats.pearsonr(x, y)[0]) + + +@nx._dispatchable(node_attrs="attribute") +def attribute_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key + + nodes: list or iterable (optional) + Compute attribute assortativity for nodes in container. + The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.attribute_assortativity_coefficient(G, "color")) + 1.0 + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)), + where M is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. 
Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + M = attribute_mixing_matrix(G, attribute, nodes) + return attribute_ac(M) + + +@nx._dispatchable(node_attrs="attribute") +def numeric_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for numerical node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given numeric attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Compute numeric assortativity only for attributes of nodes in + container. The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], size=2) + >>> G.add_nodes_from([2, 3], size=3) + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.numeric_assortativity_coefficient(G, "size")) + 1.0 + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation + coefficient of the specified (scalar valued) attribute across edges. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + """ + if nodes is None: + nodes = G.nodes + vals = {G.nodes[n][attribute] for n in nodes} + mapping = {d: i for i, d in enumerate(vals)} + M = attribute_mixing_matrix(G, attribute, nodes, mapping) + return _numeric_ac(M, mapping) + + +def attribute_ac(M): + """Compute assortativity for attribute matrix M. + + Parameters + ---------- + M : numpy.ndarray + 2D ndarray representing the attribute mixing matrix. + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)), + where e is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + if M.sum() != 1.0: + M = M / M.sum() + s = (M @ M).sum() + t = M.trace() + r = (t - s) / (1 - s) + return float(r) + + +def _numeric_ac(M, mapping): + # M is a 2D numpy array + # numeric assortativity coefficient, pearsonr + import numpy as np + + if M.sum() != 1.0: + M = M / M.sum() + x = np.array(list(mapping.keys())) + y = x # x and y have the same support + idx = list(mapping.values()) + a = M.sum(axis=0) + b = M.sum(axis=1) + vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2 + varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2 + xy = np.outer(x, y) + ab = np.outer(a[idx], b[idx]) + return float((xy * (M - ab)).sum() / np.sqrt(vara * varb)) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/mixing.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/mixing.py new file mode 100644 index 0000000000000000000000000000000000000000..852ad82a4f6b9710571c19159786e94260c50330 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/mixing.py @@ -0,0 +1,254 @@ +""" +Mixing matrices for node attributes and degree. 
+"""
+import networkx as nx
+from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy
+from networkx.utils import dict_to_numpy_array
+
+__all__ = [
+    "attribute_mixing_matrix",
+    "attribute_mixing_dict",
+    "degree_mixing_matrix",
+    "degree_mixing_dict",
+    "mixing_dict",
+]
+
+
+@nx._dispatchable(node_attrs="attribute")
+def attribute_mixing_dict(G, attribute, nodes=None, normalized=False):
+    """Returns dictionary representation of mixing matrix for attribute.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX graph object.
+
+    attribute : string
+        Node attribute key.
+
+    nodes: list or iterable (optional)
+        Use nodes in container to build the dict. The default is all nodes.
+
+    normalized : bool (default=False)
+        Return counts if False or probabilities if True.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_nodes_from([0, 1], color="red")
+    >>> G.add_nodes_from([2, 3], color="blue")
+    >>> G.add_edge(1, 3)
+    >>> d = nx.attribute_mixing_dict(G, "color")
+    >>> print(d["red"]["blue"])
+    1
+    >>> print(d["blue"]["red"])  # d symmetric for undirected graphs
+    1
+
+    Returns
+    -------
+    d : dictionary
+        Counts or joint probability of occurrence of attribute pairs.
+    """
+    xy_iter = node_attribute_xy(G, attribute, nodes)
+    return mixing_dict(xy_iter, normalized=normalized)
+
+
+@nx._dispatchable(node_attrs="attribute")
+def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True):
+    """Returns mixing matrix for attribute.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX graph object.
+
+    attribute : string
+        Node attribute key.
+
+    nodes: list or iterable (optional)
+        Use only nodes in container to build the matrix. The default is
+        all nodes.
+
+    mapping : dictionary, optional
+        Mapping from node attribute to integer index in matrix.
+        If not specified, an arbitrary ordering will be used.
+
+    normalized : bool (default=True)
+        Return counts if False or probabilities if True.
+
+    Returns
+    -------
+    m: numpy array
+        Counts or joint probability of occurrence of attribute pairs.
+
+    Notes
+    -----
+    If each node has a unique attribute value, the unnormalized mixing matrix
+    will be equal to the adjacency matrix. To get a denser mixing matrix,
+    the rounding can be performed to form groups of nodes with equal values.
+    For example, the exact height of persons in cm (180.79155222, 163.9080892,
+    163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163,
+    163, 168, 168, ...).
+
+    Definitions of attribute mixing matrix vary on whether the matrix
+    should include rows for attribute values that don't arise. Here we
+    do not include such empty-rows. But you can force them to appear
+    by inputting a `mapping` that includes those values.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> gender = {0: "male", 1: "female", 2: "female"}
+    >>> nx.set_node_attributes(G, gender, "gender")
+    >>> mapping = {"male": 0, "female": 1}
+    >>> mix_mat = nx.attribute_mixing_matrix(G, "gender", mapping=mapping)
+    >>> mix_mat
+    array([[0.  , 0.25],
+           [0.25, 0.5 ]])
+    """
+    d = attribute_mixing_dict(G, attribute, nodes)
+    a = dict_to_numpy_array(d, mapping=mapping)
+    if normalized:
+        a = a / a.sum()
+    return a
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False):
+    """Returns dictionary representation of mixing matrix for degree.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX graph object.
+
+    x: string ('in','out')
+       The degree type for source node (directed graphs only).
+
+    y: string ('in','out')
+       The degree type for target node (directed graphs only).
+
+    weight: string or None, optional (default=None)
+       The edge attribute that holds the numerical value used
+       as a weight. If None, then each edge has weight 1.
+       The degree is the sum of the edge weights adjacent to the node.
+
+    normalized : bool (default=False)
+        Return counts if False or probabilities if True.
+
+    Returns
+    -------
+    d: dictionary
+       Counts or joint probability of occurrence of degree pairs.
+    """
+    xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+    return mixing_dict(xy_iter, normalized=normalized)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_mixing_matrix(
+    G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None
+):
+    """Returns mixing matrix for degree.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX graph object.
+
+    x: string ('in','out')
+       The degree type for source node (directed graphs only).
+
+    y: string ('in','out')
+       The degree type for target node (directed graphs only).
+
+    nodes: list or iterable (optional)
+        Build the matrix using only nodes in container.
+        The default is all nodes.
+
+    weight: string or None, optional (default=None)
+       The edge attribute that holds the numerical value used
+       as a weight. If None, then each edge has weight 1.
+       The degree is the sum of the edge weights adjacent to the node.
+
+    normalized : bool (default=True)
+       Return counts if False or probabilities if True.
+
+    mapping : dictionary, optional
+       Mapping from node degree to integer index in matrix.
+       If not specified, an arbitrary ordering will be used.
+
+    Returns
+    -------
+    m: numpy array
+       Counts, or joint probability, of occurrence of node degree pairs.
+
+    Notes
+    -----
+    Definitions of degree mixing matrix vary on whether the matrix
+    should include rows for degree values that don't arise. Here we
+    do not include such empty-rows. But you can force them to appear
+    by inputting a `mapping` that includes those values. See examples.
+
+    Examples
+    --------
+    >>> G = nx.star_graph(3)
+    >>> mix_mat = nx.degree_mixing_matrix(G)
+    >>> mix_mat
+    array([[0. , 0.5],
+           [0.5, 0. ]])
+
+    If you want every possible degree to appear as a row, even if no nodes
+    have that degree, use `mapping` as follows,
+
+    >>> max_degree = max(deg for n, deg in G.degree)
+    >>> mapping = {x: x for x in range(max_degree + 1)}  # identity mapping
+    >>> mix_mat = nx.degree_mixing_matrix(G, mapping=mapping)
+    >>> mix_mat
+    array([[0. , 0. , 0. , 0. ],
+           [0. , 0. , 0. , 0.5],
+           [0. , 0. , 0. , 0. ],
+           [0. , 0.5, 0. , 0. ]])
+    """
+    d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
+    a = dict_to_numpy_array(d, mapping=mapping)
+    if normalized:
+        a = a / a.sum()
+    return a
+
+
+def mixing_dict(xy, normalized=False):
+    """Returns a dictionary representation of mixing matrix.
+
+    Parameters
+    ----------
+    xy : list or container of two-tuples
+       Pairs of (x,y) items.
+
+    normalized : bool (default=False)
+       Return counts if False or probabilities if True.
+
+    Returns
+    -------
+    d: dictionary
+       Counts or joint probability of occurrence of values in xy.
+ """ + d = {} + psum = 0.0 + for x, y in xy: + if x not in d: + d[x] = {} + if y not in d: + d[y] = {} + v = d[x].get(y, 0) + d[x][y] = v + 1 + psum += 1 + + if normalized: + for _, jdict in d.items(): + for j in jdict: + jdict[j] /= psum + return d diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..6488d041a8bdc93ef3591283781b81bcf7f47dab --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/neighbor_degree.py @@ -0,0 +1,160 @@ +import networkx as nx + +__all__ = ["average_neighbor_degree"] + + +@nx._dispatchable(edge_attrs="weight") +def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None): + r"""Returns the average degree of the neighborhood of each node. + + In an undirected graph, the neighborhood `N(i)` of node `i` contains the + nodes that are connected to `i` by an edge. + + For directed graphs, `N(i)` is defined according to the parameter `source`: + + - if source is 'in', then `N(i)` consists of predecessors of node `i`. + - if source is 'out', then `N(i)` consists of successors of node `i`. + - if source is 'in+out', then `N(i)` is both predecessors and successors. + + The average neighborhood degree of a node `i` is + + .. math:: + + k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j + + where `N(i)` are the neighbors of node `i` and `k_j` is + the degree of node `j` which belongs to `N(i)`. For weighted + graphs, an analogous measure can be defined [1]_, + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, `w_{ij}` + is the weight of the edge that links `i` and `j` and + `N(i)` are the neighbors of node `i`. + + + Parameters + ---------- + G : NetworkX graph + + source : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-neighbors of source node. + + target : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-degree for target node. + + nodes : list or iterable, optional (default=G.nodes) + Compute neighbor degree only for specified nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d: dict + A dictionary keyed by node to the average degree of its neighbors. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[0, 1]["weight"] = 5 + >>> G.edges[2, 3]["weight"] = 3 + + >>> nx.average_neighbor_degree(G) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0} + >>> nx.average_neighbor_degree(G, weight="weight") + {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0} + + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.average_neighbor_degree(G, source="in", target="in") + {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0} + + >>> nx.average_neighbor_degree(G, source="out", target="out") + {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0} + + See Also + -------- + average_degree_connectivity + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". 
+       PNAS 101 (11): 3747–3752 (2004).
+    """
+    if G.is_directed():
+        if source == "in":
+            source_degree = G.in_degree
+        elif source == "out":
+            source_degree = G.out_degree
+        elif source == "in+out":
+            source_degree = G.degree
+        else:
+            raise nx.NetworkXError(
+                f"source argument {source} must be 'in', 'out' or 'in+out'"
+            )
+
+        if target == "in":
+            target_degree = G.in_degree
+        elif target == "out":
+            target_degree = G.out_degree
+        elif target == "in+out":
+            target_degree = G.degree
+        else:
+            raise nx.NetworkXError(
+                f"target argument {target} must be 'in', 'out' or 'in+out'"
+            )
+    else:
+        if source != "out" or target != "out":
+            raise nx.NetworkXError(
+                "source and target arguments are only supported for directed graphs"
+            )
+        source_degree = target_degree = G.degree
+
+    # precompute target degrees -- should *not* be weighted degree
+    t_deg = dict(target_degree())
+
+    # Set up both predecessor and successor neighbor dicts leaving empty if not needed
+    G_P = G_S = {n: {} for n in G}
+    if G.is_directed():
+        # "in" or "in+out" cases: G_P contains predecessors
+        if "in" in source:
+            G_P = G.pred
+        # "out" or "in+out" cases: G_S contains successors
+        if "out" in source:
+            G_S = G.succ
+    else:
+        # undirected: leave G_P empty but G_S is the adjacency
+        G_S = G.adj
+
+    # Main loop: Compute average degree of neighbors
+    avg = {}
+    for n, deg in source_degree(nodes, weight=weight):
+        # handle degree zero average
+        if deg == 0:
+            avg[n] = 0.0
+            continue
+
+        # we sum over both G_P and G_S, but one of the two is usually empty.
+        if weight is None:
+            avg[n] = (
+                sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n])
+            ) / deg
+        else:
+            avg[n] = (
+                sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items())
+                + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items())
+            ) / deg
+    return avg
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/pairs.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/pairs.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a1d6f8e1df99a0159e030156385df3c1322a73a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/pairs.py
@@ -0,0 +1,118 @@
+"""Generators of x-y pairs of node data."""
+import networkx as nx
+
+__all__ = ["node_attribute_xy", "node_degree_xy"]
+
+
+@nx._dispatchable(node_attrs="attribute")
+def node_attribute_xy(G, attribute, nodes=None):
+    """Returns iterator of node-attribute pairs for all edges in G.
+
+    Parameters
+    ----------
+    G: NetworkX graph
+
+    attribute: key
+       The node attribute key.
+
+    nodes: list or iterable (optional)
+        Use only edges that are incident to specified nodes.
+        The default is all nodes.
+
+    Returns
+    -------
+    (x, y): 2-tuple
+        Generates 2-tuple of (attribute, attribute) values.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_node(1, color="red")
+    >>> G.add_node(2, color="blue")
+    >>> G.add_edge(1, 2)
+    >>> list(nx.node_attribute_xy(G, "color"))
+    [('red', 'blue')]
+
+    Notes
+    -----
+    For undirected graphs each edge is produced twice, once for each edge
+    representation (u, v) and (v, u), with the exception of self-loop edges
+    which only appear once.
+    """
+    if nodes is None:
+        nodes = set(G)
+    else:
+        nodes = set(nodes)
+    Gnodes = G.nodes
+    for u, nbrsdict in G.adjacency():
+        if u not in nodes:
+            continue
+        uattr = Gnodes[u].get(attribute, None)
+        if G.is_multigraph():
+            for v, keys in nbrsdict.items():
+                vattr = Gnodes[v].get(attribute, None)
+                for _ in keys:
+                    yield (uattr, vattr)
+        else:
+            for v in nbrsdict:
+                vattr = Gnodes[v].get(attribute, None)
+                yield (uattr, vattr)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def node_degree_xy(G, x="out", y="in", weight=None, nodes=None):
+    """Generate node degree-degree pairs for edges in G.
+
+    Parameters
+    ----------
+    G: NetworkX graph
+
+    x: string ('in','out')
+       The degree type for source node (directed graphs only).
+
+    y: string ('in','out')
+       The degree type for target node (directed graphs only).
+
+    weight: string or None, optional (default=None)
+       The edge attribute that holds the numerical value used
+       as a weight. If None, then each edge has weight 1.
+       The degree is the sum of the edge weights adjacent to the node.
+
+    nodes: list or iterable (optional)
+        Use only edges that are adjacent to specified nodes.
+        The default is all nodes.
+
+    Returns
+    -------
+    (x, y): 2-tuple
+        Generates 2-tuple of (degree, degree) values.
+
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge(1, 2)
+    >>> list(nx.node_degree_xy(G, x="out", y="in"))
+    [(1, 1)]
+    >>> list(nx.node_degree_xy(G, x="in", y="out"))
+    [(0, 0)]
+
+    Notes
+    -----
+    For undirected graphs each edge is produced twice, once for each edge
+    representation (u, v) and (v, u), with the exception of self-loop edges
+    which only appear once.
+    """
+    nodes = set(G) if nodes is None else set(nodes)
+    if G.is_directed():
+        direction = {"out": G.out_degree, "in": G.in_degree}
+        xdeg = direction[x]
+        ydeg = direction[y]
+    else:
+        xdeg = ydeg = G.degree
+
+    for u, degu in xdeg(nodes, weight=weight):
+        # use G.edges to treat multigraphs correctly
+        neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)
+        for _, degv in ydeg(neighbors, weight=weight):
+            yield degu, degv
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9285052325e664537a7704a0ebf471ddf0f3aa48
Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fbe3a973c05ab59fbc249b91fccfc4d3429c513c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc
b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbd5ee9dc1246c6ba2adc81ddc265fa70a742b7b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31def26b17bce9a3375639e4cd951995d25ee83f Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ec169f3517d1c25ad9c0e1fb438430f0c0344b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06d633be78155e5c4c826bc4ac6788fa3c1f2e32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db7053e708f01b82fc36abe5f78189e6d94808c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/base_test.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/base_test.py new file mode 100644 index 0000000000000000000000000000000000000000..46d6300649d3b4658a7263cad04354988b4da312 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/base_test.py @@ -0,0 +1,81 @@ +import networkx as nx + + +class BaseTestAttributeMixing: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_nodes_from([0, 1], fish="one") + G.add_nodes_from([2, 3], fish="two") + G.add_nodes_from([4], fish="red") + G.add_nodes_from([5], fish="blue") + G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.G = G + + D = nx.DiGraph() + D.add_nodes_from([0, 1], fish="one") + D.add_nodes_from([2, 3], fish="two") + D.add_nodes_from([4], fish="red") + D.add_nodes_from([5], fish="blue") + D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.D = D + + M = nx.MultiGraph() + M.add_nodes_from([0, 1], fish="one") + M.add_nodes_from([2, 3], fish="two") + 
M.add_nodes_from([4], fish="red") + M.add_nodes_from([5], fish="blue") + M.add_edges_from([(0, 1), (0, 1), (2, 3)]) + cls.M = M + + S = nx.Graph() + S.add_nodes_from([0, 1], fish="one") + S.add_nodes_from([2, 3], fish="two") + S.add_nodes_from([4], fish="red") + S.add_nodes_from([5], fish="blue") + S.add_edge(0, 0) + S.add_edge(2, 2) + cls.S = S + + N = nx.Graph() + N.add_nodes_from([0, 1], margin=-2) + N.add_nodes_from([2, 3], margin=-2) + N.add_nodes_from([4], margin=-3) + N.add_nodes_from([5], margin=-4) + N.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.N = N + + F = nx.Graph() + F.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + F.add_edge(0, 2, weight=1) + nx.set_node_attributes(F, dict(F.degree(weight="weight")), "margin") + cls.F = F + + K = nx.Graph() + K.add_nodes_from([1, 2], margin=-1) + K.add_nodes_from([3], margin=1) + K.add_nodes_from([4], margin=2) + K.add_edges_from([(3, 4), (1, 2), (1, 3)]) + cls.K = K + + +class BaseTestDegreeMixing: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.D = nx.DiGraph() + cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)]) + cls.D2 = nx.DiGraph() + cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)]) + cls.M = nx.MultiGraph() + nx.add_path(cls.M, range(4)) + cls.M.add_edge(0, 1) + cls.S = nx.Graph() + cls.S.add_edges_from([(0, 0), (1, 1)]) + cls.W = nx.Graph() + cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + cls.W.add_edge(0, 2, weight=1) + S1 = nx.star_graph(4) + S2 = nx.star_graph(4) + cls.DS = nx.disjoint_union(S1, S2) + cls.DS.add_edge(4, 5) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..21c6287bbe6b0bfc9aa41201b593f342b2d3976e --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py @@ -0,0 +1,143 @@ +from itertools import permutations + +import pytest + +import networkx as nx + + +class TestNeighborConnectivity: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.5} + nd = nx.average_degree_connectivity(D) + assert nd == answer + + answer = {1: 2.0, 2: 1.5} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="weight") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="weight") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, weight="weight", source="in", target="in" + ) + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, source="in", target="out", weight="weight" + ) + assert nd == answer + + def test_weight_keyword(self): + G = nx.path_graph(4) + G[1][2]["other"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, 
weight="other") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G, weight=None) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="other") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_degree_connectivity(G)[5] + assert nd == 1.8 + nd = nx.average_degree_connectivity(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_zero_deg(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(1, 4) + c = nx.average_degree_connectivity(G) + assert c == {1: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="in", target="in") + assert c == {0: 0, 1: 0} + c = nx.average_degree_connectivity(G, source="in", target="out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="in", target="in+out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="out", target="out") + assert c == {0: 0, 3: 0} + c = nx.average_degree_connectivity(G, source="out", target="in") + assert c == {0: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="out", target="in+out") + assert c == {0: 0, 3: 1} + + def test_in_out_weight(self): + G = nx.DiGraph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(3, 1, weight=1) + for s, t in permutations(["in", "out", "in+out"], 2): + c = nx.average_degree_connectivity(G, source=s, target=t) + cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight") + assert c == cw + + def test_invalid_source(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, source="bogus") + + def test_invalid_target(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, target="bogus") + + def test_invalid_undirected_graph(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, target="bogus") + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, source="bogus") + + def test_single_node(self): + # TODO Is this really the intended behavior for providing a + # single node as the argument `nodes`? Shouldn't the function + # just return the connectivity value itself? 
+ G = nx.trivial_graph() + conn = nx.average_degree_connectivity(G, nodes=0) + assert conn == {0: 0} diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..5203f9449fd022525b97a19cbe78498e33fb09a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py @@ -0,0 +1,123 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx +from networkx.algorithms.assortativity.correlation import attribute_ac + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestDegreeMixingCorrelation(BaseTestDegreeMixing): + def test_degree_assortativity_undirected(self): + r = nx.degree_assortativity_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_assortativity_node_kwargs(self): + G = nx.Graph() + edges = [(0, 1), (0, 3), (1, 2), (1, 3), (1, 4), (5, 9), (9, 0)] + G.add_edges_from(edges) + r = nx.degree_assortativity_coefficient(G, nodes=[1, 2, 4]) + np.testing.assert_almost_equal(r, -1.0, decimal=4) + + def test_degree_assortativity_directed(self): + r = nx.degree_assortativity_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_assortativity_directed2(self): + """Test degree assortativity for a directed graph where the set of + in/out degree does not equal the total degree.""" + r = nx.degree_assortativity_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_assortativity_multigraph(self): + r = nx.degree_assortativity_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_pearson_assortativity_undirected(self): + r = nx.degree_pearson_correlation_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_pearson_assortativity_directed(self): + r = nx.degree_pearson_correlation_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_pearson_assortativity_directed2(self): + """Test degree assortativity with Pearson for a directed graph where + the set of in/out degree does not equal the total degree.""" + r = nx.degree_pearson_correlation_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_pearson_assortativity_multigraph(self): + r = nx.degree_pearson_correlation_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_assortativity_weighted(self): + r = nx.degree_assortativity_coefficient(self.W, weight="weight") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_degree_assortativity_double_star(self): + r = nx.degree_assortativity_coefficient(self.DS) + np.testing.assert_almost_equal(r, -0.9339, decimal=4) + + +class TestAttributeMixingCorrelation(BaseTestAttributeMixing): + def test_attribute_assortativity_undirected(self): + r = nx.attribute_assortativity_coefficient(self.G, "fish") + assert r == 6.0 / 22.0 + + def test_attribute_assortativity_directed(self): + r = nx.attribute_assortativity_coefficient(self.D, "fish") + assert r == 1.0 / 3.0 + + def test_attribute_assortativity_multigraph(self): + r = nx.attribute_assortativity_coefficient(self.M, "fish") + assert r == 1.0 + + def 
test_attribute_assortativity_coefficient(self): + # from "Mixing patterns in networks" + # fmt: off + a = np.array([[0.258, 0.016, 0.035, 0.013], + [0.012, 0.157, 0.058, 0.019], + [0.013, 0.023, 0.306, 0.035], + [0.005, 0.007, 0.024, 0.016]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.623, decimal=3) + + def test_attribute_assortativity_coefficient2(self): + # fmt: off + a = np.array([[0.18, 0.02, 0.01, 0.03], + [0.02, 0.20, 0.03, 0.02], + [0.01, 0.03, 0.16, 0.01], + [0.03, 0.02, 0.01, 0.22]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.68, decimal=2) + + def test_attribute_assortativity(self): + a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]]) + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.029, decimal=3) + + def test_attribute_assortativity_negative(self): + r = nx.numeric_assortativity_coefficient(self.N, "margin") + np.testing.assert_almost_equal(r, -0.2903, decimal=4) + + def test_assortativity_node_kwargs(self): + G = nx.Graph() + G.add_nodes_from([0, 1], size=2) + G.add_nodes_from([2, 3], size=3) + G.add_edges_from([(0, 1), (2, 3)]) + r = nx.numeric_assortativity_coefficient(G, "size", nodes=[0, 3]) + np.testing.assert_almost_equal(r, 1.0, decimal=4) + + def test_attribute_assortativity_float(self): + r = nx.numeric_assortativity_coefficient(self.F, "margin") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_attribute_assortativity_mixed(self): + r = nx.numeric_assortativity_coefficient(self.K, "margin") + np.testing.assert_almost_equal(r, 0.4340, decimal=4) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py new file mode 100644 index 0000000000000000000000000000000000000000..9af09867235b9092837b517ca542e8a85eb602ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py @@ -0,0 +1,176 @@ +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestDegreeMixingDict(BaseTestDegreeMixing): + def test_degree_mixing_dict_undirected(self): + d = nx.degree_mixing_dict(self.P4) + d_result = {1: {2: 2}, 2: {1: 2, 2: 2}} + assert d == d_result + + def test_degree_mixing_dict_undirected_normalized(self): + d = nx.degree_mixing_dict(self.P4, normalized=True) + d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}} + assert d == d_result + + def test_degree_mixing_dict_directed(self): + d = nx.degree_mixing_dict(self.D) + print(d) + d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}} + assert d == d_result + + def test_degree_mixing_dict_multigraph(self): + d = nx.degree_mixing_dict(self.M) + d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}} + assert d == d_result + + def test_degree_mixing_dict_weighted(self): + d = nx.degree_mixing_dict(self.W, weight="weight") + d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}} + assert d == d_result + + +class TestDegreeMixingMatrix(BaseTestDegreeMixing): + def test_degree_mixing_matrix_undirected(self): + # fmt: off + a_result = np.array([[0, 2], + [2, 2]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.P4, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.P4) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_directed(self): + # fmt: off + a_result = np.array([[0, 0, 2], + [1, 0, 1], + [0, 
0, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.D, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.D) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_multigraph(self): + # fmt: off + a_result = np.array([[0, 1, 0], + [1, 0, 3], + [0, 3, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.M, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.M) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_selfloop(self): + # fmt: off + a_result = np.array([[2]]) + # fmt: on + a = nx.degree_mixing_matrix(self.S, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.S) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_weighted(self): + a_result = np.array([[0.0, 1.0], [1.0, 6.0]]) + a = nx.degree_mixing_matrix(self.W, weight="weight", normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.W, weight="weight") + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_degree_mixing_matrix_mapping(self): + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + mapping = {0.5: 1, 1.5: 0} + a = nx.degree_mixing_matrix( + self.W, weight="weight", normalized=False, mapping=mapping + ) + np.testing.assert_equal(a, a_result) + + +class TestAttributeMixingDict(BaseTestAttributeMixing): + def test_attribute_mixing_dict_undirected(self): + d = nx.attribute_mixing_dict(self.G, "fish") + d_result = { + "one": {"one": 2, "red": 1}, + "two": {"two": 2, "blue": 1}, + "red": {"one": 1}, + "blue": {"two": 1}, + } + assert d == d_result + + def test_attribute_mixing_dict_directed(self): + d = nx.attribute_mixing_dict(self.D, "fish") + d_result = { + "one": {"one": 1, "red": 1}, + "two": {"two": 1, "blue": 1}, + "red": {}, + "blue": {}, + } + assert d == d_result + + def test_attribute_mixing_dict_multigraph(self): + d = nx.attribute_mixing_dict(self.M, "fish") + d_result = {"one": {"one": 4}, "two": {"two": 2}} + assert d == d_result + + +class TestAttributeMixingMatrix(BaseTestAttributeMixing): + def test_attribute_mixing_matrix_undirected(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.G, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_directed(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.D, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_multigraph(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.M, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def 
test_attribute_mixing_matrix_negative(self): + mapping = {-2: 0, -3: 1, -4: 2} + a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.N, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.N, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_attribute_mixing_matrix_float(self): + mapping = {0.5: 1, 1.5: 0} + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.F, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.F, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..bf1252d532079d4de6de4659943ce008eb9018b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py @@ -0,0 +1,108 @@ +import pytest + +import networkx as nx + + +class TestAverageNeighbor: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + nd = nx.average_neighbor_degree(D) + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "out") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2} + nd = nx.average_neighbor_degree(G, weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + print(D.edges(data=True)) + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "out", "out", weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in", weight="weight") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "in", "out", weight="weight") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in", weight="weight") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, source="in+out", weight="weight") + assert nd == {0: 1.0, 1: 1.0, 2: 0.8, 3: 1.0} + nd = nx.average_neighbor_degree(D, target="in+out", weight="weight") + assert nd == {0: 2.0, 1: 2.0, 2: 1.0, 3: 0.0} + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in", weight="weight") + assert nd == answer + + def test_degree_k4(self): + G = nx.complete_graph(4) + answer = {0: 3, 1: 3, 2: 3, 3: 3} + nd = nx.average_neighbor_degree(G) + assert nd == 
answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in") + assert nd == answer + + def test_degree_k4_nodes(self): + G = nx.complete_graph(4) + answer = {1: 3.0, 2: 3.0} + nd = nx.average_neighbor_degree(G, nodes=[1, 2]) + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_neighbor_degree(G)[5] + assert nd == 1.8 + nd = nx.average_neighbor_degree(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_error_invalid_source_target(self): + G = nx.path_graph(4) + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") + G = G.to_directed() + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py new file mode 100644 index 0000000000000000000000000000000000000000..3984292be84dd7b306066809fb3c50a7cf0424f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py @@ -0,0 +1,87 @@ +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestAttributeMixingXY(BaseTestAttributeMixing): + def test_node_attribute_xy_undirected(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish")) + attrxy_result = sorted( + [ + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ("one", "red"), + ("red", "one"), + ("blue", "two"), + ("two", "blue"), + ] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_undirected_nodes(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"])) + attrxy_result = sorted([]) + assert attrxy == attrxy_result + + def test_node_attribute_xy_directed(self): + attrxy = sorted(nx.node_attribute_xy(self.D, "fish")) + attrxy_result = sorted( + [("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_multigraph(self): + attrxy = sorted(nx.node_attribute_xy(self.M, "fish")) + attrxy_result = [ + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ] + assert attrxy == attrxy_result + + def test_node_attribute_xy_selfloop(self): + attrxy = sorted(nx.node_attribute_xy(self.S, "fish")) + attrxy_result = [("one", "one"), ("two", "two")] + assert attrxy == attrxy_result + + +class TestDegreeMixingXY(BaseTestDegreeMixing): + def test_node_degree_xy_undirected(self): + xy = sorted(nx.node_degree_xy(self.P4)) + xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)]) + assert xy == xy_result + + def test_node_degree_xy_undirected_nodes(self): + xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1])) + xy_result = sorted([(1, 2), (2, 1)]) + assert xy == xy_result + + def test_node_degree_xy_directed(self): + xy = sorted(nx.node_degree_xy(self.D)) + xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)]) + assert xy == xy_result + + def 
test_node_degree_xy_multigraph(self): + xy = sorted(nx.node_degree_xy(self.M)) + xy_result = sorted( + [(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)] + ) + assert xy == xy_result + + def test_node_degree_xy_selfloop(self): + xy = sorted(nx.node_degree_xy(self.S)) + xy_result = sorted([(2, 2), (2, 2)]) + assert xy == xy_result + + def test_node_degree_xy_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=7) + G.add_edge(2, 3, weight=10) + xy = sorted(nx.node_degree_xy(G, weight="weight")) + xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)]) + assert xy == xy_result diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c91a904a13496ecab5a3a6c8caa026970d99a540 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/__init__.py @@ -0,0 +1,20 @@ +from .betweenness import * +from .betweenness_subset import * +from .closeness import * +from .current_flow_betweenness import * +from .current_flow_betweenness_subset import * +from .current_flow_closeness import * +from .degree_alg import * +from .dispersion import * +from .eigenvector import * +from .group import * +from .harmonic import * +from .katz import * +from .load import * +from .percolation import * +from .reaching import * +from .second_order import * +from .subgraph_alg import * +from .trophic import * +from .voterank_alg import * +from .laplacian import * diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..7f9967e964c8cad1393bd9fe3e91a3409c69cf63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/betweenness_subset.py @@ -0,0 +1,274 @@ +"""Betweenness centrality measures for subsets of nodes.""" +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _add_edge_keys, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = [ + "betweenness_centrality_subset", + "edge_betweenness_centrality_subset", +] + + +@nx._dispatchable(edge_attrs="weight") +def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None): + r"""Compute betweenness centrality for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|v)$ is the number of those paths + passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, + and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_. + + + Parameters + ---------- + G : graph + A NetworkX graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by $2/((n-1)(n-2))$ + for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$ + is the number of nodes in G. 
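+ With the default ``normalized=False`` an undirected graph's raw counts are + halved by the final rescaling (see Notes below). As a minimal sketch: on a + path graph, both interior nodes lie on the single shortest path from + source 0 to target 3. + + >>> G = nx.path_graph(4) + >>> nx.betweenness_centrality_subset(G, sources=[0], targets=[3]) + {0: 0.0, 1: 0.5, 2: 0.5, 3: 0.0}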
+ + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is + designed to make betweenness_centrality(G) be the same as + betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_subset(b, S, P, sigma, s, targets) + b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed()) + return b + + +@nx._dispatchable(edge_attrs="weight") +def edge_betweenness_centrality_subset( + G, sources, targets, normalized=False, weight=None +): + r"""Compute betweenness centrality for edges for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|e)$ is the number of those paths + passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A networkx graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by `2/(n(n-1))` + for graphs, and `1/(n(n-1))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. 
+ Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + edges : dictionary + Dictionary of edges with Betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is the same + as in edge_betweenness_centrality() and is designed to make + edge_betweenness_centrality(G) be the same as + edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + b.update(dict.fromkeys(G.edges(), 0.0)) # b[e] for e in G.edges() + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_edges_subset(b, S, P, sigma, s, targets) + for n in G: # remove nodes to only return edges + del b[n] + b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed()) + if G.is_multigraph(): + b = _add_edge_keys(G, b, weight=weight) + return b + + +def _accumulate_subset(betweenness, S, P, sigma, s, targets): + delta = dict.fromkeys(S, 0.0) + target_set = set(targets) - {s} + while S: + w = S.pop() + if w in target_set: + coeff = (delta[w] + 1.0) / sigma[w] + else: + coeff = delta[w] / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets): + """edge_betweenness_centrality_subset helper.""" + delta = dict.fromkeys(S, 0) + target_set = set(targets) + while S: + w = S.pop() + for v in P[w]: + if w in target_set: + c = (sigma[v] / sigma[w]) * (1.0 + delta[w]) + else: + c = delta[w] / len(P[w]) + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False): + """betweenness_centrality_subset helper.""" + if normalized: + if n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False): + """edge_betweenness_centrality_subset helper.""" + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness diff --git 
a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/closeness.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/closeness.py new file mode 100644 index 0000000000000000000000000000000000000000..1c1722d4ed4cd5681867a1c738da529db1dece9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/closeness.py @@ -0,0 +1,281 @@ +""" +Closeness centrality measures. +""" +import functools + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils.decorators import not_implemented_for + +__all__ = ["closeness_centrality", "incremental_closeness_centrality"] + + +@nx._dispatchable(edge_attrs="distance") +def closeness_centrality(G, u=None, distance=None, wf_improved=True): + r"""Compute closeness centrality for nodes. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + average shortest path distance to `u` over all `n-1` reachable nodes. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n-1` is the number of nodes reachable from `u`. Notice that the + closeness distance function computes the incoming distance to `u` + for directed graphs. To use outward distance, act on `G.reverse()`. + + Notice that higher values of closeness indicate higher centrality. + + Wasserman and Faust propose an improved formula for graphs with + more than one connected component. The result is "a ratio of the + fraction of actors in the group who are reachable, to the average + distance" from the reachable actors [2]_. You might think this + scale factor is inverted but it is not. As is, nodes from small + components receive a smaller closeness value. Letting `N` denote + the number of nodes in the graph, + + .. math:: + + C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + Parameters + ---------- + G : graph + A NetworkX graph + + u : node, optional + Return only the value for node u + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None` (the default) all edges have a distance of 1. + Absent edge attributes are assigned a distance of 1. Note that no check + is performed to ensure that edges have the provided attribute. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.closeness_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, incremental_closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately, scaled by that part's size. + + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight.
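+ + As a minimal sketch of the `distance` keyword (the attribute name + "cost" below is arbitrary): if every edge of a path graph carries cost 2, + all shortest-path distances double, so each closeness value should be + half its unweighted counterpart. + + >>> G = nx.path_graph(3) + >>> nx.set_edge_attributes(G, 2, "cost") + >>> nx.closeness_centrality(G, distance="cost") + {0: 0.3333333333333333, 1: 0.5, 2: 0.3333333333333333}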
+ + The closeness centrality uses *inward* distance to a node, not outward. + If you want to use outward distances, apply the function to `G.reverse()`. + + In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the + outward distance rather than the inward distance. If you use a 'distance' + keyword and a DiGraph, your results will change between v2.2 and v2.3. + + References + ---------- + .. [1] Linton C. Freeman: Centrality in networks: I. + Conceptual clarification. Social Networks 1:215-239, 1979. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] pg. 201 of Wasserman, S. and Faust, K., + Social Network Analysis: Methods and Applications, 1994, + Cambridge University Press. + """ + if G.is_directed(): + G = G.reverse() # create a reversed graph view + + if distance is not None: + # use Dijkstra's algorithm with specified attribute as edge weight + path_length = functools.partial( + nx.single_source_dijkstra_path_length, weight=distance + ) + else: + path_length = nx.single_source_shortest_path_length + + if u is None: + nodes = G.nodes + else: + nodes = [u] + closeness_dict = {} + for n in nodes: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + if u is not None: + return closeness_dict[u] + return closeness_dict + + +@not_implemented_for("directed") +@nx._dispatchable(mutates_input=True) +def incremental_closeness_centrality( + G, edge, prev_cc=None, insertion=True, wf_improved=True +): + r"""Incremental closeness centrality for nodes. + + Compute closeness centrality for nodes using level-based work filtering + as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al. + + Level-based work filtering detects unnecessary updates to the closeness + centrality and filters them out. + + --- + From "Incremental Algorithms for Closeness Centrality": + + Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V + such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)`. + Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. + + Where :math:`dG(u, v)` denotes the length of the shortest path between + two vertices u, v in a graph G, cc[s] is the closeness centrality for a + vertex s in V, and cc'[s] is the closeness centrality for a + vertex s in V, with the (u, v) edge added. + --- + + We use Theorem 1 to filter out updates when adding or removing an edge. + When adding an edge (u, v), we compute the shortest path lengths from all + other nodes to u and to v before the edge is added. When removing an edge, + we compute the shortest path lengths after the edge is removed. Then we + apply Theorem 1 to use previously computed closeness centrality for nodes + where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for + undirected, unweighted graphs; the distance argument is not supported. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + sum of the shortest path distances from `u` to all `n-1` other nodes. + Since the sum of distances depends on the number of nodes in the + graph, closeness is normalized by the sum of minimum possible + distances `n-1`. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n` is the number of nodes in the graph. + + Notice that higher values of closeness indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge : tuple + The modified edge (u, v) in the graph. + + prev_cc : dictionary + The previous closeness centrality for all nodes in the graph. + + insertion : bool, optional + If True (default) the edge was inserted, otherwise it was deleted from the graph. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately. + + References + ---------- + .. [1] Freeman, L.C., 1979. Centrality in networks: I. + Conceptual clarification. Social Networks 1, 215--239. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] Sariyuce, A.E.; Kaya, K.; Saule, E.; Catalyurek, U.V. Incremental + Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data + http://sariyuce.com/papers/bigdata13.pdf + """ + if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()): + raise NetworkXError("prev_cc and G do not have the same nodes") + + # Unpack edge + (u, v) = edge + path_length = nx.single_source_shortest_path_length + + if insertion: + # For edge insertion, we want shortest paths before the edge is inserted + du = path_length(G, u) + dv = path_length(G, v) + + G.add_edge(u, v) + else: + G.remove_edge(u, v) + + # For edge removal, we want shortest paths after the edge is removed + du = path_length(G, u) + dv = path_length(G, v) + + if prev_cc is None: + return nx.closeness_centrality(G) + + nodes = G.nodes() + closeness_dict = {} + for n in nodes: + if n in du and n in dv and abs(du[n] - dv[n]) <= 1: + closeness_dict[n] = prev_cc[n] + else: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + + # Leave the graph as we found it + if insertion: + G.remove_edge(u, v) + else: + G.add_edge(u, v) + + return closeness_dict diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py new file mode 100644 index 0000000000000000000000000000000000000000..b79a4c801e887d0466348d9c5782ca1a763eee66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py @@ -0,0 +1,341 @@ +"""Current-flow betweenness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import (
+ CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, + flow_matrix_row, +) +from networkx.utils import ( + not_implemented_for, + py_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "current_flow_betweenness_centrality", + "approximate_current_flow_betweenness_centrality", + "edge_current_flow_betweenness_centrality", +] + + +@not_implemented_for("directed") +@py_random_state(7) +@nx._dispatchable(edge_attrs="weight") +def approximate_current_flow_betweenness_centrality( + G, + normalized=True, + weight=None, + dtype=float, + solver="full", + epsilon=0.5, + kmax=10000, + seed=None, +): + r"""Compute the approximate current-flow betweenness centrality for nodes. + + Approximates the current-flow betweenness centrality within absolute + error of epsilon with high probability [1]_. + + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + epsilon: float + Absolute error tolerance. + + kmax: int + Maximum number of sample node pairs to use for approximation. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + current_flow_betweenness_centrality + + Notes + ----- + The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$ + and the space required is $O(m)$ for $n$ nodes and $m$ edges. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer: + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. 
+ https://doi.org/10.1007/978-3-540-31856-9_44 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + betweenness = dict.fromkeys(H, 0.0) + nb = (n - 1.0) * (n - 2.0) # normalization factor + cstar = n * (n - 1) / nb + l = 1 # parameter in approximation, adjustable + k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n))) + if k > kmax: + msg = f"Number of random pairs k>kmax ({k}>{kmax}) " + raise nx.NetworkXError(msg, "Increase kmax or epsilon") + cstar2k = cstar / (2 * k) + for _ in range(k): + s, t = pair = seed.sample(range(n), 2) + b = np.zeros(n, dtype=dtype) + b[s] = 1 + b[t] = -1 + p = C.solve(b) + for v in H: + if v in pair: + continue + for nbr in H[v]: + w = H[v][nbr].get(weight, 1.0) + betweenness[v] += float(w * np.abs(p[v] - p[nbr]) * cstar2k) + if normalized: + factor = 1.0 + else: + factor = nb / 2.0 + # remap to original node names and "unnormalize" if required + return {ordering[k]: v * factor for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worst case is $w=n$ for $O(n^2)$.
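+ + As a quick sanity check (a sketch; SciPy is assumed for the underlying + Laplacian solvers): on a path graph the two interior nodes are symmetric + and carry more current than the leaves. + + >>> G = nx.path_graph(4) + >>> cfb = nx.current_flow_betweenness_centrality(G) + >>> abs(cfb[1] - cfb[2]) < 1e-12 and cfb[0] < cfb[1] + True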
+ + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(N)))) + betweenness = dict.fromkeys(H, 0.0) # b[n]=0 for n in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(N))) + for i in range(N): + betweenness[s] += (i - pos[i]) * row.item(i) + betweenness[t] += (N - i - 1 - pos[i]) * row.item(i) + if normalized: + nb = (N - 1.0) * (N - 2.0) # normalization factor + else: + nb = 2.0 + return {ordering[n]: (b - n) * 2.0 / nb for n, b in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def edge_current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for edges. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + edges : dictionary + Dictionary of edge tuples with betweenness centrality as the value. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraphs. + If the input graph is an instance of DiGraph class, NetworkXError + is raised. + + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worst case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(N)))) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (N - 1.0) * (N - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(1, N + 1))) + for i in range(N): + betweenness[e] += (i + 1 - pos[i]) * row.item(i) + betweenness[e] += (N - i - pos[i]) * row.item(i) + betweenness[e] /= nb + return {(ordering[s], ordering[t]): b for (s, t), b in betweenness.items()} diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_closeness.py new file mode 100644 index 0000000000000000000000000000000000000000..92c892f74494bcd32ae82943c8c0afb8bc041685 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/current_flow_closeness.py @@ -0,0 +1,95 @@ +"""Current-flow closeness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, +) +from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering + +__all__ = ["current_flow_closeness_centrality", "information_centrality"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"): + """Compute current-flow closeness centrality for nodes. + + Current-flow closeness centrality is a variant of closeness + centrality based on effective resistance between nodes in + a network. This metric is also known as information centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with current flow closeness centrality as the value. + + See Also + -------- + closeness_centrality + + Notes + ----- + The algorithm is from Brandes [1]_. + + See also [2]_ for the original definition of information centrality. + + References + ---------- + ..
[1] Ulrik Brandes and Daniel Fleischer, + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] Karen Stephenson and Marvin Zelen: + Rethinking centrality: Methods and examples. + Social Networks 11(1):1-37, 1989. + https://doi.org/10.1016/0378-8733(89)90016-6 + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + N = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(N)))) + betweenness = dict.fromkeys(H, 0.0) # b[n]=0 for n in H + N = H.number_of_nodes() + L = nx.laplacian_matrix(H, nodelist=range(N), weight=weight).asformat("csc") + L = L.astype(dtype) + C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver + for v in H: + col = C2.get_row(v) + for w in H: + betweenness[v] += col.item(v) - 2 * col.item(w) + betweenness[w] += col.item(v) + return {ordering[node]: 1 / value for node, value in betweenness.items()} + + +information_centrality = current_flow_closeness_centrality diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/degree_alg.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/degree_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..ea53f41ea3e64112b31c140eadc9353b84663207 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/degree_alg.py @@ -0,0 +1,149 @@ +"""Degree centrality measures.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"] + + +@nx._dispatchable +def degree_centrality(G): + """Compute the degree centrality for nodes. + + The degree centrality for a node v is the fraction of nodes it + is connected to. + + Parameters + ---------- + G : graph + A networkx graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with degree centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.degree_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatchable +def in_degree_centrality(G): + """Compute the in-degree centrality for nodes. + + The in-degree centrality for a node v is the fraction of nodes its + incoming edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with in-degree centrality as values. 
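+ + As a small sketch of the multigraph caveat noted for these functions: + with parallel edges a node's degree can exceed n-1, so centrality values + above 1 are possible. + + >>> M = nx.MultiGraph([(0, 1), (0, 1), (0, 1)]) + >>> nx.degree_centrality(M) + {0: 3.0, 1: 3.0}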
+ + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.in_degree_centrality(G) + {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + degree_centrality, out_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.in_degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatchable +def out_degree_centrality(G): + """Compute the out-degree centrality for nodes. + + The out-degree centrality for a node v is the fraction of nodes its + outgoing edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with out-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.out_degree_centrality(G) + {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0} + + See Also + -------- + degree_centrality, in_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.out_degree()} + return centrality diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/group.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/group.py new file mode 100644 index 0000000000000000000000000000000000000000..66fd309ff9b66e150f964baf096bf7cc55bc2263 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/group.py @@ -0,0 +1,786 @@ +"""Group centrality measures.""" +from copy import deepcopy + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _accumulate_endpoints, + _single_source_dijkstra_path_basic, + _single_source_shortest_path_basic, +) +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "group_betweenness_centrality", + "group_closeness_centrality", + "group_degree_centrality", + "group_in_degree_centrality", + "group_out_degree_centrality", + "prominent_group", +] + + +@nx._dispatchable(edge_attrs="weight") +def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): + r"""Compute the group betweenness centrality for a group of nodes. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(C) =\sum_{s,t \in V} \frac{\sigma(s, t|C)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + C : list or set or list of lists or list of sets + A group or a list of groups containing nodes which belong to G, for which group betweenness + centrality is to be calculated. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` + where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + betweenness : list of floats or float + If C is a single group then return a float. If C is a list with + several groups then return a list of group betweenness centralities. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The initial implementation of the algorithm is mentioned in [2]_. This function uses + an improved algorithm presented in [4]_. + + The number of nodes in the group must be a maximum of n - 2 where `n` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf + .. [3] Sourav Medya et al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." + https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + + """ + GBC = [] # initialize betweenness + list_of_groups = True + # check whether C contains one or many groups + if any(el in G for el in C): + C = [C] + list_of_groups = False + set_v = {node for group in C for node in group} + if set_v - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.") + + # pre-processing + PB, sigma, D = _group_preprocessing(G, set_v, weight) + + # the algorithm for each group + for group in C: + group = set(group) # set of nodes in group + # initialize the matrices of the sigma and the PB + GBC_group = 0 + sigma_m = deepcopy(sigma) + PB_m = deepcopy(PB) + sigma_m_v = deepcopy(sigma_m) + PB_m_v = deepcopy(PB_m) + for v in group: + GBC_group += PB_m[v][v] + for x in group: + for y in group: + dxvy = 0 + dxyv = 0 + dvxy = 0 + if not ( + sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0 + ): + if D[x][v] == D[x][y] + D[y][v]: + dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v] + if D[x][y] == D[x][v] + D[v][y]: + dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y] + if D[v][y] == D[v][x] + D[x][y]: + dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y] + sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy) + PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy + if y != v: + PB_m_v[x][y] -= PB_m[x][v] * dxyv + if x != v: + PB_m_v[x][y] -= PB_m[v][y] * dvxy + sigma_m, sigma_m_v = sigma_m_v, sigma_m + PB_m, PB_m_v = PB_m_v, PB_m + + # endpoints + v, c = len(G), len(group) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. + if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = c * (2 * v - c - 1) + elif nx.is_connected(G): + scale = c * (2 * v - c - 1) + if scale == 0: + for group_node1 in group: + for node in D[group_node1]: + if node != group_node1: + if node in group: + scale += 1 + else: + scale += 2 + GBC_group -= scale + + # normalized + if normalized: + scale = 1 / ((v - c) * (v - c - 1)) + GBC_group *= scale + + # If undirected then count only the undirected edges + elif not G.is_directed(): + GBC_group /= 2 + + GBC.append(GBC_group) + if list_of_groups: + return GBC + return GBC[0] + + +def _group_preprocessing(G, set_v, weight): + sigma = {} + delta = {} + D = {} + betweenness = dict.fromkeys(G, 0) + for s in G: + if weight is None: # use BFS + S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight) + betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s) + for i in delta[s]: # add the paths from s to i and rescale sigma + if s != i: + delta[s][i] += 1 + if weight is not None: + sigma[s][i] = sigma[s][i] / 2 + # building the path betweenness matrix only for nodes that appear in the group + PB = dict.fromkeys(G) + for group_node1 in set_v: + PB[group_node1] = dict.fromkeys(G, 0.0) + for group_node2 in set_v: + if group_node2 not in D[group_node1]: + continue + for node in G: + # if node is connected to the two group nodes then continue + if group_node2 in D[node] and group_node1 in D[node]: + if ( + D[node][group_node2] + == D[node][group_node1] + D[group_node1][group_node2] + ): + PB[group_node1][group_node2] += ( + delta[node][group_node2] + * sigma[node][group_node1] + * sigma[group_node1][group_node2] + / sigma[node][group_node2] + ) + return PB, sigma, D + + +@nx._dispatchable(edge_attrs="weight") +def prominent_group( + G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False +): + r"""Find the prominent group of size $k$ in graph $G$. The prominence of the + group is evaluated by the group betweenness centrality. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(C) =\sum_{s,t \in V} \frac{\sigma(s, t|C)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int + The number of nodes in the group. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))`` + where ``|V|`` is the number of nodes in G and ``|C|`` is the number of + nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + C : list or set, optional (default=None) + list of nodes which won't be candidates for the prominent group. + + greedy : bool, optional (default=False) + Use a naive greedy algorithm in order to find a non-optimal prominent + group. For scale-free networks the results are negligibly below the optimal + results. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + max_GBC : float + The group betweenness centrality of the prominent group. + + max_group : list + The list of nodes in the prominent group. + + See Also + -------- + betweenness_centrality, group_betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The algorithm is described in [2]_ and is based on techniques mentioned in [4]_. + + The number of nodes in the group must be a maximum of ``n - 2`` where ``n`` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev: + "Finding the Most Prominent Group in Complex Networks" + AI communications 20(4): 287-296, 2007. + https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855 + .. [3] Sourav Medya et
+       Group Centrality Maximization via Network Design.
+       SIAM International Conference on Data Mining, SDM 2018, 126–134.
+       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
+    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
+       "Fast algorithm for successive computation of group betweenness centrality."
+       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
+    """
+    import numpy as np
+    import pandas as pd
+
+    if C is not None:
+        C = set(C)
+        if C - G.nodes:  # element(s) of C not in G
+            raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.")
+        nodes = list(G.nodes - C)
+    else:
+        nodes = list(G.nodes)
+    DF_tree = nx.Graph()
+    DF_tree.__networkx_cache__ = None  # Disable caching
+    PB, sigma, D = _group_preprocessing(G, nodes, weight)
+    betweenness = pd.DataFrame.from_dict(PB)
+    if C is not None:
+        for node in C:
+            # remove from the betweenness all the nodes not part of the group
+            betweenness.drop(index=node, inplace=True)
+            betweenness.drop(columns=node, inplace=True)
+    CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)]
+    max_GBC = 0
+    max_group = []
+    DF_tree.add_node(
+        1,
+        CL=CL,
+        betweenness=betweenness,
+        GBC=0,
+        GM=[],
+        sigma=sigma,
+        cont=dict(zip(nodes, np.diag(betweenness))),
+    )
+
+    # the algorithm
+    DF_tree.nodes[1]["heu"] = 0
+    for i in range(k):
+        DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]]
+    max_GBC, DF_tree, max_group = _dfbnb(
+        G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy
+    )
+
+    v = len(G)
+    if not endpoints:
+        scale = 0
+        # if the graph is connected then subtract the endpoints from
+        # the count for all the nodes in the graph. else count how many
+        # nodes are connected to the group's nodes and subtract that.
+        if nx.is_directed(G):
+            if nx.is_strongly_connected(G):
+                scale = k * (2 * v - k - 1)
+        elif nx.is_connected(G):
+            scale = k * (2 * v - k - 1)
+        if scale == 0:
+            for group_node1 in max_group:
+                for node in D[group_node1]:
+                    if node != group_node1:
+                        if node in max_group:
+                            scale += 1
+                        else:
+                            scale += 2
+        max_GBC -= scale
+
+    # normalized
+    if normalized:
+        scale = 1 / ((v - k) * (v - k - 1))
+        max_GBC *= scale
+
+    # If undirected then count only the undirected edges
+    elif not G.is_directed():
+        max_GBC /= 2
+    max_GBC = float("%.2f" % max_GBC)
+    return max_GBC, max_group
+
+
+def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy):
+    # stopping condition - if we found a group of size k with a higher GBC,
+    # return it as the new maximum
+    if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC:
+        return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"]
+    # stopping condition - prune if the group already has k members, if fewer than
+    # k - |GM| nodes remain in the candidate list, or if the heuristic plus the
+    # GBC cannot beat the maximal GBC found so far
+    if (
+        len(DF_tree.nodes[root]["GM"]) == k
+        or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"])
+        or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC
+    ):
+        return max_GBC, DF_tree, max_group
+
+    # finding the heuristic of both children
+    node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy)
+
+    # expand first the child with the bigger heuristic + GBC;
+    # if greedy, only expand the plus node
+    if greedy:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+
+    elif (
+        DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"]
+        > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"]
+    ):
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+    else:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+    return max_GBC, DF_tree, max_group
+
+
+def _heuristic(k, root, DF_tree, D, nodes, greedy):
+    import numpy as np
+
+    # This helper function adds two child nodes to DF_tree - a "plus" node and
+    # a "minus" node - and computes their heuristic, CL, GBC, and GM
+    node_p = DF_tree.number_of_nodes() + 1
+    node_m = DF_tree.number_of_nodes() + 2
+    added_node = DF_tree.nodes[root]["CL"][0]
+
+    # adding the plus node
+    DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))])
+    DF_tree.nodes[node_p]["GM"].append(added_node)
+    DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node]
+    root_node = DF_tree.nodes[root]
+    for x in nodes:
+        for y in nodes:
+            dxvy = 0
+            dxyv = 0
+            dvxy = 0
+            if not (
+                root_node["sigma"][x][y] == 0
+                or root_node["sigma"][x][added_node] == 0
+                or root_node["sigma"][added_node][y] == 0
+            ):
+                if D[x][added_node] == D[x][y] + D[y][added_node]:
+                    dxyv = (
+                        root_node["sigma"][x][y]
+                        * root_node["sigma"][y][added_node]
+                        / root_node["sigma"][x][added_node]
+                    )
+                if D[x][y] == D[x][added_node] + D[added_node][y]:
+                    dxvy = (
+                        root_node["sigma"][x][added_node]
+                        * root_node["sigma"][added_node][y]
+                        / root_node["sigma"][x][y]
+                    )
+                if D[added_node][y] == D[added_node][x] + D[x][y]:
+                    dvxy = (
+                        root_node["sigma"][added_node][x]
+                        * root_node["sigma"][x][y]
+                        / root_node["sigma"][added_node][y]
+                    )
+            DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy)
+            DF_tree.nodes[node_p]["betweenness"].loc[y, x] = (
+                root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy
+            )
+            if y != added_node:
+                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
+                    root_node["betweenness"][x][added_node] * dxyv
+                )
+            if x != added_node:
+                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
+                    root_node["betweenness"][added_node][y] * dvxy
+                )
+
+    DF_tree.nodes[node_p]["CL"] = [
+        node
+        for _, node in sorted(
+            zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True
+        )
+        if node not in DF_tree.nodes[node_p]["GM"]
+    ]
+    DF_tree.nodes[node_p]["cont"] = dict(
+        zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"]))
+    )
+    DF_tree.nodes[node_p]["heu"] = 0
+    for i in range(k - len(DF_tree.nodes[node_p]["GM"])):
+        DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][
+            DF_tree.nodes[node_p]["CL"][i]
+        ]
+
+    # adding the minus node - don't insert the first node in the CL to GM.
+    # Insert the minus node only if the algorithm isn't greedy
+    if not greedy:
+        DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))])
+        DF_tree.nodes[node_m]["CL"].pop(0)
+        DF_tree.nodes[node_m]["cont"].pop(added_node)
+        DF_tree.nodes[node_m]["heu"] = 0
+        for i in range(k - len(DF_tree.nodes[node_m]["GM"])):
+            DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][
+                DF_tree.nodes[node_m]["CL"][i]
+            ]
+    else:
+        node_m = None
+
+    return node_p, node_m, DF_tree
+
+
+@nx._dispatchable(edge_attrs="weight")
+def group_closeness_centrality(G, S, weight=None):
+    r"""Compute the group closeness centrality for a group of nodes.
+
+    Group closeness centrality of a group of nodes $S$ is a measure
+    of how close the group is to the other nodes in the graph.
+
+    .. math::
+
+       c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}}
+
+       d_{S, v} = \min_{u \in S} (d_{u, v})
+
+    where $V$ is the set of nodes and $d_{S, v}$ is the distance of
+    the group $S$ from $v$ as defined above. ($V-S$ is the set of nodes
+    in $V$ that are not in $S$.)
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    S : list or set
+        S is a group of nodes which belong to G, for which group closeness
+        centrality is to be calculated.
+
+    weight : None or string, optional (default=None)
+        If None, all edge weights are considered equal.
+        Otherwise holds the name of the edge attribute used as weight.
+        The weight of an edge is treated as the length or distance between the two sides.
+
+    Raises
+    ------
+    NodeNotFound
+        If node(s) in S are not present in G.
+
+    Returns
+    -------
+    closeness : float
+        Group closeness centrality of the group S.
+
+    See Also
+    --------
+    closeness_centrality
+
+    Notes
+    -----
+    The measure was introduced in [1]_.
+    The formula implemented here is described in [2]_.
+
+    Higher values of closeness indicate greater centrality.
+
+    It is assumed that 1 / 0 is 0 (required in the case of directed graphs,
+    or when a shortest path length is 0).
+
+    The number of nodes in the group must be a maximum of n - 1 where `n`
+    is the total number of nodes in the graph.
+
+    For directed graphs, the incoming distance is utilized here. To use the
+    outward distance, act on `G.reverse()`.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    .. [2] J. Zhao et al.:
+       Measuring and Maximizing Group Closeness Centrality over
+       Disk Resident Graphs.
+       WWW Conference Proceedings, 2014. 689-694.
+       https://doi.org/10.1145/2567948.2579356
+    """
+    if G.is_directed():
+        G = G.reverse()  # reverse view
+    closeness = 0  # initialize to 0
+    V = set(G)  # set of nodes in G
+    S = set(S)  # set of nodes in group S
+    V_S = V - S  # set of nodes in V but not S
+    shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight)
+    # accumulation
+    for v in V_S:
+        try:
+            closeness += shortest_path_lengths[v]
+        except KeyError:  # no path exists
+            closeness += 0
+    try:
+        closeness = len(V_S) / closeness
+    except ZeroDivisionError:  # 1 / 0 assumed as 0
+        closeness = 0
+    return closeness
+
+
+@nx._dispatchable
+def group_degree_centrality(G, S):
+    """Compute the group degree centrality for a group of nodes.
+
+    Group degree centrality of a group of nodes $S$ is the fraction
+    of non-group members connected to group members.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    S : list or set
+        S is a group of nodes which belong to G, for which group degree
+        centrality is to be calculated.
+
+    Raises
+    ------
+    NetworkXError
+        If node(s) in S are not in G.
+
+    Returns
+    -------
+    centrality : float
+        Group degree centrality of the group S.
+
+    See Also
+    --------
+    degree_centrality
+    group_in_degree_centrality
+    group_out_degree_centrality
+
+    Notes
+    -----
+    The measure was introduced in [1]_.
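+
+    For example, the center of a star graph is adjacent to every other node,
+    so a group consisting of the center alone attains the maximum value of 1
+    (an illustrative example; any star size gives the same result):
+
+    >>> nx.group_degree_centrality(nx.star_graph(4), [0])
+    1.0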
+
+    The number of nodes in the group must be a maximum of n - 1 where `n`
+    is the total number of nodes in the graph.
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    """
+    centrality = len(set().union(*[set(G.neighbors(i)) for i in S]) - set(S))
+    centrality /= len(G.nodes()) - len(S)
+    return centrality
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def group_in_degree_centrality(G, S):
+    """Compute the group in-degree centrality for a group of nodes.
+
+    Group in-degree centrality of a group of nodes $S$ is the fraction
+    of non-group members connected to group members by incoming edges.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    S : list or set
+        S is a group of nodes which belong to G, for which group in-degree
+        centrality is to be calculated.
+
+    Returns
+    -------
+    centrality : float
+        Group in-degree centrality of the group S.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is undirected.
+
+    NodeNotFound
+        If node(s) in S are not in G.
+
+    See Also
+    --------
+    degree_centrality
+    group_degree_centrality
+    group_out_degree_centrality
+
+    Notes
+    -----
+    The number of nodes in the group must be a maximum of n - 1 where `n`
+    is the total number of nodes in the graph.
+
+    `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph,
+    so for group in-degree centrality, the reverse graph is used.
+    """
+    return group_degree_centrality(G.reverse(), S)
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def group_out_degree_centrality(G, S):
+    """Compute the group out-degree centrality for a group of nodes.
+
+    Group out-degree centrality of a group of nodes $S$ is the fraction
+    of non-group members connected to group members by outgoing edges.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    S : list or set
+        S is a group of nodes which belong to G, for which group out-degree
+        centrality is to be calculated.
+
+    Returns
+    -------
+    centrality : float
+        Group out-degree centrality of the group S.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is undirected.
+
+    NodeNotFound
+        If node(s) in S are not in G.
+
+    See Also
+    --------
+    degree_centrality
+    group_degree_centrality
+    group_in_degree_centrality
+
+    Notes
+    -----
+    The number of nodes in the group must be a maximum of n - 1 where `n`
+    is the total number of nodes in the graph.
+
+    `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph,
+    so for group out-degree centrality, the graph itself is used.
+    """
+    return group_degree_centrality(G, S)
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/katz.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/katz.py
new file mode 100644
index 0000000000000000000000000000000000000000..d85ffd2dcd29be69b0362e69b82de0a99bd04965
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/katz.py
@@ -0,0 +1,330 @@
+"""Katz centrality."""
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["katz_centrality", "katz_centrality_numpy"]
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def katz_centrality(
+    G,
+    alpha=0.1,
+    beta=1.0,
+    max_iter=1000,
+    tol=1.0e-6,
+    nstart=None,
+    normalized=True,
+    weight=None,
+):
+    r"""Compute the Katz centrality for the nodes of the graph G.
+
+    Katz centrality computes the centrality for a node based on the centrality
+    of its neighbors. It is a generalization of the eigenvector centrality. The
+    Katz centrality for node $i$ is
+
+    .. math::
+
+        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
+
+    where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
+
+    The parameter $\beta$ controls the initial centrality and
+
+    .. math::
+
+        \alpha < \frac{1}{\lambda_{\max}}.
+
+    Katz centrality computes the relative influence of a node within a
+    network by measuring the number of the immediate neighbors (first
+    degree nodes) and also all other nodes in the network that connect
+    to the node under consideration through these immediate neighbors.
+
+    Extra weight can be provided to immediate neighbors through the
+    parameter $\beta$. Connections made with distant neighbors
+    are, however, penalized by an attenuation factor $\alpha$ which
+    should be strictly less than the inverse of the largest eigenvalue of the
+    adjacency matrix in order for the Katz centrality to be computed
+    correctly. More information is provided in [1]_.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    alpha : float, optional (default=0.1)
+        Attenuation factor
+
+    beta : scalar or dictionary, optional (default=1.0)
+        Weight attributed to the immediate neighborhood. If not a scalar, the
+        dictionary must have a value for every node.
+
+    max_iter : integer, optional (default=1000)
+        Maximum number of iterations in power method.
+
+    tol : float, optional (default=1.0e-6)
+        Error tolerance used to check convergence in power method iteration.
+
+    nstart : dictionary, optional
+        Starting value of Katz iteration for each node.
+
+    normalized : bool, optional (default=True)
+        If True normalize the resulting values.
+
+    weight : None or string, optional (default=None)
+        If None, all edge weights are considered equal.
+        Otherwise holds the name of the edge attribute used as weight.
+        In this measure the weight is interpreted as the connection strength.
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with Katz centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+        If the parameter `beta` is not a scalar but lacks a value for at least
+        one node
+
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    Examples
+    --------
+    >>> import math
+    >>> G = nx.path_graph(4)
+    >>> phi = (1 + math.sqrt(5)) / 2.0  # largest eigenvalue of adj matrix
+    >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)
+    >>> for n, c in sorted(centrality.items()):
+    ...     print(f"{n} {c:.2f}")
+    0 0.37
+    1 0.60
+    2 0.60
+    3 0.37
+
+    See Also
+    --------
+    katz_centrality_numpy
+    eigenvector_centrality
+    eigenvector_centrality_numpy
+    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
+    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
+
+    Notes
+    -----
+    Katz centrality was introduced by [2]_.
+
+    This algorithm uses the power method to find the eigenvector
+    corresponding to the largest eigenvalue of the adjacency matrix of ``G``.
+    The parameter ``alpha`` should be strictly less than the inverse of the largest
+    eigenvalue of the adjacency matrix for the algorithm to converge.
+    You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$, the largest
+    eigenvalue of the adjacency matrix.
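+
+    For example, a convergent attenuation factor can be chosen just below this
+    bound (an illustrative sketch; any ``alpha`` strictly below
+    ``1 / lambda_max`` converges):
+
+    >>> G = nx.path_graph(4)
+    >>> lambda_max = max(e.real for e in nx.adjacency_spectrum(G))
+    >>> centrality = nx.katz_centrality(G, alpha=0.9 / lambda_max)
+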
+    The iteration will stop after ``max_iter`` iterations or an error tolerance of
+    ``number_of_nodes(G) * tol`` has been reached.
+
+    For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
+    Katz centrality approaches the results for eigenvector centrality.
+
+    For directed graphs this finds "left" eigenvectors which corresponds
+    to the in-edges in the graph. For out-edges Katz centrality,
+    first reverse the graph with ``G.reverse()``.
+
+    References
+    ----------
+    .. [1] Mark E. J. Newman:
+       Networks: An Introduction.
+       Oxford University Press, USA, 2010, p. 720.
+    .. [2] Leo Katz:
+       A New Status Index Derived from Sociometric Index.
+       Psychometrika 18(1):39–43, 1953
+       https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
+    """
+    if len(G) == 0:
+        return {}
+
+    nnodes = G.number_of_nodes()
+
+    if nstart is None:
+        # choose starting vector with entries of 0
+        x = {n: 0 for n in G}
+    else:
+        x = nstart
+
+    try:
+        b = dict.fromkeys(G, float(beta))
+    except (TypeError, ValueError, AttributeError) as err:
+        b = beta
+        if set(beta) != set(G):
+            raise nx.NetworkXError(
+                "beta dictionary must have a value for every node"
+            ) from err
+
+    # make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = dict.fromkeys(xlast, 0)
+        # do the multiplication y^T = Alpha * x^T A + Beta
+        for n in x:
+            for nbr in G[n]:
+                x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
+        for n in x:
+            x[n] = alpha * x[n] + b[n]
+
+        # check convergence
+        error = sum(abs(x[n] - xlast[n]) for n in x)
+        if error < nnodes * tol:
+            if normalized:
+                # normalize vector
+                try:
+                    s = 1.0 / math.hypot(*x.values())
+                except ZeroDivisionError:
+                    s = 1.0
+            else:
+                s = 1
+            for n in x:
+                x[n] *= s
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):
+    r"""Compute the Katz centrality for the graph G.
+
+    Katz centrality computes the centrality for a node based on the centrality
+    of its neighbors. It is a generalization of the eigenvector centrality. The
+    Katz centrality for node $i$ is
+
+    .. math::
+
+        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
+
+    where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
+
+    The parameter $\beta$ controls the initial centrality and
+
+    .. math::
+
+        \alpha < \frac{1}{\lambda_{\max}}.
+
+    Katz centrality computes the relative influence of a node within a
+    network by measuring the number of the immediate neighbors (first
+    degree nodes) and also all other nodes in the network that connect
+    to the node under consideration through these immediate neighbors.
+
+    Extra weight can be provided to immediate neighbors through the
+    parameter $\beta$. Connections made with distant neighbors
+    are, however, penalized by an attenuation factor $\alpha$ which
+    should be strictly less than the inverse of the largest eigenvalue of the
+    adjacency matrix in order for the Katz centrality to be computed
+    correctly. More information is provided in [1]_.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    alpha : float
+        Attenuation factor
+
+    beta : scalar or dictionary, optional (default=1.0)
+        Weight attributed to the immediate neighborhood. If not a scalar, the
+        dictionary must have a value for every node.
+
+    normalized : bool
+        If True normalize the resulting values.
+
+    weight : None or string, optional
+        If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. + + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality_numpy(G, 1 / phi) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality + eigenvector_centrality_numpy + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses a direct linear solver to solve the above equation. + The parameter ``alpha`` should be strictly less than the inverse of largest + eigenvalue of the adjacency matrix for there to be a solution. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest + eigenvalue of the adjacency matrix. + + For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$, + Katz centrality approaches the results for eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which corresponds + to the in-edges in the graph. For out-edges Katz centrality, + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 173. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. 
+ Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + import numpy as np + + if len(G) == 0: + return {} + try: + nodelist = beta.keys() + if set(nodelist) != set(G): + raise nx.NetworkXError("beta dictionary must have a value for every node") + b = np.array(list(beta.values()), dtype=float) + except AttributeError: + nodelist = list(G) + try: + b = np.ones((len(nodelist), 1)) * beta + except (TypeError, ValueError, AttributeError) as err: + raise nx.NetworkXError("beta must be a number") from err + + A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T + n = A.shape[0] + centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze() + + # Normalize: rely on truediv to cast to float, then tolist to make Python numbers + norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1 + return dict(zip(nodelist, (centrality / norm).tolist())) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/second_order.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/second_order.py new file mode 100644 index 0000000000000000000000000000000000000000..35583cd63e55d14c0c389040cbdeab39b27d1bf9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/second_order.py @@ -0,0 +1,141 @@ +"""Copyright (c) 2015 – Thomson Licensing, SAS + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +* Neither the name of Thomson Licensing, or Technicolor, nor the names +of its contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com) + +__all__ = ["second_order_centrality"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight") +def second_order_centrality(G, weight="weight"): + """Compute the second order centrality for nodes of G. 
+
+    The second order centrality of a given node is the standard deviation of
+    the return times to that node of a perpetual random walk on G.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX connected and undirected graph.
+
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary keyed by node with second order centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.star_graph(10)
+    >>> soc = nx.second_order_centrality(G)
+    >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0])  # pick first id
+    0
+
+    Raises
+    ------
+    NetworkXException
+        If the graph G is empty, not connected, or has negative weights.
+
+    See Also
+    --------
+    betweenness_centrality
+
+    Notes
+    -----
+    Lower values of second order centrality indicate higher centrality.
+
+    The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_.
+
+    This code implements the analytical version of the algorithm, i.e.,
+    there is no simulation of a random walk process involved. The random walk
+    is here unbiased (corresponding to eq 6 of the paper [1]_), thus the
+    centrality values are the standard deviations for random walk return times
+    on the transformed input graph G (equal in-degree at each node, obtained
+    by adding self-loops).
+
+    The complexity of this implementation, made to run locally on a single
+    machine, is O(n^3), with n the size of G, which makes it viable only for
+    small graphs.
+
+    References
+    ----------
+    .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan
+       "Second order centrality: Distributed assessment of nodes criticity in
+       complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
+    """
+    import numpy as np
+
+    n = len(G)
+
+    if n == 0:
+        raise nx.NetworkXException("Empty graph.")
+    if not nx.is_connected(G):
+        raise nx.NetworkXException("Non connected graph.")
+    if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)):
+        raise nx.NetworkXException("Graph has negative edge weights.")
+
+    # balancing G for Metropolis-Hastings random walks
+    G = nx.DiGraph(G)
+    in_deg = dict(G.in_degree(weight=weight))
+    d_max = max(in_deg.values())
+    for i, deg in in_deg.items():
+        if deg < d_max:
+            G.add_edge(i, i, weight=d_max - deg)
+
+    P = nx.to_numpy_array(G)
+    P /= P.sum(axis=1)[:, np.newaxis]  # to transition probability matrix
+
+    def _Qj(P, j):
+        P = P.copy()
+        P[:, j] = 0
+        return P
+
+    M = np.empty([n, n])
+
+    for i in range(n):
+        M[:, i] = np.linalg.solve(
+            np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0]
+        )  # eq 3
+
+    return dict(
+        zip(
+            G.nodes,
+            (float(np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1))) for i in range(n)),
+        )
+    )  # eq 6
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/subgraph_alg.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/subgraph_alg.py
new file mode 100644
index 0000000000000000000000000000000000000000..29a284c547c54999daefe05750934a6205adb24e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/subgraph_alg.py
@@ -0,0 +1,339 @@
+"""
+Subgraph centrality and communicability betweenness.
+""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "subgraph_centrality_exp", + "subgraph_centrality", + "communicability_betweenness_centrality", + "estrada_index", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def subgraph_centrality_exp(G): + r"""Returns the subgraph centrality for each node of G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes:dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm exponentiates the adjacency matrix. + + The subgraph centrality of a node `u` in G can be found using + the matrix exponential of the adjacency matrix of G [1]_, + + .. math:: + + SC(u)=(e^A)_{uu} . + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality_exp(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + """ + # alternative implementation that calculates the matrix exponential + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + expA = sp.linalg.expm(A) + # convert diagonal to dictionary keyed by node + sc = dict(zip(nodelist, map(float, expA.diagonal()))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def subgraph_centrality(G): + r"""Returns subgraph centrality for each node in G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality_exp: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm computes eigenvalues and eigenvectors + of the adjacency matrix. + + Subgraph centrality of a node `u` in G can be found using + a spectral decomposition of the adjacency matrix [1]_, + + .. math:: + + SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}}, + + where `v_j` is an eigenvector of the adjacency matrix `A` of G + corresponding to the eigenvalue `\lambda_j`. + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... 
(2, 8),
+    ...         (3, 4),
+    ...         (3, 6),
+    ...         (4, 5),
+    ...         (4, 7),
+    ...         (5, 6),
+    ...         (6, 7),
+    ...         (7, 8),
+    ...     ]
+    ... )
+    >>> sc = nx.subgraph_centrality(G)
+    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
+    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
+       "Subgraph centrality in complex networks",
+       Physical Review E 71, 056103 (2005).
+       https://arxiv.org/abs/cond-mat/0504730
+
+    """
+    import numpy as np
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[np.nonzero(A)] = 1
+    w, v = np.linalg.eigh(A)
+    vsquare = np.array(v) ** 2
+    expw = np.exp(w)
+    xg = vsquare @ expw
+    # convert vector to dictionary keyed by node
+    sc = dict(zip(nodelist, map(float, xg)))
+    return sc
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def communicability_betweenness_centrality(G):
+    r"""Returns subgraph communicability for all pairs of nodes in G.
+
+    Communicability betweenness measure makes use of the number of walks
+    connecting every pair of nodes as the basis of a betweenness centrality
+    measure.
+
+    Parameters
+    ----------
+    G: graph
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with communicability betweenness as the value.
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not undirected and simple.
+
+    Notes
+    -----
+    Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
+    and `A` denote the adjacency matrix of `G`.
+
+    Let `G(r)=(V,E(r))` be the graph resulting from
+    removing all edges connected to node `r` but not the node itself.
+
+    The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros
+    only in row and column `r`.
+
+    The subgraph betweenness of a node `r` is [1]_
+
+    .. math::
+
+        \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
+        p\neq q, q\neq r,
+
+    where
+    `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks
+    involving node r,
+    `G_{pq}=(e^{A})_{pq}` is the number of walks starting
+    at node `p` and ending at node `q`,
+    and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
+    number of terms in the sum.
+
+    The resulting `\omega_{r}` takes values between zero and one.
+    The lower bound cannot be attained for a connected
+    graph, and the upper bound is attained in the star graph.
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
+       "Communicability Betweenness in Complex Networks"
+       Physica A 388 (2009) 764-774.
+       https://arxiv.org/abs/0905.4102
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
+    >>> cbc = nx.communicability_betweenness_centrality(G)
+    >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)])
+    ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03']
+    """
+    import numpy as np
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    n = len(nodelist)
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[np.nonzero(A)] = 1
+    expA = sp.linalg.expm(A)
+    mapping = dict(zip(nodelist, range(n)))
+    cbc = {}
+    for v in G:
+        # remove row and col of node v
+        i = mapping[v]
+        row = A[i, :].copy()
+        col = A[:, i].copy()
+        A[i, :] = 0
+        A[:, i] = 0
+        B = (expA - sp.linalg.expm(A)) / expA
+        # sum with row/col of node v and diag set to zero
+        B[i, :] = 0
+        B[:, i] = 0
+        B -= np.diag(np.diag(B))
+        cbc[v] = float(B.sum())
+        # put row and col back
+        A[i, :] = row
+        A[:, i] = col
+    # rescale when more than two nodes
+    order = len(cbc)
+    if order > 2:
+        scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
+        cbc = {node: value * scale for node, value in cbc.items()}
+    return cbc
+
+
+@nx._dispatchable
+def estrada_index(G):
+    r"""Returns the Estrada index of the graph G.
+
+    The Estrada Index is a topological index of folding or 3D "compactness" ([1]_).
+
+    Parameters
+    ----------
+    G: graph
+
+    Returns
+    -------
+    estrada index: float
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not undirected and simple.
+
+    Notes
+    -----
+    Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
+    `\lambda_{1}\leq\lambda_{2}\leq\cdots\leq\lambda_{n}`
+    be a non-decreasing ordering of the eigenvalues of its adjacency
+    matrix `A`. The Estrada index is ([1]_, [2]_)
+
+    .. math::
+        EE(G)=\sum_{j=1}^n e^{\lambda _j}.
+
+    References
+    ----------
+    .. [1] E. Estrada, "Characterization of 3D molecular structure",
+       Chem. Phys. Lett. 319, 713 (2000).
+       https://doi.org/10.1016/S0009-2614(00)00158-5
+    .. [2] José Antonio de la Peña, Ivan Gutman, Juan Rada,
+       "Estimating the Estrada index",
+       Linear Algebra and its Applications. 427, 1 (2007).
+ https://doi.org/10.1016/j.laa.2007.06.020 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> ei = nx.estrada_index(G) + >>> print(f"{ei:0.5}") + 20.55 + """ + return sum(subgraph_centrality(G).values()) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3d4385c9b266975140d49b739d09fbd449d8a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py @@ -0,0 +1,197 @@ +import pytest + +import networkx as nx +from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + b_answer = {0: 0, 1: 2, 2: 2, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def 
test_solvers2(self): + """Betweenness centrality: alternate solvers""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestApproximateFlowBetweennessCentrality: + def test_K4_normalized(self): + "Approximate current-flow betweenness centrality: K4 normalized" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_K4(self): + "Approximate current-flow betweenness centrality: K4" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2) + + def test_star(self): + "Approximate current-flow betweenness centrality: star" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_grid(self): + "Approximate current-flow betweenness centrality: 2d grid" + G = nx.grid_2d_graph(4, 4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_seed(self): + G = nx.complete_graph(4) + b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=0.1) + + def test_solvers(self): + "Approximate current-flow betweenness centrality: solvers" + G = nx.complete_graph(4) + epsilon = 0.1 + for solver in ["full", "lu", "cg"]: + b = approximate_cfbc( + G, normalized=False, solver=solver, epsilon=0.5 * epsilon + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon) + + def test_lower_kmax(self): + G = nx.complete_graph(4) + with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"): + nx.approximate_current_flow_betweenness_centrality(G, kmax=4) + + +class TestWeightedFlowBetweennessCentrality: + pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=True) + b_answer = dict.fromkeys(G.edges(), 0.25) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4_normalized(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = dict.fromkeys(G.edges(), 0.75) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge flow betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 
3): 1.25} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + +@pytest.mark.parametrize( + "centrality_func", + ( + nx.current_flow_betweenness_centrality, + nx.edge_current_flow_betweenness_centrality, + nx.approximate_current_flow_betweenness_centrality, + ), +) +def test_unconnected_graphs_betweenness_centrality(centrality_func): + G = nx.Graph([(1, 2), (3, 4)]) + G.add_node(5) + with pytest.raises(nx.NetworkXError, match="Graph not connected"): + centrality_func(G) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..05de1c43659a44f2dbf45368bf2ee552dd61dd78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py @@ -0,0 +1,73 @@ +import networkx as nx + + +def small_ego_G(): + """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf""" + edges = [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("b", "f"), + ("c", "d"), + ("c", "f"), + ("c", "h"), + ("d", "f"), + ("e", "f"), + ("f", "h"), + ("h", "j"), + ("h", "k"), + ("i", "j"), + ("i", "k"), + ("j", "k"), + ("u", "a"), + ("u", "b"), + ("u", "c"), + ("u", "d"), + ("u", "e"), + ("u", "f"), + ("u", "g"), + ("u", "h"), + ("u", "i"), + ("u", "j"), + ("u", "k"), + ] + G = nx.Graph() + G.add_edges_from(edges) + + return G + + +class TestDispersion: + def test_article(self): + """our algorithm matches article's""" + G = small_ego_G() + disp_uh = nx.dispersion(G, "u", "h", normalized=False) + disp_ub = nx.dispersion(G, "u", "b", normalized=False) + assert disp_uh == 4 + assert disp_ub == 1 + + def test_results_length(self): + """there is a result for every node""" + G = small_ego_G() + disp = nx.dispersion(G) + disp_Gu = nx.dispersion(G, "u") + disp_uv = nx.dispersion(G, "u", "h") + assert len(disp) == len(G) + assert len(disp_Gu) == len(G) - 1 + assert isinstance(disp_uv, float) + + def test_dispersion_v_only(self): + G = small_ego_G() + disp_G_h = nx.dispersion(G, v="h", normalized=False) + disp_G_h_normalized = nx.dispersion(G, v="h", normalized=True) + assert disp_G_h == {"c": 0, "f": 0, "j": 0, "k": 0, "u": 4} + assert disp_G_h_normalized == {"c": 0.0, "f": 0.0, "j": 0.0, "k": 0.0, "u": 1.0} + + def test_impossible_things(self): + G = nx.karate_club_graph() + disp = nx.dispersion(G) + for u in disp: + for v in disp[u]: + assert disp[u][v] >= 0 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_group.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_group.py new file mode 100644 index 0000000000000000000000000000000000000000..3f5559dcd73a268c28b513678b1fe3dd058220cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_group.py @@ -0,0 +1,278 @@ +""" +Tests for Group Centrality Measures +""" + + +import pytest + +import networkx as nx + + +class TestGroupBetweennessCentrality: + def test_group_betweenness_single_node(self): + """ + Group betweenness centrality for single node 
group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=False + ) + b_answer = 3.0 + assert b == b_answer + + def test_group_betweenness_with_endpoints(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=True + ) + b_answer = 7.0 + assert b == b_answer + + def test_group_betweenness_normalized(self): + """ + Group betweenness centrality for group with more than + 1 node and normalized + """ + G = nx.path_graph(5) + C = [1, 3] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=True, endpoints=False + ) + b_answer = 1.0 + assert b == b_answer + + def test_two_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(7) + C = [[0, 1, 6], [0, 1, 5]] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = [0.0, 3.0] + assert b == b_answer + + def test_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(6) + C = [0, 1, 5] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_disconnected_graph(self): + """ + Group betweenness centrality in a disconnected graph + """ + G = nx.path_graph(5) + G.remove_edge(0, 1) + C = [1] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + C = [1, 2] + b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False) + b_answer = 5.0 + assert b == b_answer + + +class TestProminentGroup: + np = pytest.importorskip("numpy") + pd = pytest.importorskip("pandas") + + def test_prominent_group_single_node(self): + """ + Prominent group for single node + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, endpoints=False) + b_answer, g_answer = 4.0, [2] + assert b == b_answer and g == g_answer + + def test_prominent_group_with_c(self): + """ + Prominent group without some nodes + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, C=[2]) + b_answer, g_answer = 3.0, [1] + assert b == b_answer and g == g_answer + + def test_prominent_group_normalized_endpoints(self): + """ + Prominent group with normalized result, with endpoints + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True) + b_answer, g_answer = 1.7, [2, 5] + assert b == b_answer and g == g_answer + + def test_prominent_group_disconnected_graph(self): + """ + Prominent group of disconnected graph + """ + G = nx.path_graph(6) + G.remove_edge(0, 1) + k = 1 + b, g = nx.prominent_group(G, k, weight=None, normalized=False) + b_answer, g_answer = 4.0, [3] + assert b == b_answer and g == 
g_answer + + def test_prominent_group_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.prominent_group(nx.path_graph(5), 1, C=[10]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + k = 2 + b, g = nx.prominent_group(G, k, weight="weight", normalized=False) + b_answer, g_answer = 5.0, [1, 2] + assert b == b_answer and g == g_answer + + def test_prominent_group_greedy_algorithm(self): + """ + Group betweenness centrality in a greedy algorithm + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True, greedy=True) + b_answer, g_answer = 1.7, [6, 3] + assert b == b_answer and g == g_answer + + +class TestGroupClosenessCentrality: + def test_group_closeness_single_node(self): + """ + Group closeness centrality for a single node group + """ + G = nx.path_graph(5) + c = nx.group_closeness_centrality(G, [1]) + c_answer = nx.closeness_centrality(G, 1) + assert c == c_answer + + def test_group_closeness_disconnected(self): + """ + Group closeness centrality for a disconnected graph + """ + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + c = nx.group_closeness_centrality(G, [1, 2]) + c_answer = 0 + assert c == c_answer + + def test_group_closeness_multiple_node(self): + """ + Group closeness centrality for a group with more than + 1 node + """ + G = nx.path_graph(4) + c = nx.group_closeness_centrality(G, [1, 2]) + c_answer = 1 + assert c == c_answer + + def test_group_closeness_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_closeness_centrality(nx.path_graph(5), [6, 7, 8]) + + +class TestGroupDegreeCentrality: + def test_group_degree_centrality_single_node(self): + """ + Group degree centrality for a single node group + """ + G = nx.path_graph(4) + d = nx.group_degree_centrality(G, [1]) + d_answer = nx.degree_centrality(G)[1] + assert d == d_answer + + def test_group_degree_centrality_multiple_node(self): + """ + Group degree centrality for group with more than + 1 node + """ + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_in_degree_centrality(self): + """ + Group in-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_in_degree_centrality(G, [1, 2]) + d_answer = 0 + assert d == d_answer + + def test_group_out_degree_centrality(self): + """ + Group out-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_out_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_degree_centrality_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NetworkXError + """ + with pytest.raises(nx.NetworkXError): + 
nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8]) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..21aa28b0b7c155078ab9c1a25e14d9aafa65683d --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py @@ -0,0 +1,221 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + + +def test_laplacian_centrality_null_graph(): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept): + d = nx.laplacian_centrality(G, normalized=False) + + +def test_laplacian_centrality_single_node(): + """See gh-6571""" + G = nx.empty_graph(1) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0} + with pytest.raises(ZeroDivisionError): + nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_unconnected_nodes(): + """laplacian_centrality on a unconnected node graph should return 0 + + For graphs without edges, the Laplacian energy is 0 and is unchanged with + node removal, so:: + + LC(v) = LE(G) - LE(G - v) = 0 - 0 = 0 + """ + G = nx.empty_graph(3) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0, 1: 0, 2: 0} + + +def test_laplacian_centrality_empty_graph(): + G = nx.empty_graph(3) + with pytest.raises(ZeroDivisionError): + d = nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_E(): + E = nx.Graph() + E.add_weighted_edges_from( + [(0, 1, 4), (4, 5, 1), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2)] + ) + d = nx.laplacian_centrality(E) + exact = { + 0: 0.700000, + 1: 0.900000, + 2: 0.280000, + 3: 0.220000, + 4: 0.260000, + 5: 0.040000, + } + + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 200 + dnn = nx.laplacian_centrality(E, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-7) + + # Check unweighted not-normalized version + duw_nn = nx.laplacian_centrality(E, normalized=False, weight=None) + print(duw_nn) + exact_uw_nn = { + 0: 18, + 1: 34, + 2: 18, + 3: 10, + 4: 16, + 5: 6, + } + for n, dc in duw_nn.items(): + assert exact_uw_nn[n] == pytest.approx(dc, abs=1e-7) + + # Check unweighted version + duw = nx.laplacian_centrality(E, weight=None) + full_energy = 42 + for n, dc in duw.items(): + assert exact_uw_nn[n] / full_energy == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_KC(): + KC = nx.karate_club_graph() + d = nx.laplacian_centrality(KC) + exact = { + 0: 0.2543593, + 1: 0.1724524, + 2: 0.2166053, + 3: 0.0964646, + 4: 0.0350344, + 5: 0.0571109, + 6: 0.0540713, + 7: 0.0788674, + 8: 0.1222204, + 9: 0.0217565, + 10: 0.0308751, + 11: 0.0215965, + 12: 0.0174372, + 13: 0.118861, + 14: 0.0366341, + 15: 0.0548712, + 16: 0.0172772, + 17: 0.0191969, + 18: 0.0225564, + 19: 0.0331147, + 20: 0.0279955, + 21: 0.0246361, + 22: 0.0382339, + 23: 0.1294193, + 24: 0.0227164, + 25: 0.0644697, + 26: 0.0281555, + 27: 0.075188, + 28: 0.0364742, + 29: 0.0707087, + 30: 0.0708687, + 31: 0.131019, + 32: 0.2370821, + 33: 0.3066709, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 12502 + dnn = nx.laplacian_centrality(KC, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * 
full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_K(): + K = nx.krackhardt_kite_graph() + d = nx.laplacian_centrality(K) + exact = { + 0: 0.3010753, + 1: 0.3010753, + 2: 0.2258065, + 3: 0.483871, + 4: 0.2258065, + 5: 0.3870968, + 6: 0.3870968, + 7: 0.1935484, + 8: 0.0752688, + 9: 0.0322581, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 186 + dnn = nx.laplacian_centrality(K, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_P3(): + P3 = nx.path_graph(3) + d = nx.laplacian_centrality(P3) + exact = {0: 0.6, 1: 1.0, 2: 0.6} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_K5(): + K5 = nx.complete_graph(5) + d = nx.laplacian_centrality(K5) + exact = {0: 0.52, 1: 0.52, 2: 0.52, 3: 0.52, 4: 0.52} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_FF(): + FF = nx.florentine_families_graph() + d = nx.laplacian_centrality(FF) + exact = { + "Acciaiuoli": 0.0804598, + "Medici": 0.4022989, + "Castellani": 0.1724138, + "Peruzzi": 0.183908, + "Strozzi": 0.2528736, + "Barbadori": 0.137931, + "Ridolfi": 0.2183908, + "Tornabuoni": 0.2183908, + "Albizzi": 0.1954023, + "Salviati": 0.1149425, + "Pazzi": 0.0344828, + "Bischeri": 0.1954023, + "Guadagni": 0.2298851, + "Ginori": 0.045977, + "Lamberteschi": 0.0574713, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_DG(): + DG = nx.DiGraph([(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 6), (5, 7), (5, 8)]) + d = nx.laplacian_centrality(DG) + exact = { + 0: 0.2123352, + 5: 0.515391, + 1: 0.2123352, + 2: 0.2123352, + 3: 0.2123352, + 4: 0.2123352, + 6: 0.2952031, + 7: 0.2952031, + 8: 0.2952031, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 9.50704 + dnn = nx.laplacian_centrality(DG, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-4) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_reaching.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_reaching.py new file mode 100644 index 0000000000000000000000000000000000000000..02ad8322cb6cf2a550afec0980ead8537abd55d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_reaching.py @@ -0,0 +1,117 @@ +"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module.""" +import pytest + +import networkx as nx + + +class TestGlobalReachingCentrality: + """Unit tests for the global reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.global_reaching_centrality(G, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.global_reaching_centrality(G, weight="weight") + + def test_directed_star(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.5 + assert grc(G) == 1 + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.global_reaching_centrality + assert 
grc(G, normalized=False, weight=None) == 0.25 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.375 + + def test_cycle_directed_unweighted(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_undirected_unweighted(self): + G = nx.Graph() + G.add_edge(1, 2) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_directed_weighted(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)]) + assert nx.global_reaching_centrality(G) == 0 + + def test_cycle_undirected_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False) == 0 + + def test_directed_weighted(self): + G = nx.DiGraph() + G.add_edge("A", "B", weight=5) + G.add_edge("B", "C", weight=1) + G.add_edge("B", "D", weight=0.25) + G.add_edge("D", "E", weight=1) + + denom = len(G) - 1 + A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom + B_local = sum([1, 0.25, 0.625]) / denom + C_local = 0 + D_local = sum([1]) / denom + E_local = 0 + + local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local] + max_local = max(local_reach_ctrs) + expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom + grc = nx.global_reaching_centrality + actual = grc(G, normalized=False, weight="weight") + assert expected == pytest.approx(actual, abs=1e-7) + + +class TestLocalReachingCentrality: + """Unit tests for the local reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + G.add_weighted_edges_from([(0, 1, 0)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.local_reaching_centrality + assert grc(G, 1, weight=None, normalized=False) == 0.75 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=False, weight="weight" + ) + assert centrality == 1.5 + + def test_undirected_weighted_normalized(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=True, weight="weight" + ) + assert centrality == 1.0 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3047866079fd9fe4cf43a6793cf160a0c0cdce --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py @@ -0,0 +1,82 @@ +""" +Tests for second order centrality. 
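+Second order centrality of a node is the standard deviation of the return +times to that node of a perpetual random walk on G; lower values indicate +more central nodes.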
+""" + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_empty(): + with pytest.raises(nx.NetworkXException): + G = nx.empty_graph() + nx.second_order_centrality(G) + + +def test_non_connected(): + with pytest.raises(nx.NetworkXException): + G = nx.Graph() + G.add_node(0) + G.add_node(1) + nx.second_order_centrality(G) + + +def test_non_negative_edge_weights(): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(2) + G.add_edge(0, 1, weight=-1) + nx.second_order_centrality(G) + + +def test_weight_attribute(): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 3.5)], weight="w") + expected = {0: 3.431, 1: 3.082, 2: 5.612} + b = nx.second_order_centrality(G, weight="w") + + for n in sorted(G): + assert b[n] == pytest.approx(expected[n], abs=1e-2) + + +def test_one_node_graph(): + """Second order centrality: single node""" + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + assert nx.second_order_centrality(G)[0] == 0 + + +def test_P3(): + """Second order centrality: line graph, as defined in paper""" + G = nx.path_graph(3) + b_answer = {0: 3.741, 1: 1.414, 2: 3.741} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_K3(): + """Second order centrality: complete graph, as defined in paper""" + G = nx.complete_graph(3) + b_answer = {0: 1.414, 1: 1.414, 2: 1.414} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_ring_graph(): + """Second order centrality: ring graph, as defined in paper""" + G = nx.cycle_graph(5) + b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_trophic.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_trophic.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d6813160eed6da3cd1fd0b254b7352bd1bd4ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/tests/test_trophic.py @@ -0,0 +1,302 @@ +"""Test trophic levels, trophic differences and trophic coherence +""" +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_trophic_levels(): + """Trivial example""" + G = nx.DiGraph() + G.add_edge("a", "b") + G.add_edge("b", "c") + + d = nx.trophic_levels(G) + assert d == {"a": 1, "b": 2, "c": 3} + + +def test_trophic_levels_levine(): + """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 
83, + 195-207 + """ + S = nx.DiGraph() + S.add_edge(1, 2, weight=1.0) + S.add_edge(1, 3, weight=0.2) + S.add_edge(1, 4, weight=0.8) + S.add_edge(2, 3, weight=0.2) + S.add_edge(2, 5, weight=0.3) + S.add_edge(4, 3, weight=0.6) + S.add_edge(4, 5, weight=0.7) + S.add_edge(5, 4, weight=0.2) + + # save copy for later, test intermediate implementation details first + S2 = S.copy() + + # drop nodes of in-degree zero + z = [nid for nid, d in S.in_degree if d == 0] + for nid in z: + S.remove_node(nid) + + # find adjacency matrix + q = nx.linalg.graphmatrix.adjacency_matrix(S).T + + # fmt: off + expected_q = np.array([ + [0, 0, 0., 0], + [0.2, 0, 0.6, 0], + [0, 0, 0, 0.2], + [0.3, 0, 0.7, 0] + ]) + # fmt: on + assert np.array_equal(q.todense(), expected_q) + + # must be square, size of number of nodes + assert len(q.shape) == 2 + assert q.shape[0] == q.shape[1] + assert q.shape[0] == len(S) + + nn = q.shape[0] + + i = np.eye(nn) + n = np.linalg.inv(i - q) + y = np.asarray(n) @ np.ones(nn) + + expected_y = np.array([1, 2.07906977, 1.46511628, 2.3255814]) + assert np.allclose(y, expected_y) + + expected_d = {1: 1, 2: 2, 3: 3.07906977, 4: 2.46511628, 5: 3.3255814} + + d = nx.trophic_levels(S2) + + for nid, level in d.items(): + expected_level = expected_d[nid] + assert expected_level == pytest.approx(level, abs=1e-7) + + +def test_trophic_levels_simple(): + matrix_a = np.array([[0, 0], [1, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + assert d[0] == pytest.approx(2, abs=1e-7) + assert d[1] == pytest.approx(1, abs=1e-7) + + +def test_trophic_levels_more_complex(): + # fmt: off + matrix = np.array([ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + expected_result = [1, 2, 3, 4] + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + # fmt: off + matrix = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + + expected_result = [1, 2, 2.5, 3.25] + print("Calculated result: ", d) + print("Expected Result: ", expected_result) + + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + +def test_trophic_levels_even_more_complex(): + # fmt: off + # Another, bigger matrix + matrix = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # Generated this linear system using pen and paper: + K = np.array([ + [1, 0, -1, 0, 0], + [0, 0.5, 0, -0.5, 0], + [0, 0, 1, 0, 0], + [0, -0.5, 0, 1, -0.5], + [0, 0, 0, 0, 1], + ]) + # fmt: on + result_1 = np.ravel(np.linalg.inv(K) @ np.ones(5)) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + result_2 = nx.trophic_levels(G) + + for ind in range(5): + assert result_1[ind] == pytest.approx(result_2[ind], abs=1e-7) + + +def test_trophic_levels_singular_matrix(): + """Should raise an error with graphs with only non-basal nodes""" + matrix = np.identity(4) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." 
+ ) + assert msg in str(e.value) + + +def test_trophic_levels_singular_with_basal(): + """Should fail to compute if there are any parts of the graph which are not + reachable from any basal node (with in-degree zero). + """ + G = nx.DiGraph() + # a has in-degree zero + G.add_edge("a", "b") + + # b is one level above a, c and d + G.add_edge("c", "b") + G.add_edge("d", "b") + + # c and d form a loop, neither is reachable from a + G.add_edge("c", "d") + G.add_edge("d", "c") + + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + # if self-loops are allowed, smaller example: + G = nx.DiGraph() + G.add_edge("a", "b") # a has in-degree zero + G.add_edge("c", "b") # b is one level above a and c + G.add_edge("c", "c") # c has a self-loop + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + +def test_trophic_differences(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + assert diffs[(0, 2)] == pytest.approx(1.5, abs=1e-7) + assert diffs[(1, 2)] == pytest.approx(0.5, abs=1e-7) + assert diffs[(1, 3)] == pytest.approx(1.25, abs=1e-7) + assert diffs[(2, 3)] == pytest.approx(0.75, abs=1e-7) + + +def test_trophic_incoherence_parameter_no_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 1, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 1] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the self-loops + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # no self-loops case + # fmt: off + matrix_d = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_d, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + +def test_trophic_incoherence_parameter_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + 
[1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(2, abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + # No self-loops here, so the result matches the cannibalism=False case + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/trophic.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/trophic.py new file mode 100644 index 0000000000000000000000000000000000000000..6d1ba960ba9a9cb44d4034ff169bcead5001fa0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/trophic.py @@ -0,0 +1,162 @@ +"""Trophic levels""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"] + + +@not_implemented_for("undirected") +@nx._dispatchable(edge_attrs="weight") +def trophic_levels(G, weight="weight"): + r"""Compute the trophic levels of nodes. + + The trophic level of a node $i$ is + + .. math:: + + s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j + + where $k^{in}_i$ is the in-degree of node $i$ + + .. math:: + + k^{in}_i = \sum_{j} a_{ij} + + and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention. + + These are calculated using the method outlined in Levine [1]_. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + nodes : dict + Dictionary of nodes with trophic level as the value. + + References + ---------- + .. [1] Stephen Levine (1980) J. theor. Biol. 83, 195-207 + """ + import numpy as np + + # find adjacency matrix + a = nx.adjacency_matrix(G, weight=weight).T.toarray() + + # drop rows/columns where in-degree is zero + rowsum = np.sum(a, axis=1) + p = a[rowsum != 0][:, rowsum != 0] + # normalise so sum of in-degree weights is 1 along each row + p = p / rowsum[rowsum != 0][:, np.newaxis] + + # calculate trophic levels + nn = p.shape[0] + i = np.eye(nn) + try: + n = np.linalg.inv(i - p) + except np.linalg.LinAlgError as err: + # LinAlgError is raised when there is a non-basal node + msg = ( + "Trophic levels are only defined for graphs where every " + + "node has a path from a basal node (basal nodes are nodes " + + "with no incoming edges)." + ) + raise nx.NetworkXError(msg) from err + y = n.sum(axis=1) + 1 + + levels = {} + + # all nodes with in-degree zero have trophic level == 1 + zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0) + for node_id in zero_node_ids: + levels[node_id] = 1 + + # all other nodes have levels as calculated + nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0) + for i, node_id in enumerate(nonzero_node_ids): + levels[node_id] = y.item(i) + + return levels + + +@not_implemented_for("undirected") +@nx._dispatchable(edge_attrs="weight") +def trophic_differences(G, weight="weight"): + r"""Compute the trophic differences of the edges of a directed graph. + + The trophic difference $x_{ij}$ for each edge is defined in Johnson et al. + [1]_ as: + + .. math:: + x_{ij} = s_j - s_i + + where $s_i$ is the trophic level of node $i$. 
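+ + In a maximally coherent graph every trophic difference equals one, so the + spread of these values is what the trophic incoherence parameter below + measures.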
+ + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + diffs : dict + Dictionary of edges with trophic differences as the value. + + References + ---------- + .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A. + Munoz (2014) PNAS "Trophic coherence determines food-web stability" + """ + levels = trophic_levels(G, weight=weight) + diffs = {} + for u, v in G.edges: + diffs[(u, v)] = levels[v] - levels[u] + return diffs + + +@not_implemented_for("undirected") +@nx._dispatchable(edge_attrs="weight") +def trophic_incoherence_parameter(G, weight="weight", cannibalism=False): + r"""Compute the trophic incoherence parameter of a graph. + + Trophic coherence is defined as the homogeneity of the distribution of + trophic distances: the more similar, the more coherent. This is measured by + the standard deviation of the trophic differences and referred to as the + trophic incoherence parameter $q$ by [1]. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + cannibalism: Boolean + If set to False, self edges are not considered in the calculation + + Returns + ------- + trophic_incoherence_parameter : float + The trophic coherence of a graph + + References + ---------- + .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A. + Munoz (2014) PNAS "Trophic coherence determines food-web stability" + """ + import numpy as np + + if cannibalism: + diffs = trophic_differences(G, weight=weight) + else: + # If no cannibalism, remove self-edges + self_loops = list(nx.selfloop_edges(G)) + if self_loops: + # Make a copy so we do not change G's edges in memory + G_2 = G.copy() + G_2.remove_edges_from(self_loops) + else: + # Avoid copy otherwise + G_2 = G + diffs = trophic_differences(G_2, weight=weight) + return float(np.std(list(diffs.values()))) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/voterank_alg.py b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/voterank_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..063dfdd64b3c99a35b8a96cef0e3b0161e1999d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/centrality/voterank_alg.py @@ -0,0 +1,94 @@ +"""Algorithm to select influential nodes in a graph using VoteRank.""" +import networkx as nx + +__all__ = ["voterank"] + + +@nx._dispatchable +def voterank(G, number_of_nodes=None): + """Select a list of influential nodes in a graph using VoteRank algorithm + + VoteRank [1]_ computes a ranking of the nodes in a graph G based on a + voting scheme. With VoteRank, all nodes vote for each of its in-neighbors + and the node with the highest votes is elected iteratively. The voting + ability of out-neighbors of elected nodes is decreased in subsequent turns. + + Parameters + ---------- + G : graph + A NetworkX graph. + + number_of_nodes : integer, optional + Number of ranked nodes to extract (default all nodes). + + Returns + ------- + voterank : list + Ordered list of computed seeds. + Only nodes with positive number of votes are returned. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)]) + >>> nx.voterank(G) + [0, 1] + + The algorithm can be used both for undirected and directed graphs. 
+ However, the directed version is different in two ways: + (i) nodes only vote for their in-neighbors and + (ii) only the voting ability of elected node and its out-neighbors are updated: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)]) + >>> nx.voterank(G) + [2, 3] + + Notes + ----- + Each edge is treated independently in case of multigraphs. + + References + ---------- + .. [1] Zhang, J.-X. et al. (2016). + Identifying a set of influential spreaders in complex networks. + Sci. Rep. 6, 27823; doi: 10.1038/srep27823. + """ + influential_nodes = [] + vote_rank = {} + if len(G) == 0: + return influential_nodes + if number_of_nodes is None or number_of_nodes > len(G): + number_of_nodes = len(G) + if G.is_directed(): + # For directed graphs compute average out-degree + avgDegree = sum(deg for _, deg in G.out_degree()) / len(G) + else: + # For undirected graphs compute average degree + avgDegree = sum(deg for _, deg in G.degree()) / len(G) + # step 1 - initiate all nodes to (0,1) (score, voting ability) + for n in G.nodes(): + vote_rank[n] = [0, 1] + # Repeat steps 1b to 4 until num_seeds are elected. + for _ in range(number_of_nodes): + # step 1b - reset rank + for n in G.nodes(): + vote_rank[n][0] = 0 + # step 2 - vote + for n, nbr in G.edges(): + # In directed graphs nodes only vote for their in-neighbors + vote_rank[n][0] += vote_rank[nbr][1] + if not G.is_directed(): + vote_rank[nbr][0] += vote_rank[n][1] + for n in influential_nodes: + vote_rank[n][0] = 0 + # step 3 - select top node + n = max(G.nodes, key=lambda x: vote_rank[x][0]) + if vote_rank[n][0] == 0: + return influential_nodes + influential_nodes.append(n) + # weaken the selected node + vote_rank[n] = [0, 0] + # step 4 - update voterank properties + for _, nbr in G.edges(n): + vote_rank[nbr][1] -= 1 / avgDegree + vote_rank[nbr][1] = max(vote_rank[nbr][1], 0) + return influential_nodes diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b10d66c270263b0e27943a0ac150ca0ad0362da Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..292b938d59c31ebcf2f55fd8165a1387b14b11dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49fbee47bfd3242bd2ad73d029da330b60c4bd6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cba3318f211e7a4127eb61e17992239a2d06626e Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1cb5bf0157356b5f335fbddcf26a5b56afe9945 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a680649877f3ed9e305c7e9793c952ed100127c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78f9b345a925f77156dacbcc6279851df2061ef4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/capacityscaling.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/capacityscaling.py new file mode 100644 index 0000000000000000000000000000000000000000..bf68565c5486bb7b60e7ddcf6089e448bc6ddef1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/capacityscaling.py @@ -0,0 +1,407 @@ +""" +Capacity scaling minimum cost flow algorithm. +""" + +__all__ = ["capacity_scaling"] + +from itertools import chain +from math import log + +import networkx as nx + +from ...utils import BinaryHeap, arbitrary_element, not_implemented_for + + +def _detect_unboundedness(R): + """Detect infinite-capacity negative cycles.""" + G = nx.DiGraph() + G.add_nodes_from(R) + + # Value simulating infinity. + inf = R.graph["inf"] + # True infinity. + f_inf = float("inf") + for u in R: + for v, e in R[u].items(): + # Compute the minimum weight of infinite-capacity (u, v) edges. + w = f_inf + for k, e in e.items(): + if e["capacity"] == inf: + w = min(w, e["weight"]) + if w != f_inf: + G.add_edge(u, v, weight=w) + + if nx.negative_edge_cycle(G): + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + "Min cost flow may be unbounded below." + ) + + +@not_implemented_for("undirected") +def _build_residual_network(G, demand, capacity, weight): + """Build a residual network and initialize a zero flow.""" + if sum(G.nodes[u].get(demand, 0) for u in G) != 0: + raise nx.NetworkXUnfeasible("Sum of the demands should be 0.") + + R = nx.MultiDiGraph() + R.add_nodes_from( + (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G + ) + + inf = float("inf") + # Detect selfloops with infinite capacities and negative weights. 
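+ # Circulating flow around such a loop lowers the cost without bound while + # using no capacity elsewhere, so no finite minimum exists.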
+ for u, v, e in nx.selfloop_edges(G, data=True): + if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf: + raise nx.NetworkXUnbounded( + "Negative cost cycle of infinite capacity found. " + "Min cost flow may be unbounded below." + ) + + # Extract edges with positive capacities. Self loops excluded. + if G.is_multigraph(): + edge_list = [ + (u, v, k, e) + for u, v, k, e in G.edges(data=True, keys=True) + if u != v and e.get(capacity, inf) > 0 + ] + else: + edge_list = [ + (u, v, 0, e) + for u, v, e in G.edges(data=True) + if u != v and e.get(capacity, inf) > 0 + ] + # Simulate infinity with the larger of the sum of absolute node imbalances + # the sum of finite edge capacities or any positive value if both sums are + # zero. This allows the infinite-capacity edges to be distinguished for + # unboundedness detection and directly participate in residual capacity + # calculation. + inf = ( + max( + sum(abs(R.nodes[u]["excess"]) for u in R), + 2 + * sum( + e[capacity] + for u, v, k, e in edge_list + if capacity in e and e[capacity] != inf + ), + ) + or 1 + ) + for u, v, k, e in edge_list: + r = min(e.get(capacity, inf), inf) + w = e.get(weight, 0) + # Add both (u, v) and (v, u) into the residual network marked with the + # original key. (key[1] == True) indicates the (u, v) is in the + # original network. + R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0) + R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0) + + # Record the value simulating infinity. + R.graph["inf"] = inf + + _detect_unboundedness(R) + + return R + + +def _build_flow_dict(G, R, capacity, weight): + """Build a flow dictionary from a residual network.""" + inf = float("inf") + flow_dict = {} + if G.is_multigraph(): + for u in G: + flow_dict[u] = {} + for v, es in G[u].items(): + flow_dict[u][v] = { + # Always saturate negative selfloops. + k: ( + 0 + if ( + u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0 + ) + else e[capacity] + ) + for k, e in es.items() + } + for v, es in R[u].items(): + if v in flow_dict[u]: + flow_dict[u][v].update( + (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0 + ) + else: + for u in G: + flow_dict[u] = { + # Always saturate negative selfloops. + v: ( + 0 + if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0) + else e[capacity] + ) + for v, e in G[u].items() + } + flow_dict[u].update( + (v, e["flow"]) + for v, es in R[u].items() + for e in es.values() + if e["flow"] > 0 + ) + return flow_dict + + +@nx._dispatchable( + node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0} +) +def capacity_scaling( + G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap +): + r"""Find a minimum cost flow satisfying all demands in digraph G. + + This is a capacity scaling successive shortest augmenting path algorithm. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph or MultiDiGraph on which a minimum cost flow satisfying all + demands is to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). 
Note that the sum of the + demands should be 0; otherwise the problem is not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + heap : class + Type of heap to be used in the algorithm. It should be a subclass of + :class:`MinHeap` or implement a compatible interface. + + If a stock heap implementation is to be used, :class:`BinaryHeap` is + recommended over :class:`PairingHeap` for Python implementations without + optimized attribute accesses (e.g., CPython) despite a slower + asymptotic running time. For Python implementations with optimized + attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better + performance. Default value: :class:`BinaryHeap`. + + Returns + ------- + flowCost : integer + Cost of a minimum cost flow satisfying all demands. + + flowDict : dictionary + If G is a digraph, a dict-of-dicts keyed by nodes such that + flowDict[u][v] is the flow on edge (u, v). + If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes + so that flowDict[u][v][key] is the flow on edge (u, v, key). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demands. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + Notes + ----- + This algorithm does not work if edge weights are floating-point numbers. + + See also + -------- + :meth:`network_simplex` + + Examples + -------- + A simple example of a min cost flow problem. + + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowCost, flowDict = nx.capacity_scaling(G) + >>> flowCost + 24 + >>> flowDict + {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}} + + It is possible to change the name of the attributes used for the + algorithm. + + >>> G = nx.DiGraph() + >>> G.add_node("p", spam=-4) + >>> G.add_node("q", spam=2) + >>> G.add_node("a", spam=-2) + >>> G.add_node("d", spam=-1) + >>> G.add_node("t", spam=2) + >>> G.add_node("w", spam=3) + >>> G.add_edge("p", "q", cost=7, vacancies=5) + >>> G.add_edge("p", "a", cost=1, vacancies=4) + >>> G.add_edge("q", "d", cost=2, vacancies=3) + >>> G.add_edge("t", "q", cost=1, vacancies=2) + >>> G.add_edge("a", "t", cost=2, vacancies=4) + >>> G.add_edge("d", "w", cost=3, vacancies=4) + >>> G.add_edge("t", "w", cost=4, vacancies=1) + >>> flowCost, flowDict = nx.capacity_scaling( + ... G, demand="spam", capacity="vacancies", weight="cost" + ... 
) + >>> flowCost + 37 + >>> flowDict + {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}} + """ + R = _build_residual_network(G, demand, capacity, weight) + + inf = float("inf") + # Account cost of negative selfloops. + flow_cost = sum( + 0 + if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0 + else e[capacity] * e[weight] + for u, v, e in nx.selfloop_edges(G, data=True) + ) + + # Determine the maximum edge capacity. + wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True)))) + if wmax == -inf: + # Residual network has no edges. + return flow_cost, _build_flow_dict(G, R, capacity, weight) + + R_nodes = R.nodes + R_succ = R.succ + + delta = 2 ** int(log(wmax, 2)) + while delta >= 1: + # Saturate Δ-residual edges with negative reduced costs to achieve + # Δ-optimality. + for u in R: + p_u = R_nodes[u]["potential"] + for v, es in R_succ[u].items(): + for k, e in es.items(): + flow = e["capacity"] - e["flow"] + if e["weight"] - p_u + R_nodes[v]["potential"] < 0: + flow = e["capacity"] - e["flow"] + if flow >= delta: + e["flow"] += flow + R_succ[v][u][(k[0], not k[1])]["flow"] -= flow + R_nodes[u]["excess"] -= flow + R_nodes[v]["excess"] += flow + # Determine the Δ-active nodes. + S = set() + T = set() + S_add = S.add + S_remove = S.remove + T_add = T.add + T_remove = T.remove + for u in R: + excess = R_nodes[u]["excess"] + if excess >= delta: + S_add(u) + elif excess <= -delta: + T_add(u) + # Repeatedly augment flow from S to T along shortest paths until + # Δ-feasibility is achieved. + while S and T: + s = arbitrary_element(S) + t = None + # Search for a shortest path in terms of reduce costs from s to + # any t in T in the Δ-residual network. + d = {} + pred = {s: None} + h = heap() + h_insert = h.insert + h_get = h.get + h_insert(s, 0) + while h: + u, d_u = h.pop() + d[u] = d_u + if u in T: + # Path found. + t = u + break + p_u = R_nodes[u]["potential"] + for v, es in R_succ[u].items(): + if v in d: + continue + wmin = inf + # Find the minimum-weighted (u, v) Δ-residual edge. + for k, e in es.items(): + if e["capacity"] - e["flow"] >= delta: + w = e["weight"] + if w < wmin: + wmin = w + kmin = k + emin = e + if wmin == inf: + continue + # Update the distance label of v. + d_v = d_u + wmin - p_u + R_nodes[v]["potential"] + if h_insert(v, d_v): + pred[v] = (u, kmin, emin) + if t is not None: + # Augment Δ units of flow from s to t. + while u != s: + v = u + u, k, e = pred[v] + e["flow"] += delta + R_succ[v][u][(k[0], not k[1])]["flow"] -= delta + # Account node excess and deficit. + R_nodes[s]["excess"] -= delta + R_nodes[t]["excess"] += delta + if R_nodes[s]["excess"] < delta: + S_remove(s) + if R_nodes[t]["excess"] > -delta: + T_remove(t) + # Update node potentials. + d_t = d[t] + for u, d_u in d.items(): + R_nodes[u]["potential"] -= d_u - d_t + else: + # Path not found. + S_remove(s) + delta //= 2 + + if any(R.nodes[u]["excess"] != 0 for u in R): + raise nx.NetworkXUnfeasible("No flow satisfying all demands.") + + # Calculate the flow cost. 
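+ # Each shipped unit appears with positive flow on exactly one forward + # residual edge (its reverse edge stores the negated flow), so summing + # flow * weight over edges with flow > 0 counts every unit exactly once.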
+ for u in R: + for v, es in R_succ[u].items(): + for e in es.values(): + flow = e["flow"] + if flow > 0: + flow_cost += flow * e["weight"] + + return flow_cost, _build_flow_dict(G, R, capacity, weight) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/edmondskarp.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/edmondskarp.py new file mode 100644 index 0000000000000000000000000000000000000000..50063268355ccc2e2ecbdf7f1a6704e7404475ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/edmondskarp.py @@ -0,0 +1,241 @@ +""" +Edmonds-Karp algorithm for maximum flow problems. +""" + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network + +__all__ = ["edmonds_karp"] + + +def edmonds_karp_core(R, s, t, cutoff): + """Implementation of the Edmonds-Karp algorithm.""" + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + inf = R.graph["inf"] + + def augment(path): + """Augment flow along a path from s to t.""" + # Determine the path residual capacity. + flow = inf + it = iter(path) + u = next(it) + for v in it: + attr = R_succ[u][v] + flow = min(flow, attr["capacity"] - attr["flow"]) + u = v + if flow * 2 > inf: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + # Augment flow along the path. + it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def bidirectional_bfs(): + """Bidirectional breadth-first search for an augmenting path.""" + pred = {s: None} + q_s = [s] + succ = {t: None} + q_t = [t] + while True: + q = [] + if len(q_s) <= len(q_t): + for u in q_s: + for v, attr in R_succ[u].items(): + if v not in pred and attr["flow"] < attr["capacity"]: + pred[v] = u + if v in succ: + return v, pred, succ + q.append(v) + if not q: + return None, None, None + q_s = q + else: + for u in q_t: + for v, attr in R_pred[u].items(): + if v not in succ and attr["flow"] < attr["capacity"]: + succ[v] = u + if v in pred: + return v, pred, succ + q.append(v) + if not q: + return None, None, None + q_t = q + + # Look for shortest augmenting paths using breadth-first search. + flow_value = 0 + while flow_value < cutoff: + v, pred, succ = bidirectional_bfs() + if pred is None: + break + path = [v] + # Trace a path from s to v. + u = v + while u != s: + u = pred[u] + path.append(u) + path.reverse() + # Trace a path from v to t. + u = v + while u != t: + u = succ[u] + path.append(u) + flow_value += augment(path) + + return flow_value + + +def edmonds_karp_impl(G, s, t, capacity, residual, cutoff): + """Implementation of the Edmonds-Karp algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + if cutoff is None: + cutoff = float("inf") + R.graph["flow_value"] = edmonds_karp_core(R, s, t, cutoff) + + return R + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def edmonds_karp( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + """Find a maximum single-commodity flow using the Edmonds-Karp algorithm. 
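+ + Augmenting paths are located with a breadth-first search grown from the + source and the sink simultaneously, expanding whichever frontier is + currently smaller and augmenting once the two searches meet.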
+ + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n m^2)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import edmonds_karp + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. 
+ + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = edmonds_karp(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = edmonds_karp_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "edmonds_karp" + nx._clear_cache(R) + return R diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/gomory_hu.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/gomory_hu.py new file mode 100644 index 0000000000000000000000000000000000000000..951abaeb517846910d8bb05446f4667a8bd7e869 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/gomory_hu.py @@ -0,0 +1,177 @@ +""" +Gomory-Hu tree of undirected Graphs. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +from .edmondskarp import edmonds_karp +from .utils import build_residual_network + +default_flow_func = edmonds_karp + +__all__ = ["gomory_hu_tree"] + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def gomory_hu_tree(G, capacity="capacity", flow_func=None): + r"""Returns the Gomory-Hu tree of an undirected graph G. + + A Gomory-Hu tree of an undirected graph with capacities is a + weighted tree that represents the minimum s-t cuts for all s-t + pairs in the graph. + + It only requires `n-1` minimum cut computations instead of the + obvious `n(n-1)/2`. The tree represents all s-t cuts as the + minimum cut value among any pair of nodes is the minimum edge + weight in the shortest path between the two nodes in the + Gomory-Hu tree. + + The Gomory-Hu tree also has the property that removing the + edge with the minimum weight in the shortest path between + any two nodes leaves two connected components that form + a partition of the nodes in G that defines the minimum s-t + cut. + + See Examples section below for details. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + Function to perform the underlying flow computations. Default value + :func:`edmonds_karp`. This function performs better in sparse graphs + with right tailed degree distributions. + :func:`shortest_augmenting_path` will perform better in denser + graphs. + + Returns + ------- + Tree : NetworkX graph + A NetworkX graph representing the Gomory-Hu tree of the input graph. + + Raises + ------ + NetworkXNotImplemented + Raised if the input graph is directed. + + NetworkXError + Raised if the input graph is an empty Graph. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nx.set_edge_attributes(G, 1, "capacity") + >>> T = nx.gomory_hu_tree(G) + >>> # The value of the minimum cut between any pair + ... # of nodes in G is the minimum edge weight in the + ... # shortest path between the two nodes in the + ... # Gomory-Hu tree. + ... def minimum_edge_weight_in_shortest_path(T, u, v): + ... 
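# the lightest edge on this tree path carries the u-v minimum cut value + ... 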
path = nx.shortest_path(T, u, v, weight="weight") + ... return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + >>> u, v = 0, 33 + >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> cut_value + 10 + >>> nx.minimum_cut_value(G, u, v) + 10 + >>> # The Gomory-Hu tree also has the property that removing the + ... # edge with the minimum weight in the shortest path between + ... # any two nodes leaves two connected components that form + ... # a partition of the nodes in G that defines the minimum s-t + ... # cut. + ... cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> T.remove_edge(*edge) + >>> U, V = list(nx.connected_components(T)) + >>> # Thus U and V form a partition that defines a minimum cut + ... # between u and v in G. You can compute the edge cut set, + ... # that is, the set of edges that if removed from G will + ... # disconnect u from v in G, with this information: + ... cutset = set() + >>> for x, nbrs in ((n, G[n]) for n in U): + ... cutset.update((x, y) for y in nbrs if y in V) + >>> # Because we have set the capacities of all edges to 1 + ... # the cutset contains ten edges + ... len(cutset) + 10 + >>> # You can use any maximum flow algorithm for the underlying + ... # flow computations using the argument flow_func + ... from networkx.algorithms import flow + >>> T = nx.gomory_hu_tree(G, flow_func=flow.boykov_kolmogorov) + >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v) + >>> cut_value + 10 + >>> nx.minimum_cut_value(G, u, v, flow_func=flow.boykov_kolmogorov) + 10 + + Notes + ----- + This implementation is based on Gusfield approach [1]_ to compute + Gomory-Hu trees, which does not require node contractions and has + the same computational complexity than the original method. + + See also + -------- + :func:`minimum_cut` + :func:`maximum_flow` + + References + ---------- + .. [1] Gusfield D: Very simple methods for all pairs network flow analysis. + SIAM J Comput 19(1):143-155, 1990. + + """ + if flow_func is None: + flow_func = default_flow_func + + if len(G) == 0: # empty graph + msg = "Empty Graph does not have a Gomory-Hu tree representation" + raise nx.NetworkXError(msg) + + # Start the tree as a star graph with an arbitrary node at the center + tree = {} + labels = {} + iter_nodes = iter(G) + root = next(iter_nodes) + for n in iter_nodes: + tree[n] = root + + # Reuse residual network + R = build_residual_network(G, capacity) + + # For all the leaves in the star graph tree (that is n-1 nodes). 
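+ # Gusfield's scheme: after each minimum cut, leaves that fall on the source + # side of the partition are re-hung under the current source, so the tree is + # built with n-1 max-flow computations and no node contractions.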
+ for source in tree: + # Find neighbor in the tree + target = tree[source] + # compute minimum cut + cut_value, partition = nx.minimum_cut( + G, source, target, capacity=capacity, flow_func=flow_func, residual=R + ) + labels[(source, target)] = cut_value + # Update the tree + # Source will always be in partition[0] and target in partition[1] + for node in partition[0]: + if node != source and node in tree and tree[node] == target: + tree[node] = source + labels[node, source] = labels.get((node, target), cut_value) + # + if target != root and tree[target] in partition[0]: + labels[source, tree[target]] = labels[target, tree[target]] + labels[target, source] = cut_value + tree[source] = tree[target] + tree[target] = source + + # Build the tree + T = nx.Graph() + T.add_nodes_from(G) + T.add_weighted_edges_from(((u, v, labels[u, v]) for u, v in tree.items())) + return T diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/maxflow.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/maxflow.py new file mode 100644 index 0000000000000000000000000000000000000000..96bca029f00481f61fb0dc886e8b4806ac439b3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/maxflow.py @@ -0,0 +1,601 @@ +""" +Maximum flow (and minimum cut) algorithms on capacitated graphs. +""" +import networkx as nx + +from .boykovkolmogorov import boykov_kolmogorov +from .dinitz_alg import dinitz +from .edmondskarp import edmonds_karp +from .preflowpush import preflow_push +from .shortestaugmentingpath import shortest_augmenting_path +from .utils import build_flow_dict + +# Define the default flow function for computing maximum flow. +default_flow_func = preflow_push + +__all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"] + + +@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find a maximum single-commodity flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + flow_dict : dict + A dictionary containing the value of the flow that went through + each edge. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. 
If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow_value` + :meth:`minimum_cut` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should support an optional boolean parameter value_only. When + True, it can optionally terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + maximum_flow returns both the value of the maximum flow and a + dictionary with all flows. + + >>> flow_value, flow_dict = nx.maximum_flow(G, "x", "y") + >>> flow_value + 3.0 + >>> print(flow_dict["x"]["b"]) + 1.0 + + You can also use alternative algorithms for computing the + maximum flow by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> flow_value == nx.maximum_flow(G, "x", "y", flow_func=shortest_augmenting_path)[0] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs) + flow_dict = build_flow_dict(flowG, R) + + return (R.graph["flow_value"], flow_dict) + + +@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find the value of maximum single-commodity flow. 
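+ + Only the flow value is computed here: the underlying algorithm is invoked + with value_only=True and no flow dictionary is built, which can be cheaper + than :meth:`maximum_flow`.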
+ + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or DiGraph, a source node, and a target node, + and return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should support an optional boolean parameter value_only. When + True, it may terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined.
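+ + As a small sketch of the keyword-argument forwarding described above + (``two_phase`` is a documented parameter of + :meth:`shortest_augmenting_path`, used here purely for illustration): + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> H = nx.DiGraph() + >>> H.add_edge(0, 1, capacity=2) + >>> H.add_edge(1, 2, capacity=1) + >>> nx.maximum_flow_value(H, 0, 2, flow_func=shortest_augmenting_path, two_phase=True) + 1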
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + maximum_flow_value computes only the value of the + maximum flow: + + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + + You can also use alternative algorithms for computing the + maximum flow by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> flow_value == nx.maximum_flow_value(G, "x", "y", flow_func=shortest_augmenting_path) + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + + return R.graph["flow_value"] + + +@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Compute the value and the node partition of a minimum (s, t)-cut. + + Use the max-flow min-cut theorem, i.e., the capacity of a minimum + capacity cut is equal to the flow value of a maximum flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or DiGraph, a source node, and a target node, + and return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + cut_value : integer, float + Value of the minimum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a minimum cut. + + Raises + ------ + NetworkXUnbounded + If the graph has a path of infinite capacity, all cuts have + infinite capacity and the function raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`maximum_flow_value` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`.
:samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should support an optional boolean parameter value_only. When + True, it may terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + minimum_cut computes both the value of the + minimum cut and the node partition: + + >>> cut_value, partition = nx.minimum_cut(G, "x", "y") + >>> reachable, non_reachable = partition + + 'partition' here is a tuple with the two sets of nodes that define + the minimum cut. You can compute the cut set of edges that induce + the minimum cut as follows: + + >>> cutset = set() + >>> for u, nbrs in ((n, G[n]) for n in reachable): + ... cutset.update((u, v) for v in nbrs if v in non_reachable) + >>> print(sorted(cutset)) + [('c', 'y'), ('x', 'b')] + >>> cut_value == sum(G.edges[u, v]["capacity"] for (u, v) in cutset) + True + + You can also use alternative algorithms for computing the + minimum cut by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> cut_value == nx.minimum_cut(G, "x", "y", flow_func=shortest_augmenting_path)[0] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + if kwargs.get("cutoff") is not None and flow_func is preflow_push: + raise nx.NetworkXError("cutoff should not be specified.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + # Remove saturated edges from the residual network + cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]] + R.remove_edges_from(cutset) + + # Then, the reachable and non-reachable nodes from the source in the + # residual network form the node partition that defines + # the minimum cut.
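+ # (The call below runs a reverse search toward _t: the keys of the + # returned distance dict are exactly the nodes that can still reach + # _t in the pruned residual network, i.e. the sink side of the cut; + # every remaining node belongs to the source side.)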
+ non_reachable = set(dict(nx.shortest_path_length(R, target=_t))) + partition = (set(flowG) - non_reachable, non_reachable) + # Finally, add the cutset edges back to the residual network to make + # sure that it is reusable. + if cutset is not None: + R.add_edges_from(cutset) + return (R.graph["flow_value"], partition) + + +@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Compute the value of a minimum (s, t)-cut. + + Use the max-flow min-cut theorem, i.e., the capacity of a minimum + capacity cut is equal to the flow value of a maximum flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or DiGraph, a source node, and a target node, + and return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + cut_value : integer, float + Value of the minimum cut. + + Raises + ------ + NetworkXUnbounded + If the graph has a path of infinite capacity, all cuts have + infinite capacity and the function raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`maximum_flow_value` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`.
Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Specific algorithms may store extra data in :samp:`R`. + + The function should support an optional boolean parameter value_only. When + True, it may terminate the algorithm as soon as the maximum flow + value and the minimum cut can be determined. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + + minimum_cut_value computes only the value of the + minimum cut: + + >>> cut_value = nx.minimum_cut_value(G, "x", "y") + >>> cut_value + 3.0 + + You can also use alternative algorithms for computing the + minimum cut by using the flow_func parameter. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> cut_value == nx.minimum_cut_value(G, "x", "y", flow_func=shortest_augmenting_path) + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + if kwargs.get("cutoff") is not None and flow_func is preflow_push: + raise nx.NetworkXError("cutoff should not be specified.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs) + + return R.graph["flow_value"] diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/mincost.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/mincost.py new file mode 100644 index 0000000000000000000000000000000000000000..2f9390d7a1c1e454ed7c2f8793d591b338115107 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/mincost.py @@ -0,0 +1,356 @@ +""" +Minimum cost flow algorithms on directed connected graphs. +""" + +__all__ = ["min_cost_flow_cost", "min_cost_flow", "cost_of_flow", "max_flow_min_cost"] + +import networkx as nx + + +@nx._dispatchable( + node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0} +) +def min_cost_flow_cost(G, demand="demand", capacity="capacity", weight="weight"): + r"""Find the cost of a minimum cost flow satisfying all demands in digraph G. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node wants to receive flow. A flow on + the digraph G satisfies all demands if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0; otherwise the problem is not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'.
+ + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowCost : integer, float + Cost of a minimum cost flow satisfying all demands. + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demands. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (e.g. 100). + + Examples + -------- + A simple example of a min cost flow problem. + + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowCost = nx.min_cost_flow_cost(G) + >>> flowCost + 24 + """ + return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[0] + + +@nx._dispatchable( + node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0} +) +def min_cost_flow(G, demand="demand", capacity="capacity", weight="weight"): + r"""Returns a minimum cost flow satisfying all demands in digraph G. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node wants to receive flow. A flow on + the digraph G satisfies all demands if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0; otherwise the problem is not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'.
+ + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow on edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demands. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (e.g. 100). + + Examples + -------- + A simple example of a min cost flow problem. + + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowDict = nx.min_cost_flow(G) + >>> flowDict + {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}} + """ + return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[1] + + +@nx._dispatchable(edge_attrs={"weight": 0}) +def cost_of_flow(G, flowDict, weight="weight"): + """Compute the cost of the flow given by flowDict on graph G. + + Note that this function does not check for the validity of the + flow flowDict. This function will fail if the graph G and the + flow don't have the same edge set. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which the flow given by flowDict is defined. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow on edge (u, v). + + Returns + ------- + cost : integer, float + The total cost of the flow. This is given by the sum over all + edges of the product of the edge's flow and the edge's weight. + + See also + -------- + max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (e.g. 100).
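+ + As a hedged sketch of that integer-scaling workaround (the factor 100 + and the toy graph below are purely illustrative): + + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-1) + >>> G.add_node("b", demand=1) + >>> G.add_edge("a", "b", weight=0.37, capacity=2) + >>> H = G.copy() + >>> for u, v, d in H.edges(data=True): + ...     d["weight"] = round(d["weight"] * 100) + >>> nx.min_cost_flow_cost(H) / 100 + 0.37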
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowDict = nx.min_cost_flow(G) + >>> flowDict + {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}} + >>> nx.cost_of_flow(G, flowDict) + 24 + """ + return sum((flowDict[u][v] * d.get(weight, 0) for u, v, d in G.edges(data=True))) + + +@nx._dispatchable(edge_attrs={"capacity": float("inf"), "weight": 0}) +def max_flow_min_cost(G, s, t, capacity="capacity", weight="weight"): + """Returns a maximum (s, t)-flow of minimum cost. + + G is a digraph with edge costs and capacities. There is a source + node s and a sink node t. This function finds a maximum flow from + s to t whose total cost is minimized. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a maximum flow of minimum cost is to be found. + + s: node label + Source of the flow. + + t: node label + Destination of the flow. + + capacity: string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight: string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowDict: dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow on edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnbounded + This exception is raised if there is an infinite capacity path + from s to t in G. In this case there is no maximum flow. This + exception is also raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + is unbounded below. + + See also + -------- + cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (e.g. 100). + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from( + ... [ + ... (1, 2, {"capacity": 12, "weight": 4}), + ... (1, 3, {"capacity": 20, "weight": 6}), + ... (2, 3, {"capacity": 6, "weight": -3}), + ... (2, 6, {"capacity": 14, "weight": 1}), + ... (3, 4, {"weight": 9}), + ... (3, 5, {"capacity": 10, "weight": 5}), + ... (4, 2, {"capacity": 19, "weight": 13}), + ... (4, 5, {"capacity": 4, "weight": 0}), + ... (5, 7, {"capacity": 28, "weight": 2}), + ... (6, 5, {"capacity": 11, "weight": 1}), + ... (6, 7, {"weight": 8}), + ... (7, 4, {"capacity": 6, "weight": 6}), + ... ] + ... ) + >>> mincostFlow = nx.max_flow_min_cost(G, 1, 7) + >>> mincost = nx.cost_of_flow(G, mincostFlow) + >>> mincost + 373 + >>> from networkx.algorithms.flow import maximum_flow + >>> maxFlow = maximum_flow(G, 1, 7)[1] + >>> nx.cost_of_flow(G, maxFlow) >= mincost + True + >>> mincostFlowValue = sum((mincostFlow[u][7] for u in G.predecessors(7))) - sum( + ...
(mincostFlow[7][v] for v in G.successors(7)) + ... ) + >>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7) + True + + """ + maxFlow = nx.maximum_flow_value(G, s, t, capacity=capacity) + H = nx.DiGraph(G) + H.add_node(s, demand=-maxFlow) + H.add_node(t, demand=maxFlow) + return min_cost_flow(H, capacity=capacity, weight=weight) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/networksimplex.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/networksimplex.py new file mode 100644 index 0000000000000000000000000000000000000000..a9822d968808eb0c7bb45794e13150ad659b311a --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/networksimplex.py @@ -0,0 +1,666 @@ +""" +Minimum cost flow algorithms on directed connected graphs. +""" + +__all__ = ["network_simplex"] + +from itertools import chain, islice, repeat +from math import ceil, sqrt + +import networkx as nx +from networkx.utils import not_implemented_for + + +class _DataEssentialsAndFunctions: + def __init__( + self, G, multigraph, demand="demand", capacity="capacity", weight="weight" + ): + # Number all nodes and edges and hereafter reference them using ONLY their numbers + self.node_list = list(G) # nodes + self.node_indices = {u: i for i, u in enumerate(self.node_list)} # node indices + self.node_demands = [ + G.nodes[u].get(demand, 0) for u in self.node_list + ] # node demands + + self.edge_sources = [] # edge sources + self.edge_targets = [] # edge targets + if multigraph: + self.edge_keys = [] # edge keys + self.edge_indices = {} # edge indices + self.edge_capacities = [] # edge capacities + self.edge_weights = [] # edge weights + + if not multigraph: + edges = G.edges(data=True) + else: + edges = G.edges(data=True, keys=True) + + inf = float("inf") + edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0) + for i, e in enumerate(edges): + self.edge_sources.append(self.node_indices[e[0]]) + self.edge_targets.append(self.node_indices[e[1]]) + if multigraph: + self.edge_keys.append(e[2]) + self.edge_indices[e[:-1]] = i + self.edge_capacities.append(e[-1].get(capacity, inf)) + self.edge_weights.append(e[-1].get(weight, 0)) + + # spanning tree specific data to be initialized + + self.edge_count = None # number of edges + self.edge_flow = None # edge flows + self.node_potentials = None # node potentials + self.parent = None # parent nodes + self.parent_edge = None # edges to parents + self.subtree_size = None # subtree sizes + self.next_node_dft = None # next nodes in depth-first thread + self.prev_node_dft = None # previous nodes in depth-first thread + self.last_descendent_dft = None # last descendants in depth-first thread + self._spanning_tree_initialized = ( + False # False until initialize_spanning_tree() is called + ) + + def initialize_spanning_tree(self, n, faux_inf): + self.edge_count = len(self.edge_indices) # number of edges + self.edge_flow = list( + chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands)) + ) # edge flows + self.node_potentials = [ + faux_inf if d <= 0 else -faux_inf for d in self.node_demands + ] # node potentials + self.parent = list(chain(repeat(-1, n), [None])) # parent nodes + self.parent_edge = list( + range(self.edge_count, self.edge_count + n) + ) # edges to parents + self.subtree_size = list(chain(repeat(1, n), [n + 1])) # subtree sizes + self.next_node_dft = list( + chain(range(1, n), [-1, 0]) + ) # next nodes in depth-first thread + self.prev_node_dft = list(range(-1, n)) # previous nodes in depth-first 
thread + self.last_descendent_dft = list( + chain(range(n), [n - 1]) + ) # last descendants in depth-first thread + self._spanning_tree_initialized = True # True only if all the assignments pass + + def find_apex(self, p, q): + """ + Find the lowest common ancestor of nodes p and q in the spanning tree. + """ + size_p = self.subtree_size[p] + size_q = self.subtree_size[q] + while True: + while size_p < size_q: + p = self.parent[p] + size_p = self.subtree_size[p] + while size_p > size_q: + q = self.parent[q] + size_q = self.subtree_size[q] + if size_p == size_q: + if p != q: + p = self.parent[p] + size_p = self.subtree_size[p] + q = self.parent[q] + size_q = self.subtree_size[q] + else: + return p + + def trace_path(self, p, w): + """ + Returns the nodes and edges on the path from node p to its ancestor w. + """ + Wn = [p] + We = [] + while p != w: + We.append(self.parent_edge[p]) + p = self.parent[p] + Wn.append(p) + return Wn, We + + def find_cycle(self, i, p, q): + """ + Returns the nodes and edges on the cycle containing edge i == (p, q) + when the latter is added to the spanning tree. + + The cycle is oriented in the direction from p to q. + """ + w = self.find_apex(p, q) + Wn, We = self.trace_path(p, w) + Wn.reverse() + We.reverse() + if We != [i]: + We.append(i) + WnR, WeR = self.trace_path(q, w) + del WnR[-1] + Wn += WnR + We += WeR + return Wn, We + + def augment_flow(self, Wn, We, f): + """ + Augment f units of flow along a cycle represented by Wn and We. + """ + for i, p in zip(We, Wn): + if self.edge_sources[i] == p: + self.edge_flow[i] += f + else: + self.edge_flow[i] -= f + + def trace_subtree(self, p): + """ + Yield the nodes in the subtree rooted at a node p. + """ + yield p + l = self.last_descendent_dft[p] + while p != l: + p = self.next_node_dft[p] + yield p + + def remove_edge(self, s, t): + """ + Remove an edge (s, t) where parent[t] == s from the spanning tree. + """ + size_t = self.subtree_size[t] + prev_t = self.prev_node_dft[t] + last_t = self.last_descendent_dft[t] + next_last_t = self.next_node_dft[last_t] + # Remove (s, t). + self.parent[t] = None + self.parent_edge[t] = None + # Remove the subtree rooted at t from the depth-first thread. + self.next_node_dft[prev_t] = next_last_t + self.prev_node_dft[next_last_t] = prev_t + self.next_node_dft[last_t] = t + self.prev_node_dft[t] = last_t + # Update the subtree sizes and last descendants of the (old) ancestors + # of t. + while s is not None: + self.subtree_size[s] -= size_t + if self.last_descendent_dft[s] == last_t: + self.last_descendent_dft[s] = prev_t + s = self.parent[s] + + def make_root(self, q): + """ + Make a node q the root of its containing subtree. + """ + ancestors = [] + while q is not None: + ancestors.append(q) + q = self.parent[q] + ancestors.reverse() + for p, q in zip(ancestors, islice(ancestors, 1, None)): + size_p = self.subtree_size[p] + last_p = self.last_descendent_dft[p] + prev_q = self.prev_node_dft[q] + last_q = self.last_descendent_dft[q] + next_last_q = self.next_node_dft[last_q] + # Make p a child of q. + self.parent[p] = q + self.parent[q] = None + self.parent_edge[p] = self.parent_edge[q] + self.parent_edge[q] = None + self.subtree_size[p] = size_p - self.subtree_size[q] + self.subtree_size[q] = size_p + # Remove the subtree rooted at q from the depth-first thread. 
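+ # (next_node_dft/prev_node_dft form a doubly linked list over the + # depth-first thread, so splicing q's subtree out of the thread and + # back in below costs O(1) per pointer update.)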
+ self.next_node_dft[prev_q] = next_last_q + self.prev_node_dft[next_last_q] = prev_q + self.next_node_dft[last_q] = q + self.prev_node_dft[q] = last_q + if last_p == last_q: + self.last_descendent_dft[p] = prev_q + last_p = prev_q + # Add the remaining parts of the subtree rooted at p as a subtree + # of q in the depth-first thread. + self.prev_node_dft[p] = last_q + self.next_node_dft[last_q] = p + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.last_descendent_dft[q] = last_p + + def add_edge(self, i, p, q): + """ + Add an edge (p, q) to the spanning tree where q is the root of a subtree. + """ + last_p = self.last_descendent_dft[p] + next_last_p = self.next_node_dft[last_p] + size_q = self.subtree_size[q] + last_q = self.last_descendent_dft[q] + # Make q a child of p. + self.parent[q] = p + self.parent_edge[q] = i + # Insert the subtree rooted at q into the depth-first thread. + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.prev_node_dft[next_last_p] = last_q + self.next_node_dft[last_q] = next_last_p + # Update the subtree sizes and last descendants of the (new) ancestors + # of q. + while p is not None: + self.subtree_size[p] += size_q + if self.last_descendent_dft[p] == last_p: + self.last_descendent_dft[p] = last_q + p = self.parent[p] + + def update_potentials(self, i, p, q): + """ + Update the potentials of the nodes in the subtree rooted at a node + q connected to its parent p by an edge i. + """ + if q == self.edge_targets[i]: + d = self.node_potentials[p] - self.edge_weights[i] - self.node_potentials[q] + else: + d = self.node_potentials[p] + self.edge_weights[i] - self.node_potentials[q] + for q in self.trace_subtree(q): + self.node_potentials[q] += d + + def reduced_cost(self, i): + """Returns the reduced cost of an edge i.""" + c = ( + self.edge_weights[i] + - self.node_potentials[self.edge_sources[i]] + + self.node_potentials[self.edge_targets[i]] + ) + return c if self.edge_flow[i] == 0 else -c + + def find_entering_edges(self): + """Yield entering edges until none can be found.""" + if self.edge_count == 0: + return + + # Entering edges are found by combining Dantzig's rule and Bland's + # rule. The edges are cyclically grouped into blocks of size B. Within + # each block, Dantzig's rule is applied to find an entering edge. The + # block to search is determined following Bland's rule. + B = int(ceil(sqrt(self.edge_count))) # pivot block size + M = (self.edge_count + B - 1) // B # number of blocks needed to cover all edges + m = 0 # number of consecutive blocks without eligible + # entering edges + f = 0 # first edge in block + while m < M: + # Determine the next block of edges. + l = f + B + if l <= self.edge_count: + edges = range(f, l) + else: + l -= self.edge_count + edges = chain(range(f, self.edge_count), range(l)) + f = l + # Find the first edge with the lowest reduced cost. + i = min(edges, key=self.reduced_cost) + c = self.reduced_cost(i) + if c >= 0: + # No entering edge found in the current block. + m += 1 + else: + # Entering edge found. + if self.edge_flow[i] == 0: + p = self.edge_sources[i] + q = self.edge_targets[i] + else: + p = self.edge_targets[i] + q = self.edge_sources[i] + yield i, p, q + m = 0 + # All edges have nonnegative reduced costs. The current flow is + # optimal. + + def residual_capacity(self, i, p): + """Returns the residual capacity of an edge i in the direction away + from its endpoint p.
+ """ + return ( + self.edge_capacities[i] - self.edge_flow[i] + if self.edge_sources[i] == p + else self.edge_flow[i] + ) + + def find_leaving_edge(self, Wn, We): + """Returns the leaving edge in a cycle represented by Wn and We.""" + j, s = min( + zip(reversed(We), reversed(Wn)), + key=lambda i_p: self.residual_capacity(*i_p), + ) + t = self.edge_targets[j] if self.edge_sources[j] == s else self.edge_sources[j] + return j, s, t + + +@not_implemented_for("undirected") +@nx._dispatchable( + node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0} +) +def network_simplex(G, demand="demand", capacity="capacity", weight="weight"): + r"""Find a minimum cost flow satisfying all demands in digraph G. + + This is a primal network simplex algorithm that uses the leaving + arc rule to prevent cycling. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0 otherwise the problem in not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowCost : integer, float + Cost of a minimum cost flow satisfying all demands. + + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demand. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost + + Examples + -------- + A simple example of a min cost flow problem. 
+ + >>> G = nx.DiGraph() + >>> G.add_node("a", demand=-5) + >>> G.add_node("d", demand=5) + >>> G.add_edge("a", "b", weight=3, capacity=4) + >>> G.add_edge("a", "c", weight=6, capacity=10) + >>> G.add_edge("b", "d", weight=1, capacity=9) + >>> G.add_edge("c", "d", weight=2, capacity=5) + >>> flowCost, flowDict = nx.network_simplex(G) + >>> flowCost + 24 + >>> flowDict + {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}} + + The mincost flow algorithm can also be used to solve shortest path + problems. To find the shortest path between two nodes u and v, + give all edges an infinite capacity, give node u a demand of -1 and + node v a demand of 1. Then run the network simplex. The value of a + min cost flow will be the distance between u and v, and edges + carrying positive flow will indicate the path. + + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... [ + ... ("s", "u", 10), + ... ("s", "x", 5), + ... ("u", "v", 1), + ... ("u", "x", 2), + ... ("v", "y", 1), + ... ("x", "u", 3), + ... ("x", "v", 5), + ... ("x", "y", 2), + ... ("y", "s", 7), + ... ("y", "v", 6), + ... ] + ... ) + >>> G.add_node("s", demand=-1) + >>> G.add_node("v", demand=1) + >>> flowCost, flowDict = nx.network_simplex(G) + >>> flowCost == nx.shortest_path_length(G, "s", "v", weight="weight") + True + >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0]) + [('s', 'x'), ('u', 'v'), ('x', 'u')] + >>> nx.shortest_path(G, "s", "v", weight="weight") + ['s', 'x', 'u', 'v'] + + It is possible to change the name of the attributes used for the + algorithm. + + >>> G = nx.DiGraph() + >>> G.add_node("p", spam=-4) + >>> G.add_node("q", spam=2) + >>> G.add_node("a", spam=-2) + >>> G.add_node("d", spam=-1) + >>> G.add_node("t", spam=2) + >>> G.add_node("w", spam=3) + >>> G.add_edge("p", "q", cost=7, vacancies=5) + >>> G.add_edge("p", "a", cost=1, vacancies=4) + >>> G.add_edge("q", "d", cost=2, vacancies=3) + >>> G.add_edge("t", "q", cost=1, vacancies=2) + >>> G.add_edge("a", "t", cost=2, vacancies=4) + >>> G.add_edge("d", "w", cost=3, vacancies=4) + >>> G.add_edge("t", "w", cost=4, vacancies=1) + >>> flowCost, flowDict = nx.network_simplex( + ... G, demand="spam", capacity="vacancies", weight="cost" + ... ) + >>> flowCost + 37 + >>> flowDict + {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}} + + References + ---------- + .. [1] Z. Kiraly, P. Kovacs. + Efficient implementation of minimum-cost flow algorithms. + Acta Universitatis Sapientiae, Informatica 4(1):67--118. 2012. + .. [2] R. Barr, F. Glover, D. Klingman. + Enhancement of spanning tree labeling procedures for network + optimization. + INFOR 17(1):16--34. 1979.
+ """ + ########################################################################### + # Problem essentials extraction and sanity check + ########################################################################### + + if len(G) == 0: + raise nx.NetworkXError("graph has no nodes") + + multigraph = G.is_multigraph() + + # extracting data essential to problem + DEAF = _DataEssentialsAndFunctions( + G, multigraph, demand=demand, capacity=capacity, weight=weight + ) + + ########################################################################### + # Quick Error Detection + ########################################################################### + + inf = float("inf") + for u, d in zip(DEAF.node_list, DEAF.node_demands): + if abs(d) == inf: + raise nx.NetworkXError(f"node {u!r} has infinite demand") + for e, w in zip(DEAF.edge_indices, DEAF.edge_weights): + if abs(w) == inf: + raise nx.NetworkXError(f"edge {e!r} has infinite weight") + if not multigraph: + edges = nx.selfloop_edges(G, data=True) + else: + edges = nx.selfloop_edges(G, data=True, keys=True) + for e in edges: + if abs(e[-1].get(weight, 0)) == inf: + raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight") + + ########################################################################### + # Quick Infeasibility Detection + ########################################################################### + + if sum(DEAF.node_demands) != 0: + raise nx.NetworkXUnfeasible("total node demand is not zero") + for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities): + if c < 0: + raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity") + if not multigraph: + edges = nx.selfloop_edges(G, data=True) + else: + edges = nx.selfloop_edges(G, data=True, keys=True) + for e in edges: + if e[-1].get(capacity, inf) < 0: + raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity") + + ########################################################################### + # Initialization + ########################################################################### + + # Add a dummy node -1 and connect all existing nodes to it with infinite- + # capacity dummy edges. Node -1 will serve as the root of the + # spanning tree of the network simplex method. The new edges will used to + # trivially satisfy the node demands and create an initial strongly + # feasible spanning tree. + for i, d in enumerate(DEAF.node_demands): + # Must be greater-than here. Zero-demand nodes must have + # edges pointing towards the root to ensure strong feasibility. + if d > 0: + DEAF.edge_sources.append(-1) + DEAF.edge_targets.append(i) + else: + DEAF.edge_sources.append(i) + DEAF.edge_targets.append(-1) + faux_inf = ( + 3 + * max( + chain( + [ + sum(c for c in DEAF.edge_capacities if c < inf), + sum(abs(w) for w in DEAF.edge_weights), + ], + (abs(d) for d in DEAF.node_demands), + ) + ) + or 1 + ) + + n = len(DEAF.node_list) # number of nodes + DEAF.edge_weights.extend(repeat(faux_inf, n)) + DEAF.edge_capacities.extend(repeat(faux_inf, n)) + + # Construct the initial spanning tree. + DEAF.initialize_spanning_tree(n, faux_inf) + + ########################################################################### + # Pivot loop + ########################################################################### + + for i, p, q in DEAF.find_entering_edges(): + Wn, We = DEAF.find_cycle(i, p, q) + j, s, t = DEAF.find_leaving_edge(Wn, We) + DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s)) + # Do nothing more if the entering edge is the same as the leaving edge. 
+ if i != j: + if DEAF.parent[t] != s: + # Ensure that s is the parent of t. + s, t = t, s + if We.index(i) > We.index(j): + # Ensure that q is in the subtree rooted at t. + p, q = q, p + DEAF.remove_edge(s, t) + DEAF.make_root(q) + DEAF.add_edge(i, p, q) + DEAF.update_potentials(i, p, q) + + ########################################################################### + # Infeasibility and unboundedness detection + ########################################################################### + + if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)): + raise nx.NetworkXUnfeasible("no flow satisfies all node demands") + + if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any( + e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0 + for e in nx.selfloop_edges(G, data=True) + ): + raise nx.NetworkXUnbounded("negative cycle with infinite capacity found") + + ########################################################################### + # Flow cost calculation and flow dict construction + ########################################################################### + + del DEAF.edge_flow[DEAF.edge_count :] + flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow)) + flow_dict = {n: {} for n in DEAF.node_list} + + def add_entry(e): + """Add a flow dict entry.""" + d = flow_dict[e[0]] + for k in e[1:-2]: + try: + d = d[k] + except KeyError: + t = {} + d[k] = t + d = t + d[e[-2]] = e[-1] + + DEAF.edge_sources = ( + DEAF.node_list[s] for s in DEAF.edge_sources + ) # Use original nodes. + DEAF.edge_targets = ( + DEAF.node_list[t] for t in DEAF.edge_targets + ) # Use original nodes. + if not multigraph: + for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow): + add_entry(e) + edges = G.edges(data=True) + else: + for e in zip( + DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow + ): + add_entry(e) + edges = G.edges(data=True, keys=True) + for e in edges: + if e[0] != e[1]: + if e[-1].get(capacity, inf) == 0: + add_entry(e[:-1] + (0,)) + else: + w = e[-1].get(weight, 0) + if w >= 0: + add_entry(e[:-1] + (0,)) + else: + c = e[-1][capacity] + flow_cost += w * c + add_entry(e[:-1] + (c,)) + + return flow_cost, flow_dict diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py new file mode 100644 index 0000000000000000000000000000000000000000..9f1193f1cbfbe188ebf05105a2c6f1802baca6f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py @@ -0,0 +1,300 @@ +""" +Shortest augmenting path algorithm for maximum flow problems. +""" + +from collections import deque + +import networkx as nx + +from .edmondskarp import edmonds_karp_core +from .utils import CurrentEdge, build_residual_network + +__all__ = ["shortest_augmenting_path"] + + +def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff): + """Implementation of the shortest augmenting path algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. 
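+ # (Only the 'flow' attributes are cleared below; passing a prebuilt + # residual network through the `residual` argument lets callers reuse + # it across calls instead of rebuilding the capacities each time.)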
+ for u in R: + for e in R_succ[u].values(): + e["flow"] = 0 + + # Initialize heights of the nodes. + heights = {t: 0} + q = deque([(t, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(G) + m = R.size() / 2 + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + # Initialize counts of nodes in each level. + counts = [0] * (2 * n - 1) + for u in R: + counts[R_nodes[u]["height"]] += 1 + + inf = R.graph["inf"] + + def augment(path): + """Augment flow along a path from s to t.""" + # Determine the path residual capacity. + flow = inf + it = iter(path) + u = next(it) + for v in it: + attr = R_succ[u][v] + flow = min(flow, attr["capacity"] - attr["flow"]) + u = v + if flow * 2 > inf: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + # Augment flow along the path. + it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def relabel(u): + """Relabel a node to create an admissible edge.""" + height = n - 1 + for v, attr in R_succ[u].items(): + if attr["flow"] < attr["capacity"]: + height = min(height, R_nodes[v]["height"]) + return height + 1 + + if cutoff is None: + cutoff = float("inf") + + # Phase 1: Look for shortest augmenting paths using depth-first search. + + flow_value = 0 + path = [s] + u = s + d = n if not two_phase else int(min(m**0.5, 2 * n ** (2.0 / 3))) + done = R_nodes[s]["height"] >= d + while not done: + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # Depth-first search for the next node on the path to t. + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + # Advance to the next node following an admissible edge. + path.append(v) + u = v + break + try: + curr_edge.move_to_next() + except StopIteration: + counts[height] -= 1 + if counts[height] == 0: + # Gap heuristic: If relabeling causes a level to become + # empty, a minimum cut has been identified. The algorithm + # can now be terminated. + R.graph["flow_value"] = flow_value + return R + height = relabel(u) + if u == s and height >= d: + if not two_phase: + # t is disconnected from s in the residual network. No + # more augmenting paths exist. + R.graph["flow_value"] = flow_value + return R + else: + # t is at least d steps away from s. End of phase 1. + done = True + break + counts[height] += 1 + R_nodes[u]["height"] = height + if u != s: + # After relabeling, the last edge on the path is no longer + # admissible. Retreat one step to look for an alternative. + path.pop() + u = path[-1] + break + if u == t: + # t is reached. Augment flow along the path and reset it for a new + # depth-first search. + flow_value += augment(path) + if flow_value >= cutoff: + R.graph["flow_value"] = flow_value + return R + path = [s] + u = s + + # Phase 2: Look for shortest augmenting paths using breadth-first search. 
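+ # (In two-phase mode, phase 1 stops once s is at least + # d = min(sqrt(m), 2 * n**(2/3)) levels away from t; the remaining + # augmentations are found by the Edmonds-Karp BFS core, which gives + # the improved bound quoted in the docstring below.)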
+ flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value) + + R.graph["flow_value"] = flow_value + return R + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def shortest_augmenting_path( + G, + s, + t, + capacity="capacity", + residual=None, + value_only=False, + two_phase=False, + cutoff=None, +): + r"""Find a maximum single-commodity flow using the shortest augmenting path + algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + two_phase : bool + If True, a two-phase variant is used. The two-phase variant improves + the running time on unit-capacity networks from $O(nm)$ to + $O(\min(n^{2/3}, m^{1/2}) m)$. Default value: False. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`preflow_push` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. 
+ + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import shortest_augmenting_path + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = shortest_augmenting_path(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff) + R.graph["algorithm"] = "shortest_augmenting_path" + nx._clear_cache(R) + return R diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fe0fd5f1b48c4e8bd71da083882811d0f3149e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20c05a9fb865b60673aa2ef78312ef3771861000 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61c64a057d6d57a711125fdf15a5871040fb8074 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..894f293d0dbba5edc3af4fb2fe79d5a93beb0bf7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03c79ca3e9367ea6ac8fa57e486354e5ad25bc33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f77b37898363e9fe86510b40ef92ddc35db8481d Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py new file mode 100644 index 0000000000000000000000000000000000000000..1649ec82c719226e9caa68268d8953f7cae6ef74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py @@ -0,0 +1,128 @@ +from itertools import combinations + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +class TestGomoryHuTree: + def minimum_edge_weight(self, T, u, v): + path = nx.shortest_path(T, u, v, weight="weight") + return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + + def compute_cutset(self, G, T_orig, edge): + T = T_orig.copy() + T.remove_edge(*edge) + U, V = list(nx.connected_components(T)) + cutset = set() + for x, nbrs in ((n, G[n]) for n in U): + cutset.update((x, y) for y in nbrs if y in V) + return cutset + + def test_default_flow_function_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_davis_southern_women_graph(self): + G = nx.davis_southern_women_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_florentine_families_graph(self): + G = nx.florentine_families_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert 
nx.minimum_cut_value(G, u, v) == cut_value + + @pytest.mark.slow + def test_les_miserables_graph_cutset(self): + G = nx.les_miserables_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph_cutset(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + u, v = 0, 33 + cut_value, edge = self.minimum_edge_weight(T, u, v) + cutset = self.compute_cutset(G, T, edge) + assert cut_value == len(cutset) + + def test_wikipedia_example(self): + # Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree + G = nx.Graph() + G.add_weighted_edges_from( + ( + (0, 1, 1), + (0, 2, 7), + (1, 2, 1), + (1, 3, 3), + (1, 4, 2), + (2, 4, 4), + (3, 4, 1), + (3, 5, 6), + (4, 5, 2), + ) + ) + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, capacity="weight", flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v, capacity="weight") == cut_value + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + T = nx.gomory_hu_tree(G) + + def test_empty_raises(self): + with pytest.raises(nx.NetworkXError): + G = nx.empty_graph() + T = nx.gomory_hu_tree(G) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow.py new file mode 100644 index 0000000000000000000000000000000000000000..c31d8422843cdc8407032220ab9a76de52661a6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow.py @@ -0,0 +1,560 @@ +"""Maximum flow algorithms test suite. 
+""" +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = { + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +} + +max_min_funcs = {nx.maximum_flow, nx.minimum_cut} +flow_value_funcs = {nx.maximum_flow_value, nx.minimum_cut_value} +interface_funcs = max_min_funcs & flow_value_funcs +all_funcs = flow_funcs & interface_funcs + + +def compute_cutset(G, partition): + reachable, non_reachable = partition + cutset = set() + for u, nbrs in ((n, G[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + return cutset + + +def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func): + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert set(G) == set(flowDict), errmsg + for u in G: + assert set(G[u]) == set(flowDict[u]), errmsg + excess = {u: 0 for u in flowDict} + for u in flowDict: + for v, flow in flowDict[u].items(): + if capacity in G[u][v]: + assert flow <= G[u][v][capacity] + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -solnValue, errmsg + elif u == t: + assert exc == solnValue, errmsg + else: + assert exc == 0, errmsg + + +def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func): + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert all(n in G for n in partition[0]), errmsg + assert all(n in G for n in partition[1]), errmsg + cutset = compute_cutset(G, partition) + assert all(G.has_edge(u, v) for (u, v) in cutset), errmsg + assert solnValue == sum(G[u][v][capacity] for (u, v) in cutset), errmsg + H = G.copy() + H.remove_edges_from(cutset) + if not G.is_directed(): + assert not nx.is_connected(H), errmsg + else: + assert not nx.is_strongly_connected(H), errmsg + + +def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="capacity"): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + R = flow_func(G, s, t, capacity) + # Test both legacy and new implementations. 
+ flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + assert flow_value == solnValue, errmsg + validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func) + # Minimum cut + cut_value, partition = nx.minimum_cut( + G, s, t, capacity=capacity, flow_func=flow_func + ) + validate_cuts(G, s, t, solnValue, partition, capacity, flow_func) + + +class TestMaxflowMinCutCommon: + def test_graph1(self): + # Trivial undirected graph + G = nx.Graph() + G.add_edge(1, 2, capacity=1.0) + + solnFlows = {1: {2: 1.0}, 2: {1: 1.0}} + + compare_flows_and_cuts(G, 1, 2, solnFlows, 1.0) + + def test_graph2(self): + # A more complex undirected graph + # adapted from https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One + G = nx.Graph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + H = { + "x": {"a": 3, "b": 1}, + "a": {"c": 3, "x": 3}, + "b": {"c": 1, "d": 2, "x": 1}, + "c": {"a": 3, "b": 1, "y": 2}, + "d": {"b": 2, "e": 2}, + "e": {"d": 2, "y": 2}, + "y": {"c": 2, "e": 2}, + } + + compare_flows_and_cuts(G, "x", "y", H, 4.0) + + def test_digraph1(self): + # The classic directed graph example + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1000.0) + G.add_edge("a", "c", capacity=1000.0) + G.add_edge("b", "c", capacity=1.0) + G.add_edge("b", "d", capacity=1000.0) + G.add_edge("c", "d", capacity=1000.0) + + H = { + "a": {"b": 1000.0, "c": 1000.0}, + "b": {"c": 0, "d": 1000.0}, + "c": {"d": 1000.0}, + "d": {}, + } + + compare_flows_and_cuts(G, "a", "d", H, 2000.0) + + def test_digraph2(self): + # An example in which some edges end up with zero flow. + G = nx.DiGraph() + G.add_edge("s", "b", capacity=2) + G.add_edge("s", "c", capacity=1) + G.add_edge("c", "d", capacity=1) + G.add_edge("d", "a", capacity=1) + G.add_edge("b", "a", capacity=2) + G.add_edge("a", "t", capacity=2) + + H = { + "s": {"b": 2, "c": 0}, + "c": {"d": 0}, + "d": {"a": 0}, + "b": {"a": 2}, + "a": {"t": 2}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 2) + + def test_digraph3(self): + # A directed graph example from Cormen et al. 
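+ # The optimal value 23 is certified by the cut
+ # ({s, v1, v2, v4}, {v3, t}), whose forward capacity is 12 + 7 + 4.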
+ G = nx.DiGraph() + G.add_edge("s", "v1", capacity=16.0) + G.add_edge("s", "v2", capacity=13.0) + G.add_edge("v1", "v2", capacity=10.0) + G.add_edge("v2", "v1", capacity=4.0) + G.add_edge("v1", "v3", capacity=12.0) + G.add_edge("v3", "v2", capacity=9.0) + G.add_edge("v2", "v4", capacity=14.0) + G.add_edge("v4", "v3", capacity=7.0) + G.add_edge("v3", "t", capacity=20.0) + G.add_edge("v4", "t", capacity=4.0) + + H = { + "s": {"v1": 12.0, "v2": 11.0}, + "v2": {"v1": 0, "v4": 11.0}, + "v1": {"v2": 0, "v3": 12.0}, + "v3": {"v2": 0, "t": 19.0}, + "v4": {"v3": 7.0, "t": 4.0}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 23.0) + + def test_digraph4(self): + # A more complex directed graph + # from https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + H = { + "x": {"a": 2.0, "b": 1.0}, + "a": {"c": 2.0}, + "b": {"c": 0, "d": 1.0}, + "c": {"y": 2.0}, + "d": {"e": 1.0}, + "e": {"y": 1.0}, + "y": {}, + } + + compare_flows_and_cuts(G, "x", "y", H, 3.0) + + def test_wikipedia_dinitz_example(self): + # Nice example from https://en.wikipedia.org/wiki/Dinic's_algorithm + G = nx.DiGraph() + G.add_edge("s", 1, capacity=10) + G.add_edge("s", 2, capacity=10) + G.add_edge(1, 3, capacity=4) + G.add_edge(1, 4, capacity=8) + G.add_edge(1, 2, capacity=2) + G.add_edge(2, 4, capacity=9) + G.add_edge(3, "t", capacity=10) + G.add_edge(4, 3, capacity=6) + G.add_edge(4, "t", capacity=10) + + solnFlows = { + 1: {2: 0, 3: 4, 4: 6}, + 2: {4: 9}, + 3: {"t": 9}, + 4: {3: 5, "t": 10}, + "s": {1: 10, 2: 9}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", solnFlows, 19) + + def test_optional_capacity(self): + # Test optional capacity parameter. 
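+ # Same network as test_digraph4, but the capacities live under the
+ # "spam" attribute; passing capacity="spam" below makes every
+ # algorithm and interface function read that attribute instead.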
+ G = nx.DiGraph() + G.add_edge("x", "a", spam=3.0) + G.add_edge("x", "b", spam=1.0) + G.add_edge("a", "c", spam=3.0) + G.add_edge("b", "c", spam=5.0) + G.add_edge("b", "d", spam=4.0) + G.add_edge("d", "e", spam=2.0) + G.add_edge("c", "y", spam=2.0) + G.add_edge("e", "y", spam=3.0) + + solnFlows = { + "x": {"a": 2.0, "b": 1.0}, + "a": {"c": 2.0}, + "b": {"c": 0, "d": 1.0}, + "c": {"y": 2.0}, + "d": {"e": 1.0}, + "e": {"y": 1.0}, + "y": {}, + } + solnValue = 3.0 + s = "x" + t = "y" + + compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="spam") + + def test_digraph_infcap_edges(self): + # DiGraph with infinite capacity edges + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "t": 60}, + "b": {"c": 12}, + "c": {"t": 37}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + # DiGraph with infinite capacity digon + G = nx.DiGraph() + G.add_edge("s", "a", capacity=85) + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("c", "a") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t", capacity=37) + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "t": 60}, + "c": {"a": 0, "t": 37}, + "b": {"c": 12}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + def test_digraph_infcap_path(self): + # Graph with infinite capacity (s, t)-path + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + for flow_func in all_funcs: + pytest.raises(nx.NetworkXUnbounded, flow_func, G, "s", "t") + + def test_graph_infcap_edges(self): + # Undirected graph with infinite capacity edges + G = nx.Graph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "s": 85, "t": 60}, + "b": {"c": 12, "s": 12}, + "c": {"a": 25, "b": 12, "t": 37}, + "t": {"a": 60, "c": 37}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + def test_digraph5(self): + # From ticket #429 by mfrasca. 
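+ # Max flow is 4: a -> t only carries one of a's two units, so the
+ # other is rerouted over a -> b, and b forwards all three of its
+ # units through b -> t (capacity 3).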
+ G = nx.DiGraph() + G.add_edge("s", "a", capacity=2) + G.add_edge("s", "b", capacity=2) + G.add_edge("a", "b", capacity=5) + G.add_edge("a", "t", capacity=1) + G.add_edge("b", "a", capacity=1) + G.add_edge("b", "t", capacity=3) + flowSoln = { + "a": {"b": 1, "t": 1}, + "b": {"a": 0, "t": 3}, + "s": {"a": 2, "b": 2}, + "t": {}, + } + compare_flows_and_cuts(G, "s", "t", flowSoln, 4) + + def test_disconnected(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(1) + assert nx.maximum_flow_value(G, 0, 3) == 0 + flowSoln = {0: {}, 2: {3: 0}, 3: {2: 0}} + compare_flows_and_cuts(G, 0, 3, flowSoln, 0) + + def test_source_target_not_in_graph(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(0) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 3) + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + G.remove_node(3) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 3) + + def test_source_target_coincide(self): + G = nx.Graph() + G.add_node(0) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 0) + + def test_multigraphs_raise(self): + G = nx.MultiGraph() + M = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (1, 0)], capacity=True) + for flow_func in all_funcs: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 0) + + +class TestMaxFlowMinCutInterface: + def setup_method(self): + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + self.G = G + H = nx.DiGraph() + H.add_edge(0, 1, capacity=1.0) + H.add_edge(1, 2, capacity=1.0) + self.H = H + + def test_flow_func_not_callable(self): + elements = ["this_should_be_callable", 10, {1, 2, 3}] + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity") + for flow_func in interface_funcs: + for element in elements: + pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element) + pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element) + + def test_flow_func_parameters(self): + G = self.G + fv = 3.0 + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + result = interface_func(G, "x", "y", flow_func=flow_func) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + def test_minimum_cut_no_cutoff(self): + G = self.G + pytest.raises( + nx.NetworkXError, + nx.minimum_cut, + G, + "x", + "y", + flow_func=preflow_push, + cutoff=1.0, + ) + pytest.raises( + nx.NetworkXError, + nx.minimum_cut_value, + G, + "x", + "y", + flow_func=preflow_push, + cutoff=1.0, + ) + + def test_kwargs(self): + G = self.H + fv = 1.0 + to_test = ( + (shortest_augmenting_path, {"two_phase": True}), + (preflow_push, {"global_relabel_freq": 5}), + ) + for interface_func in interface_funcs: + for flow_func, kwargs in to_test: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs) + if interface_func in max_min_funcs: + 
result = result[0] + assert fv == result, errmsg + + def test_kwargs_default_flow_func(self): + G = self.H + for interface_func in interface_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 0, 1, global_relabel_freq=2 + ) + + def test_reusing_residual(self): + G = self.G + fv = 3.0 + s, t = "x", "y" + R = build_residual_network(G, "capacity") + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + for i in range(3): + result = interface_func( + G, "x", "y", flow_func=flow_func, residual=R + ) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + +# Tests specific to one algorithm +def test_preflow_push_global_relabel_freq(): + G = nx.DiGraph() + G.add_edge(1, 2, capacity=1) + R = preflow_push(G, 1, 2, global_relabel_freq=None) + assert R.graph["flow_value"] == 1 + pytest.raises(nx.NetworkXError, preflow_push, G, 1, 2, global_relabel_freq=-1) + + +def test_preflow_push_makes_enough_space(): + # From ticket #1542 + G = nx.DiGraph() + nx.add_path(G, [0, 1, 3], capacity=1) + nx.add_path(G, [1, 2, 3], capacity=1) + R = preflow_push(G, 0, 3, value_only=False) + assert R.graph["flow_value"] == 1 + + +def test_shortest_augmenting_path_two_phase(): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=1) + nx.add_path(G, ((i, j) for j in range(p)), capacity=1) + G.add_edge((i, p - 1), "t", capacity=1) + R = shortest_augmenting_path(G, "s", "t", two_phase=True) + assert R.graph["flow_value"] == k + R = shortest_augmenting_path(G, "s", "t", two_phase=False) + assert R.graph["flow_value"] == k + + +class TestCutoff: + def test_cutoff(self): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=2) + nx.add_path(G, ((i, j) for j in range(p)), capacity=2) + G.add_edge((i, p - 1), "t", capacity=2) + R = shortest_augmenting_path(G, "s", "t", two_phase=True, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = shortest_augmenting_path(G, "s", "t", two_phase=False, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = edmonds_karp(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = dinitz(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = boykov_kolmogorov(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + + def test_complete_graph_cutoff(self): + G = nx.complete_graph(5) + nx.set_edge_attributes(G, {(u, v): 1 for u, v in G.edges()}, "capacity") + for flow_func in [ + shortest_augmenting_path, + edmonds_karp, + dinitz, + boykov_kolmogorov, + ]: + for cutoff in [3, 2, 1]: + result = nx.maximum_flow_value( + G, 0, 4, flow_func=flow_func, cutoff=cutoff + ) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..7b4859c8a8b325c6d2e3ab06430de4afbc007803 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py @@ -0,0 +1,157 @@ +"""Maximum flow algorithms test suite on large graphs. 
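+
+Graphs are loaded from bz2-compressed pickle fixtures shipped with the test
+suite (see read_graph below).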
+""" + +import bz2 +import importlib.resources +import os +import pickle + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +def gen_pyramid(N): + # This graph admits a flow of value 1 for which every arc is at + # capacity (except the arcs incident to the sink which have + # infinite capacity). + G = nx.DiGraph() + + for i in range(N - 1): + cap = 1.0 / (i + 2) + for j in range(i + 1): + G.add_edge((i, j), (i + 1, j), capacity=cap) + cap = 1.0 / (i + 1) - cap + G.add_edge((i, j), (i + 1, j + 1), capacity=cap) + cap = 1.0 / (i + 2) - cap + + for j in range(N): + G.add_edge((N - 1, j), "t") + + return G + + +def read_graph(name): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / f"{name}.gpickle.bz2" + ) + + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + return G + + +def validate_flows(G, s, t, soln_value, R, flow_func): + flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert soln_value == flow_value, errmsg + assert set(G) == set(flow_dict), errmsg + for u in G: + assert set(G[u]) == set(flow_dict[u]), errmsg + excess = {u: 0 for u in flow_dict} + for u in flow_dict: + for v, flow in flow_dict[u].items(): + assert flow <= G[u][v].get("capacity", float("inf")), errmsg + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -soln_value, errmsg + elif u == t: + assert exc == soln_value, errmsg + else: + assert exc == 0, errmsg + + +class TestMaxflowLargeGraph: + def test_complete_graph(self): + N = 50 + G = nx.complete_graph(N) + nx.set_edge_attributes(G, 5, "capacity") + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs) + assert flow_value == 5 * (N - 1), errmsg + + def test_pyramid(self): + N = 10 + # N = 100 # this gives a graph with 5051 nodes + G = gen_pyramid(N) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, (0, 0), "t", **kwargs) + assert flow_value == pytest.approx(1.0, abs=1e-7) + + def test_gl1(self): + G = read_graph("gl1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + # do one flow_func to save time + flow_func = flow_funcs[0] + validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), + # flow_func) + + @pytest.mark.slow + def test_gw1(self): + G = read_graph("gw1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + validate_flows(G, s, t, 1202018, flow_func(G, s, t, **kwargs), flow_func) + + def test_wlm3(self): + G = read_graph("wlm3") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + # do one flow_func to save time + 
flow_func = flow_funcs[0] + validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), + # flow_func) + + def test_preflow_push_global_relabel(self): + G = read_graph("gw1") + R = preflow_push(G, 1, len(G), global_relabel_freq=50) + assert R.graph["flow_value"] == 1202018 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_mincost.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_mincost.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1794b152f8d5523f660d3d7bd15f841e5470f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_mincost.py @@ -0,0 +1,476 @@ +import bz2 +import importlib.resources +import os +import pickle + +import pytest + +import networkx as nx + + +class TestMinCostFlow: + def test_simple_digraph(self): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 24 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 24 + assert nx.cost_of_flow(G, H) == 24 + assert H == soln + + def test_negcycle_infcap(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_sum_demands_not_zero(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=4) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_no_flow_satisfying_demands(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_transshipment(self): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + 
G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert nx.min_cost_flow_cost(G) == 41 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 41 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 41 + assert nx.cost_of_flow(G, H) == 41 + assert H == soln + + def test_max_flow_min_cost(self): + G = nx.DiGraph() + G.add_edge("s", "a", bandwidth=6) + G.add_edge("s", "c", bandwidth=10, cost=10) + G.add_edge("a", "b", cost=6) + G.add_edge("b", "d", bandwidth=8, cost=7) + G.add_edge("c", "d", cost=10) + G.add_edge("d", "t", bandwidth=5, cost=5) + soln = { + "s": {"a": 5, "c": 0}, + "a": {"b": 5}, + "b": {"d": 5}, + "c": {"d": 0}, + "d": {"t": 5}, + "t": {}, + } + flow = nx.max_flow_min_cost(G, "s", "t", capacity="bandwidth", weight="cost") + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + G.add_edge("t", "s", cost=-100) + flowCost, flow = nx.capacity_scaling(G, capacity="bandwidth", weight="cost") + G.remove_edge("t", "s") + assert flowCost == -410 + assert flow["t"]["s"] == 5 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + def test_digraph1(self): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. + G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 150 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 150 + assert H == soln + assert nx.cost_of_flow(G, H) == 150 + + def test_digraph2(self): + # Example from ticket #430 from mfrasca. Original source: + # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11. 
+ G = nx.DiGraph() + G.add_edge("s", 1, capacity=12) + G.add_edge("s", 2, capacity=6) + G.add_edge("s", 3, capacity=14) + G.add_edge(1, 2, capacity=11, weight=4) + G.add_edge(2, 3, capacity=9, weight=6) + G.add_edge(1, 4, capacity=5, weight=5) + G.add_edge(1, 5, capacity=2, weight=12) + G.add_edge(2, 5, capacity=4, weight=4) + G.add_edge(2, 6, capacity=2, weight=6) + G.add_edge(3, 6, capacity=31, weight=3) + G.add_edge(4, 5, capacity=18, weight=4) + G.add_edge(5, 6, capacity=9, weight=5) + G.add_edge(4, "t", capacity=3) + G.add_edge(5, "t", capacity=7) + G.add_edge(6, "t", capacity=22) + flow = nx.max_flow_min_cost(G, "s", "t") + soln = { + 1: {2: 6, 4: 5, 5: 1}, + 2: {3: 6, 5: 4, 6: 2}, + 3: {6: 20}, + 4: {5: 2, "t": 3}, + 5: {6: 0, "t": 7}, + 6: {"t": 22}, + "s": {1: 12, 2: 6, 3: 14}, + "t": {}, + } + assert flow == soln + + G.add_edge("t", "s", weight=-100) + flowCost, flow = nx.capacity_scaling(G) + G.remove_edge("t", "s") + assert flow["t"]["s"] == 32 + assert flowCost == -3007 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow) == 193 + + def test_digraph3(self): + """Combinatorial Optimization: Algorithms and Complexity, + Papadimitriou Steiglitz at page 140 has an example, 7.1, but that + admits multiple solutions, so I alter it a bit. From ticket #430 + by mfrasca.""" + + G = nx.DiGraph() + G.add_edge("s", "a") + G["s"]["a"].update({0: 2, 1: 4}) + G.add_edge("s", "b") + G["s"]["b"].update({0: 2, 1: 1}) + G.add_edge("a", "b") + G["a"]["b"].update({0: 5, 1: 2}) + G.add_edge("a", "t") + G["a"]["t"].update({0: 1, 1: 5}) + G.add_edge("b", "a") + G["b"]["a"].update({0: 1, 1: 3}) + G.add_edge("b", "t") + G["b"]["t"].update({0: 3, 1: 2}) + + "PS.ex.7.1: testing main function" + sol = nx.max_flow_min_cost(G, "s", "t", capacity=0, weight=1) + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert 23 == nx.cost_of_flow(G, sol, weight=1) + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + + G.add_edge("t", "s") + G["t"]["s"].update({1: -100}) + flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1) + G.remove_edge("t", "s") + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert sol["t"]["s"] == 4 + assert flowCost == -377 + del sol["t"]["s"] + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + assert nx.cost_of_flow(G, sol, weight=1) == 23 + + def test_zero_capacity_edges(self): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 6 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 6 + assert H == soln + assert nx.cost_of_flow(G, H) == 6 + + def test_digon(self): + """Check if digons are handled properly. 
Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + assert nx.min_cost_flow_cost(G) == 2857140 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 2857140 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 2857140 + assert H == soln + assert nx.cost_of_flow(G, H) == 2857140 + + def test_deadend(self): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.min_cost_flow, G) + + def test_infinite_capacity_neg_digon(self): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_finite_capacity_neg_digon(self): + """The digon should receive the maximum amount of flow it can handle. + Taken from ticket #749 by @chuongdo.""" + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1, weight=-1) + G.add_edge("b", "a", capacity=1, weight=-1) + min_cost = -2 + assert nx.min_cost_flow_cost(G) == min_cost + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {"a": {"b": 1}, "b": {"a": 1}} + assert nx.cost_of_flow(G, H) == -2 + + def test_multidigraph(self): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + def test_negative_selfloops(self): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + def test_bone_shaped(self): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + def test_exceptions(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G.add_node(0, demand=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G.nodes[0]["demand"] = 0 + G.add_node(1, demand=0) + G.add_edge(0, 1, weight=-float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["weight"] = 0 + G.add_edge(0, 0, weight=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G[0][0]["weight"] = 0 + G[0][1]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["capacity"] = 0 + G[0][0]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_large(self): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / "netgen-2.gpickle.bz2" + ) + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + flowCost, flowDict = nx.capacity_scaling(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py new file mode 100644 index 
0000000000000000000000000000000000000000..5b3b5f6dd069e38b8be536cf4037a46da2366cc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py @@ -0,0 +1,387 @@ +import bz2 +import importlib.resources +import os +import pickle + +import pytest + +import networkx as nx + + +@pytest.fixture +def simple_flow_graph(): + G = nx.DiGraph() + G.add_node("a", demand=0) + G.add_node("b", demand=-5) + G.add_node("c", demand=50000000) + G.add_node("d", demand=-49999995) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + return G + + +@pytest.fixture +def simple_no_flow_graph(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + return G + + +def get_flowcost_from_flowdict(G, flowDict): + """Returns flow cost calculated from flow dictionary""" + flowCost = 0 + for u in flowDict: + for v in flowDict[u]: + flowCost += flowDict[u][v] * G[u][v]["weight"] + return flowCost + + +def test_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_node_attributes(G, {"a": {"demand": inf}}) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_neg_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_node_attributes(G, {"a": {"demand": -inf}}) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_infinite_weight_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_edge_attributes( + G, {("a", "b"): {"weight": inf}, ("b", "d"): {"weight": inf}} + ) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_nonzero_net_demand_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_node_attributes(G, {"b": {"demand": -4}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_negative_capacity_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_edge_attributes(G, {("a", "b"): {"weight": 1}, ("b", "d"): {"capacity": -9}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_no_flow_satisfying_demands(simple_no_flow_graph): + G = simple_no_flow_graph + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_sum_demands_not_zero(simple_no_flow_graph): + G = simple_no_flow_graph + nx.set_node_attributes(G, {"t": {"demand": 4}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_google_or_tools_example(): + """ + https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3] + supplies = [20, 0, 0, -5, -15] + answer = 150 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_google_or_tools_example2(): + """ + 
https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4, 3] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2, 5] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5, 10] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3, 4] + supplies = [23, 0, 0, -5, -15, -3] + answer = 183 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_large(): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / "netgen-2.gpickle.bz2" + ) + + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + + +def test_simple_digraph(): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + + +def test_negcycle_infcap(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_transshipment(): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert H == soln + + +def test_digraph1(): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. 
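+ # Node 1 supplies 20 units; nodes 4 and 5 demand 5 and 15. The optimal
+ # shipment found below has total cost 150.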
+ G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + + +def test_zero_capacity_edges(): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + + +def test_digon(): + """Check if digons are handled properly. Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + + +def test_deadend(): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_infinite_capacity_neg_digon(): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + +def test_multidigraph(): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + +def test_negative_selfloops(): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + +def test_bone_shaped(): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + +def test_graphs_type_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/utils.py b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..03f1d10f75a9f2b3d80bdea8b4c086cebe1df966 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/utils.py @@ -0,0 +1,189 @@ +""" +Utility classes and functions for network flow algorithms. +""" + +from collections import deque + +import networkx as nx + +__all__ = [ + "CurrentEdge", + "Level", + "GlobalRelabelThreshold", + "build_residual_network", + "detect_unboundedness", + "build_flow_dict", +] + + +class CurrentEdge: + """Mechanism for iterating over out-edges incident to a node in a circular + manner. StopIteration exception is raised when wraparound occurs. + """ + + __slots__ = ("_edges", "_it", "_curr") + + def __init__(self, edges): + self._edges = edges + if self._edges: + self._rewind() + + def get(self): + return self._curr + + def move_to_next(self): + try: + self._curr = next(self._it) + except StopIteration: + self._rewind() + raise + + def _rewind(self): + self._it = iter(self._edges.items()) + self._curr = next(self._it) + + +class Level: + """Active and inactive nodes in a level.""" + + __slots__ = ("active", "inactive") + + def __init__(self): + self.active = set() + self.inactive = set() + + +class GlobalRelabelThreshold: + """Measurement of work before the global relabeling heuristic should be + applied. + """ + + def __init__(self, n, m, freq): + self._threshold = (n + m) / freq if freq else float("inf") + self._work = 0 + + def add_work(self, work): + self._work += work + + def is_reached(self): + return self._work >= self._threshold + + def clear_work(self): + self._work = 0 + + +@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True) +def build_residual_network(G, capacity): + """Build a residual network and initialize a zero flow. + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. 
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def build_residual_network(G, capacity):
+    """Build a residual network and initialize a zero flow.
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
+    specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
+    that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    """
+    if G.is_multigraph():
+        raise nx.NetworkXError("MultiGraph and MultiDiGraph not supported (yet).")
+
+    R = nx.DiGraph()
+    R.__networkx_cache__ = None  # Disable caching
+    R.add_nodes_from(G)
+
+    inf = float("inf")
+    # Extract edges with positive capacities. Self loops excluded.
+    edge_list = [
+        (u, v, attr)
+        for u, v, attr in G.edges(data=True)
+        if u != v and attr.get(capacity, inf) > 0
+    ]
+    # Simulate infinity with three times the sum of the finite edge capacities
+    # or any positive value if the sum is zero. This allows the
+    # infinite-capacity edges to be distinguished for unboundedness detection
+    # and directly participate in residual capacity calculation. If the maximum
+    # flow is finite, these edges cannot appear in the minimum cut and thus
+    # guarantee correctness. Since the residual capacity of an
+    # infinite-capacity edge is always at least 2/3 of inf, while that of a
+    # finite-capacity edge is at most 1/3 of inf, if an operation moves more
+    # than 1/3 of inf units of flow to t, there must be an infinite-capacity
+    # s-t path in G.
+    inf = (
+        3
+        * sum(
+            attr[capacity]
+            for u, v, attr in edge_list
+            if capacity in attr and attr[capacity] != inf
+        )
+        or 1
+    )
+    if G.is_directed():
+        for u, v, attr in edge_list:
+            r = min(attr.get(capacity, inf), inf)
+            if not R.has_edge(u, v):
+                # Both (u, v) and (v, u) must be present in the residual
+                # network.
+                R.add_edge(u, v, capacity=r)
+                R.add_edge(v, u, capacity=0)
+            else:
+                # The edge (u, v) was added when (v, u) was visited.
+                R[u][v]["capacity"] = r
+    else:
+        for u, v, attr in edge_list:
+            # Add a pair of edges with equal residual capacities.
+            r = min(attr.get(capacity, inf), inf)
+            R.add_edge(u, v, capacity=r)
+            R.add_edge(v, u, capacity=r)
+
+    # Record the value simulating infinity.
+    R.graph["inf"] = inf
+
+    return R
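As a sanity check of the construction just described, consider a throwaway one-edge graph (shown only to illustrate the attributes the function sets up):

    import networkx as nx
    from networkx.algorithms.flow.utils import build_residual_network

    G = nx.DiGraph()
    G.add_edge("s", "t", capacity=5)
    R = build_residual_network(G, "capacity")

    assert R["s"]["t"]["capacity"] == 5  # forward capacity copied from G
    assert R["t"]["s"]["capacity"] == 0  # reverse edge added for residuals
    assert R.graph["inf"] == 15          # 3 * sum of finite capacities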
+
+
+@nx._dispatchable(
+    graphs="R",
+    preserve_edge_attrs={"R": {"capacity": float("inf")}},
+    preserve_graph_attrs=True,
+)
+def detect_unboundedness(R, s, t):
+    """Detect an infinite-capacity s-t path in R."""
+    q = deque([s])
+    seen = {s}
+    inf = R.graph["inf"]
+    while q:
+        u = q.popleft()
+        for v, attr in R[u].items():
+            if attr["capacity"] == inf and v not in seen:
+                if v == t:
+                    raise nx.NetworkXUnbounded(
+                        "Infinite capacity path, flow unbounded above."
+                    )
+                seen.add(v)
+                q.append(v)
+
+
+@nx._dispatchable(graphs={"G": 0, "R": 1}, preserve_edge_attrs={"R": {"flow": None}})
+def build_flow_dict(G, R):
+    """Build a flow dictionary from a residual network."""
+    flow_dict = {}
+    for u in G:
+        flow_dict[u] = {v: 0 for v in G[u]}
+        flow_dict[u].update(
+            (v, attr["flow"]) for v, attr in R[u].items() if attr["flow"] > 0
+        )
+    return flow_dict
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c196331810e24751461e7eed9a49e58bc70c7163
Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a346a21a7e423795856e8f4266739ccff297925a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-310.pyc differ