diff --git a/ckpts/universal/global_step80/zero/1.word_embeddings.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/1.word_embeddings.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..901324c285f01c6d06159533edb0661629a901c8 --- /dev/null +++ b/ckpts/universal/global_step80/zero/1.word_embeddings.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8fc8e060da6651bab7e1102e5380f2358de5efce7080bda1b00f00354d4b48f +size 415237419 diff --git a/ckpts/universal/global_step80/zero/3.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/3.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..98d667484f698b5eabf5e13bdb2747bab36ab595 --- /dev/null +++ b/ckpts/universal/global_step80/zero/3.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92a80bd64be5eba997980a2627b669500ea36f9a38fc189e5c421d2a203a13f6 +size 16778396 diff --git a/ckpts/universal/global_step80/zero/3.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/3.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..6f1d66e16619db592b53f2d788137ad6028bfda8 --- /dev/null +++ b/ckpts/universal/global_step80/zero/3.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14f728afccf4d8d783a46de041deaaeb16e1e9fb7185087383b44cfcbb434588 +size 16778411 diff --git a/ckpts/universal/global_step80/zero/3.attention.dense.weight/fp32.pt b/ckpts/universal/global_step80/zero/3.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9b381783bb4379bb2e8b45213568c54491af32f7 --- /dev/null +++ b/ckpts/universal/global_step80/zero/3.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8226d59673cf2a085adc0c0f2794196701f0755cc65202d85297fbae0f64663b +size 16778317 diff --git a/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..4d7bd88a56a229b8b1a03e1093467471a02eb57a --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f1f02f7e113d1a38c9620bbff3cfc3cdef272f7b2e1a3157f266de6638f0ddc +size 33555612 diff --git a/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..5fa35bc3021d369f8f188ce393284602a918044f --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac371da278a2db15a650dac557983f0d2e12c67784d23f8320eeed6141716e5e +size 33555533 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..40549aff2386df8653a70f40fec6ca1356b89a38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__init__.py @@ -0,0 +1,25 @@ +"""Functions for computing and measuring 
community structure. + +The ``community`` subpackage can be accessed by using :mod:`networkx.community`, then accessing the +functions as attributes of ``community``. For example:: + + >>> import networkx as nx + >>> G = nx.barbell_graph(5, 1) + >>> communities_generator = nx.community.girvan_newman(G) + >>> top_level_communities = next(communities_generator) + >>> next_level_communities = next(communities_generator) + >>> sorted(map(sorted, next_level_communities)) + [[0, 1, 2, 3, 4], [5], [6, 7, 8, 9, 10]] + +""" +from networkx.algorithms.community.asyn_fluid import * +from networkx.algorithms.community.centrality import * +from networkx.algorithms.community.divisive import * +from networkx.algorithms.community.kclique import * +from networkx.algorithms.community.kernighan_lin import * +from networkx.algorithms.community.label_propagation import * +from networkx.algorithms.community.lukes import * +from networkx.algorithms.community.modularity_max import * +from networkx.algorithms.community.quality import * +from networkx.algorithms.community.community_utils import * +from networkx.algorithms.community.louvain import * diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d46fccd43abf276cc5cd9bceed0611288d65e0eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f30704c07e55fc963e98f6cfca210fb29e1684a Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8da259201913e277ad9ac2f504200695106f4a7d Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19b3f6668dbb45a1370ec6e473be5fff5c101e94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5f2169429eae20f0b54c47bd8b1599313ef6045 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/divisive.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f18b0ecbc5ff8082c22ccb1f1a4a213a94c17aa2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b852dbb2651906fc9b53f279a7dc0fb002eb5ff9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..579bf52ae2cdce14f2696bfe40817bda0483f365 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e686bf4d0d0d1874af70f3868663073d80914337 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9129f95d7517fed4907586cc1280397c69af945 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6768454fa210d07d2e1021d71dcf33bd6cc7c33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64a9b7e74f9e7ad4b39cf1cae6a6869d5168ff9b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/asyn_fluid.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/asyn_fluid.py new file mode 100644 index 
0000000000000000000000000000000000000000..fea72c1bfdbdd451ef653751b56c22445d5da51d --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/asyn_fluid.py @@ -0,0 +1,151 @@ +"""Asynchronous Fluid Communities algorithm for community detection.""" + +from collections import Counter + +import networkx as nx +from networkx.algorithms.components import is_connected +from networkx.exception import NetworkXError +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = ["asyn_fluidc"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatchable +def asyn_fluidc(G, k, max_iter=100, seed=None): + """Returns communities in `G` as detected by Fluid Communities algorithm. + + The asynchronous fluid communities algorithm is described in + [1]_. The algorithm is based on the simple idea of fluids interacting + in an environment, expanding and pushing each other. Its initialization is + random, so found communities may vary on different executions. + + The algorithm proceeds as follows. First each of the initial k communities + is initialized in a random vertex in the graph. Then the algorithm iterates + over all vertices in a random order, updating the community of each vertex + based on its own community and the communities of its neighbors. This + process is performed several times until convergence. + At all times, each community has a total density of 1, which is equally + distributed among the vertices it contains. If a vertex changes of + community, vertex densities of affected communities are adjusted + immediately. When a complete iteration over all vertices is done, such that + no vertex changes the community it belongs to, the algorithm has converged + and returns. + + This is the original version of the algorithm described in [1]_. + Unfortunately, it does not support weighted graphs yet. + + Parameters + ---------- + G : NetworkX graph + Graph must be simple and undirected. + + k : integer + The number of communities to be found. + + max_iter : integer + The number of maximum iterations allowed. By default 100. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + k variable is not an optional argument. + + References + ---------- + .. [1] Parés F., Garcia-Gasulla D. et al. "Fluid Communities: A + Competitive and Highly Scalable Community Detection Algorithm". + [https://arxiv.org/pdf/1703.09307.pdf]. 
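+ + Examples + -------- + A minimal usage sketch. The partition is random; a fixed seed makes a run reproducible, though the exact memberships may differ across NetworkX versions: + + >>> G = nx.karate_club_graph() + >>> communities = list(nx.community.asyn_fluidc(G, 2, seed=7)) + >>> len(communities) + 2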
+ """ + # Initial checks + if not isinstance(k, int): + raise NetworkXError("k must be an integer.") + if not k > 0: + raise NetworkXError("k must be greater than 0.") + if not is_connected(G): + raise NetworkXError("Fluid Communities require connected Graphs.") + if len(G) < k: + raise NetworkXError("k cannot be bigger than the number of nodes.") + # Initialization + max_density = 1.0 + vertices = list(G) + seed.shuffle(vertices) + communities = {n: i for i, n in enumerate(vertices[:k])} + density = {} + com_to_numvertices = {} + for vertex in communities: + com_to_numvertices[communities[vertex]] = 1 + density[communities[vertex]] = max_density + # Set up control variables and start iterating + iter_count = 0 + cont = True + while cont: + cont = False + iter_count += 1 + # Loop over all vertices in graph in a random order + vertices = list(G) + seed.shuffle(vertices) + for vertex in vertices: + # Updating rule + com_counter = Counter() + # Take into account self vertex community + try: + com_counter.update({communities[vertex]: density[communities[vertex]]}) + except KeyError: + pass + # Gather neighbor vertex communities + for v in G[vertex]: + try: + com_counter.update({communities[v]: density[communities[v]]}) + except KeyError: + continue + # Check which is the community with highest density + new_com = -1 + if len(com_counter.keys()) > 0: + max_freq = max(com_counter.values()) + best_communities = [ + com + for com, freq in com_counter.items() + if (max_freq - freq) < 0.0001 + ] + # If actual vertex com in best communities, it is preserved + try: + if communities[vertex] in best_communities: + new_com = communities[vertex] + except KeyError: + pass + # If vertex community changes... + if new_com == -1: + # Set flag of non-convergence + cont = True + # Randomly chose a new community from candidates + new_com = seed.choice(best_communities) + # Update previous community status + try: + com_to_numvertices[communities[vertex]] -= 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + except KeyError: + pass + # Update new community status + communities[vertex] = new_com + com_to_numvertices[communities[vertex]] += 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + # If maximum iterations reached --> output actual results + if iter_count > max_iter: + break + # Return results by grouping communities as list of vertices + return iter(groups(communities).values()) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/centrality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..43281701d2b630710acba8f3cef6693356aa461a --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/centrality.py @@ -0,0 +1,171 @@ +"""Functions for computing communities based on centrality notions.""" + +import networkx as nx + +__all__ = ["girvan_newman"] + + +@nx._dispatchable(preserve_edge_attrs="most_valuable_edge") +def girvan_newman(G, most_valuable_edge=None): + """Finds communities in a graph using the Girvan–Newman method. + + Parameters + ---------- + G : NetworkX graph + + most_valuable_edge : function + Function that takes a graph as input and outputs an edge. The + edge returned by this function will be recomputed and removed at + each iteration of the algorithm. 
+ + If not specified, the edge with the highest + :func:`networkx.edge_betweenness_centrality` will be used. + + Returns + ------- + iterator + Iterator over tuples of sets of nodes in `G`. Each set of node + is a community, each tuple is a sequence of communities at a + particular level of the algorithm. + + Examples + -------- + To get the first pair of communities:: + + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To get only the first *k* tuples of communities, use + :func:`itertools.islice`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 2 + >>> comp = nx.community.girvan_newman(G) + >>> for communities in itertools.islice(comp, k): + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + + To stop getting tuples of communities once the number of communities + is greater than *k*, use :func:`itertools.takewhile`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 4 + >>> comp = nx.community.girvan_newman(G) + >>> limited = itertools.takewhile(lambda c: len(c) <= k, comp) + >>> for communities in limited: + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5], [6, 7]) + + To just choose an edge to remove based on the weight:: + + >>> from operator import itemgetter + >>> G = nx.path_graph(10) + >>> edges = G.edges() + >>> nx.set_edge_attributes(G, {(u, v): v for u, v in edges}, "weight") + >>> def heaviest(G): + ... u, v, w = max(G.edges(data="weight"), key=itemgetter(2)) + ... return (u, v) + ... + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=heaviest) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4, 5, 6, 7, 8], [9]) + + To utilize edge weights when choosing an edge with, for example, the + highest betweenness centrality:: + + >>> from networkx import edge_betweenness_centrality as betweenness + >>> def most_central_edge(G): + ... centrality = betweenness(G, weight="weight") + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To specify a different ranking algorithm for edges, use the + `most_valuable_edge` keyword argument:: + + >>> from networkx import edge_betweenness_centrality + >>> from random import random + >>> def most_central_edge(G): + ... centrality = edge_betweenness_centrality(G) + ... max_cent = max(centrality.values()) + ... # Scale the centrality values so they are between 0 and 1, + ... # and add some random noise. + ... centrality = {e: c / max_cent for e, c in centrality.items()} + ... # Add some random noise. + ... centrality = {e: c + random() for e, c in centrality.items()} + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + + Notes + ----- + The Girvan–Newman algorithm detects communities by progressively + removing edges from the original graph. The algorithm removes the + "most valuable" edge, traditionally the edge with the highest + betweenness centrality, at each step. As the graph breaks down into + pieces, the tightly knit community structure is exposed and the + result can be depicted as a dendrogram. 
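+ + A common follow-up (a heuristic, not part of the algorithm itself) is to keep the level with the highest modularity among the first few levels:: + + >>> import itertools + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G) + >>> candidates = itertools.islice(comp, 4) + >>> best = max(candidates, key=lambda c: nx.community.modularity(G, c)) + >>> len(best) >= 2 + True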
+ + """ + # If the graph is already empty, simply return its connected + # components. + if G.number_of_edges() == 0: + yield tuple(nx.connected_components(G)) + return + # If no function is provided for computing the most valuable edge, + # use the edge betweenness centrality. + if most_valuable_edge is None: + + def most_valuable_edge(G): + """Returns the edge with the highest betweenness centrality + in the graph `G`. + + """ + # We have guaranteed that the graph is non-empty, so this + # dictionary will never be empty. + betweenness = nx.edge_betweenness_centrality(G) + return max(betweenness, key=betweenness.get) + + # The copy of G here must include the edge weight data. + g = G.copy().to_undirected() + # Self-loops must be removed because their removal has no effect on + # the connected components of the graph. + g.remove_edges_from(nx.selfloop_edges(g)) + while g.number_of_edges() > 0: + yield _without_most_central_edges(g, most_valuable_edge) + + +def _without_most_central_edges(G, most_valuable_edge): + """Returns the connected components of the graph that results from + repeatedly removing the most "valuable" edge in the graph. + + `G` must be a non-empty graph. This function modifies the graph `G` + in-place; that is, it removes edges on the graph `G`. + + `most_valuable_edge` is a function that takes the graph `G` as input + (or a subgraph with one or more edges of `G` removed) and returns an + edge. That edge will be removed and this process will be repeated + until the number of connected components in the graph increases. + + """ + original_num_components = nx.number_connected_components(G) + num_new_components = original_num_components + while num_new_components <= original_num_components: + edge = most_valuable_edge(G) + G.remove_edge(*edge) + new_components = tuple(nx.connected_components(G)) + num_new_components = len(new_components) + return new_components diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/community_utils.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/community_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b57cd9881cb54641bb793d2a4364fd552fc7e864 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/community_utils.py @@ -0,0 +1,29 @@ +"""Helper functions for community-finding algorithms.""" +import networkx as nx + +__all__ = ["is_partition"] + + +@nx._dispatchable +def is_partition(G, communities): + """Returns *True* if `communities` is a partition of the nodes of `G`. + + A partition of a universe set is a family of pairwise disjoint sets + whose union is the entire universe set. + + Parameters + ---------- + G : NetworkX graph. + + communities : list or iterable of sets of nodes + If not a list, the iterable is converted internally to a list. + If it is an iterator it is exhausted. 
+ + """ + # Alternate implementation: + # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G) + if not isinstance(communities, list): + communities = list(communities) + nodes = {n for c in communities for n in c if n in G} + + return len(G) == len(nodes) == sum(len(c) for c in communities) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/divisive.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/divisive.py new file mode 100644 index 0000000000000000000000000000000000000000..1fc3959469338bb305cc4d7716e9c68a44ea173e --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/divisive.py @@ -0,0 +1,196 @@ +import functools + +import networkx as nx + +__all__ = [ + "edge_betweenness_partition", + "edge_current_flow_betweenness_partition", +] + + +@nx._dispatchable(edge_attrs="weight") +def edge_betweenness_partition(G, number_of_sets, *, weight=None): + """Partition created by iteratively removing the highest edge betweenness edge. + + This algorithm works by calculating the edge betweenness for all + edges and removing the edge with the highest value. It is then + determined whether the graph has been broken into at least + `number_of_sets` connected components. + If not the process is repeated. + + Parameters + ---------- + G : NetworkX Graph, DiGraph or MultiGraph + Graph to be partitioned + + number_of_sets : int + Number of sets in the desired partition of the graph + + weight : key, optional, default=None + The key to use if using weights for edge betweenness calculation + + Returns + ------- + C : list of sets + Partition of the nodes of G + + Raises + ------ + NetworkXError + If number_of_sets is <= 0 or if number_of_sets > len(G) + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> part = nx.community.edge_betweenness_partition(G, 2) + >>> {0, 1, 3, 4, 5, 6, 7, 10, 11, 12, 13, 16, 17, 19, 21} in part + True + >>> {2, 8, 9, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33} in part + True + + See Also + -------- + edge_current_flow_betweenness_partition + + Notes + ----- + This algorithm is fairly slow, as both the calculation of connected + components and edge betweenness relies on all pairs shortest + path algorithms. They could potentially be combined to cut down + on overall computation time. + + References + ---------- + .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports + Volume 486, Issue 3-5 p. 75-174 + http://arxiv.org/abs/0906.0612 + """ + if number_of_sets <= 0: + raise nx.NetworkXError("number_of_sets must be >0") + if number_of_sets == 1: + return [set(G)] + if number_of_sets == len(G): + return [{n} for n in G] + if number_of_sets > len(G): + raise nx.NetworkXError("number_of_sets must be <= len(G)") + + H = G.copy() + partition = list(nx.connected_components(H)) + while len(partition) < number_of_sets: + ranking = nx.edge_betweenness_centrality(H, weight=weight) + edge = max(ranking, key=ranking.get) + H.remove_edge(*edge) + partition = list(nx.connected_components(H)) + return partition + + +@nx._dispatchable(edge_attrs="weight") +def edge_current_flow_betweenness_partition(G, number_of_sets, *, weight=None): + """Partition created by removing the highest edge current flow betweenness edge. + + This algorithm works by calculating the edge current flow + betweenness for all edges and removing the edge with the + highest value. 
It is then determined whether the graph has + been broken into at least `number_of_sets` connected + components. If not the process is repeated. + + Parameters + ---------- + G : NetworkX Graph, DiGraph or MultiGraph + Graph to be partitioned + + number_of_sets : int + Number of sets in the desired partition of the graph + + weight : key, optional (default=None) + The edge attribute key to use as weights for + edge current flow betweenness calculations + + Returns + ------- + C : list of sets + Partition of G + + Raises + ------ + NetworkXError + If number_of_sets is <= 0 or number_of_sets > len(G) + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> part = nx.community.edge_current_flow_betweenness_partition(G, 2) + >>> {0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 16, 17, 19, 21} in part + True + >>> {8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33} in part + True + + + See Also + -------- + edge_betweenness_partition + + Notes + ----- + This algorithm is extremely slow, as the recalculation of the edge + current flow betweenness is extremely slow. + + References + ---------- + .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports + Volume 486, Issue 3-5 p. 75-174 + http://arxiv.org/abs/0906.0612 + """ + if number_of_sets <= 0: + raise nx.NetworkXError("number_of_sets must be >0") + elif number_of_sets == 1: + return [set(G)] + elif number_of_sets == len(G): + return [{n} for n in G] + elif number_of_sets > len(G): + raise nx.NetworkXError("number_of_sets must be <= len(G)") + + rank = functools.partial( + nx.edge_current_flow_betweenness_centrality, normalized=False, weight=weight + ) + + # current flow requires a connected network so we track the components explicitly + H = G.copy() + partition = list(nx.connected_components(H)) + if len(partition) > 1: + Hcc_subgraphs = [H.subgraph(cc).copy() for cc in partition] + else: + Hcc_subgraphs = [H] + + ranking = {} + for Hcc in Hcc_subgraphs: + ranking.update(rank(Hcc)) + + while len(partition) < number_of_sets: + edge = max(ranking, key=ranking.get) + for cc, Hcc in zip(partition, Hcc_subgraphs): + if edge[0] in cc: + Hcc.remove_edge(*edge) + del ranking[edge] + splitcc_list = list(nx.connected_components(Hcc)) + if len(splitcc_list) > 1: + # there are 2 connected components. split off smaller one + cc_new = min(splitcc_list, key=len) + Hcc_new = Hcc.subgraph(cc_new).copy() + # update edge rankings for Hcc_new + newranks = rank(Hcc_new) + for e, r in newranks.items(): + ranking[e if e in ranking else e[::-1]] = r + # append new cc and Hcc to their lists. 
+ partition.append(cc_new) + Hcc_subgraphs.append(Hcc_new) + + # leave existing cc and Hcc in their lists, but shrink them + Hcc.remove_nodes_from(cc_new) + cc.difference_update(cc_new) + # update edge rankings for Hcc whether it was split or not + newranks = rank(Hcc) + for e, r in newranks.items(): + ranking[e if e in ranking else e[::-1]] = r + break + return partition diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/kclique.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/kclique.py new file mode 100644 index 0000000000000000000000000000000000000000..c72491042046b6f79ba5c7cb4a90ac8822491d84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/kclique.py @@ -0,0 +1,79 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["k_clique_communities"] + + +@nx._dispatchable +def k_clique_communities(G, k, cliques=None): + """Find k-clique communities in graph using the percolation method. + + A k-clique community is the union of all cliques of size k that + can be reached through adjacent (sharing k-1 nodes) k-cliques. + + Parameters + ---------- + G : NetworkX graph + + k : int + Size of smallest clique + + cliques: list or generator + Precomputed cliques (use networkx.find_cliques(G)) + + Returns + ------- + Yields sets of nodes, one for each k-clique community. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> K5 = nx.convert_node_labels_to_integers(G, first_label=2) + >>> G.add_edges_from(K5.edges()) + >>> c = list(nx.community.k_clique_communities(G, 4)) + >>> sorted(list(c[0])) + [0, 1, 2, 3, 4, 5, 6] + >>> list(nx.community.k_clique_communities(G, 6)) + [] + + References + ---------- + .. [1] Gergely Palla, Imre Derényi, Illés Farkas1, and Tamás Vicsek, + Uncovering the overlapping community structure of complex networks + in nature and society Nature 435, 814-818, 2005, + doi:10.1038/nature03607 + """ + if k < 2: + raise nx.NetworkXError(f"k={k}, k must be greater than 1.") + if cliques is None: + cliques = nx.find_cliques(G) + cliques = [frozenset(c) for c in cliques if len(c) >= k] + + # First index which nodes are in which cliques + membership_dict = defaultdict(list) + for clique in cliques: + for node in clique: + membership_dict[node].append(clique) + + # For each clique, see which adjacent cliques percolate + perc_graph = nx.Graph() + perc_graph.add_nodes_from(cliques) + for clique in cliques: + for adj_clique in _get_adjacent_cliques(clique, membership_dict): + if len(clique.intersection(adj_clique)) >= (k - 1): + perc_graph.add_edge(clique, adj_clique) + + # Connected components of clique graph with perc edges + # are the percolated cliques + for component in nx.connected_components(perc_graph): + yield (frozenset.union(*component)) + + +def _get_adjacent_cliques(clique, membership_dict): + adjacent_cliques = set() + for n in clique: + for adj_clique in membership_dict[n]: + if clique != adj_clique: + adjacent_cliques.add(adj_clique) + return adjacent_cliques diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/kernighan_lin.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/kernighan_lin.py new file mode 100644 index 0000000000000000000000000000000000000000..f6397d82be6fa94273a81a613411cc6a74d8c4cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/kernighan_lin.py @@ -0,0 +1,139 @@ +"""Functions for computing the Kernighan–Lin bipartition algorithm.""" + +from itertools import count + 
+import networkx as nx +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils import BinaryHeap, not_implemented_for, py_random_state + +__all__ = ["kernighan_lin_bisection"] + + +def _kernighan_lin_sweep(edges, side): + """ + This is a modified form of Kernighan-Lin, which moves single nodes at a + time, alternating between sides to keep the bisection balanced. We keep + two min-heaps of swap costs to make optimal-next-move selection fast. + """ + costs0, costs1 = costs = BinaryHeap(), BinaryHeap() + for u, side_u, edges_u in zip(count(), side, edges): + cost_u = sum(w if side[v] else -w for v, w in edges_u) + costs[side_u].insert(u, cost_u if side_u else -cost_u) + + def _update_costs(costs_x, x): + for y, w in edges[x]: + costs_y = costs[side[y]] + cost_y = costs_y.get(y) + if cost_y is not None: + cost_y += 2 * (-w if costs_x is costs_y else w) + costs_y.insert(y, cost_y, True) + + i = 0 + totcost = 0 + while costs0 and costs1: + u, cost_u = costs0.pop() + _update_costs(costs0, u) + v, cost_v = costs1.pop() + _update_costs(costs1, v) + totcost += cost_u + cost_v + i += 1 + yield totcost, i, (u, v) + + +@not_implemented_for("directed") +@py_random_state(4) +@nx._dispatchable(edge_attrs="weight") +def kernighan_lin_bisection(G, partition=None, max_iter=10, weight="weight", seed=None): + """Partition a graph into two blocks using the Kernighan–Lin + algorithm. + + This algorithm partitions a network into two sets by iteratively + swapping pairs of nodes to reduce the edge cut between the two sets. The + pairs are chosen according to a modified form of Kernighan-Lin [1]_, which + moves node individually, alternating between sides to keep the bisection + balanced. + + Parameters + ---------- + G : NetworkX graph + Graph must be undirected. + + partition : tuple + Pair of iterables containing an initial partition. If not + specified, a random balanced partition is used. + + max_iter : int + Maximum number of times to attempt swaps to find an + improvement before giving up. + + weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Only used if partition is None + + Returns + ------- + partition : tuple + A pair of sets of nodes representing the bipartition. + + Raises + ------ + NetworkXError + If partition is not a valid partition of the nodes of the graph. + + References + ---------- + .. [1] Kernighan, B. W.; Lin, Shen (1970). + "An efficient heuristic procedure for partitioning graphs." + *Bell Systems Technical Journal* 49: 291--307. + Oxford University Press 2011. 
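+ + Examples + -------- + With the default random initial partition the two sides are balanced (sizes differ by at most one); the exact split depends on the seed: + + >>> G = nx.barbell_graph(3, 0)  # two triangles joined by a single edge + >>> A, B = nx.community.kernighan_lin_bisection(G, seed=1) + >>> len(A), len(B) + (3, 3)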
+ + """ + n = len(G) + labels = list(G) + seed.shuffle(labels) + index = {v: i for i, v in enumerate(labels)} + + if partition is None: + side = [0] * (n // 2) + [1] * ((n + 1) // 2) + else: + try: + A, B = partition + except (TypeError, ValueError) as err: + raise nx.NetworkXError("partition must be two sets") from err + if not is_partition(G, (A, B)): + raise nx.NetworkXError("partition invalid") + side = [0] * n + for a in A: + side[index[a]] = 1 + + if G.is_multigraph(): + edges = [ + [ + (index[u], sum(e.get(weight, 1) for e in d.values())) + for u, d in G[v].items() + ] + for v in labels + ] + else: + edges = [ + [(index[u], e.get(weight, 1)) for u, e in G[v].items()] for v in labels + ] + + for i in range(max_iter): + costs = list(_kernighan_lin_sweep(edges, side)) + min_cost, min_i, _ = min(costs) + if min_cost >= 0: + break + + for _, _, (u, v) in costs[:min_i]: + side[u] = 1 + side[v] = 0 + + A = {u for u, s in zip(labels, side) if s == 0} + B = {u for u, s in zip(labels, side) if s == 1} + return A, B diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/label_propagation.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..8690855766b9f4db085fe1e36c6d8bf8b06cb54d --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/label_propagation.py @@ -0,0 +1,337 @@ +""" +Label propagation community detection algorithms. +""" +from collections import Counter, defaultdict, deque + +import networkx as nx +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = [ + "label_propagation_communities", + "asyn_lpa_communities", + "fast_label_propagation_communities", +] + + +@py_random_state("seed") +@nx._dispatchable(edge_attrs="weight") +def fast_label_propagation_communities(G, *, weight=None, seed=None): + """Returns communities in `G` as detected by fast label propagation. + + The fast label propagation algorithm is described in [1]_. The algorithm is + probabilistic and the found communities may vary in different executions. + + The algorithm operates as follows. First, the community label of each node is + set to a unique label. The algorithm then repeatedly updates the labels of + the nodes to the most frequent label in their neighborhood. In case of ties, + a random label is chosen from the most frequent labels. + + The algorithm maintains a queue of nodes that still need to be processed. + Initially, all nodes are added to the queue in a random order. Then the nodes + are removed from the queue one by one and processed. If a node updates its label, + all its neighbors that have a different label are added to the queue (if not + already in the queue). The algorithm stops when the queue is empty. + + Parameters + ---------- + G : Graph, DiGraph, MultiGraph, or MultiDiGraph + Any NetworkX graph. + + weight : string, or None (default) + The edge attribute representing a non-negative weight of an edge. If None, + each edge is assumed to have weight one. The weight of an edge is used in + determining the frequency with which a label appears among the neighbors of + a node (edge with weight `w` is equivalent to `w` unweighted edges). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. 
+ + Notes + ----- + Edge directions are ignored for directed graphs. + Edge weights must be non-negative numbers. + + References + ---------- + .. [1] Vincent A. Traag & Lovro Šubelj. "Large network community detection by + fast label propagation." Scientific Reports 13 (2023): 2701. + https://doi.org/10.1038/s41598-023-29610-z + """ + + # Queue of nodes to be processed. + nodes_queue = deque(G) + seed.shuffle(nodes_queue) + + # Set of nodes in the queue. + nodes_set = set(G) + + # Assign unique label to each node. + comms = {node: i for i, node in enumerate(G)} + + while nodes_queue: + # Remove next node from the queue to process. + node = nodes_queue.popleft() + nodes_set.remove(node) + + # Isolated nodes retain their initial label. + if G.degree(node) > 0: + # Compute frequency of labels in node's neighborhood. + label_freqs = _fast_label_count(G, comms, node, weight) + max_freq = max(label_freqs.values()) + + # Always sample new label from most frequent labels. + comm = seed.choice( + [comm for comm in label_freqs if label_freqs[comm] == max_freq] + ) + + if comms[node] != comm: + comms[node] = comm + + # Add neighbors that have different label to the queue. + for nbr in nx.all_neighbors(G, node): + if comms[nbr] != comm and nbr not in nodes_set: + nodes_queue.append(nbr) + nodes_set.add(nbr) + + yield from groups(comms).values() + + +def _fast_label_count(G, comms, node, weight=None): + """Computes the frequency of labels in the neighborhood of a node. + + Returns a dictionary keyed by label to the frequency of that label. + """ + + if weight is None: + # Unweighted (un)directed simple graph. + if not G.is_multigraph(): + label_freqs = Counter(map(comms.get, nx.all_neighbors(G, node))) + + # Unweighted (un)directed multigraph. + else: + label_freqs = defaultdict(int) + for nbr in G[node]: + label_freqs[comms[nbr]] += len(G[node][nbr]) + + if G.is_directed(): + for nbr in G.pred[node]: + label_freqs[comms[nbr]] += len(G.pred[node][nbr]) + + else: + # Weighted undirected simple/multigraph. + label_freqs = defaultdict(float) + for _, nbr, w in G.edges(node, data=weight, default=1): + label_freqs[comms[nbr]] += w + + # Weighted directed simple/multigraph. + if G.is_directed(): + for nbr, _, w in G.in_edges(node, data=weight, default=1): + label_freqs[comms[nbr]] += w + + return label_freqs + + +@py_random_state(2) +@nx._dispatchable(edge_attrs="weight") +def asyn_lpa_communities(G, weight=None, seed=None): + """Returns communities in `G` as detected by asynchronous label + propagation. + + The asynchronous label propagation algorithm is described in + [1]_. The algorithm is probabilistic and the found communities may + vary on different executions. + + The algorithm proceeds as follows. After initializing each node with + a unique label, the algorithm repeatedly sets the label of a node to + be the label that appears most frequently among that nodes + neighbors. The algorithm halts when each node has the label that + appears most frequently among its neighbors. The algorithm is + asynchronous because each node is updated without waiting for + updates on the remaining nodes. + + This generalized version of the algorithm in [1]_ accepts edge + weights. + + Parameters + ---------- + G : Graph + + weight : string + The edge attribute representing the weight of an edge. + If None, each edge is assumed to have weight one. 
In this + algorithm, the weight of an edge is used in determining the + frequency with which a label appears among the neighbors of a + node: a higher weight means the label appears more often. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge weight attributes must be numerical. + + References + ---------- + .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near + linear time algorithm to detect community structures in large-scale + networks." Physical Review E 76.3 (2007): 036106. + """ + + labels = {n: i for i, n in enumerate(G)} + cont = True + + while cont: + cont = False + nodes = list(G) + seed.shuffle(nodes) + + for node in nodes: + if not G[node]: + continue + + # Get label frequencies among adjacent nodes. + # Depending on the order they are processed in, + # some nodes will be in iteration t and others in t-1, + # making the algorithm asynchronous. + if weight is None: + # initialising a Counter from an iterator of labels is + # faster for getting unweighted label frequencies + label_freq = Counter(map(labels.get, G[node])) + else: + # updating a defaultdict is substantially faster + # for getting weighted label frequencies + label_freq = defaultdict(float) + for _, v, wt in G.edges(node, data=weight, default=1): + label_freq[labels[v]] += wt + + # Get the labels that appear with maximum frequency. + max_freq = max(label_freq.values()) + best_labels = [ + label for label, freq in label_freq.items() if freq == max_freq + ] + + # If the node does not have one of the maximum frequency labels, + # randomly choose one of them and update the node's label. + # Continue the iteration as long as at least one node + # doesn't have a maximum frequency label. + if labels[node] not in best_labels: + labels[node] = seed.choice(best_labels) + cont = True + + yield from groups(labels).values() + + +@not_implemented_for("directed") +@nx._dispatchable +def label_propagation_communities(G): + """Generates community sets determined by label propagation + + Finds communities in `G` using a semi-synchronous label propagation + method [1]_. This method combines the advantages of both the synchronous + and asynchronous models. Not implemented for directed graphs. + + Parameters + ---------- + G : graph + An undirected NetworkX graph. + + Returns + ------- + communities : iterable + A dict_values object that contains a set of nodes for each community. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed + + References + ---------- + .. [1] Cordasco, G., & Gargano, L. (2010, December). Community detection + via semi-synchronous label propagation algorithms. In Business + Applications of Social Network Analysis (BASNA), 2010 IEEE International + Workshop on (pp. 1-8). IEEE. + """ + coloring = _color_network(G) + # Create a unique label for each node in the graph + labeling = {v: k for k, v in enumerate(G)} + while not _labeling_complete(labeling, G): + # Update the labels of every node with the same color. + for color, nodes in coloring.items(): + for n in nodes: + _update_label(n, labeling, G) + + clusters = defaultdict(set) + for node, label in labeling.items(): + clusters[label].add(node) + return clusters.values() + + +def _color_network(G): + """Colors the network so that neighboring nodes all have distinct colors. 
+ + Returns a dict keyed by color to a set of nodes with that color. + """ + coloring = {} # color => set(node) + colors = nx.coloring.greedy_color(G) + for node, color in colors.items(): + if color in coloring: + coloring[color].add(node) + else: + coloring[color] = {node} + return coloring + + +def _labeling_complete(labeling, G): + """Determines whether or not LPA is done. + + Label propagation is complete when all nodes have a label that is + in the set of highest frequency labels amongst its neighbors. + + Nodes with no neighbors are considered complete. + """ + return all( + labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0 + ) + + +def _most_frequent_labels(node, labeling, G): + """Returns a set of all labels with maximum frequency in `labeling`. + + Input `labeling` should be a dict keyed by node to labels. + """ + if not G[node]: + # Nodes with no neighbors are themselves a community and are labeled + # accordingly, hence the immediate if statement. + return {labeling[node]} + + # Compute the frequencies of all neighbors of node + freqs = Counter(labeling[q] for q in G[node]) + max_freq = max(freqs.values()) + return {label for label, freq in freqs.items() if freq == max_freq} + + +def _update_label(node, labeling, G): + """Updates the label of a node using the Prec-Max tie breaking algorithm + + The algorithm is explained in: 'Community Detection via Semi-Synchronous + Label Propagation Algorithms' Cordasco and Gargano, 2011 + """ + high_labels = _most_frequent_labels(node, labeling, G) + if len(high_labels) == 1: + labeling[node] = high_labels.pop() + elif len(high_labels) > 1: + # Prec-Max + if labeling[node] not in high_labels: + labeling[node] = max(high_labels) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/louvain.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/louvain.py new file mode 100644 index 0000000000000000000000000000000000000000..959c93a5104b1f68ad67aef1b85bd28adbfc32ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/louvain.py @@ -0,0 +1,382 @@ +"""Function for detecting communities based on Louvain Community Detection +Algorithm""" + +import itertools +from collections import defaultdict, deque + +import networkx as nx +from networkx.algorithms.community import modularity +from networkx.utils import py_random_state + +__all__ = ["louvain_communities", "louvain_partitions"] + + +@py_random_state("seed") +@nx._dispatchable(edge_attrs="weight") +def louvain_communities( + G, weight="weight", resolution=1, threshold=0.0000001, max_level=None, seed=None +): + r"""Find the best partition of a graph using the Louvain Community Detection + Algorithm. + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The algorithm works in 2 steps. On the first step it assigns every node to be + in its own community and then for each node it tries to find the maximum positive + modularity gain by moving each node to all of its neighbor communities. If no positive + gain is achieved the node remains in its original community. + + The modularity gain obtained by moving an isolated node $i$ into a community $C$ can + easily be calculated by the following formula (combining [1]_ [2]_ and some algebra): + + .. 
math:: + \Delta Q = \frac{k_{i,in}}{2m} - \gamma\frac{ \Sigma_{tot} \cdot k_i}{2m^2} + + where $m$ is the size of the graph, $k_{i,in}$ is the sum of the weights of the links + from $i$ to nodes in $C$, $k_i$ is the sum of the weights of the links incident to node $i$, + $\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\gamma$ + is the resolution parameter. + + For the directed case the modularity gain can be computed using this formula according to [3]_ + + .. math:: + \Delta Q = \frac{k_{i,in}}{m} + - \gamma\frac{k_i^{out} \cdot\Sigma_{tot}^{in} + k_i^{in} \cdot \Sigma_{tot}^{out}}{m^2} + + where $k_i^{out}$, $k_i^{in}$ are the outer and inner weighted degrees of node $i$ and + $\Sigma_{tot}^{in}$, $\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident + to nodes in $C$. + + The first phase continues until no individual move can improve the modularity. + + The second phase consists in building a new network whose nodes are now the communities + found in the first phase. To do so, the weights of the links between the new nodes are given by + the sum of the weight of the links between nodes in the corresponding two communities. Once this + phase is complete it is possible to reapply the first phase creating bigger communities with + increased modularity. + + The above two phases are executed until no modularity gain is achieved (or is less than + the `threshold`, or until `max_levels` is reached). + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + max_level : int or None, optional (default=None) + The maximum number of levels (steps of the algorithm) to compute. + Must be a positive integer or None. If None, then there is no max + level and the threshold parameter determines the stopping condition. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + Examples + -------- + >>> import networkx as nx + >>> G = nx.petersen_graph() + >>> nx.community.louvain_communities(G, seed=123) + [{0, 4, 5, 7, 9}, {1, 2, 3, 6, 8}] + + Notes + ----- + The order in which the nodes are considered can affect the final output. In the algorithm + the ordering happens using a random shuffle. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. 
Mech 10008, 1-12(2008). https://doi.org/10.1088/1742-5468/2008/10/P10008 + .. [2] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing + well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z + .. [3] Nicolas Dugué, Anthony Perez. Directed Louvain : maximizing modularity in directed networks. + [Research Report] Université d’Orléans. 2015. hal-01231784. https://hal.archives-ouvertes.fr/hal-01231784 + + See Also + -------- + louvain_partitions + """ + + partitions = louvain_partitions(G, weight, resolution, threshold, seed) + if max_level is not None: + if max_level <= 0: + raise ValueError("max_level argument must be a positive integer or None") + partitions = itertools.islice(partitions, max_level) + final_partition = deque(partitions, maxlen=1) + return final_partition.pop() + + +@py_random_state("seed") +@nx._dispatchable(edge_attrs="weight") +def louvain_partitions( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + """Yields partitions for each level of the Louvain Community Detection Algorithm + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The partitions at each level (step of the algorithm) form a dendrogram of communities. + A dendrogram is a diagram representing a tree and each level represents + a partition of the G graph. The top level contains the smallest communities + and as you traverse to the bottom of the tree the communities get bigger + and the overall modularity increases making the partition better. + + Each level is generated by executing the two phases of the Louvain Community + Detection Algorithm. + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. 
Mech 10008, 1-12(2008) + + See Also + -------- + louvain_communities + """ + + partition = [{u} for u in G.nodes()] + if nx.is_empty(G): + yield partition + return + mod = modularity(G, partition, resolution=resolution, weight=weight) + is_directed = G.is_directed() + if G.is_multigraph(): + graph = _convert_multigraph(G, weight, is_directed) + else: + graph = G.__class__() + graph.add_nodes_from(G) + graph.add_weighted_edges_from(G.edges(data=weight, default=1)) + + m = graph.size(weight="weight") + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + improvement = True + while improvement: + # gh-5901 protect the sets in the yielded list from further manipulation here + yield [s.copy() for s in partition] + new_mod = modularity( + graph, inner_partition, resolution=resolution, weight="weight" + ) + if new_mod - mod <= threshold: + return + mod = new_mod + graph = _gen_graph(graph, inner_partition) + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + + +def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): + """Calculate one level of the Louvain partitions tree + + Parameters + ---------- + G : NetworkX Graph/DiGraph + The graph from which to detect communities + m : number + The size of the graph `G`. + partition : list of sets of nodes + A valid partition of the graph `G` + resolution : positive number + The resolution parameter for computing the modularity of a partition + is_directed : bool + True if `G` is a directed graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + """ + node2com = {u: i for i, u in enumerate(G.nodes())} + inner_partition = [{u} for u in G.nodes()] + if is_directed: + in_degrees = dict(G.in_degree(weight="weight")) + out_degrees = dict(G.out_degree(weight="weight")) + Stot_in = list(in_degrees.values()) + Stot_out = list(out_degrees.values()) + # Calculate weights for both in and out neighbors without considering self-loops + nbrs = {} + for u in G: + nbrs[u] = defaultdict(float) + for _, n, wt in G.out_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + for n, _, wt in G.in_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + else: + degrees = dict(G.degree(weight="weight")) + Stot = list(degrees.values()) + nbrs = {u: {v: data["weight"] for v, data in G[u].items() if v != u} for u in G} + rand_nodes = list(G.nodes) + seed.shuffle(rand_nodes) + nb_moves = 1 + improvement = False + while nb_moves > 0: + nb_moves = 0 + for u in rand_nodes: + best_mod = 0 + best_com = node2com[u] + weights2com = _neighbor_weights(nbrs[u], node2com) + if is_directed: + in_degree = in_degrees[u] + out_degree = out_degrees[u] + Stot_in[best_com] -= in_degree + Stot_out[best_com] -= out_degree + remove_cost = ( + -weights2com[best_com] / m + + resolution + * (out_degree * Stot_in[best_com] + in_degree * Stot_out[best_com]) + / m**2 + ) + else: + degree = degrees[u] + Stot[best_com] -= degree + remove_cost = -weights2com[best_com] / m + resolution * ( + Stot[best_com] * degree + ) / (2 * m**2) + for nbr_com, wt in weights2com.items(): + if is_directed: + gain = ( + remove_cost + + wt / m + - resolution + * ( + out_degree * Stot_in[nbr_com] + + in_degree * Stot_out[nbr_com] + ) + / m**2 + ) + else: + gain = ( + remove_cost + + wt / m + - resolution * (Stot[nbr_com] * degree) / (2 * m**2) + ) + if gain > best_mod: + best_mod = gain + best_com = nbr_com + 
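# Add u's degree back to the running totals of the community it ends up in (its original community if no move improves modularity). +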
if is_directed: + Stot_in[best_com] += in_degree + Stot_out[best_com] += out_degree + else: + Stot[best_com] += degree + if best_com != node2com[u]: + com = G.nodes[u].get("nodes", {u}) + partition[node2com[u]].difference_update(com) + inner_partition[node2com[u]].remove(u) + partition[best_com].update(com) + inner_partition[best_com].add(u) + improvement = True + nb_moves += 1 + node2com[u] = best_com + partition = list(filter(len, partition)) + inner_partition = list(filter(len, inner_partition)) + return partition, inner_partition, improvement + + +def _neighbor_weights(nbrs, node2com): + """Calculate weights between node and its neighbor communities. + + Parameters + ---------- + nbrs : dictionary + Dictionary with nodes' neighbors as keys and their edge weight as value. + node2com : dictionary + Dictionary with all graph's nodes as keys and their community index as value. + + """ + weights = defaultdict(float) + for nbr, wt in nbrs.items(): + weights[node2com[nbr]] += wt + return weights + + +def _gen_graph(G, partition): + """Generate a new graph based on the partitions of a given graph""" + H = G.__class__() + node2com = {} + for i, part in enumerate(partition): + nodes = set() + for node in part: + node2com[node] = i + nodes.update(G.nodes[node].get("nodes", {node})) + H.add_node(i, nodes=nodes) + + for node1, node2, wt in G.edges(data=True): + wt = wt["weight"] + com1 = node2com[node1] + com2 = node2com[node2] + temp = H.get_edge_data(com1, com2, {"weight": 0})["weight"] + H.add_edge(com1, com2, weight=wt + temp) + return H + + +def _convert_multigraph(G, weight, is_directed): + """Convert a Multigraph to normal Graph""" + if is_directed: + H = nx.DiGraph() + else: + H = nx.Graph() + H.add_nodes_from(G) + for u, v, wt in G.edges(data=weight, default=1): + if H.has_edge(u, v): + H[u][v]["weight"] += wt + else: + H.add_edge(u, v, weight=wt) + return H diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/lukes.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/lukes.py new file mode 100644 index 0000000000000000000000000000000000000000..08dd7cd52ff414c1397e3effea504853f3c9caf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/lukes.py @@ -0,0 +1,227 @@ +"""Lukes Algorithm for exact optimal weighted tree partitioning.""" + +from copy import deepcopy +from functools import lru_cache +from random import choice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["lukes_partitioning"] + +D_EDGE_W = "weight" +D_EDGE_VALUE = 1.0 +D_NODE_W = "weight" +D_NODE_VALUE = 1 +PKEY = "partitions" +CLUSTER_EVAL_CACHE_SIZE = 2048 + + +def _split_n_from(n, min_size_of_first_part): + # splits j in two parts of which the first is at least + # the second argument + assert n >= min_size_of_first_part + for p1 in range(min_size_of_first_part, n + 1): + yield p1, n - p1 + + +@nx._dispatchable(node_attrs="node_weight", edge_attrs="edge_weight") +def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None): + """Optimal partitioning of a weighted tree using the Lukes algorithm. + + This algorithm partitions a connected, acyclic graph featuring integer + node weights and float edge weights. The resulting clusters are such + that the total weight of the nodes in each cluster does not exceed + max_size and that the weight of the edges that are cut by the partition + is minimum. The algorithm is based on [1]_. 
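+
+    A minimal illustrative sketch (assuming only ``networkx``): on a small
+    path graph, which is a tree, with the default unit node and edge weights,
+    every cluster of the returned partition respects the ``max_size`` bound;
+    the exact clusters may depend on tie-breaking::
+
+        >>> G = nx.path_graph(5)
+        >>> partition = nx.community.lukes_partitioning(G, max_size=2)
+        >>> all(len(cluster) <= 2 for cluster in partition)
+        True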
+ + Parameters + ---------- + G : NetworkX graph + + max_size : int + Maximum weight a partition can have in terms of sum of + node_weight for all nodes in the partition + + edge_weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + node_weight : key + Node data key to use as weight. If None, the weights are all + set to one. The data must be int. + + Returns + ------- + partition : list + A list of sets of nodes representing the clusters of the + partition. + + Raises + ------ + NotATree + If G is not a tree. + TypeError + If any of the values of node_weight is not int. + + References + ---------- + .. [1] Lukes, J. A. (1974). + "Efficient Algorithm for the Partitioning of Trees." + IBM Journal of Research and Development, 18(3), 217–224. + + """ + # First sanity check and tree preparation + if not nx.is_tree(G): + raise nx.NotATree("lukes_partitioning works only on trees") + else: + if nx.is_directed(G): + root = [n for n, d in G.in_degree() if d == 0] + assert len(root) == 1 + root = root[0] + t_G = deepcopy(G) + else: + root = choice(list(G.nodes)) + # this has the desirable side effect of not inheriting attributes + t_G = nx.dfs_tree(G, root) + + # Since we do not want to screw up the original graph, + # if we have a blank attribute, we make a deepcopy + if edge_weight is None or node_weight is None: + safe_G = deepcopy(G) + if edge_weight is None: + nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W) + edge_weight = D_EDGE_W + if node_weight is None: + nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W) + node_weight = D_NODE_W + else: + safe_G = G + + # Second sanity check + # The values of node_weight MUST BE int. + # I cannot see any room for duck typing without incurring serious + # danger of subtle bugs. + all_n_attr = nx.get_node_attributes(safe_G, node_weight).values() + for x in all_n_attr: + if not isinstance(x, int): + raise TypeError( + "lukes_partitioning needs integer " + f"values for node_weight ({node_weight})" + ) + + # SUBROUTINES ----------------------- + # these functions are defined here for two reasons: + # - brevity: we can leverage global "safe_G" + # - caching: signatures are hashable + + @not_implemented_for("undirected") + # this is intended to be called only on t_G + def _leaves(gr): + for x in gr.nodes: + if not nx.descendants(gr, x): + yield x + + @not_implemented_for("undirected") + def _a_parent_of_leaves_only(gr): + tleaves = set(_leaves(gr)) + for n in set(gr.nodes) - tleaves: + if all(x in tleaves for x in nx.descendants(gr, n)): + return n + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _value_of_cluster(cluster): + valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster] + return sum(safe_G.edges[e][edge_weight] for e in valid_edges) + + def _value_of_partition(partition): + return sum(_value_of_cluster(frozenset(c)) for c in partition) + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _weight_of_cluster(cluster): + return sum(safe_G.nodes[n][node_weight] for n in cluster) + + def _pivot(partition, node): + ccx = [c for c in partition if node in c] + assert len(ccx) == 1 + return ccx[0] + + def _concatenate_or_merge(partition_1, partition_2, x, i, ref_weight): + ccx = _pivot(partition_1, x) + cci = _pivot(partition_2, i) + merged_xi = ccx.union(cci) + + # We first check if we can do the merge. 
+ # If so, we do the actual calculations, otherwise we concatenate + if _weight_of_cluster(frozenset(merged_xi)) <= ref_weight: + cp1 = list(filter(lambda x: x != ccx, partition_1)) + cp2 = list(filter(lambda x: x != cci, partition_2)) + + option_2 = [merged_xi] + cp1 + cp2 + return option_2, _value_of_partition(option_2) + else: + option_1 = partition_1 + partition_2 + return option_1, _value_of_partition(option_1) + + # INITIALIZATION ----------------------- + leaves = set(_leaves(t_G)) + for lv in leaves: + t_G.nodes[lv][PKEY] = {} + slot = safe_G.nodes[lv][node_weight] + t_G.nodes[lv][PKEY][slot] = [{lv}] + t_G.nodes[lv][PKEY][0] = [{lv}] + + for inner in [x for x in t_G.nodes if x not in leaves]: + t_G.nodes[inner][PKEY] = {} + slot = safe_G.nodes[inner][node_weight] + t_G.nodes[inner][PKEY][slot] = [{inner}] + nx._clear_cache(t_G) + + # CORE ALGORITHM ----------------------- + while True: + x_node = _a_parent_of_leaves_only(t_G) + weight_of_x = safe_G.nodes[x_node][node_weight] + best_value = 0 + best_partition = None + bp_buffer = {} + x_descendants = nx.descendants(t_G, x_node) + for i_node in x_descendants: + for j in range(weight_of_x, max_size + 1): + for a, b in _split_n_from(j, weight_of_x): + if ( + a not in t_G.nodes[x_node][PKEY] + or b not in t_G.nodes[i_node][PKEY] + ): + # it's not possible to form this particular weight sum + continue + + part1 = t_G.nodes[x_node][PKEY][a] + part2 = t_G.nodes[i_node][PKEY][b] + part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j) + + if j not in bp_buffer or bp_buffer[j][1] < value: + # we annotate in the buffer the best partition for j + bp_buffer[j] = part, value + + # we also keep track of the overall best partition + if best_value <= value: + best_value = value + best_partition = part + + # as illustrated in Lukes, once we finished a child, we can + # discharge the partitions we found into the graph + # (the key phrase is make all x == x') + # so that they are used by the subsequent children + for w, (best_part_for_vl, vl) in bp_buffer.items(): + t_G.nodes[x_node][PKEY][w] = best_part_for_vl + bp_buffer.clear() + + # the absolute best partition for this node + # across all weights has to be stored at 0 + t_G.nodes[x_node][PKEY][0] = best_partition + t_G.remove_nodes_from(x_descendants) + + if x_node == root: + # the 0-labeled partition of root + # is the optimal one for the whole tree + return t_G.nodes[root][PKEY][0] diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/modularity_max.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/modularity_max.py new file mode 100644 index 0000000000000000000000000000000000000000..f465e01c6b20ec0a34c7d8402ebdfb6e3e1b4e0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/modularity_max.py @@ -0,0 +1,451 @@ +"""Functions for detecting communities based on modularity.""" + +from collections import defaultdict + +import networkx as nx +from networkx.algorithms.community.quality import modularity +from networkx.utils import not_implemented_for +from networkx.utils.mapped_queue import MappedQueue + +__all__ = [ + "greedy_modularity_communities", + "naive_greedy_modularity_communities", +] + + +def _greedy_modularity_communities_generator(G, weight=None, resolution=1): + r"""Yield community partitions of G and the modularity change at each step. 
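+
+    A hedged sketch of the alternating yield protocol (illustrative only;
+    this is a private helper, so the calls assume module scope)::
+
+        >>> G = nx.karate_club_graph()
+        >>> gen = _greedy_modularity_communities_generator(G)
+        >>> first = next(gen)  # singleton partition: one community per node
+        >>> len(first) == G.number_of_nodes()
+        True
+        >>> dq = next(gen)  # modularity change of the best available merge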
+ + This function performs Clauset-Newman-Moore greedy modularity maximization [2]_ + At each step of the process it yields the change in modularity that will occur in + the next step followed by yielding the new community partition after that step. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until one community contains all nodes (the partition has one set). + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Yields + ------ + Alternating yield statements produce the following two objects: + + communities: dict_values + A dict_values of frozensets of nodes, one for each community. + This represents a partition of the nodes of the graph into communities. + The first yield is the partition with each node in its own community. + + dq: float + The change in modularity when merging the next two communities + that leads to the largest modularity. + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + directed = G.is_directed() + N = G.number_of_nodes() + + # Count edges (or the sum of edge-weights for weighted graphs) + m = G.size(weight) + q0 = 1 / m + + # Calculate degrees (notation from the papers) + # a : the fraction of (weighted) out-degree for each node + # b : the fraction of (weighted) in-degree for each node + if directed: + a = {node: deg_out * q0 for node, deg_out in G.out_degree(weight=weight)} + b = {node: deg_in * q0 for node, deg_in in G.in_degree(weight=weight)} + else: + a = b = {node: deg * q0 * 0.5 for node, deg in G.degree(weight=weight)} + + # this preliminary step collects the edge weights for each node pair + # It handles multigraph and digraph and works fine for graph. 
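+    # Commentary (added): the two loops below first accumulate, for every
+    # adjacent pair, the total edge weight in both directions, then rescale:
+    #     dq_dict[u][v] = w_uv / m - resolution * (a[u] * b[v] + b[u] * a[v])
+    # i.e. the modularity change of merging the singleton communities {u}, {v}.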
+ dq_dict = defaultdict(lambda: defaultdict(float)) + for u, v, wt in G.edges(data=weight, default=1): + if u == v: + continue + dq_dict[u][v] += wt + dq_dict[v][u] += wt + + # now scale and subtract the expected edge-weights term + for u, nbrdict in dq_dict.items(): + for v, wt in nbrdict.items(): + dq_dict[u][v] = q0 * wt - resolution * (a[u] * b[v] + b[u] * a[v]) + + # Use -dq to get a max_heap instead of a min_heap + # dq_heap holds a heap for each node's neighbors + dq_heap = {u: MappedQueue({(u, v): -dq for v, dq in dq_dict[u].items()}) for u in G} + # H -> all_dq_heap holds a heap with the best items for each node + H = MappedQueue([dq_heap[n].heap[0] for n in G if len(dq_heap[n]) > 0]) + + # Initialize single-node communities + communities = {n: frozenset([n]) for n in G} + yield communities.values() + + # Merge the two communities that lead to the largest modularity + while len(H) > 1: + # Find best merge + # Remove from heap of row maxes + # Ties will be broken by choosing the pair with lowest min community id + try: + negdq, u, v = H.pop() + except IndexError: + break + dq = -negdq + yield dq + # Remove best merge from row u heap + dq_heap[u].pop() + # Push new row max onto H + if len(dq_heap[u]) > 0: + H.push(dq_heap[u].heap[0]) + # If this element was also at the root of row v, we need to remove the + # duplicate entry from H + if dq_heap[v].heap[0] == (v, u): + H.remove((v, u)) + # Remove best merge from row v heap + dq_heap[v].remove((v, u)) + # Push new row max onto H + if len(dq_heap[v]) > 0: + H.push(dq_heap[v].heap[0]) + else: + # Duplicate wasn't in H, just remove from row v heap + dq_heap[v].remove((v, u)) + + # Perform merge + communities[v] = frozenset(communities[u] | communities[v]) + del communities[u] + + # Get neighbor communities connected to the merged communities + u_nbrs = set(dq_dict[u]) + v_nbrs = set(dq_dict[v]) + all_nbrs = (u_nbrs | v_nbrs) - {u, v} + both_nbrs = u_nbrs & v_nbrs + # Update dq for merge of u into v + for w in all_nbrs: + # Calculate new dq value + if w in both_nbrs: + dq_vw = dq_dict[v][w] + dq_dict[u][w] + elif w in v_nbrs: + dq_vw = dq_dict[v][w] - resolution * (a[u] * b[w] + a[w] * b[u]) + else: # w in u_nbrs + dq_vw = dq_dict[u][w] - resolution * (a[v] * b[w] + a[w] * b[v]) + # Update rows v and w + for row, col in [(v, w), (w, v)]: + dq_heap_row = dq_heap[row] + # Update dict for v,w only (u is removed below) + dq_dict[row][col] = dq_vw + # Save old max of per-row heap + if len(dq_heap_row) > 0: + d_oldmax = dq_heap_row.heap[0] + else: + d_oldmax = None + # Add/update heaps + d = (row, col) + d_negdq = -dq_vw + # Save old value for finding heap index + if w in v_nbrs: + # Update existing element in per-row heap + dq_heap_row.update(d, d, priority=d_negdq) + else: + # We're creating a new nonzero element, add to heap + dq_heap_row.push(d, priority=d_negdq) + # Update heap of row maxes if necessary + if d_oldmax is None: + # No entries previously in this row, push new max + H.push(d, priority=d_negdq) + else: + # We've updated an entry in this row, has the max changed? 
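+                    # Commentary (added): H holds exactly one entry per row --
+                    # that row's current best merge. If the old row max was
+                    # displaced (different key or different priority), H.update
+                    # swaps it for the new max; otherwise H is already consistent.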
+ row_max = dq_heap_row.heap[0] + if d_oldmax != row_max or d_oldmax.priority != row_max.priority: + H.update(d_oldmax, row_max) + + # Remove row/col u from dq_dict matrix + for w in dq_dict[u]: + # Remove from dict + dq_old = dq_dict[w][u] + del dq_dict[w][u] + # Remove from heaps if we haven't already + if w != v: + # Remove both row and column + for row, col in [(w, u), (u, w)]: + dq_heap_row = dq_heap[row] + # Check if replaced dq is row max + d_old = (row, col) + if dq_heap_row.heap[0] == d_old: + # Update per-row heap and heap of row maxes + dq_heap_row.remove(d_old) + H.remove(d_old) + # Update row max + if len(dq_heap_row) > 0: + H.push(dq_heap_row.heap[0]) + else: + # Only update per-row heap + dq_heap_row.remove(d_old) + + del dq_dict[u] + # Mark row u as deleted, but keep placeholder + dq_heap[u] = MappedQueue() + # Merge u into v and update a + a[v] += a[u] + a[u] = 0 + if directed: + b[v] += b[u] + b[u] = 0 + + yield communities.values() + + +@nx._dispatchable(edge_attrs="weight") +def greedy_modularity_communities( + G, + weight=None, + resolution=1, + cutoff=1, + best_n=None, +): + r"""Find communities in G using greedy modularity maximization. + + This function uses Clauset-Newman-Moore greedy modularity maximization [2]_ + to find the community partition with the largest modularity. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until no further increase in modularity is possible (a maximum). + Two keyword arguments adjust the stopping condition. `cutoff` is a lower + limit on the number of communities so you can stop the process before + reaching a maximum (used to save computation time). `best_n` is an upper + limit on the number of communities so you can make the process continue + until at most n communities remain even if the maximum modularity occurs + for more. To obtain exactly n communities, set both `cutoff` and `best_n` to n. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float, optional (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + cutoff : int, optional (default=1) + A minimum number of communities below which the merging process stops. + The process stops at this number of communities even if modularity + is not maximized. The goal is to let the user stop the process early. + The process stops before the cutoff if it finds a maximum of modularity. + + best_n : int or None, optional (default=None) + A maximum number of communities above which the merging process will + not stop. This forces community merging to continue after modularity + starts to decrease until `best_n` communities remain. + If ``None``, don't force it to continue beyond a maximum. + + Raises + ------ + ValueError : If the `cutoff` or `best_n` value is not in the range + ``[1, G.number_of_nodes()]``, or if `best_n` < `cutoff`. + + Returns + ------- + communities: list + A list of frozensets of nodes, one for each community. 
+ Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + if not G.size(): + return [{n} for n in G] + + if (cutoff < 1) or (cutoff > G.number_of_nodes()): + raise ValueError(f"cutoff must be between 1 and {len(G)}. Got {cutoff}.") + if best_n is not None: + if (best_n < 1) or (best_n > G.number_of_nodes()): + raise ValueError(f"best_n must be between 1 and {len(G)}. Got {best_n}.") + if best_n < cutoff: + raise ValueError(f"Must have best_n >= cutoff. Got {best_n} < {cutoff}") + if best_n == 1: + return [set(G)] + else: + best_n = G.number_of_nodes() + + # retrieve generator object to construct output + community_gen = _greedy_modularity_communities_generator( + G, weight=weight, resolution=resolution + ) + + # construct the first best community + communities = next(community_gen) + + # continue merging communities until one of the breaking criteria is satisfied + while len(communities) > cutoff: + try: + dq = next(community_gen) + # StopIteration occurs when communities are the connected components + except StopIteration: + communities = sorted(communities, key=len, reverse=True) + # if best_n requires more merging, merge big sets for highest modularity + while len(communities) > best_n: + comm1, comm2, *rest = communities + communities = [comm1 ^ comm2] + communities.extend(rest) + return communities + + # keep going unless max_mod is reached or best_n says to merge more + if dq < 0 and len(communities) <= best_n: + break + communities = next(community_gen) + + return sorted(communities, key=len, reverse=True) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def naive_greedy_modularity_communities(G, resolution=1, weight=None): + r"""Find communities in G using greedy modularity maximization. + + This implementation is O(n^4), much slower than alternatives, but it is + provided as an easy-to-understand reference implementation. + + Greedy modularity maximization begins with each node in its own community + and joins the pair of communities that most increases modularity until no + such pair exists. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + Graph must be simple and undirected. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. 
+ + Returns + ------- + list + A list of sets of nodes, one for each community. + Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.naive_greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + greedy_modularity_communities + modularity + """ + # First create one community for each node + communities = [frozenset([u]) for u in G.nodes()] + # Track merges + merges = [] + # Greedily merge communities until no improvement is possible + old_modularity = None + new_modularity = modularity(G, communities, resolution=resolution, weight=weight) + while old_modularity is None or new_modularity > old_modularity: + # Save modularity for comparison + old_modularity = new_modularity + # Find best pair to merge + trial_communities = list(communities) + to_merge = None + for i, u in enumerate(communities): + for j, v in enumerate(communities): + # Skip i==j and empty communities + if j <= i or len(u) == 0 or len(v) == 0: + continue + # Merge communities u and v + trial_communities[j] = u | v + trial_communities[i] = frozenset([]) + trial_modularity = modularity( + G, trial_communities, resolution=resolution, weight=weight + ) + if trial_modularity >= new_modularity: + # Check if strictly better or tie + if trial_modularity > new_modularity: + # Found new best, save modularity and group indexes + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): + # Break ties by choosing pair with lowest min id + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + # Un-merge + trial_communities[i] = u + trial_communities[j] = v + if to_merge is not None: + # If the best merge improves modularity, use it + merges.append(to_merge) + i, j, dq = to_merge + u, v = communities[i], communities[j] + communities[j] = u | v + communities[i] = frozenset([]) + # Remove empty communities and sort + return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/quality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/quality.py new file mode 100644 index 0000000000000000000000000000000000000000..f09a6d454af24b38841de473d9e4584bb3a460e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/quality.py @@ -0,0 +1,346 @@ +"""Functions for measuring the quality of a partition (into +communities). + +""" + +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils.decorators import argmap + +__all__ = ["modularity", "partition_quality"] + + +class NotAPartition(NetworkXError): + """Raised if a given collection is not a partition.""" + + def __init__(self, G, collection): + msg = f"{collection} is not a valid partition of the graph {G}" + super().__init__(msg) + + +def _require_partition(G, partition): + """Decorator to check that a valid partition is input to a function + + Raises :exc:`networkx.NetworkXError` if the partition is not valid. + + This decorator should be used on functions whose first two arguments + are a graph and a partition of the nodes of that graph (in that + order):: + + >>> @require_partition + ... def foo(G, partition): + ... 
print("partition is valid!") + ... + >>> G = nx.complete_graph(5) + >>> partition = [{0, 1}, {2, 3}, {4}] + >>> foo(G, partition) + partition is valid! + >>> partition = [{0}, {2, 3}, {4}] + >>> foo(G, partition) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G + >>> partition = [{0, 1}, {1, 2, 3}, {4}] + >>> foo(G, partition) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G + + """ + if is_partition(G, partition): + return G, partition + raise nx.NetworkXError("`partition` is not a valid partition of the nodes of G") + + +require_partition = argmap(_require_partition, (0, 1)) + + +@nx._dispatchable +def intra_community_edges(G, partition): + """Returns the number of intra-community edges for a partition of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + The "intra-community edges" are those edges joining a pair of nodes + in the same block of the partition. + + """ + return sum(G.subgraph(block).size() for block in partition) + + +@nx._dispatchable +def inter_community_edges(G, partition): + """Returns the number of inter-community edges for a partition of `G`. + according to the given + partition of the nodes of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + The *inter-community edges* are those edges joining a pair of nodes + in different blocks of the partition. + + Implementation note: this function creates an intermediate graph + that may require the same amount of memory as that of `G`. + + """ + # Alternate implementation that does not require constructing a new + # graph object (but does require constructing an affiliation + # dictionary): + # + # aff = dict(chain.from_iterable(((v, block) for v in block) + # for block in partition)) + # return sum(1 for u, v in G.edges() if aff[u] != aff[v]) + # + MG = nx.MultiDiGraph if G.is_directed() else nx.MultiGraph + return nx.quotient_graph(G, partition, create_using=MG).size() + + +@nx._dispatchable +def inter_community_non_edges(G, partition): + """Returns the number of inter-community non-edges according to the + given partition of the nodes of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + A *non-edge* is a pair of nodes (undirected if `G` is undirected) + that are not adjacent in `G`. The *inter-community non-edges* are + those non-edges on a pair of nodes in different blocks of the + partition. + + Implementation note: this function creates two intermediate graphs, + which may require up to twice the amount of memory as required to + store `G`. + + """ + # Alternate implementation that does not require constructing two + # new graph objects (but does require constructing an affiliation + # dictionary): + # + # aff = dict(chain.from_iterable(((v, block) for v in block) + # for block in partition)) + # return sum(1 for u, v in nx.non_edges(G) if aff[u] != aff[v]) + # + return inter_community_edges(nx.complement(G), partition) + + +@nx._dispatchable(edge_attrs="weight") +def modularity(G, communities, weight="weight", resolution=1): + r"""Returns the modularity of the given partition of the graph. + + Modularity is defined in [1]_ as + + .. 
math:: + Q = \frac{1}{2m} \sum_{ij} \left( A_{ij} - \gamma\frac{k_ik_j}{2m}\right) + \delta(c_i,c_j) + + where $m$ is the number of edges (or sum of all edge weights as in [5]_), + $A$ is the adjacency matrix of `G`, $k_i$ is the (weighted) degree of $i$, + $\gamma$ is the resolution parameter, and $\delta(c_i, c_j)$ is 1 if $i$ and + $j$ are in the same community else 0. + + According to [2]_ (and verified by some algebra) this can be reduced to + + .. math:: + Q = \sum_{c=1}^{n} + \left[ \frac{L_c}{m} - \gamma\left( \frac{k_c}{2m} \right) ^2 \right] + + where the sum iterates over all communities $c$, $m$ is the number of edges, + $L_c$ is the number of intra-community links for community $c$, + $k_c$ is the sum of degrees of the nodes in community $c$, + and $\gamma$ is the resolution parameter. + + The resolution parameter sets an arbitrary tradeoff between intra-group + edges and inter-group edges. More complex grouping patterns can be + discovered by analyzing the same network with multiple values of gamma + and then combining the results [3]_. That said, it is very common to + simply use gamma=1. More on the choice of gamma is in [4]_. + + The second formula is the one actually used in calculation of the modularity. + For directed graphs the second formula replaces $k_c$ with $k^{in}_c k^{out}_c$. + + Parameters + ---------- + G : NetworkX Graph + + communities : list or iterable of set of nodes + These node sets must represent a partition of G's nodes. + + weight : string or None, optional (default="weight") + The edge attribute that holds the numerical value used + as a weight. If None or an edge does not have that attribute, + then that edge has weight 1. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Returns + ------- + Q : float + The modularity of the partition. + + Raises + ------ + NotAPartition + If `communities` is not a partition of the nodes of `G`. + + Examples + -------- + >>> G = nx.barbell_graph(3, 0) + >>> nx.community.modularity(G, [{0, 1, 2}, {3, 4, 5}]) + 0.35714285714285715 + >>> nx.community.modularity(G, nx.community.label_propagation_communities(G)) + 0.35714285714285715 + + References + ---------- + .. [1] M. E. J. Newman "Networks: An Introduction", page 224. + Oxford University Press, 2011. + .. [2] Clauset, Aaron, Mark EJ Newman, and Cristopher Moore. + "Finding community structure in very large networks." + Phys. Rev. E 70.6 (2004). + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community Detection" + Phys. Rev. E 74, 016110, 2006. https://doi.org/10.1103/PhysRevE.74.016110 + .. [4] M. E. J. Newman, "Equivalence between modularity optimization and + maximum likelihood methods for community detection" + Phys. Rev. E 94, 052315, 2016. https://doi.org/10.1103/PhysRevE.94.052315 + .. [5] Blondel, V.D. et al. "Fast unfolding of communities in large + networks" J. Stat. Mech 10008, 1-12 (2008). 
+ https://doi.org/10.1088/1742-5468/2008/10/P10008 + """ + if not isinstance(communities, list): + communities = list(communities) + if not is_partition(G, communities): + raise NotAPartition(G, communities) + + directed = G.is_directed() + if directed: + out_degree = dict(G.out_degree(weight=weight)) + in_degree = dict(G.in_degree(weight=weight)) + m = sum(out_degree.values()) + norm = 1 / m**2 + else: + out_degree = in_degree = dict(G.degree(weight=weight)) + deg_sum = sum(out_degree.values()) + m = deg_sum / 2 + norm = 1 / deg_sum**2 + + def community_contribution(community): + comm = set(community) + L_c = sum(wt for u, v, wt in G.edges(comm, data=weight, default=1) if v in comm) + + out_degree_sum = sum(out_degree[u] for u in comm) + in_degree_sum = sum(in_degree[u] for u in comm) if directed else out_degree_sum + + return L_c / m - resolution * out_degree_sum * in_degree_sum * norm + + return sum(map(community_contribution, communities)) + + +@require_partition +@nx._dispatchable +def partition_quality(G, partition): + """Returns the coverage and performance of a partition of G. + + The *coverage* of a partition is the ratio of the number of + intra-community edges to the total number of edges in the graph. + + The *performance* of a partition is the number of + intra-community edges plus inter-community non-edges divided by the total + number of potential edges. + + This algorithm has complexity $O(C^2 + L)$ where C is the number of communities and L is the number of links. + + Parameters + ---------- + G : NetworkX graph + + partition : sequence + Partition of the nodes of `G`, represented as a sequence of + sets of nodes (blocks). Each block of the partition represents a + community. + + Returns + ------- + (float, float) + The (coverage, performance) tuple of the partition, as defined above. + + Raises + ------ + NetworkXError + If `partition` is not a valid partition of the nodes of `G`. + + Notes + ----- + If `G` is a multigraph; + - for coverage, the multiplicity of edges is counted + - for performance, the result is -1 (total number of possible edges is not defined) + + References + ---------- + .. [1] Santo Fortunato. + "Community Detection in Graphs". + *Physical Reports*, Volume 486, Issue 3--5 pp. 
75--174 + + """ + + node_community = {} + for i, community in enumerate(partition): + for node in community: + node_community[node] = i + + # `performance` is not defined for multigraphs + if not G.is_multigraph(): + # Iterate over the communities, quadratic, to calculate `possible_inter_community_edges` + possible_inter_community_edges = sum( + len(p1) * len(p2) for p1, p2 in combinations(partition, 2) + ) + + if G.is_directed(): + possible_inter_community_edges *= 2 + else: + possible_inter_community_edges = 0 + + # Compute the number of edges in the complete graph -- `n` nodes, + # directed or undirected, depending on `G` + n = len(G) + total_pairs = n * (n - 1) + if not G.is_directed(): + total_pairs //= 2 + + intra_community_edges = 0 + inter_community_non_edges = possible_inter_community_edges + + # Iterate over the links to count `intra_community_edges` and `inter_community_non_edges` + for e in G.edges(): + if node_community[e[0]] == node_community[e[1]]: + intra_community_edges += 1 + else: + inter_community_non_edges -= 1 + + coverage = intra_community_edges / len(G.edges) + + if G.is_multigraph(): + performance = -1.0 + else: + performance = (intra_community_edges + inter_community_non_edges) / total_pairs + + return coverage, performance diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5732c64ea5b1ed739995b257e4ea7e5bb3049853 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f116b8f52b78833834c18bf30cc7565ec5f14083 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75a879fffa7f5de8bb50e86faeefded022e46e4b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80e7c3e4098f201379cd0f5618791354547b464b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1967327a0911cfdcfd330bfc4c3bb2bac45525c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4824a10a002bf18fe51ab566e8dd0af82eabb3c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f85c946c690fcc9ee4af8bd313a9f737e4185e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..948f7f821d86b7d9e7bfac38f419c4160892087c Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9121cc49c25895f661e71931d5098d24f9b1905 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f16c863c908e50222047248c3c318bd8ff931ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84ad835a7a3484a5067890215bc7f468d4912347 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b81aaee503af199fc2a1dd593acbe9068c796b6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py new file mode 100644 index 0000000000000000000000000000000000000000..6c023be773d80b5ef71254ba55547f622ff4e43d --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py @@ -0,0 +1,136 @@ +import pytest + +import networkx as nx +from networkx import Graph, NetworkXError +from networkx.algorithms.community import asyn_fluidc + + +@pytest.mark.parametrize("graph_constructor", (nx.DiGraph, nx.MultiGraph)) +def test_raises_on_directed_and_multigraphs(graph_constructor): + G = graph_constructor([(0, 1), (1, 2)]) + with pytest.raises(nx.NetworkXNotImplemented): + nx.community.asyn_fluidc(G, 1) + + +def test_exceptions(): + test = Graph() + test.add_node("a") + pytest.raises(NetworkXError, asyn_fluidc, test, "hi") + pytest.raises(NetworkXError, asyn_fluidc, test, -1) + pytest.raises(NetworkXError, asyn_fluidc, test, 3) + test.add_node("b") + pytest.raises(NetworkXError, asyn_fluidc, test, 1) + + +def test_single_node(): + test = Graph() + + test.add_node("a") + + # ground truth + ground_truth = {frozenset(["a"])} + + communities = asyn_fluidc(test, 1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_nodes(): + test = Graph() + + test.add_edge("a", "b") + + # ground truth + ground_truth = {frozenset(["a"]), frozenset(["b"])} + + communities = asyn_fluidc(test, 2) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_clique_communities(): + test = Graph() + + # c1 + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "c") + + # connection + test.add_edge("c", "d") + + # c2 + test.add_edge("d", "e") + test.add_edge("d", "f") + test.add_edge("f", "e") + + # ground truth + ground_truth = {frozenset(["a", "c", "b"]), frozenset(["e", "d", "f"])} + + communities = asyn_fluidc(test, 2, seed=7) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_five_clique_ring(): + test = Graph() + + # c1 + test.add_edge("1a", "1b") + test.add_edge("1a", "1c") + test.add_edge("1a", "1d") + test.add_edge("1b", "1c") + test.add_edge("1b", "1d") + test.add_edge("1c", "1d") + + # c2 + test.add_edge("2a", "2b") + test.add_edge("2a", "2c") + test.add_edge("2a", "2d") + test.add_edge("2b", "2c") + test.add_edge("2b", "2d") + test.add_edge("2c", "2d") + + # c3 + test.add_edge("3a", "3b") + test.add_edge("3a", "3c") + test.add_edge("3a", "3d") + test.add_edge("3b", "3c") + test.add_edge("3b", "3d") + test.add_edge("3c", "3d") + + # c4 + test.add_edge("4a", "4b") + test.add_edge("4a", "4c") + test.add_edge("4a", "4d") + test.add_edge("4b", "4c") + test.add_edge("4b", "4d") + test.add_edge("4c", "4d") + + # c5 + test.add_edge("5a", "5b") + test.add_edge("5a", "5c") + test.add_edge("5a", "5d") + test.add_edge("5b", "5c") + test.add_edge("5b", "5d") + test.add_edge("5c", "5d") + + 
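+    # Commentary (added): the five 4-cliques built above are next chained into
+    # a ring by single bridge edges, giving five well-separated communities
+    # for asyn_fluidc to recover.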
# connections + test.add_edge("1a", "2c") + test.add_edge("2a", "3c") + test.add_edge("3a", "4c") + test.add_edge("4a", "5c") + test.add_edge("5a", "1c") + + # ground truth + ground_truth = { + frozenset(["1a", "1b", "1c", "1d"]), + frozenset(["2a", "2b", "2c", "2d"]), + frozenset(["3a", "3b", "3c", "3d"]), + frozenset(["4a", "4b", "4c", "4d"]), + frozenset(["5a", "5b", "5c", "5d"]), + } + + communities = asyn_fluidc(test, 5, seed=9) + result = {frozenset(c) for c in communities} + assert result == ground_truth diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_centrality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..a31d9a8051e2e4362d214a0e2ac1d79694eca127 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_centrality.py @@ -0,0 +1,84 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.centrality` +module. + +""" +from operator import itemgetter + +import networkx as nx + + +def set_of_sets(iterable): + return set(map(frozenset, iterable)) + + +def validate_communities(result, expected): + assert set_of_sets(result) == set_of_sets(expected) + + +def validate_possible_communities(result, *expected): + assert any(set_of_sets(result) == set_of_sets(p) for p in expected) + + +class TestGirvanNewman: + """Unit tests for the + :func:`networkx.algorithms.community.centrality.girvan_newman` + function. + + """ + + def test_no_edges(self): + G = nx.empty_graph(3) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 1 + validate_communities(communities[0], [{0}, {1}, {2}]) + + def test_undirected(self): + # Start with the graph .-.-.-. + G = nx.path_graph(4) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + # After one removal, we get the graph .-. .-. + validate_communities(communities[0], [{0, 1}, {2, 3}]) + # After the next, we get the graph .-. . ., but there are two + # symmetric possible versions. + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + # After the last removal, we always get the empty graph. + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_directed(self): + G = nx.DiGraph(nx.path_graph(4)) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_selfloops(self): + G = nx.path_graph(4) + G.add_edge(0, 0) + G.add_edge(2, 2) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_most_valuable_edge(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 3), (1, 2, 2), (2, 3, 1)]) + # Let the most valuable edge be the one with the highest weight. 
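+        # Commentary (added): girvan_newman accepts an optional callable as its
+        # second argument (most_valuable_edge); it receives the current graph
+        # and returns the edge to remove next, so any ranking policy plugs in.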
+ + def heaviest(G): + return max(G.edges(data="weight"), key=itemgetter(2))[:2] + + communities = list(nx.community.girvan_newman(G, heaviest)) + assert len(communities) == 3 + validate_communities(communities[0], [{0}, {1, 2, 3}]) + validate_communities(communities[1], [{0}, {1}, {2, 3}]) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_divisive.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_divisive.py new file mode 100644 index 0000000000000000000000000000000000000000..6331503f97eaabee965a7f7f302b30e88601687e --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_divisive.py @@ -0,0 +1,106 @@ +import pytest + +import networkx as nx + + +def test_edge_betweenness_partition(): + G = nx.barbell_graph(3, 0) + C = nx.community.edge_betweenness_partition(G, 2) + answer = [{0, 1, 2}, {3, 4, 5}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + G = nx.barbell_graph(3, 1) + C = nx.community.edge_betweenness_partition(G, 3) + answer = [{0, 1, 2}, {4, 5, 6}, {3}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_betweenness_partition(G, 7) + answer = [{n} for n in G] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_betweenness_partition(G, 1) + assert C == [set(G)] + + C = nx.community.edge_betweenness_partition(G, 1, weight="weight") + assert C == [set(G)] + + with pytest.raises(nx.NetworkXError): + nx.community.edge_betweenness_partition(G, 0) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_betweenness_partition(G, -1) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_betweenness_partition(G, 10) + + +def test_edge_current_flow_betweenness_partition(): + pytest.importorskip("scipy") + + G = nx.barbell_graph(3, 0) + C = nx.community.edge_current_flow_betweenness_partition(G, 2) + answer = [{0, 1, 2}, {3, 4, 5}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + G = nx.barbell_graph(3, 1) + C = nx.community.edge_current_flow_betweenness_partition(G, 2) + answers = [[{0, 1, 2, 3}, {4, 5, 6}], [{0, 1, 2}, {3, 4, 5, 6}]] + assert len(C) == len(answers[0]) + assert any(all(s in answer for s in C) for answer in answers) + + C = nx.community.edge_current_flow_betweenness_partition(G, 3) + answer = [{0, 1, 2}, {4, 5, 6}, {3}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_current_flow_betweenness_partition(G, 4) + answers = [[{1, 2}, {4, 5, 6}, {3}, {0}], [{0, 1, 2}, {5, 6}, {3}, {4}]] + assert len(C) == len(answers[0]) + assert any(all(s in answer for s in C) for answer in answers) + + C = nx.community.edge_current_flow_betweenness_partition(G, 5) + answer = [{1, 2}, {5, 6}, {3}, {0}, {4}] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_current_flow_betweenness_partition(G, 6) + answers = [[{2}, {5, 6}, {3}, {0}, {4}, {1}], [{1, 2}, {6}, {3}, {0}, {4}, {5}]] + assert len(C) == len(answers[0]) + assert any(all(s in answer for s in C) for answer in answers) + + C = nx.community.edge_current_flow_betweenness_partition(G, 7) + answer = [{n} for n in G] + assert len(C) == len(answer) + for s in answer: + assert s in C + + C = nx.community.edge_current_flow_betweenness_partition(G, 1) + assert C == [set(G)] + + C = nx.community.edge_current_flow_betweenness_partition(G, 1, weight="weight") + assert C == 
[set(G)] + + with pytest.raises(nx.NetworkXError): + nx.community.edge_current_flow_betweenness_partition(G, 0) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_current_flow_betweenness_partition(G, -1) + + with pytest.raises(nx.NetworkXError): + nx.community.edge_current_flow_betweenness_partition(G, 10) + + N = 10 + G = nx.empty_graph(N) + for i in range(2, N - 1): + C = nx.community.edge_current_flow_betweenness_partition(G, i) + assert C == [{n} for n in G] diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_kclique.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_kclique.py new file mode 100644 index 0000000000000000000000000000000000000000..aa0b7e823e2780f734180562fa3fb8ce5a671312 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_kclique.py @@ -0,0 +1,91 @@ +from itertools import combinations + +import pytest + +import networkx as nx + + +def test_overlapping_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(2, 7), 2)) # Add another five clique + c = list(nx.community.k_clique_communities(G, 4)) + assert c == [frozenset(range(7))] + c = set(nx.community.k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(2, 7))} + + +def test_isolated_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(5, 10), 2)) # Add another five clique + c = set(nx.community.k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(5, 10))} + + +class TestZacharyKarateClub: + def setup_method(self): + self.G = nx.karate_club_graph() + + def _check_communities(self, k, expected): + communities = set(nx.community.k_clique_communities(self.G, k)) + assert communities == expected + + def test_k2(self): + # clique percolation with k=2 is just connected components + expected = {frozenset(self.G)} + self._check_communities(2, expected) + + def test_k3(self): + comm1 = [ + 0, + 1, + 2, + 3, + 7, + 8, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + ] + comm2 = [0, 4, 5, 6, 10, 16] + comm3 = [24, 25, 31] + expected = {frozenset(comm1), frozenset(comm2), frozenset(comm3)} + self._check_communities(3, expected) + + def test_k4(self): + expected = { + frozenset([0, 1, 2, 3, 7, 13]), + frozenset([8, 32, 30, 33]), + frozenset([32, 33, 29, 23]), + } + self._check_communities(4, expected) + + def test_k5(self): + expected = {frozenset([0, 1, 2, 3, 7, 13])} + self._check_communities(5, expected) + + def test_k6(self): + expected = set() + self._check_communities(6, expected) + + +def test_bad_k(): + with pytest.raises(nx.NetworkXError): + list(nx.community.k_clique_communities(nx.Graph(), 1)) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc555475dd55dbe401c2d009ccd4fbdc0cb2d8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py @@ -0,0 +1,91 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.kernighan_lin` +module. 
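+
+A minimal usage sketch of the function under test (seeded for
+reproducibility; the bisection below matches test_seed_argument)::
+
+    >>> import networkx as nx
+    >>> G = nx.barbell_graph(3, 0)
+    >>> A, B = nx.community.kernighan_lin_bisection(G, seed=1)
+    >>> sorted(map(sorted, (A, B)))
+    [[0, 1, 2], [3, 4, 5]]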
+""" +from itertools import permutations + +import pytest + +import networkx as nx +from networkx.algorithms.community import kernighan_lin_bisection + + +def assert_partition_equal(x, y): + assert set(map(frozenset, x)) == set(map(frozenset, y)) + + +def test_partition(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_partition_argument(): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_partition_argument_non_integer_nodes(): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + partition = ({"A", "B"}, {"C", "D"}) + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_seed_argument(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G, seed=1) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_non_disjoint_partition(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1, 2}, {2, 3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_too_many_blocks(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1}, {2}, {3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_multigraph(): + G = nx.cycle_graph(4) + M = nx.MultiGraph(G.edges()) + M.add_edges_from(G.edges()) + M.remove_edge(1, 2) + for labels in permutations(range(4)): + mapping = dict(zip(M, labels)) + A, B = kernighan_lin_bisection(nx.relabel_nodes(M, mapping), seed=0) + assert_partition_equal( + [A, B], [{mapping[0], mapping[1]}, {mapping[2], mapping[3]}] + ) + + +def test_max_iter_argument(): + G = nx.Graph( + [ + ("A", "B", {"weight": 1}), + ("A", "C", {"weight": 2}), + ("A", "D", {"weight": 3}), + ("A", "E", {"weight": 2}), + ("A", "F", {"weight": 4}), + ("B", "C", {"weight": 1}), + ("B", "D", {"weight": 4}), + ("B", "E", {"weight": 2}), + ("B", "F", {"weight": 1}), + ("C", "D", {"weight": 3}), + ("C", "E", {"weight": 2}), + ("C", "F", {"weight": 1}), + ("D", "E", {"weight": 4}), + ("D", "F", {"weight": 3}), + ("E", "F", {"weight": 2}), + ] + ) + partition = ({"A", "B", "C"}, {"D", "E", "F"}) + C = kernighan_lin_bisection(G, partition, max_iter=1) + assert_partition_equal(C, ({"A", "F", "C"}, {"D", "E", "B"})) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_label_propagation.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..4be72dbf27281c58d973e1d0d84d101fa369d43d --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_label_propagation.py @@ -0,0 +1,241 @@ +from itertools import chain, combinations + +import pytest + +import networkx as nx + + +def test_directed_not_supported(): + with pytest.raises(nx.NetworkXNotImplemented): + # not supported for directed graphs + test = nx.DiGraph() + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "d") + result = nx.community.label_propagation_communities(test) + + +def test_iterator_vs_iterable(): + G = nx.empty_graph("a") + assert list(nx.community.label_propagation_communities(G)) == [{"a"}] + for community in nx.community.label_propagation_communities(G): + assert community == {"a"} + pytest.raises(TypeError, next, nx.community.label_propagation_communities(G)) + + +def test_one_node(): + test = nx.Graph() 
+ test.add_node("a") + + # The expected communities are: + ground_truth = {frozenset(["a"])} + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_unconnected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "c") + test.add_edge("a", "d") + test.add_edge("d", "c") + # community 2 + test.add_edge("b", "e") + test.add_edge("e", "f") + test.add_edge("f", "b") + + # The expected communities are: + ground_truth = {frozenset(["a", "c", "d"]), frozenset(["b", "e", "f"])} + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_connected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "b") + test.add_edge("c", "a") + test.add_edge("c", "b") + test.add_edge("d", "a") + test.add_edge("d", "b") + test.add_edge("d", "c") + test.add_edge("e", "a") + test.add_edge("e", "b") + test.add_edge("e", "c") + test.add_edge("e", "d") + # community 2 + test.add_edge("1", "2") + test.add_edge("3", "1") + test.add_edge("3", "2") + test.add_edge("4", "1") + test.add_edge("4", "2") + test.add_edge("4", "3") + test.add_edge("5", "1") + test.add_edge("5", "2") + test.add_edge("5", "3") + test.add_edge("5", "4") + # edge between community 1 and 2 + test.add_edge("a", "1") + # community 3 + test.add_edge("x", "y") + # community 4 with only a single node + test.add_node("z") + + # The expected communities are: + ground_truth1 = { + frozenset(["a", "b", "c", "d", "e"]), + frozenset(["1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth2 = { + frozenset(["a", "b", "c", "d", "e", "1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth = (ground_truth1, ground_truth2) + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result in ground_truth + + +def test_termination(): + # ensure termination of asyn_lpa_communities in two cases + # that led to an endless loop in a previous version + test1 = nx.karate_club_graph() + test2 = nx.caveman_graph(2, 10) + test2.add_edges_from([(0, 20), (20, 10)]) + nx.community.asyn_lpa_communities(test1) + nx.community.asyn_lpa_communities(test2) + + +class TestAsynLpaCommunities: + def _check_communities(self, G, expected): + """Checks that the communities computed from the given graph ``G`` + using the :func:`~networkx.asyn_lpa_communities` function match + the set of nodes given in ``expected``. + + ``expected`` must be a :class:`set` of :class:`frozenset` + instances, each element of which is a node in the graph. + + """ + communities = nx.community.asyn_lpa_communities(G) + result = {frozenset(c) for c in communities} + assert result == expected + + def test_null_graph(self): + G = nx.null_graph() + ground_truth = set() + self._check_communities(G, ground_truth) + + def test_single_node(self): + G = nx.empty_graph(1) + ground_truth = {frozenset([0])} + self._check_communities(G, ground_truth) + + def test_simple_communities(self): + # This graph is the disjoint union of two triangles. 
+ G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + self._check_communities(G, ground_truth) + + def test_seed_argument(self): + G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + communities = nx.community.asyn_lpa_communities(G, seed=1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + def test_several_communities(self): + # This graph is the disjoint union of five triangles. + ground_truth = {frozenset(range(3 * i, 3 * (i + 1))) for i in range(5)} + edges = chain.from_iterable(combinations(c, 2) for c in ground_truth) + G = nx.Graph(edges) + self._check_communities(G, ground_truth) + + +class TestFastLabelPropagationCommunities: + N = 100 # number of nodes + K = 15 # average node degree + + def _check_communities(self, G, truth, weight=None, seed=42): + C = nx.community.fast_label_propagation_communities(G, weight=weight, seed=seed) + assert {frozenset(c) for c in C} == truth + + def test_null_graph(self): + G = nx.null_graph() + truth = set() + self._check_communities(G, truth) + + def test_empty_graph(self): + G = nx.empty_graph(self.N) + truth = {frozenset([i]) for i in G} + self._check_communities(G, truth) + + def test_star_graph(self): + G = nx.star_graph(self.N) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_complete_graph(self): + G = nx.complete_graph(self.N) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_bipartite_graph(self): + G = nx.complete_bipartite_graph(self.N // 2, self.N // 2) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_random_graph(self): + G = nx.gnm_random_graph(self.N, self.N * self.K // 2, seed=42) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_disjoin_cliques(self): + G = nx.Graph(["ab", "AB", "AC", "BC", "12", "13", "14", "23", "24", "34"]) + truth = {frozenset("ab"), frozenset("ABC"), frozenset("1234")} + self._check_communities(G, truth) + + def test_ring_of_cliques(self): + N, K = self.N, self.K + G = nx.ring_of_cliques(N, K) + truth = {frozenset([K * i + k for k in range(K)]) for i in range(N)} + self._check_communities(G, truth) + + def test_larger_graph(self): + G = nx.gnm_random_graph(100 * self.N, 50 * self.N * self.K, seed=42) + nx.community.fast_label_propagation_communities(G) + + def test_graph_type(self): + G1 = nx.complete_graph(self.N, nx.MultiDiGraph()) + G2 = nx.MultiGraph(G1) + G3 = nx.DiGraph(G1) + G4 = nx.Graph(G1) + truth = {frozenset(G1)} + self._check_communities(G1, truth) + self._check_communities(G2, truth) + self._check_communities(G3, truth) + self._check_communities(G4, truth) + + def test_weight_argument(self): + G = nx.MultiDiGraph() + G.add_edge(1, 2, weight=1.41) + G.add_edge(2, 1, weight=1.41) + G.add_edge(2, 3) + G.add_edge(3, 4, weight=3.14) + truth = {frozenset({1, 2}), frozenset({3, 4})} + self._check_communities(G, truth, weight="weight") + + def test_seed_argument(self): + G = nx.karate_club_graph() + C = nx.community.fast_label_propagation_communities(G, seed=2023) + truth = {frozenset(c) for c in C} + self._check_communities(G, truth, seed=2023) + # smoke test that seed=None works + C = nx.community.fast_label_propagation_communities(G, seed=None) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_louvain.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_louvain.py new file mode 100644 index 
0000000000000000000000000000000000000000..816e6f143fe0837b5d8617a927f49e557ccd9668 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_louvain.py @@ -0,0 +1,264 @@ +import pytest + +import networkx as nx + + +def test_modularity_increase(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition = [{u} for u in G.nodes()] + mod = nx.community.modularity(G, partition) + partition = nx.community.louvain_communities(G) + + assert nx.community.modularity(G, partition) > mod + + +def test_valid_partition(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = G.to_directed() + partition = nx.community.louvain_communities(G) + partition2 = nx.community.louvain_communities(H) + + assert nx.community.is_partition(G, partition) + assert nx.community.is_partition(H, partition2) + + +def test_karate_club_partition(): + G = nx.karate_club_graph() + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition = nx.community.louvain_communities(G, seed=2, weight=None) + + assert part == partition + + +def test_partition_iterator(): + G = nx.path_graph(15) + parts_iter = nx.community.louvain_partitions(G, seed=42) + first_part = next(parts_iter) + first_copy = [s.copy() for s in first_part] + + # gh-5901 reports sets changing after next partition is yielded + assert first_copy[0] == first_part[0] + second_part = next(parts_iter) + assert first_copy[0] == first_part[0] + + +def test_undirected_selfloops(): + G = nx.karate_club_graph() + expected_partition = nx.community.louvain_communities(G, seed=2, weight=None) + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + assert expected_partition == part + + G.add_weighted_edges_from([(i, i, i * 1000) for i in range(9)]) + # large self-loop weight impacts partition + partition = nx.community.louvain_communities(G, seed=2, weight="weight") + assert part != partition + + # small self-loop weights aren't enough to impact partition in this graph + partition = nx.community.louvain_communities(G, seed=2, weight=None) + assert part == partition + + +def test_directed_selfloops(): + G = nx.DiGraph() + G.add_nodes_from(range(11)) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + G.add_edges_from(G_edges) + G_expected_partition = nx.community.louvain_communities(G, seed=123, weight=None) + + G.add_weighted_edges_from([(i, i, i * 1000) for i in range(3)]) + # large self-loop weight impacts partition + G_partition = nx.community.louvain_communities(G, seed=123, weight="weight") + assert G_partition != G_expected_partition + + # small self-loop weights aren't enough to impact partition in this graph + G_partition = nx.community.louvain_communities(G, seed=123, weight=None) + assert G_partition == G_expected_partition + + +def test_directed_partition(): + """ + Test 2 cases that were looping infinitely + from issues #5175 and #5704 + """ + G = nx.DiGraph() + H = nx.DiGraph() + G.add_nodes_from(range(10)) + H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + H_edges = [ + (1, 2), + (1, 6), + (1, 9), + (2, 
3), + (2, 4), + (2, 5), + (3, 4), + (4, 3), + (4, 5), + (5, 4), + (6, 7), + (6, 8), + (9, 10), + (9, 11), + (10, 11), + (11, 10), + ] + G.add_edges_from(G_edges) + H.add_edges_from(H_edges) + + G_expected_partition = [{0, 1, 2}, {3, 4}, {5}, {6}, {8, 7}, {9, 10}] + G_partition = nx.community.louvain_communities(G, seed=123, weight=None) + + H_expected_partition = [{2, 3, 4, 5}, {8, 1, 6, 7}, {9, 10, 11}] + H_partition = nx.community.louvain_communities(H, seed=123, weight=None) + + assert G_partition == G_expected_partition + assert H_partition == H_expected_partition + + +def test_none_weight_param(): + G = nx.karate_club_graph() + nx.set_edge_attributes( + G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" + ) + + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition1 = nx.community.louvain_communities(G, weight=None, seed=2) + partition2 = nx.community.louvain_communities(G, weight="foo", seed=2) + partition3 = nx.community.louvain_communities(G, weight="weight", seed=2) + + assert part == partition1 + assert part != partition2 + assert part != partition3 + assert partition2 != partition3 + + +def test_quality(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = nx.gn_graph(200, seed=1234) + I = nx.MultiGraph(G) + J = nx.MultiDiGraph(H) + + partition = nx.community.louvain_communities(G) + partition2 = nx.community.louvain_communities(H) + partition3 = nx.community.louvain_communities(I) + partition4 = nx.community.louvain_communities(J) + + quality = nx.community.partition_quality(G, partition)[0] + quality2 = nx.community.partition_quality(H, partition2)[0] + quality3 = nx.community.partition_quality(I, partition3)[0] + quality4 = nx.community.partition_quality(J, partition4)[0] + + assert quality >= 0.65 + assert quality2 >= 0.65 + assert quality3 >= 0.65 + assert quality4 >= 0.65 + + +def test_multigraph(): + G = nx.karate_club_graph() + H = nx.MultiGraph(G) + G.add_edge(0, 1, weight=10) + H.add_edge(0, 1, weight=9) + G.add_edge(0, 9, foo=20) + H.add_edge(0, 9, foo=20) + + partition1 = nx.community.louvain_communities(G, seed=1234) + partition2 = nx.community.louvain_communities(H, seed=1234) + partition3 = nx.community.louvain_communities(H, weight="foo", seed=1234) + + assert partition1 == partition2 != partition3 + + +def test_resolution(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + + partition1 = nx.community.louvain_communities(G, resolution=0.5, seed=12) + partition2 = nx.community.louvain_communities(G, seed=12) + partition3 = nx.community.louvain_communities(G, resolution=2, seed=12) + + assert len(partition1) <= len(partition2) <= len(partition3) + + +def test_threshold(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition1 = nx.community.louvain_communities(G, threshold=0.3, seed=2) + partition2 = nx.community.louvain_communities(G, seed=2) + mod1 = nx.community.modularity(G, partition1) + mod2 = nx.community.modularity(G, partition2) + + assert mod1 <= mod2 + + +def test_empty_graph(): + G = nx.Graph() + G.add_nodes_from(range(5)) + expected = [{0}, {1}, {2}, {3}, {4}] + assert nx.community.louvain_communities(G) == expected + + +def test_max_level(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + parts_iter = 
nx.community.louvain_partitions(G, seed=42) + for max_level, expected in enumerate(parts_iter, 1): + partition = nx.community.louvain_communities(G, max_level=max_level, seed=42) + assert partition == expected + assert max_level > 1 # Ensure we are actually testing max_level + # max_level is an upper limit; it's okay if we stop before it's hit. + partition = nx.community.louvain_communities(G, max_level=max_level + 1, seed=42) + assert partition == expected + with pytest.raises( + ValueError, match="max_level argument must be a positive integer" + ): + nx.community.louvain_communities(G, max_level=0) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_lukes.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_lukes.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa48f0f47667ce4c4fa96c175bee4cb95a4852f --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_lukes.py @@ -0,0 +1,152 @@ +from itertools import product + +import pytest + +import networkx as nx + +EWL = "e_weight" +NWL = "n_weight" + + +# first test from the Lukes original paper +def paper_1_case(float_edge_wt=False, explicit_node_wt=True, directed=False): + # problem-specific constants + limit = 3 + + # configuration + if float_edge_wt: + shift = 0.001 + else: + shift = 0 + + if directed: + example_1 = nx.DiGraph() + else: + example_1 = nx.Graph() + + # graph creation + example_1.add_edge(1, 2, **{EWL: 3 + shift}) + example_1.add_edge(1, 4, **{EWL: 2 + shift}) + example_1.add_edge(2, 3, **{EWL: 4 + shift}) + example_1.add_edge(2, 5, **{EWL: 6 + shift}) + + # node weights + if explicit_node_wt: + nx.set_node_attributes(example_1, 1, NWL) + wtu = NWL + else: + wtu = None + + # partitioning + clusters_1 = { + frozenset(x) + for x in nx.community.lukes_partitioning( + example_1, limit, node_weight=wtu, edge_weight=EWL + ) + } + + return clusters_1 + + +# second test from the Lukes original paper +def paper_2_case(explicit_edge_wt=True, directed=False): + # problem specific constants + byte_block_size = 32 + + # configuration + if directed: + example_2 = nx.DiGraph() + else: + example_2 = nx.Graph() + + if explicit_edge_wt: + edic = {EWL: 1} + wtu = EWL + else: + edic = {} + wtu = None + + # graph creation + example_2.add_edge("name", "home_address", **edic) + example_2.add_edge("name", "education", **edic) + example_2.add_edge("education", "bs", **edic) + example_2.add_edge("education", "ms", **edic) + example_2.add_edge("education", "phd", **edic) + example_2.add_edge("name", "telephone", **edic) + example_2.add_edge("telephone", "home", **edic) + example_2.add_edge("telephone", "office", **edic) + example_2.add_edge("office", "no1", **edic) + example_2.add_edge("office", "no2", **edic) + + example_2.nodes["name"][NWL] = 20 + example_2.nodes["education"][NWL] = 10 + example_2.nodes["bs"][NWL] = 1 + example_2.nodes["ms"][NWL] = 1 + example_2.nodes["phd"][NWL] = 1 + example_2.nodes["home_address"][NWL] = 8 + example_2.nodes["telephone"][NWL] = 8 + example_2.nodes["home"][NWL] = 8 + example_2.nodes["office"][NWL] = 4 + example_2.nodes["no1"][NWL] = 1 + example_2.nodes["no2"][NWL] = 1 + + # partitioning + clusters_2 = { + frozenset(x) + for x in nx.community.lukes_partitioning( + example_2, byte_block_size, node_weight=NWL, edge_weight=wtu + ) + } + + return clusters_2 + + +def test_paper_1_case(): + ground_truth = {frozenset([1, 4]), frozenset([2, 3, 5])} + + tf = (True, False) + for flt, nwt, drc in 
product(tf, tf, tf): + part = paper_1_case(flt, nwt, drc) + assert part == ground_truth + + +def test_paper_2_case(): + ground_truth = { + frozenset(["education", "bs", "ms", "phd"]), + frozenset(["name", "home_address"]), + frozenset(["telephone", "home", "office", "no1", "no2"]), + } + + tf = (True, False) + for ewt, drc in product(tf, tf): + part = paper_2_case(ewt, drc) + assert part == ground_truth + + +def test_mandatory_tree(): + not_a_tree = nx.complete_graph(4) + + with pytest.raises(nx.NotATree): + nx.community.lukes_partitioning(not_a_tree, 5) + + +def test_mandatory_integrality(): + byte_block_size = 32 + + ex_1_broken = nx.DiGraph() + + ex_1_broken.add_edge(1, 2, **{EWL: 3.2}) + ex_1_broken.add_edge(1, 4, **{EWL: 2.4}) + ex_1_broken.add_edge(2, 3, **{EWL: 4.0}) + ex_1_broken.add_edge(2, 5, **{EWL: 6.3}) + + ex_1_broken.nodes[1][NWL] = 1.2 # ! + ex_1_broken.nodes[2][NWL] = 1 + ex_1_broken.nodes[3][NWL] = 1 + ex_1_broken.nodes[4][NWL] = 1 + ex_1_broken.nodes[5][NWL] = 2 + + with pytest.raises(TypeError): + nx.community.lukes_partitioning( + ex_1_broken, byte_block_size, node_weight=NWL, edge_weight=EWL + ) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_modularity_max.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_modularity_max.py new file mode 100644 index 0000000000000000000000000000000000000000..0121367fc4ef766e2587610c3ea32ba33b12b259 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_modularity_max.py @@ -0,0 +1,340 @@ +import pytest + +import networkx as nx +from networkx.algorithms.community import ( + greedy_modularity_communities, + naive_greedy_modularity_communities, +) + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities(func): + G = nx.karate_club_graph() + john_a = frozenset( + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + ) + mr_hi = frozenset([0, 4, 5, 6, 10, 11, 16, 19]) + overlap = frozenset([1, 2, 3, 7, 9, 12, 13, 17, 21]) + expected = {john_a, overlap, mr_hi} + assert set(func(G, weight=None)) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_categorical_labels(func): + # Using other than 0-starting contiguous integers as node-labels. 
+ G = nx.Graph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = {frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})} + assert set(func(G)) == expected + + +def test_greedy_modularity_communities_components(): + # Test for gh-5530 + G = nx.Graph([(0, 1), (2, 3), (4, 5), (5, 6)]) + # usual case with 3 components + assert greedy_modularity_communities(G) == [{4, 5, 6}, {0, 1}, {2, 3}] + # best_n can make the algorithm continue even when modularity goes down + assert greedy_modularity_communities(G, best_n=3) == [{4, 5, 6}, {0, 1}, {2, 3}] + assert greedy_modularity_communities(G, best_n=2) == [{0, 1, 4, 5, 6}, {2, 3}] + assert greedy_modularity_communities(G, best_n=1) == [{0, 1, 2, 3, 4, 5, 6}] + + +def test_greedy_modularity_communities_relabeled(): + # Test for gh-4966 + G = nx.balanced_tree(2, 2) + mapping = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h"} + G = nx.relabel_nodes(G, mapping) + expected = [frozenset({"e", "d", "a", "b"}), frozenset({"c", "f", "g"})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_directed(): + G = nx.DiGraph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = [frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})] + assert greedy_modularity_communities(G) == expected + + # with loops + G = nx.DiGraph() + G.add_edges_from( + [(1, 1), (1, 2), (1, 3), (2, 3), (1, 4), (4, 4), (5, 5), (4, 5), (4, 6), (5, 6)] + ) + expected = [frozenset({1, 2, 3}), frozenset({4, 5, 6})] + assert greedy_modularity_communities(G) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_weighted(func): + G = nx.balanced_tree(2, 3) + for a, b in G.edges: + if ((a == 1) or (a == 2)) and (b != 0): + G[a][b]["weight"] = 10.0 + else: + G[a][b]["weight"] = 1.0 + + expected = [{0, 1, 3, 4, 7, 8, 9, 10}, {2, 5, 6, 11, 12, 13, 14}] + + assert func(G, weight="weight") == expected + assert func(G, weight="weight", resolution=0.9) == expected + assert func(G, weight="weight", resolution=0.3) == expected + assert func(G, weight="weight", resolution=1.1) != expected + + +def test_modularity_communities_floating_point(): + # check for floating point error when used as key in the mapped_queue dict. + # Test for gh-4992 and gh-5000 + G = nx.Graph() + G.add_weighted_edges_from( + [(0, 1, 12), (1, 4, 71), (2, 3, 15), (2, 4, 10), (3, 6, 13)] + ) + expected = [{0, 1, 4}, {2, 3, 6}] + assert greedy_modularity_communities(G, weight="weight") == expected + assert ( + greedy_modularity_communities(G, weight="weight", resolution=0.99) == expected + ) + + +def test_modularity_communities_directed_weighted(): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 3, 3), + (2, 3, 6), + (2, 6, 1), + (1, 4, 1), + (4, 5, 3), + (4, 6, 7), + (5, 6, 2), + (5, 7, 5), + (5, 8, 4), + (6, 8, 3), + ] + ) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # A large weight of the edge (2, 6) causes 6 to change group, even if it shares + # only one connection with the new group and 3 with the old one. 
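+ # (Modularity trades actual edge weight against expected weight, so a + # single heavy edge can outweigh several unit-weight edges.)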
+ G[2][6]["weight"] = 20 + expected = [frozenset({1, 2, 3, 6}), frozenset({4, 5, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greedy_modularity_communities_multigraph(): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (1, 3), + (2, 3), + (1, 4), + (2, 4), + (4, 5), + (5, 6), + (5, 7), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G) == expected + + # Converting (4, 5) into a multi-edge causes node 4 to change group. + G.add_edge(4, 5) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_multigraph_weighted(): + G = nx.MultiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (1, 3, 6), + (1, 3, 6), + (2, 3, 4), + (1, 4, 1), + (1, 4, 1), + (2, 4, 3), + (2, 4, 3), + (4, 5, 1), + (5, 6, 3), + (5, 6, 7), + (5, 6, 4), + (5, 7, 9), + (5, 7, 9), + (6, 7, 8), + (7, 8, 2), + (7, 8, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Adding multi-edge (4, 5, 16) causes node 4 to change group. + G.add_edge(4, 5, weight=16) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Increasing the weight of edge (1, 4) causes node 4 to return to the former group. + G[1][4][1]["weight"] = 3 + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph(): + G = nx.MultiDiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (3, 1), + (2, 3), + (2, 3), + (3, 2), + (1, 4), + (2, 4), + (4, 2), + (4, 5), + (5, 6), + (5, 6), + (6, 5), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + (8, 4), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph_weighted(): + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (3, 1, 6), + (1, 3, 6), + (3, 2, 4), + (1, 4, 2), + (1, 4, 5), + (2, 4, 3), + (3, 2, 8), + (4, 2, 3), + (4, 3, 5), + (4, 5, 2), + (5, 6, 3), + (5, 6, 7), + (6, 5, 4), + (5, 7, 9), + (5, 7, 9), + (7, 6, 8), + (7, 8, 2), + (8, 7, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_resolution_parameter_impact(): + G = nx.barbell_graph(5, 3) + + gamma = 1 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 2.5 + expected = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 0.3 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + +def test_cutoff_parameter(): + G = 
nx.circular_ladder_graph(4) + + # No aggregation: + expected = [{k} for k in range(8)] + assert greedy_modularity_communities(G, cutoff=8) == expected + + # Aggregation to half order (number of nodes) + expected = [{k, k + 1} for k in range(0, 8, 2)] + assert greedy_modularity_communities(G, cutoff=4) == expected + + # Default aggregation case (here, 2 communities emerge) + expected = [frozenset(range(4)), frozenset(range(4, 8))] + assert greedy_modularity_communities(G, cutoff=1) == expected + + +def test_best_n(): + G = nx.barbell_graph(5, 3) + + # Same result as without enforcing cutoff: + best_n = 3 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # One additional merging step: + best_n = 2 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # Two additional merging steps: + best_n = 1 + expected = [frozenset(range(13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + +def test_greedy_modularity_communities_corner_cases(): + G = nx.empty_graph() + assert nx.community.greedy_modularity_communities(G) == [] + G.add_nodes_from(range(3)) + assert nx.community.greedy_modularity_communities(G) == [{0}, {1}, {2}] diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_quality.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_quality.py new file mode 100644 index 0000000000000000000000000000000000000000..3447c9484e6da68e1245e71cbed5762a18827136 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_quality.py @@ -0,0 +1,138 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.quality` +module. 
+ +""" +import pytest + +import networkx as nx +from networkx import barbell_graph +from networkx.algorithms.community import modularity, partition_quality +from networkx.algorithms.community.quality import inter_community_edges + + +class TestPerformance: + """Unit tests for the :func:`performance` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 8 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 14 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + +class TestCoverage: + """Unit tests for the :func:`coverage` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 3 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 6 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + +def test_modularity(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert (-16 / (14**2)) == pytest.approx(modularity(G, C), abs=1e-7) + C = [{0, 1, 2}, {3, 4, 5}] + assert (35 * 2) / (14**2) == pytest.approx(modularity(G, C), abs=1e-7) + + n = 1000 + G = nx.erdos_renyi_graph(n, 0.09, seed=42, directed=True) + C = [set(range(n // 2)), set(range(n // 2, n))] + assert 0.00017154251389292754 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.margulis_gabber_galil_graph(10) + mid_value = G.number_of_nodes() // 2 + nodes = list(G.nodes) + C = [set(nodes[:mid_value]), set(nodes[mid_value:])] + assert 0.13 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.DiGraph() + G.add_edges_from([(2, 1), (2, 3), (3, 4)]) + C = [{1, 2}, {3, 4}] + assert 2 / 9 == pytest.approx(modularity(G, C), abs=1e-7) + + +def test_modularity_resolution(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert modularity(G, C) == pytest.approx(3 / 7 - 100 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + + C = [{0, 1, 2}, {3, 4, 5}] + assert modularity(G, C) == pytest.approx(6 / 7 - 98 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + + G = nx.barbell_graph(5, 3) + C = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=1: modularity = 0.518229 + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + + C = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + gamma = 1 + 
result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 2.5 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=2.5: modularity = -0.06553819 + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + + C = [frozenset(range(8)), frozenset(range(8, 13))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 0.3 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=0.3: modularity = 0.805990 + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + + +def test_inter_community_edges_with_digraphs(): + G = nx.complete_graph(2, create_using=nx.DiGraph()) + partition = [{0}, {1}] + assert inter_community_edges(G, partition) == 2 + + G = nx.complete_graph(10, create_using=nx.DiGraph()) + partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}] + assert inter_community_edges(G, partition) == 70 + + G = nx.cycle_graph(4, create_using=nx.DiGraph()) + partition = [{0, 1}, {2, 3}] + assert inter_community_edges(G, partition) == 2 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_utils.py b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..329ff66a4229ee67e0e7c58ddaf22aa4f22b290b --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/community/tests/test_utils.py @@ -0,0 +1,28 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.utils` module. 
+ +""" + +import networkx as nx + + +def test_is_partition(): + G = nx.empty_graph(3) + assert nx.community.is_partition(G, [{0, 1}, {2}]) + assert nx.community.is_partition(G, ({0, 1}, {2})) + assert nx.community.is_partition(G, ([0, 1], [2])) + assert nx.community.is_partition(G, [[0, 1], [2]]) + + +def test_not_covering(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0}, {1}]) + + +def test_not_disjoint(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0, 1}, {1, 2}]) + + +def test_not_node(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0, 1}, {3}]) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40138a5f4374c56c1fe633728f87280ef3aff4fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b4445436e9552c462d40aa3b375e18ea5f332db Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24e7ed3735930e1ad8c4829ece609c6bae4cbfb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7120d4bc7ef25279b68eaa23690b6ff4574ed676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__init__.py @@ -0,0 +1,6 @@ +from .branchings import * +from .coding import * +from .mst import * +from .recognition import * +from .operations import * +from .decomposition import * diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..760bfd8e7aa70580a9f57926a5acf7393cbf52e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b01eb619ef71c288d2ec86a2e27ddf9928ee70b Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0edf90e042fa01e68140937261ba84f18e5b130 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d45dc6cb23ccd1f7c88692ad4febc0c93a6c61a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26671fc5feda2cfa5c9d04d12603cb39df6112a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66cb3bdfb36b5a44e9542936aa0301880af42b15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a711edd14e8598f9abe81c1a13268145331d7c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/branchings.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/branchings.py new file mode 100644 index 0000000000000000000000000000000000000000..6c0e349060dac43cc5ffb46033e4f0184999c4c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/branchings.py @@ -0,0 +1,1597 @@ +""" +Algorithms for finding optimum branchings and spanning arborescences. + +This implementation is based on: + + J. Edmonds, Optimum branchings, J. Res. Natl. Bur. Standards 71B (1967), + 233–240. URL: http://archive.org/details/jresv71Bn4p233 + +""" +# TODO: Implement method from Gabow, Galil, Spence and Tarjan: +# +# @article{ +# year={1986}, +# issn={0209-9683}, +# journal={Combinatorica}, +# volume={6}, +# number={2}, +# doi={10.1007/BF02579168}, +# title={Efficient algorithms for finding minimum spanning trees in +# undirected and directed graphs}, +# url={https://doi.org/10.1007/BF02579168}, +# publisher={Springer-Verlag}, +# keywords={68 B 15; 68 C 05}, +# author={Gabow, Harold N. 
and Galil, Zvi and Spencer, Thomas and Tarjan, +# Robert E.}, +# pages={109-122}, +# language={English} +# } +import string +from dataclasses import dataclass, field +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import py_random_state + +from .recognition import is_arborescence, is_branching + +__all__ = [ + "branching_weight", + "greedy_branching", + "maximum_branching", + "minimum_branching", + "minimal_branching", + "maximum_spanning_arborescence", + "minimum_spanning_arborescence", + "ArborescenceIterator", + "Edmonds", +] + +KINDS = {"max", "min"} + +STYLES = { + "branching": "branching", + "arborescence": "arborescence", + "spanning arborescence": "arborescence", +} + +INF = float("inf") + + +@py_random_state(1) +def random_string(L=15, seed=None): + return "".join([seed.choice(string.ascii_letters) for n in range(L)]) + + +def _min_weight(weight): + return -weight + + +def _max_weight(weight): + return weight + + +@nx._dispatchable(edge_attrs={"attr": "default"}) +def branching_weight(G, attr="weight", default=1): + """ + Returns the total weight of a branching. + + You must access this function through the networkx.algorithms.tree module. + + Parameters + ---------- + G : DiGraph + The directed graph. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. + default : float + When `attr` is not None, then if an edge does not have that attribute, + `default` specifies what value it should take. + + Returns + ------- + weight: int or float + The total weight of the branching. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 4), (2, 3, 3), (3, 4, 2)]) + >>> nx.tree.branching_weight(G) + 11 + + """ + return sum(edge[2].get(attr, default) for edge in G.edges(data=True)) + + +@py_random_state(4) +@nx._dispatchable(edge_attrs={"attr": "default"}, returns_graph=True) +def greedy_branching(G, attr="weight", default=1, kind="max", seed=None): + """ + Returns a branching obtained through a greedy algorithm. + + This algorithm is wrong, and cannot give a proper optimal branching. + However, we include it for pedagogical reasons, as it can be helpful to + see what its outputs are. + + The output is a branching, and possibly, a spanning arborescence. However, + it is not guaranteed to be optimal in either case. + + Parameters + ---------- + G : DiGraph + The directed graph to scan. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. + default : float + When `attr` is not None, then if an edge does not have that attribute, + `default` specifies what value it should take. + kind : str + The type of optimum to search for: 'min' or 'max' greedy branching. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + B : directed graph + The greedily obtained branching. + + """ + if kind not in KINDS: + raise nx.NetworkXException("Unknown value for `kind`.") + + if kind == "min": + reverse = False + else: + reverse = True + + if attr is None: + # Generate a random string the graph probably won't have. + attr = random_string(seed=seed) + + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + + # We sort by weight, but also by nodes to normalize behavior across runs. 
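+ # Sorting on (weight, u, v) rather than weight alone makes tie-breaking + # deterministic, so repeated runs on the same graph return the same branching.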
+ try: + edges.sort(key=itemgetter(2, 0, 1), reverse=reverse) + except TypeError: + # This will fail in Python 3.x if the nodes are of varying types. + # In that case, we use the arbitrary order. + edges.sort(key=itemgetter(2), reverse=reverse) + + # The branching begins with a forest of no edges. + B = nx.DiGraph() + B.add_nodes_from(G) + + # Now we add edges greedily so long as we maintain the branching. + uf = nx.utils.UnionFind() + for i, (u, v, w) in enumerate(edges): + if uf[u] == uf[v]: + # Adding this edge would form a directed cycle. + continue + elif B.in_degree(v) == 1: + # The edge would increase the in-degree of v to be greater than one. + continue + else: + # If attr was None, then don't insert weights... + data = {} + if attr is not None: + data[attr] = w + B.add_edge(u, v, **data) + uf.union(u, v) + + return B + + +class MultiDiGraph_EdgeKey(nx.MultiDiGraph): + """ + MultiDiGraph which assigns unique keys to every edge. + + Adds a dictionary edge_index which maps edge keys to (u, v, data) tuples. + + This is not a complete implementation. For Edmonds' algorithm, we only use + add_node and add_edge, so that is all that is implemented here. During + additions, any specified keys are ignored---this means that you also + cannot update edge attributes through add_node and add_edge. + + Why do we need this? Edmonds' algorithm requires that we track edges, even + as we change the head and tail of an edge, and even as we change the weight + of edges. We must reliably track edges across graph mutations. + """ + + def __init__(self, incoming_graph_data=None, **attr): + cls = super() + cls.__init__(incoming_graph_data=incoming_graph_data, **attr) + + self._cls = cls + self.edge_index = {} + + import warnings + + msg = "MultiDiGraph_EdgeKey has been deprecated and will be removed in NetworkX 3.4." + warnings.warn(msg, DeprecationWarning) + + def remove_node(self, n): + keys = set() + for keydict in self.pred[n].values(): + keys.update(keydict) + for keydict in self.succ[n].values(): + keys.update(keydict) + + for key in keys: + del self.edge_index[key] + + self._cls.remove_node(n) + + def remove_nodes_from(self, nbunch): + for n in nbunch: + self.remove_node(n) + + def add_edge(self, u_for_edge, v_for_edge, key_for_edge, **attr): + """ + Key is now required. + + """ + u, v, key = u_for_edge, v_for_edge, key_for_edge + if key in self.edge_index: + uu, vv, _ = self.edge_index[key] + if (u != uu) or (v != vv): + raise Exception(f"Key {key!r} is already in use.") + + self._cls.add_edge(u, v, key, **attr) + self.edge_index[key] = (u, v, self.succ[u][v][key]) + + def add_edges_from(self, ebunch_to_add, **attr): + for u, v, k, d in ebunch_to_add: + self.add_edge(u, v, k, **d) + + def remove_edge_with_key(self, key): + try: + u, v, _ = self.edge_index[key] + except KeyError as err: + raise KeyError(f"Invalid edge key {key!r}") from err + else: + del self.edge_index[key] + self._cls.remove_edge(u, v, key) + + def remove_edges_from(self, ebunch): + raise NotImplementedError + + +def get_path(G, u, v): + """ + Returns the edge keys of the unique path between u and v. + + This is not a generic function. G must be a branching and an instance of + MultiDiGraph_EdgeKey. + + """ + nodes = nx.shortest_path(G, u, v) + + # We are guaranteed that there is only one edge connecting each pair + # of consecutive nodes in the shortest path.
+ + def first_key(i, vv): + # Needed for 2.x/3.x compatibility + keys = G[nodes[i]][vv].keys() + # Normalize behavior + keys = list(keys) + return keys[0] + + edges = [first_key(i, vv) for i, vv in enumerate(nodes[1:])] + return nodes, edges + + +class Edmonds: + """ + Edmonds' algorithm [1]_ for finding optimal branchings and spanning + arborescences. + + This algorithm can find both minimum and maximum spanning arborescences and + branchings. + + Notes + ----- + While this algorithm can find a minimum branching, note that a branching is + not required to be spanning, so the minimum branching is always drawn from + the set of negative-weight edges, which for most graphs is the empty set. + + References + ---------- + .. [1] J. Edmonds, Optimum Branchings, Journal of Research of the National + Bureau of Standards, 1967, Vol. 71B, p.233-240, + https://archive.org/details/jresv71Bn4p233 + + """ + + def __init__(self, G, seed=None): + self.G_original = G + + # Need to fix this. We need the whole tree. + self.store = True + + # The final answer. + self.edges = [] + + # Since we will be creating graphs with new nodes, we need to make + # sure that our node names do not conflict with the real node names. + self.template = random_string(seed=seed) + "_{0}" + + import warnings + + msg = "Edmonds has been deprecated and will be removed in NetworkX 3.4. Please use the appropriate minimum or maximum branching or arborescence function directly." + warnings.warn(msg, DeprecationWarning) + + def _init(self, attr, default, kind, style, preserve_attrs, seed, partition): + """ + The code in _init and find_optimum together runs Edmonds' algorithm. + Responsibilities of the _init function: + - Check that the kind argument is in {min, max} or raise a NetworkXException. + - Transform the graph if we need a minimum arborescence/branching. + - The current method is to map weight -> -weight. This is NOT a good approach since + the algorithm can and does choose to ignore negative weights when creating a branching, + since that is always optimal when maximizing the weights. I think we should set the edge + weights to be (max_weight + 1) - edge_weight. + - Transform the graph into a MultiDiGraph, adding the partition information and potentially + other edge attributes if we set preserve_attrs = True. + - Set up the buckets and union-find data structures required for the algorithm. + """ + if kind not in KINDS: + raise nx.NetworkXException("Unknown value for `kind`.") + + # Store inputs. + self.attr = attr + self.default = default + self.kind = kind + self.style = style + + # Determine how we are going to transform the weights. + if kind == "min": + self.trans = trans = _min_weight + else: + self.trans = trans = _max_weight + + if attr is None: + # Generate a random attr the graph probably won't have. + attr = random_string(seed=seed) + + # This is the actual attribute used by the algorithm. + self._attr = attr + + # This attribute is used to store whether a particular edge is still + # a candidate. We generate a random attr to remove clashes with + # preserved edges. + self.candidate_attr = "candidate_" + random_string(seed=seed) + + # The object we manipulate at each step is a multidigraph.
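+ # A multidigraph is needed because contracting a circuit can leave several + # parallel edges between the merged node and a neighbor, and each parallel + # edge must keep its own key so it can be traced back to an edge of G^0.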
+ self.G = G = MultiDiGraph_EdgeKey() + self.G.__networkx_cache__ = None # Disable caching + for key, (u, v, data) in enumerate(self.G_original.edges(data=True)): + d = {attr: trans(data.get(attr, default))} + + if data.get(partition) is not None: + d[partition] = data.get(partition) + + if preserve_attrs: + for d_k, d_v in data.items(): + if d_k != attr: + d[d_k] = d_v + + G.add_edge(u, v, key, **d) + + self.level = 0 + + # These are the "buckets" from the paper. + # + # As in the paper, G^i are modified versions of the original graph. + # D^i and E^i are nodes and edges of the maximal edges that are + # consistent with G^i. These are dashed edges in figures A-F of the + # paper. In this implementation, we store D^i and E^i together as a + # graph B^i. So we will have strictly more B^i than the paper does. + self.B = MultiDiGraph_EdgeKey() + self.B.edge_index = {} + self.graphs = [] # G^i + self.branchings = [] # B^i + self.uf = nx.utils.UnionFind() + + # A list of lists of edge indexes. Each list is a circuit for graph G^i. + # Note the edge list will not, in general, be a circuit in graph G^0. + self.circuits = [] + # Stores the index of the minimum edge in the circuit found in G^i + # and B^i. The ordering of the edges seems to preserve the weight + # ordering from G^0. So even if the circuit does not form a circuit + # in G^0, it is still true that the minimum edge of the circuit in + # G^i is still the minimum edge in circuit G^0 (despite their weights + # being different). + self.minedge_circuit = [] + + # TODO: separate each step into an inner function. Then the overall loop would become + # while True: + # step_I1() + # if cycle detected: + # step_I2() + # elif every node of G is in D and E is a branching + # break + + def find_optimum( + self, + attr="weight", + default=1, + kind="max", + style="branching", + preserve_attrs=False, + partition=None, + seed=None, + ): + """ + Returns a branching from G. + + Parameters + ---------- + attr : str + The edge attribute used in determining optimality. + default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. + kind : {'min', 'max'} + The type of optimum to search for, either 'min' or 'max'. + style : {'branching', 'arborescence'} + If 'branching', then an optimal branching is found. If `style` is + 'arborescence', then a branching is found, such that if the + branching is also an arborescence, then the branching is an + optimal spanning arborescence. A given graph G need not have + an optimal spanning arborescence. + preserve_attrs : bool + If True, preserve the other edge attributes of the original + graph (that are not the one passed to `attr`). + partition : str + The edge attribute holding edge partition data. Used in the + spanning arborescence iterator. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + H : (multi)digraph + The branching. + + """ + self._init(attr, default, kind, style, preserve_attrs, seed, partition) + uf = self.uf + + # This enormous while loop could use some refactoring... + + G, B = self.G, self.B + D = set() + nodes = iter(list(G.nodes())) + attr = self._attr + + def desired_edge(v): + """ + Find the edge directed toward v with maximal weight. + + If an edge partition exists in this graph, return the included edge + if it exists and do not return any excluded edges. There can be + only one included edge for each vertex; otherwise the edge + partition is empty.
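+ Returns a tuple (edge, weight). If v has no admissible incoming + edge, edge is None and weight is -INF.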
+ """ + edge = None + weight = -INF + for u, _, key, data in G.in_edges(v, data=True, keys=True): + # Skip excluded edges + if data.get(partition) == nx.EdgePartition.EXCLUDED: + continue + new_weight = data[attr] + # Return the included edge + if data.get(partition) == nx.EdgePartition.INCLUDED: + weight = new_weight + edge = (u, v, key, new_weight, data) + return edge, weight + # Find the best open edge + if new_weight > weight: + weight = new_weight + edge = (u, v, key, new_weight, data) + + return edge, weight + + while True: + # (I1): Choose a node v in G^i not in D^i. + try: + v = next(nodes) + except StopIteration: + # If there are no more new nodes to consider, then we *should* + # meet the break condition (b) from the paper: + # (b) every node of G^i is in D^i and E^i is a branching + # Construction guarantees that it's a branching. + assert len(G) == len(B) + if len(B): + assert is_branching(B) + + if self.store: + self.graphs.append(G.copy()) + self.branchings.append(B.copy()) + + # Add these to keep the lengths equal. Element i is the + # circuit at level i that was merged to form branching i+1. + # There is no circuit for the last level. + self.circuits.append([]) + self.minedge_circuit.append(None) + break + else: + if v in D: + # print("v in D", v) + continue + + # Put v into bucket D^i. + # print(f"Adding node {v}") + D.add(v) + B.add_node(v) + # End (I1) + + # Start cycle detection + edge, weight = desired_edge(v) + # print(f"Max edge is {edge!r}") + if edge is None: + # If there is no edge, continue with a new node at (I1). + continue + else: + # Determine if adding the edge to E^i would mean its no longer + # a branching. Presently, v has indegree 0 in B---it is a root. + u = edge[0] + + if uf[u] == uf[v]: + # Then adding the edge will create a circuit. Then B + # contains a unique path P from v to u. So condition (a) + # from the paper does hold. We need to store the circuit + # for future reference. + Q_nodes, Q_edges = get_path(B, v, u) + Q_edges.append(edge[2]) # Edge key + else: + # Then B with the edge is still a branching and condition + # (a) from the paper does not hold. + Q_nodes, Q_edges = None, None + # End cycle detection + + # THIS WILL PROBABLY BE REMOVED? MAYBE A NEW ARG FOR THIS FEATURE? + # Conditions for adding the edge. + # If weight < 0, then it cannot help in finding a maximum branching. + # This is the root of the problem with minimum branching. + if self.style == "branching" and weight <= 0: + acceptable = False + else: + acceptable = True + + # print(f"Edge is acceptable: {acceptable}") + if acceptable: + dd = {attr: weight} + if edge[4].get(partition) is not None: + dd[partition] = edge[4].get(partition) + B.add_edge(u, v, edge[2], **dd) + G[u][v][edge[2]][self.candidate_attr] = True + uf.union(u, v) + if Q_edges is not None: + # print("Edge introduced a simple cycle:") + # print(Q_nodes, Q_edges) + + # Move to method + # Previous meaning of u and v is no longer important. + + # Apply (I2). + # Get the edge in the cycle with the minimum weight. + # Also, save the incoming weights for each node. 
+ minweight = INF + minedge = None + Q_incoming_weight = {} + for edge_key in Q_edges: + u, v, data = B.edge_index[edge_key] + # We cannot remove an included edges, even if it is + # the minimum edge in the circuit + w = data[attr] + Q_incoming_weight[v] = w + if data.get(partition) == nx.EdgePartition.INCLUDED: + continue + if w < minweight: + minweight = w + minedge = edge_key + + self.circuits.append(Q_edges) + self.minedge_circuit.append(minedge) + + if self.store: + self.graphs.append(G.copy()) + # Always need the branching with circuits. + self.branchings.append(B.copy()) + + # Now we mutate it. + new_node = self.template.format(self.level) + + # print(minweight, minedge, Q_incoming_weight) + + G.add_node(new_node) + new_edges = [] + for u, v, key, data in G.edges(data=True, keys=True): + if u in Q_incoming_weight: + if v in Q_incoming_weight: + # Circuit edge, do nothing for now. + # Eventually delete it. + continue + else: + # Outgoing edge. Make it from new node + dd = data.copy() + new_edges.append((new_node, v, key, dd)) + else: + if v in Q_incoming_weight: + # Incoming edge. Change its weight + w = data[attr] + w += minweight - Q_incoming_weight[v] + dd = data.copy() + dd[attr] = w + new_edges.append((u, new_node, key, dd)) + else: + # Outside edge. No modification necessary. + continue + + G.remove_nodes_from(Q_nodes) + B.remove_nodes_from(Q_nodes) + D.difference_update(set(Q_nodes)) + + for u, v, key, data in new_edges: + G.add_edge(u, v, key, **data) + if self.candidate_attr in data: + del data[self.candidate_attr] + B.add_edge(u, v, key, **data) + uf.union(u, v) + + nodes = iter(list(G.nodes())) + self.level += 1 + # END STEP (I2)? + + # (I3) Branch construction. + # print(self.level) + H = self.G_original.__class__() + + def is_root(G, u, edgekeys): + """ + Returns True if `u` is a root node in G. + + Node `u` will be a root node if its in-degree, restricted to the + specified edges, is equal to 0. + + """ + if u not in G: + # print(G.nodes(), u) + raise Exception(f"{u!r} not in G") + for v in G.pred[u]: + for edgekey in G.pred[u][v]: + if edgekey in edgekeys: + return False, edgekey + else: + return True, None + + # Start with the branching edges in the last level. + edges = set(self.branchings[self.level].edge_index) + while self.level > 0: + self.level -= 1 + + # The current level is i, and we start counting from 0. + + # We need the node at level i+1 that results from merging a circuit + # at level i. randomname_0 is the first merged node and this + # happens at level 1. That is, randomname_0 is a node at level 1 + # that results from merging a circuit at level 0. + merged_node = self.template.format(self.level) + + # The circuit at level i that was merged as a node the graph + # at level i+1. + circuit = self.circuits[self.level] + # print + # print(merged_node, self.level, circuit) + # print("before", edges) + # Note, we ask if it is a root in the full graph, not the branching. + # The branching alone doesn't have all the edges. + isroot, edgekey = is_root(self.graphs[self.level + 1], merged_node, edges) + edges.update(circuit) + if isroot: + minedge = self.minedge_circuit[self.level] + if minedge is None: + raise Exception + + # Remove the edge in the cycle with minimum weight. + edges.remove(minedge) + else: + # We have identified an edge at next higher level that + # transitions into the merged node at the level. That edge + # transitions to some corresponding node at the current level. 
+ # We want to remove an edge from the cycle that transitions + # into the corresponding node. + # print("edgekey is: ", edgekey) + # print("circuit is: ", circuit) + # The branching at level i + G = self.graphs[self.level] + # print(G.edge_index) + target = G.edge_index[edgekey][1] + for edgekey in circuit: + u, v, data = G.edge_index[edgekey] + if v == target: + break + else: + raise Exception("Couldn't find edge incoming to merged node.") + + edges.remove(edgekey) + + self.edges = edges + + H.add_nodes_from(self.G_original) + for edgekey in edges: + u, v, d = self.graphs[0].edge_index[edgekey] + dd = {self.attr: self.trans(d[self.attr])} + + # Optionally, preserve the other edge attributes of the original + # graph + if preserve_attrs: + for key, value in d.items(): + if key not in [self.attr, self.candidate_attr]: + dd[key] = value + + # TODO: make this preserve the key. + H.add_edge(u, v, **dd) + + return H + + +@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True) +def maximum_branching( + G, + attr="weight", + default=1, + preserve_attrs=False, + partition=None, +): + ####################################### + ### Data Structure Helper Functions ### + ####################################### + + def edmonds_add_edge(G, edge_index, u, v, key, **d): + """ + Adds an edge to `G` while also updating the edge index. + + This algorithm requires the use of an external dictionary to track + the edge keys since it is possible that the source or destination + node of an edge will be changed and the default key-handling + capabilities of the MultiDiGraph class do not account for this. + + Parameters + ---------- + G : MultiDiGraph + The graph to insert an edge into. + edge_index : dict + A mapping from integers to the edges of the graph. + u : node + The source node of the new edge. + v : node + The destination node of the new edge. + key : int + The key to use from `edge_index`. + d : keyword arguments, optional + Other attributes to store on the new edge. + """ + + if key in edge_index: + uu, vv, _ = edge_index[key] + if (u != uu) or (v != vv): + raise Exception(f"Key {key!r} is already in use.") + + G.add_edge(u, v, key, **d) + edge_index[key] = (u, v, G.succ[u][v][key]) + + def edmonds_remove_node(G, edge_index, n): + """ + Remove a node from the graph, updating the edge index to match. + + Parameters + ---------- + G : MultiDiGraph + The graph to remove an edge from. + edge_index : dict + A mapping from integers to the edges of the graph. + n : node + The node to remove from `G`. + """ + keys = set() + for keydict in G.pred[n].values(): + keys.update(keydict) + for keydict in G.succ[n].values(): + keys.update(keydict) + + for key in keys: + del edge_index[key] + + G.remove_node(n) + + ####################### + ### Algorithm Setup ### + ####################### + + # Pick an attribute name that the original graph is unlikly to have + candidate_attr = "edmonds' secret candidate attribute" + new_node_base_name = "edmonds new node base name " + + G_original = G + G = nx.MultiDiGraph() + G.__networkx_cache__ = None # Disable caching + + # A dict to reliably track mutations to the edges using the key of the edge. 
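+    # For example (editorial note), after the setup loop below the first edge
+    # of G_original is reachable as G_edge_index[0] == (u, v, data_dict), and
+    # key 0 keeps identifying that same edge even if a later contraction
+    # replaces u or v with a merged node.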
+    G_edge_index = {}
+    # Each edge is given an arbitrary numerical key
+    for key, (u, v, data) in enumerate(G_original.edges(data=True)):
+        d = {attr: data.get(attr, default)}
+
+        if data.get(partition) is not None:
+            d[partition] = data.get(partition)
+
+        if preserve_attrs:
+            for d_k, d_v in data.items():
+                if d_k != attr:
+                    d[d_k] = d_v
+
+        edmonds_add_edge(G, G_edge_index, u, v, key, **d)
+
+    level = 0  # Stores the number of contracted nodes
+
+    # These are the buckets from the paper.
+    #
+    # In the paper, G^i are modified versions of the original graph.
+    # D^i and E^i are the nodes and edges of the maximal edges that are
+    # consistent with G^i. In this implementation, D^i and E^i are stored
+    # together as the graph B^i. We will have strictly more B^i than the
+    # paper will have.
+    #
+    # Note that the data in graphs and branchings are tuples with the graph as
+    # the first element and the edge index as the second.
+    B = nx.MultiDiGraph()
+    B_edge_index = {}
+    graphs = []  # G^i list
+    branchings = []  # B^i list
+    selected_nodes = set()  # D^i bucket
+    uf = nx.utils.UnionFind()
+
+    # A list of lists of edge indices. Each list is a circuit for graph G^i.
+    # Note the edge list is not required to be a circuit in G^0.
+    circuits = []
+
+    # Stores the index of the minimum edge in the circuit found in G^i and B^i.
+    # The ordering of the edges seems to preserve the weight ordering from
+    # G^0. So even if the circuit does not form a circuit in G^0, it is still
+    # true that the minimum edge of the circuit in G^i is the minimum edge of
+    # the circuit in G^0 (despite their weights being different).
+    minedge_circuit = []
+
+    ###########################
+    ### Algorithm Structure ###
+    ###########################
+
+    # Each step listed in the algorithm is an inner function. Thus, the overall
+    # loop structure is:
+    #
+    # while True:
+    #     step_I1()
+    #     if cycle detected:
+    #         step_I2()
+    #     elif every node of G is in D and E is a branching:
+    #         break
+
+    ##################################
+    ### Algorithm Helper Functions ###
+    ##################################
+
+    def edmonds_find_desired_edge(v):
+        """
+        Find the edge directed towards v with maximal weight.
+
+        If an edge partition exists in this graph, return the included
+        edge if it exists and never return any excluded edge.
+
+        Note: There can only be one included edge for each vertex, otherwise
+        the edge partition is empty.
+
+        Parameters
+        ----------
+        v : node
+            The node to search for the maximal weight incoming edge.
+        """
+        edge = None
+        max_weight = -INF
+        for u, _, key, data in G.in_edges(v, data=True, keys=True):
+            # Skip excluded edges
+            if data.get(partition) == nx.EdgePartition.EXCLUDED:
+                continue
+
+            new_weight = data[attr]
+
+            # Return the included edge
+            if data.get(partition) == nx.EdgePartition.INCLUDED:
+                max_weight = new_weight
+                edge = (u, v, key, new_weight, data)
+                break
+
+            # Find the best open edge
+            if new_weight > max_weight:
+                max_weight = new_weight
+                edge = (u, v, key, new_weight, data)
+
+        return edge, max_weight
+
+    def edmonds_step_I2(v, desired_edge, level):
+        """
+        Perform step I2 from Edmonds' paper.
+
+        First, check if the last step I1 created a cycle. If it did not, do nothing.
+        If it did, store the cycle for later reference and contract it.
+
+        Parameters
+        ----------
+        v : node
+            The current node to consider.
+        desired_edge : edge
+            The minimum desired edge to remove from the cycle.
+        level : int
+            The current level, i.e. the number of cycles that have already been removed.
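+
+        (Editorial note, not in the original docstring: the contracted
+        circuit is replaced by a single new node named
+        ``new_node_base_name + str(level)``; the first contraction, at
+        level 0, therefore introduces the node
+        ``"edmonds new node base name 0"`` into G^1.)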
+ """ + u = desired_edge[0] + + Q_nodes = nx.shortest_path(B, v, u) + Q_edges = [ + list(B[Q_nodes[i]][vv].keys())[0] for i, vv in enumerate(Q_nodes[1:]) + ] + Q_edges.append(desired_edge[2]) # Add the new edge key to complete the circuit + + # Get the edge in the circuit with the minimum weight. + # Also, save the incoming weights for each node. + minweight = INF + minedge = None + Q_incoming_weight = {} + for edge_key in Q_edges: + u, v, data = B_edge_index[edge_key] + w = data[attr] + # We cannot remove an included edge, even if it is the + # minimum edge in the circuit + Q_incoming_weight[v] = w + if data.get(partition) == nx.EdgePartition.INCLUDED: + continue + if w < minweight: + minweight = w + minedge = edge_key + + circuits.append(Q_edges) + minedge_circuit.append(minedge) + graphs.append((G.copy(), G_edge_index.copy())) + branchings.append((B.copy(), B_edge_index.copy())) + + # Mutate the graph to contract the circuit + new_node = new_node_base_name + str(level) + G.add_node(new_node) + new_edges = [] + for u, v, key, data in G.edges(data=True, keys=True): + if u in Q_incoming_weight: + if v in Q_incoming_weight: + # Circuit edge. For the moment do nothing, + # eventually it will be removed. + continue + else: + # Outgoing edge from a node in the circuit. + # Make it come from the new node instead + dd = data.copy() + new_edges.append((new_node, v, key, dd)) + else: + if v in Q_incoming_weight: + # Incoming edge to the circuit. + # Update it's weight + w = data[attr] + w += minweight - Q_incoming_weight[v] + dd = data.copy() + dd[attr] = w + new_edges.append((u, new_node, key, dd)) + else: + # Outside edge. No modification needed + continue + + for node in Q_nodes: + edmonds_remove_node(G, G_edge_index, node) + edmonds_remove_node(B, B_edge_index, node) + + selected_nodes.difference_update(set(Q_nodes)) + + for u, v, key, data in new_edges: + edmonds_add_edge(G, G_edge_index, u, v, key, **data) + if candidate_attr in data: + del data[candidate_attr] + edmonds_add_edge(B, B_edge_index, u, v, key, **data) + uf.union(u, v) + + def is_root(G, u, edgekeys): + """ + Returns True if `u` is a root node in G. + + Node `u` is a root node if its in-degree over the specified edges is zero. + + Parameters + ---------- + G : Graph + The current graph. + u : node + The node in `G` to check if it is a root. + edgekeys : iterable of edges + The edges for which to check if `u` is a root of. 
+ """ + if u not in G: + raise Exception(f"{u!r} not in G") + + for v in G.pred[u]: + for edgekey in G.pred[u][v]: + if edgekey in edgekeys: + return False, edgekey + else: + return True, None + + nodes = iter(list(G.nodes)) + while True: + try: + v = next(nodes) + except StopIteration: + # If there are no more new nodes to consider, then we should + # meet stopping condition (b) from the paper: + # (b) every node of G^i is in D^i and E^i is a branching + assert len(G) == len(B) + if len(B): + assert is_branching(B) + + graphs.append((G.copy(), G_edge_index.copy())) + branchings.append((B.copy(), B_edge_index.copy())) + circuits.append([]) + minedge_circuit.append(None) + + break + else: + ##################### + ### BEGIN STEP I1 ### + ##################### + + # This is a very simple step, so I don't think it needs a method of it's own + if v in selected_nodes: + continue + + selected_nodes.add(v) + B.add_node(v) + desired_edge, desired_edge_weight = edmonds_find_desired_edge(v) + + # There might be no desired edge if all edges are excluded or + # v is the last node to be added to B, the ultimate root of the branching + if desired_edge is not None and desired_edge_weight > 0: + u = desired_edge[0] + # Flag adding the edge will create a circuit before merging the two + # connected components of u and v in B + circuit = uf[u] == uf[v] + dd = {attr: desired_edge_weight} + if desired_edge[4].get(partition) is not None: + dd[partition] = desired_edge[4].get(partition) + + edmonds_add_edge(B, B_edge_index, u, v, desired_edge[2], **dd) + G[u][v][desired_edge[2]][candidate_attr] = True + uf.union(u, v) + + ################### + ### END STEP I1 ### + ################### + + ##################### + ### BEGIN STEP I2 ### + ##################### + + if circuit: + edmonds_step_I2(v, desired_edge, level) + nodes = iter(list(G.nodes())) + level += 1 + + ################### + ### END STEP I2 ### + ################### + + ##################### + ### BEGIN STEP I3 ### + ##################### + + # Create a new graph of the same class as the input graph + H = G_original.__class__() + + # Start with the branching edges in the last level. + edges = set(branchings[level][1]) + while level > 0: + level -= 1 + + # The current level is i, and we start counting from 0. + # + # We need the node at level i+1 that results from merging a circuit + # at level i. basename_0 is the first merged node and this happens + # at level 1. That is basename_0 is a node at level 1 that results + # from merging a circuit at level 0. + + merged_node = new_node_base_name + str(level) + circuit = circuits[level] + isroot, edgekey = is_root(graphs[level + 1][0], merged_node, edges) + edges.update(circuit) + + if isroot: + minedge = minedge_circuit[level] + if minedge is None: + raise Exception + + # Remove the edge in the cycle with minimum weight + edges.remove(minedge) + else: + # We have identified an edge at the next higher level that + # transitions into the merged node at this level. That edge + # transitions to some corresponding node at the current level. + # + # We want to remove an edge from the cycle that transitions + # into the corresponding node, otherwise the result would not + # be a branching. 
+
+            G, G_edge_index = graphs[level]
+            target = G_edge_index[edgekey][1]
+            for edgekey in circuit:
+                u, v, data = G_edge_index[edgekey]
+                if v == target:
+                    break
+            else:
+                raise Exception("Couldn't find edge incoming to merged node.")
+
+            edges.remove(edgekey)
+
+    H.add_nodes_from(G_original)
+    for edgekey in edges:
+        u, v, d = graphs[0][1][edgekey]
+        dd = {attr: d[attr]}
+
+        if preserve_attrs:
+            for key, value in d.items():
+                if key not in [attr, candidate_attr]:
+                    dd[key] = value
+
+        H.add_edge(u, v, **dd)
+
+    ###################
+    ### END STEP I3 ###
+    ###################
+
+    return H
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def minimum_branching(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    for _, _, d in G.edges(data=True):
+        d[attr] = -d.get(attr, default)
+    nx._clear_cache(G)
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    for _, _, d in G.edges(data=True):
+        d[attr] = -d.get(attr, default)
+    nx._clear_cache(G)
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = -d.get(attr, default)
+    nx._clear_cache(B)
+
+    return B
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def minimal_branching(
+    G, /, *, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    """
+    Returns a minimal branching from `G`.
+
+    A minimal branching is a branching similar to a minimal arborescence but
+    without the requirement that the result is actually a spanning arborescence.
+    This allows minimal branchings to be computed over graphs which may not
+    have a spanning arborescence (such as graphs with multiple components).
+
+    Parameters
+    ----------
+    G : (multi)digraph-like
+        The graph to be searched.
+    attr : str
+        The edge attribute used in determining optimality.
+    default : float
+        The value of the edge attribute used if an edge does not have
+        the attribute `attr`.
+    preserve_attrs : bool
+        If True, preserve the other attributes of the original graph (that are
+        not passed to `attr`).
+    partition : str
+        The key for the edge attribute containing the partition
+        data on the graph. Edges can be included, excluded or open using the
+        `EdgePartition` enum.
+
+    Returns
+    -------
+    B : (multi)digraph-like
+        A minimal branching.
+    """
+    max_weight = -INF
+    min_weight = INF
+    for _, _, w in G.edges(data=attr, default=default):
+        if w > max_weight:
+            max_weight = w
+        if w < min_weight:
+            min_weight = w
+
+    for _, _, d in G.edges(data=True):
+        # Transform the weights so that the minimum weight is larger than
+        # the difference between the max and min weights. This is important
+        # in order to prevent the edge weights from becoming negative during
+        # computation
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default)
+    nx._clear_cache(G)
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    # Reverse the weight transformations
+    for _, _, d in G.edges(data=True):
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default)
+    nx._clear_cache(G)
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default)
+    nx._clear_cache(B)
+
+    return B
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def maximum_spanning_arborescence(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    # In order to use the same algorithm as the maximum branching, we need to adjust
+    # the weights of the graph.
The branching algorithm can choose to not include an + # edge if it doesn't help find a branching, mainly triggered by edges with negative + # weights. + # + # To prevent this from happening while trying to find a spanning arborescence, we + # just have to tweak the edge weights so that they are all positive and cannot + # become negative during the branching algorithm, find the maximum branching and + # then return them to their original values. + + min_weight = INF + max_weight = -INF + for _, _, w in G.edges(data=attr, default=default): + if w < min_weight: + min_weight = w + if w > max_weight: + max_weight = w + + for _, _, d in G.edges(data=True): + d[attr] = d.get(attr, default) - min_weight + 1 - (min_weight - max_weight) + nx._clear_cache(G) + + B = maximum_branching(G, attr, default, preserve_attrs, partition) + + for _, _, d in G.edges(data=True): + d[attr] = d.get(attr, default) + min_weight - 1 + (min_weight - max_weight) + nx._clear_cache(G) + + for _, _, d in B.edges(data=True): + d[attr] = d.get(attr, default) + min_weight - 1 + (min_weight - max_weight) + nx._clear_cache(B) + + if not is_arborescence(B): + raise nx.exception.NetworkXException("No maximum spanning arborescence in G.") + + return B + + +@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True) +def minimum_spanning_arborescence( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + B = minimal_branching( + G, + attr=attr, + default=default, + preserve_attrs=preserve_attrs, + partition=partition, + ) + + if not is_arborescence(B): + raise nx.exception.NetworkXException("No minimum spanning arborescence in G.") + + return B + + +docstring_branching = """ +Returns a {kind} {style} from G. + +Parameters +---------- +G : (multi)digraph-like + The graph to be searched. +attr : str + The edge attribute used to in determining optimality. +default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. +preserve_attrs : bool + If True, preserve the other attributes of the original graph (that are not + passed to `attr`) +partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + +Returns +------- +B : (multi)digraph-like + A {kind} {style}. +""" + +docstring_arborescence = ( + docstring_branching + + """ +Raises +------ +NetworkXException + If the graph does not contain a {kind} {style}. + +""" +) + +maximum_branching.__doc__ = docstring_branching.format( + kind="maximum", style="branching" +) + +minimum_branching.__doc__ = ( + docstring_branching.format(kind="minimum", style="branching") + + """ +See Also +-------- + minimal_branching +""" +) + +maximum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="maximum", style="spanning arborescence" +) + +minimum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="minimum", style="spanning arborescence" +) + + +class ArborescenceIterator: + """ + Iterate over all spanning arborescences of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges). It generates minimum spanning + arborescences using a modified Edmonds' Algorithm which respects the + partition of edges. For arborescences with the same weight, ties are + broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. 
Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning arborescence of the + partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return ArborescenceIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, init_partition=None): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.DiGraph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + init_partition : tuple, default = None + In the case that certain edges have to be included or excluded from + the arborescences, `init_partition` should be in the form + `(included_edges, excluded_edges)` where each edges is a + `(u, v)`-tuple inside an iterable such as a list or set. + + """ + self.G = G.copy() + self.weight = weight + self.minimum = minimum + self.method = ( + minimum_spanning_arborescence if minimum else maximum_spanning_arborescence + ) + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "ArborescenceIterators super secret partition attribute name" + ) + if init_partition is not None: + partition_dict = {} + for e in init_partition[0]: + partition_dict[e] = nx.EdgePartition.INCLUDED + for e in init_partition[1]: + partition_dict[e] = nx.EdgePartition.EXCLUDED + self.init_partition = ArborescenceIterator.Partition(0, partition_dict) + else: + self.init_partition = None + + def __iter__(self): + """ + Returns + ------- + ArborescenceIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + + # Write the initial partition if it exists. + if self.init_partition is not None: + self._write_partition(self.init_partition) + + mst_weight = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition( + mst_weight if self.minimum else -mst_weight, + {} + if self.init_partition is None + else self.init_partition.partition_dict, + ) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_arborescence = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + self._partition(partition, next_arborescence) + + self._clear_partition(next_arborescence) + return next_arborescence + + def _partition(self, partition, partition_arborescence): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. 
+ partition_arborescence : nx.Graph + The minimum spanning arborescence of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_arborescence.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = nx.EdgePartition.EXCLUDED + p2.partition_dict[e] = nx.EdgePartition.INCLUDED + + self._write_partition(p1) + try: + p1_mst = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + + p1_mst_weight = p1_mst.size(weight=self.weight) + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + except nx.NetworkXException: + pass + + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. Also, if one incoming edge is included, mark all others + as excluded so that if that vertex is merged during Edmonds' algorithm + we cannot still pick another of that vertex's included edges. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. + """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = nx.EdgePartition.OPEN + nx._clear_cache(self.G) + + for n in self.G: + included_count = 0 + excluded_count = 0 + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) == nx.EdgePartition.INCLUDED: + included_count += 1 + elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED: + excluded_count += 1 + # Check that if there is an included edges, all other incoming ones + # are excluded. If not fix it! + if included_count == 1 and excluded_count != self.G.in_degree(n) - 1: + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) != nx.EdgePartition.INCLUDED: + d[self.partition_key] = nx.EdgePartition.EXCLUDED + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] + nx._clear_cache(self.G) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/coding.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/coding.py new file mode 100644 index 0000000000000000000000000000000000000000..8cec023c228652842282843687fa2bb41fbda8b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/coding.py @@ -0,0 +1,412 @@ +"""Functions for encoding and decoding trees. + +Since a tree is a highly restricted form of graph, it can be represented +concisely in several ways. This module includes functions for encoding +and decoding trees in the form of nested tuples and Prüfer +sequences. The former requires a rooted tree, whereas the latter can be +applied to unrooted trees. Furthermore, there is a bijection from Prüfer +sequences to labeled trees. 
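+
+A short round-trip example (an editorial addition; the equality is exactly
+what the bijection guarantees)::
+
+    >>> import networkx as nx
+    >>> T = nx.path_graph(4)
+    >>> seq = nx.to_prufer_sequence(T)
+    >>> seq
+    [1, 2]
+    >>> sorted(nx.from_prufer_sequence(seq).edges()) == sorted(T.edges())
+    True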
+ +""" +from collections import Counter +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_nested_tuple", + "from_prufer_sequence", + "NotATree", + "to_nested_tuple", + "to_prufer_sequence", +] + + +class NotATree(nx.NetworkXException): + """Raised when a function expects a tree (that is, a connected + undirected graph with no cycles) but gets a non-tree graph as input + instead. + + """ + + +@not_implemented_for("directed") +@nx._dispatchable(graphs="T") +def to_nested_tuple(T, root, canonical_form=False): + """Returns a nested tuple representation of the given tree. + + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + root : node + The node in ``T`` to interpret as the root of the tree. + + canonical_form : bool + If ``True``, each tuple is sorted so that the function returns + a canonical form for rooted trees. This means "lighter" subtrees + will appear as nested tuples before "heavier" subtrees. In this + way, each isomorphic rooted tree has the same nested tuple + representation. + + Returns + ------- + tuple + A nested tuple representation of the tree. + + Notes + ----- + This function is *not* the inverse of :func:`from_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + The tree need not be a balanced binary tree:: + + >>> T = nx.Graph() + >>> T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + >>> T.add_edges_from([(1, 4), (1, 5)]) + >>> T.add_edges_from([(3, 6), (3, 7)]) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + (((), ()), (), ((), ())) + + Continuing the above example, if ``canonical_form`` is ``True``, the + nested tuples will be sorted:: + + >>> nx.to_nested_tuple(T, root, canonical_form=True) + ((), ((), ()), ((), ())) + + Even the path graph can be interpreted as a tree:: + + >>> T = nx.path_graph(4) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + ((((),),),) + + """ + + def _make_tuple(T, root, _parent): + """Recursively compute the nested tuple representation of the + given rooted tree. + + ``_parent`` is the parent node of ``root`` in the supertree in + which ``T`` is a subtree, or ``None`` if ``root`` is the root of + the supertree. This argument is used to determine which + neighbors of ``root`` are children and which is the parent. + + """ + # Get the neighbors of `root` that are not the parent node. We + # are guaranteed that `root` is always in `T` by construction. + children = set(T[root]) - {_parent} + if len(children) == 0: + return () + nested = (_make_tuple(T, v, root) for v in children) + if canonical_form: + nested = sorted(nested) + return tuple(nested) + + # Do some sanity checks on the input. + if not nx.is_tree(T): + raise nx.NotATree("provided graph is not a tree") + if root not in T: + raise nx.NodeNotFound(f"Graph {T} contains no node {root}") + + return _make_tuple(T, root, None) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_nested_tuple(sequence, sensible_relabeling=False): + """Returns the rooted tree corresponding to the given nested tuple. 
+ + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + sequence : tuple + A nested tuple representing a rooted tree. + + sensible_relabeling : bool + Whether to relabel the nodes of the tree so that nodes are + labeled in increasing order according to their breadth-first + search order from the root node. + + Returns + ------- + NetworkX graph + The tree corresponding to the given nested tuple, whose root + node is node 0. If ``sensible_labeling`` is ``True``, nodes will + be labeled in breadth-first search order starting from the root + node. + + Notes + ----- + This function is *not* the inverse of :func:`to_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + to_nested_tuple + from_prufer_sequence + + Examples + -------- + Sensible relabeling ensures that the nodes are labeled from the root + starting at 0:: + + >>> balanced = (((), ()), ((), ())) + >>> T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + >>> edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + >>> all((u, v) in T.edges() or (v, u) in T.edges() for (u, v) in edges) + True + + """ + + def _make_tree(sequence): + """Recursively creates a tree from the given sequence of nested + tuples. + + This function employs the :func:`~networkx.tree.join` function + to recursively join subtrees into a larger tree. + + """ + # The empty sequence represents the empty tree, which is the + # (unique) graph with a single node. We mark the single node + # with an attribute that indicates that it is the root of the + # graph. + if len(sequence) == 0: + return nx.empty_graph(1) + # For a nonempty sequence, get the subtrees for each child + # sequence and join all the subtrees at their roots. After + # joining the subtrees, the root is node 0. + return nx.tree.join_trees([(_make_tree(child), 0) for child in sequence]) + + # Make the tree and remove the `is_root` node attribute added by the + # helper function. + T = _make_tree(sequence) + if sensible_relabeling: + # Relabel the nodes according to their breadth-first search + # order, starting from the root node (that is, the node 0). + bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0))) + labels = {v: i for i, v in enumerate(bfs_nodes)} + # We would like to use `copy=False`, but `relabel_nodes` doesn't + # allow a relabel mapping that can't be topologically sorted. + T = nx.relabel_nodes(T, labels) + return T + + +@not_implemented_for("directed") +@nx._dispatchable(graphs="T") +def to_prufer_sequence(T): + r"""Returns the Prüfer sequence of the given tree. + + A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and + *n* - 1, inclusive. The tree corresponding to a given Prüfer + sequence can be recovered by repeatedly joining a node in the + sequence with a node with the smallest potential degree according to + the sequence. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + Returns + ------- + list + The Prüfer sequence of the given tree. + + Raises + ------ + NetworkXPointlessConcept + If the number of nodes in `T` is less than two. + + NotATree + If `T` is not a tree. + + KeyError + If the set of nodes in `T` is not {0, …, *n* - 1}. 
+
+    Notes
+    -----
+    There is a bijection from labeled trees to Prüfer sequences. This
+    function is the inverse of the :func:`from_prufer_sequence`
+    function.
+
+    Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
+    of from 0 to *n* - 1. This function requires nodes to be labeled in
+    the latter form. You can use :func:`~networkx.relabel_nodes` to
+    relabel the nodes of your tree to the appropriate format.
+
+    This implementation is from [1]_ and has a running time of
+    $O(n)$.
+
+    See also
+    --------
+    to_nested_tuple
+    from_prufer_sequence
+
+    References
+    ----------
+    .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
+           "An optimal algorithm for Prufer codes."
+           *Journal of Software Engineering and Applications* 2.02 (2009): 111.
+
+
+    Examples
+    --------
+    There is a bijection between Prüfer sequences and labeled trees, so
+    this function is the inverse of the :func:`from_prufer_sequence`
+    function:
+
+    >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
+    >>> tree = nx.Graph(edges)
+    >>> sequence = nx.to_prufer_sequence(tree)
+    >>> sequence
+    [3, 3, 3, 4]
+    >>> tree2 = nx.from_prufer_sequence(sequence)
+    >>> list(tree2.edges()) == edges
+    True
+
+    """
+    # Perform some sanity checks on the input.
+    n = len(T)
+    if n < 2:
+        msg = "Prüfer sequence undefined for trees with fewer than two nodes"
+        raise nx.NetworkXPointlessConcept(msg)
+    if not nx.is_tree(T):
+        raise nx.NotATree("provided graph is not a tree")
+    if set(T) != set(range(n)):
+        raise KeyError("tree must have node labels {0, ..., n - 1}")
+
+    degree = dict(T.degree())
+
+    def parents(u):
+        return next(v for v in T[u] if degree[v] > 1)
+
+    index = u = next(k for k in range(n) if degree[k] == 1)
+    result = []
+    for i in range(n - 2):
+        v = parents(u)
+        result.append(v)
+        degree[v] -= 1
+        if v < index and degree[v] == 1:
+            u = v
+        else:
+            index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
+    return result
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def from_prufer_sequence(sequence):
+    r"""Returns the tree corresponding to the given Prüfer sequence.
+
+    A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
+    *n* - 1, inclusive. The tree corresponding to a given Prüfer
+    sequence can be recovered by repeatedly joining a node in the
+    sequence with a node with the smallest potential degree according to
+    the sequence.
+
+    Parameters
+    ----------
+    sequence : list
+        A Prüfer sequence, which is a list of *n* - 2 integers between
+        zero and *n* - 1, inclusive.
+
+    Returns
+    -------
+    NetworkX graph
+        The tree corresponding to the given Prüfer sequence.
+
+    Raises
+    ------
+    NetworkXError
+        If the Prüfer sequence is not valid.
+
+    Notes
+    -----
+    There is a bijection from labeled trees to Prüfer sequences. This
+    function is the inverse of the :func:`to_prufer_sequence` function.
+
+    Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
+    of from 0 to *n* - 1. This function requires nodes to be labeled in
+    the latter form. You can use :func:`networkx.relabel_nodes` to
+    relabel the nodes of your tree to the appropriate format.
+
+    This implementation is from [1]_ and has a running time of
+    $O(n)$.
+
+    References
+    ----------
+    .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
+           "An optimal algorithm for Prufer codes."
+           *Journal of Software Engineering and Applications* 2.02 (2009): 111.
+ + + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + There is a bijection between Prüfer sequences and labeled trees, so + this function is the inverse of the :func:`to_prufer_sequence` + function: + + >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + >>> tree = nx.Graph(edges) + >>> sequence = nx.to_prufer_sequence(tree) + >>> sequence + [3, 3, 3, 4] + >>> tree2 = nx.from_prufer_sequence(sequence) + >>> list(tree2.edges()) == edges + True + + """ + n = len(sequence) + 2 + # `degree` stores the remaining degree (plus one) for each node. The + # degree of a node in the decoded tree is one more than the number + # of times it appears in the code. + degree = Counter(chain(sequence, range(n))) + T = nx.empty_graph(n) + # `not_orphaned` is the set of nodes that have a parent in the + # tree. After the loop, there should be exactly two nodes that are + # not in this set. + not_orphaned = set() + index = u = next(k for k in range(n) if degree[k] == 1) + for v in sequence: + # check the validity of the prufer sequence + if v < 0 or v > n - 1: + raise nx.NetworkXError( + f"Invalid Prufer sequence: Values must be between 0 and {n-1}, got {v}" + ) + T.add_edge(u, v) + not_orphaned.add(u) + degree[v] -= 1 + if v < index and degree[v] == 1: + u = v + else: + index = u = next(k for k in range(index + 1, n) if degree[k] == 1) + # At this point, there must be exactly two orphaned nodes; join them. + orphans = set(T) - not_orphaned + u, v = orphans + T.add_edge(u, v) + return T diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/decomposition.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/decomposition.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b8f2477b47581cd6010aba7e3329f5044e0da4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/decomposition.py @@ -0,0 +1,88 @@ +r"""Function for computing a junction tree of a graph.""" + +from itertools import combinations + +import networkx as nx +from networkx.algorithms import chordal_graph_cliques, complete_to_chordal_graph, moral +from networkx.utils import not_implemented_for + +__all__ = ["junction_tree"] + + +@not_implemented_for("multigraph") +@nx._dispatchable(returns_graph=True) +def junction_tree(G): + r"""Returns a junction tree of a given graph. + + A junction tree (or clique tree) is constructed from a (un)directed graph G. + The tree is constructed based on a moralized and triangulated version of G. + The tree's nodes consist of maximal cliques and sepsets of the revised graph. + The sepset of two cliques is the intersection of the nodes of these cliques, + e.g. the sepset of (A,B,C) and (A,C,E,F) is (A,C). These nodes are often called + "variables" in this literature. The tree is bipartite with each sepset + connected to its two cliques. + + Junction Trees are not unique as the order of clique consideration determines + which sepsets are included. + + The junction tree algorithm consists of five steps [1]_: + + 1. Moralize the graph + 2. Triangulate the graph + 3. Find maximal cliques + 4. Build the tree from cliques, connecting cliques with shared + nodes, set edge-weight to number of shared variables + 5. Find maximum spanning tree + + + Parameters + ---------- + G : networkx.Graph + Directed or undirected graph. + + Returns + ------- + junction_tree : networkx.Graph + The corresponding junction tree of `G`. 
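+
+    For example (an editorial sketch; a path graph is already chordal, so
+    its maximal cliques are simply its edges)::
+
+        >>> import networkx as nx
+        >>> G = nx.path_graph(4)
+        >>> jt = nx.junction_tree(G)
+        >>> sorted(jt.nodes())
+        [(0, 1), (1,), (1, 2), (2,), (2, 3)]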
+ + Raises + ------ + NetworkXNotImplemented + Raised if `G` is an instance of `MultiGraph` or `MultiDiGraph`. + + References + ---------- + .. [1] Junction tree algorithm: + https://en.wikipedia.org/wiki/Junction_tree_algorithm + + .. [2] Finn V. Jensen and Frank Jensen. 1994. Optimal + junction trees. In Proceedings of the Tenth international + conference on Uncertainty in artificial intelligence (UAI’94). + Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 360–366. + """ + + clique_graph = nx.Graph() + + if G.is_directed(): + G = moral.moral_graph(G) + chordal_graph, _ = complete_to_chordal_graph(G) + + cliques = [tuple(sorted(i)) for i in chordal_graph_cliques(chordal_graph)] + clique_graph.add_nodes_from(cliques, type="clique") + + for edge in combinations(cliques, 2): + set_edge_0 = set(edge[0]) + set_edge_1 = set(edge[1]) + if not set_edge_0.isdisjoint(set_edge_1): + sepset = tuple(sorted(set_edge_0.intersection(set_edge_1))) + clique_graph.add_edge(edge[0], edge[1], weight=len(sepset), sepset=sepset) + + junction_tree = nx.maximum_spanning_tree(clique_graph) + + for edge in list(junction_tree.edges(data=True)): + junction_tree.add_node(edge[2]["sepset"], type="sepset") + junction_tree.add_edge(edge[0], edge[2]["sepset"]) + junction_tree.add_edge(edge[1], edge[2]["sepset"]) + junction_tree.remove_edge(edge[0], edge[1]) + + return junction_tree diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/mst.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/mst.py new file mode 100644 index 0000000000000000000000000000000000000000..9e8ea3843f979c3ff84ed4267f4d79b5efe85fa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/mst.py @@ -0,0 +1,1274 @@ +""" +Algorithms for calculating min/max spanning trees/forests. + +""" +from dataclasses import dataclass, field +from enum import Enum +from heapq import heappop, heappush +from itertools import count +from math import isnan +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for, py_random_state + +__all__ = [ + "minimum_spanning_edges", + "maximum_spanning_edges", + "minimum_spanning_tree", + "maximum_spanning_tree", + "number_of_spanning_trees", + "random_spanning_tree", + "partition_spanning_tree", + "EdgePartition", + "SpanningTreeIterator", +] + + +class EdgePartition(Enum): + """ + An enum to store the state of an edge partition. The enum is written to the + edges of a graph before being pasted to `kruskal_mst_edges`. Options are: + + - EdgePartition.OPEN + - EdgePartition.INCLUDED + - EdgePartition.EXCLUDED + """ + + OPEN = 0 + INCLUDED = 1 + EXCLUDED = 2 + + +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data") +def boruvka_mst_edges( + G, minimum=True, weight="weight", keys=False, data=True, ignore_nan=False +): + """Iterate over edges of a Borůvka's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The edges of `G` must have distinct weights, + otherwise the edges may not form a tree. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + This argument is ignored since this function is not + implemented for multigraphs; it exists only for consistency + with the other minimum spanning tree functions. 
+ + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + """ + # Initialize a forest, assuming initially that it is the discrete + # partition of the nodes of the graph. + forest = UnionFind(G) + + def best_edge(component): + """Returns the optimum (minimum or maximum) edge on the edge + boundary of the given set of nodes. + + A return value of ``None`` indicates an empty boundary. + + """ + sign = 1 if minimum else -1 + minwt = float("inf") + boundary = None + for e in nx.edge_boundary(G, component, data=True): + wt = e[-1].get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {e}" + raise ValueError(msg) + if wt < minwt: + minwt = wt + boundary = e + return boundary + + # Determine the optimum edge in the edge boundary of each component + # in the forest. + best_edges = (best_edge(component) for component in forest.to_sets()) + best_edges = [edge for edge in best_edges if edge is not None] + # If each entry was ``None``, that means the graph was disconnected, + # so we are done generating the forest. + while best_edges: + # Determine the optimum edge in the edge boundary of each + # component in the forest. + # + # This must be a sequence, not an iterator. In this list, the + # same edge may appear twice, in different orientations (but + # that's okay, since a union operation will be called on the + # endpoints the first time it is seen, but not the second time). + # + # Any ``None`` indicates that the edge boundary for that + # component was empty, so that part of the forest has been + # completed. + # + # TODO This can be parallelized, both in the outer loop over + # each component in the forest and in the computation of the + # minimum. (Same goes for the identical lines outside the loop.) + best_edges = (best_edge(component) for component in forest.to_sets()) + best_edges = [edge for edge in best_edges if edge is not None] + # Join trees in the forest using the best edges, and yield that + # edge, since it is part of the spanning tree. + # + # TODO This loop can be parallelized, to an extent (the union + # operation must be atomic). + for u, v, d in best_edges: + if forest[u] != forest[v]: + if data: + yield u, v, d + else: + yield u, v + forest.union(u, v) + + +@nx._dispatchable( + edge_attrs={"weight": None, "partition": None}, preserve_edge_attrs="data" +) +def kruskal_mst_edges( + G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, partition=None +): + """ + Iterate over edge of a Kruskal's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The graph holding the tree of interest. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + If `G` is a multigraph, `keys` controls whether edge keys ar yielded. + Otherwise `keys` is ignored. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. 
+ + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + partition : string (default: None) + The name of the edge attribute holding the partition data, if it exists. + Partition data is written to the edges using the `EdgePartition` enum. + If a partition exists, all included edges and none of the excluded edges + will appear in the final tree. Open edges may or may not be used. + + Yields + ------ + edge tuple + The edges as discovered by Kruskal's method. Each edge can + take the following forms: `(u, v)`, `(u, v, d)` or `(u, v, k, d)` + depending on the `key` and `data` parameters + """ + subtrees = UnionFind() + if G.is_multigraph(): + edges = G.edges(keys=True, data=True) + else: + edges = G.edges(data=True) + + """ + Sort the edges of the graph with respect to the partition data. + Edges are returned in the following order: + + * Included edges + * Open edges from smallest to largest weight + * Excluded edges + """ + included_edges = [] + open_edges = [] + for e in edges: + d = e[-1] + wt = d.get(weight, 1) + if isnan(wt): + if ignore_nan: + continue + raise ValueError(f"NaN found as an edge weight. Edge {e}") + + edge = (wt,) + e + if d.get(partition) == EdgePartition.INCLUDED: + included_edges.append(edge) + elif d.get(partition) == EdgePartition.EXCLUDED: + continue + else: + open_edges.append(edge) + + if minimum: + sorted_open_edges = sorted(open_edges, key=itemgetter(0)) + else: + sorted_open_edges = sorted(open_edges, key=itemgetter(0), reverse=True) + + # Condense the lists into one + included_edges.extend(sorted_open_edges) + sorted_edges = included_edges + del open_edges, sorted_open_edges, included_edges + + # Multigraphs need to handle edge keys in addition to edge data. + if G.is_multigraph(): + for wt, u, v, k, d in sorted_edges: + if subtrees[u] != subtrees[v]: + if keys: + if data: + yield u, v, k, d + else: + yield u, v, k + else: + if data: + yield u, v, d + else: + yield u, v + subtrees.union(u, v) + else: + for wt, u, v, d in sorted_edges: + if subtrees[u] != subtrees[v]: + if data: + yield u, v, d + else: + yield u, v + subtrees.union(u, v) + + +@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data") +def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False): + """Iterate over edges of Prim's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The graph holding the tree of interest. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + If `G` is a multigraph, `keys` controls whether edge keys ar yielded. + Otherwise `keys` is ignored. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. 
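+
+    Notes
+    -----
+    (Editorial addition.) A single implementation serves both optimization
+    senses: each weight is multiplied by ``sign`` (+1 when ``minimum`` is
+    True, -1 when it is False) before being pushed onto the heap, so popping
+    the min-heap always yields the currently optimal frontier edge.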
+ + """ + is_multigraph = G.is_multigraph() + push = heappush + pop = heappop + + nodes = set(G) + c = count() + + sign = 1 if minimum else -1 + + while nodes: + u = nodes.pop() + frontier = [] + visited = {u} + if is_multigraph: + for v, keydict in G.adj[u].items(): + for k, d in keydict.items(): + wt = d.get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}" + raise ValueError(msg) + push(frontier, (wt, next(c), u, v, k, d)) + else: + for v, d in G.adj[u].items(): + wt = d.get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(u, v, d)}" + raise ValueError(msg) + push(frontier, (wt, next(c), u, v, d)) + while nodes and frontier: + if is_multigraph: + W, _, u, v, k, d = pop(frontier) + else: + W, _, u, v, d = pop(frontier) + if v in visited or v not in nodes: + continue + # Multigraphs need to handle edge keys in addition to edge data. + if is_multigraph and keys: + if data: + yield u, v, k, d + else: + yield u, v, k + else: + if data: + yield u, v, d + else: + yield u, v + # update frontier + visited.add(v) + nodes.discard(v) + if is_multigraph: + for w, keydict in G.adj[v].items(): + if w in visited: + continue + for k2, d2 in keydict.items(): + new_weight = d2.get(weight, 1) * sign + if isnan(new_weight): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(v, w, k2, d2)}" + raise ValueError(msg) + push(frontier, (new_weight, next(c), v, w, k2, d2)) + else: + for w, d2 in G.adj[v].items(): + if w in visited: + continue + new_weight = d2.get(weight, 1) * sign + if isnan(new_weight): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(v, w, d2)}" + raise ValueError(msg) + push(frontier, (new_weight, next(c), v, w, d2)) + + +ALGORITHMS = { + "boruvka": boruvka_mst_edges, + "borůvka": boruvka_mst_edges, + "kruskal": kruskal_mst_edges, + "prim": prim_mst_edges, +} + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data") +def minimum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a minimum spanning forest of an undirected + weighted graph. + + A minimum spanning tree is a subgraph of the graph (a tree) + with the minimum sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. 
+ Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. + + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. + + Examples + -------- + >>> from networkx.algorithms import tree + + Find minimum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Find minimum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=True, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@not_implemented_for("directed") +@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data") +def maximum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a maximum spanning forest of an undirected + weighted graph. + + A maximum spanning tree is a subgraph of the graph (a tree) + with the maximum possible sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a maximum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. + Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. 
+ + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. + + Examples + -------- + >>> from networkx.algorithms import tree + + Find maximum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.maximum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [1, 2]] + + Find maximum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) # assign weight 2 to edge 0-3 + >>> mst = tree.maximum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=False, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def minimum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False): + """Returns a minimum spanning tree or forest on an undirected graph `G`. + + Parameters + ---------- + G : undirected graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + weight : str + Data key to use for edge weights. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is + 'kruskal'. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + G : NetworkX Graph + A minimum spanning tree or forest. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> T = nx.minimum_spanning_tree(G) + >>> sorted(T.edges(data=True)) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + There may be more than one tree with the same minimum or maximum weight. + See :mod:`networkx.tree.recognition` for more detailed definitions. + + Isolated nodes with self-loops are in the tree as edgeless isolated nodes. + + """ + edges = minimum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def partition_spanning_tree( + G, minimum=True, weight="weight", partition="partition", ignore_nan=False +): + """ + Find a spanning tree while respecting a partition of edges. 
+
+    Edges can be flagged as either `INCLUDED`, which must appear in the
+    returned tree, `EXCLUDED`, which cannot appear in the returned tree,
+    or `OPEN`.
+
+    This is used in the SpanningTreeIterator to create new partitions following
+    the algorithm of Sörensen and Janssens [1]_.
+
+    Parameters
+    ----------
+    G : undirected graph
+        An undirected graph.
+
+    minimum : bool (default: True)
+        Determines whether the returned tree is the minimum spanning tree of
+        the partition or the maximum one.
+
+    weight : str
+        Data key to use for edge weights.
+
+    partition : str
+        The key for the edge attribute containing the partition
+        data on the graph. Edges can be included, excluded or open using the
+        `EdgePartition` enum.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+
+    Returns
+    -------
+    G : NetworkX Graph
+        A minimum spanning tree using all of the included edges in the graph and
+        none of the excluded edges.
+
+    References
+    ----------
+    .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning
+        trees in order of increasing cost, Pesquisa Operacional, 2005-08,
+        Vol. 25 (2), p. 219-229,
+        https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
+    """
+    edges = kruskal_mst_edges(
+        G,
+        minimum,
+        weight,
+        keys=True,
+        data=True,
+        ignore_nan=ignore_nan,
+        partition=partition,
+    )
+    T = G.__class__()  # Same graph class as G
+    T.graph.update(G.graph)
+    T.add_nodes_from(G.nodes.items())
+    T.add_edges_from(edges)
+    return T
+
+
+@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
+def maximum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False):
+    """Returns a maximum spanning tree or forest on an undirected graph `G`.
+
+    Parameters
+    ----------
+    G : undirected graph
+        An undirected graph. If `G` is connected, then the algorithm finds a
+        spanning tree. Otherwise, a spanning forest is found.
+
+    weight : str
+        Data key to use for edge weights.
+
+    algorithm : string
+        The algorithm to use when finding a maximum spanning tree. Valid
+        choices are 'kruskal', 'prim', or 'boruvka'. The default is
+        'kruskal'.
+
+    ignore_nan : bool (default: False)
+        If a NaN is found as an edge weight normally an exception is raised.
+        If `ignore_nan is True` then that edge is ignored instead.
+
+
+    Returns
+    -------
+    G : NetworkX Graph
+        A maximum spanning tree or forest.
+
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> G.add_edge(0, 3, weight=2)
+    >>> T = nx.maximum_spanning_tree(G)
+    >>> sorted(T.edges(data=True))
+    [(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})]
+
+
+    Notes
+    -----
+    For Borůvka's algorithm, each edge must have a weight attribute, and
+    each edge weight must be distinct.
+
+    For the other algorithms, if the graph edges do not have a weight
+    attribute a default weight of 1 will be used.
+
+    There may be more than one tree with the same minimum or maximum weight.
+    See :mod:`networkx.tree.recognition` for more detailed definitions.
+
+    Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
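+
+    When `G` is disconnected a spanning forest is returned instead; a small
+    illustration added here for clarity (not part of the upstream docstring):
+
+    >>> F = nx.maximum_spanning_tree(nx.Graph([(0, 1), (2, 3)]))  # two components
+    >>> nx.is_forest(F), nx.is_tree(F)
+    (True, False)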
+ + """ + edges = maximum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + edges = list(edges) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@py_random_state(3) +@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True) +def random_spanning_tree(G, weight=None, *, multiplicative=True, seed=None): + """ + Sample a random spanning tree using the edges weights of `G`. + + This function supports two different methods for determining the + probability of the graph. If ``multiplicative=True``, the probability + is based on the product of edge weights, and if ``multiplicative=False`` + it is based on the sum of the edge weight. However, since it is + easier to determine the total weight of all spanning trees for the + multiplicative version, that is significantly faster and should be used if + possible. Additionally, setting `weight` to `None` will cause a spanning tree + to be selected with uniform probability. + + The function uses algorithm A8 in [1]_ . + + Parameters + ---------- + G : nx.Graph + An undirected version of the original graph. + + weight : string + The edge key for the edge attribute holding edge weight. + + multiplicative : bool, default=True + If `True`, the probability of each tree is the product of its edge weight + over the sum of the product of all the spanning trees in the graph. If + `False`, the probability is the sum of its edge weight over the sum of + the sum of weights for all spanning trees in the graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nx.Graph + A spanning tree using the distribution defined by the weight of the tree. + + References + ---------- + .. [1] V. Kulkarni, Generating random combinatorial objects, Journal of + Algorithms, 11 (1990), pp. 185–207 + """ + + def find_node(merged_nodes, node): + """ + We can think of clusters of contracted nodes as having one + representative in the graph. Each node which is not in merged_nodes + is still its own representative. Since a representative can be later + contracted, we need to recursively search though the dict to find + the final representative, but once we know it we can use path + compression to speed up the access of the representative for next time. + + This cannot be replaced by the standard NetworkX union_find since that + data structure will merge nodes with less representing nodes into the + one with more representing nodes but this function requires we merge + them using the order that contract_edges contracts using. + + Parameters + ---------- + merged_nodes : dict + The dict storing the mapping from node to representative + node + The node whose representative we seek + + Returns + ------- + The representative of the `node` + """ + if node not in merged_nodes: + return node + else: + rep = find_node(merged_nodes, merged_nodes[node]) + merged_nodes[node] = rep + return rep + + def prepare_graph(): + """ + For the graph `G`, remove all edges not in the set `V` and then + contract all edges in the set `U`. + + Returns + ------- + A copy of `G` which has had all edges not in `V` removed and all edges + in `U` contracted. 
+ """ + + # The result is a MultiGraph version of G so that parallel edges are + # allowed during edge contraction + result = nx.MultiGraph(incoming_graph_data=G) + + # Remove all edges not in V + edges_to_remove = set(result.edges()).difference(V) + result.remove_edges_from(edges_to_remove) + + # Contract all edges in U + # + # Imagine that you have two edges to contract and they share an + # endpoint like this: + # [0] ----- [1] ----- [2] + # If we contract (0, 1) first, the contraction function will always + # delete the second node it is passed so the resulting graph would be + # [0] ----- [2] + # and edge (1, 2) no longer exists but (0, 2) would need to be contracted + # in its place now. That is why I use the below dict as a merge-find + # data structure with path compression to track how the nodes are merged. + merged_nodes = {} + + for u, v in U: + u_rep = find_node(merged_nodes, u) + v_rep = find_node(merged_nodes, v) + # We cannot contract a node with itself + if u_rep == v_rep: + continue + nx.contracted_nodes(result, u_rep, v_rep, self_loops=False, copy=False) + merged_nodes[v_rep] = u_rep + + return merged_nodes, result + + def spanning_tree_total_weight(G, weight): + """ + Find the sum of weights of the spanning trees of `G` using the + appropriate `method`. + + This is easy if the chosen method is 'multiplicative', since we can + use Kirchhoff's Tree Matrix Theorem directly. However, with the + 'additive' method, this process is slightly more complex and less + computationally efficient as we have to find the number of spanning + trees which contain each possible edge in the graph. + + Parameters + ---------- + G : NetworkX Graph + The graph to find the total weight of all spanning trees on. + + weight : string + The key for the weight edge attribute of the graph. + + Returns + ------- + float + The sum of either the multiplicative or additive weight for all + spanning trees in the graph. + """ + if multiplicative: + return nx.total_spanning_tree_weight(G, weight) + else: + # There are two cases for the total spanning tree additive weight. + # 1. There is one edge in the graph. Then the only spanning tree is + # that edge itself, which will have a total weight of that edge + # itself. + if G.number_of_edges() == 1: + return G.edges(data=weight).__iter__().__next__()[2] + # 2. There are no edges or two or more edges in the graph. Then, we find the + # total weight of the spanning trees using the formula in the + # reference paper: take the weight of each edge and multiply it by + # the number of spanning trees which include that edge. This + # can be accomplished by contracting the edge and finding the + # multiplicative total spanning tree weight if the weight of each edge + # is assumed to be 1, which is conveniently built into networkx already, + # by calling total_spanning_tree_weight with weight=None. + # Note that with no edges the returned value is just zero. 
+ else: + total = 0 + for u, v, w in G.edges(data=weight): + total += w * nx.total_spanning_tree_weight( + nx.contracted_edge(G, edge=(u, v), self_loops=False), None + ) + return total + + if G.number_of_nodes() < 2: + # no edges in the spanning tree + return nx.empty_graph(G.nodes) + + U = set() + st_cached_value = 0 + V = set(G.edges()) + shuffled_edges = list(G.edges()) + seed.shuffle(shuffled_edges) + + for u, v in shuffled_edges: + e_weight = G[u][v][weight] if weight is not None else 1 + node_map, prepared_G = prepare_graph() + G_total_tree_weight = spanning_tree_total_weight(prepared_G, weight) + # Add the edge to U so that we can compute the total tree weight + # assuming we include that edge + # Now, if (u, v) cannot exist in G because it is fully contracted out + # of existence, then it by definition cannot influence G_e's Kirchhoff + # value. But, we also cannot pick it. + rep_edge = (find_node(node_map, u), find_node(node_map, v)) + # Check to see if the 'representative edge' for the current edge is + # in prepared_G. If so, then we can pick it. + if rep_edge in prepared_G.edges: + prepared_G_e = nx.contracted_edge( + prepared_G, edge=rep_edge, self_loops=False + ) + G_e_total_tree_weight = spanning_tree_total_weight(prepared_G_e, weight) + if multiplicative: + threshold = e_weight * G_e_total_tree_weight / G_total_tree_weight + else: + numerator = ( + st_cached_value + e_weight + ) * nx.total_spanning_tree_weight(prepared_G_e) + G_e_total_tree_weight + denominator = ( + st_cached_value * nx.total_spanning_tree_weight(prepared_G) + + G_total_tree_weight + ) + threshold = numerator / denominator + else: + threshold = 0.0 + z = seed.uniform(0.0, 1.0) + if z > threshold: + # Remove the edge from V since we did not pick it. + V.remove((u, v)) + else: + # Add the edge to U since we picked it. + st_cached_value += e_weight + U.add((u, v)) + # If we decide to keep an edge, it may complete the spanning tree. + if len(U) == G.number_of_nodes() - 1: + spanning_tree = nx.Graph() + spanning_tree.add_edges_from(U) + return spanning_tree + raise Exception(f"Something went wrong! Only {len(U)} edges in the spanning tree!") + + +class SpanningTreeIterator: + """ + Iterate over all spanning trees of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges) as well as a modified Kruskal's Algorithm + to generate minimum spanning trees which respect the partition of edges. + For spanning trees with the same weight, ties are broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning tree of the partition dict. 
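+        Because ``partition_dict`` is created with ``field(compare=False)``,
+        instances are ordered by ``mst_weight`` alone, which lets the priority
+        queue pop the cheapest unexplored partition first.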
+ """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return SpanningTreeIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, ignore_nan=False): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.Graph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + ignore_nan : bool, default = False + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + """ + self.G = G.copy() + self.G.__networkx_cache__ = None # Disable caching + self.weight = weight + self.minimum = minimum + self.ignore_nan = ignore_nan + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "SpanningTreeIterators super secret partition attribute name" + ) + + def __iter__(self): + """ + Returns + ------- + SpanningTreeIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + mst_weight = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition(mst_weight if self.minimum else -mst_weight, {}) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_tree = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ) + self._partition(partition, next_tree) + + self._clear_partition(next_tree) + return next_tree + + def _partition(self, partition, partition_tree): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. + partition_tree : nx.Graph + The minimum spanning tree of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_tree.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = EdgePartition.EXCLUDED + p2.partition_dict[e] = EdgePartition.INCLUDED + + self._write_partition(p1) + p1_mst = partition_spanning_tree( + self.G, + self.minimum, + self.weight, + self.partition_key, + self.ignore_nan, + ) + p1_mst_weight = p1_mst.size(weight=self.weight) + if nx.is_connected(p1_mst): + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. 
+ """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = EdgePartition.OPEN + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] + + +@nx._dispatchable(edge_attrs="weight") +def number_of_spanning_trees(G, *, root=None, weight=None): + """Returns the number of spanning trees in `G`. + + A spanning tree for an undirected graph is a tree that connects + all nodes in the graph. For a directed graph, the analog of a + spanning tree is called a (spanning) arborescence. The arborescence + includes a unique directed path from the `root` node to each other node. + The graph must be weakly connected, and the root must be a node + that includes all nodes as successors [3]_. Note that to avoid + discussing sink-roots and reverse-arborescences, we have reversed + the edge orientation from [3]_ and use the in-degree laplacian. + + This function (when `weight` is `None`) returns the number of + spanning trees for an undirected graph and the number of + arborescences from a single root node for a directed graph. + When `weight` is the name of an edge attribute which holds the + weight value of each edge, the function returns the sum over + all trees of the multiplicative weight of each tree. That is, + the weight of the tree is the product of its edge weights. + + Kirchoff's Tree Matrix Theorem states that any cofactor of the + Laplacian matrix of a graph is the number of spanning trees in the + graph. (Here we use cofactors for a diagonal entry so that the + cofactor becomes the determinant of the matrix with one row + and its matching column removed.) For a weighted Laplacian matrix, + the cofactor is the sum across all spanning trees of the + multiplicative weight of each tree. That is, the weight of each + tree is the product of its edge weights. The theorem is also + known as Kirchhoff's theorem [1]_ and the Matrix-Tree theorem [2]_. + + For directed graphs, a similar theorem (Tutte's Theorem) holds with + the cofactor chosen to be the one with row and column removed that + correspond to the root. The cofactor is the number of arborescences + with the specified node as root. And the weighted version gives the + sum of the arborescence weights with root `root`. The arborescence + weight is the product of its edge weights. + + Parameters + ---------- + G : NetworkX graph + + root : node + A node in the directed graph `G` that has all nodes as descendants. + (This is ignored for undirected graphs.) + + weight : string or None, optional (default=None) + The name of the edge attribute holding the edge weight. + If `None`, then each edge is assumed to have a weight of 1. + + Returns + ------- + Number + Undirected graphs: + The number of spanning trees of the graph `G`. + Or the sum of all spanning tree weights of the graph `G` + where the weight of a tree is the product of its edge weights. + Directed graphs: + The number of arborescences of `G` rooted at node `root`. + Or the sum of all arborescence weights of the graph `G` with + specified root where the weight of an arborescence is the product + of its edge weights. + + Raises + ------ + NetworkXPointlessConcept + If `G` does not contain any nodes. + + NetworkXError + If the graph `G` is directed and the root node + is not specified or is not in G. 
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> round(nx.number_of_spanning_trees(G))
+    125
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2, weight=2)
+    >>> G.add_edge(1, 3, weight=1)
+    >>> G.add_edge(2, 3, weight=1)
+    >>> round(nx.number_of_spanning_trees(G, weight="weight"))
+    5
+
+    Notes
+    -----
+    Self-loops are excluded. Multi-edges are merged into one edge whose
+    weight is the sum of the multi-edge weights.
+
+    References
+    ----------
+    .. [1] Wikipedia
+       "Kirchhoff's theorem."
+       https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem
+    .. [2] Kirchhoff, G. R.
+        Über die Auflösung der Gleichungen, auf welche man
+        bei der Untersuchung der linearen Vertheilung
+        Galvanischer Ströme geführt wird
+        Annalen der Physik und Chemie, vol. 72, pp. 497-508, 1847.
+    .. [3] Margoliash, J.
+        "Matrix-Tree Theorem for Directed Graphs"
+        https://www.math.uchicago.edu/~may/VIGRE/VIGRE2010/REUPapers/Margoliash.pdf
+    """
+    import numpy as np
+
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph G must contain at least one node.")
+
+    # undirected G
+    if not nx.is_directed(G):
+        if not nx.is_connected(G):
+            return 0
+        G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray()
+        return float(np.linalg.det(G_laplacian[1:, 1:]))
+
+    # directed G
+    if root is None:
+        raise nx.NetworkXError("Input `root` must be provided when G is directed")
+    if root not in G:
+        raise nx.NetworkXError("The node root is not in the graph G.")
+    if not nx.is_weakly_connected(G):
+        return 0
+
+    # Compute directed Laplacian matrix
+    nodelist = [root] + [n for n in G if n != root]
+    A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight)
+    D = np.diag(A.sum(axis=0))
+    G_laplacian = D - A
+
+    # Compute number of spanning trees
+    return float(np.linalg.det(G_laplacian[1:, 1:]))
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/operations.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/operations.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4368d6a322a233aa30619d031e5d27a02973e5e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/operations.py
@@ -0,0 +1,128 @@
+"""Operations on trees."""
+from functools import partial
+from itertools import accumulate, chain
+
+import networkx as nx
+
+__all__ = ["join", "join_trees"]
+
+
+def join(rooted_trees, label_attribute=None):
+    """A deprecated name for `join_trees`
+
+    Returns a new rooted tree with a root node joined with the roots
+    of each of the given rooted trees.
+
+    .. deprecated:: 3.2
+
+       `join` is deprecated in NetworkX v3.2 and will be removed in v3.4.
+       It has been renamed join_trees with the same syntax/interface.
+
+    """
+    import warnings
+
+    warnings.warn(
+        "The function `join` is deprecated and is renamed `join_trees`.\n"
+        "The ``join`` function itself will be removed in v3.4",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    return join_trees(rooted_trees, label_attribute=label_attribute)
+
+
+# Argument types don't match dispatching, but allow manual selection of backend
+@nx._dispatchable(graphs=None, returns_graph=True)
+def join_trees(rooted_trees, *, label_attribute=None, first_label=0):
+    """Returns a new rooted tree made by joining `rooted_trees`
+
+    Constructs a new tree by joining each tree in `rooted_trees`.
+    A new root node is added and connected to each of the roots
+    of the input trees. While copying the nodes from the trees,
+    relabeling to integers occurs.
If the `label_attribute` is provided, + the old node labels will be stored in the new tree under this attribute. + + Parameters + ---------- + rooted_trees : list + A list of pairs in which each left element is a NetworkX graph + object representing a tree and each right element is the root + node of that tree. The nodes of these trees will be relabeled to + integers. + + label_attribute : str + If provided, the old node labels will be stored in the new tree + under this node attribute. If not provided, the original labels + of the nodes in the input trees are not stored. + + first_label : int, optional (default=0) + Specifies the label for the new root node. If provided, the root node of the joined tree + will have this label. If not provided, the root node will default to a label of 0. + + Returns + ------- + NetworkX graph + The rooted tree resulting from joining the provided `rooted_trees`. The new tree has a root node + labeled as specified by `first_label` (defaulting to 0 if not provided). Subtrees from the input + `rooted_trees` are attached to this new root node. Each non-root node, if the `label_attribute` + is provided, has an attribute that indicates the original label of the node in the input tree. + + Notes + ----- + Trees are stored in NetworkX as NetworkX Graphs. There is no specific + enforcement of the fact that these are trees. Testing for each tree + can be done using :func:`networkx.is_tree`. + + Graph, edge, and node attributes are propagated from the given + rooted trees to the created tree. If there are any overlapping graph + attributes, those from later trees will overwrite those from earlier + trees in the tuple of positional arguments. + + Examples + -------- + Join two full balanced binary trees of height *h* to get a full + balanced binary tree of depth *h* + 1:: + + >>> h = 4 + >>> left = nx.balanced_tree(2, h) + >>> right = nx.balanced_tree(2, h) + >>> joined_tree = nx.join([(left, 0), (right, 0)]) + >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1)) + True + + """ + if not rooted_trees: + return nx.empty_graph(1) + + # Unzip the zipped list of (tree, root) pairs. + trees, roots = zip(*rooted_trees) + + # The join of the trees has the same type as the type of the first tree. + R = type(trees[0])() + + lengths = (len(tree) for tree in trees[:-1]) + first_labels = list(accumulate(lengths, initial=first_label + 1)) + + new_roots = [] + for tree, root, first_node in zip(trees, roots, first_labels): + new_root = first_node + list(tree.nodes()).index(root) + new_roots.append(new_root) + + # Relabel the nodes so that their union is the integers starting at first_label. + relabel = partial( + nx.convert_node_labels_to_integers, label_attribute=label_attribute + ) + new_trees = [ + relabel(tree, first_label=first_label) + for tree, first_label in zip(trees, first_labels) + ] + + # Add all sets of nodes and edges, attributes + for tree in new_trees: + R.update(tree) + + # Finally, join the subtrees at the root. We know first_label is unused by the way we relabeled the subtrees. 
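+    #
+    # Added walk-through (illustrative, not from the original source): joining
+    # two 3-node paths rooted at 0 with first_label=0 relabels them to 1..3 and
+    # 4..6, so the new root 0 gets edges (0, 1) and (0, 4).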
+    R.add_node(first_label)
+    R.add_edges_from((first_label, root) for root in new_roots)
+
+    return R
diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/recognition.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/recognition.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9eae98707a6889213ff8b93887c481ba59215a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/recognition.py
@@ -0,0 +1,273 @@
+"""
+Recognition Tests
+=================
+
+A *forest* is an acyclic, undirected graph, and a *tree* is a connected forest.
+Depending on the subfield, there are various conventions for generalizing these
+definitions to directed graphs.
+
+In one convention, directed variants of forest and tree are defined in an
+identical manner, except that the direction of the edges is ignored. In effect,
+each directed edge is treated as a single undirected edge. Then, additional
+restrictions are imposed to define *branchings* and *arborescences*.
+
+In another convention, directed variants of forest and tree correspond to
+the previous convention's branchings and arborescences, respectively. Then two
+new terms, *polyforest* and *polytree*, are defined to correspond to the other
+convention's forest and tree.
+
+Summarizing::
+
+   +-----------------------------+
+   | Convention A | Convention B |
+   +=============================+
+   | forest       | polyforest   |
+   | tree         | polytree     |
+   | branching    | forest       |
+   | arborescence | tree         |
+   +-----------------------------+
+
+Each convention has its reasons. The first convention emphasizes definitional
+similarity in that directed forests and trees are only concerned with
+acyclicity and do not have an in-degree constraint, just as their undirected
+counterparts do not. The second convention emphasizes functional similarity
+in the sense that the directed analog of a spanning tree is a spanning
+arborescence. That is, take any spanning tree and choose one node as the root.
+Then every edge is assigned a direction such that there is a directed path from
+the root to every other node. The result is a spanning arborescence.
+
+NetworkX follows convention "A". Explicitly, these are:
+
+undirected forest
+   An undirected graph with no undirected cycles.
+
+undirected tree
+   A connected, undirected forest.
+
+directed forest
+   A directed graph with no undirected cycles. Equivalently, the underlying
+   graph structure (which ignores edge orientations) is an undirected forest.
+   In convention B, this is known as a polyforest.
+
+directed tree
+   A weakly connected, directed forest. Equivalently, the underlying graph
+   structure (which ignores edge orientations) is an undirected tree. In
+   convention B, this is known as a polytree.
+
+branching
+   A directed forest with each node having, at most, one parent. So the maximum
+   in-degree is equal to 1. In convention B, this is known as a forest.
+
+arborescence
+   A directed tree with each node having, at most, one parent. So the maximum
+   in-degree is equal to 1. In convention B, this is known as a tree.
+
+For trees and arborescences, the adjective "spanning" may be added to designate
+that the graph, when considered as a forest/branching, consists of a single
+tree/arborescence that includes all nodes in the graph. It is true, by
+definition, that every tree/arborescence is spanning with respect to the nodes
+that define the tree/arborescence and so, it might seem redundant to introduce
+the notion of "spanning".
However, the nodes may represent a subset of +nodes from a larger graph, and it is in this context that the term "spanning" +becomes a useful notion. + +""" + +import networkx as nx + +__all__ = ["is_arborescence", "is_branching", "is_forest", "is_tree"] + + +@nx.utils.not_implemented_for("undirected") +@nx._dispatchable +def is_arborescence(G): + """ + Returns True if `G` is an arborescence. + + An arborescence is a directed tree with maximum in-degree equal to 1. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is an arborescence. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)]) + >>> nx.is_arborescence(G) + True + >>> G.remove_edge(0, 1) + >>> G.add_edge(1, 2) # maximum in-degree is 2 + >>> nx.is_arborescence(G) + False + + Notes + ----- + In another convention, an arborescence is known as a *tree*. + + See Also + -------- + is_tree + + """ + return is_tree(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx.utils.not_implemented_for("undirected") +@nx._dispatchable +def is_branching(G): + """ + Returns True if `G` is a branching. + + A branching is a directed forest with maximum in-degree equal to 1. + + Parameters + ---------- + G : directed graph + The directed graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a branching. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + >>> nx.is_branching(G) + True + >>> G.remove_edge(2, 3) + >>> G.add_edge(3, 1) # maximum in-degree is 2 + >>> nx.is_branching(G) + False + + Notes + ----- + In another convention, a branching is also known as a *forest*. + + See Also + -------- + is_forest + + """ + return is_forest(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx._dispatchable +def is_forest(G): + """ + Returns True if `G` is a forest. + + A forest is a graph with no undirected cycles. + + For directed graphs, `G` is a forest if the underlying graph is a forest. + The underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a forest. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_forest(G) + True + >>> G.add_edge(4, 1) + >>> nx.is_forest(G) + False + + Notes + ----- + In another convention, a directed forest is known as a *polyforest* and + then *forest* corresponds to a *branching*. + + See Also + -------- + is_branching + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + components = (G.subgraph(c) for c in nx.weakly_connected_components(G)) + else: + components = (G.subgraph(c) for c in nx.connected_components(G)) + + return all(len(c) - 1 == c.number_of_edges() for c in components) + + +@nx._dispatchable +def is_tree(G): + """ + Returns True if `G` is a tree. + + A tree is a connected graph with no undirected cycles. + + For directed graphs, `G` is a tree if the underlying graph is a tree. The + underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a tree. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_tree(G) # n-1 edges + True + >>> G.add_edge(3, 4) + >>> nx.is_tree(G) # n edges + False + + Notes + ----- + In another convention, a directed tree is known as a *polytree* and then + *tree* corresponds to an *arborescence*. + + See Also + -------- + is_arborescence + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + + # A connected graph with no cycles has n-1 edges. + return len(G) - 1 == G.number_of_edges() and is_connected(G) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c08ec3af66e46cc49520c825890a9b7ab33b4156 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79b9c5c71738d6e00a3341680385473942c98553 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ff7996b748bbe104674cd1929873d0153311341 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_branchings.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_branchings.py new file mode 100644 index 0000000000000000000000000000000000000000..9058606714f6a75730e96a7cb3c3dfd6b9a5de6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_branchings.py @@ -0,0 +1,658 @@ +import math +from operator import itemgetter + +import pytest + +np = pytest.importorskip("numpy") + +import networkx as nx +from networkx.algorithms.tree import branchings, recognition + +# +# Explicitly discussed examples from Edmonds paper. +# + +# Used in Figures A-F. 
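+# (Added note: entry [i, j] of G_array below is the weight of directed edge
+# i -> j, with 0 meaning "no edge" for nx.from_numpy_array.)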
+# +# fmt: off +G_array = np.array([ + # 0 1 2 3 4 5 6 7 8 + [0, 0, 12, 0, 12, 0, 0, 0, 0], # 0 + [4, 0, 0, 0, 0, 13, 0, 0, 0], # 1 + [0, 17, 0, 21, 0, 12, 0, 0, 0], # 2 + [5, 0, 0, 0, 17, 0, 18, 0, 0], # 3 + [0, 0, 0, 0, 0, 0, 0, 12, 0], # 4 + [0, 0, 0, 0, 0, 0, 14, 0, 12], # 5 + [0, 0, 21, 0, 0, 0, 0, 0, 15], # 6 + [0, 0, 0, 19, 0, 0, 15, 0, 0], # 7 + [0, 0, 0, 0, 0, 0, 0, 18, 0], # 8 +], dtype=int) + +# Two copies of the graph from the original paper as disconnected components +G_big_array = np.zeros(np.array(G_array.shape) * 2, dtype=int) +G_big_array[:G_array.shape[0], :G_array.shape[1]] = G_array +G_big_array[G_array.shape[0]:, G_array.shape[1]:] = G_array + +# fmt: on + + +def G1(): + G = nx.from_numpy_array(G_array, create_using=nx.MultiDiGraph) + return G + + +def G2(): + # Now we shift all the weights by -10. + # Should not affect optimal arborescence, but does affect optimal branching. + Garr = G_array.copy() + Garr[np.nonzero(Garr)] -= 10 + G = nx.from_numpy_array(Garr, create_using=nx.MultiDiGraph) + return G + + +# An optimal branching for G1 that is also a spanning arborescence. So it is +# also an optimal spanning arborescence. +# +optimal_arborescence_1 = [ + (0, 2, 12), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 4, 17), + (3, 6, 18), + (6, 8, 15), + (8, 7, 18), +] + +# For G2, the optimal branching of G1 (with shifted weights) is no longer +# an optimal branching, but it is still an optimal spanning arborescence +# (just with shifted weights). An optimal branching for G2 is similar to what +# appears in figure G (this is greedy_subopt_branching_1a below), but with the +# edge (3, 0, 5), which is now (3, 0, -5), removed. Thus, the optimal branching +# is not a spanning arborescence. The code finds optimal_branching_2a. +# An alternative and equivalent branching is optimal_branching_2b. We would +# need to modify the code to iterate through all equivalent optimal branchings. +# +# These are maximal branchings or arborescences. +optimal_branching_2a = [ + (5, 6, 4), + (6, 2, 11), + (6, 8, 5), + (8, 7, 8), + (2, 1, 7), + (2, 3, 11), + (3, 4, 7), +] +optimal_branching_2b = [ + (8, 7, 8), + (7, 3, 9), + (3, 4, 7), + (3, 6, 8), + (6, 2, 11), + (2, 1, 7), + (1, 5, 3), +] +optimal_arborescence_2 = [ + (0, 2, 2), + (2, 1, 7), + (2, 3, 11), + (1, 5, 3), + (3, 4, 7), + (3, 6, 8), + (6, 8, 5), + (8, 7, 8), +] + +# Two suboptimal maximal branchings on G1 obtained from a greedy algorithm. +# 1a matches what is shown in Figure G in Edmonds's paper. +greedy_subopt_branching_1a = [ + (5, 6, 14), + (6, 2, 21), + (6, 8, 15), + (8, 7, 18), + (2, 1, 17), + (2, 3, 21), + (3, 0, 5), + (3, 4, 17), +] +greedy_subopt_branching_1b = [ + (8, 7, 18), + (7, 6, 15), + (6, 2, 21), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 0, 5), + (3, 4, 17), +] + + +def build_branching(edges, double=False): + G = nx.DiGraph() + for u, v, weight in edges: + G.add_edge(u, v, weight=weight) + if double: + G.add_edge(u + 9, v + 9, weight=weight) + return G + + +def sorted_edges(G, attr="weight", default=1): + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + edges = sorted(edges, key=lambda x: (x[2], x[1], x[0])) + return edges + + +def assert_equal_branchings(G1, G2, attr="weight", default=1): + edges1 = list(G1.edges(data=True)) + edges2 = list(G2.edges(data=True)) + assert len(edges1) == len(edges2) + + # Grab the weights only. 
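+    # (More precisely, sorted_edges returns (u, v, weight) triples ordered by
+    # weight, then v, then u.)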
+    e1 = sorted_edges(G1, attr, default)
+    e2 = sorted_edges(G2, attr, default)
+
+    for a, b in zip(e1, e2):
+        assert a[:2] == b[:2]
+        np.testing.assert_almost_equal(a[2], b[2])
+
+
+################
+
+
+def test_optimal_branching1():
+    G = build_branching(optimal_arborescence_1)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 131
+
+
+def test_optimal_branching2a():
+    G = build_branching(optimal_branching_2a)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 53
+
+
+def test_optimal_branching2b():
+    G = build_branching(optimal_branching_2b)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 53
+
+
+def test_optimal_arborescence2():
+    G = build_branching(optimal_arborescence_2)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 51
+
+
+def test_greedy_suboptimal_branching1a():
+    G = build_branching(greedy_subopt_branching_1a)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 128
+
+
+def test_greedy_suboptimal_branching1b():
+    G = build_branching(greedy_subopt_branching_1b)
+    assert recognition.is_arborescence(G)
+    assert branchings.branching_weight(G) == 127
+
+
+def test_greedy_max1():
+    # Standard test.
+    #
+    G = G1()
+    B = branchings.greedy_branching(G)
+    # There are only two possible greedy branchings. The sorting is such
+    # that it should equal the second suboptimal branching: 1b.
+    B_ = build_branching(greedy_subopt_branching_1b)
+    assert_equal_branchings(B, B_)
+
+
+def test_greedy_branching_kwarg_kind():
+    G = G1()
+    with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."):
+        B = branchings.greedy_branching(G, kind="lol")
+
+
+def test_greedy_branching_for_unsortable_nodes():
+    G = nx.DiGraph()
+    G.add_weighted_edges_from([((2, 3), 5, 1), (3, "a", 1), (2, 4, 5)])
+    edges = [(u, v, data.get("weight", 1)) for (u, v, data) in G.edges(data=True)]
+    with pytest.raises(TypeError):
+        edges.sort(key=itemgetter(2, 0, 1), reverse=True)
+    B = branchings.greedy_branching(G, kind="max").edges(data=True)
+    assert list(B) == [
+        ((2, 3), 5, {"weight": 1}),
+        (3, "a", {"weight": 1}),
+        (2, 4, {"weight": 5}),
+    ]
+
+
+def test_greedy_max2():
+    # Different default weight.
+    #
+    G = G1()
+    del G[1][0][0]["weight"]
+    B = branchings.greedy_branching(G, default=6)
+    # Chosen so that edge (3,0,5) is not selected and (1,0,6) is instead.
+
+    edges = [
+        (1, 0, 6),
+        (1, 5, 13),
+        (7, 6, 15),
+        (2, 1, 17),
+        (3, 4, 17),
+        (8, 7, 18),
+        (2, 3, 21),
+        (6, 2, 21),
+    ]
+    B_ = build_branching(edges)
+    assert_equal_branchings(B, B_)
+
+
+def test_greedy_max3():
+    # All equal weights.
+    #
+    G = G1()
+    B = branchings.greedy_branching(G, attr=None)
+
+    # This is mostly arbitrary...the output was generated by running the algo.
+ edges = [ + (2, 1, 1), + (3, 0, 1), + (3, 4, 1), + (5, 8, 1), + (6, 2, 1), + (7, 3, 1), + (7, 6, 1), + (8, 7, 1), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_, default=1) + + +def test_greedy_min(): + G = G1() + B = branchings.greedy_branching(G, kind="min") + + edges = [ + (1, 0, 4), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (7, 3, 19), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_edmonds1_maxbranch(): + G = G1() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_maxarbor(): + G = G1() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_minimal_branching(): + # graph will have something like a minimum arborescence but no spanning one + G = nx.from_numpy_array(G_big_array, create_using=nx.DiGraph) + B = branchings.minimal_branching(G) + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + B_ = build_branching(edges, double=True) + assert_equal_branchings(B, B_) + + +def test_edmonds2_maxbranch(): + G = G2() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_branching_2a) + assert_equal_branchings(x, x_) + + +def test_edmonds2_maxarbor(): + G = G2() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_2) + assert_equal_branchings(x, x_) + + +def test_edmonds2_minarbor(): + G = G1() + x = branchings.minimum_spanning_arborescence(G) + # This was obtained from algorithm. Need to verify it independently. + # Branch weight is: 96 + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch1(): + G = G1() + x = branchings.minimum_branching(G) + edges = [] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch2(): + G = G1() + G.add_edge(8, 9, weight=-10) + x = branchings.minimum_branching(G) + edges = [(8, 9, -10)] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +# Need more tests + + +def test_mst(): + # Make sure we get the same results for undirected graphs. + # Example from: https://en.wikipedia.org/wiki/Kruskal's_algorithm + G = nx.Graph() + edgelist = [ + (0, 3, [("weight", 5)]), + (0, 1, [("weight", 7)]), + (1, 3, [("weight", 9)]), + (1, 2, [("weight", 8)]), + (1, 4, [("weight", 7)]), + (3, 4, [("weight", 15)]), + (3, 5, [("weight", 6)]), + (2, 4, [("weight", 5)]), + (4, 5, [("weight", 8)]), + (4, 6, [("weight", 9)]), + (5, 6, [("weight", 11)]), + ] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + edges = [ + ({0, 1}, 7), + ({0, 3}, 5), + ({3, 5}, 6), + ({1, 4}, 7), + ({4, 2}, 5), + ({4, 6}, 9), + ] + + assert x.number_of_edges() == len(edges) + for u, v, d in x.edges(data=True): + assert ({u, v}, d["weight"]) in edges + + +def test_mixed_nodetypes(): + # Smoke test to make sure no TypeError is raised for mixed node types. 
+ G = nx.Graph() + edgelist = [(0, 3, [("weight", 5)]), (0, "1", [("weight", 5)])] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + +def test_edmonds1_minbranch(): + # Using -G_array and min should give the same as optimal_arborescence_1, + # but with all edges negative. + edges = [(u, v, -w) for (u, v, w) in optimal_arborescence_1] + + G = nx.from_numpy_array(-G_array, create_using=nx.DiGraph) + + # Quickly make sure max branching is empty. + x = branchings.maximum_branching(G) + x_ = build_branching([]) + assert_equal_branchings(x, x_) + + # Now test the min branching. + x = branchings.minimum_branching(G) + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edge_attribute_preservation_normal_graph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for normal graphs. + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1]["otherattr"] == 1 + assert B[0][1]["otherattr2"] == 3 + + +def test_edge_attribute_preservation_multigraph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for multigraphs. + G = nx.MultiGraph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist * 2) # Make sure we have duplicate edge paths + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1][0]["otherattr"] == 1 + assert B[0][1][0]["otherattr2"] == 3 + + +# TODO remove with Edmonds +def test_Edmond_kind(): + G = nx.MultiGraph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist * 2) # Make sure we have duplicate edge paths + ed = branchings.Edmonds(G) + with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."): + ed.find_optimum(kind="lol", preserve_attrs=True) + + +# TODO remove with MultiDiGraph_EdgeKey +def test_MultiDiGraph_EdgeKey(): + # test if more than one edges has the same key + G = branchings.MultiDiGraph_EdgeKey() + G.add_edge(1, 2, "A") + with pytest.raises(Exception, match="Key 'A' is already in use."): + G.add_edge(3, 4, "A") + # test if invalid edge key was specified + with pytest.raises(KeyError, match="Invalid edge key 'B'"): + G.remove_edge_with_key("B") + # test remove_edge_with_key works + if G.remove_edge_with_key("A"): + assert list(G.edges(data=True)) == [] + # test that remove_edges_from doesn't work + G.add_edge(1, 3, "A") + with pytest.raises(NotImplementedError): + G.remove_edges_from([(1, 3)]) + + +def test_edge_attribute_discard(): + # Test that edge attributes are discarded if we do not specify to keep them + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, 
preserve_attrs=False) + + edge_dict = B[0][1] + with pytest.raises(KeyError): + _ = edge_dict["otherattr"] + + +def test_partition_spanning_arborescence(): + """ + Test that we can generate minimum spanning arborescences which respect the + given partition. + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + G[3][0]["partition"] = nx.EdgePartition.EXCLUDED + G[2][3]["partition"] = nx.EdgePartition.INCLUDED + G[7][3]["partition"] = nx.EdgePartition.EXCLUDED + G[0][2]["partition"] = nx.EdgePartition.EXCLUDED + G[6][2]["partition"] = nx.EdgePartition.INCLUDED + + actual_edges = [ + (0, 4, 12), + (1, 0, 4), + (1, 5, 13), + (2, 3, 21), + (4, 7, 12), + (5, 6, 14), + (5, 8, 12), + (6, 2, 21), + ] + + B = branchings.minimum_spanning_arborescence(G, partition="partition") + assert_equal_branchings(build_branching(actual_edges), B) + + +def test_arborescence_iterator_min(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arborescences in this graph. + This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arborescences + * The weight of the arborescences is non-strictly increasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator(G): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_max(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arborescences in this graph. + This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arborescences + * The weight of the arborescences is non-strictly decreasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = math.inf + for B in branchings.ArborescenceIterator(G, minimum=False): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight <= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_initial_partition(): + """ + Tests the arborescence iterator with three included edges and three excluded + in the initial partition. + + A brute force method similar to the one used in the above tests found that + there are 16 arborescences which contain the included edges and not the + excluded edges. 
+ """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + included_edges = [(1, 0), (5, 6), (8, 7)] + excluded_edges = [(0, 2), (3, 6), (1, 5)] + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator( + G, init_partition=(included_edges, excluded_edges) + ): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + for e in included_edges: + assert e in B.edges + for e in excluded_edges: + assert e not in B.edges + assert arborescence_count == 16 + + +def test_branchings_with_default_weights(): + """ + Tests that various brancing algorithms work on graphs without weights. + For more information, see issue #7279. + """ + graph = nx.erdos_renyi_graph(10, p=0.2, directed=True, seed=123) + + assert all( + "weight" not in d for (u, v, d) in graph.edges(data=True) + ), "test is for graphs without a weight attribute" + + # Calling these functions will modify graph inplace to add weights + # copy the graph to avoid this. + nx.minimum_spanning_arborescence(graph.copy()) + nx.maximum_spanning_arborescence(graph.copy()) + nx.minimum_branching(graph.copy()) + nx.maximum_branching(graph.copy()) + nx.algorithms.tree.minimal_branching(graph.copy()) + nx.algorithms.tree.branching_weight(graph.copy()) + nx.algorithms.tree.greedy_branching(graph.copy()) + + assert all( + "weight" not in d for (u, v, d) in graph.edges(data=True) + ), "The above calls should not modify the initial graph in-place" diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_coding.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_coding.py new file mode 100644 index 0000000000000000000000000000000000000000..c695fea5fdab4f09f79c69f8837bb07f65d16540 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_coding.py @@ -0,0 +1,113 @@ +"""Unit tests for the :mod:`~networkx.algorithms.tree.coding` module.""" +from itertools import product + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPruferSequence: + """Unit tests for the Prüfer sequence encoding and decoding + functions. + + """ + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_prufer_sequence(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.null_graph()) + + def test_trivial_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.trivial_graph()) + + def test_bad_integer_labels(self): + with pytest.raises(KeyError): + T = nx.Graph(nx.utils.pairwise("abc")) + nx.to_prufer_sequence(T) + + def test_encoding(self): + """Tests for encoding a tree as a Prüfer sequence using the + iterative strategy. + + """ + # Example from Wikipedia. + tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]) + sequence = nx.to_prufer_sequence(tree) + assert sequence == [3, 3, 3, 4] + + def test_decoding(self): + """Tests for decoding a tree from a Prüfer sequence.""" + # Example from Wikipedia. + sequence = [3, 3, 3, 4] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(6))) + edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + assert edges_equal(list(tree.edges()), edges) + + def test_decoding2(self): + # Example from "An Optimal Algorithm for Prufer Codes". 
+ sequence = [2, 4, 0, 1, 3, 3] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(8))) + edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)] + assert edges_equal(list(tree.edges()), edges) + + def test_inverse(self): + """Tests that the encoding and decoding functions are inverses.""" + for T in nx.nonisomorphic_trees(4): + T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T)) + assert nodes_equal(list(T), list(T2)) + assert edges_equal(list(T.edges()), list(T2.edges())) + + for seq in product(range(4), repeat=2): + seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq)) + assert list(seq) == seq2 + + +class TestNestedTuple: + """Unit tests for the nested tuple encoding and decoding functions.""" + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_nested_tuple(G, 0) + + def test_unknown_root(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.to_nested_tuple(G, "bogus") + + def test_encoding(self): + T = nx.full_rary_tree(2, 2**3 - 1) + expected = (((), ()), ((), ())) + actual = nx.to_nested_tuple(T, 0) + assert nodes_equal(expected, actual) + + def test_canonical_form(self): + T = nx.Graph() + T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + T.add_edges_from([(1, 4), (1, 5)]) + T.add_edges_from([(3, 6), (3, 7)]) + root = 0 + actual = nx.to_nested_tuple(T, root, canonical_form=True) + expected = ((), ((), ()), ((), ())) + assert actual == expected + + def test_decoding(self): + balanced = (((), ()), ((), ())) + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.from_nested_tuple(balanced) + assert nx.is_isomorphic(expected, actual) + + def test_sensible_relabeling(self): + balanced = (((), ()), ((), ())) + T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + assert nodes_equal(list(T), list(range(2**3 - 1))) + assert edges_equal(list(T.edges()), edges) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_decomposition.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_decomposition.py new file mode 100644 index 0000000000000000000000000000000000000000..8c376053794537611f46c038ed074eb92b1ba676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_decomposition.py @@ -0,0 +1,79 @@ +import networkx as nx +from networkx.algorithms.tree.decomposition import junction_tree + + +def test_junction_tree_directed_confounders(): + B = nx.DiGraph() + B.add_edges_from([("A", "C"), ("B", "C"), ("C", "D"), ("C", "E")]) + + G = junction_tree(B) + J = nx.Graph() + J.add_edges_from( + [ + (("C", "E"), ("C",)), + (("C",), ("A", "B", "C")), + (("A", "B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_nodes(): + B = nx.DiGraph() + B.add_nodes_from([("A", "B", "C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B", "C", "D")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_cascade(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("B", "C"), ("C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "B"), ("B",)), + (("B",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_edges(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("C", "D"), ("E", "F")]) + G = 
junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B"), ("C", "D"), ("E", "F")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_undirected(): + B = nx.Graph() + B.add_edges_from([("A", "C"), ("A", "D"), ("B", "C"), ("C", "E")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "D"), ("A",)), + (("A",), ("A", "C")), + (("A", "C"), ("C",)), + (("C",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "E")), + ] + ) + + assert nx.is_isomorphic(G, J) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_mst.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_mst.py new file mode 100644 index 0000000000000000000000000000000000000000..65000c4257f63a860873c942039a792479c86625 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_mst.py @@ -0,0 +1,856 @@ +"""Unit tests for the :mod:`networkx.algorithms.tree.mst` module.""" + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def test_unknown_algorithm(): + with pytest.raises(ValueError): + nx.minimum_spanning_tree(nx.Graph(), algorithm="random") + with pytest.raises( + ValueError, match="random is not a valid choice for an algorithm." + ): + nx.maximum_spanning_edges(nx.Graph(), algorithm="random") + + +class MinimumSpanningTreeTestBase: + """Base class for test classes for minimum spanning tree algorithms. + This class contains some common tests that will be inherited by + subclasses. Each subclass must have a class attribute + :data:`algorithm` that is a string representing the algorithm to + run, as described under the ``algorithm`` keyword argument for the + :func:`networkx.minimum_spanning_edges` function. Subclasses can + then implement any algorithm-specific tests. + """ + + def setup_method(self, method): + """Creates an example graph and stores the expected minimum and + maximum spanning tree edges. + """ + # This stores the class attribute `algorithm` in an instance attribute. + self.algo = self.algorithm + # This example graph comes from Wikipedia: + # https://en.wikipedia.org/wiki/Kruskal's_algorithm + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + self.minimum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (0, 3, {"weight": 5}), + (1, 4, {"weight": 7}), + (2, 4, {"weight": 5}), + (3, 5, {"weight": 6}), + (4, 6, {"weight": 9}), + ] + self.maximum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (1, 2, {"weight": 8}), + (1, 3, {"weight": 9}), + (3, 4, {"weight": 15}), + (4, 6, {"weight": 9}), + (5, 6, {"weight": 11}), + ] + + def test_minimum_edges(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_edges(self): + edges = nx.maximum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. 
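+ # The (min(u, v), max(u, v)) normalization assumes the node labels are
+ # mutually comparable, which holds for the integer labels used here.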
+ actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_without_data(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo, data=False) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + + def test_nan_weights(self): + # Edges with NaN weights never appear in the spanning tree. See #2164. + G = self.G + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + # Now test that an exception is raised + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test that ignore_nan defaults to False + edges = nx.minimum_spanning_edges(G, algorithm=self.algo, data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_MultiGraph(self): + G = nx.MultiGraph() + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm="prim", data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test that ignore_nan defaults to False + edges = nx.minimum_spanning_edges(G, algorithm="prim", data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_order(self): + # Now try again with a NaN edge at the beginning of G.nodes + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_edge(0, 7, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_isolated_node(self): + # Now try again with an isolated node + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_node(0) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_minimum_tree(self): + T = nx.minimum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_tree(self): + T = nx.maximum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_disconnected(self): + G = nx.Graph([(0, 1, {"weight": 1}), (2, 3, {"weight": 2})]) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(list(T), list(range(4))) + assert 
edges_equal(list(T.edges()), [(0, 1), (2, 3)]) + + def test_empty_graph(self): + G = nx.empty_graph(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(sorted(T), list(range(3))) + assert T.number_of_edges() == 0 + + def test_attributes(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1, color="red", distance=7) + G.add_edge(2, 3, weight=1, color="green", distance=2) + G.add_edge(1, 3, weight=10, color="blue", distance=1) + G.graph["foo"] = "bar" + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert T.graph == G.graph + assert nodes_equal(T, G) + for u, v in T.edges(): + assert T.adj[u][v] == G.adj[u][v] + + def test_weight_attribute(self): + G = nx.Graph() + G.add_edge(0, 1, weight=1, distance=7) + G.add_edge(0, 2, weight=30, distance=1) + G.add_edge(1, 2, weight=1, distance=1) + G.add_node(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 2), (1, 2)]) + T = nx.maximum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 1), (0, 2)]) + + +class TestBoruvka(MinimumSpanningTreeTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Borůvka's algorithm. + """ + + algorithm = "boruvka" + + def test_unicode_name(self): + """Tests that using a Unicode string can correctly indicate + Borůvka's algorithm. + """ + edges = nx.minimum_spanning_edges(self.G, algorithm="borůvka") + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + +class MultigraphMSTTestBase(MinimumSpanningTreeTestBase): + # Abstract class + + def test_multigraph_keys_min(self): + """Tests that the minimum spanning edges of a multigraph + preserve edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + min_edges = nx.minimum_spanning_edges + mst_edges = min_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "b")], list(mst_edges)) + + def test_multigraph_keys_max(self): + """Tests that the maximum spanning edges of a multigraph + preserve edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + max_edges = nx.maximum_spanning_edges + mst_edges = max_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "a")], list(mst_edges)) + + +class TestKruskal(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Kruskal's algorithm. 
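+ Kruskal's algorithm scans edges in nondecreasing weight order and keeps an
+ edge only when its endpoints lie in different components, so edge keys and
+ attribute dicts pass through to the output untouched; the multigraph key
+ tests below depend on that behavior.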
+ """ + + algorithm = "kruskal" + + def test_key_data_bool(self): + """Tests that the keys and data values are included in + MST edges based on whether keys and data parameters are + true or false""" + G = nx.MultiGraph() + G.add_edge(1, 2, key=1, weight=2) + G.add_edge(1, 2, key=2, weight=3) + G.add_edge(3, 2, key=1, weight=2) + G.add_edge(3, 1, key=1, weight=4) + + # keys are included and data is not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=False + ) + assert edges_equal([(1, 2, 1), (2, 3, 1)], list(mst_edges)) + + # keys are not included and data is included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=True + ) + assert edges_equal( + [(1, 2, {"weight": 2}), (2, 3, {"weight": 2})], list(mst_edges) + ) + + # both keys and data are not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=False + ) + assert edges_equal([(1, 2), (2, 3)], list(mst_edges)) + + # both keys and data are included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=True + ) + assert edges_equal( + [(1, 2, 1, {"weight": 2}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + +class TestPrim(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Prim's algorithm. + """ + + algorithm = "prim" + + def test_prim_mst_edges_simple_graph(self): + H = nx.Graph() + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, {"key": 2, "weight": 3}), (2, 3, {"key": 1, "weight": 2})], + list(mst_edges), + ) + + def test_ignore_nan(self): + """Tests that the edges with NaN weights are ignored or + raise an Error based on ignore_nan is true or false""" + H = nx.MultiGraph() + H.add_edge(1, 2, key=1, weight=float("nan")) + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + # NaN weight edges are ignored when ignore_nan=True + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, 2, {"weight": 3}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + # NaN weight edges raise Error when ignore_nan=False + with pytest.raises(ValueError): + list(nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=False)) + + def test_multigraph_keys_tree(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 1)], list(T.edges(data="weight"))) + + def test_multigraph_keys_tree_max(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.maximum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 2)], list(T.edges(data="weight"))) + + +class TestSpanningTreeIterator: + """ + Tests the spanning tree iterator on the example graph in the 2005 Sörensen + and Janssens paper An Algorithm to Generate all Spanning Trees of a Graph in + Order of Increasing Cost + """ + + def setup_method(self): + # Original Graph + edges = [(0, 1, 5), (1, 2, 4), (1, 4, 6), (2, 3, 5), (2, 4, 7), (3, 4, 3)] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + # List of lists of spanning trees in increasing order + self.spanning_trees = [ + # 1, MST, cost = 17 + [ + (0, 1, {"weight": 
5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 2, cost = 18 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (3, 4, {"weight": 3}), + ], + # 3, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 4, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 5, cost = 20 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + ], + # 6, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 7, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + # 8, cost = 23 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + ] + + def test_minimum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in increasing order + """ + tree_index = 0 + for tree in nx.SpanningTreeIterator(self.G): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index += 1 + + def test_maximum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in decreasing order + """ + tree_index = 7 + for tree in nx.SpanningTreeIterator(self.G, minimum=False): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index -= 1 + + +def test_random_spanning_tree_multiplicative_small(): + """ + Using a fixed seed, sample one tree for repeatability. + """ + from math import exp + + pytest.importorskip("scipy") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + solution_edges = [(2, 3), (3, 4), (0, 5), (5, 4), (4, 1)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=42) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_multiplicative_large(): + """ + Sample many trees from the distribution created in the last test + """ + from math import exp + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + # Find the multiplicative weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 1 + for u, v, d in t.edges(data="lambda_key"): + weight *= d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. 
This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.15. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # Python 3.7 dict order is guaranteed, so the expected and actual data will + # have the same order. + sample_size = 1200 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to random_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass it into random_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=rng) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. + # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. + _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 + + +def test_random_spanning_tree_additive_small(): + """ + Sample a single spanning tree from the additive method. + """ + pytest.importorskip("scipy") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree( + G, weight="weight", multiplicative=False, seed=37 + ) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_additive_large(): + """ + Sample many spanning trees from the additive method. + """ + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + # Find the additive weight for each tree. 
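+ # Note: under the additive variant a spanning tree T is weighted by the sum
+ # of its edge weights rather than their product, so the expected sample
+ # count for T below is sample_size * weight(T) / total_weight.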
+ total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the additive weight of the spanning tree + weight = 0 + for u, v, d in t.edges(data="weight"): + weight += d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.07. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # Python 3.7 dict order is guaranteed, so the expected and actual data will + # have the same order. + sample_size = 500 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to random_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass it into random_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree( + G, "weight", multiplicative=False, seed=rng + ) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized sum + # of the edge weights in the tree. + # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. 
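+ # scipy.stats.chisquare with its default ddof tests against k - 1 degrees of
+ # freedom (k = 75 trees here); the sample size chosen above keeps every
+ # expected count at or above the conventional minimum of 5.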
+ _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 + + +def test_random_spanning_tree_empty_graph(): + G = nx.Graph() + rst = nx.tree.random_spanning_tree(G) + assert len(rst.nodes) == 0 + assert len(rst.edges) == 0 + + +def test_random_spanning_tree_single_node_graph(): + G = nx.Graph() + G.add_node(0) + rst = nx.tree.random_spanning_tree(G) + assert len(rst.nodes) == 1 + assert len(rst.edges) == 0 + + +def test_random_spanning_tree_single_node_loop(): + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + rst = nx.tree.random_spanning_tree(G) + assert len(rst.nodes) == 1 + assert len(rst.edges) == 0 + + +class TestNumberSpanningTrees: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + sp = pytest.importorskip("scipy") + + def test_nst_disconnected(self): + G = nx.empty_graph(2) + assert np.isclose(nx.number_of_spanning_trees(G), 0) + + def test_nst_no_nodes(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept): + nx.number_of_spanning_trees(G) + + def test_nst_weight(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(2, 3, weight=2) + # weights are ignored + assert np.isclose(nx.number_of_spanning_trees(G), 3) + # including weight + assert np.isclose(nx.number_of_spanning_trees(G, weight="weight"), 5) + + def test_nst_negative_weight(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=-1) + G.add_edge(2, 3, weight=-2) + # weights are ignored + assert np.isclose(nx.number_of_spanning_trees(G), 3) + # including weight + assert np.isclose(nx.number_of_spanning_trees(G, weight="weight"), -1) + + def test_nst_selfloop(self): + # self-loops are ignored + G = nx.complete_graph(3) + G.add_edge(1, 1) + assert np.isclose(nx.number_of_spanning_trees(G), 3) + + def test_nst_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2) + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + assert np.isclose(nx.number_of_spanning_trees(G), 5) + + def test_nst_complete_graph(self): + # this is known as Cayley's formula + N = 5 + G = nx.complete_graph(N) + assert np.isclose(nx.number_of_spanning_trees(G), N ** (N - 2)) + + def test_nst_path_graph(self): + G = nx.path_graph(5) + assert np.isclose(nx.number_of_spanning_trees(G), 1) + + def test_nst_cycle_graph(self): + G = nx.cycle_graph(5) + assert np.isclose(nx.number_of_spanning_trees(G), 5) + + def test_nst_directed_noroot(self): + G = nx.empty_graph(3, create_using=nx.MultiDiGraph) + with pytest.raises(nx.NetworkXError): + nx.number_of_spanning_trees(G) + + def test_nst_directed_root_not_exist(self): + G = nx.empty_graph(3, create_using=nx.MultiDiGraph) + with pytest.raises(nx.NetworkXError): + nx.number_of_spanning_trees(G, root=42) + + def test_nst_directed_not_weak_connected(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(3, 4) + assert np.isclose(nx.number_of_spanning_trees(G, root=1), 0) + + def test_nst_directed_cycle_graph(self): + G = nx.DiGraph() + G = nx.cycle_graph(7, G) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 1) + + def test_nst_directed_complete_graph(self): + G = nx.DiGraph() + G = nx.complete_graph(7, G) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 7**5) + + def test_nst_directed_multi(self): + G = nx.MultiDiGraph() + G = nx.cycle_graph(3, G) + G.add_edge(1, 2) + assert 
np.isclose(nx.number_of_spanning_trees(G, root=0), 2) + + def test_nst_directed_selfloop(self): + G = nx.MultiDiGraph() + G = nx.cycle_graph(3, G) + G.add_edge(1, 1) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 1) + + def test_nst_directed_weak_connected(self): + G = nx.MultiDiGraph() + G = nx.cycle_graph(3, G) + G.remove_edge(1, 2) + assert np.isclose(nx.number_of_spanning_trees(G, root=0), 0) + + def test_nst_directed_weighted(self): + # from root=1: + # arborescence 1: 1->2, 1->3, weight=2*1 + # arborescence 2: 1->2, 2->3, weight=2*3 + G = nx.DiGraph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=1) + G.add_edge(2, 3, weight=3) + Nst = nx.number_of_spanning_trees(G, root=1, weight="weight") + assert np.isclose(Nst, 8) + Nst = nx.number_of_spanning_trees(G, root=2, weight="weight") + assert np.isclose(Nst, 0) + Nst = nx.number_of_spanning_trees(G, root=3, weight="weight") + assert np.isclose(Nst, 0) diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_operations.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_operations.py new file mode 100644 index 0000000000000000000000000000000000000000..284d94e2e5059de267b5ea47f6012a42c6ac4639 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_operations.py @@ -0,0 +1,53 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def _check_custom_label_attribute(input_trees, res_tree, label_attribute): + res_attr_dict = nx.get_node_attributes(res_tree, label_attribute) + res_attr_set = set(res_attr_dict.values()) + input_label = (tree for tree, root in input_trees) + input_label_set = set(chain.from_iterable(input_label)) + return res_attr_set == input_label_set + + +def test_empty_sequence(): + """Joining the empty sequence results in the tree with one node.""" + T = nx.join_trees([]) + assert len(T) == 1 + assert T.number_of_edges() == 0 + + +def test_single(): + """Joining just one tree yields a tree with one more node.""" + T = nx.empty_graph(1) + trees = [(T, 0)] + actual_with_label = nx.join_trees(trees, label_attribute="custom_label") + expected = nx.path_graph(2) + assert nodes_equal(list(expected), list(actual_with_label)) + assert edges_equal(list(expected.edges()), list(actual_with_label.edges())) + + +def test_basic(): + """Joining multiple subtrees at a root node.""" + trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.join_trees(trees, label_attribute="old_labels") + assert nx.is_isomorphic(actual, expected) + assert _check_custom_label_attribute(trees, actual, "old_labels") + + actual_without_label = nx.join_trees(trees) + assert nx.is_isomorphic(actual_without_label, expected) + # check that no labels were stored + assert all(not data for _, data in actual_without_label.nodes(data=True)) + + +def test_first_label(): + """Test the functionality of the first_label argument.""" + T1 = nx.path_graph(3) + T2 = nx.path_graph(2) + actual = nx.join_trees([(T1, 0), (T2, 0)], first_label=10) + expected_nodes = set(range(10, 16)) + assert set(actual.nodes()) == expected_nodes + assert set(actual.neighbors(10)) == {11, 14} diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_recognition.py b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_recognition.py new file mode 100644 index 
0000000000000000000000000000000000000000..105f5a89e9b10d37d1cc140880a66bc860d2e9f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/tree/tests/test_recognition.py @@ -0,0 +1,174 @@ +import pytest + +import networkx as nx + + +class TestTreeRecognition: + graph = nx.Graph + multigraph = nx.MultiGraph + + @classmethod + def setup_class(cls): + cls.T1 = cls.graph() + + cls.T2 = cls.graph() + cls.T2.add_node(1) + + cls.T3 = cls.graph() + cls.T3.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T3.add_edges_from(edges) + + cls.T5 = cls.multigraph() + cls.T5.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T5.add_edges_from(edges) + + cls.T6 = cls.graph() + cls.T6.add_nodes_from([6, 7]) + cls.T6.add_edge(6, 7) + + cls.F1 = nx.compose(cls.T6, cls.T3) + + cls.N4 = cls.graph() + cls.N4.add_node(1) + cls.N4.add_edge(1, 1) + + cls.N5 = cls.graph() + cls.N5.add_nodes_from(range(5)) + + cls.N6 = cls.graph() + cls.N6.add_nodes_from(range(3)) + cls.N6.add_edges_from([(0, 1), (1, 2), (2, 0)]) + + cls.NF1 = nx.compose(cls.T6, cls.N6) + + def test_null_tree(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.graph()) + + def test_null_tree2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.multigraph()) + + def test_null_forest(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.graph()) + + def test_null_forest2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.multigraph()) + + def test_is_tree(self): + assert nx.is_tree(self.T2) + assert nx.is_tree(self.T3) + assert nx.is_tree(self.T5) + + def test_is_not_tree(self): + assert not nx.is_tree(self.N4) + assert not nx.is_tree(self.N5) + assert not nx.is_tree(self.N6) + + def test_is_forest(self): + assert nx.is_forest(self.T2) + assert nx.is_forest(self.T3) + assert nx.is_forest(self.T5) + assert nx.is_forest(self.F1) + assert nx.is_forest(self.N5) + + def test_is_not_forest(self): + assert not nx.is_forest(self.N4) + assert not nx.is_forest(self.N6) + assert not nx.is_forest(self.NF1) + + +class TestDirectedTreeRecognition(TestTreeRecognition): + graph = nx.DiGraph + multigraph = nx.MultiDiGraph + + +def test_disconnected_graph(): + # https://github.com/networkx/networkx/issues/1144 + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + +def test_dag_nontree(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (0, 2), (1, 2)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_multicycle(): + G = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (0, 1)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_emptybranch(): + G = nx.DiGraph() + G.add_nodes_from(range(10)) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_is_branching_empty_graph_raises(): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXPointlessConcept, match="G has no nodes."): + nx.is_branching(G) + + +def test_path(): + G = nx.DiGraph() + nx.add_path(G, range(5)) + assert nx.is_branching(G) + assert nx.is_arborescence(G) + + +def test_notbranching1(): + # Acyclic violation. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (1, 0)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notbranching2(): + # In-degree violation. 
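+ # Edges (0, 2) and (3, 2) both point at node 2, giving it in-degree 2;
+ # a branching requires every node to have in-degree at most one.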
+ G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (3, 2)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence1(): + # Not an arborescence because it does not span all the nodes. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (5, 6)]) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence2(): + # Not an arborescence due to an in-degree violation. + G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + G.add_edge(6, 4) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_is_arborescense_empty_graph_raises(): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXPointlessConcept, match="G has no nodes."): + nx.is_arborescence(G)
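+
+
+ # Editor's note: the predicates exercised above form a strict hierarchy.
+ # Every arborescence is a branching (acyclic, with in-degree at most one
+ # everywhere), and a branching is an arborescence exactly when it is also a
+ # tree spanning all of its nodes. A minimal sketch of that hierarchy, as a
+ # hypothetical helper that is not part of the upstream test suite and only
+ # assumes the networkx API imported above:
+ def _sketch_recognition_hierarchy():
+     G = nx.DiGraph([(0, 1), (0, 2), (1, 3)])  # a rooted tree on four nodes
+     assert nx.is_tree(G) and nx.is_branching(G) and nx.is_arborescence(G)
+
+     G.add_node(4)  # an isolated node keeps it a branching...
+     assert nx.is_branching(G)
+     assert not nx.is_arborescence(G)  # ...but it no longer spans from one root
+
+     G.add_edge(2, 3)  # node 3 now has in-degree 2
+     assert not nx.is_branching(G)
+     assert not nx.is_arborescence(G)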