code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---|
# build structures
n = len(mir)
orien = [None] * (n + 2)
orien[n] = 0 # arbitrary orientations
orien[n + 1] = 0
succ = [[None for direc in range(4)] for i in range(n + 2)]
L = [(mir[i][0], mir[i][1], i) for i in range(n)]
L.append((0, -1, n)) # enter
L.append((0, cols, n + 1)) # exit
last_r, last_i = None, None
for (r, c, i) in sorted(L): # sweep by row
if last_r == r:
succ[i][LEFT] = last_i
succ[last_i][RIGHT] = i
last_r, last_i = r, i
last_c = None
for (r, c, i) in sorted(L, key=lambda rci: (rci[1], rci[0])):
if last_c == c: # sweep by column
succ[i][UP] = last_i
succ[last_i][DOWN] = i
last_c, last_i = c, i
if solve(succ, orien, n, RIGHT): # exploration
return orien[:n]
else:
return None | def laser_mirrors(rows, cols, mir) | Orienting mirrors to allow reachability by laser beam
:param int rows:
:param int cols: rows and cols are the dimension of the grid
:param mir: list of mirror coordinates, except
mir[0]= laser entrance,
mir[-1]= laser exit.
:complexity: :math:`O(2^n)` | 3.324342 | 3.292652 | 1.009624 |
assert orien[i] is not None
j = succ[i][direc]
if j is None: # base case
return False
if j == len(orien) - 1:
return True
if orien[j] is None: # try both orientations
for x in [0, 1]:
orien[j] = x
if solve(succ, orien, j, reflex[direc][x]):
return True
orien[j] = None
return False
else:
return solve(succ, orien, j, reflex[direc][orien[j]]) | def solve(succ, orien, i, direc) | Can a laser leaving mirror i in direction direc reach the exit?
:param i: mirror index
:param direc: direction leaving mirror i
:param orien: orien[i]=orientation of mirror i
:param succ: succ[i][direc]=successor mirror reached
when leaving i in direction direc | 2.814427 | 3.113912 | 0.903824 |
assert len(L) >= 2
L.sort()
valmin, argmin = min((L[i] - L[i - 1], i) for i in range(1, len(L)))
return L[argmin - 1], L[argmin] | def closest_values(L) | Closest values
:param L: list of values
:returns: two values from L with minimal distance
:modifies: the order of L
:complexity: O(n log n), for n=len(L) | 3.085531 | 3.17859 | 0.970723 |
header = Cell(None, None, 0, None) # building the cell structure
col = []
for j in range(size_universe):
col.append(Cell(header, None, 0, None))
for i in range(len(sets)):
row = None
for j in sets[i]:
col[j].S += 1 # one more entry in this column
row = Cell(row, col[j], i, col[j])
sol = []
if solve(header, sol):
return sol
else:
return None | def dancing_links(size_universe, sets) | Exact set cover by the dancing links algorithm
:param size_universe: universe = {0, 1, ..., size_universe - 1}
:param sets: list of sets
:returns: list of set indices partitioning the universe, or None
:complexity: huge | 5.371777 | 5.296935 | 1.014129 |
# -- n is the number of variables
n = max(abs(clause[p]) for p in (0, 1) for clause in formula)
graph = [[] for node in range(2 * n)]
for x, y in formula: # x or y
graph[_vertex(-x)].append(_vertex(y)) # -x => y
graph[_vertex(-y)].append(_vertex(x)) # -y => x
sccp = tarjan(graph)
comp_id = [None] * (2 * n) # for each node the ID of its component
assignment = [None] * (2 * n)
for component in sccp:
rep = min(component) # representative of the component
for vtx in component:
comp_id[vtx] = rep
if assignment[vtx] is None:
assignment[vtx] = True
assignment[vtx ^ 1] = False # complementary literal
for i in range(n):
if comp_id[2 * i] == comp_id[2 * i + 1]:
return None # unsatisfiable formula
return assignment[::2] | def two_sat(formula) | Solving a 2-SAT boolean formula
:param formula: list of clauses, a clause is a pair of literals
over X1,...,Xn for some n.
a literal is an integer, for example -1 = not X1, 3 = X3
:returns: table with boolean assignment satisfying the formula or None
:complexity: linear | 4.596848 | 4.406732 | 1.043142 |
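A brief usage sketch for the entry above. It assumes two_sat and its helpers (tarjan, _vertex), which are not shown in this row, are available in scope; the formulas are hypothetical.

print(two_sat([(1, 1), (-1, -1)]))   # None: the clauses force X1 and not X1
model = two_sat([(1, -2), (-1, 2)])  # satisfiable: returns one boolean per variable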
# snip}
assert L # majority is not defined on an empty list
# snip{
count = {}
for word in L:
if word in count:
count[word] += 1
else:
count[word] = 1
valmin, argmin = min((-count[word], word) for word in count)
return argmin | def majority(L) | Majority
:param L: list of elements
:returns: element that appears most in L,
tie breaking with smallest element
:complexity: :math:`O(nk)` in average,
where n = len(L) and k = max(len(w) for w in L)
:math:`O(n^2k)` in worst case due to the use of a dictionary | 6.683589 | 7.317562 | 0.913363 |
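A minimal usage sketch, assuming majority is defined as above; the word list is hypothetical.

words = ["ab", "cd", "ab", "ef", "cd", "ab"]
print(majority(words))   # 'ab', which appears three times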
global N, N2, N4
if len(G) == 16: # for a 16 x 16 sudoku grid
N, N2, N4 = 4, 16, 256
e = 4 * N4
universe = e + 1
S = [[rc(a), rv(a), cv(a), bv(a)] for a in range(N4 * N2)]
A = [e]
for r in range(N2):
for c in range(N2):
if G[r][c] != 0:
a = assignation(r, c, G[r][c] - 1)
A += S[a]
sol = dancing_links(universe, S + [A])
if sol:
for a in sol:
if a < len(S):
G[row(a)][col(a)] = val(a) + 1
return True
else:
return False | def sudoku(G) | Solving Sudoku
:param G: integer matrix with 0 at empty cells
:returns bool: True if grid could be solved
:modifies: G will contain the solution
:complexity: huge, but linear for usual published 9x9 grids | 4.577556 | 4.592597 | 0.996725 |
rows = len(P)
cols = len(P[0])
t = [0] * cols
best = None
for i in range(rows):
for j in range(cols):
if P[i][j] == black:
t[j] += 1
else:
t[j] = 0
(area, left, height, right) = rectangles_from_histogram(t)
alt = (area, left, i, right, i-height)
if best is None or alt > best:
best = alt
return best | def rectangles_from_grid(P, black=1) | Largest area rectangle in a binary matrix
:param P: matrix
:param black: search for rectangles filled with value black
:returns: area, left, top, right, bottom of optimal rectangle
consisting of all (i,j) with
left <= j < right and top <= i <= bottom
:complexity: linear | 2.957201 | 2.715526 | 1.088998 |
INF = float('inf')
n = len(graph) # compute distances
dist = [[INF] * n]
prec = [[None] * n]
dist[0][start] = 0
for ell in range(1, n + 1):
dist.append([INF] * n)
prec.append([None] * n)
for node in range(n):
for neighbor in graph[node]:
alt = dist[ell - 1][node] + weight[node][neighbor]
if alt < dist[ell][neighbor]:
dist[ell][neighbor] = alt
prec[ell][neighbor] = node
# -- find the optimal value
valmin = INF
argmin = None
for node in range(n):
valmax = -INF
argmax = None
for k in range(n):
alt = (dist[n][node] - dist[k][node]) / float(n - k)
# do not divide by float(n-k) => cycle of minimal total weight
if alt >= valmax: # with >= we get simple cycles
valmax = alt
argmax = k
if argmax is not None and valmax < valmin:
valmin = valmax
argmin = (node, argmax)
# -- extract cycle
if valmin == INF: # -- there is no cycle
return None
C = []
node, k = argmin
for l in range(n, k, -1):
C.append(node)
node = prec[l][node]
return C[::-1], valmin | def min_mean_cycle(graph, weight, start=0) | Minimum mean cycle by Karp
:param graph: directed graph in listlist or listdict format
:param weight: in matrix format or same listdict graph
:param int start: vertex that should be contained in cycle
:returns: cycle as vertex list, average arc weights
or None if there is no cycle from start
:complexity: `O(|V|*|E|)` | 3.31807 | 3.293611 | 1.007426 |
V = range(len(weight))
for k in V:
for u in V:
for v in V:
weight[u][v] = min(weight[u][v],
weight[u][k] + weight[k][v])
for v in V:
if weight[v][v] < 0: # negative cycle found
return True
return False | def floyd_warshall(weight) | All pairs shortest paths by Floyd-Warshall
:param weight: edge weight matrix
:modifies: weight matrix to contain distances in graph
:returns: True if there are negative cycles
:complexity: :math:`O(|V|^3)` | 2.432654 | 2.465919 | 0.98651 |
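A small usage sketch for the entry above, on a hypothetical 3-vertex weight matrix with float('inf') marking missing arcs; floyd_warshall modifies the matrix in place.

INF = float('inf')
weight = [[0, 4, INF],
          [INF, 0, 1],
          [2, INF, 0]]
print(floyd_warshall(weight))   # False: no negative cycle
print(weight[0][2])             # 5, i.e. the path 0 -> 1 -> 2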
n = len(graph)
dist = [float('inf')] * n
prec = [None] * n
dist[source] = 0
for nb_iterations in range(n):
changed = False
for node in range(n):
for neighbor in graph[node]:
alt = dist[node] + weight[node][neighbor]
if alt < dist[neighbor]:
dist[neighbor] = alt
prec[neighbor] = node
changed = True
if not changed: # fixed point
return dist, prec, False
return dist, prec, True | def bellman_ford(graph, weight, source=0) | Single source shortest paths by Bellman-Ford
:param graph: directed graph in listlist or listdict format
:param weight: can be negative.
in matrix format or same listdict graph
:returns: distance table, precedence table, bool
:explanation: bool is True if a negative circuit is
reachable from the source, circuits
can have length 2.
:complexity: `O(|V|*|E|)` | 2.306648 | 2.328285 | 0.990707 |
val = 0
pos10 = 1000
beg = 0
for pos in range(3, -1, -1):
for digit in range(9,-1,-1):
r = roman[pos][digit]
if s.startswith(r, beg): # footnote 1
beg += len(r)
val += digit * pos10
break
pos10 //= 10
return val | def roman2int(s) | Decode roman number
:param s: string representing a roman number between 1 and 9999
:returns: the decoded roman number
:complexity: linear (if that makes sense for constant bounded input size) | 4.314249 | 4.65358 | 0.927082 |
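A minimal usage sketch, assuming roman2int and the global digit table `roman` it relies on are both in scope; the inputs are hypothetical.

print(roman2int("MMXXIV"))   # 2024
print(roman2int("XIV"))      # 14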
s = ''
pos10 = 1000
for pos in range(3, -1, -1):
digit = val // pos10
s += roman[pos][digit]
val %= pos10
pos10 //= 10
return s | def int2roman(val) | Code roman number
:param val: integer between 1 and 9999
:returns: the corresponding roman number
:complexity: linear (if that makes sense for constant bounded input size) | 3.084187 | 3.68873 | 0.836111 |
n = len(graph)
assert all(weight[u][v] >= 0 for u in range(n) for v in graph[u])
prec = [None] * n
black = [False] * n
dist = [float('inf')] * n
dist[source] = 0
heap = [(0, source)]
while heap:
dist_node, node = heappop(heap) # Closest node from source
if not black[node]:
black[node] = True
if node == target:
break
for neighbor in graph[node]:
dist_neighbor = dist_node + weight[node][neighbor]
if dist_neighbor < dist[neighbor]:
dist[neighbor] = dist_neighbor
prec[neighbor] = node
heappush(heap, (dist_neighbor, neighbor))
return dist, prec | def dijkstra(graph, weight, source=0, target=None) | single source shortest paths by Dijkstra
:param graph: directed graph in listlist or listdict format
:param weight: in matrix format or same listdict graph
:assumes: weights are non-negative
:param source: source vertex
:type source: int
:param target: if given, stops once distance to target found
:type target: int
:returns: distance table, precedence table
:complexity: `O(|V| + |E|log|V|)` | 1.959839 | 2.034641 | 0.963236 |
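A small usage sketch for the entry above, on a hypothetical 4-vertex graph in listlist format with a weight matrix; dijkstra and heapq's heappush/heappop are assumed in scope.

graph = [[1, 2], [3], [3], []]
weight = [[0, 1, 4, 0],
          [0, 0, 0, 2],
          [0, 0, 0, 1],
          [0, 0, 0, 0]]
dist, prec = dijkstra(graph, weight, source=0)
print(dist[3], prec[3])   # 3 1, i.e. the shortest path 0 -> 1 -> 3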
n = len(graph)
assert all(weight[u][v] >= 0 for u in range(n) for v in graph[u])
prec = [None] * n
dist = [float('inf')] * n
dist[source] = 0
heap = OurHeap([(dist[node], node) for node in range(n)])
while heap:
dist_node, node = heap.pop() # Closest node from source
if node == target:
break
for neighbor in graph[node]:
old = dist[neighbor]
new = dist_node + weight[node][neighbor]
if new < old:
dist[neighbor] = new
prec[neighbor] = node
heap.update((old, neighbor), (new, neighbor))
return dist, prec | def dijkstra_update_heap(graph, weight, source=0, target=None) | single source shortest paths by Dijkstra
with a heap implementing item updates
:param graph: adjacency list or adjacency dictionary of a directed graph
:param weight: matrix or adjacency dictionary
:assumes: weights are non-negative and weights are infinite for non-edges
:param source: source vertex
:type source: int
:param target: if given, stops once distance to target found
:type target: int
:returns: distance table, precedence table
:complexity: `O(|V| + |E|log|V|)` | 2.529709 | 2.522342 | 1.002921 |
assert set.isdisjoint({'$', '^', '#'}, s) # Forbidden letters
if s == "":
return (0, 1)
t = "^#" + "#".join(s) + "#$"
c = 1
d = 1
p = [0] * len(t)
for i in range(2, len(t) - 1):
# -- reflect index i with respect to c
mirror = 2 * c - i # = c - (i-c)
p[i] = max(0, min(d - i, p[mirror]))
# -- grow palindrome centered in i
while t[i + 1 + p[i]] == t[i - 1 - p[i]]:
p[i] += 1
# -- adjust center if necessary
if i + p[i] > d:
c = i
d = i + p[i]
(k, i) = max((p[i], i) for i in range(1, len(t) - 1))
return ((i - k) // 2, (i + k) // 2) | def manacher(s) | Longest palindrome in a string by Manacher
:param s: string
:requires: s is not empty
:returns: i,j such that s[i:j] is the longest palindrome in s
:complexity: O(len(s)) | 4.119489 | 3.755533 | 1.096912 |
to_visit = deque()
dist = [float('inf')] * len(graph)
prec = [None] * len(graph)
dist[start] = 0
to_visit.appendleft(start)
while to_visit: # an empty queue is considered False
node = to_visit.pop()
for neighbor in graph[node]:
if dist[neighbor] == float('inf'):
dist[neighbor] = dist[node] + 1
prec[neighbor] = node
to_visit.appendleft(neighbor)
return dist, prec | def bfs(graph, start=0) | Shortest path in unweighted graph by BFS
:param graph: directed graph in listlist or listdict format
:param int start: source vertex
:returns: distance table, precedence table
:complexity: `O(|V|+|E|)` | 2.071559 | 2.111598 | 0.981039 |
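A minimal usage sketch on a hypothetical unweighted graph in listlist format; bfs and collections.deque are assumed in scope.

graph = [[1, 2], [0, 3], [0], [1]]
dist, prec = bfs(graph, start=0)
print(dist)   # [0, 1, 1, 2]
print(prec)   # [None, 0, 0, 1]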
if T is None:
T = Trie_Node()
if i == len(w):
T.isWord = True
else:
T.s[w[i]] = add(T.s[w[i]], w, i + 1)
return T | def add(T, w, i=0) | :param T: trie
:param string w: word to be added to T
:returns: new trie consisting of w added into T
:complexity: O(len(w)) | 2.953323 | 2.454844 | 1.203059 |
T = None
for w in S:
T = add(T, w)
return T | def Trie(S) | :param S: set of words
:returns: trie containing all words from S
:complexity: linear in total word sizes from S | 5.006452 | 5.215498 | 0.959918 |
assert T is not None
dist = 0
while True: # Try increasing distances
u = search(T, dist, w)
if u is not None:
return u
dist += 1 | def spell_check(T, w) | Spellchecker
:param T: trie encoding the dictionary
:param w: given word
:returns: a closest word from the dictionary
:complexity: linear if distance was constant | 7.611362 | 8.660924 | 0.878816 |
if i == len(w):
if T is not None and T.isWord and dist == 0:
return ""
else:
return None
if T is None:
return None
f = search(T.s[w[i]], dist, w, i + 1) # matching
if f is not None:
return w[i] + f
if dist == 0:
return None
for c in ascii_letters:
f = search(T.s[c], dist - 1, w, i) # insertion
if f is not None:
return c + f
f = search(T.s[c], dist - 1, w, i + 1) # substitution
if f is not None:
return c + f
return search(T, dist - 1, w, i + 1) | def search(T, dist, w, i=0) | Searches for w[i:] in trie T with distance at most dist | 2.519835 | 2.457038 | 1.025558 |
with open(filename, 'r') as f:
while True:
line = f.readline() # ignore leading comments
if line[0] != '#':
break
nb_nodes, nb_edges = tuple(map(int, line.split()))
graph = [[] for u in range(nb_nodes)]
if weighted:
weight = [[default_weight] * nb_nodes for v in range(nb_nodes)]
for v in range(nb_nodes):
weight[v][v] = 0
for _ in range(nb_edges):
u, v, w = readtab(f, int)
graph[u].append(v)
weight[u][v] = w
if not directed:
graph[v].append(u)
weight[v][u] = w
return graph, weight
else:
for _ in range(nb_edges):
# if the file contains weights, they are ignored
u, v = readtab(f, int)[:2]
graph[u].append(v)
if not directed:
graph[v].append(u)
return graph | def read_graph(filename, directed=False, weighted=False, default_weight=None) | Read a graph from a text file
:param filename: plain text file. All numbers are separated by space.
Starts with a line containing n (#vertices) and m (#edges).
Then m lines follow, for each edge.
Vertices are numbered from 0 to n-1.
Line for unweighted edge u,v contains two integers u, v.
Line for weighted edge u,v contains three integers u, v, w[u,v].
:param directed: true for a directed graph, false for undirected
:param weighted: true for an edge weighted graph
:returns: graph in listlist format, possibly followed by weight matrix
:complexity: O(n + m) for unweighted graph,
:math:`O(n^2)` for weighted graph | 2.365549 | 2.333696 | 1.013649 |
with open(dotfile, 'w') as f:
if directed:
f.write("digraph G{\n")
else:
f.write("graph G{\n")
if comment:
f.write('label="%s";\n' % comment)
V = range(len(graph))
# -- vertices
for u in V:
if node_mark and u in node_mark:
f.write('%d [style=filled, color="lightgrey", ' % u)
else:
f.write('%d [' % u)
if node_label:
f.write('label="%u [%s]"];\n' % (u, node_label[u]))
else:
f.write('shape=circle, label="%u"];\n' % u)
# -- edges
if isinstance(arc_mark, list):
arc_mark = set((u, arc_mark[u]) for u in V)
for u in V:
for v in graph[u]:
if not directed and u > v:
continue # don't show twice the edge
if arc_label and arc_label[u][v] == None:
continue # suppress arcs with no label
if directed:
arc = "%d -> %d " % (u, v)
else:
arc = "%d -- %d " % (u, v)
if arc_mark and ( (v,u) in arc_mark or (not directed and (u,v) in arc_mark) ):
pen = 'color="red"'
else:
pen = ""
if arc_label:
tag = 'label="%s"' % arc_label[u][v]
else:
tag = ""
if tag and pen:
sep = ", "
else:
sep = ""
f.write(arc + "[" + tag + sep + pen + "];\n")
f.write("}") | def write_graph(dotfile, graph, directed=False,
node_label=None, arc_label=None, comment="",
node_mark=set(), arc_mark=set()) | Writes a graph to a file in the DOT format
:param dotfile: the filename.
:param graph: directed graph in listlist or listdict format
:param directed: true if graph is directed, false if undirected
:param weight: in matrix format or same listdict graph or None
:param node_label: vertex label table or None
:param arc_label: arc label matrix or None
:param comment: comment string for the dot file or None
:param node_mark: set of nodes to be shown in gray
:param arc_mark: set of arcs to be shown in red
:complexity: `O(|V| + |E|)` | 2.496696 | 2.546763 | 0.980341 |
n = len(prec)
graph = [[prec[u]] for u in range(n)] # add predecessors
graph[root] = []
for u in range(n): # add successors
if u != root:
graph[prec[u]].append(u)
return graph | def tree_prec_to_adj(prec, root=0) | Transforms a tree given as predecessor table into adjacency list form
:param prec: predecessor table representing a tree, prec[u] == v iff u is successor of v,
except for the root where prec[root] == root
:param root: root vertex of the tree
:returns: undirected graph in listlist representation
:complexity: linear | 3.926877 | 3.43189 | 1.144231 |
prec = [None] * len(graph)
prec[root] = root # mark to visit root only once
to_visit = [root]
while to_visit: # DFS
node = to_visit.pop()
for neighbor in graph[node]:
if prec[neighbor] is None:
prec[neighbor] = node
to_visit.append(neighbor)
prec[root] = None # put the standard mark for root
return prec | def tree_adj_to_prec(graph, root=0) | Transforms a tree given as adjacency list into predecessor table form.
if graph is not a tree: will return a DFS spanning tree
:param graph: directed graph in listlist or listdict format
:returns: tree in predecessor table representation
:complexity: linear | 3.732276 | 4.047039 | 0.922224 |
for u in range(len(graph)):
for v in graph[u]:
if u not in graph[v]:
if type(graph[v]) is list:
graph[v].append(u)
if capac:
capac[v][u] = 0
else:
assert type(graph[v]) is dict
graph[v][u] = 0 | def add_reverse_arcs(graph, capac=None) | Utility function for flow algorithms that need, for every arc (u,v),
the existence of a reverse arc (v,u), by default with zero capacity.
graph can be in adjacency list, possibly with capacity matrix capac.
or graph can be in adjacency dictionary, then capac parameter is ignored.
:param capac: arc capacity matrix
:param graph: in listlist representation, or in listdict representation, in this case capac is ignored
:complexity: linear
:returns: nothing, but graph is modified | 2.368279 | 2.495054 | 0.949189 |
graph = [[] for _ in range(len(weight))]
for u in range(len(graph)):
for v in range(len(graph)):
if weight[u][v] != None:
graph[u].append(v)
return graph | def matrix_to_listlist(weight) | transforms a square weight matrix into an adjacency table of type listlist
encoding the directed graph corresponding to the entries of the matrix
different from None
:param weight: square weight matrix, weight[u][v] != None iff arc (u,v) exists
:complexity: linear
:returns: the unweighted directed graph in the listlist representation,
listlist[u] contains all v for which arc (u,v) exists. | 2.225829 | 2.177643 | 1.022128 |
if weight:
return [{v:weight[u][v] for v in graph[u]} for u in range(len(graph))]
else:
return [{v:None for v in graph[u]} for u in range(len(graph))] | def listlist_and_matrix_to_listdict(graph, weight=None) | Transforms the weighted adjacency list representation of a graph
of type listlist + optional weight matrix
into the listdict representation
:param graph: in listlist representation
:param weight: optional weight matrix
:returns: graph in listdict representation
:complexity: linear | 2.338096 | 2.651943 | 0.881654 |
V = range(len(sparse))
graph = [[] for _ in V]
weight = [[None for v in V] for u in V]
for u in V:
for v in sparse[u]:
graph[u].append(v)
weight[u][v] = sparse[u][v]
return graph, weight | def listdict_to_listlist_and_matrix(sparse) | Transforms the adjacency list representation of a graph
of type listdict into the listlist + weight matrix representation
:param sparse: graph in listdict representation
:returns: couple with listlist representation, and weight matrix
:complexity: linear | 2.725251 | 2.69184 | 1.012412 |
n = len(dictgraph) # vertices
node_to_name = [name for name in dictgraph] # bijection indices <-> names
node_to_name.sort() # to make it more readable
name_to_node = {}
for i in range(n):
name_to_node[node_to_name[i]] = i
sparse = [{} for _ in range(n)] # build sparse graph
for u in dictgraph:
for v in dictgraph[u]:
sparse[name_to_node[u]][name_to_node[v]] = dictgraph[u][v]
return sparse, name_to_node, node_to_name | def dictdict_to_listdict(dictgraph) | Transforms a dict-dict graph representation into an
adjacency dictionary representation (list-dict)
:param dictgraph: dictionary mapping vertices to dictionary
such that dictgraph[u][v] is weight of arc (u,v)
:complexity: linear
:returns: tuple with graph (listdict), name_to_node (dict), node_to_name (list) | 3.033109 | 2.735046 | 1.108979 |
L = []
while v is not None:
L.append(v)
v = prec[v]
assert v not in L # prevent infinite loops for a badly formed table prec
return L[::-1] | def extract_path(prec, v) | extracts a path in the form of a vertex list from the source to vertex v
given a precedence table prec leading to the source
:param prec: precedence table of a tree
:param v: vertex on the tree
:returns: path from root to v, in form of a list
:complexity: linear | 8.644954 | 8.796906 | 0.982727 |
V = range(len(graph))
arc_label = [{v:"" for v in graph[u]} for u in V]
for u in V:
for v in graph[u]:
if flow[u][v] >= 0:
arc_label[u][v] = "%s/%s" % (flow[u][v], capac[u][v])
else:
arc_label[u][v] = None # do not show negative flow arcs
return arc_label | def make_flow_labels(graph, flow, capac) | Generate arc labels for a flow in a graph with capacities.
:param graph: adjacency list or adjacency dictionary
:param flow: flow matrix or adjacency dictionary
:param capac: capacity matrix or adjacency dictionary
:returns: listdict graph representation, with the arc label strings | 2.85742 | 2.778512 | 1.0284 |
n = len(graph)
match = max_bipartite_matching(graph) # maximum matching
part = [None] * n # partition into chains
nb_chains = 0
for v in range(n - 1, -1, -1): # in inverse topological order
if part[v] is None: # start of chain
u = v
while u is not None: # follow the chain
part[u] = nb_chains # mark
u = match[u]
nb_chains += 1
return part | def dilworth(graph) | Decompose a DAG into a minimum number of chains by Dilworth
:param graph: directed graph in listlist or listdict format
:assumes: graph is acyclic
:returns: table giving for each vertex the index of its chain
:complexity: same as matching | 4.238423 | 4.051129 | 1.046233 |
h = []
for a in freq:
heappush(h, (freq[a], a))
while len(h) > 1:
(fl, l) = heappop(h)
(fr, r) = heappop(h)
heappush(h, (fl + fr, [l, r]))
code = {}
extract(code, h[0][1])
return code | def huffman(freq) | Huffman code
:param freq: dictionary with frequencies for each item
:returns: dictionary with binary code string for each item
:complexity: O(n log n) | 2.603627 | 2.805361 | 0.92809 |
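A minimal usage sketch with hypothetical symbol frequencies; huffman, its helper extract and heapq's heappush/heappop are assumed in scope.

code = huffman({'a': 7, 'b': 1, 'c': 1})
print(len(code['a']))                   # 1: the most frequent symbol gets the shortest codeword
print(len(code['b']), len(code['c']))   # 2 2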
if isinstance(tree, list):
l, r = tree
prefix.append('0')
extract(code, l, prefix)
prefix.pop()
prefix.append('1')
extract(code, r, prefix)
prefix.pop()
else:
code[tree] = ''.join(prefix) | def extract(code, tree, prefix=[]) | Extract Huffman code from a Huffman tree
:param tree: a node of the tree
:param prefix: a list with the 01 characters encoding the path from
the root to the node `tree`
:complexity: O(n) | 2.247418 | 2.692705 | 0.834632 |
n = len(tab)
pivot = None # find pivot
for i in range(n - 1):
if tab[i] < tab[i + 1]:
pivot = i
if pivot is None: # tab is already the last perm.
return False
for i in range(pivot + 1, n): # find the element to swap
if tab[i] > tab[pivot]:
swap = i
tab[swap], tab[pivot] = tab[pivot], tab[swap]
i = pivot + 1
j = n - 1 # invert suffix
while i < j:
tab[i], tab[j] = tab[j], tab[i]
i += 1
j -= 1
return True | def next_permutation(tab) | find the next permutation of tab in the lexicographical order
:param tab: table with n elements from an ordered set
:modifies: table to next permutation
:returns: False if permutation is already lexicographical maximal
:complexity: O(n) | 2.402094 | 2.49367 | 0.963277 |
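A minimal usage sketch, assuming next_permutation is defined as above; it rewrites the list in place.

tab = [1, 2, 3]
print(next_permutation(tab), tab)    # True [1, 3, 2]
print(next_permutation([3, 2, 1]))   # False: already the last permutation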
E = [(low, -1) for (low, high) in S]
E += [(high, +1) for (low, high) in S]
nb_open = 0
last = None
retval = []
for x, _dir in sorted(E):
if _dir == -1:
if nb_open == 0:
last = x
nb_open += 1
else:
nb_open -= 1
if nb_open == 0:
retval.append((last, x))
return retval | def intervals_union(S) | Union of intervals
:param S: list of pairs (low, high) defining intervals [low, high)
:returns: ordered list of disjoint intervals with the same union as S
:complexity: O(n log n) | 2.937036 | 3.238546 | 0.9069 |
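A minimal usage sketch with hypothetical half-open intervals; intervals_union is assumed in scope.

print(intervals_union([(1, 3), (2, 5), (7, 8)]))   # [(1, 5), (7, 8)]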
# -- assume w.l.o.g. that v is not higher than u in the tree
if self.level[u] > self.level[v]:
u, v = v, u
# -- put v at the same level as u
depth = len(self.anc)
for k in range(depth-1, -1, -1):
if self.level[u] <= self.level[v] - (1 << k):
v = self.anc[k][v]
assert self.level[u] == self.level[v]
if u == v:
return u
# -- climb until the lowest common ancestor
for k in range(depth-1, -1, -1):
if self.anc[k][u] != self.anc[k][v]:
u = self.anc[k][u]
v = self.anc[k][v]
assert self.anc[0][u] == self.anc[0][v]
return self.anc[0][u] | def query(self, u, v) | :returns: the lowest common ancestor of u and v
:complexity: O(log n) | 2.807122 | 2.668321 | 1.052018 |
lu = self.last[u]
lv = self.last[v]
if lu > lv:
lu, lv = lv, lu
return self.rmq.range_min(lu, lv + 1)[1] | def query(self, u, v) | :returns: the lowest common ancestor of u and v
:complexity: O(log n) | 5.090863 | 5.660057 | 0.899437 |
assert a >= 0 and b >= 0 and q >= 1
p = 0 # only for documentation
p2 = 1 # 2 ** p
ap2 = a % q # a ** (2 ** p)
result = 1
while b > 0:
if p2 & b > 0: # b's binary decomposition contains 2 ** p
b -= p2
result = (result * ap2) % q
p += 1
p2 *= 2
ap2 = (ap2 * ap2) % q
return result | def fast_exponentiation2(a, b, q) | Compute (a pow b) % q
:param int a b: non negative
:param int q: positive
:complexity: O(log b) | 4.311985 | 4.522902 | 0.953367 |
assert a >= 0 and b >= 0 and q >= 1
result = 1
while b:
if b % 2 == 1:
result = (result * a) % q
a = (a * a) % q
b >>= 1
return result | def fast_exponentiation(a, b, q) | Compute (a pow b) % q, alternative shorter implementation
:param int a b: non negative
:param int q: positive
:complexity: O(log b) | 1.796124 | 2.094866 | 0.857393 |
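A minimal usage sketch; fast_exponentiation is assumed in scope, and the result can be cross-checked with Python's built-in three-argument pow.

print(fast_exponentiation(3, 10, 1000))   # 49, since 3**10 = 59049
print(pow(3, 10, 1000))                   # 49 as well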
visit[u] = True
if u == target:
return val
for v in graph[u]:
cuv = capacity[u][v]
if not visit[v] and cuv > flow[u][v]: # reachable arc
res = min(val, cuv - flow[u][v])
delta = _augment(graph, capacity, flow, res, v, target, visit)
if delta > 0:
flow[u][v] += delta # augment flow
flow[v][u] -= delta
return delta
return 0 | def _augment(graph, capacity, flow, val, u, target, visit) | Find an augmenting path from u to target with value at most val | 2.660559 | 2.550047 | 1.043337 |
add_reverse_arcs(graph, capacity)
n = len(graph)
flow = [[0] * n for _ in range(n)]
INF = float('inf')
while _augment(graph, capacity, flow, INF, s, t, [False] * n) > 0:
pass # work already done in _augment
return (flow, sum(flow[s])) | def ford_fulkerson(graph, capacity, s, t) | Maximum flow by Ford-Fulkerson
:param graph: directed graph in listlist or listdict format
:param capacity: in matrix format or same listdict graph
:param int s: source vertex
:param int t: target vertex
:returns: flow matrix, flow value
:complexity: `O(|V|*|E|*max_capacity)` | 5.046917 | 5.502668 | 0.917176 |
dist, i, j = 0, 0, 0 # dist = |{x[i], ..., x[j-1]}|
occ = {xi: 0 for xi in x} # number of occurrences in x[i:j]
while j < len(x):
while dist == k: # move start of interval
occ[x[i]] -= 1 # update counters
if occ[x[i]] == 0:
dist -= 1
i += 1
while j < len(x) and (dist < k or occ[x[j]]):
if occ[x[j]] == 0: # update counters
dist += 1
occ[x[j]] += 1
j += 1 # move end of interval
if dist == k:
yield (i, j) | def windows_k_distinct(x, k) | Find all largest windows containing exactly k distinct elements
:param x: list or string
:param k: positive integer
:yields: largest intervals [i, j) with len(set(x[i:j])) == k
:complexity: `O(|x|)` | 3.239561 | 3.068288 | 1.05582 |
if b == 0:
return (1, 0)
else:
u, v = bezout(b, a % b)
return (v, u - (a // b) * v) | def bezout(a, b) | Bezout coefficients for a and b
:param a,b: non-negative integers
:complexity: O(log a + log b) | 2.372169 | 2.652587 | 0.894285 |
prod = 1
for i in range(k):
prod = (prod * (n - i)) // (i + 1)
return prod | def binom(n, k) | Binomial coefficients for :math:`n \choose k`
:param n,k: non-negative integers
:complexity: O(k) | 2.571579 | 3.503433 | 0.734017 |
prod = 1
for i in range(k):
prod = (prod * (n - i) * inv(i + 1, p)) % p
return prod | def binom_modulo(n, k, p) | Binomial coefficients for :math:`n \choose k`, modulo p
:param n,k: non-negative integers
:complexity: O(k) | 2.834732 | 3.50485 | 0.808802 |
n = len(bigraph) # same domain for U and V
match = [None] * n
for u in range(n):
augment(u, bigraph, [False] * n, match)
return match | def max_bipartite_matching(bigraph) | Bipartite maximum matching
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
:returns: matching list, match[v] == u iff (u, v) in matching
:complexity: `O(|V|*|E|)` | 7.715509 | 7.183265 | 1.074095 |
nU = len(bigraph)
# the following line works only in Python ≥ 3.4 (max with a default keyword)
# nV = max(max(adjlist, default=-1) for adjlist in bigraph) + 1
nV = 0
for adjlist in bigraph:
for v in adjlist:
if v + 1 > nV:
nV = v + 1
match = [None] * nV
for u in range(nU):
augment(u, bigraph, [False] * nV, match)
return match | def max_bipartite_matching2(bigraph) | Bipartite maximum matching
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:comment: U and V can have different cardinalities
:returns: matching list, match[v] == u iff (u, v) in matching
:complexity: `O(|V|*|E|)` | 3.991101 | 4.062602 | 0.9824 |
freq = {} # freq[p] = total weight of words having prefix p
for word, weight in dic:
prefix = ""
for x in word:
prefix += x
if prefix in freq:
freq[prefix] += weight
else:
freq[prefix] = weight
# prop[s] = prefix to display for s
prop = {}
for prefix in freq:
code = code_word(prefix)
if code not in prop or freq[prop[code]] < freq[prefix]:
prop[code] = prefix
return prop | def predictive_text(dic) | Predictive text for mobile phones
:param dic: associates weights to words from [a-z]*
:returns: a dictionary associating to words from [2-9]*
a corresponding word from the dictionary with highest weight
:complexity: linear in total word length | 4.809137 | 4.8561 | 0.990329 |
answ = 0
pairs = {}
for j in range(len(S)):
for i in range(j):
px, py = S[i]
qx, qy = S[j]
center = (px + qx, py + qy)
dist = (px - qx) ** 2 + (py - qy) ** 2
sign = (center, dist)
if sign in pairs:
answ += len(pairs[sign])
pairs[sign].append((i, j))
else:
pairs[sign] = [(i, j)]
return answ | def rectangles_from_points(S) | How many rectangles can be formed from a set of points
:param S: list of points, as coordinate pairs
:returns: the number of rectangles
:complexity: :math:`O(n^2)` | 2.497782 | 2.605762 | 0.958561 |
n = len(graph)
time = 0
num = [None] * n
low = [n] * n
father = [None] * n # father[v] = None if root else father of v
critical_childs = [0] * n # c_c[u] = #children v s.t. low[v] >= num[u]
times_seen = [-1] * n
for start in range(n):
if times_seen[start] == -1: # init DFS path
times_seen[start] = 0
to_visit = [start]
while to_visit:
node = to_visit[-1]
if times_seen[node] == 0: # start processing
num[node] = time
time += 1
low[node] = float('inf')
children = graph[node]
if times_seen[node] == len(children): # end processing
to_visit.pop()
up = father[node] # propagate low to father
if up is not None:
low[up] = min(low[up], low[node])
if low[node] >= num[up]:
critical_childs[up] += 1
else:
child = children[times_seen[node]] # next arrow
times_seen[node] += 1
if times_seen[child] == -1: # not visited yet
father[child] = node # link arrow
times_seen[child] = 0
to_visit.append(child) # (below) back arrow
elif num[child] < num[node] and father[node] != child:
low[node] = min(low[node], num[child])
cut_edges = []
cut_nodes = [] # extract solution
for node in range(n):
if father[node] is None: # characteristics
if critical_childs[node] >= 2:
cut_nodes.append(node)
else: # internal nodes
if critical_childs[node] >= 1:
cut_nodes.append(node)
if low[node] >= num[node]:
cut_edges.append((father[node], node))
return cut_nodes, cut_edges | def cut_nodes_edges(graph) | Bi-connected components
:param graph: undirected graph. in listlist format. Cannot be in listdict format.
:returns: a tuple with the list of cut-nodes and the list of cut-edges
:complexity: `O(|V|+|E|)` | 3.300678 | 3.300629 | 1.000015 |
N = len(graph)
assert N <= 5000
recursionlimit = getrecursionlimit()
setrecursionlimit(max(recursionlimit, N + 42))
edges = set((i, j) for i in range(N) for j in graph[i] if i <= j)
nodes = set()
NOT = -2 # not visited yet; -1 would be buggy `marked[v] != prof - 1`
FIN = -3 # already visited
marked = [NOT] * N # if >= 0, it means depth within the DFS
def DFS(n, prof=0):
if marked[n] == FIN:
return # only when there are several connected components
if marked[n] != NOT:
return marked[n]
marked[n] = prof
m = float('inf')
count = 0 # useful only for prof == 0
for v in graph[n]:
if marked[v] != FIN and marked[v] != prof - 1:
count += 1
r = DFS(v, prof+1)
if r <= prof:
edges.discard(tuple(sorted((n, v))))
if prof and r >= prof: # only if we are not at root
nodes.add(n)
m = min(m, r)
# root is an articulation point iff it has at least 2 children
if prof == 0 and count >= 2:
nodes.add(n)
marked[n] = FIN
return m
for r in range(N):
DFS(r) # we can count connected components by nb += DFS(r)
setrecursionlimit(recursionlimit)
return nodes, edges | def cut_nodes_edges2(graph) | Bi-connected components, alternative recursive implementation
:param graph: undirected graph. in listlist format. Cannot be in listdict format.
:assumes: graph has about 5000 vertices at most, otherwise memory limit is reached
:returns: a tuple with the list of cut-nodes and the list of cut-edges
:complexity: `O(|V|+|E|)` in average, `O(|V|+|E|^2)` in worst case due to use of dictionary | 5.276485 | 5.225827 | 1.009694 |
A = 0
for i in range(len(p)):
A += p[i - 1][0] * p[i][1] - p[i][0] * p[i - 1][1]
return A / 2. | def area(p) | Area of a polygon
:param p: list of the points taken in any orientation,
p[0] can differ from p[-1]
:returns: area
:complexity: linear | 1.937161 | 2.004974 | 0.966177 |
n = len(polygon)
order = list(range(n))
order.sort(key=lambda i: polygon[i]) # lexicographic order
rank_to_y = list(set(p[1] for p in polygon))
rank_to_y.sort()
y_to_rank = {y: rank for rank, y in enumerate(rank_to_y)}
S = RangeMinQuery([0] * len(rank_to_y)) # sweep structure
last_y = None
for i in order:
x, y = polygon[i]
rank = y_to_rank[y]
# -- type of point
right_x = max(polygon[i - 1][0], polygon[(i + 1) % n][0])
left = x < right_x
below_y = min(polygon[i - 1][1], polygon[(i + 1) % n][1])
high = y > below_y
if left: # y does not need to be in S yet
if S[rank]:
return False # two horizontal segments intersect
S[rank] = -1 # add y to S
else:
S[rank] = 0 # remove y from S
if high:
lo = y_to_rank[below_y] # check S between [lo + 1, rank - 1]
if (below_y != last_y or last_y == y or
rank - lo >= 2 and S.range_min(lo + 1, rank)):
return False # horiz. & vert. segments intersect
last_y = y # remember for next iteration
return True | def is_simple(polygon) | Test if a rectilinear polygon is simple
:param polygon: list of points as (x,y) pairs along the closed polygon
:returns: True if the segments do not intersect
:complexity: O(n log n) for n=len(polygon) | 4.173465 | 4.219836 | 0.989011 |
shuffle(S)
assert len(S) >= 2
p = S[0]
q = S[1]
d = dist(p, q)
while d > 0:
r = improve(S, d)
if r:
d, p, q = r
else:
break
return p, q | def closest_points(S) | Closest pair of points
:param S: list of points
:requires: size of S at least 2
:modifies: changes the order in S
:returns: pair of points p,q from S with minimum Euclidean distance
:complexity: expected linear time | 3.126878 | 3.142718 | 0.99496 |
n = len(x)
p = [None] * n
h = [None]
b = [float('-inf')] # - infinity
for i in range(n):
if x[i] > b[-1]:
p[i] = h[-1]
h.append(i)
b.append(x[i])
else:
# -- binary search: b[k - 1] < x[i] <= b[k]
k = bisect_left(b, x[i])
h[k] = i
b[k] = x[i]
p[i] = h[k - 1]
# extract solution
q = h[-1]
s = []
while q is not None:
s.append(x[q])
q = p[q]
return s[::-1] | def longest_increasing_subsequence(x) | Longest increasing subsequence
:param x: sequence
:returns: longest strictly increasing subsequence y
:complexity: `O(|x|*log(|y|))` | 2.641481 | 2.689547 | 0.982129 |
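A minimal usage sketch with a hypothetical sequence; longest_increasing_subsequence and bisect.bisect_left are assumed in scope.

print(longest_increasing_subsequence([3, 1, 4, 1, 5, 9, 2, 6]))   # [1, 4, 5, 6]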
seen[node] = True
for neighbor in graph[node]:
if not seen[neighbor]:
dfs_recursive(graph, neighbor, seen) | def dfs_recursive(graph, node, seen) | DFS, detect connected component, recursive implementation
:param graph: directed graph in listlist or listdict format
:param int node: to start graph exploration
:param boolean-table seen: will be set true for the connected component
containing node.
:complexity: `O(|V|+|E|)` | 1.927156 | 3.227336 | 0.597135 |
seen[start] = True
to_visit = [start]
while to_visit:
node = to_visit.pop()
for neighbor in graph[node]:
if not seen[neighbor]:
seen[neighbor] = True
to_visit.append(neighbor) | def dfs_iterative(graph, start, seen) | DFS, detect connected component, iterative implementation
:param graph: directed graph in listlist or listdict format
:param int node: to start graph exploration
:param boolean-table seen: will be set true for the connected component
containing node.
:complexity: `O(|V|+|E|)` | 1.738724 | 2.347103 | 0.740796 |
to_visit = [start]
prec = [None] * len(graph)
while to_visit: # an empty queue equals False
node = to_visit.pop()
for neighbor in graph[node]:
if prec[neighbor] is None:
prec[neighbor] = node
to_visit.append(neighbor)
return prec | def dfs_tree(graph, start=0) | DFS, build DFS tree in unweighted graph
:param graph: directed graph in listlist or listdict format
:param int start: source vertex
:returns: precedence table
:complexity: `O(|V|+|E|)` | 3.213079 | 3.30424 | 0.972411 |
height = len(grid)
width = len(grid[0])
grid[i][j] = mark # mark path
for ni, nj in [(i + 1, j), (i, j + 1),
(i - 1, j), (i, j - 1)]:
if 0 <= ni < height and 0 <= nj < width:
if grid[ni][nj] == free:
dfs_grid_recursive(grid, ni, nj) | def dfs_grid_recursive(grid, i, j, mark='X', free='.') | DFS on a grid, mark connected component, recursive version
:param grid: matrix, 4-neighborhood
:param i,j: cell in this matrix, start of DFS exploration
:param free: symbol for walkable cells
:param mark: symbol to overwrite visited vertices
:complexity: linear | 2.037915 | 2.089715 | 0.975212 |
height = len(grid)
width = len(grid[0])
to_visit = [(i, j)]
grid[i][j] = mark
while to_visit:
i1, j1 = to_visit.pop()
for i2, j2 in [(i1 + 1, j1), (i1, j1 + 1),
(i1 - 1, j1), (i1, j1 - 1)]:
if (0 <= i2 < height and 0 <= j2 < width and
grid[i2][j2] == free):
grid[i2][j2] = mark # mark path
to_visit.append((i2, j2)) | def dfs_grid(grid, i, j, mark='X', free='.') | DFS on a grid, mark connected component, iterative version
:param grid: matrix, 4-neighborhood
:param i,j: cell in this matrix, start of DFS exploration
:param free: symbol for walkable cells
:param mark: symbol to overwrite visited vertices
:complexity: linear | 1.640324 | 1.66959 | 0.982471 |
n = len(graph)
prec = [None] * n # ancestor marks for visited vertices
for u in range(n):
if prec[u] is None: # unvisited vertex
S = [u] # start new DFS
prec[u] = u # mark root (not necessary for this algorithm)
while S:
u = S.pop()
for v in graph[u]: # for all neighbors
if v != prec[u]: # except arcs to father in DFS tree
if prec[v] is not None:
cycle = [v, u] # cycle found, (u,v) back edge
while u != prec[v] and u != prec[u]: # directed
u = prec[u] # climb up the tree
cycle.append(u)
return cycle
else:
prec[v] = u # v is new vertex in tree
S.append(v)
return None | def find_cycle(graph) | find a cycle in an undirected graph
:param graph: undirected graph in listlist or listdict format
:returns: list of vertices in a cycle or None
:complexity: `O(|V|+|E|)` | 4.766705 | 4.673367 | 1.019972 |
if isinstance(expr, tuple):
(left, op, right) = expr
lval = arithm_expr_eval(cell, left)
rval = arithm_expr_eval(cell, right)
if op == '+':
return lval + rval
if op == '-':
return lval - rval
if op == '*':
return lval * rval
if op == '/':
return lval // rval
elif isinstance(expr, int):
return expr
else:
cell[expr] = arithm_expr_eval(cell, cell[expr])
return cell[expr] | def arithm_expr_eval(cell, expr) | Evaluates a given expression
:param expr: expression
:param cell: dictionary variable name -> expression
:returns: numerical value of expression
:complexity: linear | 1.535183 | 1.686752 | 0.910141 |
vals = []
ops = []
for tok in line + [';']:
if tok in priority: # tok is an operator
while (tok != '(' and ops and
priority[ops[-1]] >= priority[tok]):
right = vals.pop()
left = vals.pop()
vals.append((left, ops.pop(), right))
if tok == ')':
ops.pop() # this is the corresponding '('
else:
ops.append(tok)
elif tok.isdigit(): # tok is an integer
vals.append(int(tok))
else: # tok is an identifier
vals.append(tok)
return vals.pop() | def arithm_expr_parse(line) | Constructs an arithmetic expression tree
:param line: list of token strings containing the expression
:returns: expression tree
:complexity: linear | 3.238914 | 3.163956 | 1.023691 |
sum = 0
while i > 0:
sum += self.s[i]
i -= (i & -i)
return sum | def prefixSum(self, i) | :param int i: non negative
:returns: t[1] + ... + t[i] | 2.810672 | 2.9662 | 0.947567 |
return self.prefixSum(b) - self.prefixSum(a-1) | def intervalSum(self, a, b) | :param int a b: with 1 <= a <= b
:returns: t[a] + ... + t[b] | 5.546747 | 4.140946 | 1.339488 |
assert i > 0
while i < len(self.s):
self.s[i] += val
i += (i & -i) | def add(self, i, val) | :param int i: positive
:modifies: adds val to t[i] | 3.515872 | 3.356416 | 1.047508 |
self.add(a, +val)
self.add(b + 1, -val) | def intervalAdd(self, a, b, val) | Variant, adds val to t[a], to t[a + 1] ... and to t[b]
:param int a b: with 1 <= a <= b | 6.574204 | 7.103208 | 0.925526 |
visitU[u] = True
for v in bigraph[u]:
if not visitV[v]:
visitV[v] = True
assert matchV[v] is not None # otherwise match is not maximum
_alternate(matchV[v], bigraph, visitU, visitV, matchV) | def _alternate(u, bigraph, visitU, visitV, matchV) | extend alternating tree from free vertex u.
visitU, visitV mark all vertices covered by the tree. | 3.090788 | 3.256078 | 0.949237 |
V = range(len(bigraph))
matchV = max_bipartite_matching(bigraph)
matchU = [None for u in V]
for v in V: # -- build the mapping from U to V
if matchV[v] is not None:
matchU[matchV[v]] = v
visitU = [False for u in V] # -- build max alternating forest
visitV = [False for v in V]
for u in V:
if matchU[u] is None: # -- starting with free vertices in U
_alternate(u, bigraph, visitU, visitV, matchV)
inverse = [not b for b in visitU]
return (inverse, visitV) | def bipartite_vertex_cover(bigraph) | Bipartite minimum vertex cover by Koenig's theorem
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
:returns: boolean table for U, boolean table for V
:comment: selected vertices form a minimum vertex cover,
i.e. every edge is adjacent to at least one selected vertex
and number of selected vertices is minimum
:complexity: `O(|V|*|E|)` | 4.811944 | 4.777521 | 1.007205 |
if R == []:
return 0
X = []
Y = []
for j in range(len(R)):
(x1, y1, x2, y2) = R[j]
assert x1 <= x2 and y1 <= y2
X.append(x1)
X.append(x2)
Y.append((y1, +1, j)) # generate events
Y.append((y2, -1, j))
X.sort()
Y.sort()
X2i = {X[i]: i for i in range(len(X))}
L = [X[i + 1] - X[i] for i in range(len(X) - 1)]
C = Cover_query(L)
area = 0
last = 0
for (y, delta, j) in Y:
area += (y - last) * C.cover()
last = y
(x1, y1, x2, y2) = R[j]
i = X2i[x1]
k = X2i[x2]
C.change(i, k, delta)
return area | def union_rectangles(R) | Area of union of rectangles
:param R: list of rectangles defined by (x1, y1, x2, y2)
where (x1, y1) is top left corner and (x2, y2) bottom right corner
:returns: area
:complexity: :math:`O(n^2)` | 2.827785 | 2.843084 | 0.994619 |
self._change(1, 0, self.N, i, k, delta) | def change(self, i, k, delta) | when delta = +1, adds an interval [i, k], when delta = -1, removes it
:complexity: O(log L) | 8.652453 | 8.289505 | 1.043784 |
formula = []
for line in open(filename, 'r'):
line = line.strip()
if line[0] == "#":
continue
lit = line.split(":-")
if len(lit) == 1:
posvar = lit[0]
negvars = []
else:
assert len(lit) == 2
posvar = lit[0].strip()
if posvar == '':
posvar = None
negvars = lit[1].split(',')
for i in range(len(negvars)):
negvars[i] = negvars[i].strip()
formula.append((posvar, negvars))
return formula | def read(filename) | reads a Horn SAT formula from a text file
:file format:
# comment
A # clause with unique positive literal
:- A # clause with unique negative literal
A :- B, C, D # clause where A is positive and B,C,D negative
# variables are strings without spaces | 2.337332 | 2.094463 | 1.115958 |
# --- construct data structures
CLAUSES = range(len(formula))
score = [0 for c in CLAUSES] # number of negative vars that are not yet in solution
posvar_in_clause = [None for c in CLAUSES] # the unique positive variable of a clause (if any)
clauses_with_negvar = defaultdict(set) # all clauses where a variable appears negatively
for c in CLAUSES:
posvar, negvars = formula[c]
score[c] = len(set(negvars)) # do not count twice repeated negative variables
posvar_in_clause[c] = posvar
for v in negvars:
clauses_with_negvar[v].add(c)
pool = [set() for s in range(max(score) + 1)] # create the pool
for c in CLAUSES:
pool[score[c]].add(c) # pool[s] = set of clauses with score s
# --- solve Horn SAT formula
solution = set() # contains all variables set to True
while pool[0]:
curr = pool[0].pop() # arbitrary zero score clause
v = posvar_in_clause[curr]
if v == None: # formula is not satisfiable
return None
if v in solution or curr in clauses_with_negvar[v]:
continue # clause is already satisfied
solution.add(v)
for c in clauses_with_negvar[v]: # update score
pool[score[c]].remove(c)
score[c] -= 1
pool[score[c]].add(c) # change c to lower score in pool
return solution | def horn_sat(formula) | Solving a HORN Sat formula
:param formula: list of couple(posvar, negvars).
negvars is a list of the negative variables and can be empty.
posvar is the positive variable and can be None.
Variables can be any hashable objects, as integers or strings
for example.
:returns: None if formula is not satisfiable, else a minimal set of variables
that have to be set to true in order to satisfy the formula.
:complexity: linear | 4.123975 | 3.851963 | 1.070616 |
assert source != target
add_reverse_arcs(graph, capacity)
Q = deque()
total = 0
n = len(graph)
flow = [[0] * n for u in range(n)] # flow initially empty
while True: # repeat while we can increase
Q.appendleft(source)
lev = [None] * n # build levels, None = inaccessible
lev[source] = 0 # by BFS
while Q:
u = Q.pop()
for v in graph[u]:
if lev[v] is None and capacity[u][v] > flow[u][v]:
lev[v] = lev[u] + 1
Q.appendleft(v)
if lev[target] is None: # stop if sink is not reachable
return flow, total
up_bound = sum(capacity[source][v] for v in graph[source]) - total
total += _dinic_step(graph, capacity, lev, flow, source, target,
up_bound) | def dinic(graph, capacity, source, target) | Maximum flow by Dinic
:param graph: directed graph in listlist or listdict format
:param capacity: in matrix format or same listdict graph
:param int source: vertex
:param int target: vertex
:returns: skew symmetric flow matrix, flow value
:complexity: :math:`O(|V|^2 |E|)` | 4.284496 | 4.309346 | 0.994234 |
if limit <= 0:
return 0
if u == target:
return limit
val = 0
for v in graph[u]:
residual = capacity[u][v] - flow[u][v]
if lev[v] == lev[u] + 1 and residual > 0:
z = min(limit, residual)
aug = _dinic_step(graph, capacity, lev, flow, v, target, z)
flow[u][v] += aug
flow[v][u] -= aug
val += aug
limit -= aug
if val == 0:
lev[u] = None # remove unreachable node
return val | def _dinic_step(graph, capacity, lev, flow, u, target, limit) | try to push as much flow as possible from u to target, without exceeding limit | 2.816087 | 2.878124 | 0.978445 |
assert x not in self.rank
i = len(self.heap)
self.heap.append(x) # add a new leaf
self.rank[x] = i
self.up(i) | def push(self, x) | Insert new element x in the heap.
Assumption: x is not already in the heap | 4.966285 | 4.447943 | 1.116535 |
root = self.heap[1]
del self.rank[root]
x = self.heap.pop() # remove last leaf
if self: # if heap is not empty
self.heap[1] = x # put last leaf to root
self.rank[x] = 1
self.down(1) # maintain heap order
return root | def pop(self) | Remove and return smallest element | 5.026011 | 5.011926 | 1.00281 |
x = self.heap[i]
while i > 1 and x < self.heap[i // 2]:
self.heap[i] = self.heap[i // 2]
self.rank[self.heap[i // 2]] = i
i //= 2
self.heap[i] = x # insertion index found
self.rank[x] = i | def up(self, i) | The value of heap[i] has decreased. Maintain heap invariant. | 2.713619 | 2.351341 | 1.154073 |
x = self.heap[i]
n = len(self.heap)
while True:
left = 2 * i # climb down the tree
right = left + 1
if (right < n and self.heap[right] < x and
self.heap[right] < self.heap[left]):
self.heap[i] = self.heap[right]
self.rank[self.heap[right]] = i # go back up right child
i = right
elif left < n and self.heap[left] < x:
self.heap[i] = self.heap[left]
self.rank[self.heap[left]] = i # go back up left child
i = left
else:
self.heap[i] = x # insertion index found
self.rank[x] = i
return | def down(self, i) | the value of heap[i] has increased. Maintain heap invariant. | 2.526685 | 2.389108 | 1.057585 |
i = self.rank[old] # change value at index i
del self.rank[old]
self.heap[i] = new
self.rank[new] = i
if old < new: # maintain heap order
self.down(i)
else:
self.up(i) | def update(self, old, new) | Replace an element in the heap | 4.724 | 4.253144 | 1.110708 |
uf = UnionFind(len(graph))
edges = []
for u in range(len(graph)):
for v in graph[u]:
edges.append((weight[u][v], u, v))
edges.sort()
mst = []
for w, u, v in edges:
if uf.union(u, v):
mst.append((u, v))
return mst | def kruskal(graph, weight) | Minimum spanning tree by Kruskal
:param graph: undirected graph in listlist or listdict format
:param weight: in matrix format or same listdict graph
:returns: list of edges of the tree
:complexity: ``O(|E|log|E|)`` | 1.858301 | 2.147258 | 0.86543 |
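A small usage sketch on a hypothetical undirected 4-vertex graph in listlist format with a weight matrix; kruskal and the UnionFind class from the same codebase are assumed in scope.

graph = [[1, 2], [0, 2], [0, 1, 3], [2]]
weight = [[0, 1, 3, 0],
          [1, 0, 2, 0],
          [3, 2, 0, 4],
          [0, 0, 4, 0]]
print(kruskal(graph, weight))   # [(0, 1), (1, 2), (2, 3)]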
if self.up[x] == x:
return x
else:
self.up[x] = self.find(self.up[x])
return self.up[x] | def find(self, x) | :returns: identifier of part containing x
:complexity: O(inverse_ackerman(n)) | 2.309671 | 2.378569 | 0.971034 |
repr_x = self.find(x)
repr_y = self.find(y)
if repr_x == repr_y: # already in the same component
return False
if self.rank[repr_x] == self.rank[repr_y]:
self.rank[repr_x] += 1
self.up[repr_y] = repr_x
elif self.rank[repr_x] > self.rank[repr_y]:
self.up[repr_y] = repr_x
else:
self.up[repr_x] = repr_y
return True | def union(self, x, y) | Merges part that contain x and part containing y
:returns: False if x, y are already in same part
:complexity: O(inverse_ackerman(n)) | 1.888129 | 1.986948 | 0.950266 |
self.prec = anchor.prec # point to neighbors
self.succ = anchor
self.succ.prec = self # make neighbors point to item
self.prec.succ = self | def insert(self, anchor) | insert list item before anchor | 10.312098 | 9.932246 | 1.038244 |
if not self.items: # was list empty ?
self.items = item # then this is the new head
item.insert(self.items) | def append(self, item) | add item to the end of the item list | 13.069942 | 12.483626 | 1.046967 |
DoubleLinkedListItem.remove(self) # remove from double linked list
if self.succ is self: # list was a singleton
self.theclass.items = None # class is empty
elif self.theclass.items is self: # oops, we removed the head
self.theclass.items = self.succ | def remove(self) | remove item from its class | 10.895876 | 9.001258 | 1.210484 |
has_split = [] # remember which classes split
for i in pivot:
if 0 <= i < len(self.items): # ignore if outside of domain
x = self.items[i]
c = x.theclass # c = class of x
if not c.split: # possibly create new split class
c.split = PartitionClass(c)
if self.classes is c:
self.classes = c.split # always make self.classes point to the first class
has_split.append(c)
x.remove() # remove from its class
x.theclass = c.split
c.split.append(x) # append to the split class
for c in has_split: # clean information about split classes
c.split = None
if not c.items: # delete class if it became empty
c.remove()
del c | def refine(self, pivot) | Split every class C in the partition into C intersection pivot and C setminus pivot
complexity: linear in size of pivot | 6.14483 | 5.841888 | 1.051857 |
return [[x.val for x in theclass.items] for theclass in self.classes] | def tolist(self) | produce a list representation of the partition | 13.084668 | 10.987804 | 1.190836 |
return [x.val for theclass in self.classes for x in theclass.items] | def order(self) | Produce a flatten list of the partition, ordered by classes | 18.233137 | 10.259227 | 1.777243 |
P = [True] * n
answ = [2]
for i in range(3, n, 2):
if P[i]:
answ.append(i)
for j in range(2 * i, n, i):
P[j] = False
return answ | def eratosthene(n) | Prime numbers by the sieve of Eratosthenes
:param n: positive integer
:assumes: n > 2
:returns: list of prime numbers <n
:complexity: O(n loglog n) | 2.652076 | 2.308539 | 1.148811 |
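A minimal usage sketch; eratosthene is assumed to be defined as above.

print(eratosthene(20))   # [2, 3, 5, 7, 11, 13, 17, 19]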
primes = []
factor = [0] * n
for x in range(2, n):
if not factor[x]: # no factor found
factor[x] = x # meaning x is prime
primes.append(x)
for p in primes: # loop over all non primes of the form p * x
if p > factor[x] or p * x >= n:
break
factor[p * x] = p
return primes, factor | def gries_misra(n) | Prime numbers by the sieve of Gries-Misra
Computes both the list of all prime numbers less than n,
and a table mapping every integer 2 ≤ x < n to its smallest prime factor
:param n: positive integer
:returns: list of prime numbers, and list of prime factors
:complexity: O(n) | 4.260213 | 4.118789 | 1.034336 |
n = len(graph)
A = [0] * n # A[v] = min residual cap. on path source->v
augm_path = [None] * n # None = node was not visited yet
Q = deque() # BFS
Q.append(source)
augm_path[source] = source
A[source] = float('inf')
while Q:
u = Q.popleft()
for v in graph[u]:
cuv = capacity[u][v]
residual = cuv - flow[u][v]
if residual > 0 and augm_path[v] is None:
augm_path[v] = u # store predecessor
A[v] = min(A[u], residual)
if v == target:
break
else:
Q.append(v)
return (augm_path, A[target]) | def _augment(graph, capacity, flow, source, target) | find a shortest augmenting path | 3.080327 | 2.977833 | 1.034419 |
add_reverse_arcs(graph, capacity)
V = range(len(graph))
flow = [[0 for v in V] for u in V]
while True:
augm_path, delta = _augment(graph, capacity, flow, source, target)
if delta == 0:
break
v = target # go back to source
while v != source:
u = augm_path[v] # augment flow
flow[u][v] += delta
flow[v][u] -= delta
v = u
return (flow, sum(flow[source])) | def edmonds_karp(graph, capacity, source, target) | Maximum flow by Edmonds-Karp
:param graph: directed graph in listlist or listdict format
:param capacity: in matrix format or same listdict graph
:param int source: vertex
:param int target: vertex
:returns: flow matrix, flow value
:complexity: :math:`O(|V|*|E|^2)` | 4.018068 | 4.19777 | 0.957191 |
n = len(x)
m = len(b)
assert len(A) == m and len(A[0]) == n
S = [] # put linear system in a single matrix S
for i in range(m):
S.append(A[i][:] + [b[i]])
S.append(list(range(n))) # indices in x
k = diagonalize(S, n, m)
if k < m:
for i in range(k, m):
if not is_zero(S[i][n]):
return GJ_ZERO_SOLUTIONS
for j in range(k):
x[S[m][j]] = S[j][n]
if k < n:
for j in range(k, n):
x[S[m][j]] = 0
return GJ_SEVERAL_SOLUTIONS
return GJ_SINGLE_SOLUTION | def gauss_jordan(A, x, b) | Linear equation system Ax=b by Gauss-Jordan
:param A: m by n matrix (m rows, n columns)
:param x: table of size n
:param b: table of size m
:modifies: x will contain solution if any
:returns int:
0 if no solution,
1 if solution unique,
2 otherwise
:complexity: :math:`O(n^2m)` | 3.651442 | 3.579134 | 1.020203 |
for d in range(k):
if s[i + d] != t[j + d]:
return False
return True | def matches(s, t, i, j, k) | tests if s[i:i + k] equals t[j:j + k] | 2.574063 | 2.321618 | 1.108737 |
hash_s = 0
hash_t = 0
len_s = len(s)
len_t = len(t)
last_pos = pow(DOMAIN, len_t - 1) % PRIME
if len_s < len_t:
return -1
for i in range(len_t): # preprocessing
hash_s = (DOMAIN * hash_s + ord(s[i])) % PRIME
hash_t = (DOMAIN * hash_t + ord(t[i])) % PRIME
for i in range(len_s - len_t + 1):
if hash_s == hash_t: # check character by character
if matches(s, t, i, 0, len_t):
return i
if i < len_s - len_t:
hash_s = roll_hash(hash_s, ord(s[i]), ord(s[i + len_t]),
last_pos)
return -1 | def rabin_karp_matching(s, t) | Find a substring by Rabin-Karp
:param s: the haystack string
:param t: the needle string
:returns: index i such that s[i: i + len(t)] == t, or -1
:complexity: O(len(s) + len(t)) in expected time,
and O(len(s) * len(t)) in worst case | 2.345708 | 2.590363 | 0.905552 |
last_pos = pow(DOMAIN, k - 1) % PRIME
pos = {}
assert k > 0
if len(s) < k or len(t) < k:
return None
hash_t = 0
for j in range(k): # store hashing values
hash_t = (DOMAIN * hash_t + ord(t[j])) % PRIME
for j in range(len(t) - k + 1):
if hash_t in pos:
pos[hash_t].append(j)
else:
pos[hash_t] = [j]
if j < len(t) - k:
hash_t = roll_hash(hash_t, ord(t[j]), ord(t[j + k]), last_pos)
hash_s = 0
for i in range(k): # preprocessing
hash_s = (DOMAIN * hash_s + ord(s[i])) % PRIME
for i in range(len(s) - k + 1):
if hash_s in pos: # is this signature in s?
for j in pos[hash_s]:
if matches(s, t, i, j, k):
return (i, j)
if i < len(s) - k:
hash_s = roll_hash(hash_s, ord(s[i]), ord(s[i + k]), last_pos)
return None | def rabin_karp_factor(s, t, k) | Find a common factor by Rabin-Karp
:param string s: haystack
:param string t: needle
:param int k: factor length
:returns: (i, j) such that s[i:i + k] == t[j:j + k] or None.
In case of tie, lexicographical minimum (i, j) is returned
:complexity: O(len(s) + len(t)) in expected time,
and O(len(s) + len(t) * k) in worst case | 2.489112 | 2.599501 | 0.957535 |
best = (float('-inf'), 0, 0, 0)
S = []
H2 = H + [float('-inf')] # extra element to empty the queue
for right in range(len(H2)):
x = H2[right]
left = right
while len(S) > 0 and S[-1][1] >= x:
left, height = S.pop()
# first element is area of candidate
rect = (height * (right - left), left, height, right)
if rect > best:
best = rect
S.append((left, x))
return best | def rectangles_from_histogram(H) | Largest Rectangular Area in a Histogram
:param H: histogram table
:returns: area, left, height, right, rect. is [0, height] * [left, right)
:complexity: linear | 4.74811 | 4.675136 | 1.015609 |