code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
p = np.array(bitstring_prep_histograms, dtype=float).T p /= p.sum(axis=0)[np.newaxis, :] return p
def estimate_assignment_probs(bitstring_prep_histograms)
Compute the estimated assignment probability matrix for a sequence of single shot histograms obtained by running the programs generated by `basis_state_preps()`. bitstring_prep_histograms[i,j] = the number of measured outcomes j when running program i. The assignment probability matrix is obtained by transposing and then normalizing the columns: p[j, i] = probability to measure outcome j when preparing the state with program i. :param list|numpy.ndarray bitstring_prep_histograms: A nested list or 2d array with shape (d, d), where ``d = 2**nqubits`` is the dimension of the Hilbert space. The first axis varies over the state preparation program index, the second axis corresponds to the measured bitstring. :return: The assignment probability matrix. :rtype: numpy.ndarray
3.111325
3.731136
0.833881
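A minimal usage sketch for estimate_assignment_probs on a two-qubit device; the histogram counts below are made-up illustrative numbers.

import numpy as np

# Hypothetical single-shot histograms: row i counts outcomes j for prep program i.
hists = [[96, 2, 1, 1],
         [3, 95, 1, 1],
         [2, 2, 94, 2],
         [1, 1, 3, 95]]
p = estimate_assignment_probs(hists)
# p[j, i] estimates Pr(measure j | prepared i); each column sums to 1.
assert np.allclose(p.sum(axis=0), 1.0)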
im = ax.imshow(ptransfermatrix, interpolation="nearest", cmap=rigetti_3_color_cm, vmin=-1, vmax=1) dim = len(labels) plt.colorbar(im, ax=ax) ax.set_xticks(range(dim)) ax.set_xlabel("Input Pauli Operator", fontsize=20) ax.set_yticks(range(dim)) ax.set_ylabel("Output Pauli Operator", fontsize=20) ax.set_title(title, fontsize=25) ax.set_xticklabels(labels, rotation=45) ax.set_yticklabels(labels) ax.grid(False) return ax
def plot_pauli_transfer_matrix(ptransfermatrix, ax, labels, title)
Visualize the Pauli Transfer Matrix of a process. :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix :param ax: The matplotlib axes. :param labels: The labels for the operator basis states. :param title: The title for the plot :return: The modified axis object. :rtype: AxesSubplot
2.497169
2.552888
0.978174
rho_amps = rho.data.toarray().ravel() nqc = int(round(np.log2(rho.shape[0]))) if ax is None: fig = plt.figure(figsize=(10, 6)) ax = Axes3D(fig, azim=-35, elev=35) cmap = rigetti_4_color_cm norm = mpl.colors.Normalize(-np.pi, np.pi) colors = cmap(norm(np.angle(rho_amps))) dzs = abs(rho_amps) colors[:, 3] = 1.0 * (dzs > threshold) xs, ys = np.meshgrid(range(2 ** nqc), range(2 ** nqc)) xs = xs.ravel() ys = ys.ravel() zs = np.zeros_like(xs) dxs = dys = np.ones_like(xs) * 0.8 _ = ax.bar3d(xs, ys, zs, dxs, dys, dzs, color=colors) ax.set_xticks(np.arange(2 ** nqc) + .4) ax.set_xticklabels(basis_labels(nqc)) ax.set_yticks(np.arange(2 ** nqc) + .4) ax.set_yticklabels(basis_labels(nqc)) ax.set_zlim3d([0, 1]) cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, pad=.1) cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) cb.set_ticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi]) cb.set_ticklabels((r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$')) cb.set_label('arg') ax.view_init(azim=-55, elev=45) ax.set_title(title) return ax
def state_histogram(rho, ax=None, title="", threshold=0.001)
Visualize a density matrix as a 3d bar plot with complex phase encoded as the bar color. This code is a modified version of `an equivalent function in qutip <http://qutip.org/docs/3.1.0/apidoc/functions.html#qutip.visualization.matrix_histogram_complex>`_ which is released under the (New) BSD license. :param qutip.Qobj rho: The density matrix. :param Axes3D ax: The axes object. :param str title: The axes title. :param float threshold: (Optional) minimum magnitude of matrix elements. Values below this are hidden. :return: The axis :rtype: mpl_toolkits.mplot3d.Axes3D
2.304015
2.244376
1.026573
ret = 0 for b in bitlist: ret = (ret << 1) | (int(b) & 1) return ret
def bitlist_to_int(bitlist)
Convert a binary bitstring into the corresponding unsigned integer. :param list bitlist: A list of ones and zeros. :return: The corresponding integer. :rtype: int
2.960408
3.19097
0.927745
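For example, the bit list is read most-significant bit first:

bitlist_to_int([1, 0, 1])  # (1 << 2) | (0 << 1) | 1 == 5
bitlist_to_int([0, 1, 1])  # 3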
num_qubits = len(qubits) dimension = 2 ** num_qubits hists = [] preps = basis_state_preps(*qubits) jobs = [] _log.info('Submitting jobs...') for jj, p in izip(TRANGE(dimension), preps): jobs.append(cxn.run_and_measure_async(p, qubits, nsamples)) _log.info('Waiting for results...') for jj, job_id in izip(TRANGE(dimension), jobs): job = cxn.wait_for_job(job_id) results = job.result() idxs = list(map(bitlist_to_int, results)) hists.append(make_histogram(idxs, dimension)) return estimate_assignment_probs(hists)
def sample_assignment_probs(qubits, nsamples, cxn)
Sample the assignment probabilities of qubits using nsamples per measurement, and then compute the estimated assignment probability matrix. See the docstring for estimate_assignment_probs for more information. :param list qubits: Qubits to sample the assignment probabilities for. :param int nsamples: The number of samples to use in each measurement. :param QPUConnection|QVMConnection cxn: The Connection object to connect to Forest. :return: The assignment probability matrix. :rtype: numpy.ndarray
4.485295
4.506832
0.995221
if shuffle: n_groups = len(programs) n_progs_per_group = len(programs[0]) permutations = np.outer(np.ones(n_groups, dtype=int), np.arange(n_progs_per_group, dtype=int)) inverse_permutations = np.zeros_like(permutations) for jj in range(n_groups): # in-place operation np.random.shuffle(permutations[jj]) # store inverse permutation inverse_permutations[jj] = np.argsort(permutations[jj]) # apply to programs shuffled_programs = np.empty((n_groups, n_progs_per_group), dtype=object) for jdx, (progsj, pj) in enumerate(zip(programs, permutations)): shuffled_programs[jdx] = [progsj[pjk] for pjk in pj] shuffled_results = _run_in_parallel(shuffled_programs, nsamples, cxn) # reverse shuffling of results results = np.array([resultsj[pj] for resultsj, pj in zip(shuffled_results, inverse_permutations)]) return results else: return _run_in_parallel(programs, nsamples, cxn)
def run_in_parallel(programs, nsamples, cxn, shuffle=True)
Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of programs that executes the input programs in parallel. Optionally randomize within each qubit-specific sequence. The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates over disjoint sets of qubits that the programs involve and the inner axis iterates over a sequence of related programs, e.g., tomography sequences, on the same set of qubits. :param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the inner list over programs to run on those qubits, e.g., tomographic sequences. :param int nsamples: Number of repetitions for executing each Program. :param QPUConnection|QVMConnection cxn: The quantum machine connection. :param bool shuffle: If True, the order of each qubit-specific sequence (2nd axis) is randomized. Default is True. :return: An array of 2d arrays that provide bitstring histograms for each input program. The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the inner 2d array iterates over the programs for that group and the innermost axis iterates over all possible bitstrings for the qubit group under consideration. :rtype: np.array
2.404378
2.447537
0.982367
n_groups = len(programs) n_progs_per_group = len(programs[0]) for progs in programs[1:]: if not len(progs) == n_progs_per_group: raise ValueError("Non-rectangular grid of programs specified: {}".format(programs)) # identify qubit groups, ensure disjointedness qubit_groups = [set() for _ in range(n_groups)] for group_idx, group in enumerate(qubit_groups): for prog in programs[group_idx]: group.update(set(prog.get_qubits())) # test that groups are actually disjoint by comparing with the ones already created for other_idx, other_group in enumerate(qubit_groups[:group_idx]): intersection = other_group & group if intersection: raise ValueError( "Programs from groups {} and {} intersect on qubits {}".format( other_idx, group_idx, intersection)) qubit_groups = [sorted(c) for c in qubit_groups] all_qubits = sum(qubit_groups, []) n_qubits_per_group = [len(c) for c in qubit_groups] # create joint programs parallel_programs = [sum(progsj, Program()) for progsj in zip(*programs)] # execute on cxn all_results = [] for i, prog in izip(TRANGE(n_progs_per_group), parallel_programs): try: results = cxn.run_and_measure(prog, all_qubits, nsamples) all_results.append(np.array(results)) except QPUError as e: _log.error("Could not execute parallel program:\n%s", prog.out()) raise e # generate histograms per qubit group all_histograms = np.array([np.zeros((n_progs_per_group, 2 ** n_qubits), dtype=int) for n_qubits in n_qubits_per_group]) for idx, results in enumerate(all_results): n_qubits_seen = 0 for jdx, n_qubits in enumerate(n_qubits_per_group): group_results = results[:, n_qubits_seen:n_qubits_seen + n_qubits] outcome_labels = list(map(bitlist_to_int, group_results)) dimension = 2 ** n_qubits all_histograms[jdx][idx] = make_histogram(outcome_labels, dimension) n_qubits_seen += n_qubits return all_histograms
def _run_in_parallel(programs, nsamples, cxn)
See docs for ``run_in_parallel()``. :param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the inner list over programs to run on those qubits, e.g., tomographic sequences. :param int nsamples: Number of repetitions for executing each Program. :param QPUConnection|QVMConnection cxn: The quantum machine connection. :return: An array of 2d arrays that provide bitstring histograms for each input program. The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the inner 2d array iterates over the programs for that group and the innermost axis iterates over all possible bitstrings for the qubit group under consideration. The bitstrings are enumerated in lexicographical order, i.e., for a program with qubits {3, 1, 2} the qubits are first sorted -> [1, 2, 3] and then the bitstrings are enumerated as 000, 001, 010, where the bits ijk correspond to the states of qubits 1, 2, and 3, respectively. :rtype: np.array
3.032603
2.888785
1.049785
if not isinstance(pauli_sums, PauliSum): raise TypeError("pauli_sums must be a PauliSum object") new_term = sI(0) * 0.0 for term in pauli_sums: new_term += term_with_coeff(term, term.coefficient.real) return new_term
def remove_imaginary_terms(pauli_sums: PauliSum) -> PauliSum
Remove the imaginary component of each term in a Pauli sum. :param pauli_sums: The Pauli sum to process. :return: a purely Hermitian Pauli sum.
5.366851
5.645974
0.950563
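A short sketch of remove_imaginary_terms, assuming pyQuil's Pauli operators are importable as shown:

from pyquil.paulis import PauliSum, sX, sY

ps = PauliSum([(1.0 + 0.5j) * sX(0), 2j * sY(1)])
hermitian = remove_imaginary_terms(ps)
# Each coefficient keeps only its real part: 1.0*X0 remains, 2j*Y1 drops to 0*Y1.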
meas_basis_change = Program() for index, gate in pauli_term: if gate == 'X': meas_basis_change.inst(RY(-np.pi / 2, index)) elif gate == 'Y': meas_basis_change.inst(RX(np.pi / 2, index)) elif gate == 'Z': pass else: raise ValueError("Unknown Pauli operator: {}".format(gate)) return meas_basis_change
def get_rotation_program(pauli_term: PauliTerm) -> Program
Generate a rotation program so that the pauli term is diagonal. :param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations. :return: The rotation program.
2.502192
2.548092
0.981987
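For instance, diagonalizing the term X0*Y1 requires a RY(-pi/2) on qubit 0 and a RX(pi/2) on qubit 1; a minimal sketch, assuming pyQuil imports:

from pyquil.paulis import sX, sY

term = sX(0) * sY(1)
prog = get_rotation_program(term)
# prog contains something like: RY(-pi/2) 0 followed by RX(pi/2) 1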
rows, cols = m.shape assert rows == cols n = rows I = np.eye(n) Z = np.zeros((n, n)) controlled_m = np.bmat([[I, Z], [Z, m]]) return controlled_m
def controlled(m: np.ndarray) -> np.ndarray
Make a one-qubit-controlled version of a matrix. :param m: A matrix. :return: A controlled version of that matrix.
3.798263
3.416131
1.111861
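As a concrete check, controlling the Pauli-X matrix reproduces CNOT (control on the first qubit of the doubled space):

import numpy as np

X = np.array([[0, 1], [1, 0]])
print(controlled(X))
# [[1. 0. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 0. 1.]
#  [0. 0. 1. 0.]]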
assert isinstance(accuracy, int) rows, cols = U.shape m = int(log2(rows)) output_qubits = range(0, accuracy) U_qubits = range(accuracy, accuracy + m) p = Program() ro = p.declare('ro', 'BIT', reg_offset + len(output_qubits)) # Hadamard initialization for i in output_qubits: p.inst(H(i)) # Controlled unitaries for i in output_qubits: if i > 0: U = np.dot(U, U) cU = controlled(U) name = "CONTROLLED-U{0}".format(2 ** i) # define the gate p.defgate(name, cU) # apply it p.inst((name, i) + tuple(U_qubits)) # Compute the QFT p = p + inverse_qft(output_qubits) # Perform the measurements for i in output_qubits: p.measure(i, ro[reg_offset + i]) return p
def phase_estimation(U: np.ndarray, accuracy: int, reg_offset: int = 0) -> Program
Generate a circuit for quantum phase estimation. :param U: A unitary matrix. :param accuracy: Number of bits of accuracy desired. :param reg_offset: Where to start writing measurements (default 0). :return: A Quil program to perform phase estimation.
3.911328
3.883209
1.007241
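A minimal sketch of phase estimation on the Pauli-Z matrix, whose eigenphases are 0 and 1/2; the surrounding Grove/pyQuil imports are assumed to be in scope:

import numpy as np

Z = np.array([[1, 0], [0, -1]])
prog = phase_estimation(Z, accuracy=3)
# Running prog on a QVM and reading the 3 measured bits as a binary fraction
# recovers the eigenphase of the eigenstate the register collapses onto.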
if isinstance(number, str): if number[0] == '-': n_sign = -1 else: n_sign = 1 elif isinstance(number, float): n_sign = np.sign(number) number = str(number) deci = 0 for ndx, val in enumerate(number.split('.')[-1]): deci += float(val) / 2**(ndx+1) deci *= n_sign return deci
def binary_float_to_decimal_float(number: Union[float, str]) -> float
Convert binary floating point to decimal floating point. :param number: Binary floating point. :return: Decimal floating point representation of binary floating point.
3.019392
3.129408
0.964845
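For example, the binary fraction '0.101' is 1/2 + 1/8:

binary_float_to_decimal_float('0.101')   # 0.625
binary_float_to_decimal_float('-0.101')  # -0.625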
measurements = np.asarray(measurements) stats = measurements.sum(axis=0) / len(measurements) stats_str = [str(int(i)) for i in np.round(stats[::-1][1:])] bf_str = '0.' + ''.join(stats_str) bf = float(bf_str) return bf
def measurements_to_bf(measurements: np.ndarray) -> float
Convert measurements into gradient binary fraction. :param measurements: Output measurements of gradient program. :return: Binary fraction representation of gradient estimate.
3.965471
3.814071
1.039695
program = Program() uniform_superimposer = Program().inst([H(qubit) for qubit in qubits]) program += uniform_superimposer if decompose_diffusion: diffusion = decomposed_diffusion_program(qubits) else: diffusion = diffusion_program(qubits) # To avoid redefining gates, we collect them before building our program. defined_gates = oracle.defined_gates + algorithm.defined_gates + diffusion.defined_gates for _ in range(num_iter): program += (oracle.instructions + algorithm.dagger().instructions + diffusion.instructions + algorithm.instructions) # We redefine the gates in the new program. for gate in defined_gates: program.defgate(gate.name, gate.matrix) return program
def amplification_circuit(algorithm: Program, oracle: Program, qubits: List[int], num_iter: int, decompose_diffusion: bool = False) -> Program
Returns a program that does ``num_iter`` rounds of amplification, given a measurement-less algorithm, an oracle, and a list of qubits to operate on. :param algorithm: A program representing a measurement-less algorithm run on qubits. :param oracle: An oracle that maps any basis vector ``|psi>`` to either ``+|psi>`` or ``-|psi>`` depending on whether ``|psi>`` is in the desirable subspace or the undesirable subspace. :param qubits: The qubits to operate on. :param num_iter: The number of amplification iterations to run. :param decompose_diffusion: If True, decompose the Grover diffusion gate into two-qubit gates. If False, use a defgate to define the gate. :return: The amplified algorithm.
3.862964
3.960703
0.975323
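A hedged sketch of a two-qubit Grover search for |11> built on amplification_circuit, using CZ as the oracle since it phase-flips exactly |11>; pyQuil imports assumed:

from pyquil import Program
from pyquil.gates import H, CZ

qubits = [0, 1]
algorithm = Program([H(q) for q in qubits])  # measurement-less state preparation
oracle = Program(CZ(0, 1))                   # flips the sign of |11>
grover = amplification_circuit(algorithm, oracle, qubits, num_iter=1)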
program = Program() if len(qubits) == 1: program.inst(Z(qubits[0])) else: program.inst([X(q) for q in qubits]) program.inst(H(qubits[-1])) program.inst(RZ(-np.pi, qubits[0])) program += (ControlledProgramBuilder() .with_controls(qubits[:-1]) .with_target(qubits[-1]) .with_operation(X_GATE) .with_gate_name(X_GATE_LABEL).build()) program.inst(RZ(-np.pi, qubits[0])) program.inst(H(qubits[-1])) program.inst([X(q) for q in qubits]) return program
def decomposed_diffusion_program(qubits: List[int]) -> Program
Constructs the diffusion operator used in Grover's Algorithm, acted on both sides by a Hadamard gate on each qubit. Note that this means that the matrix representation of this operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion operator, which is a :math:`2^{len(qubits)} \times 2^{len(qubits)}` sparse matrix, into :math:`\mathcal{O}(len(qubits)^2)` single- and two-qubit gates. See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum Database Search`_ for more information. .. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079 :param qubits: A list of ints corresponding to the qubits to operate on. The operator operates on bitstrings of the form ``|qubits[0], ..., qubits[-1]>``.
2.461009
2.705452
0.909648
pterm = PauliTerm('I', 0, 1.0) for conj, index in zip(conjugate, indices): pterm = pterm * self._operator_generator(index, conj) pterm = pterm.simplify() return pterm
def product_ops(self, indices, conjugate)
Convert a list of site indices and creation/annihilation labels to a product of Pauli operators via the Jordan-Wigner (JW) transformation. :param List indices: list of ints specifying the site the fermionic operator acts on, e.g. [0,2,4,6] :param List conjugate: list of -1, 1 specifying which of the indices are creation operators (-1) and which are annihilation operators (1), e.g. [-1,-1,1,1]
4.997725
5.206631
0.959877
pterm = PauliTerm('I', 0, 1.0) Zstring = PauliTerm('I', 0, 1.0) for j in range(index): Zstring = Zstring*PauliTerm('Z', j, 1.0) pterm1 = Zstring*PauliTerm('X', index, 0.5) scalar = 0.5 * conj * 1.0j pterm2 = Zstring*PauliTerm('Y', index, scalar) pterm = pterm * (pterm1 + pterm2) pterm = pterm.simplify() return pterm
def _operator_generator(index, conj)
Internal method to generate the Jordan-Wigner ladder operator at fermionic site 'index'; conj == -1 selects the creation operator, conj == +1 the annihilation operator.
3.275784
3.23945
1.011216
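Under this Jordan-Wigner convention the creation operator on site 2 carries a Z-string on sites 0 and 1; a sketch, assuming the helper is reachable at module level:

op = _operator_generator(2, -1)
# 0.5*Z0*Z1*X2 - 0.5j*Z0*Z1*Y2, i.e. Z0*Z1*(X2 - iY2)/2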
n_bits = len(mask) form_string = "{0:0" + str(n_bits) + "b}" bit_map_dct = {} for idx in range(2**n_bits): bit_string = form_string.format(idx) bit_map_dct[bit_string] = utils.bitwise_xor(bit_string, mask) return bit_map_dct
def create_1to1_bitmap(mask: str) -> Dict[str, str]
Create a bit map function (as a dictionary) for a given mask. e.g., for a mask :math:`m = 10` the return is a dictionary: >>> create_1to1_bitmap('10') ... { ... '00': '10', ... '01': '11', ... '10': '00', ... '11': '01' ... } :param mask: A binary mask as a string of 0's and 1's. :return: A dictionary containing a mapping of all possible bit strings of the same length as the mask's string and their mapped bit-string value.
2.728001
2.709969
1.006654
if not isinstance(graph, nx.Graph) and isinstance(graph, list): maxcut_graph = nx.Graph() for edge in graph: maxcut_graph.add_edge(*edge) graph = maxcut_graph.copy() cost_operators = [] driver_operators = [] for i, j in graph.edges(): cost_operators.append(PauliTerm("Z", i, 0.5)*PauliTerm("Z", j) + PauliTerm("I", 0, -0.5)) for i in graph.nodes(): driver_operators.append(PauliSum([PauliTerm("X", i, -1.0)])) if connection is None: connection = get_qc(f"{len(graph.nodes)}q-qvm") if minimizer_kwargs is None: minimizer_kwargs = {'method': 'Nelder-Mead', 'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2, 'disp': False}} if vqe_option is None: vqe_option = {'disp': print, 'return_all': True, 'samples': samples} qaoa_inst = QAOA(connection, list(graph.nodes()), steps=steps, cost_ham=cost_operators, ref_ham=driver_operators, store_basis=True, rand_seed=rand_seed, init_betas=initial_beta, init_gammas=initial_gamma, minimizer=minimize, minimizer_kwargs=minimizer_kwargs, vqe_options=vqe_option) return qaoa_inst
def maxcut_qaoa(graph, steps=1, rand_seed=None, connection=None, samples=None, initial_beta=None, initial_gamma=None, minimizer_kwargs=None, vqe_option=None)
Set up the QAOA instance for MaxCut on the given graph. :param graph: Graph definition. Either networkx or list of tuples :param steps: (Optional. Default=1) Trotterization order for the QAOA algorithm. :param rand_seed: (Optional. Default=None) random seed when beta and gamma angles are not provided. :param connection: (Optional) connection to the QVM. Default is None. :param samples: (Optional. Default=None) VQE option. Number of samples (circuit preparation and measurement) to use in operator averaging. :param initial_beta: (Optional. Default=None) Initial guess for beta parameters. :param initial_gamma: (Optional. Default=None) Initial guess for gamma parameters. :param minimizer_kwargs: (Optional. Default=None). Minimizer optional arguments. If None set to ``{'method': 'Nelder-Mead', 'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2, 'disp': False}}`` :param vqe_option: (Optional. Default=None). VQE optional arguments. If None set to ``vqe_option = {'disp': print_fun, 'return_all': True, 'samples': samples}``
2.572996
2.284247
1.126409
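A minimal sketch of setting up MaxCut QAOA on a three-node path graph; it assumes Grove's QAOA API (get_angles) and a local QVM reachable through get_qc, as in the function body:

graph = [(0, 1), (1, 2)]  # edges of a 3-node path
inst = maxcut_qaoa(graph, steps=1)
betas, gammas = inst.get_angles()  # runs the VQE optimization loop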
for gates in cartesian_product(TOMOGRAPHY_GATES.keys(), repeat=len(qubits)): tomography_program = Program() for qubit, gate in izip(qubits, gates): tomography_program.inst(gate(qubit)) yield tomography_program
def default_rotations(*qubits)
Generates the Quil programs for the tomographic pre- and post-rotations of any number of qubits. :param list qubits: A list of qubits to perform tomography on.
3.441456
2.85165
1.20683
for gates in cartesian_product(TOMOGRAPHY_GATES.values(), repeat=nqubits): yield qt.tensor(*gates)
def default_channel_ops(nqubits)
Generate the tomographic pre- and post-rotations of any number of qubits as qutip operators. :param int nqubits: The number of qubits to perform tomography on. :return: Qutip object corresponding to the tomographic rotation. :rtype: Qobj
9.955977
7.792881
1.277573
if not cls._tested: cls._tested = True np.random.seed(SEED) test_problem_dimension = 10 mat = np.random.randn(test_problem_dimension, test_problem_dimension) posmat = mat.dot(mat.T) posvar = cvxpy.Variable(test_problem_dimension, test_problem_dimension) prob = cvxpy.Problem(cvxpy.Minimize((cvxpy.trace(posmat * posvar) + cvxpy.norm(posvar))), [posvar >> 0, cvxpy.trace(posvar) >= 1.]) try: prob.solve(SOLVER) cls._functional = True except cvxpy.SolverError: # pragma no coverage _log.warning("No convex SDP solver found. You will not be able to solve" " tomography problems with matrix positivity constraints.") return cls._functional
def is_functional(cls)
Checks lazily whether a convex solver is installed that handles positivity constraints. :return: True if a solver supporting positivity constraints is installed. :rtype: bool
4.265232
3.840594
1.110566
if not isinstance(ket_op, int): raise TypeError("ket_op needs to be an integer") if not isinstance(bra_op, int): raise TypeError("bra_op needs to be an integer") if ket_op not in [0, 1] or bra_op not in [0, 1]: raise ValueError("bra and ket ops need to be either 0 or 1") if ket_op == 0 and bra_op == 0: return 0.5 * (sZ(index) + sI(index)) elif ket_op == 0 and bra_op == 1: return 0.5 * (sX(index) + 1j * sY(index)) elif ket_op == 1 and bra_op == 0: return 0.5 * (sX(index) - 1j * sY(index)) else: return 0.5 * (sI(index) - sZ(index))
def _single_projector_generator(ket_op, bra_op, index)
Generate the pauli sum terms corresponding to |ket_op><bra_op| :param ket_op: single qubit computational basis state :param bra_op: single qubit computational basis state :param index: qubit index to assign to the projector :return: pauli sum of single qubit projection operator :rtype: PauliSum
1.687763
1.701154
0.992129
projectors = [] for index, (ket_one_qubit, bra_one_qubit) in enumerate(zip(ket[::-1], bra[::-1])): projectors.append(_single_projector_generator(ket_one_qubit, bra_one_qubit, index)) return reduce(lambda x, y: x * y, projectors)
def projector_generator(ket, bra)
Generate a Pauli Sum that corresponds to the projection operator |ket><bra| note: ket and bra are numerically ordered such that ket = [msd, ..., lsd] where msd == most significant digit and lsd = least significant digit. :param List ket: string of zeros and ones corresponding to a computational basis state. :param List bra: string of zeros and ones corresponding to a computational basis state. :return: projector as a pauli sum :rtype: PauliSum
2.691397
2.84889
0.944718
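A small sketch: the projector |01><01| on two qubits (most-significant digit first, so qubit 1 is 0 and qubit 0 is 1):

proj = projector_generator([0, 1], [0, 1])
# Equals 0.25*(I - Z0)*(I + Z1): qubit 0 projected onto |1>, qubit 1 onto |0>.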
num_qubits = len(prep_program.get_qubits()) normalizer_ops = projector_generator(reference_state, reference_state) c0_coeff, _, _ = estimate_locally_commuting_operator( prep_program, normalizer_ops, variance_bound=variance_bound, quantum_resource=quantum_resource) c0_coeff = np.sqrt(c0_coeff) amplitudes = [] for ii in coeff_list: if ii == reference_state: amplitudes.append(c0_coeff) else: bra = list(map(int, np.binary_repr(ii, width=num_qubits))) c_ii_op = projector_generator(reference_state, bra) result = estimate_locally_commuting_operator( prep_program, c_ii_op, variance_bound=variance_bound, quantum_resource=quantum_resource) amplitudes.append(result[0] / c0_coeff) return amplitudes
def measure_wf_coefficients(prep_program, coeff_list, reference_state, quantum_resource, variance_bound=1.0E-6)
Measure a set of coefficients with a phase relative to the reference_state :param prep_program: pyQuil program to prepare the state :param coeff_list: list of integers labeling amplitudes to measure :param reference_state: Integer of the computational basis state to use as a reference :param quantum_resource: An instance of a quantum abstract machine :param variance_bound: Default 1.0E-6. Variance of the Monte Carlo estimator for the non-Hermitian operator :return: A list containing the reference_state amplitude followed by the coeff_list amplitudes
2.810386
2.898232
0.96969
num_qubits = len(prep_program.get_qubits()) amplitudes_to_measure = list(range(2 ** num_qubits)) amplitudes = measure_wf_coefficients(prep_program, amplitudes_to_measure, reference_state, quantum_resource, variance_bound=variance_bound) wavefunction = np.asarray(amplitudes) return wavefunction.reshape((-1, 1))
def measure_pure_state(prep_program, reference_state, quantum_resource, variance_bound=1.0E-6)
Measure the coefficients of the pure state :param prep_program: pyQuil program to prepare the state :param reference_state: Integer of the computational basis state to use as a reference :param quantum_resource: An instance of a quantum abstract machine :param variance_bound: Default 1.0E-6. Variance of the Monte Carlo estimator for the non-Hermitian operator :return: an estimate of the wavefunction as a numpy.ndarray
3.230632
3.394687
0.951673
if conj != -1 and conj != +1: raise ValueError("Improper conjugate coefficient") if index >= self.n_qubits or index < 0: raise IndexError("Operator index outside number of qubits for " "current Bravyi-Kitaev transform.") # parity set P(j). apply Z to, for parity sign. parity_set = [node.index for node in self.tree.get_parity_set(index)] # update set U(j). apply X to, for updating purposes. ancestors = [node.index for node in self.tree.get_update_set(index)] # remainder set C(j) = P(j) \ F(j) ancestor_children = [node.index for node in self.tree.get_remainder_set(index)] # Under Majorana basis, creation/annihilation operators given by # a^{\pm} = (c \mp id) / 2 # c_j = a_j + a_j^{\dagger} = X_{U(j)} X_j Z_{P(j)} c_maj = PauliTerm('X', index) for node_idx in parity_set: c_maj *= PauliTerm('Z', node_idx) for node_idx in ancestors: c_maj *= PauliTerm('X', node_idx) # d_j = i(a_j^{\dagger} - a_j) = X_{U(j)} Y_j Z_{C(j)} d_maj = PauliTerm('Y', index) for node_idx in ancestors: d_maj *= PauliTerm('X', node_idx) for node_idx in ancestor_children: d_maj *= PauliTerm('Z', node_idx) result = 0.5 * (c_maj + 1j * conj * d_maj) return result.simplify()
def _operator_generator(self, index, conj)
Internal method to generate the appropriate ladder operator at fermion orbital 'index'. If conj == -1 --> creation; if conj == +1 --> annihilation. :param int index: fermion orbital to generate the ladder operator at :param int conj: -1 for creation, +1 for annihilation
4.412524
4.334743
1.017944
self.defined_gates = set(STANDARD_GATE_NAMES) prog = self._recursive_builder(self.operation, self.gate_name, self.control_qubits, self.target_qubit) return prog
def build(self)
Builds this controlled gate. :return: The controlled gate, defined by this object. :rtype: Program
8.652682
7.95202
1.088111
new_program = pq.Program() new_program += program if gate_name not in self.defined_gates: new_program.defgate(gate_name, gate_matrix) self.defined_gates.add(gate_name) return new_program
def _defgate(self, program, gate_name, gate_matrix)
Defines a gate named gate_name with matrix gate_matrix in program. In addition, updates self.defined_gates to track what has been defined. :param Program program: Pyquil Program to add the defgate and gate to. :param str gate_name: The name of the gate to add to program. :param numpy.ndarray gate_matrix: The array corresponding to the gate to define. :return: The modified Program. :rtype: Program
2.29258
2.406461
0.952677
control_true = np.kron(ONE_PROJECTION, operation) control_false = np.kron(ZERO_PROJECTION, np.eye(2, 2)) control_root_true = np.kron(ONE_PROJECTION, sqrtm(operation, disp=True)) controlled_gate = control_true + control_false controlled_root_gate = control_root_true + control_false sqrt_name = self.format_gate_name(SQRT_PREFIX, gate_name) controlled_subprogram = pq.Program() control_gate = pq.Program() # For the base case, we check to see if there is just one control qubit. # If it is a CNOT we explicitly break the naming convention so as not to redefine the gate. if len(control_qubits) == 1: if gate_name == NOT_GATE_LABEL: control_name = CONTROL_PREFIX + gate_name else: control_name = self.format_gate_name(CONTROL_PREFIX, gate_name) control_gate = self._defgate(control_gate, control_name, controlled_gate) control_gate.inst((control_name, control_qubits[0], target_qubit)) return control_gate else: control_sqrt_name = self.format_gate_name(CONTROL_PREFIX, sqrt_name) control_gate = self._defgate(control_gate, control_sqrt_name, controlled_root_gate) control_gate.inst((control_sqrt_name, control_qubits[-1], target_qubit)) # Here we recurse to build a toffoli gate on n - 1 of the qubits. n_minus_one_toffoli = self._recursive_builder(NOT_GATE, NOT_GATE_LABEL, control_qubits[:-1], control_qubits[-1]) # We recurse to build a controlled sqrt of the target_gate, excluding the last control. n_minus_one_controlled_sqrt = self._recursive_builder(sqrtm(operation, disp=True), sqrt_name, control_qubits[:-1], target_qubit) controlled_subprogram += control_gate controlled_subprogram += n_minus_one_toffoli controlled_subprogram += control_gate.dagger() # We only add the instructions so that we don't redefine gates controlled_subprogram += n_minus_one_toffoli.instructions controlled_subprogram += n_minus_one_controlled_sqrt return controlled_subprogram
def _recursive_builder(self, operation, gate_name, control_qubits, target_qubit)
Helper function used to define the controlled gate recursively. It uses the algorithm in the reference above. Namely it recursively constructs a controlled gate by applying a controlled square root of the gate, followed by a toffoli gate with len(control_qubits) - 1 controls, applying the controlled adjoint of the square root, another toffoli with len(control_qubits) - 1 controls, and finally another controlled copy of the gate. :param numpy.ndarray operation: The matrix for the unitary to be controlled. :param String gate_name: The name for the gate being controlled. :param Sequence control_qubits: The qubits that are the controls. :param Qubit or Int target_qubit: The qubit that the gate should be applied to. :return: The intermediate Program being built. :rtype: Program
3.213745
3.013792
1.066346
assert isinstance(state, int), \ f"{state} is not an integer. Must call parity_even_p with an integer state." mask = 0 for q in marked_qubits: mask |= 1 << q return bin(mask & state).count("1") % 2 == 0
def parity_even_p(state, marked_qubits)
Calculates the parity of elements at indexes in marked_qubits. Parity is relative to the binary representation of the integer state. :param state: The wavefunction index that corresponds to this state. :param marked_qubits: The indexes to be considered in the parity sum. :returns: A boolean corresponding to the parity.
3.585081
3.63889
0.985213
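For example, with marked qubits [0, 2] the mask is 0b101, so parity counts the set bits of mask & state:

parity_even_p(0b101, [0, 2])  # True: 0b101 & 0b101 has two set bits
parity_even_p(0b110, [0, 2])  # False: 0b101 & 0b110 == 0b100, one set bit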
program = Program() ro = program.declare('ro', 'BIT', max(marked_qubits) + 1) program += pyquil_program program += [MEASURE(qubit, r) for qubit, r in zip(list(range(max(marked_qubits) + 1)), ro)] program.wrap_in_numshots_loop(samples) executable = qc.compile(program) bitstring_samples = qc.run(executable) bitstring_tuples = list(map(tuple, bitstring_samples)) freq = Counter(bitstring_tuples) # perform weighted average expectation = 0 for bitstring, count in freq.items(): bitstring_int = int("".join([str(x) for x in bitstring[::-1]]), 2) if parity_even_p(bitstring_int, marked_qubits): expectation += float(count) / samples else: expectation -= float(count) / samples return expectation
def expectation_from_sampling(pyquil_program: Program, marked_qubits: List[int], qc: QuantumComputer, samples: int) -> float
Calculation of Z_{i} at marked_qubits Given a wavefunction, this calculates the expectation value of the Zi operator where i ranges over all the qubits given in marked_qubits. :param pyquil_program: pyQuil program generating some state :param marked_qubits: The qubits within the support of the Z pauli operator whose expectation value is being calculated :param qc: A QuantumComputer object. :param samples: Number of bitstrings collected to calculate expectation from sampling. :returns: The expectation value as a float.
2.979279
2.999828
0.99315
if isinstance(pauli_sum, np.ndarray): # debug mode by passing an array wf = WavefunctionSimulator().wavefunction(pyquil_prog) wf = np.reshape(wf.amplitudes, (-1, 1)) average_exp = np.conj(wf).T.dot(pauli_sum.dot(wf)).real return average_exp else: if not isinstance(pauli_sum, (PauliTerm, PauliSum)): raise TypeError("pauli_sum variable must be a PauliTerm or PauliSum object") if isinstance(pauli_sum, PauliTerm): pauli_sum = PauliSum([pauli_sum]) if samples is None: operator_progs = [] operator_coeffs = [] for p_term in pauli_sum.terms: op_prog = Program() for qindex, op in p_term: op_prog.inst(STANDARD_GATES[op](qindex)) operator_progs.append(op_prog) operator_coeffs.append(p_term.coefficient) result_overlaps = WavefunctionSimulator().expectation(pyquil_prog, pauli_sum.terms) result_overlaps = list(result_overlaps) assert len(result_overlaps) == len(operator_progs), "Mismatch between number of results and number of operator terms" expectation = sum(list(map(lambda x: x[0] * x[1], zip(result_overlaps, operator_coeffs)))) return expectation.real else: if not isinstance(samples, int): raise TypeError("samples variable must be an integer") if samples <= 0: raise ValueError("samples variable must be a positive integer") # normal execution via fake sampling # stores the sum of contributions to the energy from each operator term expectation = 0.0 for j, term in enumerate(pauli_sum.terms): meas_basis_change = Program() qubits_to_measure = [] if term.id() == "": meas_outcome = 1.0 else: for index, gate in term: qubits_to_measure.append(index) if gate == 'X': meas_basis_change.inst(RY(-np.pi / 2, index)) elif gate == 'Y': meas_basis_change.inst(RX(np.pi / 2, index)) meas_outcome = \ expectation_from_sampling(pyquil_prog + meas_basis_change, qubits_to_measure, qc, samples) expectation += term.coefficient * meas_outcome return expectation.real
def expectation(pyquil_prog: Program, pauli_sum: Union[PauliSum, PauliTerm, np.ndarray], samples: int, qc: QuantumComputer) -> float
Compute the expectation value of pauli_sum over the distribution generated from pyquil_prog. :param pyquil_prog: The state preparation Program to calculate the expectation value of. :param pauli_sum: PauliSum representing the operator of which to calculate the expectation value or a numpy matrix representing the Hamiltonian tensored up to the appropriate size. :param samples: The number of samples used to calculate the expectation value. If samples is None then the expectation value is computed exactly as <psi|O|psi>. Error models will not work if samples is None. :param qc: The QuantumComputer object. :return: A float representing the expectation value of pauli_sum given the distribution generated from quil_prog.
2.970314
2.96905
1.000426
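A hedged usage sketch computing <psi|Z0|psi> for the |1> state, exactly (samples=None) and by sampling; pyQuil imports assumed:

from pyquil import Program, get_qc
from pyquil.gates import X
from pyquil.paulis import sZ

qc = get_qc("1q-qvm")
prog = Program(X(0))                          # prepare |1>
exact = expectation(prog, sZ(0), None, qc)    # -1.0 via the wavefunction path
sampled = expectation(prog, sZ(0), 1000, qc)  # approximately -1.0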
if hasattr(self, 'get_path_from_parent'): return self.get_path_from_parent(parent) if self.model is parent: return [] model = self.concrete_model # Get a reversed base chain including both the current and parent # models. chain = model._meta.get_base_chain(parent) or [] chain.reverse() chain.append(model) # Construct a list of the PathInfos between models in chain. path = [] for i, ancestor in enumerate(chain[:-1]): child = chain[i + 1] link = child._meta.get_ancestor_link(ancestor) path.extend(link.get_reverse_path_info()) return path
def _get_path_from_parent(self, parent)
Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model.
4.340638
3.749124
1.157774
return cls.maybe_optimize(info, cls._meta.model.objects, id)
def get_node(cls, info, id)
Bear in mind that if you are overriding this method get_node(info, pk), you should always call maybe_optimize(info, qs, pk) and never call get_optimized_node(info, qs, pk) directly, as that would attempt to optimize the node even when it is not supposed to be optimized. :param info: :param id: :return:
15.165039
7.298691
2.077775
pile_tops = list() for x in xs: pile = bisect.bisect_left(pile_tops, x) if pile == len(pile_tops): pile_tops.append(x) else: pile_tops[pile] = x yield x, pile
def patience_sort(xs)
Patience sort an iterable, xs. This function generates a series of pairs (x, pile), where "pile" is the 0-based index of the pile "x" should be placed on top of. Elements of "xs" must be less-than comparable.
4.222832
1.765229
2.392229
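For example, patience sorting [3, 1, 2, 0] places each element on the leftmost pile whose top is >= the element:

list(patience_sort([3, 1, 2, 0]))
# [(3, 0), (1, 0), (2, 1), (0, 0)]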
li = longest_increasing_subseq_length(xs) ld = longest_decreasing_subseq_length(xs) return max(li, ld), li - ld
def longest_monotonic_subseq_length(xs)
Return the length of the longest monotonic subsequence of xs, second return value is the difference between increasing and decreasing lengths. >>> longest_monotonic_subseq_length((4, 5, 1, 2, 3)) (3, 1) >>> longest_monotonic_subseq_length((1, 2, 3, 5, 4)) (4, 2) >>> longest_monotonic_subseq_length((1, 2, 1)) (2, 0)
2.460661
1.439353
1.709561
# Patience sort xs, stacking (x, prev_ix) pairs on the piles. # Prev_ix indexes the element at the top of the previous pile, # which has a lower x value than the current x value. piles = [[]] # Create a dummy pile 0 for x, p in patience_sort(xs): if p + 1 == len(piles): piles.append([]) # backlink to the top of the previous pile piles[p + 1].append((x, len(piles[p]) - 1)) # Backtrack to find a longest increasing subsequence npiles = len(piles) - 1 prev = 0 lis = list() for pile in range(npiles, 0, -1): x, prev = piles[pile][prev] lis.append(x) lis.reverse() return lis
def longest_increasing_subsequence(xs)
Return a longest increasing subsequence of xs. (Note that there may be more than one such subsequence.) >>> longest_increasing_subsequence(range(3)) [0, 1, 2] >>> longest_increasing_subsequence([3, 1, 2, 0]) [1, 2]
3.775366
3.178162
1.187909
w, j = max(L.items()) while j != -1: yield j w, j = bestsofar[j]
def backtracking(a, L, bestsofar)
Backtrack from the heaviest weight, emitting the index of each element along the chain of predecessors.
8.294218
5.571355
1.488726
# Stores the smallest idx of last element of a subsequence of weight w L = {0: -1} bestsofar = [(0, -1)] * len(a) # (best weight, from_idx) for i, (key, weight) in enumerate(a): for w, j in L.items(): if j != -1 and a[j][0] >= key: continue new_weight = w + weight if new_weight in L and a[L[new_weight]][0] <= key: continue L[new_weight] = i newbest = (new_weight, j) if newbest > bestsofar[i]: bestsofar[i] = newbest if debug: #print (key, weight), L print((key, weight), bestsofar) tb = reversed(list(backtracking(a, L, bestsofar))) return [a[x] for x in tb], max(L.items())[0]
def heaviest_increasing_subsequence(a, debug=False)
Returns the heaviest increasing subsequence for array a. Elements are (key, weight) pairs. >>> heaviest_increasing_subsequence([(3, 3), (2, 2), (1, 1), (0, 5)]) ([(0, 5)], 5)
4.83117
4.702701
1.027318
p = OptionParser(mappability.__doc__) p.add_option("--mer", default=50, type="int", help="User mer size") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) ref, = args K = opts.mer pf = ref.rsplit(".", 1)[0] mm = MakeManager() gem = pf + ".gem" cmd = "gem-indexer -i {} -o {}".format(ref, pf) mm.add(ref, gem, cmd) mer = pf + ".{}mer".format(K) mapb = mer + ".mappability" cmd = "gem-mappability -I {} -l {} -o {} -T {}".\ format(gem, K, mer, opts.cpus) mm.add(gem, mapb, cmd) wig = mer + ".wig" cmd = "gem-2-wig -I {} -i {} -o {}".format(gem, mapb, mer) mm.add(mapb, wig, cmd) bw = mer + ".bw" cmd = "wigToBigWig {} {}.sizes {}".format(wig, mer, bw) mm.add(wig, bw, cmd) bg = mer + ".bedGraph" cmd = "bigWigToBedGraph {} {}".format(bw, bg) mm.add(bw, bg, cmd) merged = mer + ".filtered-1.merge.bed" cmd = "python -m jcvi.formats.bed filterbedgraph {} 1".format(bg) mm.add(bg, merged, cmd) mm.write()
def mappability(args)
%prog mappability reference.fasta Generate 50mer mappability for reference genome. Commands are based on gem mapper. See instructions: <https://github.com/xuefzhao/Reference.Mappability>
3.2913
3.139376
1.048393
p = OptionParser(somatic.__doc__) opts, args = p.parse_args(args) if len(args) < 3: sys.exit(not p.print_help()) ref, bams = args[0], args[1:] tcmd = "~/export/speedseq/bin/speedseq somatic" tcmd += " -t 32 -F .2 -C 3 -q 30" cmds = [] for b in bams: pf = b.split(".")[0] cmd = tcmd cmd += " -o {0}".format(pf) others = ",".join(sorted(set(bams) - set([b]))) cmd += " {0} {1} {2}".format(ref, others, b) cmds.append(cmd) write_file("somatic.sh", "\n".join(cmds))
def somatic(args)
%prog somatic ref.fasta *.bam > somatic.sh Useful to identify somatic mutations in each sample compared to all other samples. Script using SPEEDSEQ-somatic will be written to stdout.
3.567096
3.079238
1.158435
p = OptionParser(rmdup.__doc__) p.add_option("-S", default=False, action="store_true", help="Treat PE reads as SE in rmdup") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) bams = args cmd = "samtools rmdup" if opts.S: cmd += " -S" for b in bams: if "rmdup" in b: continue rb = b.rsplit(".", 1)[0] + ".rmdup.bam" if not need_update(b, rb): continue print(" ".join((cmd, b, rb)))
def rmdup(args)
%prog rmdup *.bam > rmdup.cmds Remove PCR duplicates from BAM files, generate a list of commands.
2.742859
2.510469
1.092568
p = OptionParser(mpileup.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) prefix, ref = args[0:2] bams = args[2:] cmd = "samtools mpileup -P ILLUMINA -E -ugD -r {0}" cmd += " -f {0} {1}".format(ref, " ".join(bams)) fmd = "bcftools view -cvg -" seqids = list(Fasta(ref).iterkeys_ordered()) for s in seqids: outfile = prefix + ".{0}.vcf".format(s) print(cmd.format(s), "|", fmd, ">", outfile)
def mpileup(args)
%prog mpileup prefix ref.fa *.bam Call SNPs using samtools mpileup.
4.275011
3.84585
1.111591
p = OptionParser(freebayes.__doc__) p.add_option("--mindepth", default=3, type="int", help="Minimum depth [default: %default]") p.add_option("--minqual", default=20, type="int", help="Minimum quality [default: %default]") opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) prefix, ref = args[0:2] bams = args[2:] cmd = "bamaddrg -R {0}" cmd += " " + " ".join("-b {0}".format(x) for x in bams) fmd = "freebayes --stdin -C {0} -f {1}".format(opts.mindepth, ref) seqids = list(Fasta(ref).iterkeys_ordered()) for s in seqids: outfile = prefix + ".{0}.vcf".format(s) print(cmd.format(s), "|", fmd + " -r {0} -v {1}".format(s, outfile))
def freebayes(args)
%prog freebayes prefix ref.fa *.bam Call SNPs using freebayes.
3.265408
3.050368
1.070496
p = OptionParser(freq.__doc__) p.add_option("--mindepth", default=3, type="int", help="Minimum depth [default: %default]") p.add_option("--minqual", default=20, type="int", help="Minimum quality [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, bamfile = args cmd = "freebayes -f {0} --pooled-continuous {1}".format(fastafile, bamfile) cmd += " -F 0 -C {0}".format(opts.mindepth) cmd += ' | vcffilter -f "QUAL > {0}"'.format(opts.minqual) cmd += " | vcfkeepinfo - AO RO TYPE" sh(cmd, outfile=opts.outfile)
def freq(args)
%prog freq fastafile bamfile Call SNP frequencies and generate GFF file.
3.219139
3.016384
1.067218
p = OptionParser(frommaf.__doc__) p.add_option("--validate", help="Validate coordinates against FASTA [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) maf, = args snpfile = maf.rsplit(".", 1)[0] + ".vcf" fp = open(maf) fw = open(snpfile, "w") total = 0 id = "." qual = 20 filter = "PASS" info = "DP=20" print("##fileformat=VCFv4.0", file=fw) print("#CHROM POS ID REF ALT QUAL FILTER INFO".replace(" ", "\t"), file=fw) for row in fp: atoms = row.split() c, pos, ref, alt = atoms[:4] try: c = int(c) except ValueError: continue c = "chr{0:02d}".format(c) pos = int(pos) print("\t".join(str(x) for x in \ (c, pos, id, ref, alt, qual, filter, info)), file=fw) total += 1 fw.close() validate = opts.validate if not validate: return from jcvi.utils.cbook import percentage f = Fasta(validate) fp = open(snpfile) nsnps = 0 for row in fp: if row[0] == '#': continue c, pos, id, ref, alt, qual, filter, info = row.split("\t") pos = int(pos) feat = dict(chr=c, start=pos, stop=pos) s = f.sequence(feat) s = str(s) assert s == ref, "Validation error: {0} is {1} (expect: {2})".\ format(feat, s, ref) nsnps += 1 if nsnps % 50000 == 0: logging.debug("SNPs parsed: {0}".format(percentage(nsnps, total))) logging.debug("A total of {0} SNPs validated and written to `{1}`.".\ format(nsnps, snpfile))
def frommaf(args)
%prog frommaf maffile Convert to four-column tabular format from MAF.
2.642374
2.680731
0.985692
if connector == 'Sybase': shost, suser, spass = None, None, None _ = lambda x: x.split("=")[-1].translate(None, "\"'").strip() sqshrc = op.expanduser(sqshrc) if op.exists(sqshrc): for row in open(sqshrc): row = row.strip() if not row.startswith("\\set") or "prompt" in row: continue if "password" in row: spass = _(row) if "hostname" in row: shost = _(row) if "username" in row: suser = _(row) else: print("[warning] file `{0}` not found".format(sqshrc), file=sys.stderr) if suser and spass: username, password = suser, spass if shost: hostname = shost dhost, duser, dpass = db_defaults(connector=connector) if not password: username, password = duser, dpass elif not username: username = getusername() if not hostname: hostname = dhost return hostname, username, password
def get_profile(sqshrc="~/.sqshrc", connector='Sybase', hostname=None, username=None, password=None)
Get hostname, username, and password from the .sqshrc file, e.g. \set username="user".
3.013036
3.00549
1.002511
p = OptionParser(libs.__doc__) p.set_db_opts(dbname="track", credentials=None) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) libfile, = args sqlcmd = "select library.lib_id, library.name, bac.gb# from library join bac on " + \ "library.bac_id=bac.id where bac.lib_name='Medicago'" cur = connect(opts.dbname) results = fetchall(cur, sqlcmd) fw = open(libfile, "w") for lib_id, name, gb in results: name = name.translate(None, "\n") if not gb: gb = "None" print("|".join((lib_id, name, gb)), file=fw) fw.close()
def libs(args)
%prog libs libfile Get list of lib_ids to be run by pull(). The SQL commands: select library.lib_id, library.name from library join bac on library.bac_id=bac.id where bac.lib_name="Medicago"; select seq_name from sequence where seq_name like 'MBE%' and trash is null;
4.289815
3.318115
1.292847
p = OptionParser(pull.__doc__) p.set_db_opts(dbname="mtg2", credentials=None) p.add_option("--frag", default=False, action="store_true", help="The command to pull sequences from db [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) libfile, = args dbname = opts.dbname frag = opts.frag fp = open(libfile) hostname, username, password = get_profile() for row in fp: lib_id, name = row.split("|", 1) sqlfile = lib_id + ".sql" if not op.exists(sqlfile): fw = open(sqlfile, "w") print("select seq_name from sequence where seq_name like" + \ " '{0}%' and trash is null".format(lib_id), file=fw) fw.close() if frag: cmd = "pullfrag -D {0} -n {1}.sql -o {1} -q -S {2}".format(dbname, lib_id, hostname) cmd += " -U {0} -P {1}".format(username, password) else: cmd = "pullseq -D {0} -n {1}.sql -o {1} -q".format(dbname, lib_id) sh(cmd)
def pull(args)
%prog pull libfile Pull the sequences using the first column in the libfile.
3.750903
3.553818
1.055457
p = OptionParser(query.__doc__) p.set_db_opts() p.add_option("--dryrun", default=False, action="store_true", help="Don't commit to database. Just print queries [default: %default]") p.set_sep(help="Specify output field separator") p.set_verbose(help="Print out all the queries") p.set_outfile() opts, args = p.parse_args(args) if len(args) == 0: sys.exit(not p.print_help()) fieldsep = opts.sep sep = ":::" files = None if sep in args: sepidx = args.index(sep) files = args[sepidx + 1:] args = args[:sepidx] if not files: files = [""] qrys = [] qry = " ".join(args) if ";" in qry: for q in qry.split(";"): if len(q.strip()) > 0: qrys.append(q) else: qrys.append(qry) queries = set() if files: for datafile in files: datafile = datafile.strip() fp = must_open(datafile) for row in fp: for qry in qrys: qry = qry.strip() m = re.findall(r"\{\d+\}", qry) if m: mi = [int(x.strip("{}")) for x in m] atoms = row.strip().split("\t") assert max(mi) < len(atoms), \ "Number of columns in `datafile`({0})".format(len(atoms)) + \ " != number of `placeholders`({0})".format(len(m)) natoms = [atoms[x] for x in mi] for idx, (match, atom) in enumerate(zip(m, natoms)): qry = qry.replace(match, atom) queries.add(qry) else: for qry in qrys: if re.search(r"\{\d+\}", qry): logging.error("Query `{0}` contains placeholders, no datafile(s) specified".format(qry)) sys.exit() queries.add(qry) if not opts.dryrun: fw = must_open(opts.outfile, "w") dbh, cur = connect(opts.dbname, connector=opts.dbconn, hostname=opts.hostname, \ username=opts.username, password=opts.password, port=opts.port) cflag = None for qry in queries: if opts.dryrun or opts.verbose: print(qry) if not opts.dryrun: if to_commit(qry): execute(cur, qry) cflag = True else: results = fetchall(cur, qry, connector=opts.dbconn) for result in results: print(fieldsep.join([str(x) for x in result]), file=fw) if not opts.dryrun and cflag: commit(dbh)
def query(args)
%prog query "SELECT feat_name FROM asm_feature WHERE feat_type = \\"{0}\\" AND end5 <= \\"{1}\\" AND end3 >= \\"{2}\\"" ::: datafile1 .... Script takes the data from tab-delimited datafile(s) and replaces the placeholders in the query which is then executed. Depending upon the type of query, results are either printed out (when running `select`) or not (when running `insert`, `update` or `delete`) If the query contains quotes around field values, then these need to be escaped with \\
2.90603
2.871189
1.012135
if first_line is None: first_line = fp.readline() if not first_line: raise EOFError() match = _START.match(first_line) if not match: raise Exception('Bad start of message', first_line) type = match.group(1) message = Message(type) while True: row = fp.readline() match = _MULTILINE_FIELD.match(row) if match: key = match.group(1) val = "" while row: pos = fp.tell() row = fp.readline() if row[0] in '.': break elif row[0] in '{}': fp.seek(pos) # put the line back break val += row message.contents.append((key, val, True)) continue match = _FIELD.match(row) if match: key, val = match.group(1), match.group(2) message.contents.append((key, val, False)) continue match = _START.match(row) if match: message.append(read_record(fp, row)) continue if row[0] == '}': break raise Exception('Bad line', row) return message
def read_record(fp, first_line=None)
Read a record from a file of AMOS messages. On success, returns a Message object. On end of file, raises EOFError.
2.612393
2.511296
1.040257
p = OptionParser(filter.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) frgfile, idsfile = args assert frgfile.endswith(".frg") fp = open(idsfile) allowed = set(x.strip() for x in fp) logging.debug("A total of {0} allowed ids loaded.".format(len(allowed))) newfrgfile = frgfile.replace(".frg", ".filtered.frg") fp = open(frgfile) fw = open(newfrgfile, "w") nfrags, discarded_frags = 0, 0 nmates, discarded_mates = 0, 0 for rec in iter_records(fp): if rec.type == "FRG": readname = rec.get_field("acc") readname = readname.rstrip("ab") nfrags += 1 if readname not in allowed: discarded_frags += 1 continue if rec.type == "LKG": readname = rec.get_field("frg") readname = readname.rstrip("ab") nmates += 1 if readname not in allowed: discarded_mates += 1 continue print(rec, file=fw) # Print out a summary survived_frags = nfrags - discarded_frags survived_mates = nmates - discarded_mates print("Survived fragments: {0}".\ format(percentage(survived_frags, nfrags)), file=sys.stderr) print("Survived mates: {0}".\ format(percentage(survived_mates, nmates)), file=sys.stderr)
def filter(args)
%prog filter frgfile idsfile Removes the reads from frgfile that are indicated as duplicates in the clstrfile (generated by CD-HIT-454). `idsfile` includes a set of names to include in the filtered frgfile. See apps.cdhit.ids().
2.566199
2.357161
1.088682
p = OptionParser(frg.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgfile, = args fastafile = frgfile.rsplit(".", 1)[0] + ".fasta" fp = open(frgfile) fw = open(fastafile, "w") for rec in iter_records(fp): if rec.type != "FRG": continue id = rec.get_field("acc") seq = rec.get_field("seq") s = SeqRecord(Seq(seq), id=id, description="") SeqIO.write([s], fw, "fasta") fw.close()
def frg(args)
%prog frg frgfile Extract FASTA sequences from frg reads.
2.435509
2.2723
1.071826
p = OptionParser(asm.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) asmfile, = args prefix = asmfile.rsplit(".", 1)[0] ctgfastafile = prefix + ".ctg.fasta" scffastafile = prefix + ".scf.fasta" fp = open(asmfile) ctgfw = open(ctgfastafile, "w") scffw = open(scffastafile, "w") for rec in iter_records(fp): type = rec.type if type == "CCO": fw = ctgfw pp = "ctg" elif type == "SCF": fw = scffw pp = "scf" else: continue id = rec.get_field("acc") id = id.translate(None, "()").split(",")[0] seq = rec.get_field("cns").translate(None, "-") s = SeqRecord(Seq(seq), id=pp + id, description="") SeqIO.write([s], fw, "fasta") fw.flush() fw.close()
def asm(args)
%prog asm asmfile Extract contig and scaffold FASTA sequences from the asm file.
2.786187
2.716962
1.025479
p = OptionParser(count.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgfile, = args fp = open(frgfile) counts = defaultdict(int) for rec in iter_records(fp): counts[rec.type] += 1 for type, cnt in sorted(counts.items()): print('{0}: {1}'.format(type, cnt), file=sys.stderr)
def count(args)
%prog count frgfile Count the number of messages of each type.
2.719016
2.32918
1.167371
from itertools import groupby from jcvi.formats.base import FileShredder p = OptionParser(mergeclean.__doc__) p.set_sep(sep="_", help="Separator to group per prefix") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) files = sorted(args) sep = opts.sep key = lambda x: x.split(sep)[0] mtime = lambda x: os.stat(x).st_mtime for pf, fs in groupby(files, key=key): fs = list(fs) if len(fs) == 1: continue newest_f = max(fs, key=mtime) print("|".join(fs), "=>", newest_f, file=sys.stderr) fs.remove(newest_f) FileShredder(fs)
def mergeclean(args)
%prog mergeclean [*.bam|*.count] Clean redundant merged bam/count files. This usually happens after running formats.sam.merge() several times.
2.768024
2.743779
1.008836
p = OptionParser(prepare.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) counts, families = args countfiles = glob(op.join(counts, "*.count")) countsdb = defaultdict(list) for c in countfiles: rs = RiceSample(c) countsdb[(rs.tissue, rs.ind)].append(rs) # Merge duplicates - data sequenced in different batches key = lambda x: (x.label, x.rep) for (tissue, ind), rs in sorted(countsdb.items()): rs.sort(key=key) nrs = len(rs) for i in xrange(nrs): ri = rs[i] if not ri.working: continue for j in xrange(i + 1, nrs): rj = rs[j] if key(ri) != key(rj): continue ri.merge(rj) rj.working = False countsdb[(tissue, ind)] = [x for x in rs if x.working] # Group into families mkdir("families") for (tissue, ind), r in sorted(countsdb.items()): r = list(r) if r[0].label != "F1": continue P1, P2 = r[0].P1, r[0].P2 P1, P2 = countsdb[(tissue, P1)], countsdb[(tissue, P2)] rs = P1 + P2 + r groups = [1] * len(P1) + [2] * len(P2) + [3] * len(r) assert len(rs) == len(groups) outfile = "-".join((tissue, ind)) merge_counts(rs, op.join(families, outfile)) groupsfile = outfile + ".groups" fw = open(op.join(families, groupsfile), "w") print(",".join(str(x) for x in groups), file=fw) fw.close()
def prepare(args)
%prog prepare countfolder families Parse list of count files and group per family into families folder.
3.020622
2.835912
1.065132
print("Insert-size\tOverlap", file=sys.stderr) for i in range(0, 3 * readlen, step): p = gaussian_prob_le(i, i / 5, 2 * readlen) if p < cutoff or p > 1 - cutoff: continue print("{0}bp\t{1}%".format(i, int(round(100 * p))), file=sys.stderr)
def choose_insertsize(readlen=150, step=20, cutoff=.01)
Calculate ratio of overlap for a range of insert sizes. Idea borrowed from ALLPATHS code (`allpaths_cache/CacheToAllPathsInputs.pl`).
4.473078
4.631996
0.965691
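The row above calls `gaussian_prob_le`, which is not shown in this dump. A minimal sketch, assuming the signature is `gaussian_prob_le(mu, sigma, x)` and it returns P(X <= x) for X ~ Normal(mu, sigma):

    from scipy.stats import norm

    def gaussian_prob_le(mu, sigma, x):
        # P(X <= x) for X ~ Normal(mu, sigma); degenerate when sigma <= 0
        if sigma <= 0:
            return 1.0 if x >= mu else 0.0
        return float(norm.cdf(x, loc=mu, scale=sigma))

Under this reading, the loop prints the chance that a fragment of a given insert size is shorter than twice the read length, i.e. that the read pair overlaps.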
from scipy import stats if not x or not y: return 0 corr, pvalue = stats.spearmanr(x, y) return corr
def spearmanr(x, y)
Michiel de Hoon's library (available in BioPython or standalone as PyCluster) returns Spearman rsb which does include a tie correction. >>> x = [5.05, 6.75, 3.21, 2.66] >>> y = [1.65, 26.5, -5.93, 7.96] >>> z = [1.65, 2.64, 2.64, 6.95] >>> round(spearmanr(x, y), 4) 0.4 >>> round(spearmanr(x, z), 4) -0.6325
3.050457
4.4314
0.688373
if len(a) < 3: return np.zeros(len(a), dtype=bool) A = np.array(a, dtype=float) lb, ub = outlier_cutoff(A, threshold=threshold) return np.logical_or(A > ub, A < lb)
def reject_outliers(a, threshold=3.5)
Iglewicz and Hoaglin's robust test for multiple outliers (two sided test). <http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm> See also: <http://contchart.com/outliers.aspx> >>> a = [0, 1, 2, 4, 12, 58, 188, 189] >>> list(reject_outliers(a)) [False, False, False, False, False, True, True, True]
3.251223
3.766159
0.863273
A = np.array(a, dtype=float) M = np.median(A) D = np.absolute(A - M) MAD = np.median(D) C = threshold / .67449 * MAD return M - C, M + C
def outlier_cutoff(a, threshold=3.5)
Iglewicz and Hoaglin's robust outlier test; returns the cutoff values - lower bound and upper bound.
3.519298
3.447173
1.020923
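A worked check of the cutoff above (a sketch, not part of the row): the constant .67449 is the standard normal's third quartile, so MAD / .67449 estimates the standard deviation for Gaussian data.

    import numpy as np

    a = [0, 1, 2, 4, 12, 58, 188, 189]   # same array as the reject_outliers doctest
    A = np.array(a, dtype=float)
    M = np.median(A)                      # 8.0
    MAD = np.median(np.abs(A - M))        # 7.5
    C = 3.5 / .67449 * MAD                # ~38.9
    print(M - C, M + C)                   # ~(-30.9, 46.9): flags 58, 188, 189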
assert method in ("kosambi", "haldane") d = cM / 100. if method == "kosambi": e4d = exp(4 * d) return (e4d - 1) / (e4d + 1) / 2 elif method == "haldane": return (1 - exp(-2 * d)) / 2
def recomb_probability(cM, method="kosambi")
<http://statgen.ncsu.edu/qtlcart/manual/node46.html> >>> recomb_probability(1) 0.009998666879965463 >>> recomb_probability(100) 0.48201379003790845 >>> recomb_probability(10000) 0.5
3.517024
4.232502
0.830956
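For reference, the inverses of the two map functions above are closed-form as well. A sketch of a hypothetical `map_distance` (cM from recombination fraction), which should round-trip with `recomb_probability`:

    from math import log

    def map_distance(r, method="kosambi"):
        # inverse map functions; r must be a recombination fraction < .5
        assert 0 <= r < .5
        if method == "kosambi":
            d = .25 * log((1 + 2 * r) / (1 - 2 * r))
        else:  # haldane
            d = -.5 * log(1 - 2 * r)
        return d * 100  # Morgans -> cM

    # round-trip check: map_distance(recomb_probability(100)) ~ 100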
assert 0 <= p < .75 rD = 1 - 4. / 3 * p D = -.75 * log(rD) varD = p * (1 - p) / (rD ** 2 * L) return D, varD
def jukesCantorD(p, L=100)
>>> jukesCantorD(.1) (0.10732563273050497, 0.001198224852071006) >>> jukesCantorD(.7) (2.0310376508266565, 0.47249999999999864)
7.174315
8.883476
0.807602
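The row above implements the Jukes-Cantor distance D = -3/4 ln(1 - 4p/3) with its sampling variance. A sketch of the inverse (expected fraction of differing sites for a given distance), useful for sanity-checking the doctests:

    from math import exp

    def jukesCantorP(D):
        # inverse of jukesCantorD: p such that D = -3/4 * ln(1 - 4p/3)
        return .75 * (1 - exp(-4. / 3 * D))

    # jukesCantorP(2.0310376508266565) ~ 0.7, matching the doctest above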
ram = -109635 + 18977 * readsize + 86326 * genomesize + \ 233353 * numreads - 51092 * K print("ReadSize: {0}".format(readsize), file=sys.stderr) print("GenomeSize: {0}Mb".format(genomesize), file=sys.stderr) print("NumReads: {0}M".format(numreads), file=sys.stderr) print("K: {0}".format(K), file=sys.stderr) ram = human_size(ram * 1000, a_kilobyte_is_1024_bytes=True) print("RAM usage: {0} (MAXKMERLENGTH=31)".format(ram), file=sys.stderr)
def velvet(readsize, genomesize, numreads, K)
Calculate velvet memory requirement. <http://seqanswers.com/forums/showthread.php?t=2101> Ram required for velvetg = -109635 + 18977*ReadSize + 86326*GenomeSize + 233353*NumReads - 51092*K Read size is in bases. Genome size is in millions of bases (Mb) Number of reads is in millions K is the kmer hash value used in velveth
3.94279
2.40964
1.636257
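A worked instance of the regression above (a sketch; the call mirrors the row's signature, and the constants come from the seqanswers thread it cites):

    # velvet(readsize=100, genomesize=1000, numreads=100, K=31)
    ram = -109635 + 18977 * 100 + 86326 * 1000 + 233353 * 100 - 51092 * 31
    print(ram)  # 109865513, interpreted as KB
    # 109865513 * 1000 bytes is roughly 102.3 GiB when reported with
    # a_kilobyte_is_1024_bytes=True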
genelist = ",".join(genelist) dataset = get_phytozome_dataset() filters = dict(gene_name_filter=genelist) attributes = "chr_name1,gene_chrom_start,gene_chrom_end,gene_name1".split(",") data = dataset.query(filters=filters, attributes=attributes) return data
def get_bed_from_phytozome(genelist)
>>> data = get_bed_from_phytozome(["AT5G54690", "AT1G01010"]) >>> print(data.read()) #doctest: +NORMALIZE_WHITESPACE Chr1 3631 5899 AT1G01010 Chr5 22219224 22221840 AT5G54690 <BLANKLINE>
5.013156
5.4758
0.915511
p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) idsfile, = args ids = set(x.strip() for x in open(idsfile)) data = get_bed_from_phytozome(list(ids)) pf = idsfile.rsplit(".", 1)[0] bedfile = pf + ".bed" fw = open(bedfile, "w") for i, row in enumerate(data): row = row.strip() if row == "": continue print(row, file=fw) logging.debug("A total of {0} records written to `{1}`.".format(i + 1, bedfile))
def bed(args)
%prog bed genes.ids Get gene bed from phytozome. `genes.ids` contains the list of genes you want to pull from Phytozome. Write output to .bed file.
2.912387
2.54689
1.143507
p = OptionParser(bed.__doc__) p.add_option("-o", dest="output", default="stdout", help="Output file name [default: %default]") p.add_option("--cutoff", dest="cutoff", default=10, type="int", help="Minimum read depth to report intervals [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) binfile, fastafile = args fw = must_open(opts.output, "w") cutoff = opts.cutoff assert cutoff >= 0, "Need non-negative cutoff" b = BinFile(binfile) ar = b.array fastasize, sizes, offsets = get_offsets(fastafile) s = Sizes(fastafile) for ctg, ctglen in s.iter_sizes(): offset = offsets[ctg] subarray = ar[offset:offset + ctglen] key = lambda x: x[1] >= cutoff for tf, array_elements in groupby(enumerate(subarray), key=key): array_elements = list(array_elements) if not tf: continue # 0-based system => 1-based system start = array_elements[0][0] + 1 end = array_elements[-1][0] + 1 mean_depth = sum([x[1] for x in array_elements]) / \ len(array_elements) mean_depth = int(mean_depth) name = "na" print("\t".join(str(x) for x in (ctg, \ start - 1, end, name, mean_depth)), file=fw)
def bed(args)
%prog bed binfile fastafile Write bed files where the bases have at least certain depth.
3.208673
3.068756
1.045594
p = OptionParser(merge.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) binfiles = args[:-1] mergedbin = args[-1] if op.exists(mergedbin): logging.error("`{0}` file exists. Remove before proceed."\ .format(mergedbin)) return b = BinFile(binfiles[0]) ar = b.mmarray fastasize, = ar.shape logging.debug("Initialize array of uint16 with size {0}".format(fastasize)) merged_ar = np.zeros(fastasize, dtype=np.uint16) for binfile in binfiles: b = BinFile(binfile) merged_ar += b.array logging.debug("Resetting the count max to 255.") merged_ar[merged_ar > 255] = 255 logging.debug("Compact array back to uint8 with size {0}".format(fastasize)) merged_ar = np.array(merged_ar, dtype=np.uint8) merged_ar.tofile(mergedbin) logging.debug("Merged array written to `{0}`".format(mergedbin))
def merge(args)
%prog merge *.bin merged.bin Merge several count arrays into one. Overflows will be capped at uint8_max (255).
3.159402
2.883598
1.095646
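Why the uint16 accumulator in the row above matters: numpy's fixed-width integers wrap around rather than saturate. A small sketch:

    import numpy as np

    a = np.array([200], dtype=np.uint8)
    print(a + a)                                   # [144] -- wrapped, not 400
    acc = a.astype(np.uint16) + a                  # [400] -- safe in uint16
    print(np.minimum(acc, 255).astype(np.uint8))   # [255] -- capped, as above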
p = OptionParser(query.__doc__) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) binfile, fastafile, ctgID, baseID = args b = BinFile(binfile, fastafile) ar = b.mmarray fastasize, sizes, offsets = get_offsets(fastafile) oi = offsets[ctgID] + int(baseID) - 1 print("\t".join((ctgID, baseID, str(ar[oi]))))
def query(args)
%prog query binfile fastafile ctgID baseID Get the depth at a particular base.
4.700685
3.223071
1.458449
p = OptionParser(count.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) coveragefile, fastafile = args countsfile = coveragefile.split(".")[0] + ".bin" if op.exists(countsfile): logging.error("`{0}` file exists. Remove before proceed."\ .format(countsfile)) return fastasize, sizes, offsets = get_offsets(fastafile) logging.debug("Initialize array of uint8 with size {0}".format(fastasize)) ar = np.zeros(fastasize, dtype=np.uint8) update_array(ar, coveragefile, sizes, offsets) ar.tofile(countsfile) logging.debug("Array written to `{0}`".format(countsfile))
def count(args)
%prog count t.coveragePerBase fastafile Serialize the genomeCoverage results. The coordinate system of the count array will be based on the fastafile.
3.426536
3.204495
1.069291
if not edges: return None G = edges_to_graph(edges) path = list(nx.topological_sort(G)) return path
def edges_to_path(edges)
Connect edges and return a path.
4.260436
3.587854
1.187461
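`edges_to_graph` is used here and in later rows but not shown in this dump. A plausible sketch, assuming it simply builds a directed graph from (a, b) or (a, b, weight) tuples:

    import networkx as nx

    def edges_to_graph(edges):
        # hypothetical helper: DiGraph over the first two fields of each edge
        G = nx.DiGraph()
        for e in edges:
            a, b = e[:2]
            G.add_edge(a, b)
        return G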
edges = populate_edge_weights(edges) incident, nodes = node_to_edge(edges, directed=False) if not directed: # Make graph symmetric dual_edges = edges[:] for a, b, w in edges: dual_edges.append((b, a, w)) edges = dual_edges DUMMY = "DUMMY" dummy_edges = edges + [(DUMMY, x, 0) for x in nodes] + \ [(x, DUMMY, 0) for x in nodes] #results = tsp(dummy_edges, constraint_generation=constraint_generation) results = tsp_gurobi(dummy_edges) if results: results = [x for x in results if DUMMY not in x] results = edges_to_path(results) if not directed: results = min(results, results[::-1]) return results
def hamiltonian(edges, directed=False, constraint_generation=True)
Calculates shortest path that traverses each node exactly once. Convert Hamiltonian path problem to TSP by adding one dummy point that has a distance of zero to all your other points. Solve the TSP and get rid of the dummy point - what remains is the Hamiltonian Path. >>> g = [(1,2), (2,3), (3,4), (4,2), (3,5)] >>> hamiltonian(g) [1, 2, 4, 3, 5] >>> g = [(1,2), (2,3), (1,4), (2,5), (3,6)] >>> hamiltonian(g)
4.215369
4.756359
0.88626
from gurobipy import Model, GRB, quicksum edges = populate_edge_weights(edges) incoming, outgoing, nodes = node_to_edge(edges) idx = dict((n, i) for i, n in enumerate(nodes)) nedges = len(edges) n = len(nodes) m = Model() def step(x): return "u_{0}".format(x) # Create variables vars = {} for i, (a, b, w) in enumerate(edges): vars[i] = m.addVar(obj=w, vtype=GRB.BINARY, name=str(i)) for u in nodes[1:]: u = step(u) vars[u] = m.addVar(obj=0, vtype=GRB.INTEGER, name=u) m.update() # Bounds for step variables for u in nodes[1:]: u = step(u) vars[u].lb = 1 vars[u].ub = n - 1 # Add degree constraint for v in nodes: incoming_edges = incoming[v] outgoing_edges = outgoing[v] m.addConstr(quicksum(vars[x] for x in incoming_edges) == 1) m.addConstr(quicksum(vars[x] for x in outgoing_edges) == 1) # Subtour elimination edge_store = dict(((idx[a], idx[b]), i) for i, (a, b, w) in enumerate(edges)) # Given a list of edges, finds the shortest subtour def subtour(s_edges): visited = [False] * n cycles = [] lengths = [] selected = [[] for i in range(n)] for x, y in s_edges: selected[x].append(y) while True: current = visited.index(False) thiscycle = [current] while True: visited[current] = True neighbors = [x for x in selected[current] if not visited[x]] if len(neighbors) == 0: break current = neighbors[0] thiscycle.append(current) cycles.append(thiscycle) lengths.append(len(thiscycle)) if sum(lengths) == n: break return cycles[lengths.index(min(lengths))] def subtourelim(model, where): if where != GRB.callback.MIPSOL: return selected = [] # make a list of edges selected in the solution sol = model.cbGetSolution([model._vars[i] for i in range(nedges)]) selected = [edges[i] for i, x in enumerate(sol) if x > .5] selected = [(idx[a], idx[b]) for a, b, w in selected] # find the shortest cycle in the selected edge list tour = subtour(selected) if len(tour) == n: return # add a subtour elimination constraint c = tour incident = [edge_store[a, b] for a, b in pairwise(c + [c[0]])] model.cbLazy(quicksum(model._vars[x] for x in incident) <= len(tour) - 1) m.update() m._vars = vars m.params.LazyConstraints = 1 m.optimize(subtourelim) selected = [v.varName for v in m.getVars() if v.x > .5] selected = [int(x) for x in selected if x[:2] != "u_"] results = sorted(x for i, x in enumerate(edges) if i in selected) \ if selected else None return results
def tsp_gurobi(edges)
Modeled after the GUROBI Python TSP example.
2.809856
2.80192
1.002833
edges = populate_edge_weights(edges) incoming, outgoing, nodes = node_to_edge(edges) nedges, nnodes = len(edges), len(nodes) L = LPInstance() L.add_objective(edges, objective=MINIMIZE) balance = [] # For each node, select exactly 1 incoming and 1 outgoing edge for v in nodes: incoming_edges = incoming[v] outgoing_edges = outgoing[v] icc = summation(incoming_edges) occ = summation(outgoing_edges) balance.append("{0} = 1".format(icc)) balance.append("{0} = 1".format(occ)) # Subtour elimination - Miller-Tucker-Zemlin (MTZ) formulation # <http://en.wikipedia.org/wiki/Travelling_salesman_problem> # Desrochers and Laporte, 1991 (DFJ) has a stronger constraint # See also: # G. Laporte / The traveling salesman problem: Overview of algorithms start_step = nedges + 1 u0 = nodes[0] nodes_to_steps = dict((n, start_step + i) for i, n in enumerate(nodes[1:])) edge_store = dict((e[:2], i) for i, e in enumerate(edges)) mtz = [] for i, e in enumerate(edges): a, b = e[:2] if u0 in (a, b): continue na, nb = nodes_to_steps[a], nodes_to_steps[b] con_ab = " x{0} - x{1} + {2}x{3}".format(na, nb, nnodes - 1, i + 1) if (b, a) in edge_store: # This extra term is the stronger DFJ formulation j = edge_store[(b, a)] con_ab += " + {0}x{1}".format(nnodes - 3, j + 1) con_ab += " <= {0}".format(nnodes - 2) mtz.append(con_ab) # Step variables u_i bound between 1 and n, as additional variables bounds = [] for i in range(start_step, nedges + nnodes): bounds.append(" 1 <= x{0} <= {1}".format(i, nnodes - 1)) L.add_vars(nedges) if constraint_generation: L.constraints = balance subtours = [] while True: selected, obj_val = L.lpsolve() results = sorted(x for i, x in enumerate(edges) if i in selected) \ if selected else None if not results: break G = edges_to_graph(results) cycles = list(nx.simple_cycles(G)) if len(cycles) == 1: break for c in cycles: incident = [edge_store[a, b] for a, b in pairwise(c + [c[0]])] icc = summation(incident) subtours.append("{0} <= {1}".format(icc, len(incident) - 1)) L.constraints = balance + subtours else: L.constraints = balance + mtz L.add_vars(nnodes - 1, offset=start_step, binary=False) L.bounds = bounds selected, obj_val = L.lpsolve() results = sorted(x for i, x in enumerate(edges) if i in selected) \ if selected else None return results
def tsp(edges, constraint_generation=False)
Calculates shortest cycle that traverses each node exactly once. Also known as the Traveling Salesman Problem (TSP).
4.147531
4.186896
0.990598
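`LPInstance` and `summation` are referenced throughout these LP rows but not shown. One plausible shape for `summation`, inferred from how a later row splices its output into constraints like "{0}{1} = 0".format(icc, occ.replace('+', '-')), which only concatenates cleanly if each term carries its own leading sign:

    def summation(indices):
        # hypothetical helper: render 1-based LP variables with leading signs,
        # e.g. [0, 2] -> " + x1 + x3"
        return "".join(" + x{0}".format(i + 1) for i in indices)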
outgoing, incoming, nodes = node_to_edge(edges) nedges = len(edges) L = LPInstance() assert flavor in ("longest", "shortest") objective = MAXIMIZE if flavor == "longest" else MINIMIZE L.add_objective(edges, objective=objective) # Balancing constraint, incoming edges equal to outgoing edges except # source and sink constraints = [] for v in nodes: incoming_edges = incoming[v] outgoing_edges = outgoing[v] icc = summation(incoming_edges) occ = summation(outgoing_edges) if v == source: if not outgoing_edges: return None constraints.append("{0} = 1".format(occ)) elif v == sink: if not incoming_edges: return None constraints.append("{0} = 1".format(icc)) else: # Balancing constraints.append("{0}{1} = 0".format(icc, occ.replace('+', '-'))) # Simple path if incoming_edges: constraints.append("{0} <= 1".format(icc)) if outgoing_edges: constraints.append("{0} <= 1".format(occ)) L.constraints = constraints L.add_vars(nedges) selected, obj_val = L.lpsolve() results = sorted(x for i, x in enumerate(edges) if i in selected) \ if selected else None results = edges_to_path(results) return results, obj_val
def path(edges, source, sink, flavor="longest")
Calculates shortest/longest path from list of edges in a graph >>> g = [(1,2,1),(2,3,9),(2,4,3),(2,5,2),(3,6,8),(4,6,10),(4,7,4)] >>> g += [(6,8,7),(7,9,5),(8,9,6),(9,10,11)] >>> path(g, 1, 8, flavor="shortest") ([1, 2, 4, 6, 8], 21) >>> path(g, 1, 8, flavor="longest") ([1, 2, 3, 6, 8], 25)
4.017703
4.367337
0.919943
G = nx.DiGraph() edge_to_index = {} for i, (a, b, w) in enumerate(edges): G.add_edge(a, b) edge_to_index[a, b] = i nedges = len(edges) L = LPInstance() L.add_objective(edges, objective=MINIMIZE) constraints = [] ncycles = 0 for c in nx.simple_cycles(G): cycle_edges = [] rc = c + [c[0]] # Rotate the cycle for a, b in pairwise(rc): cycle_edges.append(edge_to_index[a, b]) cc = summation(cycle_edges) constraints.append("{0} >= 1".format(cc)) ncycles += 1 if ncycles == maxcycles: break logging.debug("A total of {0} cycles found.".format(ncycles)) L.constraints = constraints L.add_vars(nedges) selected, obj_val = L.lpsolve(clean=False) if remove: results = [x for i, x in enumerate(edges) if i not in selected] \ if selected else None else: results = [x for i, x in enumerate(edges) if i in selected] \ if selected else None return results, obj_val
def min_feedback_arc_set(edges, remove=False, maxcycles=20000)
A directed graph may contain directed cycles, when such cycles are undesirable, we wish to eliminate them and obtain a directed acyclic graph (DAG). A feedback arc set has the property that it has at least one edge of every cycle in the graph. A minimum feedback arc set is the set that minimizes the total weight of the removed edges; or alternatively maximize the remaining edges. See: <http://en.wikipedia.org/wiki/Feedback_arc_set>. The MIP formulation proceeds as follows: use 0/1 indicator variable to select whether an edge is in the set, subject to constraint that each cycle must pick at least one such edge. >>> g = [(1, 2, 2), (2, 3, 2), (3, 4, 2)] + [(1, 3, 1), (3, 2, 1), (2, 4, 1)] >>> min_feedback_arc_set(g) ([(3, 2, 1)], 1) >>> min_feedback_arc_set(g, remove=True) # Return DAG ([(1, 2, 2), (2, 3, 2), (3, 4, 2), (1, 3, 1), (2, 4, 1)], 1)
3.299301
3.370556
0.97886
max_sum, max_start_index, max_end_index = -Infinity, 0, 0 current_max_sum = 0 current_start_index = 0 for current_end_index, x in enumerate(a): current_max_sum += x if current_max_sum > max_sum: max_sum, max_start_index, max_end_index = current_max_sum, \ current_start_index, current_end_index if current_max_sum < 0: current_max_sum = 0 current_start_index = current_end_index + 1 return max_sum, max_start_index, max_end_index
def max_sum(a)
For an input array a, output the range that gives the largest sum >>> max_sum([4, 4, 9, -5, -6, -1, 5, -6, -8, 9]) (17, 0, 2) >>> max_sum([8, -10, 10, -9, -6, 9, -7, -4, -10, -8]) (10, 2, 2) >>> max_sum([10, 1, -10, -8, 6, 10, -10, 6, -3, 10]) (19, 4, 9)
1.79848
1.976982
0.90971
p = OptionParser(silicosoma.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) silicofile, = args fp = must_open(silicofile) fw = must_open(opts.outfile, "w") next(fp) positions = [int(x) for x in next(fp).split()] for a, b in pairwise(positions): assert a <= b fragsize = int(round((b - a) / 1000.)) # kb if fragsize: print(fragsize, 0, file=fw)
def silicosoma(args)
%prog silicosoma in.silico > out.soma Convert .silico to .soma file. Format of .silico A text file containing in-silico digested contigs. This file contains pairs of lines. The first line in each pair contains an identifier, the contig length in bp, and the number of restriction sites, separated by white space. The second line contains a white space delimited list of the restriction site positions. Format of .soma Each line of the text file contains two decimal numbers: The size of the fragment and the standard deviation (both in kb), separated by white space. The standard deviation is ignored.
3.224824
3.055316
1.05548
from itertools import groupby from jcvi.assembly.patch import merge_ranges p = OptionParser(condense.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args bed = Bed(bedfile, sorted=False) key = lambda x: (x.seqid, x.start, x.end) for k, sb in groupby(bed, key=key): sb = list(sb) b = sb[0] chr, start, end, strand = merge_ranges(sb) id = "{0}:{1}-{2}".format(chr, start, end) b.accn = id print(b)
def condense(args)
%prog condense OM.bed Merge split alignments in OM bed.
3.190137
2.995006
1.065152
p = OptionParser(chimera.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args bed = Bed(bedfile) selected = select_bed(bed) mapped = defaultdict(set) # scaffold => chr chimerabed = "chimera.bed" fw = open(chimerabed, "w") for b in selected: scf = range_parse(b.accn).seqid chr = b.seqid mapped[scf].add(chr) nchimera = 0 for s, chrs in sorted(mapped.items()): if len(chrs) == 1: continue print("=" * 80, file=sys.stderr) print("{0} mapped to multiple locations: {1}".\ format(s, ",".join(sorted(chrs))), file=sys.stderr) ranges = [] for b in selected: rr = range_parse(b.accn) scf = rr.seqid if scf == s: print(b, file=sys.stderr) ranges.append(rr) # Identify breakpoints ranges.sort(key=lambda x: (x.seqid, x.start, x.end)) for a, b in pairwise(ranges): seqid = a.seqid if seqid != b.seqid: continue start, end = a.end, b.start if start > end: start, end = end, start chimeraline = "\t".join(str(x) for x in (seqid, start, end)) print(chimeraline, file=fw) print(chimeraline, file=sys.stderr) nchimera += 1 fw.close() logging.debug("A total of {0} junctions written to `{1}`.".\ format(nchimera, chimerabed))
def chimera(args)
%prog chimera bedfile Scan the bed file to break scaffolds that multi-map.
2.761213
2.62126
1.053391
ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) for i, x in enumerate(bed)] selected, score = range_chain(ranges) selected = [bed[x.id] for x in selected] return selected
def select_bed(bed)
Return non-overlapping set of ranges, choosing high scoring blocks over low scoring alignments when there are conflicts.
5.375878
4.776206
1.125554
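`Range` and `range_chain` come from elsewhere in the package. The fields used above are consistent with a simple record; a sketch (the real class may differ):

    from collections import namedtuple

    # hypothetical stand-in: range_chain() picks a max-scoring set of
    # non-overlapping ranges and returns (selected, total_score)
    Range = namedtuple("Range", "seqid start end score id")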
from jcvi.formats.sizes import Sizes from jcvi.formats.agp import OO, build p = OptionParser(fasta.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, scffasta, pmolfasta = args pf = bedfile.rsplit(".", 1)[0] bed = Bed(bedfile) selected = select_bed(bed) oo = OO() seen = set() sizes = Sizes(scffasta).mapping agpfile = pf + ".agp" agp = open(agpfile, "w") for b in selected: scf = range_parse(b.accn).seqid chr = b.seqid cs = (chr, scf) if cs not in seen: oo.add(chr, scf, sizes[scf], b.strand) seen.add(cs) else: logging.debug("Seen {0}, ignored.".format(cs)) oo.write_AGP(agp, gaptype="contig") agp.close() build([agpfile, scffasta, pmolfasta])
def fasta(args)
%prog fasta bedfile scf.fasta pseudomolecules.fasta Use OM bed to scaffold and create pseudomolecules. bedfile can be generated by running jcvi.assembly.opticalmap bed --blockonly
4.281721
3.902131
1.097278
from jcvi.formats.bed import sort p = OptionParser(bed.__doc__) p.add_option("--blockonly", default=False, action="store_true", help="Only print out large blocks, not fragments [default: %default]") p.add_option("--point", default=False, action="store_true", help="Print accession as single point instead of interval") p.add_option("--scale", type="float", help="Scale the OM distance by factor") p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements [default: %default]") p.add_option("--nosort", default=False, action="store_true", help="Do not sort bed [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) xmlfile, = args bedfile = xmlfile.rsplit(".", 1)[0] + ".bed" om = OpticalMap(xmlfile) om.write_bed(bedfile, point=opts.point, scale=opts.scale, blockonly=opts.blockonly, switch=opts.switch) if not opts.nosort: sort([bedfile, "--inplace"])
def bed(args)
%prog bed xmlfile Print summary of optical map alignment in BED format.
3.046853
2.752326
1.10701
from jcvi.formats.sizes import Sizes from jcvi.formats.sam import index p = OptionParser(bam.__doc__) p.set_home("eddyyeh") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gsnapfile, fastafile = args EYHOME = opts.eddyyeh_home pf = gsnapfile.rsplit(".", 1)[0] uniqsam = pf + ".unique.sam" samstats = uniqsam + ".stats" sizesfile = Sizes(fastafile).filename if need_update((gsnapfile, sizesfile), samstats): cmd = op.join(EYHOME, "gsnap2gff3.pl") cmd += " --format sam -i {0} -o {1}".format(gsnapfile, uniqsam) cmd += " -u -l {0} -p {1}".format(sizesfile, opts.cpus) sh(cmd) index([uniqsam]) return uniqsam
def bam(args)
%prog bam input.gsnap ref.fasta Convert GSNAP output to BAM.
4.102281
3.908465
1.049589
p = OptionParser(index.__doc__) p.add_option("--supercat", default=False, action="store_true", help="Concatenate reference to speed up alignment") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) dbfile, = args check_index(dbfile, supercat=opts.supercat)
def index(args)
%prog index database.fasta Wrapper for `gmap_build`. Same interface.
3.437486
3.155945
1.08921
p = OptionParser(gmap.__doc__) p.add_option("--cross", default=False, action="store_true", help="Cross-species alignment") p.add_option("--npaths", default=0, type="int", help="Maximum number of paths to show." " If set to 0, prints two paths if chimera" " detected, else one.") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) dbfile, fastafile = args assert op.exists(dbfile) and op.exists(fastafile) prefix = get_prefix(fastafile, dbfile) logfile = prefix + ".log" gmapfile = prefix + ".gmap.gff3" if not need_update((dbfile, fastafile), gmapfile): logging.error("`{0}` exists. `gmap` already run.".format(gmapfile)) else: dbdir, dbname = check_index(dbfile) cmd = "gmap -D {0} -d {1}".format(dbdir, dbname) cmd += " -f 2 --intronlength=100000" # Output format 2 cmd += " -t {0}".format(opts.cpus) cmd += " --npaths {0}".format(opts.npaths) if opts.cross: cmd += " --cross-species" cmd += " " + fastafile sh(cmd, outfile=gmapfile, errfile=logfile) return gmapfile, logfile
def gmap(args)
%prog gmap database.fasta fastafile Wrapper for `gmap`.
3.38347
3.189818
1.060709
from jcvi.formats.fastq import guessoffset p = OptionParser(align.__doc__) p.add_option("--rnaseq", default=False, action="store_true", help="Input is RNA-seq reads, turn splicing on") p.add_option("--native", default=False, action="store_true", help="Convert GSNAP output to NATIVE format") p.set_home("eddyyeh") p.set_outdir() p.set_cpus() opts, args = p.parse_args(args) if len(args) == 2: logging.debug("Single-end alignment") elif len(args) == 3: logging.debug("Paired-end alignment") else: sys.exit(not p.print_help()) dbfile, readfile = args[:2] outdir = opts.outdir assert op.exists(dbfile) and op.exists(readfile) prefix = get_prefix(readfile, dbfile) logfile = op.join(outdir, prefix + ".log") gsnapfile = op.join(outdir, prefix + ".gsnap") nativefile = gsnapfile.rsplit(".", 1)[0] + ".unique.native" if not need_update((dbfile, readfile), gsnapfile): logging.error("`{0}` exists. `gsnap` already run.".format(gsnapfile)) else: dbdir, dbname = check_index(dbfile) cmd = "gsnap -D {0} -d {1}".format(dbdir, dbname) cmd += " -B 5 -m 0.1 -i 2 -n 3" # memory, mismatch, indel penalty, nhits if opts.rnaseq: cmd += " -N 1" cmd += " -t {0}".format(opts.cpus) cmd += " --gmap-mode none --nofails" if readfile.endswith(".gz"): cmd += " --gunzip" try: offset = "sanger" if guessoffset([readfile]) == 33 else "illumina" cmd += " --quality-protocol {0}".format(offset) except AssertionError: pass cmd += " " + " ".join(args[1:]) sh(cmd, outfile=gsnapfile, errfile=logfile) if opts.native: EYHOME = opts.eddyyeh_home if need_update(gsnapfile, nativefile): cmd = op.join(EYHOME, "convert2native.pl") cmd += " --gsnap {0} -o {1}".format(gsnapfile, nativefile) cmd += " -proc {0}".format(opts.cpus) sh(cmd) return gsnapfile, logfile
def align(args)
%prog align database.fasta read1.fq read2.fq Wrapper for `gsnap` single-end or paired-end, depending on the number of args.
3.593532
3.404952
1.055384
overlap_set = set() active = set() ends = [] for i, (chr, left, right) in enumerate(eclusters): ends.append((chr, left, 0, i)) # 0/1 for left/right-ness ends.append((chr, right, 1, i)) ends.sort() chr_last = "" for chr, pos, left_right, i in ends: if chr != chr_last: active.clear() if left_right == 0: active.add(i) else: active.remove(i) if len(active) > depth: overlap_set.add(tuple(sorted(active))) chr_last = chr return overlap_set
def get_1D_overlap(eclusters, depth=1)
Find blocks that are 1D overlapping; returns cliques of block ids that are in conflict.
3.081103
3.044353
1.012071
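A toy run of the sweep line above (a sketch): three intervals on one chromosome, where intervals 0 and 1 overlap and interval 2 is disjoint.

    eclusters = [("chr1", 0, 100), ("chr1", 50, 150), ("chr1", 200, 300)]
    # with the default depth=1, any point covered by more than one interval
    # yields a conflict clique:
    # get_1D_overlap(eclusters) -> {(0, 1)}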
mergeables = Grouper() active = set() x_ends = [] for i, (range_x, range_y, score) in enumerate(eclusters): chr, left, right = range_x x_ends.append((chr, left, 0, i)) # 0/1 for left/right-ness x_ends.append((chr, right, 1, i)) x_ends.sort() chr_last = "" for chr, pos, left_right, i in x_ends: if chr != chr_last: active.clear() if left_right == 0: active.add(i) for x in active: # check y-overlap if range_overlap(eclusters[x][1], eclusters[i][1]): mergeables.join(x, i) else: # right end active.remove(i) chr_last = chr return mergeables
def get_2D_overlap(chain, eclusters)
Implements a sweep line algorithm that has a better running time than the naive O(n^2): assume each block has x_ends and y_ends for the bounds 1. sort x_ends, and take a sweep line to scan the x_ends 2. if left end, test y-axis intersection of current block with `active` set; also put this block in the `active` set 3. if right end, remove block from the `active` set
4.074193
3.767814
1.081315
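`Grouper` above behaves like a disjoint-set that can `join` two block ids. A minimal union-find sketch compatible with those calls (the real jcvi Grouper has more features, e.g. iteration over groups):

    class Grouper(object):
        def __init__(self):
            self.parent = {}

        def find(self, x):
            # path-halving find
            self.parent.setdefault(x, x)
            while self.parent[x] != x:
                self.parent[x] = self.parent[self.parent[x]]
                x = self.parent[x]
            return x

        def join(self, a, b):
            self.parent[self.find(a)] = self.find(b)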
eclusters = [] for cluster in clusters: xlist, ylist, scores = zip(*cluster) score = _score(cluster) xchr, xmin = min(xlist) xchr, xmax = max(xlist) ychr, ymin = min(ylist) ychr, ymax = max(ylist) # allow fuzziness to the boundary xmax += extend ymax += extend # because extend can be negative values, we don't want it to be less than min if xmax < xmin: xmin, xmax = xmax, xmin if ymax < ymin: ymin, ymax = ymax, ymin eclusters.append(((xchr, xmin, xmax), (ychr, ymin, ymax), score)) return eclusters
def make_range(clusters, extend=0)
Convert to interval ends from a list of anchors. `extend` modifies the xmax, ymax boundary of the box, which can be positive or negative; very useful when we want to make the range as fuzzy as we specify.
3.821817
3.736899
1.022724
qa, qb = quota eclusters = make_range(clusters, extend=-Nmax) # (1-based index, cluster score) nodes = [(i+1, c[-1]) for i, c in enumerate(eclusters)] eclusters_x, eclusters_y, scores = zip(*eclusters) # represents the constraints over x-axis and y-axis constraints_x = get_1D_overlap(eclusters_x, qa) constraints_y = get_1D_overlap(eclusters_y, qb) return nodes, constraints_x, constraints_y
def get_constraints(clusters, quota=(1, 1), Nmax=0)
Check pairwise cluster comparisons; if two clusters overlap, mark the edge as a conflict.
5.67973
5.588982
1.016237
from io import StringIO lp_handle = StringIO() lp_handle.write("Maximize\n ") records = 0 for i, score in nodes: lp_handle.write("+ %d x%d " % (score, i)) # SCIP does not like really long strings per row records += 1 if records % 10 == 0: lp_handle.write("\n") lp_handle.write("\n") num_of_constraints = 0 lp_handle.write("Subject To\n") for c in constraints_x: additions = " + ".join("x%d" % (x+1) for x in c) lp_handle.write(" %s <= %d\n" % (additions, qa)) num_of_constraints += len(constraints_x) # non-self if not (constraints_x is constraints_y): for c in constraints_y: additions = " + ".join("x%d" % (x+1) for x in c) lp_handle.write(" %s <= %d\n" % (additions, qb)) num_of_constraints += len(constraints_y) print("number of variables (%d), number of constraints (%d)" % (len(nodes), num_of_constraints), file=sys.stderr) lp_handle.write("Binary\n") for i, score in nodes: lp_handle.write(" x%d\n" % i) lp_handle.write("End\n") lp_data = lp_handle.getvalue() lp_handle.close() return lp_data
def format_lp(nodes, constraints_x, qa, constraints_y, qb)
Maximize
 4 x1 + 2 x2 + 3 x3 + x4
Subject To
 x1 + x2 <= 1
End
2.539375
2.62387
0.967797
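A tiny instance of the formatter above (a sketch): two mutually exclusive blocks with scores 4 and 2, which reproduces the LP in the docstring.

    nodes = [(1, 4), (2, 2)]
    constraints_x = constraints_y = {(0, 1)}  # same object, so y is skipped
    lp = format_lp(nodes, constraints_x, 1, constraints_y, 1)
    # Maximize
    #  + 4 x1 + 2 x2
    # Subject To
    #  x1 + x2 <= 1
    # Binary
    #  x1
    #  x2
    # End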
qb, qa = quota # flip it nodes, constraints_x, constraints_y = get_constraints( clusters, (qa, qb), Nmax=Nmax) if self_match: constraints_x = constraints_y = constraints_x | constraints_y lp_data = format_lp(nodes, constraints_x, qa, constraints_y, qb) if solver == "SCIP": filtered_list = SCIPSolver(lp_data, work_dir, verbose=verbose).results if not filtered_list: print("SCIP fails... trying GLPK", file=sys.stderr) filtered_list = GLPKSolver( lp_data, work_dir, verbose=verbose).results elif solver == "GLPK": filtered_list = GLPKSolver(lp_data, work_dir, verbose=verbose).results if not filtered_list: print("GLPK fails... trying SCIP", file=sys.stderr) filtered_list = SCIPSolver( lp_data, work_dir, verbose=verbose).results return filtered_list
def solve_lp(clusters, quota, work_dir="work", Nmax=0, self_match=False, solver="SCIP", verbose=False)
Solve the formatted LP instance
2.875002
2.857194
1.006233
if not map_type and not number: print_all_maps() elif map_type: print_maps_by_type(map_type, number) else: s = ('Invalid parameter combination. ' 'number without map_type is not supported.') raise ValueError(s)
def print_maps(map_type=None, number=None)
Print maps by type and/or number of defined colors. Parameters ---------- map_type : {'Sequential', 'Diverging', 'Qualitative'}, optional Filter output by map type. By default all maps are printed. number : int, optional Filter output by number of defined colors. By default there is no numeric filtering.
4.074062
4.769933
0.854113
map_type = map_type.lower().capitalize() if map_type not in MAP_TYPES: s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES) raise ValueError(s) print(map_type) map_keys = sorted(COLOR_MAPS[map_type].keys()) format_str = '{0:8} : {1}' for mk in map_keys: num_keys = sorted(COLOR_MAPS[map_type][mk].keys(), key=int) if not number or str(number) in num_keys: num_str = '{' + ', '.join(num_keys) + '}' print(format_str.format(mk, num_str))
def print_maps_by_type(map_type, number=None)
Print all available maps of a given type. Parameters ---------- map_type : {'Sequential', 'Diverging', 'Qualitative'} Select map type to print. number : int, optional Filter output by number of defined colors. By default there is no numeric filtering.
2.902419
3.095158
0.937729
number = str(number) map_type = map_type.lower().capitalize() # check for valid type if map_type not in MAP_TYPES: s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES) raise ValueError(s) # make a dict of lower case map name to map name so this can be # insensitive to case. # this would be a perfect spot for a dict comprehension but going to # wait on that to preserve 2.6 compatibility. # map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()} map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys()) # check for valid name if name.lower() not in map_names: s = 'Invalid color map name {0!r} for type {1!r}.\n' s = s.format(name, map_type) valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()] valid_names.sort() s += 'Valid names are: {0}'.format(valid_names) raise ValueError(s) name = map_names[name.lower()] # check for valid number if number not in COLOR_MAPS[map_type][name]: s = 'Invalid number for map type {0!r} and name {1!r}.\n' s = s.format(map_type, str(name)) valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()] valid_numbers.sort() s += 'Valid numbers are : {0}'.format(valid_numbers) raise ValueError(s) colors = COLOR_MAPS[map_type][name][number]['Colors'] if reverse: name += '_r' colors = [x for x in reversed(colors)] return BrewerMap(name, map_type, colors)
def get_map(name, map_type, number, reverse=False)
Return a `BrewerMap` representation of the specified color map. Parameters ---------- name : str Name of color map. Use `print_maps` to see available color maps. map_type : {'Sequential', 'Diverging', 'Qualitative'} Select color map type. number : int Number of defined colors in color map. reverse : bool, optional Set to True to get the reversed color map.
2.622341
2.598634
1.009123
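Typical use of the row above (a sketch; 'RdBu' is a standard ColorBrewer diverging map, and lookups are case-insensitive as the code shows):

    bmap = get_map('rdbu', 'diverging', 5, reverse=True)
    print(bmap.name)         # 'RdBu_r'
    print(len(bmap.colors))  # 5, in reversed order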
seq_maps = COLOR_MAPS[map_type] loaded_maps = {} for map_name in seq_maps: loaded_maps[map_name] = {} for num in seq_maps[map_name]: inum = int(num) colors = seq_maps[map_name][num]['Colors'] bmap = BrewerMap(map_name, map_type, colors) loaded_maps[map_name][inum] = bmap max_num = int(max(seq_maps[map_name].keys(), key=int)) loaded_maps[map_name]['max'] = loaded_maps[map_name][max_num] return loaded_maps
def _load_maps_by_type(map_type)
Load all maps of a given type into a dictionary. Color maps are loaded as BrewerMap objects. Dictionary is keyed by map name and then integer numbers of defined colors. There is an additional 'max' key that points to the color map with the largest number of defined colors. Parameters ---------- map_type : {'Sequential', 'Diverging', 'Qualitative'} Returns ------- maps : dict of BrewerMap
3.060318
2.74498
1.114878
hc = [] for color in self.colors: h = '#' + ''.join('{0:>02}'.format(hex(c)[2:].upper()) for c in color) hc.append(h) return hc
def hex_colors(self)
Colors as a list of hex strings. (e.g. '#A912F4')
4.544262
4.13151
1.099904
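A worked pass through the formatting above, using the docstring's own example color:

    color = (169, 18, 244)
    h = '#' + ''.join('{0:>02}'.format(hex(c)[2:].upper()) for c in color)
    print(h)  # '#A912F4'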
mc = [] for color in self.colors: mc.append(tuple([x / 255. for x in color])) return mc
def mpl_colors(self)
Colors expressed on the range 0-1 as used by matplotlib.
4.647808
4.435851
1.047783
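A small sketch of feeding the 0-1 scaled colors to matplotlib (assumes `get_map` from the rows above, and that `mpl_colors` is exposed as a property as in brewer2mpl; 'Set1' is a standard ColorBrewer qualitative map):

    import matplotlib.pyplot as plt

    bmap = get_map('Set1', 'Qualitative', 3)
    for i, c in enumerate(bmap.mpl_colors):
        plt.plot([0, 1], [i, i], color=c, lw=4)
    plt.show()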