code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
a_chr, a_min, a_max = a
b_chr, b_min, b_max = b
a_min, a_max = sorted((a_min, a_max))
b_min, b_max = sorted((b_min, b_max))
shorter = min((a_max - a_min), (b_max - b_min)) + 1
# must be on the same chromosome
if a_chr != b_chr:
    ov = 0
else:
    ov = min(shorter, (a_max - b_min + 1), (b_max - a_min + 1))
    ov = max(ov, 0)

if ratio:
    ov /= float(shorter)

return ov
def range_overlap(a, b, ratio=False)
Returns whether two ranges overlap. Set ratio=True to return the overlap
ratio over the shorter range of the two.

>>> range_overlap(("1", 30, 45), ("1", 41, 55))
5
>>> range_overlap(("1", 21, 45), ("1", 41, 75), ratio=True)
0.2
>>> range_overlap(("1", 30, 45), ("1", 15, 55))
16
>>> range_overlap(("1", 30, 45), ("1", 15, 55), ratio=True)
1.0
>>> range_overlap(("1", 30, 45), ("1", 57, 68))
0
>>> range_overlap(("1", 30, 45), ("2", 42, 55))
0
>>> range_overlap(("1", 30, 45), ("2", 42, 55), ratio=True)
0.0
2.3335
2.51693
0.927122
assert distmode in ('ss', 'ee')

a_chr, a_min, a_max, a_strand = a
b_chr, b_min, b_max, b_strand = b
# must be on the same chromosome
if a_chr != b_chr:
    dist = -1
#elif range_overlap(a[:3], b[:3]):
#    dist = 0
else:
    # If the two ranges do not overlap, check stranded-ness and distance
    if a_min > b_min:
        a_min, b_min = b_min, a_min
        a_max, b_max = b_max, a_max
        a_strand, b_strand = b_strand, a_strand

    if distmode == "ss":
        dist = b_max - a_min + 1
    elif distmode == "ee":
        dist = b_min - a_max - 1

orientation = a_strand + b_strand

return dist, orientation
def range_distance(a, b, distmode='ss')
Returns the distance between two ranges. distmode sets the places on the two
ranges used to measure the distance (s = start, e = end); only 'ss' and 'ee'
are supported.

>>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+'))
(26, '++')
>>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-'))
(39, '--')
>>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+'))
(26, '-+')
>>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee')
(2, '+-')
2.730655
2.886739
0.945931
rmin = min(ranges)[0]
rmax = max(ranges, key=lambda x: x[1])[1]

return rmin, rmax
def range_minmax(ranges)
Returns the span of a collection of ranges where start is the smallest of
all starts, and end is the largest of all ends.

>>> ranges = [(30, 45), (40, 50), (10, 100)]
>>> range_minmax(ranges)
(10, 100)
2.966094
4.827997
0.614353
from jcvi.utils.orderedcollections import SortedCollection

key = (lambda x: x) if left else (lambda x: (x[0], x[2], x[1]))
rr = SortedCollection(ranges, key=key)
try:
    if left:
        s = rr.find_le(b)
        assert key(s) <= key(b), (s, b)
    else:
        s = rr.find_ge(b)
        assert key(s) >= key(b), (s, b)
except ValueError:
    s = None

return s
def range_closest(ranges, b, left=True)
Returns the range that's closest to the given position. Notice that the
behavior is to return ONE closest range to the left end (if left is True).
This is a SLOW method.

>>> ranges = [("1", 30, 40), ("1", 33, 35), ("1", 10, 20)]
>>> b = ("1", 22, 25)
>>> range_closest(ranges, b)
('1', 10, 20)
>>> range_closest(ranges, b, left=False)
('1', 33, 35)
>>> b = ("1", 2, 5)
>>> range_closest(ranges, b)
3.354698
3.686493
0.909997
from jcvi.utils.iter import pairwise

ranges = range_merge(ranges)
interleaved_ranges = []
for ch, cranges in groupby(ranges, key=lambda x: x[0]):
    cranges = list(cranges)
    size = sizes.get(ch, None)
    if size:
        ch, astart, aend = cranges[0]
        if astart > 1:
            interleaved_ranges.append((ch, 1, astart - 1))
        elif empty:
            interleaved_ranges.append(None)

    for a, b in pairwise(cranges):
        ch, astart, aend = a
        ch, bstart, bend = b
        istart, iend = aend + 1, bstart - 1
        if istart <= iend:
            interleaved_ranges.append((ch, istart, iend))
        elif empty:
            interleaved_ranges.append(None)

    if size:
        ch, astart, aend = cranges[-1]
        if aend < size:
            interleaved_ranges.append((ch, aend + 1, size))
        elif empty:
            interleaved_ranges.append(None)

return interleaved_ranges
def range_interleave(ranges, sizes={}, empty=False)
Returns the ranges in between the given ranges.

>>> ranges = [("1", 30, 40), ("1", 45, 50), ("1", 10, 30)]
>>> range_interleave(ranges)
[('1', 41, 44)]
>>> ranges = [("1", 30, 40), ("1", 42, 50)]
>>> range_interleave(ranges)
[('1', 41, 41)]
>>> range_interleave(ranges, sizes={"1": 70})
[('1', 1, 29), ('1', 41, 41), ('1', 51, 70)]
2.020427
2.156713
0.936809
if not ranges:
    return []

ranges.sort()
cur_range = list(ranges[0])
merged_ranges = []
for r in ranges[1:]:
    # open new range if start > cur_end or seqid != cur_seqid
    if r[1] - cur_range[2] > dist or r[0] != cur_range[0]:
        merged_ranges.append(tuple(cur_range))
        cur_range = list(r)
    else:
        cur_range[2] = max(cur_range[2], r[2])

merged_ranges.append(tuple(cur_range))

return merged_ranges
def range_merge(ranges, dist=0)
Returns merged ranges. Similar to range_union, except this returns the new
ranges.

>>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)]
>>> range_merge(ranges)
[('1', 10, 50)]
>>> ranges = [("1", 30, 40), ("1", 45, 50)]
>>> range_merge(ranges)
[('1', 30, 40), ('1', 45, 50)]
>>> ranges = [("1", 30, 40), ("1", 45, 50)]
>>> range_merge(ranges, dist=5)
[('1', 30, 50)]
2.320565
2.563124
0.905366
if not ranges:
    return 0

ranges.sort()

total_len = 0
cur_chr, cur_left, cur_right = ranges[0]  # left-most range
for r in ranges:
    # open new range if left > cur_right or chr != cur_chr
    if r[1] > cur_right or r[0] != cur_chr:
        total_len += cur_right - cur_left + 1
        cur_chr, cur_left, cur_right = r
    else:
        # update cur_right
        cur_right = max(r[2], cur_right)

# the last one
total_len += cur_right - cur_left + 1

return total_len
def range_union(ranges)
Returns the total size of the ranges; expects each range as (chr, left, right).

>>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)]
>>> range_union(ranges)
41
>>> ranges = [("1", 30, 45), ("2", 40, 50)]
>>> range_union(ranges)
27
>>> ranges = [("1", 30, 45), ("1", 45, 50)]
>>> range_union(ranges)
21
>>> range_union([])
0
2.370536
2.213782
1.070808
if not ranges:
    return 0

ranges.sort()
ans = 0
for seq, lt in groupby(ranges, key=lambda x: x[0]):
    lt = list(lt)
    ans += max(max(lt)[1:]) - min(min(lt)[1:]) + 1

return ans
def range_span(ranges)
Returns the total span from the left-most range to the right-most range.

>>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)]
>>> range_span(ranges)
41
>>> ranges = [("1", 30, 45), ("2", 40, 50)]
>>> range_span(ranges)
27
>>> ranges = [("1", 30, 45), ("1", 45, 50)]
>>> range_span(ranges)
21
>>> range_span([])
0
4.167857
4.457263
0.935071
endpoints = _make_endpoints(ranges)

for seqid, ends in groupby(endpoints, lambda x: x[0]):
    active = []
    depth = 0
    for seqid, pos, leftright, i, score in ends:
        if leftright == LEFT:
            active.append(i)
            depth += 1
        else:
            depth -= 1

        if depth == 0 and active:
            yield active
            active = []
def range_piles(ranges)
Return piles of intervals that overlap. The piles are only interrupted by
regions of zero coverage.

>>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)]
>>> list(range_piles(ranges))
[[0, 1], [2]]
5.532663
5.969149
0.926876
overlap = set()
active = set()
endpoints = _make_endpoints(ranges)

for seqid, ends in groupby(endpoints, lambda x: x[0]):
    active.clear()
    for seqid, pos, leftright, i, score in ends:
        if leftright == LEFT:
            active.add(i)
        else:
            active.remove(i)

        if len(active) > depth:
            overlap.add(tuple(sorted(active)))

for ov in overlap:
    yield ov
def range_conflict(ranges, depth=1)
Find intervals that are overlapping in 1-dimension. Return groups of block
IDs that are in conflict.

>>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)]
>>> list(range_conflict(ranges))
[(0, 1)]
5.797117
6.793574
0.853324
endpoints = _make_endpoints(ranges)

# stores the left end index for quick retrieval
left_index = {}
# dynamic programming, each entry [score, from_index, which_chain]
scores = []

for i, (seqid, pos, leftright, j, score) in enumerate(endpoints):
    cur_score = [0, -1, -1] if i == 0 else scores[-1][:]
    if leftright == LEFT:
        left_index[j] = i
    else:  # this is right end of j-th interval
        # update if chaining j-th interval gives a better score
        left_j = left_index[j]
        chain_score = scores[left_j][0] + score
        if chain_score > cur_score[0]:
            cur_score = [chain_score, left_j, j]
    scores.append(cur_score)

chains = []
score, last, chain_id = scores[-1]  # start backtracking
while last != -1:
    if chain_id != -1:
        chains.append(chain_id)
    _, last, chain_id = scores[last]

chains.reverse()
selected = [ranges[x] for x in chains]

return selected, score
def range_chain(ranges)
Take a list of weighted intervals and find the non-overlapping set with the
maximum total weight. We proceed over the end points (sorted by their
relative positions). The input is a list of ranges of the form
Range(seqid, start, end, score, id); the output is the subset of
non-overlapping ranges that gives the highest total score, plus that score.

>>> ranges = [Range("1", 0, 9, 22, 0), Range("1", 3, 18, 24, 1), Range("1", 10, 28, 20, 2)]
>>> range_chain(ranges)
([Range(seqid='1', start=0, end=9, score=22, id=0), Range(seqid='1', start=10, end=28, score=20, id=2)], 42)
>>> ranges = [Range("2", 0, 1, 3, 0), Range("2", 1, 4, 3, 1), Range("3", 5, 7, 3, 2)]
>>> range_chain(ranges)
([Range(seqid='2', start=0, end=1, score=3, id=0), Range(seqid='3', start=5, end=7, score=3, id=2)], 6)
5.016494
4.940298
1.015423
ranges.sort()
for seqid, rrs in groupby(ranges, key=lambda x: x[0]):
    rrs = [(a, b) for (s, a, b) in rrs]
    size = sizes[seqid]
    ds, depthdetails = range_depth(rrs, size, verbose=verbose)
    depthdetails = [(seqid, s, e, d) for s, e, d in depthdetails]
    yield depthdetails
def ranges_depth(ranges, sizes, verbose=True)
Allows triples (seqid, start, end) rather than just tuples (start, end);
see range_depth().
4.519156
3.965022
1.139755
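ranges_depth() carries no doctest; a minimal usage sketch, assuming the jcvi.utils.range module layout (the import path and input values are illustrative, not from the source):

# Hypothetical usage of ranges_depth(); values are made up.
from jcvi.utils.range import ranges_depth

ranges = [("1", 30, 45), ("1", 40, 50), ("2", 10, 20)]
sizes = {"1": 100, "2": 50}  # per-seqid sequence lengths
for depthdetails in ranges_depth(ranges, sizes, verbose=False):
    # each yielded list contains (seqid, start, end, depth) tuples
    print(depthdetails)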
from jcvi.utils.iter import pairwise
from jcvi.utils.cbook import percentage

# Make endpoints
endpoints = []
for a, b in ranges:
    endpoints.append((a, LEFT))
    endpoints.append((b, RIGHT))
endpoints.sort()
vstart, vend = min(endpoints)[0], max(endpoints)[0]

assert 0 <= vstart < size
assert 0 <= vend < size

depth = 0
depthstore = defaultdict(int)
depthstore[depth] += vstart
depthdetails = [(0, vstart, depth)]

for (a, atag), (b, btag) in pairwise(endpoints):
    if atag == LEFT:
        depth += 1
    elif atag == RIGHT:
        depth -= 1
    depthstore[depth] += b - a
    depthdetails.append((a, b, depth))

assert btag == RIGHT
depth -= 1
assert depth == 0

depthstore[depth] += size - vend
depthdetails.append((vend, size, depth))

assert sum(depthstore.values()) == size
if verbose:
    for depth, count in sorted(depthstore.items()):
        print("Depth {0}: {1}".\
                format(depth, percentage(count, size)), file=sys.stderr)

return depthstore, depthdetails
def range_depth(ranges, size, verbose=True)
Overlay ranges on the interval [0, size), and summarize the depth (ploidy)
of the intervals.
3.04568
3.042346
1.001096
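range_depth() has no doctest either; a small sketch of its return values, with made-up inputs (the numbers in the comments follow from the code above):

# Hypothetical usage of range_depth() on plain (start, end) pairs.
ranges = [(10, 20), (15, 30)]
depthstore, depthdetails = range_depth(ranges, 40, verbose=False)
# depthstore maps depth -> number of bases at that depth, here
# {0: 20, 1: 15, 2: 5}; the counts always sum to size.
# depthdetails lists (start, end, depth) segments tiling [0, size).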
formatted = load_csv(header, rows, sep=" ", thousands=thousands)
header, rows = formatted[0], formatted[1:]

return banner(header, rows)
def loadtable(header, rows, major='=', minor='-', thousands=True)
Print a tabular output, with horizontal separators
7.59774
8.557369
0.887859
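loadtable() has no doctest; a hedged usage sketch (the banner layout with '=' and '-' rules follows the tabulate() doctest below; values are made up):

# Hypothetical usage of loadtable(); prints a banner-framed table.
header = ["Chromosome", "Length"]
rows = [("chr1", 1000000), ("chr2", 750000)]
print(loadtable(header, rows))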
pairs = d.keys()
rows, cols = zip(*pairs)
if transpose:
    rows, cols = cols, rows

rows = sorted(set(rows))
cols = sorted(set(cols))
header = ["o"] + list(cols)
table = []
for r in rows:
    combo = [(r, c) for c in cols]
    if transpose:
        combo = [(c, r) for (r, c) in combo]
    data = [d.get(x, "n/a") for x in combo]
    data = ["{0:.1f}".format(x) if isinstance(x, float) else x for x in data]
    if key_fun:
        data = [key_fun(x) for x in data]
    table.append([str(r)] + data)

if not align:
    formatted = load_csv(header, table, sep=sep)
    return "\n".join(formatted)

return loadtable(header, table, thousands=thousands)
def tabulate(d, transpose=False, thousands=True, key_fun=None, sep=',', align=True)
d is a dictionary, keyed by tuple(A, B). Goal is to put A in rows, B in
columns, and report data in table form.

>>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0}
>>> print tabulate(d)
===========
o    a    b
-----------
1    3    4
2    5    0
-----------
>>> print tabulate(d, transpose=True)
===========
o    1    2
-----------
a    3    5
b    4    0
-----------
2.751005
2.748505
1.00091
from jcvi.formats.base import must_open

formatted = load_csv(header, contents,
                     sep=sep, thousands=thousands, align=align)
if comment:
    formatted[0] = '#' + formatted[0][1:]
formatted = "\n".join(formatted)
fw = must_open(filename, "w")
print(formatted, file=fw)

if tee and filename != "stdout":
    print(formatted)
def write_csv(header, contents, sep=",", filename="stdout", thousands=False, tee=False, align=True, comment=False)
Write csv that are aligned with the column headers.

>>> header = ["x_value", "y_value"]
>>> contents = [(1, 100), (2, 200)]
>>> write_csv(header, contents)
x_value, y_value
1, 100
2, 200
3.082751
4.132022
0.746063
from jcvi.formats.base import is_number
from jcvi.formats.blast import best as blast_best, bed as blast_bed
from jcvi.apps.align import blat as blat_align

p = OptionParser(blat.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

maptxt, ref = args
pf = maptxt.rsplit(".", 1)[0]
register = {}
fastafile = pf + ".fasta"
fp = open(maptxt)
fw = open(fastafile, "w")
for row in fp:
    name, lg, pos, seq = row.split()
    if not is_number(pos):
        continue
    register[name] = (pf + '-' + lg, pos)
    print(">{0}\n{1}\n".format(name, seq), file=fw)
fw.close()

blatfile = blat_align([ref, fastafile])
bestfile = blast_best([blatfile])
bedfile = blast_bed([bestfile])
b = Bed(bedfile).order

pf = ".".join((op.basename(maptxt).split(".")[0],
               op.basename(ref).split(".")[0]))
csvfile = pf + ".csv"
fp = open(maptxt)
fw = open(csvfile, "w")
for row in fp:
    name, lg, pos, seq = row.split()
    if name not in b:
        continue
    bbi, bb = b[name]
    scaffold, scaffold_pos = bb.seqid, bb.start
    print(",".join(str(x) for x in \
            (scaffold, scaffold_pos, lg, pos)), file=fw)
fw.close()
def blat(args)
%prog blat map1.txt ref.fasta

Make ALLMAPS input csv based on sequences. The tab-delimited txt file
includes four columns: name, LG, position, sequence.
2.875085
2.729492
1.053341
from jcvi.assembly.allmaps import CSVMapLine
from jcvi.formats.sizes import Sizes
from jcvi.utils.natsort import natsorted
from jcvi.graphics.base import shorten
from jcvi.graphics.dotplot import plt, savefig, markup, normalize_axes, \
            downsample, plot_breaks_and_labels, thousands

p = OptionParser(dotplot.__doc__)
p.set_outfile(outfile=None)
opts, args, iopts = p.set_image_options(args, figsize="8x8",
                                        style="dark", dpi=90, cmap="copper")

if len(args) != 2:
    sys.exit(not p.print_help())

csvfile, fastafile = args
sizes = natsorted(Sizes(fastafile).mapping.items())
seen = set()
raw_data = []

fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])  # the whole canvas
ax = fig.add_axes([.1, .1, .8, .8])  # the dot plot

fp = must_open(csvfile)
for row in fp:
    m = CSVMapLine(row)
    seen.add(m.seqid)
    raw_data.append(m)

# X-axis is the genome assembly
ctgs, ctg_sizes = zip(*sizes)
xsize = sum(ctg_sizes)
qb = list(np.cumsum(ctg_sizes))
qbreaks = list(zip(ctgs, [0] + qb, qb))
qstarts = dict(zip(ctgs, [0] + qb))

# Y-axis is the map
key = lambda x: x.lg
raw_data.sort(key=key)
ssizes = {}
for lg, d in groupby(raw_data, key=key):
    ssizes[lg] = max([x.cm for x in d])
ssizes = natsorted(ssizes.items())
lgs, lg_sizes = zip(*ssizes)
ysize = sum(lg_sizes)
sb = list(np.cumsum(lg_sizes))
sbreaks = list(zip([("LG" + x) for x in lgs], [0] + sb, sb))
sstarts = dict(zip(lgs, [0] + sb))

# Re-code all the scatter dots
data = [(qstarts[x.seqid] + x.pos, sstarts[x.lg] + x.cm, 'g') \
            for x in raw_data if (x.seqid in qstarts)]
npairs = downsample(data)

x, y, c = zip(*data)
ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)

# Flip X-Y label
gy, gx = op.basename(csvfile).split(".")[:2]
gx, gy = shorten(gx, maxchar=30), shorten(gy, maxchar=30)
xlim, ylim = plot_breaks_and_labels(fig, root, ax, gx, gy, xsize, ysize,
                                    qbreaks, sbreaks)
ax.set_xlim(xlim)
ax.set_ylim(ylim)

title = "Alignment: {} vs {}".format(gx, gy)
title += " ({} markers)".format(thousands(npairs))
root.set_title(markup(title), x=.5, y=.96, color="k")
logging.debug(title)
normalize_axes(root)

image_name = opts.outfile or \
            (csvfile.rsplit(".", 1)[0] + "." + iopts.format)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
fig.clear()
def dotplot(args)
%prog dotplot map.csv ref.fasta

Make dotplot between chromosomes and linkage maps. The input map is csv
formatted, for example:

ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
scaffold_2707,11508,1,0
scaffold_2707,11525,1,1.2
3.418679
3.422986
0.998742
from jcvi.formats.base import DictFile

p = OptionParser(header.__doc__)
p.add_option("--prefix", default="",
             help="Prepend text to line number [default: %default]")
p.add_option("--ids", help="Write ids to file [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

mstmap, conversion_table = args
data = MSTMap(mstmap)
hd = data.header
conversion = DictFile(conversion_table)
newhd = [opts.prefix + conversion.get(x, x) for x in hd]

print("\t".join(hd))
print("--->")
print("\t".join(newhd))

ids = opts.ids
if ids:
    fw = open(ids, "w")
    print("\n".join(newhd), file=fw)
    fw.close()
def header(args)
%prog header map conversion_table

Rename lines in the map header. The mapping of old names to new names is
stored in the two-column `conversion_table`.
3.328811
3.059796
1.087919
p = OptionParser(rename.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

mstmap, bedfile = args
markersbed = Bed(bedfile)
markers = markersbed.order

data = MSTMap(mstmap)
header = data.header
header = [header[0]] + ["seqid", "start"] + header[1:]
renamed = []
for b in data:
    m, geno = b.id, b.genotype
    om = m
    if m not in markers:
        m = m.rsplit(".", 1)[0]
        if m not in markers:
            continue
    i, mb = markers[m]
    renamed.append([om, mb.seqid, mb.start, "\t".join(list(geno))])

renamed.sort(key=lambda x: (x[1], x[2]))
fw = must_open(opts.outfile, "w")
print("\t".join(header), file=fw)
for d in renamed:
    print("\t".join(str(x) for x in d), file=fw)
def rename(args)
%prog rename map markers.bed > renamed.map

Rename markers according to the new mapping locations.
3.530998
3.243754
1.088553
from jcvi.formats.blast import bed

p = OptionParser(anchor.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

mapbed, blastfile = args
bedfile = bed([blastfile])
markersbed = Bed(bedfile)
markers = markersbed.order

mapbed = Bed(mapbed, sorted=False)
for b in mapbed:
    m = b.accn
    if m not in markers:
        continue
    i, mb = markers[m]
    new_accn = "{0}:{1}-{2}".format(mb.seqid, mb.start, mb.end)
    b.accn = new_accn
    print(b)
def anchor(args)
%prog anchor map.bed markers.blast > anchored.bed

Anchor scaffolds based on map.
3.566977
3.295757
1.082294
p = OptionParser(bed.__doc__)
p.add_option("--switch", default=False, action="store_true",
             help="Switch reference and aligned map elements [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

mapout, = args
pf = mapout.split(".")[0]
mapbed = pf + ".bed"
bm = BinMap(mapout)
bm.print_to_bed(mapbed, switch=opts.switch)

return mapbed
def bed(args)
%prog bed map.out

Convert MSTMAP output into bed format.
3.514513
3.138759
1.119714
from jcvi.formats.sizes import Sizes

p = OptionParser(fasta.__doc__)
p.add_option("--extend", default=1000, type="int",
             help="Extend seq flanking the gaps [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

mapout, sfasta = args
Flank = opts.extend
pf = mapout.split(".")[0]
mapbed = pf + ".bed"
bm = BinMap(mapout)
bm.print_to_bed(mapbed)

bed = Bed(mapbed, sorted=False)
markersbed = pf + ".markers.bed"
fw = open(markersbed, "w")
sizes = Sizes(sfasta).mapping
for b in bed:
    accn = b.accn
    scf, pos = accn.split(".")
    pos = int(pos)
    start = max(0, pos - Flank)
    end = min(pos + Flank, sizes[scf])
    print("\t".join(str(x) for x in \
            (scf, start, end, accn)), file=fw)
fw.close()

fastaFromBed(markersbed, sfasta, name=True)
def fasta(args)
%prog fasta map.out scaffolds.fasta

Extract marker sequences based on map.
3.582543
3.27566
1.093686
from jcvi.utils.iter import pairwise

p = OptionParser(breakpoint.__doc__)
p.add_option("--diff", default=.1, type="float",
             help="Maximum ratio of differences allowed [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

mstmap, = args
diff = opts.diff
data = MSTMap(mstmap)

# Remove singleton markers (avoid double cross-over)
good = []
nsingletons = 0
for i in xrange(1, len(data) - 1):
    a = data[i]
    left_label, left_rr = check_markers(data[i - 1], a, diff)
    right_label, right_rr = check_markers(a, data[i + 1], diff)

    if left_label == BREAK and right_label == BREAK:
        nsingletons += 1
        continue

    good.append(a)

logging.debug("A total of {0} singleton markers removed.".format(nsingletons))

for a, b in pairwise(good):
    label, rr = check_markers(a, b, diff)
    if label == BREAK:
        print("\t".join(str(x) for x in rr))
def breakpoint(args)
%prog breakpoint mstmap.input > breakpoints.bed

Find scaffold breakpoints using genetic map. Use variation.vcf.mstmap() to
generate the input for this routine.
3.209451
2.984936
1.075216
start, end = line.component_beg, line.component_end
size = end - start + 1
leftNs, rightNs = 0, 0
lid, lo = line.component_id, line.orientation
for s in seq:
    if s in 'nN':
        leftNs += 1
    else:
        break
for s in seq[::-1]:
    if s in 'nN':
        rightNs += 1
    else:
        break

if lo == '-':
    trimstart = start + rightNs
    trimend = end - leftNs
else:
    trimstart = start + leftNs
    trimend = end - rightNs

trimrange = (trimstart, trimend)
oldrange = (start, end)

if trimrange != oldrange:
    logging.debug("{0} trimmed of N's: {1} => {2}".\
            format(lid, oldrange, trimrange))

    if leftNs:
        print("\t".join(str(x) for x in (line.object, 0, 0, 0,
                'N', leftNs, "fragment", "yes", "")), file=newagp)
    if trimend > trimstart:
        print("\t".join(str(x) for x in (line.object, 0, 0, 0,
                line.component_type, lid, trimstart, trimend,
                lo)), file=newagp)
    if rightNs and rightNs != size:
        print("\t".join(str(x) for x in (line.object, 0, 0, 0,
                'N', rightNs, "fragment", "yes", "")), file=newagp)
else:
    print(line, file=newagp)
def trimNs(seq, line, newagp)
Test if the sequences contain dangling N's on both sides. This component needs to be adjusted to the 'actual' sequence range.
2.778
2.741169
1.013436
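The N-counting at the heart of trimNs() can be sketched standalone; this is equivalent to the two character-scanning loops above (the sequence value is made up):

# Count dangling N's on both sides of a sequence, mirroring trimNs().
seq = "NNNACGTACGTNN"
leftNs = len(seq) - len(seq.lstrip("nN"))    # 3
rightNs = len(seq) - len(seq.rstrip("nN"))   # 2
print(leftNs, rightNs)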
import csv

from jcvi.formats.sizes import Sizes

p = OptionParser(fromcsv.__doc__)
p.add_option("--evidence", default="map",
             help="Linkage evidence to add in AGP")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

contigsfasta, mapcsv, mapagp = args
reader = csv.reader(open(mapcsv))
sizes = Sizes(contigsfasta).mapping
next(reader)  # Header
fwagp = must_open(mapagp, "w")
o = OO()
for row in reader:
    if len(row) == 2:
        object, ctg = row
        strand = '?'
    elif len(row) == 3:
        object, ctg, strand = row
    size = sizes[ctg]
    o.add(object, ctg, size, strand)

o.write_AGP(fwagp, gapsize=100, gaptype="scaffold",
            phases={}, evidence=opts.evidence)
def fromcsv(args)
%prog fromcsv contigs.fasta map.csv map.agp

Convert a csv file containing a list of scaffolds/contigs to an AGP file.
4.313989
3.578337
1.205585
p = OptionParser(compress.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

aagpfile, bagpfile = args
# First AGP provides the mapping
store = {}
agp = AGP(aagpfile)
for a in agp:
    if a.is_gap:
        continue
    # Ignore '?' in the mapping
    if a.sign == 0:
        a.sign = 1
    store[(a.object, a.object_beg, a.object_end)] = \
            (a.component_id, a.component_beg, a.component_end, a.sign)

# Second AGP forms the backbone
agp = AGP(bagpfile)
fw = must_open(opts.outfile, "w")
print("\n".join(agp.header), file=fw)
for a in agp:
    if a.is_gap:
        print(a, file=fw)
        continue
    component_id, component_beg, component_end, sign = \
            store[(a.component_id, a.component_beg, a.component_end)]

    orientation = {1: '+', -1: '-', 0: '?'}.get(sign * a.sign)
    atoms = (a.object, a.object_beg, a.object_end, a.part_number,
             a.component_type, component_id, component_beg,
             component_end, orientation)
    a = AGPLine("\t".join(str(x) for x in atoms))
    print(a, file=fw)
def compress(args)
%prog compress a.agp b.agp

Convert coordinates based on multiple AGP files. Useful to simplify multiple
liftOvers, compressing multiple chain files into a single chain file when
upgrading locations of genomic features.

Example: `a.agp` could contain split scaffolds:
scaffold_0.1 1 600309 1 W scaffold_0 1 600309 +

`b.agp` could contain mapping to chromosomes:
LG05 6435690 7035998 53 W scaffold_0.1 1 600309 +

The final AGP we want is:
LG05 6435690 7035998 53 W scaffold_0 1 600309 +
3.090767
2.910426
1.061964
from jcvi.apps.grid import WriteJobs
from jcvi.formats.bed import sort

p = OptionParser(infer.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

scaffoldsf, genomef = args
inferbed = "infer-components.bed"
if need_update((scaffoldsf, genomef), inferbed):
    scaffolds = Fasta(scaffoldsf, lazy=True)
    genome = Fasta(genomef)
    genome = genome.tostring()
    args = [(scaffold_name, scaffold, genome) \
                for scaffold_name, scaffold in scaffolds.iteritems_ordered()]

    pool = WriteJobs(map_one_scaffold, args, inferbed, cpus=opts.cpus)
    pool.run()

sort([inferbed, "-i"])

bed = Bed(inferbed)
inferagpbed = "infer.bed"
fw = open(inferagpbed, "w")
seen = []
for b in bed:
    r = (b.seqid, b.start, b.end)
    if check_seen(r, seen):
        continue
    print("\t".join(str(x) for x in \
            (b.accn, 0, b.span, b.seqid, b.score, b.strand)), file=fw)
    seen.append(r)
fw.close()

frombed([inferagpbed])
def infer(args)
%prog infer scaffolds.fasta genome.fasta

Infer where the components are in the genome. This function is rarely used,
but can be useful when the distributor does not ship an AGP file.
4.288779
3.972972
1.079489
from jcvi.formats.base import DictFile

p = OptionParser(format.__doc__)
p.add_option("--switchcomponent", help="Switch component id based on")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

oldagpfile, newagpfile = args
switchcomponent = opts.switchcomponent
if switchcomponent:
    switchcomponent = DictFile(switchcomponent)

agp = AGP(oldagpfile)
fw = open(newagpfile, "w")
nconverts = 0
for i, a in enumerate(agp):
    if not a.is_gap and a.component_id in switchcomponent:
        oldid = a.component_id
        newid = switchcomponent[a.component_id]
        a.component_id = newid
        logging.debug("Convert {0} to {1} on line {2}".\
                format(oldid, newid, i + 1))
        nconverts += 1
    print(a, file=fw)

logging.debug("Total converted records: {0}".format(nconverts))
def format(args)
%prog format oldagpfile newagpfile

Reformat AGP file. --switchcomponent will replace the component ids in the
AGP file.
2.809294
2.462143
1.140995
p = OptionParser(frombed.__doc__)
p.add_option("--gapsize", default=100, type="int",
             help="Insert gaps of size [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
gapsize = opts.gapsize
agpfile = bedfile.replace(".bed", ".agp")
fw = open(agpfile, "w")

bed = Bed(bedfile, sorted=False)
for object, beds in groupby(bed, key=lambda x: x.accn):
    beds = list(beds)
    for i, b in enumerate(beds):
        if gapsize and i != 0:
            print("\t".join(str(x) for x in \
                    (object, 0, 0, 0, "U", \
                     gapsize, "scaffold", "yes", "map")), file=fw)

        print("\t".join(str(x) for x in \
                (object, 0, 0, 0, "W", \
                 b.seqid, b.start, b.end, b.strand)), file=fw)

fw.close()

# Reindex
return reindex([agpfile, "--inplace"])
def frombed(args)
%prog frombed bedfile

Generate AGP file based on bed file. The bed file must have at least 6
columns, with the 4th column indicating the new object.
3.124982
2.905679
1.075474
from jcvi.utils.range import range_interleave

p = OptionParser(swap.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

agpfile, = args

agp = AGP(agpfile, nogaps=True, validate=False)
agp.sort(key=lambda x: (x.component_id, x.component_beg))

newagpfile = agpfile.rsplit(".", 1)[0] + ".swapped.agp"
fw = open(newagpfile, "w")
agp.transfer_header(fw)
for cid, aa in groupby(agp, key=(lambda x: x.component_id)):
    aa = list(aa)
    aranges = [(x.component_id, x.component_beg, x.component_end) \
                    for x in aa]
    gaps = range_interleave(aranges)
    for a, g in zip_longest(aa, gaps):
        a.object, a.component_id = a.component_id, a.object
        a.component_beg = a.object_beg
        a.component_end = a.object_end
        print(a, file=fw)
        if not g:
            continue

        aline = [cid, 0, 0, 0]
        gseq, ga, gb = g
        cspan = gb - ga + 1
        aline += ["N", cspan, "fragment", "yes"]
        print("\t".join(str(x) for x in aline), file=fw)

fw.close()

# Reindex
idxagpfile = reindex([newagpfile, "--inplace"])

return newagpfile
def swap(args)
%prog swap agpfile

Swap objects and components. Will add gap lines. This is often used in
conjunction with formats.chain.fromagp() to convert between different
coordinate systems.
3.370698
3.245463
1.038588
from jcvi.utils.table import tabulate

p = OptionParser(stats.__doc__)
p.add_option("--warn", default=False, action="store_true",
             help="Warnings on small component spans [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(p.print_help())

agpfile, = args

agp = AGP(agpfile)
gap_lengths = []
component_lengths = []
for a in agp:
    span = a.object_span
    if a.is_gap:
        label = a.gap_type
        gap_lengths.append((span, label))
    else:
        label = "{0}:{1}-{2}".format(a.component_id, a.component_beg, \
                a.component_end)
        component_lengths.append((span, label))
        if opts.warn and span < 50:
            logging.error("component span too small ({0}):\n{1}".\
                    format(span, a))

table = dict()
for label, lengths in zip(("Gaps", "Components"),
                          (gap_lengths, component_lengths)):
    if not lengths:
        table[(label, "Min")] = table[(label, "Max")] \
                              = table[(label, "Sum")] = "n.a."
        continue
    table[(label, "Min")] = "{0} ({1})".format(*min(lengths))
    table[(label, "Max")] = "{0} ({1})".format(*max(lengths))
    table[(label, "Sum")] = sum(x[0] for x in lengths)

print(tabulate(table), file=sys.stderr)
def stats(args)
%prog stats agpfile

Print out a report for length of gaps and components.
2.808308
2.570849
1.092366
p = OptionParser(cut.__doc__)
p.add_option("--sep", default=".", help="Separator for splits")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

agpfile, bedfile = args
sep = opts.sep

agp = AGP(agpfile)
bed = Bed(bedfile)
simple_agp = agp.order
newagpfile = agpfile.replace(".agp", ".cut.agp")
fw = open(newagpfile, "w")

agp_fixes = defaultdict(list)
for component, intervals in bed.sub_beds():
    i, a = simple_agp[component]
    object = a.object
    component_span = a.component_span
    orientation = a.orientation

    assert a.component_beg, a.component_end
    cuts = set()
    for i in intervals:
        start, end = i.start, i.end
        end -= 1

        assert start <= end
        cuts.add(start)
        cuts.add(end)

    cuts.add(0)
    cuts.add(component_span)
    cuts = list(sorted(cuts))

    sum_of_spans = 0
    for i, (a, b) in enumerate(pairwise(cuts)):
        oid = object + "{0}{1}".format(sep, i + 1)
        aline = [oid, 0, 0, 0]
        cspan = b - a
        aline += ['D', component, a + 1, b, orientation]
        sum_of_spans += cspan

        aline = "\t".join(str(x) for x in aline)
        agp_fixes[component].append(aline)

    assert component_span == sum_of_spans

# Finally write the masked agp
for a in agp:
    if not a.is_gap and a.component_id in agp_fixes:
        print("\n".join(agp_fixes[a.component_id]), file=fw)
    else:
        print(a, file=fw)

fw.close()
# Reindex
reindex([newagpfile, "--inplace"])

return newagpfile
def cut(args)
%prog cut agpfile bedfile

Cut at the boundaries of the ranges in the bedfile.
3.342185
3.202973
1.043463
p = OptionParser(reindex.__doc__)
p.add_option("--nogaps", default=False, action="store_true",
             help="Remove all gap lines [default: %default]")
p.add_option("--inplace", default=False, action="store_true",
             help="Replace input file [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(p.print_help())

agpfile, = args
inplace = opts.inplace
agp = AGP(agpfile, validate=False)
pf = agpfile.rsplit(".", 1)[0]
newagpfile = pf + ".reindexed.agp"

fw = open(newagpfile, "w")
agp.transfer_header(fw)
for chr, chr_agp in groupby(agp, lambda x: x.object):
    chr_agp = list(chr_agp)
    object_beg = 1
    for i, b in enumerate(chr_agp):
        b.object_beg = object_beg
        b.part_number = i + 1
        if opts.nogaps and b.is_gap:
            continue

        if b.is_gap:
            b.object_end = object_beg + b.gap_length - 1
        else:
            b.object_end = object_beg + b.component_span - 1

        object_beg = b.object_end + 1

        print(str(b), file=fw)

# Last step: validate the new agpfile
fw.close()
agp = AGP(newagpfile, validate=True)

if inplace:
    shutil.move(newagpfile, agpfile)
    logging.debug("Rename file `{0}` to `{1}`".format(newagpfile, agpfile))
    newagpfile = agpfile

return newagpfile
def reindex(args)
%prog reindex agpfile

Assume the component line order is correct and modify the coordinates. This
is necessary mostly due to manual edits (insert/delete) that disrupt the
target coordinates.
2.674221
2.511151
1.064939
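These entry points accept an argv-style list, as the calls in frombed(), swap() and tidy() above and below show; a minimal programmatic sketch (the filename is a placeholder):

# Hypothetical programmatic call, mirroring how frombed()/tidy() invoke it.
newagpfile = reindex(["genome.agp", "--inplace"])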
from jcvi.utils.table import write_csv

p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(p.print_help())

agpfile, = args
header = "Chromosome #_Distinct #_Components #_Scaffolds " \
         "Scaff_N50 Scaff_L50 Length".split()

agp = AGP(agpfile)
data = list(agp.summary_all())
write_csv(header, data, sep=" ")
def summary(args)
%prog summary agpfile

Print a table of scaffold statistics: number of BACs, number of scaffolds,
scaffold N50, scaffold L50, actual sequence, PSMOL NNNs, PSMOL length, and
% of PSMOL sequenced.
5.121868
3.927873
1.30398
s = rec.description
chr = re.search(chr_pat, s)
clone = re.search(clone_pat, s)
chr = chr.group(1) if chr else ""
clone = clone.group(1) if clone else ""

return chr, clone
def get_clone(rec)
>>> get_clone("Medicago truncatula chromosome 2 clone mth2-48e18")
('2', 'mth2-48e18')
3.263369
2.829587
1.153302
p = OptionParser(phase.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

fw = must_open(opts.outfile, "w")
for gbfile in args:
    for rec in SeqIO.parse(gbfile, "gb"):
        bac_phase, keywords = get_phase(rec)
        chr, clone = get_clone(rec)
        keyword_field = ";".join(keywords)
        print("\t".join((rec.id, str(bac_phase), keyword_field,
                         chr, clone)), file=fw)
def phase(args)
%prog phase genbankfiles

Input has to be gb file. Search the `KEYWORDS` section to look for PHASE.
Also look for "chromosome" and "clone" in the definition line.
3.785066
3.016686
1.25471
p = OptionParser(tpf.__doc__)
p.add_option("--noversion", default=False, action="store_true",
             help="Remove trailing accession versions [default: %default]")
p.add_option("--gaps", default=False, action="store_true",
             help="Include gaps in the output [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

agpfile, = args
agp = AGP(agpfile)
for a in agp:
    object = a.object
    if a.is_gap:
        if opts.gaps and a.isCloneGap:
            print("\t".join((a.gap_type, object, "na")))
        continue

    component_id = a.component_id
    orientation = a.orientation

    if opts.noversion:
        component_id = component_id.rsplit(".", 1)[0]

    print("\t".join((component_id, object, orientation)))
def tpf(args)
%prog tpf agpfile

Print out a list of ids, one per line. Also known as the Tiling Path.

AC225490.9  chr6

Can optionally output scaffold gaps.
3.027007
2.820009
1.073403
from jcvi.formats.obo import validate_term

p = OptionParser(bed.__doc__)
p.add_option("--gaps", default=False, action="store_true",
             help="Only print bed lines for gaps [default: %default]")
p.add_option("--nogaps", default=False, action="store_true",
             help="Do not print bed lines for gaps [default: %default]")
p.add_option("--bed12", default=False, action="store_true",
             help="Produce bed12 formatted output [default: %default]")
p.add_option("--component", default=False, action="store_true",
             help="Generate bed file for components [default: %default]")
p.set_outfile()

g1 = OptionGroup(p, "GFF specific parameters",
                 "Note: If not specified, output will be in `bed` format")
g1.add_option("--gff", default=False, action="store_true",
              help="Produce gff3 formatted output. By default, ignores " +\
                   "AGP gap lines. [default: %default]")
g1.add_option("--source", default="MGSC",
              help="Specify a gff3 source [default: `%default`]")
g1.add_option("--feature", default="golden_path_fragment",
              help="Specify a gff3 feature type [default: `%default`]")
p.add_option_group(g1)
p.set_SO_opts()

opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

if opts.component:
    opts.nogaps = True

# If output format is gff3 and 'verifySO' option is invoked, validate the SO term
if opts.gff and opts.verifySO:
    validate_term(opts.feature, method=opts.verifySO)

agpfile, = args
agp = AGP(agpfile)
fw = must_open(opts.outfile, "w")
if opts.gff:
    print("##gff-version 3", file=fw)

for a in agp:
    if opts.nogaps and a.is_gap:
        continue
    if opts.gaps and not a.is_gap:
        continue
    if opts.bed12:
        print(a.bed12line, file=fw)
    elif opts.gff:
        print(a.gffline(gff_source=opts.source,
                        gff_feat_type=opts.feature), file=fw)
    elif opts.component:
        name = "{0}:{1}-{2}".\
                format(a.component_id, a.component_beg, a.component_end)
        print("\t".join(str(x) for x in (a.component_id,
                a.component_beg - 1, a.component_end, name,
                a.component_type, a.orientation)), file=fw)
    else:
        print(a.bedline, file=fw)
fw.close()

return fw.name
def bed(args)
%prog bed agpfile

Print out the tiling paths in bed/gff3 format.
2.74566
2.663605
1.030806
from jcvi.formats.sizes import Sizes

p = OptionParser(extendbed.__doc__)
p.add_option("--nogaps", default=False, action="store_true",
             help="Do not print bed lines for gaps [default: %default]")
p.add_option("--bed12", default=False, action="store_true",
             help="Produce bed12 formatted output [default: %default]")
p.add_option("--gff", default=False, action="store_true",
             help="Produce gff3 formatted output. By default, ignores " +\
                  " AGP gap lines. [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

# If output format is GFF3, ignore AGP gap lines.
if opts.gff:
    opts.nogaps = True

agpfile, fastafile = args
agp = AGP(agpfile)
fw = must_open(opts.outfile, "w")
if opts.gff:
    print("##gff-version 3", file=fw)

ranges = defaultdict(list)
thickCoords = []  # These are the coordinates before modify ranges

# Make the first pass to record all the component ranges
for a in agp:
    thickCoords.append((a.object_beg, a.object_end))
    if a.is_gap:
        continue
    ranges[a.component_id].append(a)

# Modify the ranges
sizes = Sizes(fastafile).mapping
for accn, rr in ranges.items():
    alen = sizes[accn]

    a = rr[0]
    if a.orientation == "+":
        hang = a.component_beg - 1
    else:
        hang = alen - a.component_end
    a.object_beg -= hang

    a = rr[-1]
    if a.orientation == "+":
        hang = alen - a.component_end
    else:
        hang = a.component_beg - 1
    a.object_end += hang

for a, (ts, te) in zip(agp, thickCoords):
    if opts.nogaps and a.is_gap:
        continue
    if opts.bed12:
        line = a.bedline
        a.object_beg, a.object_end = ts, te
        line += "\t" + a.bedextra
        print(line, file=fw)
    elif opts.gff:
        print(a.gffline(), file=fw)
    else:
        print(a.bedline, file=fw)
def extendbed(args)
%prog extend agpfile componentfasta

Extend the components to fill the component range. For example, a bed/gff3
file converted from the agp will contain only the BAC sequence intervals
that are 'represented' - sometimes leaving the 5` and 3` ends out (those
that overlap with adjacent sequences). This script fills up those ranges,
potentially to make graphics for the tiling path.
2.967428
2.862996
1.036477
from jcvi.graphics.histogram import loghistogram

p = OptionParser(gaps.__doc__)
p.add_option("--merge", dest="merge", default=False, action="store_true",
             help="Merge adjacent gaps (to conform to AGP specification)")
p.add_option("--header", default=False, action="store_true",
             help="Produce an AGP header [default: %default]")

opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

merge = opts.merge
agpfile, = args

if merge:
    merged_agpfile = agpfile.replace(".agp", ".merged.agp")
    fw = open(merged_agpfile, "w")

agp = AGP(agpfile)
sizes = []
data = []  # store merged AGPLine's
priorities = ("centromere", "telomere", "scaffold", "contig", \
              "clone", "fragment")

for is_gap, alines in groupby(agp, key=lambda x: (x.object, x.is_gap)):
    alines = list(alines)
    is_gap = is_gap[1]
    if is_gap:
        gap_size = sum(x.gap_length for x in alines)
        gap_types = set(x.gap_type for x in alines)
        for gtype in ("centromere", "telomere"):
            if gtype in gap_types:
                gap_size = gtype

        sizes.append(gap_size)
        b = deepcopy(alines[0])
        b.object_beg = min(x.object_beg for x in alines)
        b.object_end = max(x.object_end for x in alines)
        b.gap_length = sum(x.gap_length for x in alines)

        assert b.gap_length == b.object_end - b.object_beg + 1
        b.component_type = 'U' if b.gap_length == 100 else 'N'

        gtypes = [x.gap_type for x in alines]
        for gtype in priorities:
            if gtype in gtypes:
                b.gap_type = gtype
                break

        linkages = [x.linkage for x in alines]
        for linkage in ("no", "yes"):
            if linkage in linkages:
                b.linkage = linkage
                break

        alines = [b]

    data.extend(alines)

loghistogram(sizes)

if opts.header:
    AGP.print_header(fw, organism="Medicago truncatula", taxid=3880,
                     source="J. Craig Venter Institute")

if merge:
    for ob, bb in groupby(data, lambda x: x.object):
        for i, b in enumerate(bb):
            b.part_number = i + 1
            print(b, file=fw)
    return merged_agpfile
def gaps(args)
%prog gaps agpfile

Print out the distribution of gap sizes. Option --merge allows merging of
adjacent gaps, which is used by tidy().
3.204218
3.064405
1.045625
p = OptionParser(tidy.__doc__)
p.add_option("--nogaps", default=False, action="store_true",
             help="Remove all gap lines [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(p.print_help())

agpfile, componentfasta = args
originalagpfile = agpfile

# Step 1: Trim terminal Ns
tmpfasta = "tmp.fasta"
trimmed_agpfile = build([agpfile, componentfasta, tmpfasta,
                         "--newagp", "--novalidate"])
os.remove(tmpfasta)
agpfile = trimmed_agpfile
agpfile = reindex([agpfile, "--inplace"])

# Step 2: Merge adjacent gaps
merged_agpfile = gaps([agpfile, "--merge"])
os.remove(agpfile)

# Step 3: Trim gaps at the end of object
agpfile = merged_agpfile
agp = AGP(agpfile)
newagpfile = agpfile.replace(".agp", ".fixed.agp")
fw = open(newagpfile, "w")
for object, a in groupby(agp, key=lambda x: x.object):
    a = list(a)
    if a[0].is_gap:
        g, a = a[0], a[1:]
        logging.debug("Trim beginning Ns({0}) of {1}".\
                format(g.gap_length, object))
    if a and a[-1].is_gap:
        a, g = a[:-1], a[-1]
        logging.debug("Trim trailing Ns({0}) of {1}".\
                format(g.gap_length, object))
    print("\n".join(str(x) for x in a), file=fw)
fw.close()
os.remove(agpfile)

# Step 4: Final reindex
agpfile = newagpfile
reindex_opts = [agpfile, "--inplace"]
if opts.nogaps:
    reindex_opts += ["--nogaps"]
agpfile = reindex(reindex_opts)

tidyagpfile = originalagpfile.replace(".agp", ".tidy.agp")
shutil.move(agpfile, tidyagpfile)

logging.debug("File written to `{0}`.".format(tidyagpfile))
return tidyagpfile
def tidy(args)
%prog tidy agpfile componentfasta

Given an agp file, run through the following steps:

1. Trim components with dangling N's
2. Merge adjacent gaps
3. Trim gaps at the end of an object
4. Reindex the agp

Final output is in `.tidy.agp`.
2.768222
2.382331
1.16198
p = OptionParser(build.__doc__)
p.add_option("--newagp", dest="newagp", default=False, action="store_true",
             help="Check components to trim dangling N's [default: %default]")
p.add_option("--novalidate", dest="novalidate", default=False,
             action="store_true",
             help="Don't validate the agpfile [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

agpfile, componentfasta, targetfasta = args
validate = not opts.novalidate

if opts.newagp:
    assert agpfile.endswith(".agp")
    newagpfile = agpfile.replace(".agp", ".trimmed.agp")
    newagp = open(newagpfile, "w")
else:
    newagpfile = None
    newagp = None

agp = AGP(agpfile, validate=validate, sorted=True)
agp.build_all(componentfasta=componentfasta, targetfasta=targetfasta,
              newagp=newagp)
logging.debug("Target fasta written to `{0}`.".format(targetfasta))

return newagpfile
def build(args)
%prog build agpfile componentfasta targetfasta

Build targetfasta based on info from agpfile.
2.809321
2.346373
1.197304
p = OptionParser(validate.__doc__)
opts, args = p.parse_args(args)

try:
    agpfile, componentfasta, targetfasta = args
except Exception as e:
    sys.exit(p.print_help())

agp = AGP(agpfile)
build = Fasta(targetfasta)
bacs = Fasta(componentfasta, index=False)

# go through this line by line
for aline in agp:
    try:
        build_seq = build.sequence(dict(chr=aline.object,
                                        start=aline.object_beg,
                                        stop=aline.object_end))

        if aline.is_gap:
            assert build_seq.upper() == aline.gap_length * 'N', \
                "gap mismatch: %s" % aline
        else:
            bac_seq = bacs.sequence(dict(chr=aline.component_id,
                                         start=aline.component_beg,
                                         stop=aline.component_end,
                                         strand=aline.orientation))

            assert build_seq.upper() == bac_seq.upper(), \
                    "sequence mismatch: %s" % aline

        logging.debug("%s:%d-%d verified" % (aline.object,
                aline.object_beg, aline.object_end))

    except Exception as e:
        logging.error(e)
def validate(args)
%prog validate agpfile componentfasta targetfasta

Validate consistency between agpfile and targetfasta.
3.667872
3.058733
1.199148
d = {}
for (i, x) in enumerate(self):
    if x.is_gap:
        continue

    xid = x.component_id
    d[xid] = (i, x)

    xid = xid.rsplit(".", 1)[0]  # Remove Genbank version
    if xid not in d:
        d[xid] = (i, x)

return d
def order(self)
Returns a dict with component_id => (i, agpline)
4.569528
3.396743
1.345268
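No example accompanies this property; a minimal usage sketch, assuming an AGP object built from a file (the filename is a placeholder; "AC225490.9" is the accession shown in the tpf docstring above):

# Hypothetical usage of AGP.order.
agp = AGP("genome.agp")
i, aline = agp.order["AC225490.9"]  # lookup also works without the version
print(i, aline.object)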
north = self.getAdjacentClone(i, south=False)
south = self.getAdjacentClone(i)

return north, south
def getNorthSouthClone(self, i)
Returns the adjacent clone names from both sides, as (north, south).
5.888653
3.992929
1.47477
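A companion sketch, under the same assumptions as the AGP.order example above:

# Hypothetical usage: fetch the clones flanking the i-th AGP line.
agp = AGP("genome.agp")  # placeholder filename
north, south = agp.getNorthSouthClone(5)  # either side may be None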
print("\n".join(self.header), file=fw)
def transfer_header(self, fw=sys.stdout)
transfer_header() copies header to a new file. print_header() creates a new header.
6.754886
5.773347
1.170012
components = []
total_bp = 0
for line in lines:
    if line.is_gap:
        seq = 'N' * line.gap_length
        if newagp:
            print(line, file=newagp)
    else:
        seq = fasta.sequence(dict(chr=line.component_id,
                                  start=line.component_beg,
                                  stop=line.component_end,
                                  strand=line.orientation))
        # Check for dangling N's
        if newagp:
            trimNs(seq, line, newagp)

    components.append(seq)
    total_bp += len(seq)

    if self.validate:
        assert total_bp == line.object_end, \
                "cumulative base pairs (%d) does not match (%d)" % \
                (total_bp, line.object_end)

if not newagp:
    rec = SeqRecord(Seq(''.join(components)), id=object, description="")
    SeqIO.write([rec], fw, "fasta")
    if len(rec) > 1000000:
        logging.debug("Write object %s to `%s`" % (object, fw.name))
def build_one(self, object, lines, fasta, fw, newagp=None)
Construct molecule using component fasta sequence
4.039339
3.996304
1.010769
rr = xrange(i + 1, len(self)) if south else xrange(i - 1, -1, -1)
a = self[i]
for ix in rr:
    x = self[ix]
    if x.object != a.object:
        break
    return x
return None
def getAdjacentClone(self, i, south=True)
Returns adjacent clone name, either the line before or after the current line.
3.642102
3.478243
1.04711
from jcvi.formats.base import SetFile
from jcvi.formats.gff import Gff

p = OptionParser(pasa.__doc__)
p.set_home("pasa")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

fastafile, gffile = args
transcodergff = fastafile + ".transdecoder.gff3"
transcodergenomegff = fastafile + ".transdecoder.genome.gff3"
if need_update((fastafile, gffile), (transcodergff, transcodergenomegff)):
    cmd = "{0}/scripts/pasa_asmbls_to_training_set.dbi".format(opts.pasa_home)
    cmd += " --pasa_transcripts_fasta {0} --pasa_transcripts_gff3 {1}".\
            format(fastafile, gffile)
    sh(cmd)

completeids = fastafile.rsplit(".", 1)[0] + ".complete.ids"
if need_update(transcodergff, completeids):
    cmd = "grep complete {0} | cut -f1 | sort -u".format(transcodergff)
    sh(cmd, outfile=completeids)

complete = SetFile(completeids)
seen = set()
completegff = transcodergenomegff.rsplit(".", 1)[0] + ".complete.gff3"
fw = open(completegff, "w")
gff = Gff(transcodergenomegff)
for g in gff:
    a = g.attributes
    if "Parent" in a:
        id = a["Parent"][0]
    else:
        id = a["ID"][0]
    asmbl_id = id.split("|")[0]
    if asmbl_id not in complete:
        continue
    print(g, file=fw)
    if g.type == "gene":
        seen.add(id)

fw.close()
logging.debug("A total of {0} complete models extracted to `{1}`.".\
        format(len(seen), completegff))
def pasa(args)
%prog ${pasadb}.assemblies.fasta ${pasadb}.pasa_assemblies.gff3

Wraps `pasa_asmbls_to_training_set.dbi`.
2.920346
2.604823
1.12113
p = OptionParser(genemark.__doc__)
p.add_option("--junctions", help="Path to `junctions.bed` from Tophat2")
p.set_home("gmes")
p.set_cpus(cpus=32)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

species, fastafile = args
junctions = opts.junctions
mhome = opts.gmes_home

license = op.expanduser("~/.gm_key")
assert op.exists(license), "License key ({0}) not found!".format(license)
cmd = "{0}/gmes_petap.pl --sequence {1}".format(mhome, fastafile)
cmd += " --cores {0}".format(opts.cpus)
if junctions:
    intronsgff = "introns.gff"
    if need_update(junctions, intronsgff):
        jcmd = "{0}/bet_to_gff.pl".format(mhome)
        jcmd += " --bed {0} --gff {1} --label Tophat2".\
                format(junctions, intronsgff)
        sh(jcmd)
    cmd += " --ET {0} --et_score 10".format(intronsgff)
else:
    cmd += " --ES"
sh(cmd)

logging.debug("GENEMARK matrix written to `output/gmhmm.mod`")
def genemark(args)
%prog genemark species fastafile

Train GENEMARK model given fastafile. GENEMARK self-trains so no training
model gff file is needed.
4.903391
4.63233
1.058515
p = OptionParser(snap.__doc__)
p.set_home("maker")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

species, gffile, fastafile = args
mhome = opts.maker_home
snapdir = "snap"
mkdir(snapdir)

cwd = os.getcwd()
os.chdir(snapdir)

newgffile = "training.gff3"
logging.debug("Construct GFF file combined with sequence ...")
sh("cat ../{0} > {1}".format(gffile, newgffile))
sh('echo "##FASTA" >> {0}'.format(newgffile))
sh("cat ../{0} >> {1}".format(fastafile, newgffile))

logging.debug("Make models ...")
sh("{0}/src/bin/maker2zff training.gff3".format(mhome))
sh("{0}/exe/snap/fathom -categorize 1000 genome.ann genome.dna".format(mhome))
sh("{0}/exe/snap/fathom -export 1000 -plus uni.ann uni.dna".format(mhome))
sh("{0}/exe/snap/forge export.ann export.dna".format(mhome))
sh("{0}/exe/snap/hmm-assembler.pl {1} . > {1}.hmm".format(mhome, species))

os.chdir(cwd)
logging.debug("SNAP matrix written to `{0}/{1}.hmm`".format(snapdir, species))
def snap(args)
%prog snap species gffile fastafile

Train SNAP model given gffile and fastafile. Whole procedure taken from:
<http://gmod.org/wiki/MAKER_Tutorial_2012>
4.54285
4.106202
1.106339
p = OptionParser(augustus.__doc__)
p.add_option("--autotrain", default=False, action="store_true",
             help="Run autoAugTrain.pl to iteratively train AUGUSTUS")
p.set_home("augustus")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

species, gffile, fastafile = args
mhome = opts.augustus_home
augdir = "augustus"

cwd = os.getcwd()
mkdir(augdir)
os.chdir(augdir)
target = "{0}/config/species/{1}".format(mhome, species)

if op.exists(target):
    logging.debug("Removing existing target `{0}`".format(target))
    sh("rm -rf {0}".format(target))

sh("{0}/scripts/new_species.pl --species={1}".format(mhome, species))
sh("{0}/scripts/gff2gbSmallDNA.pl ../{1} ../{2} 1000 raw.gb".\
        format(mhome, gffile, fastafile))
sh("{0}/bin/etraining --species={1} raw.gb 2> train.err".\
        format(mhome, species))
sh("cat train.err | perl -pe 's/.*in sequence (\S+): .*/$1/' > badgenes.lst")
sh("{0}/scripts/filterGenes.pl badgenes.lst raw.gb > training.gb".\
        format(mhome))
sh("grep -c LOCUS raw.gb training.gb")

# autoAugTrain failed to execute, disable for now
if opts.autotrain:
    sh("rm -rf {0}".format(target))
    sh("{0}/scripts/autoAugTrain.pl --trainingset=training.gb --species={1}".\
            format(mhome, species))

os.chdir(cwd)
sh("cp -r {0} augustus/".format(target))
def augustus(args)
%prog augustus species gffile fastafile

Train AUGUSTUS model given gffile and fastafile. Whole procedure taken from:
<http://www.molecularevolution.org/molevolfiles/exercises/augustus/training.html>
4.006951
3.778232
1.060536
from collections import defaultdict

p = OptionParser(prune.__doc__)
add_graph_options(p)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bestedges, = args
G = read_graph(bestedges, maxerr=opts.maxerr)
reads_to_ctgs = parse_ctgs(bestedges, opts.frgctg)
edges = defaultdict(int)
r = defaultdict(int)
for a, b, d in G.edges_iter(data=True):
    ua, ub = reads_to_ctgs.get(a), reads_to_ctgs.get(b)
    nn = (ua, ub).count(None)
    if nn == 0:
        if ua == ub:
            r["Same tigs"] += 1
        else:
            r["Diff tigs"] += 1
            if ua > ub:
                ua, ub = ub, ua
            edges[(ua, ub)] += 1
    elif nn == 1:
        r["One null"] += 1
    else:
        assert nn == 2
        r["Two nulls"] += 1

U = nx.Graph()
difftigs = "diff_tigs.txt"
neighbors = defaultdict(list)
fw = open(difftigs, "w")
for (ua, ub), count in edges.items():
    print("\t".join((ua, ub, str(count))), file=fw)
    U.add_edge(ua, ub, weight=count)
    neighbors[ua].append((ub, count))
    neighbors[ub].append((ua, count))
fw.close()

print("[Unitig edge property]", file=sys.stderr)
for k, v in r.items():
    print(": ".join((k, str(v))), file=sys.stderr)
print("Total: {0}".format(sum(r.values())), file=sys.stderr)

print("[Unitig degree distribution]", file=sys.stderr)
degrees = U.degree()
degree_counter = Counter(degrees.values())
for degree, count in sorted(degree_counter.items()):
    print("{0}\t{1}".format(degree, count), file=sys.stderr)

# To find associative contigs, one looks for a contig that is connected,
# and only connected, to a single other contig - and does that recursively
# until no more contigs can be found
associative = {}
for ua, ubs in neighbors.items():
    if len(ubs) == 1:  # Only one neighbor
        ub, count = ubs[0]
        if count >= 2:  # Bubble
            associative[ua] = (ub, count)
print("A total of {0} associative contigs found"\
        .format(len(associative)), file=sys.stderr)

# Keep only one for mutual associative
for ua, (ub, count) in associative.items():
    if ub in associative and ua < ub:
        print(ua, "mutually associative with", ub, file=sys.stderr)
        del associative[ub]
print("A total of {0} associative contigs retained"\
        .format(len(associative)), file=sys.stderr)

assids = "associative.ids"
fw = open(assids, "w")
for ua, (ub, count) in sorted(associative.items(),
                              key=lambda x: (x[1], x[0])):
    print("\t".join((ua, ub, str(count))), file=fw)
fw.close()
logging.debug("Associative contigs written to `{0}`".format(assids))
def prune(args)
%prog prune best.edges

Prune overlap graph.
2.989878
2.934553
1.018853
p = OptionParser(removecontains.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

contains, gkpStore = args

s = set()
fp = open(contains)
for row in fp:
    if row[0] == '#':
        continue
    iid = int(row.split()[0])
    s.add(iid)

cmd = "gatekeeper -dumpfragments -lastfragiid {}".format(gkpStore)
gkpmsg = popen(cmd).read()
last_iid = int(gkpmsg.strip().split()[-1])

ndeleted = 0
editfile = "delete.edit"
fw = open(editfile, "w")
for iid in xrange(1, last_iid + 1):
    if iid in s:
        print("frg iid {0} isdeleted 1".format(iid), file=fw)
        ndeleted += 1
fw.close()

assert len(s) == ndeleted
logging.debug("A total of {0} contained reads flagged as deleted."\
        .format(ndeleted))
print("Now you can run:", file=sys.stderr)
print("$ gatekeeper --edit {0} {1}".format(editfile, gkpStore),
      file=sys.stderr)
def removecontains(args)
%prog removecontains 4-unitigger/best.contains asm.gkpStore

Remove contained reads from gkpStore. This will improve assembly contiguity
without sacrificing accuracy, when using bogart unitigger.
4.311437
3.782345
1.139885
from jcvi.apps.console import green

p = OptionParser(overlap.__doc__)
p.add_option("--maxerr", default=2, type="int", help="Maximum error rate")
p.add_option("--canvas", default=100, type="int", help="Canvas size")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

bestcontains, iid = args
canvas = opts.canvas

bestcontainscache = bestcontains + ".cache"
if need_update(bestcontains, bestcontainscache):
    fp = open(bestcontains)
    fw = open(bestcontainscache, "w")
    exclude = set()
    for row in fp:
        if row[0] == '#':
            continue
        j = int(row.split()[0])
        exclude.add(j)
    dump(exclude, fw)
    fw.close()

exclude = load(open(bestcontainscache))
logging.debug("A total of {0} reads to exclude".format(len(exclude)))

cmd = "overlapStore -d ../asm.ovlStore -b {0} -e {0}".format(iid)
cmd += " -E {0}".format(opts.maxerr)
frags = []
for row in popen(cmd):
    r = OverlapLine(row)
    if r.bid in exclude:
        continue
    frags.append(r)

# Also include the query fragment
frags.append(OverlapLine("{0} {0} N 0 0 0 0".format(iid)))
frags.sort(key=lambda x: x.ahang)

# Determine size of the query fragment
cmd = "gatekeeper -b {0} -e {0}".format(iid)
cmd += " -tabular -dumpfragments ../asm.gkpStore"
fp = popen(cmd)
row = next(fp)
size = int(fp.next().split()[-1])

# Determine size of canvas
xmin = min(x.ahang for x in frags)
xmax = max(x.bhang for x in frags)
xsize = -xmin + size + xmax
ratio = xsize / canvas

fw = sys.stdout
for f in frags:
    fsize = -f.ahang + size + f.bhang
    a = (f.ahang - xmin) / ratio
    b = fsize / ratio
    t = '-' * b
    if f.orientation == 'N':
        t = t[:-1] + '>'
    else:
        t = '<' + t[1:]
    if f.ahang == 0 and f.bhang == 0:
        t = green(t)
    c = canvas - a - b
    fw.write(' ' * a)
    fw.write(t)
    fw.write(' ' * c)
    print("{0} ({1})".format(str(f.bid).rjust(10), f.erate_adj), file=fw)
def overlap(args)
%prog overlap best.contains iid

Visualize overlaps for a given fragment. Must be run in 4-unitigger. All
overlaps for iid are retrieved, excluding the ones matching best.contains.
3.961695
3.751572
1.056009
p = OptionParser(merger.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) layout, gkpstore, contigs = args fp = open(layout) pf = "0" iidfile = pf + ".iids" for i, row in enumerate(fp): logging.debug("Read unitig {0}".format(i)) fw = open(iidfile, "w") layout = row.split("|") print("\n".join(layout), file=fw) fw.close() cmd = "gatekeeper -iid {0}.iids -dumpfasta {0} {1}".format(pf, gkpstore) sh(cmd) fastafile = "{0}.fasta".format(pf) newfastafile = "{0}.new.fasta".format(pf) format([fastafile, newfastafile, "--sequential=replace", \ "--sequentialoffset=1", "--nodesc"]) fasta([newfastafile]) sh("rm -rf {0}".format(pf)) cmd = "runCA {0}.frg -p {0} -d {0} consensus=pbutgcns".format(pf) cmd += " unitigger=bogart doFragmentCorrection=0 doUnitigSplitting=0" sh(cmd) outdir = "{0}/9-terminator".format(pf) cmd = "cat {0}/{1}.ctg.fasta {0}/{1}.deg.fasta {0}/{1}.singleton.fasta"\ .format(outdir, pf) sh(cmd, outfile=contigs, append=True)
def merger(args)
%prog merger layout gkpStore contigs.fasta

Merge reads into one contig.
7.591412
6.817116
1.113581
p = OptionParser(unitigs.__doc__) p.add_option("--maxerr", default=2, type="int", help="Maximum error rate") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bestedges, = args G = read_graph(bestedges, maxerr=opts.maxerr, directed=True) H = nx.Graph() intconv = lambda x: int(x.split("-")[0]) for k, v in G.iteritems(): if k == G.get(v, None): H.add_edge(intconv(k), intconv(v)) nunitigs = nreads = 0 for h in nx.connected_component_subgraphs(H, copy=False): st = [x for x in h if h.degree(x) == 1] if len(st) != 2: continue src, target = st path = list(nx.all_simple_paths(h, src, target)) assert len(path) == 1 path, = path print("|".join(str(x) for x in path)) nunitigs += 1 nreads += len(path) logging.debug("A total of {0} unitigs built from {1} reads."\ .format(nunitigs, nreads))
def unitigs(args)
%prog unitigs best.edges

Read Celera Assembler's "best.edges" and extract all unitigs.
3.054122
2.873889
1.062714
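The mutual-best filter above keeps an edge only when each read names the other as its best edge. A self-contained sketch with a hypothetical best-edge dict:

best = {"1-a": "2-b", "2-b": "1-a", "3-c": "1-a"}  # read -> best neighbor
intconv = lambda x: int(x.split("-")[0])
mutual = set()
for k, v in best.items():
    if best.get(v) == k:  # the relationship is reciprocal
        mutual.add(tuple(sorted((intconv(k), intconv(v)))))
print(mutual)  # {(1, 2)}; read 3's one-directional edge is dropped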
p = OptionParser(graph.__doc__) p.add_option("--query", default=-1, type="int", help="Search from node, -1 to select random node, 0 to disable") p.add_option("--contig", help="Search from contigs, use comma to separate") p.add_option("--largest", default=0, type="int", help="Only show largest components") p.add_option("--maxsize", default=500, type="int", help="Max graph size") p.add_option("--nomutualbest", default=False, action="store_true", help="Do not plot mutual best edges as heavy") add_graph_options(p) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bestedges, = args query = opts.query contig = opts.contig largest = opts.largest frgctg = opts.frgctg edgeweight = not opts.nomutualbest G = read_graph(bestedges, maxerr=opts.maxerr) if largest: H = list(nx.connected_component_subgraphs(G)) c = min(len(H), largest) logging.debug("{0} components found, {1} retained".format(len(H), c)) G = nx.Graph() for x in H[:c]: G.add_edges_from(x.edges()) if query: if query == -1: query = choice(G.nodes()) reads_to_ctgs = parse_ctgs(bestedges, frgctg) if contig: contigs = set(contig.split(",")) core = [k for k, v in reads_to_ctgs.items() if v in contigs] else: ctg = reads_to_ctgs.get(query) core = [k for k, v in reads_to_ctgs.items() if v == ctg] logging.debug("Reads ({0}) extended from the same contig {1}".\ format(len(core), ctg)) # Extract a local neighborhood SG = nx.Graph() H = graph_local_neighborhood(G, query=core, maxsize=opts.maxsize) SG.add_edges_from(H.edges(data=edgeweight)) G = SG seen = [] for n, attrib in G.nodes_iter(data=True): contig = reads_to_ctgs.get(n, "na") attrib['label'] = contig seen.append(contig) c = Counter(seen) cc = ["{0}({1})".format(k, v) for k, v in c.most_common()] print("Contigs: {0}".format(" ".join(cc)), file=sys.stderr) gexf = "best" if query >= 0: gexf += ".{0}".format(query) gexf += ".gexf" nx.write_gexf(G, gexf) logging.debug("Graph written to `{0}` (|V|={1}, |E|={2})".\ format(gexf, len(G), G.size()))
def graph(args)
%prog graph best.edges

Convert Celera Assembler's "best.edges" to a GEXF file that can be fed into
Gephi to check the topology of the best overlap graph. Mutual best edges are
represented as thicker edges.

Reference:
https://github.com/PacificBiosciences/Bioinformatics-Training/blob/master/scripts/CeleraToGephi.py
3.300206
3.179338
1.038017
p = OptionParser(astat.__doc__) p.add_option("--cutoff", default=1000, type="int", help="Length cutoff [default: %default]") p.add_option("--genome", default="", help="Genome name [default: %default]") p.add_option("--arrDist", default=False, action="store_true", help="Use arrDist instead [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) covfile, = args cutoff = opts.cutoff genome = opts.genome plot_arrDist = opts.arrDist suffix = ".{0}".format(cutoff) small_covfile = covfile + suffix update_covfile = need_update(covfile, small_covfile) if update_covfile: fw = open(small_covfile, "w") else: logging.debug("Found `{0}`, will use this one".format(small_covfile)) covfile = small_covfile fp = open(covfile) header = next(fp) if update_covfile: fw.write(header) data = [] msg = "{0} tigs scanned ..." for row in fp: tigID, rho, covStat, arrDist = row.split() tigID = int(tigID) if tigID % 1000000 == 0: sys.stderr.write(msg.format(tigID) + "\r") rho, covStat, arrDist = [float(x) for x in (rho, covStat, arrDist)] if rho < cutoff: continue if update_covfile: fw.write(row) data.append((tigID, rho, covStat, arrDist)) print(msg.format(tigID), file=sys.stderr) from jcvi.graphics.base import plt, savefig logging.debug("Plotting {0} data points.".format(len(data))) tigID, rho, covStat, arrDist = zip(*data) y = arrDist if plot_arrDist else covStat ytag = "arrDist" if plot_arrDist else "covStat" fig = plt.figure(1, (7, 7)) ax = fig.add_axes([.12, .1, .8, .8]) ax.plot(rho, y, ".", color="lightslategrey") xtag = "rho" info = (genome, xtag, ytag) title = "{0} {1} vs. {2}".format(*info) ax.set_title(title) ax.set_xlabel(xtag) ax.set_ylabel(ytag) if plot_arrDist: ax.set_yscale('log') imagename = "{0}.png".format(".".join(info)) savefig(imagename, dpi=150)
def astat(args)
%prog astat coverage.log

Create coverage-rho scatter plot.
2.594877
2.540494
1.021406
if fasta: s = SeqRecord(shredded_seq, id=fragID, description="") SeqIO.write([s], fw, "fasta") return seq = str(shredded_seq) slen = len(seq) qvs = qvchar * slen # shredded reads have default low qv if clr is None: clr_beg, clr_end = 0, slen else: clr_beg, clr_end = clr print(frgTemplate.format(fragID=fragID, libID=libID, seq=seq, qvs=qvs, clr_beg=clr_beg, clr_end=clr_end), file=fw)
def emitFragment(fw, fragID, libID, shredded_seq, clr=None, qvchar='l', fasta=False)
Print out the shredded sequence.
2.924759
2.886835
1.013137
p = OptionParser(shred.__doc__) p.set_depth(depth=2) p.add_option("--readlen", default=1000, type="int", help="Desired length of the reads [default: %default]") p.add_option("--minctglen", default=0, type="int", help="Ignore contig sequence less than [default: %default]") p.add_option("--shift", default=50, type="int", help="Overlap between reads must be at least [default: %default]") p.add_option("--fasta", default=False, action="store_true", help="Output shredded reads as FASTA sequences [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args libID = fastafile.split(".")[0] depth = opts.depth readlen = opts.readlen shift = opts.shift outfile = libID + ".depth{0}".format(depth) if opts.fasta: outfile += ".fasta" else: outfile += ".frg" f = Fasta(fastafile, lazy=True) fw = must_open(outfile, "w", checkexists=True) if not opts.fasta: print(headerTemplate.format(libID=libID), file=fw) for ctgID, (name, rec) in enumerate(f.iteritems_ordered()): seq = rec.seq seqlen = len(seq) if seqlen < opts.minctglen: continue shredlen = min(seqlen - shift, readlen) numreads = max(seqlen * depth / shredlen, 1) center_range_width = seqlen - shredlen ranges = [] if depth == 1: if seqlen < readlen: ranges.append((0, seqlen)) else: for begin in xrange(0, seqlen, readlen - shift): end = min(seqlen, begin + readlen) ranges.append((begin, end)) else: if numreads == 1: ranges.append((0, shredlen)) else: prev_begin = -1 center_increments = center_range_width * 1. / (numreads - 1) for i in xrange(numreads): begin = center_increments * i end = begin + shredlen begin, end = int(begin), int(end) if begin == prev_begin: continue ranges.append((begin, end)) prev_begin = begin for shredID, (begin, end) in enumerate(ranges): shredded_seq = seq[begin:end] fragID = "{0}.{1}.frag{2}.{3}-{4}".format(libID, ctgID, shredID, begin, end) emitFragment(fw, fragID, libID, shredded_seq, fasta=opts.fasta) fw.close() logging.debug("Shredded reads are written to `{0}`.".format(outfile)) return outfile
def shred(args)
%prog shred fastafile

Similar to the method of `shredContig` in the runCA script. The contigs are
shredded into pseudo-reads of specified length and depth.
2.748945
2.694163
1.020334
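The window placement in shred() for depth > 1 spaces numreads windows of length shredlen evenly across the contig. A sketch of that arithmetic (floor division used where the Python 2 original relies on integer division):

def shred_ranges(seqlen, readlen=1000, depth=2, shift=50):
    shredlen = min(seqlen - shift, readlen)
    numreads = max(seqlen * depth // shredlen, 1)
    if numreads == 1:
        return [(0, shredlen)]
    step = (seqlen - shredlen) / (numreads - 1.)  # spacing between starts
    ranges, prev = [], -1
    for i in range(numreads):
        begin = int(step * i)
        if begin != prev:  # skip duplicate starts
            ranges.append((begin, begin + shredlen))
            prev = begin
    return ranges

print(shred_ranges(2500))
# [(0, 1000), (375, 1375), (750, 1750), (1125, 2125), (1500, 2500)]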
p = OptionParser(tracedb.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) action, = args assert action in ("xml", "lib", "frg") CMD = "tracedb-to-frg.pl" xmls = glob("xml*") if action == "xml": for xml in xmls: cmd = CMD + " -xml {0}".format(xml) sh(cmd, outfile="/dev/null", errfile="/dev/null", background=True) elif action == "lib": cmd = CMD + " -lib {0}".format(" ".join(xmls)) sh(cmd) elif action == "frg": for xml in xmls: cmd = CMD + " -frg {0}".format(xml) sh(cmd, background=True)
def tracedb(args)
%prog tracedb <xml|lib|frg>

Run `tracedb-to-frg.pl` within the current folder.
2.972987
2.260542
1.315165
assert op.exists(fastafile) matefile = fastafile.rsplit(".", 1)[0] + ".mates" if op.exists(matefile): logging.debug("matepairs file `{0}` found".format(matefile)) else: logging.debug("parsing matepairs from `{0}`".format(fastafile)) matefw = open(matefile, "w") it = SeqIO.parse(fastafile, "fasta") for fwd, rev in zip(it, it): print("{0}\t{1}".format(fwd.id, rev.id), file=matefw) matefw.close() return matefile
def make_matepairs(fastafile)
Assumes the mates are adjacent sequence records
2.475625
2.513575
0.984902
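The zip(it, it) idiom above pairs adjacent records because both slots pull from the same iterator, so items come off two at a time:

it = iter(["read1/1", "read1/2", "read2/1", "read2/2"])
print(list(zip(it, it)))
# [('read1/1', 'read1/2'), ('read2/1', 'read2/2')]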
from jcvi.formats.fasta import clean, make_qual p = OptionParser(fasta.__doc__) p.add_option("--clean", default=False, action="store_true", help="Clean up irregular chars in seq") p.add_option("--matefile", help="Matepairs file") p.add_option("--maxreadlen", default=262143, type="int", help="Maximum read length allowed") p.add_option("--minreadlen", default=1000, type="int", help="Minimum read length allowed") p.add_option("--sequential", default=False, action="store_true", help="Overwrite read name (e.g. long Pacbio name)") p.set_size() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args maxreadlen = opts.maxreadlen minreadlen = opts.minreadlen if maxreadlen > 0: split = False f = Fasta(fastafile, lazy=True) for id, size in f.itersizes_ordered(): if size > maxreadlen: logging.debug("Sequence {0} (size={1}) longer than max read len {2}".\ format(id, size, maxreadlen)) split = True break if split: for f in split_fastafile(fastafile, maxreadlen=maxreadlen): fasta([f, "--maxreadlen=0"]) return plate = op.basename(fastafile).split(".")[0] mated = (opts.size != 0) mean, sv = get_mean_sv(opts.size) if mated: libname = "Sanger{0}Kb-".format(opts.size / 1000) + plate else: libname = plate frgfile = libname + ".frg" if opts.clean: cleanfasta = fastafile.rsplit(".", 1)[0] + ".clean.fasta" if need_update(fastafile, cleanfasta): clean([fastafile, "--canonical", "-o", cleanfasta]) fastafile = cleanfasta if mated: qualfile = make_qual(fastafile, score=21) if opts.matefile: matefile = opts.matefile assert op.exists(matefile) else: matefile = make_matepairs(fastafile) cmd = "convert-fasta-to-v2.pl" cmd += " -l {0} -s {1} -q {2} ".format(libname, fastafile, qualfile) if mated: cmd += "-mean {0} -stddev {1} -m {2} ".format(mean, sv, matefile) sh(cmd, outfile=frgfile) return fw = must_open(frgfile, "w") print(headerTemplate.format(libID=libname), file=fw) sequential = opts.sequential i = j = 0 for fragID, seq in parse_fasta(fastafile): if len(seq) < minreadlen: j += 1 continue i += 1 if sequential: fragID = libname + str(100000000 + i) emitFragment(fw, fragID, libname, seq) fw.close() logging.debug("A total of {0} fragments written to `{1}` ({2} discarded).".\ format(i, frgfile, j))
def fasta(args)
%prog fasta fastafile

Convert reads in FASTA format to a CA frg file. If a .qual file is found,
use it; otherwise make a fake qual file. Mates are assumed to be adjacent
sequence records (i.e. /1, /2, /1, /2 ...) unless a matefile is given.
3.382193
3.294873
1.026502
p = OptionParser(sff.__doc__) p.add_option("--prefix", dest="prefix", default=None, help="Output frg filename prefix") p.add_option("--nodedup", default=False, action="store_true", help="Do not remove duplicates [default: %default]") p.set_size() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(p.print_help()) sffiles = args plates = [x.split(".")[0].split("_")[-1] for x in sffiles] mated = (opts.size != 0) mean, sv = get_mean_sv(opts.size) if len(plates) > 1: plate = plates[0][:-1] + 'X' else: plate = "_".join(plates) if mated: libname = "Titan{0}Kb-".format(opts.size / 1000) + plate else: libname = "TitanFrags-" + plate if opts.prefix: libname = opts.prefix cmd = "sffToCA" cmd += " -libraryname {0} -output {0} ".format(libname) cmd += " -clear 454 -trim chop " if mated: cmd += " -linker titanium -insertsize {0} {1} ".format(mean, sv) if opts.nodedup: cmd += " -nodedup " cmd += " ".join(sffiles) sh(cmd)
def sff(args)
%prog sff sffiles

Convert reads in 454 SFF format to a CA frg file. Turn --nodedup on if
another deduplication mechanism is used (e.g. CD-HIT-454). See
assembly.sff.deduplicate().
4.257879
3.683872
1.155816
from jcvi.formats.fastq import guessoffset p = OptionParser(fastq.__doc__) p.add_option("--outtie", dest="outtie", default=False, action="store_true", help="Are these outie reads? [default: %default]") p.set_phred() p.set_size() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(p.print_help()) fastqfiles = [get_abs_path(x) for x in args] size = opts.size outtie = opts.outtie if size > 1000 and (not outtie): logging.debug("[warn] long insert size {0} but not outtie".format(size)) mated = (size != 0) libname = op.basename(args[0]).split(".")[0] libname = libname.replace("_1_sequence", "") frgfile = libname + ".frg" mean, sv = get_mean_sv(opts.size) cmd = "fastqToCA" cmd += " -libraryname {0} ".format(libname) fastqs = " ".join("-reads {0}".format(x) for x in fastqfiles) if mated: assert len(args) in (1, 2), "you need one or two fastq files for mated library" fastqs = "-mates {0}".format(",".join(fastqfiles)) cmd += "-insertsize {0} {1} ".format(mean, sv) cmd += fastqs offset = int(opts.phred) if opts.phred else guessoffset([fastqfiles[0]]) illumina = (offset == 64) if illumina: cmd += " -type illumina" if outtie: cmd += " -outtie" sh(cmd, outfile=frgfile)
def fastq(args)
%prog fastq fastqfile

Convert reads in FASTQ format to a CA frg file.
4.039793
3.915254
1.031808
p = OptionParser(clr.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) blastfile = args[0] fastafiles = args[1:] sizes = {} for fa in fastafiles: f = Fasta(fa) sizes.update(f.itersizes()) b = Blast(blastfile) for query, hits in b.iter_hits(): qsize = sizes[query] vectors = list((x.qstart, x.qstop) for x in hits) vmin, vmax = range_minmax(vectors) left_size = vmin - 1 right_size = qsize - vmax if left_size > right_size: clr_start, clr_end = 0, vmin else: clr_start, clr_end = vmax, qsize print("\t".join(str(x) for x in (query, clr_start, clr_end))) del sizes[query] for q, size in sorted(sizes.items()): print("\t".join(str(x) for x in (q, 0, size)))
def clr(args)
%prog blastfile fastafiles

Calculate the vector clear range file based on BLAST against the vectors.
2.688478
2.366682
1.135969
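A sketch of the decision rule in clr(): merge all vector hits into one span (what range_minmax computes), then keep whichever vector-free flank is larger. Coordinates below are toy values:

def clear_range(qsize, vector_spans):
    vmin = min(start for start, stop in vector_spans)
    vmax = max(stop for start, stop in vector_spans)
    left, right = vmin - 1, qsize - vmax
    return (0, vmin) if left > right else (vmax, qsize)

print(clear_range(1000, [(900, 980)]))         # (0, 900): keep the left flank
print(clear_range(1000, [(1, 60), (40, 90)]))  # (90, 1000): keep the right flank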
import re if rule is None: return name k = re.search("(?<=^head)[0-9]{1,2}$", rule) if k: k = k.group(0) tname = name[int(k):] else: k = re.search("(?<=^ohead)[0-9]{1,2}$", rule) if k: k = k.group(0) tname = name[:int(k)] else: k = re.search("(?<=^tail)[0-9]{1,2}$", rule) if k: k = k.group(0) tname = name[:-int(k)] else: k = re.search("(?<=^otail)[0-9]{1,2}$", rule) if k: k = k.group(0) tname = name[-int(k):] else: print(truncate_name.__doc__, file=sys.stderr) raise ValueError('Wrong rule for truncation!') return tname
def truncate_name(name, rule=None)
shorten taxa names for tree display

Options of rule. This only affects tree display.
- headn (eg. head3 truncates first 3 chars)
- oheadn (eg. ohead3 retains only the first 3 chars)
- tailn (eg. tail3 truncates last 3 chars)
- otailn (eg. otail3 retains only the last 3 chars)

n = 1 ~ 99
1.991637
1.812523
1.09882
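The four slicing rules spelled out on a toy name, equivalent to calling truncate_name with head3, ohead3, tail3, and otail3:

name = "Arabidopsis_thaliana"
print(name[3:])   # head3:  'bidopsis_thaliana' (drop first 3 chars)
print(name[:3])   # ohead3: 'Ara' (keep only first 3 chars)
print(name[:-3])  # tail3:  'Arabidopsis_thali' (drop last 3 chars)
print(name[-3:])  # otail3: 'ana' (keep only last 3 chars)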
for barcode in barcodemap: if barcode in name: return barcodemap[barcode] return name
def decode_name(name, barcodemap)
rename seq/taxon name, typically for a tree display, according to a barcode
map given in a dictionary

By definition barcodes should be distinctive.
3.392759
4.137232
0.820055
a, b = .1, .6  # x positions of maxx mya and minx mya, respectively

def cv(x):
    # linear rescale from mya to axes coordinates
    return b - (x - minx) * (b - a) / (maxx - minx)

ax.plot((a, b), (.5, .5), "k-")
tick = .015
for mya in xrange(maxx - 25, 0, -25):
    p = cv(mya)
    ax.plot((p, p), (.5, .5 - tick), "k-")
    ax.text(p, .5 - 2.5 * tick, str(mya), ha="center", va="center")

ax.text((a + b) / 2, .5 - 5 * tick, "Time before present (million years)",
        ha="center", va="center")

# Source:
# http://www.weston.org/schools/ms/biologyweb/evolution/handouts/GSAchron09.jpg
Geo = (("Neogene", 2.6, 23.0, "#fee400"),
       ("Paleogene", 23.0, 65.5, "#ff9a65"),
       ("Cretaceous", 65.5, 145.5, "#80ff40"),
       ("Jurassic", 145.5, 201.6, "#33fff3"))
h = .05
for era, start, end, color in Geo:
    start, end = cv(start), cv(end)
    end = max(a, end)
    p = Rectangle((end, .5 + tick / 2), abs(start - end), h,
                  lw=1, ec="w", fc=color)
    ax.text((start + end) / 2, .5 + (tick + h) / 2, era,
            ha="center", va="center", size=9)
    ax.add_patch(p)
def draw_geoscale(ax, minx=0, maxx=175)
Draw geological epoch on million year ago (mya) scale.
4.852648
4.508296
1.076382
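The cv() mapping is a linear rescale from mya to axes coordinates, pinning minx mya at x = b and maxx mya at x = a:

a, b, minx, maxx = .1, .6, 0, 175
cv = lambda x: b - (x - minx) * (b - a) / (maxx - minx)
print(cv(0))    # 0.6 -- the present day sits at the right end of the scale
print(cv(175))  # ~0.1 (up to float rounding) -- the oldest end of the scale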
p = OptionParser(main.__doc__)
p.add_option("--outgroup", help="Outgroup for rerooting the tree. " +
             "Use comma to separate multiple taxa.")
p.add_option("--noreroot", default=False, action="store_true",
             help="Don't reroot the input tree [default: %default]")
p.add_option("--rmargin", default=.3, type="float",
             help="Set blank rmargin to the right [default: %default]")
p.add_option("--gffdir", default=None,
             help="The directory that contain GFF files [default: %default]")
p.add_option("--sizes", default=None,
             help="The FASTA file or the sizes file [default: %default]")
p.add_option("--SH", default=None, type="string",
             help="SH test p-value [default: %default]")
p.add_option("--scutoff", default=0, type="int",
             help="cutoff for displaying node support, 0-100 [default: %default]")
p.add_option("--barcode", default=None,
             help="path to seq names barcode mapping file: "
             "barcode<tab>new_name [default: %default]")
p.add_option("--leafcolor", default="k",
             help="Font color for the OTUs, or path to a file "
             "containing color mappings: leafname<tab>color [default: %default]")
p.add_option("--leaffont", default=12, help="Font size for the OTUs")
p.add_option("--geoscale", default=False, action="store_true",
             help="Plot geological scale")
opts, args, iopts = p.set_image_options(args, figsize="8x6")

if len(args) != 1:
    sys.exit(not p.print_help())

datafile, = args
outgroup = None
reroot = not opts.noreroot
if opts.outgroup:
    outgroup = opts.outgroup.split(",")

if datafile == "demo":
    tx = ""  # demo Newick string elided in this copy
else:
    logging.debug("Load tree file `{0}`.".format(datafile))
    tx = open(datafile).read()

pf = datafile.rsplit(".", 1)[0]

fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])

if opts.geoscale:
    draw_geoscale(root)
else:
    if op.isfile(opts.leafcolor):
        leafcolor = "k"
        leafcolorfile = opts.leafcolor
    else:
        leafcolor = opts.leafcolor
        leafcolorfile = None

    draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor,
              outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir,
              sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff,
              barcodefile=opts.barcode, leafcolorfile=leafcolorfile,
              leaffont=opts.leaffont)

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def main(args)
%prog newicktree

Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.

With --barcode a mapping file can be provided to convert seq names to eg.
species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited.
2.992423
2.849976
1.049982
cl = TreeFixCommandline(input=input, \ stree_file=stree_file, smap_file=smap_file, a_ext=a_ext, \ o=o_ext, n=n_ext, **kwargs) outtreefile = input.rsplit(o_ext, 1)[0] + n_ext print("TreeFix:", cl, file=sys.stderr) r, e = cl.run() if e: print("***TreeFix could not run", file=sys.stderr) return None else: logging.debug("new tree written to {0}".format(outtreefile)) return outtreefile
def run_treefix(input, stree_file, smap_file, a_ext=".fasta", o_ext=".dnd", n_ext=".treefix.dnd", **kwargs)
get the ML tree closest to the species tree
3.280392
3.277595
1.000853
cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs) r, e = cl.run() print("Gblocks:", cl, file=sys.stderr) if e: print("***Gblocks could not run", file=sys.stderr) return None else: print(r, file=sys.stderr) alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*', \ r'\1', r, flags=re.DOTALL) alignp = int(alignp) if alignp <= 10: print("** WARNING ** Only %s %% positions retained by Gblocks. " \ "Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr) return None else: return align_fasta_file+"-gb"
def run_gblocks(align_fasta_file, **kwargs)
remove poorly aligned positions and divergent regions with Gblocks
5.344467
5.155605
1.036632
cl = FfitchCommandline(datafile=distfile, outtreefile=outtreefile, \ intreefile=intreefile, **kwargs) r, e = cl.run() if e: print("***ffitch could not run", file=sys.stderr) return None else: print("ffitch:", cl, file=sys.stderr) return outtreefile
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs)
Infer tree branch lengths using ffitch in EMBOSS PHYLIP
4.079683
4.297816
0.949246
tree = Tree(treefile, format=format) leaves = [t.name for t in tree.get_leaves()][::-1] outgroup = [] for o in must_open(outgroupfile): o = o.strip() for leaf in leaves: if leaf[:len(o)] == o: outgroup.append(leaf) if outgroup: break if not outgroup: print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr) return treefile try: tree.set_outgroup(tree.get_common_ancestor(*outgroup)) except ValueError: assert type(outgroup) == list outgroup = outgroup[0] tree.set_outgroup(outgroup) tree.write(outfile=outfile, format=format) logging.debug("Rerooted tree printed to {0}".format(outfile)) return outfile
def smart_reroot(treefile, outgroupfile, outfile, format=0)
simple function to reroot Newick format tree using ete2

Tree reading format options see here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
2.805085
2.967949
0.945126
phy_file = op.join(work_dir, "work", "aln.phy") AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed") phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs) logging.debug("Building ML tree using PhyML: %s" % phyml_cl) stdout, stderr = phyml_cl() tree_file = phy_file + "_phyml_tree.txt" if not op.exists(tree_file): print("***PhyML failed.", file=sys.stderr) return None sh("cp {0} {1}".format(tree_file, outfile), log=False) logging.debug("ML tree printed to %s" % outfile) return outfile, phy_file
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs)
build maximum likelihood tree of DNA seqs with PhyML
3.842254
3.708928
1.035947
work_dir = op.join(work_dir, "work") mkdir(work_dir) phy_file = op.join(work_dir, "aln.phy") AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed") raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work")) mkdir(raxml_work) raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \ sequences=phy_file, algorithm="a", model="GTRGAMMA", \ parsimony_seed=12345, rapid_bootstrap_seed=12345, \ num_replicates=100, name="aln", \ working_dir=raxml_work, **kwargs) logging.debug("Building ML tree using RAxML: %s" % raxml_cl) stdout, stderr = raxml_cl() tree_file = "{0}/RAxML_bipartitions.aln".format(raxml_work) if not op.exists(tree_file): print("***RAxML failed.", file=sys.stderr) sh("rm -rf %s" % raxml_work, log=False) return None sh("cp {0} {1}".format(tree_file, outfile), log=False) logging.debug("ML tree printed to %s" % outfile) sh("rm -rf %s" % raxml_work) return outfile, phy_file
def build_ml_raxml(alignment, outfile, work_dir=".", **kwargs)
build maximum likelihood tree of DNA seqs with RAxML
3.287886
3.248412
1.012152
assert op.isfile(reftree) shout = must_open(shout, "a") raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work")) mkdir(raxml_work) raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \ sequences=phy_file, algorithm="h", model="GTRGAMMA", \ name="SH", starting_tree=reftree, bipartition_filename=querytree, \ working_dir=raxml_work) logging.debug("Running SH test in RAxML: %s" % raxml_cl) o, stderr = raxml_cl() # hard coded try: pval = re.search('(Significantly.*:.*)', o).group(0) except: print("SH test failed.", file=sys.stderr) else: pval = pval.strip().replace("\t"," ").replace("%","\%") print("{0}\t{1}".format(op.basename(querytree), pval), file=shout) logging.debug("SH p-value appended to %s" % shout.name) shout.close() return shout.name
def SH_raxml(reftree, querytree, phy_file, shout="SH_out.txt")
SH test using RAxML

querytree can be a single tree or a bunch of trees (eg. from bootstrapping)
4.58297
4.599276
0.996455
aln = AlignIO.read(alnfle, alntype) alnlen = aln.get_alignment_length() nseq = len(aln) subaln = None subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype) if subtype == "synonymous": for j in range( 0, alnlen, 3 ): aa = None for i in range(nseq): codon = str(aln[i, j: j + 3].seq) if codon not in CODON_TRANSLATION: break if aa and CODON_TRANSLATION[codon] != aa: break else: aa = CODON_TRANSLATION[codon] else: if subaln is None: subaln = aln[:, j: j + 3] else: subaln += aln[:, j: j + 3] if subtype == "fourfold": for j in range( 0, alnlen, 3 ): for i in range(nseq): codon = str(aln[i, j: j + 3].seq) if codon not in FOURFOLD: break else: if subaln is None: subaln = aln[:, j: j + 3] else: subaln += aln[:, j: j + 3] if subaln: AlignIO.write(subaln, subalnfile, alntype) return subalnfile else: print("No sites {0} selected.".format(subtype), file=sys.stderr) return None
def subalignment(alnfle, subtype, alntype="fasta")
Subset synonymous or fourfold degenerate sites from an alignment

input should be a codon alignment
2.104942
1.96585
1.070754
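A sketch of the column test behind subtype="fourfold"; FOURFOLD here is a stand-in containing only the alanine codons, whereas the real module defines the full table. A codon column is kept only when every sequence's codon is in the set:

FOURFOLD = {"GCA", "GCC", "GCG", "GCT"}  # stand-in: Ala codons only

def keep_fourfold_columns(rows):
    kept = []
    for j in range(0, len(rows[0]), 3):
        if all(row[j:j + 3] in FOURFOLD for row in rows):
            kept.append(j)
    return kept

print(keep_fourfold_columns(["GCAATG", "GCCATG"]))
# [0] -- only the first codon is fourfold degenerate in every row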
fw = must_open(filename+".merged", "w") rows = file(filename).readlines() rows = [row.strip().split(colsep) for row in rows] l = len(rows[0]) for rowi, row in enumerate(rows): n = len(rows) i = rowi+1 while i <= min(rowi+local, n-1): merge = 1 row2 = rows[i] for j in range(l): a = row[j] b = row2[j] if fieldcheck: a = set(a.split(fsep)) a = fsep.join(sorted(list(a))) b = set(b.split(fsep)) b = fsep.join(sorted(list(b))) if all([a!=ignore, b!=ignore, a not in b, b not in a]): merge = 0 i += 1 break if merge: for x in range(l): if row[x] == ignore: rows[rowi][x] = row2[x] elif row[x] in row2[x]: rows[rowi][x] = row2[x] else: rows[rowi][x] = row[x] row = rows[rowi] rows.remove(row2) print(colsep.join(row), file=fw) fw.close() return fw.name
def merge_rows_local(filename, ignore=".", colsep="\t", local=10, fieldcheck=True, fsep=",")
merge overlapping rows within given row count distance
2.430209
2.420234
1.004121
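The per-field test in merge_rows_local() reduces to: two values are compatible when either is the ignore character or one contains the other. The equivalent predicate:

def compatible(a, b, ignore="."):
    return a == ignore or b == ignore or a in b or b in a

print(compatible("g1", "."))      # True  -- ignore always merges
print(compatible("g1", "g1,g2"))  # True  -- one value contains the other
print(compatible("g1", "g3"))     # False -- conflicting fields block the merge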
tandems = [f.strip().split(",") for f in file(tandemfile)] fw = must_open(mcscanfile+".withtandems", "w") fp = must_open(mcscanfile) seen =set() for i, row in enumerate(fp): if row[0] == '#': continue anchorslist = row.strip().split("\t") anchors = set([a.split(",")[0] for a in anchorslist]) anchors.remove(".") if anchors & seen == anchors: continue newanchors = [] for a in anchorslist: if a == ".": newanchors.append(a) continue for t in tandems: if a in t: newanchors.append(",".join(t)) seen.update(t) break else: newanchors.append(a) seen.add(a) print("\t".join(newanchors), file=fw) fw.close() newmcscanfile = merge_rows_local(fw.name) logging.debug("Tandems added to `{0}`. Results in `{1}`".\ format(mcscanfile, newmcscanfile)) fp.seek(0) logging.debug("{0} rows merged to {1} rows".\ format(len(fp.readlines()), len(file(newmcscanfile).readlines()))) sh("rm %s" % fw.name) return newmcscanfile
def add_tandems(mcscanfile, tandemfile)
add tandem genes to anchor genes in mcscan file
3.290733
3.216578
1.023054
from jcvi.graphics.base import discrete_rainbow p = OptionParser(prepare.__doc__) p.add_option("--addtandem", help="path to tandemfile [default: %default]") p.add_option("--writecolors", default=False, action="store_true", \ help="generate a gene_name to color mapping file which will be taken " \ "by jcvi.apps.phylo.draw [default: %default]") p.set_outdir(outdir="sequences") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mcscanfile, cdsfile = args if opts.addtandem: tandemfile = opts.addtandem mcscanfile_with_tandems = add_tandems(mcscanfile, tandemfile) mcscanfile = mcscanfile_with_tandems seqdir = opts.outdir mkdir(seqdir) f = Fasta(cdsfile) fp = must_open(mcscanfile) if opts.writecolors: fc = must_open("leafcolors.txt", "w") n = 0 for i, row in enumerate(fp): row = row.strip().split("\t") if i == 0: l = len(row) if l <= 20: colors = discrete_rainbow(l, shuffle=False)[1] else: colors = discrete_rainbow(l, usepreset=False, shuffle=False)[1] warnings.warn("*** WARNING ***\n" \ "Too many columns. Colors may not be all distinctive.") assert len(row)==l, "All rows should have same number of fields." anchors = set() for j, atom in enumerate(row): color = "%s,%s,%s" % colors[j] if atom == ".": continue elif "," in atom: atom = atom.split(",") for a in atom: fc.write("{0}\t{1}\n".format(a, color)) anchors.add(a) else: fc.write("{0}\t{1}\n".format(atom, color)) anchors.add(atom) if len(anchors) <= 3: print("Not enough seqs to build trees for {0}".format(anchors), file=sys.stderr) continue pivot = row[0] fw = must_open("%s/%s.cds" % (seqdir, pivot), "w") for a in anchors: if a not in f: print(a) a = find_first_isoform(a, f) assert a, a arec = f[a] SeqIO.write((arec), fw, "fasta") fw.close() n+=1 if opts.writecolors: fc.close() logging.debug("leaf colors written to `{0}`".format(fc.name)) logging.debug("cds of {0} syntelog groups written to {1}/".format(n, seqdir)) return seqdir
def prepare(args)
%prog prepare mcscanfile cdsfile [options]

Pick sequences from cdsfile to form fasta files, according to multiple
alignment in the mcscanfile. The fasta sequences can then be used to
construct phylogenetic trees.

Use --addtandem=tandemfile to collapse tandems of anchors into a single row.
The tandemfile must be provided with *ALL* genomes involved, otherwise the
result will be incomplete and redundant.
3.817101
3.440197
1.109559
from jcvi.graphics.tree import draw_tree if shfile: SHs = DictFile(shfile, delimiter="\t") ntrees = len(trees) n = nrow * ncol for x in xrange(int(ceil(float(ntrees)/n))): fig = plt.figure(1, (iopts.w, iopts.h)) if iopts \ else plt.figure(1, (5, 5)) root = fig.add_axes([0, 0, 1, 1]) xiv = 1. / ncol yiv = 1. / nrow xstart = list(np.arange(0, 1, xiv)) * nrow ystart = list(chain(*zip(*[list(np.arange(0, 1, yiv))[::-1]] * ncol))) for i in xrange(n*x, n*(x+1)): if i == ntrees: break ax = fig.add_axes([xstart[i%n], ystart[i%n], xiv, yiv]) f = trees.keys()[i] tree = trees[f] try: SH = SHs[f] except: SH = None draw_tree(ax, tree, rmargin=rmargin, reroot=False, \ supportcolor="r", SH=SH, **kwargs) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() format = iopts.format if iopts else "pdf" dpi = iopts.dpi if iopts else 300 if n == 1: image_name = f.rsplit(".", 1)[0] + "." + format else: image_name = "trees{0}.{1}".format(x, format) image_name = op.join(outdir, image_name) savefig(image_name, dpi=dpi, iopts=iopts) plt.clf()
def _draw_trees(trees, nrow=1, ncol=1, rmargin=.3, iopts=None, outdir=".", shfile=None, **kwargs)
Draw one or multiple trees on one plot.
2.801075
2.812183
0.99605
p = OptionParser(enrich.__doc__) p.add_option("--ghost", default=False, action="store_true", help="Add ghost homologs already used [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) omgfile, groupsfile, ntaxa = args ntaxa = int(ntaxa) ghost = opts.ghost # Get gene pair => weight mapping weights = get_edges() info = get_info() # Get gene => taxon mapping info = dict((k, v.split()[5]) for k, v in info.items()) groups = Grouper() fp = open(groupsfile) for row in fp: members = row.strip().split(",") groups.join(*members) logging.debug("Imported {0} families with {1} members.".\ format(len(groups), groups.num_members)) seen = set() omggroups = Grouper() fp = open(omgfile) for row in fp: genes, idxs = row.split() genes = genes.split(",") seen.update(genes) omggroups.join(*genes) nmembers = omggroups.num_members logging.debug("Imported {0} OMG families with {1} members.".\ format(len(omggroups), nmembers)) assert nmembers == len(seen) alltaxa = set(str(x) for x in range(ntaxa)) recruited = [] fp = open(omgfile) for row in fp: genes, idxs = row.split() genes = genes.split(",") a = genes[0] idxs = set(idxs.split(",")) missing_taxa = alltaxa - idxs if not missing_taxa: print(row.rstrip()) continue leftover = groups[a] if not ghost: leftover = set(leftover) - seen if not leftover: print(row.rstrip()) continue leftover_sorted_by_taxa = dict((k, \ [x for x in leftover if info[x] == k]) \ for k in missing_taxa) #print genes, leftover #print leftover_sorted_by_taxa solutions = [] for solution in product(*leftover_sorted_by_taxa.values()): score = sum(weights.get((a, b), 0) for a in solution for b in genes) if score == 0: continue score += sum(weights.get((a, b), 0) for a, b in combinations(solution, 2)) solutions.append((score, solution)) #print solution, score best_solution = max(solutions) if solutions else None if best_solution is None: print(row.rstrip()) continue #print "best ==>", best_solution best_score, best_addition = best_solution genes.extend(best_addition) recruited.extend(best_addition) genes = sorted([(info[x], x) for x in genes]) idxs, genes = zip(*genes) if ghost: # decorate additions so it's clear that they were added pgenes = [] for g in genes: if g in recruited and g in seen: pgenes.append("|{0}|".format(g)) else: pgenes.append(g) genes = pgenes print("\t".join((",".join(genes), ",".join(idxs)))) if not ghost: seen.update(best_addition) logging.debug("Recruited {0} new genes.".format(len(recruited)))
def enrich(args)
%prog enrich omgfile groups ntaxa > enriched.omg

Enrich OMG output by pulling genes missed by OMG.
3.139113
2.95506
1.062284
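A sketch of the completion scoring in enrich(), with hypothetical weights: candidates are grouped by missing taxon, and each cross-taxon combination is scored against the existing members plus one another (dict insertion order assumed, Python 3.7+):

from itertools import combinations, product

genes = ["g1", "g2"]                         # current family members
by_taxon = {"2": ["x1", "x2"], "3": ["y1"]}  # candidates per missing taxon
weights = {("x1", "g1"): 5, ("x2", "g1"): 1,
           ("y1", "g2"): 2, ("x1", "y1"): 1}

solutions = []
for solution in product(*by_taxon.values()):
    score = sum(weights.get((a, b), 0) for a in solution for b in genes)
    score += sum(weights.get((a, b), 0) for a, b in combinations(solution, 2))
    solutions.append((score, solution))
print(max(solutions))  # (8, ('x1', 'y1'))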
from jcvi.formats.base import DictFile

outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
threadorder = thread.order
fw = open(outfile, "w")
lt = DictFile(listfile, keypos=column, valuepos=None)
threaded = []
imported = set()
for t in thread:
    accn = t.accn
    if accn not in lt:
        continue

    imported.add(accn)
    atoms = lt[accn]
    threaded.append(atoms)

assert len(threaded) == len(imported)

total = sum(1 for x in open(listfile))
logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded)))
fp = open(listfile)
for row in fp:
    atoms = row.split()
    accn = atoms[0]
    if accn in imported:
        continue
    insert_into_threaded(atoms, threaded, threadorder)

for atoms in threaded:
    print("\t".join(atoms), file=fw)

fw.close()
logging.debug("File `{0}` sorted to `{1}`.".format(listfile, outfile))
def sort_layout(thread, listfile, column=0)
Sort the syntelog table according to chromosomal positions. First orient the
contents against threadbed, then for contents not in threadbed, insert at the
nearest neighbor.
3.601279
3.513239
1.025059
p = OptionParser(layout.__doc__) p.add_option("--sort", help="Sort layout file based on bedfile [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) omgfile, taxa = args listfile = omgfile.rsplit(".", 1)[0] + ".list" taxa = taxa.split(",") ntaxa = len(taxa) fw = open(listfile, "w") data = [] fp = open(omgfile) for row in fp: genes, idxs = row.split() row = ["."] * ntaxa genes = genes.split(",") ixs = [int(x) for x in idxs.split(",")] for gene, idx in zip(genes, ixs): row[idx] = gene txs = ",".join(taxa[x] for x in ixs) print("\t".join(("\t".join(row), txs)), file=fw) data.append(row) coldata = zip(*data) ngenes = [] for i, tx in enumerate(taxa): genes = [x for x in coldata[i] if x != '.'] genes = set(x.strip("|") for x in genes) ngenes.append((len(genes), tx)) details = ", ".join("{0} {1}".format(a, b) for a, b in ngenes) total = sum(a for a, b in ngenes) s = "A list of {0} orthologous families that collectively".format(len(data)) s += " contain a total of {0} genes ({1})".format(total, details) print(s, file=sys.stderr) fw.close() lastcolumn = ntaxa + 1 cmd = "sort -k{0},{0} {1} -o {1}".format(lastcolumn, listfile) sh(cmd) logging.debug("List file written to `{0}`.".format(listfile)) sort = opts.sort if sort: thread = Bed(sort) sort_layout(thread, listfile)
def layout(args)
%prog layout omgfile taxa

Build column-formatted gene lists after omgparse(). Use a comma-separated
species list in place of taxa, e.g. "BR,BO,AN,CN".
3.366796
3.118494
1.079622
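How one OMG line becomes a column row in layout(), shown on a toy line: each gene lands in its taxon's column, with "." filling the taxa that are missing.

taxa = ["BR", "BO", "AN", "CN"]
line = "geneA,geneB 0,2"
genes, idxs = line.split()
idxs = idxs.split(",")
row = ["."] * len(taxa)
for gene, idx in zip(genes.split(","), idxs):
    row[int(idx)] = gene
txs = ",".join(taxa[int(x)] for x in idxs)
print("\t".join(row + [txs]))  # geneA  .  geneB  .  BR,AN (tab-separated)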
p = OptionParser(omgparse.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) work, = args omgfiles = glob(op.join(work, "gf*.out")) for omgfile in omgfiles: omg = OMGFile(omgfile) best = omg.best() for bb in best: genes, taxa = zip(*bb) print("\t".join((",".join(genes), ",".join(taxa))))
def omgparse(args)
%prog omgparse work

Parse the OMG outputs to get gene lists.
3.569541
2.884035
1.23769
p = OptionParser(group.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) anchorfiles = args groups = Grouper() for anchorfile in anchorfiles: ac = AnchorFile(anchorfile) for a, b, idx in ac.iter_pairs(): groups.join(a, b) logging.debug("Created {0} groups with {1} members.".\ format(len(groups), groups.num_members)) outfile = opts.outfile fw = must_open(outfile, "w") for g in groups: print(",".join(sorted(g)), file=fw) fw.close() return outfile
def group(args)
%prog group anchorfiles

Group the anchors into ortho-groups. Can input multiple anchor files.
3.398792
2.947951
1.152933
p = OptionParser(omg.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) weightsfiles = args groupfile = group(weightsfiles + ["--outfile=groups"]) weights = get_weights(weightsfiles) info = get_info() fp = open(groupfile) work = "work" mkdir(work) for i, row in enumerate(fp): gf = op.join(work, "gf{0:05d}".format(i)) genes = row.rstrip().split(",") fw = open(gf, "w") contents = "" npairs = 0 for gene in genes: gene_pairs = weights[gene] for a, b, c in gene_pairs: if b not in genes: continue contents += "weight {0}".format(c) + '\n' contents += info[a] + '\n' contents += info[b] + '\n\n' npairs += 1 header = "a group of genes :length ={0}".format(npairs) print(header, file=fw) print(contents, file=fw) fw.close()
def omg(args)
%prog omg weightsfile

Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
<http://137.122.149.195/IsbraSoftware/OMGMec.html>

This script only writes the partitions, but does not launch OMGMec. You may
need to:

$ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????

Then follow up with omgparse() to get the gene lists.
4.323682
4.224358
1.023512
from jcvi.formats.blast import cscore from jcvi.formats.base import DictFile p = OptionParser(omgprepare.__doc__) p.add_option("--norbh", action="store_true", help="Disable RBH hits [default: %default]") p.add_option("--pctid", default=0, type="int", help="Percent id cutoff for RBH hits [default: %default]") p.add_option("--cscore", default=90, type="int", help="C-score cutoff for RBH hits [default: %default]") p.set_stripnames() p.set_beds() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ploidy, anchorfile, blastfile = args norbh = opts.norbh pctid = opts.pctid cs = opts.cscore qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts) fp = open(ploidy) genomeidx = dict((x.split()[0], i) for i, x in enumerate(fp)) fp.close() ploidy = DictFile(ploidy) geneinfo(qbed, qorder, genomeidx, ploidy) geneinfo(sbed, sorder, genomeidx, ploidy) pf = blastfile.rsplit(".", 1)[0] cscorefile = pf + ".cscore" cscore([blastfile, "-o", cscorefile, "--cutoff=0", "--pct"]) ac = AnchorFile(anchorfile) pairs = set((a, b) for a, b, i in ac.iter_pairs()) logging.debug("Imported {0} pairs from `{1}`.".format(len(pairs), anchorfile)) weightsfile = pf + ".weights" fp = open(cscorefile) fw = open(weightsfile, "w") npairs = 0 for row in fp: a, b, c, pct = row.split() c, pct = float(c), float(pct) c = int(c * 100) if (a, b) not in pairs: if norbh: continue if c < cs: continue if pct < pctid: continue c /= 10 # This severely penalizes RBH against synteny print("\t".join((a, b, str(c))), file=fw) npairs += 1 fw.close() logging.debug("Write {0} pairs to `{1}`.".format(npairs, weightsfile))
def omgprepare(args)
%prog omgprepare ploidy anchorsfile blastfile

Prepare to run Sankoff's OMG algorithm to get orthologs.
3.229771
3.051083
1.058566
p = OptionParser(tandem.__doc__) p.add_option("--tandem_Nmax", dest="tandem_Nmax", type="int", default=3, help="merge tandem genes within distance [default: %default]") p.add_option("--percent_overlap", type="int", default=50, help="tandem genes have >=x% aligned sequence, x=0-100 \ [default: %default]") p.set_align(evalue=.01) p.add_option("--not_self", default=False, action="store_true", help="provided is not self blast file [default: %default]") p.add_option("--strip_gene_name", dest="sep", type="string", default=".", help="strip alternative splicing. Use None for no stripping. \ [default: %default]") p.add_option("--genefamily", dest="genefam", action="store_true", help="compile gene families based on similarity [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) blast_file, cds_file, bed_file = args N = opts.tandem_Nmax P = opts.percent_overlap is_self = not opts.not_self sep = opts.sep ofile = opts.outfile tandem_main(blast_file, cds_file, bed_file, N=N, P=P, is_self=is_self, \ evalue=opts.evalue, strip_name=sep, ofile=ofile, genefam=opts.genefam)
def tandem(args)
%prog tandem blast_file cds_file bed_file [options]

Find tandem gene clusters that are separated by N genes, based on the
blast_file filtered by requiring alignments between any two genes to cover
at least 50% (or a user-specified percentage) of either gene.

A pep_file can also be used in the same manner.
3.614868
3.368327
1.073194
od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list) for name, value in parse_qsl(qs, keep_blank_values, strict_parsing): od[name].append(value) return od
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True)
Kind of like urlparse.parse_qs, except returns an ordered dict. Also avoids
replicating that function's bad habit of overriding the built-in 'dict' type.

Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
2.641404
2.917453
0.90538
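A rough standard-library equivalent (Python 3 names assumed) showing what the ordered variant preserves: first-seen key order, with repeated parameters accumulated into lists.

from collections import OrderedDict
from urllib.parse import parse_qsl

od = OrderedDict()
for name, value in parse_qsl("b=2&a=1&a=3"):
    od.setdefault(name, []).append(value)
print(od)  # OrderedDict([('b', ['2']), ('a', ['1', '3'])])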
'Find the position of an item. Raise ValueError if not found.' k = self._key(item) i = bisect_left(self._keys, k) j = bisect_right(self._keys, k) return self._items[i:j].index(item) + i
def index(self, item)
Find the position of an item. Raise ValueError if not found.
3.423772
2.824538
1.212153
'Insert a new item. If equal keys are found, add to the left' k = self._key(item) i = bisect_left(self._keys, k) self._keys.insert(i, k) self._items.insert(i, item)
def insert(self, item)
Insert a new item. If equal keys are found, add to the left
4.230717
2.620456
1.614497
'Insert a new item. If equal keys are found, add to the right' k = self._key(item) i = bisect_right(self._keys, k) self._keys.insert(i, k) self._items.insert(i, item)
def insert_right(self, item)
Insert a new item. If equal keys are found, add to the right
3.95288
2.625476
1.505586
'Remove first occurrence of item. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def remove(self, item)
Remove first occurrence of item. Raise ValueError if not found
4.914875
4.028533
1.220016
'Return first item with a key >= item. Raise ValueError if not found'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self):
    return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,))
def find_ge(self, item)
Return first item with a key >= item. Raise ValueError if not found
4.313375
3.196421
1.349439
'Return first item with a key > item. Raise ValueError if not found' k = self._key(item) i = bisect_right(self._keys, k) if i != len(self): return self._items[i] raise ValueError('No item found with key above: %r' % (k,))
def find_gt(self, item)
Return first item with a key > item. Raise ValueError if not found
4.462658
3.284884
1.358543
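How the bisect-based methods above fit together: the keys live in a parallel sorted list, so each lookup is a binary search. A self-contained miniature that sorts words by length:

from bisect import bisect_left, bisect_right

keys, items = [], []

def insert(item, key=len):
    # matches insert() above: equal keys go to the left
    k = key(item)
    i = bisect_left(keys, k)
    keys.insert(i, k)
    items.insert(i, item)

for word in ["bb", "a", "ccc"]:
    insert(word)
print(items)                         # ['a', 'bb', 'ccc'] -- kept sorted by len
print(items[bisect_right(keys, 2)])  # 'ccc' -- find_gt for key 2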
from jcvi.apps.grid import MakeManager p = OptionParser(batch.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) cdsfile = args[0] anchors = args[1:] workdirs = [".".join(op.basename(x).split(".")[:2]) for x in anchors] for wd in workdirs: mkdir(wd) mm = MakeManager() for wd, ac in zip(workdirs, anchors): pairscdsfile = wd + ".cds.fasta" cmd = "python -m jcvi.apps.ks prepare {} {} -o {}".\ format(ac, cdsfile, pairscdsfile) mm.add((ac, cdsfile), pairscdsfile, cmd) ksfile = wd + ".ks" cmd = "python -m jcvi.apps.ks calc {} -o {} --workdir {}".\ format(pairscdsfile, ksfile, wd) mm.add(pairscdsfile, ksfile, cmd) mm.write()
def batch(args)
%prog batch all.cds *.anchors

Compute Ks values for a set of anchors files. This will generate a bunch of
work directories for each comparison. The anchors file should be in the form
of species1.species2.anchors.
3.51845
3.138587
1.12103