code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
p = OptionParser(grasstruth.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) james, = args fp = open(james) pairs = set() for row in fp: atoms = row.split() genes = [] idx = {} for i, a in enumerate(atoms): aa = a.split("||") for ma in aa: idx[ma] = i genes.extend(aa) genes = [x for x in genes if ":" not in x] Os = [x for x in genes if x.startswith("Os")] for o in Os: for g in genes: if idx[o] == idx[g]: continue pairs.add(tuple(sorted((o, g)))) for a, b in sorted(pairs): print("\t".join((a, b)))
def grasstruth(args)
%prog grasstruth james-pan-grass.txt Prepare truth pairs for 4 grasses.
3.154462
2.948055
1.070015
p = OptionParser(synfind.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] bedfiles = args[1:] fp = open(lastfile) filteredlast = lastfile + ".filtered" fw = open(filteredlast, "w") for row in fp: b = BlastLine(row) if b.query == b.subject: continue print(b, file=fw) fw.close() logging.debug("Filtered LAST file written to `{0}`".format(filteredlast)) allbed = "all.bed" fw = open(allbed, "w") for i, bedfile in enumerate(bedfiles): prefix = chr(ord('A') + i) bed = Bed(bedfile) for b in bed: b.seqid = prefix + b.seqid print(b, file=fw) fw.close() logging.debug("Bed file written to `{0}`".format(allbed))
def synfind(args)
%prog synfind all.last *.bed Prepare input for SynFind.
2.587844
2.415478
1.071359
p = OptionParser(yeasttruth.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) pillars = args[0] gffiles = args[1:] aliases = {} pivot = {} for gffile in gffiles: is_pivot = op.basename(gffile).startswith("Saccharomyces_cerevisiae") gff = Gff(gffile) for g in gff: if g.type != "gene": continue for a in g.attributes["Alias"]: aliases[a] = g.accn if is_pivot: pivot[a] = g.accn logging.debug("Aliases imported: {0}".format(len(aliases))) logging.debug("Pivot imported: {0}".format(len(pivot))) fw = open("yeast.aliases", "w") for k, v in sorted(aliases.items()): print("\t".join((k, v)), file=fw) fw.close() fp = open(pillars) pairs = set() fw = must_open(opts.outfile, "w") for row in fp: atoms = [x for x in row.split() if x != "---"] pps = [pivot[x] for x in atoms if x in pivot] atoms = [aliases[x] for x in atoms if x in aliases] for p in pps: for a in atoms: if p == a: continue pairs.add(tuple(sorted((p, a)))) for a, b in sorted(pairs): print("\t".join((a, b)), file=fw) fw.close()
def yeasttruth(args)
%prog yeasttruth Pillars.tab *.gff Prepare pairs data for 14 yeasts.
2.644542
2.511631
1.052918
from matplotlib_venn import venn2 p = OptionParser(venn.__doc__) opts, args, iopts = p.set_image_options(args, figsize="9x9") if len(args) < 1: sys.exit(not p.print_help()) bcs = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) pad = .02 ystart = 1 ywidth = 1. / len(bcs) tags = ("Bowers", "YGOB", "Schnable") for bc, tag in zip(bcs, tags): fp = open(bc) data = [] for row in fp: prog, pcounts, tcounts, shared = row.split() pcounts = int(pcounts) tcounts = int(tcounts) shared = int(shared) data.append((prog, pcounts, tcounts, shared)) xstart = 0 xwidth = 1. / len(data) for prog, pcounts, tcounts, shared in data: a, b, c = pcounts - shared, tcounts - shared, shared ax = fig.add_axes([xstart + pad, ystart - ywidth + pad, xwidth - 2 * pad, ywidth - 2 * pad]) venn2(subsets=(a, b, c), set_labels=(prog, tag), ax=ax) message = "Sn={0} Pu={1}".\ format(percentage(shared, tcounts, precision=0, mode=-1), percentage(shared, pcounts, precision=0, mode=-1)) print(message, file=sys.stderr) ax.text(.5, .92, latex(message), ha="center", va="center", transform=ax.transAxes, color='b') ax.set_axis_off() xstart += xwidth ystart -= ywidth panel_labels(root, ((.04, .96, "A"), (.04, .96 - ywidth, "B"), (.04, .96 - 2 * ywidth, "C"))) panel_labels(root, ((.5, .98, "A. thaliana duplicates"), (.5, .98 - ywidth, "14 Yeast genomes"), (.5, .98 - 2 * ywidth, "4 Grass genomes"))) normalize_axes(root) savefig("venn.pdf", dpi=opts.dpi)
def venn(args)
%prog venn *.benchmark Display benchmark results as Venn diagram.
3.211409
3.181002
1.009559
p = OptionParser(coge.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) gffs = args for gff in gffs: atoms = op.basename(gff).split(".") gid = atoms[-2] assert gid.startswith("gid") gid = get_number(gid) genomefasta = "genome_{0}.faa.fasta".format(gid) species = "_".join(atoms[0].split("_")[:2]) cdsfasta = species + ".cds.fasta" load([gff, genomefasta, "--id_attribute=Parent", "--outfile={0}".format(cdsfasta)])
def coge(args)
%prog coge *.gff Prepare coge datasets.
4.656058
4.241477
1.097744
p = OptionParser(benchmark.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) pf, bedfile = args truth = pf + ".truth" synfind = pf + ".synfind" mcscanx = pf + ".mcscanx" iadhore = pf + ".iadhore" orthofinder = pf + ".orthofinder" pivots = set([x.accn for x in Bed(bedfile)]) fp = open(truth) truth = set() for row in fp: a, b = row.strip().split("\t")[:2] pivots.add(a) truth.add(tuple(sorted((a, b)))) logging.debug("Truth: {0} pairs".format(len(truth))) fp = open(synfind) benchmarkfile = pf + ".benchmark" fw = must_open(benchmarkfile, "w") synfind = set() for row in fp: atoms = row.strip().split("\t") query, hit, tag = atoms[:3] if tag != "S": continue synfind.add(tuple(sorted((query, hit)))) calc_sensitivity_specificity(synfind, truth, "SynFind", fw) fp = open(mcscanx) mcscanx = set() for row in fp: if row[0] == '#': continue atoms = row.strip().split(":")[1].split() query, hit = atoms[:2] mcscanx.add(tuple(sorted((query, hit)))) calc_sensitivity_specificity(mcscanx, truth, "MCScanX", fw) fp = open(iadhore) iadhore = set() next(fp) for row in fp: atoms = row.strip().split("\t") query, hit = atoms[3:5] iadhore.add(tuple(sorted((query, hit)))) calc_sensitivity_specificity(iadhore, truth, "iADHoRe", fw) fp = open(orthofinder) orthofinder = set() next(fp) for row in fp: row = row.replace('"', "") atoms = row.replace(",", " ").split() genes = [x.strip() for x in atoms if not x.startswith("OG")] genes = [gene_name(x) for x in genes] pps = [x for x in genes if x in pivots] for p in pps: for g in genes: if p == g: continue orthofinder.add(tuple(sorted((p, g)))) #write_pairs(orthofinder, "orthofinder.pairs") calc_sensitivity_specificity(orthofinder, truth, "OrthoFinder", fw) fw.close()
def benchmark(args)
%prog benchmark at bedfile Compare SynFind, MCScanX, iADHoRe and OrthoFinder against the truth.
2.577518
2.19212
1.17581
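The benchmark() row above reduces each predictor's output to a set of sorted gene pairs and scores it against the truth set. calc_sensitivity_specificity() itself is not shown in this row; as a rough sketch (the pairs below are invented and the real reporting may include more fields), the scoring is plain set arithmetic:

# Rough sketch of scoring predicted syntenic pairs against a truth set of pairs;
# invented data, not the jcvi calc_sensitivity_specificity() implementation.
truth = {("A1", "B1"), ("A2", "B2"), ("A3", "B3")}
predicted = {("A1", "B1"), ("A2", "B2"), ("A4", "B9")}

tp = len(predicted & truth)
sensitivity = tp / len(truth)      # recall against the truth pairs
precision = tp / len(predicted)    # fraction of predictions that are true
print("Sn={:.0%} Pu={:.0%}".format(sensitivity, precision))  # Sn=67% Pu=67%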
p = OptionParser(cyntenator.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] fp = open(lastfile) filteredlastfile = lastfile + ".blast" fw = open(filteredlastfile, "w") for row in fp: b = BlastLine(row) if b.query == b.subject: continue print("\t".join((b.query, b.subject, str(b.score))), file=fw) fw.close() bedfiles = args[1:] fp = open(lastfile) b = BlastLine(next(fp)) subject = b.subject txtfiles = [] for bedfile in bedfiles: order = Bed(bedfile).order if subject in order: db = op.basename(bedfile).split(".")[0][:20] logging.debug("Found db: {0}".format(db)) txtfile = write_txt(bedfile) txtfiles.append(txtfile) db += ".txt" mm = MakeManager() for txtfile in txtfiles: outfile = txtfile + ".alignment" cmd = 'cyntenator -t "({0} {1})" -h blast {2} > {3}'\ .format(txtfile, db, filteredlastfile, outfile) mm.add((txtfile, db, filteredlastfile), outfile, cmd) mm.write()
def cyntenator(args)
%prog cyntenator athaliana.athaliana.last athaliana.bed Prepare input for Cyntenator.
3.852404
3.64122
1.057998
p = OptionParser(iadhore.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) lastfile = args[0] bedfiles = args[1:] blast_table = "blast_table.txt" fp = open(lastfile) seen = set() for row in fp: c = BlastLine(row) a, b = c.query, c.subject a, b = gene_name(a), gene_name(b) if a > b: a, b = b, a seen.add((a, b)) fw = open(blast_table, "w") for a, b in seen: print("\t".join((a, b)), file=fw) fw.close() logging.debug("A total of {0} pairs written to `{1}`"\ .format(len(seen), blast_table)) fw = open("config.txt", "w") for bedfile in bedfiles: pf, stanza = write_lst(bedfile) print("genome={0}".format(pf), file=fw) for seqid, fname in stanza: print(" ".join((seqid, fname)), file=fw) print(file=fw) print("blast_table={0}".format(blast_table), file=fw) print("cluster_type=colinear", file=fw) print("tandem_gap=10", file=fw) print("prob_cutoff=0.001", file=fw) print("gap_size=20", file=fw) print("cluster_gap=20", file=fw) print("q_value=0.9", file=fw) print("anchor_points=4", file=fw) print("alignment_method=gg2", file=fw) print("max_gaps_in_alignment=20", file=fw) print("output_path=i-adhore_out", file=fw) print("number_of_threads=4", file=fw) fw.close()
def iadhore(args)
%prog iadhore athaliana.athaliana.last athaliana.bed Wrap around iADHoRe.
3.101125
3.054992
1.015101
p = OptionParser(athalianatruth.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) atxt, bctxt = args g = Grouper() pairs = set() for txt in (atxt, bctxt): extract_groups(g, pairs, txt) fw = open("pairs", "w") for pair in sorted(pairs): print("\t".join(pair), file=fw) fw.close() fw = open("groups", "w") for group in list(g): print(",".join(group), file=fw) fw.close()
def athalianatruth(args)
%prog athalianatruth J_a.txt J_bc.txt Prepare pairs data for At alpha/beta/gamma.
2.825551
2.660889
1.061883
p = OptionParser(mcscanx.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) blastfile = args[0] bedfiles = args[1:] prefix = "_".join(op.basename(x)[:2] for x in bedfiles) symlink(blastfile, prefix + ".blast") allbedfile = prefix + ".gff" fw = open(allbedfile, "w") for i, bedfile in enumerate(bedfiles): prefix = chr(ord('A') + i) make_gff(bedfile, prefix, fw) fw.close()
def mcscanx(args)
%prog mcscanx athaliana.athaliana.last athaliana.bed Wrap around MCScanX.
2.915783
2.839213
1.026969
p = OptionParser(grass.__doc__) p.set_verbose() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) master, james = args fp = open(master) next(fp) master_store = defaultdict(set) for row in fp: atoms = row.split() s = set() for x in atoms[1:6]: m = x.split(",") s |= set(m) if '-' in s: s.remove('-') a = atoms[1] master_store[a] |= set(s) fp = open(james) next(fp) james_store = {} tandems = set() for row in fp: atoms = row.split() s = set() Os = set() for x in atoms[:-1]: m = x.split("||") if m[0].startswith("Os"): Os |= set(m) if m[0].startswith("http"): continue if m[0].startswith("chr"): m = ["proxy"] if "||" in x: tandems |= set(m) s |= set(m) for x in Os: james_store[x] = s jaccards = [] corr_jaccards = [] perfect_matches = 0 corr_perfect_matches = 0 for k, v in james_store.items(): if k not in master_store: continue m = master_store[k] jaccard = len(v & m) * 100 / len(v | m) jaccards.append(jaccard) diff = (v ^ m) - tandems corr_jaccard = 100 - len(diff) * 100 / len(v | m) corr_jaccards.append(corr_jaccard) if opts.verbose: print(k) print(v) print(m) print(diff) print(jaccard) if jaccard > 99: perfect_matches += 1 if corr_jaccard > 99: corr_perfect_matches += 1 logging.debug("Perfect matches: {0}".format(perfect_matches)) logging.debug("Perfect matches (corrected): {0}".format(corr_perfect_matches)) print("Jaccards:", SummaryStats(jaccards)) print("Corrected Jaccards:", SummaryStats(corr_jaccards))
def grass(args)
%prog grass coge_master_table.txt james.txt Validate SynFind pan-grass set against James. This set can be generated: https://genomevolution.org/r/fhak
2.9049
2.791263
1.040711
p = OptionParser(ecoli.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) master, querybed = args fp = open(master) header = next(fp) assert header[0] == '#' qorg = header.strip().split("\t")[1] qorg = qorg.split(":")[-1].strip() store = {} MISSING = ("proxy", "-") for row in fp: a, b, c = row.strip().split("\t")[1:4] store[a] = b in MISSING and c in MISSING bed = Bed(querybed) tags = [] for i, b in enumerate(bed): accn = b.accn if accn not in store: logging.warn("missing {0}".format(accn)) continue tags.append((store[accn], accn)) large = 4 # large segments II = [] II_large = [] for missing, aa in groupby(tags, key=lambda x: x[0]): aa = list(aa) if not missing: continue glist = list(a for missing, a in aa) II.append(glist) size = len(glist) if size >= large: II_large.append(glist) fw = must_open(opts.outfile, "w") for a, t in zip((II, II_large), ("", ">=4 ")): nmissing = sum(len(x) for x in a) logging.debug("A total of {0} {1}-specific {2}islands found with {3} genes.".\ format(len(a), qorg, t, nmissing)) for x in II: print(len(x), ",".join(x), file=fw)
def ecoli(args)
%prog ecoli coge_master_table.txt query.bed Perform gene presence / absence analysis in Ecoli master spreadsheet. Ecoli spreadsheets can be downloaded below: Ecoli K12 MG1655 (K) as query Regenerate this analysis: https://genomevolution.org/r/fggo Ecoli O157:H7 EDL933 (O) as query Regenerate this analysis: https://genomevolution.org/r/fgt7 Shigella flexneri 2a 301 (S) as query Regenerate this analysis: https://genomevolution.org/r/fgte Perform a similar analysis as in: Jin et al. (2002) Genome sequence of Shigella flexneri 2a: insights into pathogenicity through comparison with genomes of Escherichia coli K12 and O157. Nucleic Acids Research.
4.19033
4.09256
1.02389
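The presence/absence islands in ecoli() come from grouping consecutive query genes by their missing/present flag. A toy sketch of that grouping step with invented gene names (the real code additionally tracks island size against the `large` cutoff):

from itertools import groupby

# (missing_in_target, gene) flags in query genome order; invented data.
tags = [(False, "g1"), (True, "g2"), (True, "g3"), (True, "g4"),
        (False, "g5"), (True, "g6")]

islands = []
for missing, group in groupby(tags, key=lambda x: x[0]):
    genes = [g for _, g in group]
    if missing:
        islands.append(genes)
print(islands)  # [['g2', 'g3', 'g4'], ['g6']]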
p = OptionParser(cartoon.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x7") fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) # Panel A A = CartoonRegion(41) A.draw(root, .35, .85, strip=False, color=False) x1, x2 = A.x1, A.x2 lsg = "lightslategray" pad = .01 xc, yc = .35, .88 arrowlen = x2 - xc - pad arrowprops = dict(length_includes_head=True, width=.01, fc=lsg, lw=0, head_length=arrowlen * .15, head_width=.03) p = FancyArrow(xc - pad, yc, -arrowlen, 0, shape="left", **arrowprops) root.add_patch(p) p = FancyArrow(xc + pad, yc, arrowlen, 0, shape="right", **arrowprops) root.add_patch(p) yt = yc + 4 * pad root.text((x1 + xc) / 2, yt, "20 genes upstream", ha="center") root.text((x2 + xc) / 2, yt, "20 genes downstream", ha="center") root.plot((xc,), (yc,), "o", mfc='w', mec=lsg, mew=2, lw=2, color=lsg) root.text(xc, yt, "Query gene", ha="center") # Panel B A.draw(root, .35, .7, strip=False) RoundRect(root, (.07, .49), .56, .14, fc='y', alpha=.2) a = deepcopy(A) a.evolve(mode='S', target=10) a.draw(root, .35, .6) b = deepcopy(A) b.evolve(mode='F', target=8) b.draw(root, .35, .56) c = deepcopy(A) c.evolve(mode='G', target=6) c.draw(root, .35, .52) for x in (a, b, c): root.text(.64, x.y, "Score={0}".format(x.nonwhites), va="center") # Panel C A.truncate_between_flankers() a.truncate_between_flankers() b.truncate_between_flankers() c.truncate_between_flankers(target=6) plot_diagram(root, .14, .2, A, a, "S", "syntenic") plot_diagram(root, .37, .2, A, b, "F", "missing, with both flankers") plot_diagram(root, .6, .2, A, c, "G", "missing, with one flanker") labels = ((.04, .95, 'A'), (.04, .75, 'B'), (.04, .4, 'C')) panel_labels(root, labels) # Descriptions xt = .85 desc = ("Extract neighborhood", "of *window* size", "Count gene pairs within *window*", "Find regions above *score* cutoff", "Identify flankers", "Annotate syntelog class" ) for yt, t in zip((.88, .84, .64, .6, .3, .26), desc): root.text(xt, yt, markup(t), ha="center", va="center") root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "cartoon" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def cartoon(args)
%prog synteny.py Generate cartoon illustration of SynFind.
3.619598
3.581594
1.010611
from jcvi.formats.base import split p = OptionParser(parallel.__doc__) p.set_home("maker") p.set_tmpdir(tmpdir="tmp") p.set_grid_opts(array=True) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) genome, NN = args threaded = opts.threaded or 1 tmpdir = opts.tmpdir mkdir(tmpdir) tmpdir = get_abs_path(tmpdir) N = int(NN) assert 1 <= N < 1000, "Required: 1 < N < 1000!" outdir = "outdir" fs = split([genome, outdir, NN]) c = CTLFile("maker_opts.ctl") c.update_abs_path() if threaded > 1: c.update_tag("cpus", threaded) cwd = os.getcwd() dirs = [] for name in fs.names: fn = get_abs_path(name) bn = op.basename(name) dirs.append(bn) c.update_tag("genome", fn) mkdir(bn) sh("cp *.ctl {0}".format(bn)) os.chdir(bn) c.write_file("maker_opts.ctl") os.chdir(cwd) jobs = "jobs" fw = open(jobs, "w") print("\n".join(dirs), file=fw) fw.close() # Submit to grid ncmds = len(dirs) runfile = "array.sh" cmd = op.join(opts.maker_home, "bin/maker") if tmpdir: cmd += " -TMP {0}".format(tmpdir) engine = get_grid_engine() contents = arraysh.format(jobs, cmd) if engine == "SGE" \ else arraysh_ua.format(N, threaded, jobs, cmd) write_file(runfile, contents) if engine == "PBS": return # qsub script outfile = "maker.\$TASK_ID.out" p = GridProcess(runfile, outfile=outfile, errfile=outfile, arr=ncmds, grid_opts=opts) qsubfile = "qsub.sh" qsub = p.build() write_file(qsubfile, qsub)
def parallel(args)
%prog parallel genome.fasta N Partition the genome into parts and run separately. This is useful if MAKER is to be run on the grid.
4.754758
4.493413
1.058162
from jcvi.formats.gff import merge as gmerge p = OptionParser(merge.__doc__) p.set_home("maker") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) outdir, outputgff = args fsnames, suffix = get_fsnames(outdir) nfs = len(fsnames) cmd = op.join(opts.maker_home, "bin/gff3_merge") outfile = "merge.sh" write_file(outfile, mergesh.format(suffix, cmd)) # Generate per split directory # Note that gff3_merge write to /tmp, so I limit processes here to avoid # filling up disk space sh("parallel -j 8 merge.sh {} ::: " + " ".join(fsnames)) # One final output gffnames = glob("*.all.gff") assert len(gffnames) == nfs # Again, DO NOT USE gff3_merge to merge with a smallish /tmp/ area gfflist = "gfflist" fw = open(gfflist, "w") print("\n".join(gffnames), file=fw) fw.close() nlines = sum(1 for x in open(gfflist)) assert nlines == nfs # Be extra, extra careful to include all results gmerge([gfflist, "-o", outputgff]) logging.debug("Merged GFF file written to `{0}`".format(outputgff))
def merge(args)
%prog merge outdir output.gff Follow-up command to run after the grid jobs submitted by parallel() have completed.
6.340092
6.054099
1.04724
from jcvi.utils.counter import Counter p = OptionParser(validate.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) outdir, genome = args counter = Counter() fsnames, suffix = get_fsnames(outdir) dsfile = "{0}{1}/{0}.maker.output/{0}_master_datastore_index.log" dslogs = [dsfile.format(x, suffix) for x in fsnames] all_failed = [] for f, d in zip(fsnames, dslogs): dslog = DatastoreIndexFile(d) counter.update(dslog.scaffold_status.values()) all_failed.extend([(f, x) for x in dslog.failed]) cmd = 'tail maker.*.out | grep -c "now finished"' n = int(popen(cmd).read()) assert len(fsnames) == n print("ALL jobs have been finished", file=sys.stderr) nfailed = len(all_failed) if nfailed == 0: print("ALL scaffolds are completed with no errors", file=sys.stderr) return print("Scaffold status:", file=sys.stderr) print(counter, file=sys.stderr) failed = "FAILED" fw = open(failed, "w") print("\n".join(["\t".join((f, x)) for f, x in all_failed]), file=fw) fw.close() nlines = sum(1 for x in open("FAILED")) assert nlines == nfailed print("FAILED !! {0} instances.".format(nfailed), file=sys.stderr) # Rebuild the failed batch failed_ids = failed + ".ids" failed_fasta = failed + ".fasta" cmd = "cut -f2 {0}".format(failed) sh(cmd, outfile=failed_ids) if need_update((genome, failed_ids), failed_fasta): cmd = "faSomeRecords {0} {1} {2}".\ format(genome, failed_ids, failed_fasta) sh(cmd)
def validate(args)
%prog validate outdir genome.fasta Validate current folder after MAKER run and check for failures. Failed batch will be written to a directory for additional work.
4.380803
4.107612
1.066508
from jcvi.formats.bed import evaluate from jcvi.formats.gff import make_index p = OptionParser(evaluate.__doc__) p.add_option("--type", default="CDS", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) model_ids, gff_file, evidences_bed, fastafile = args type = set(opts.type.split(",")) g = make_index(gff_file) fp = open(model_ids) prefix = model_ids.rsplit(".", 1)[0] fwscores = open(prefix + ".scores", "w") for row in fp: cid = row.strip() b = next(g.parents(cid, 1)) query = "{0}:{1}-{2}".format(b.chrom, b.start, b.stop) children = [c for c in g.children(cid, 1)] cidbed = prefix + ".bed" fw = open(cidbed, "w") for c in children: if c.featuretype not in type: continue fw.write(c.to_bed()) fw.close() b = evaluate([cidbed, evidences_bed, fastafile, "--query={0}".format(query)]) print("\t".join((cid, b.score)), file=fwscores) fwscores.flush()
def batcheval(args)
%prog batcheval model.ids gff_file evidences.bed fastafile Get the accuracy for a list of models against evidences in the range of the genes. For example: $ %prog batcheval isoforms.ids all.gff3 proteins.bed scaffolds.fasta The scores for the models are written to an outfile named after the ids file, e.g. model.ids gives model.scores.
3.338075
3.009954
1.109012
bed_file = get_bed_file(gff_file, stype, key) cmd = "intersectBed -a {0} -b {1} -wao".format(split_bed, bed_file) cmd += " | cut -f4,10" p = popen(cmd) splits = defaultdict(set) for row in p: a, b = row.split() splits[a].add(b) return splits
def get_splits(split_bed, gff_file, stype, key)
Use intersectBed to find the fused gene => split genes mappings.
2.808934
2.614559
1.074343
from jcvi.formats.bed import evaluate bed_file = get_bed_file(gff_file, type, key) b = evaluate([bed_file, evidences_bed, sizesfile, "--query={0}".format(query)]) return b
def get_accuracy(query, gff_file, evidences_bed, sizesfile, type, key)
Get sensitivity, specificity and accuracy given gff_file, and a query range that looks like "chr1:1-10000".
4.979709
5.055483
0.985011
from jcvi.formats.bed import Bed p = OptionParser(split.__doc__) p.add_option("--key", default="Name", help="Key in the attributes to extract predictor.gff [default: %default]") p.add_option("--parents", default="match", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") p.add_option("--children", default="match_part", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) split_bed, evidences_bed, p1_gff, p2_gff, fastafile = args parents = opts.parents children = opts.children key = opts.key bed = Bed(split_bed) s1 = get_splits(split_bed, p1_gff, parents, key) s2 = get_splits(split_bed, p2_gff, parents, key) for b in bed: query = "{0}:{1}-{2}".format(b.seqid, b.start, b.end) b1 = get_accuracy(query, p1_gff, evidences_bed, fastafile, children, key) b2 = get_accuracy(query, p2_gff, evidences_bed, fastafile, children, key) accn = b.accn c1 = "|".join(s1[accn]) c2 = "|".join(s2[accn]) ac1 = b1.accuracy ac2 = b2.accuracy tag = p1_gff if ac1 >= ac2 else p2_gff tag = tag.split(".")[0] ac1 = "{0:.3f}".format(ac1) ac2 = "{0:.3f}".format(ac2) print("\t".join((accn, tag, ac1, ac2, c1, c2)))
def split(args)
%prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile Split MAKER models by checking against predictors (such as AUGUSTUS and FGENESH). For each region covered by a working model, find out the combination of predictors that gives the best accuracy against evidences (such as PASA). `split.bed` can be generated by pulling out a subset from a list of ids: $ python -m jcvi.formats.base join split.ids working.bed --column=0,3 --noheader | cut -f2-7 > split.bed
2.668696
2.443971
1.091951
p = OptionParser(datastore.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) ds, = args fp = open(ds) for row in fp: fn = row.strip() assert op.exists(fn) pp, logfile = op.split(fn) flog = open(fn) for row in flog: ctg, folder, status = row.split() if status != "FINISHED": continue gff_file = op.join(pp, folder, ctg + ".gff") assert op.exists(gff_file) print(gff_file)
def datastore(args)
%prog datastore datastore.log > gfflist.log Generate a list of gff filenames to merge. The `datastore.log` file can be generated by something like: $ find /usr/local/scratch/htang/EVM_test/gannotation/maker/1132350111853_default/i1/ -maxdepth 4 -name "*datastore*.log" > datastore.log
3.739252
3.642381
1.026596
from jcvi.formats.base import DictFile p = OptionParser(libsvm.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) csvfile, prefixids = args d = DictFile(prefixids) fp = open(csvfile) next(fp) for row in fp: atoms = row.split() klass = atoms[0] kp = klass.split("_")[0] klass = d.get(kp, "0") feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])] print(" ".join([klass] + feats))
def libsvm(args)
%prog libsvm csvfile prefix.ids Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping. Ga -1 Gr 1 So the feature name in the first column of the csvfile gets scanned for the prefix and mapped to different classes. Formatting spec: http://svmlight.joachims.org/
3.100321
2.757751
1.124221
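A minimal sketch of the csv-to-LIBSVM conversion that libsvm() performs, with the prefix map inlined instead of read from prefix.ids (class labels follow the docstring example; the feature rows are invented):

# Map the gene-name prefix to a LIBSVM class label, as in the docstring (Ga -1, Gr 1).
prefix_map = {"Ga": "-1", "Gr": "1"}

rows = [
    "Ga_00001 0.12 3 7.5",
    "Gr_00042 0.88 1 2.0",
]
for row in rows:
    atoms = row.split()
    klass = prefix_map.get(atoms[0].split("_")[0], "0")
    feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])]
    print(" ".join([klass] + feats))
# -1 1:0.12 2:3 3:7.5
# 1 1:0.88 2:1 3:2.0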
N = len(nodes) E = len(edges) A = np.zeros((E, N), dtype=int) for i, (a, b, distance) in enumerate(edges): A[i, a] = 1 A[i, b] = -1 K = np.eye(E, dtype=int) L = np.array([x[-1] for x in edges]) s = spring_system(A, K, L) return np.array([0] + [int(round(x, 0)) for x in s])
def determine_positions(nodes, edges)
Construct the problem instance to solve the positions of contigs. The input for spring_system() is A, K, L, which looks like the following. A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]]) K = np.eye(3, dtype=int) L = np.array([1, 2, 3]) For example, A-B distance 1, B-C distance 2, A-C distance 3, solve positions >>> determine_positions([0, 1, 2], [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) array([0, 1, 3])
3.101441
2.536464
1.222742
N = len(nodes) M = np.zeros((N, N), dtype=float) for a, b, w in edges: M[a, b] += w M = symmetrize(M) return get_signs(M, cutoff=cutoff, validate=False)
def determine_signs(nodes, edges, cutoff=1e-10)
Construct the orientation matrix for the pairs on N molecules. >>> determine_signs([0, 1, 2], [(0, 1, 1), (0, 2, -1), (1, 2, -1)]) array([ 1, 1, -1])
3.460259
3.877702
0.892348
# Is this a symmetric matrix? assert is_symmetric(M), "the matrix is not symmetric:\n{0}".format(str(M)) N, x = M.shape # eigh() works on symmetric matrix (Hermitian) w, v = np.linalg.eigh(M) m = np.argmax(w) mv = v[:, m] f = lambda x: (x if abs(x) > cutoff else 0) mv = [f(x) for x in mv] sign_array = np.array(np.sign(mv), dtype=int) # it does not really matter, but we prefer as few flippings as possible if np.sum(sign_array) < 0: sign_array = -sign_array if validate: diag = np.matrix(np.eye(N, dtype=int) * sign_array) final = diag * M * diag # The final result should have all pairwise in the same direction assert (final >= 0).all(), \ "result check fails:\n{0}".format(final) if not ambiguous: # Do we allow ambiguous orientation (=0) ? sign_array[sign_array == 0] = 1 return sign_array
def get_signs(M, cutoff=1e-10, validate=True, ambiguous=True)
Given a numpy array M that contains pairwise orientations, find the largest eigenvalue and associated eigenvector and return the signs for the eigenvector. This should correspond to the original orientations for the individual molecule. In the first example below, let's say 3 molecules A, B and C, A-B:same direction, A-C:opposite direction, B-C:opposite direction. The final solution is to flip C. >>> M = np.array([[0,1,-1],[1,0,-1],[-1,-1,0]]) >>> get_signs(M) array([ 1, 1, -1]) >>> M = np.array([[0,1,-1],[1,0,0],[-1,0,0]]) >>> get_signs(M) array([ 1, 1, -1])
5.168143
5.298134
0.975465
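The orientation-recovery idea in get_signs() can be reproduced with NumPy alone. This sketch mirrors the eigenvector logic on the first doctest matrix above; it is an illustration, not the library code, and skips the cutoff, validation and ambiguity handling:

import numpy as np

# Pairwise orientations: A-B same direction, A-C opposite, B-C opposite.
M = np.array([[0, 1, -1],
              [1, 0, -1],
              [-1, -1, 0]])

# The eigenvector of the largest eigenvalue carries the relative orientations.
w, v = np.linalg.eigh(M)
signs = np.sign(v[:, np.argmax(w)]).astype(int)

# Prefer as few flips as possible, matching the convention in get_signs().
if signs.sum() < 0:
    signs = -signs
print(signs)  # [ 1  1 -1] -> flip the third molecule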
# Linear equation is A'KAx = -A'KL C = np.dot(A.T, K) left = np.dot(C, A) right = - np.dot(C, L) left = left[1:, 1:] right = right[1:] x = np.linalg.solve(left, right) return x
def spring_system(A, K, L)
Solving the equilibrium positions of the objects, linked by springs of length L, stiffness of K, and connectivity matrix A. Then solving: F_nodes = -A'KAx - A'KL = 0 In the context of scaffolding, lengths (L) are inferred by mate inserts, stiffness (K) is inferred via the number of links, connectivity (A) is the contigs they connect. The mate pairs form the linkages between the contigs, and can be considered as "springs" of certain lengths. The "springs" are stretched or compressed if the distance deviates from the expected insert size. See derivation in Dayarian et al. 2010, the SOPRA paper. o---------o--------------o x0 x1 x2 |~~~~L1~~~|~~~~~~L2~~~~~~| |~~~~~~~~~~L3~~~~~~~~~~~~| >>> A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]]) >>> K = np.eye(3, dtype=int) >>> L = np.array([1, 2, 3]) >>> print(spring_system(A, K, L)) [ 1. 3.]
4.700365
4.598763
1.022093
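As a self-contained check of the spring model described above, the docstring example can be solved directly with NumPy: form the normal equations A'KAx = -A'KL and pin the first contig at x0 = 0 by dropping the first row and column. This is a sketch of the technique, equivalent to what spring_system() does:

import numpy as np

# Three contigs with pairwise distances x0-x1 = 1, x1-x2 = 2, x0-x2 = 3.
A = np.array([[1, -1, 0],
              [0, 1, -1],
              [1, 0, -1]])
K = np.eye(3, dtype=int)
L = np.array([1, 2, 3])

C = A.T @ K
left = (C @ A)[1:, 1:]      # drop row/column 0 to fix x0 = 0
right = (-C @ L)[1:]
x = np.linalg.solve(left, right)
print(x)  # [1. 3.] -> positions x1 = 1, x2 = 3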
p = OptionParser(fix.__doc__) p.add_option("--ignore_sym_pat", default=False, action="store_true", help="Do not fix names matching symbol patterns i.e." + \ " names beginning or ending with gene symbols or a series of numbers." + \ " e.g. `ARM repeat superfamily protein`, `beta-hexosaminidase 3`," + \ " `CYCLIN A3;4`, `WALL ASSOCIATED KINASE (WAK)-LIKE 10`") p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) csvfile, = args fp = open(csvfile) fw = must_open(opts.outfile, "w") for row in fp: if row[0] == '#': continue if row.strip() == "": continue atoms = row.rstrip("\r\n").split("\t") name, hit, ahrd_code, desc = atoms[:4] \ if len(atoms) > 2 else \ (atoms[0], None, None, atoms[-1]) newdesc = fix_text(desc, ignore_sym_pat=opts.ignore_sym_pat) if hit and hit.strip() != "" and newdesc == Hypothetical: newdesc = "conserved " + newdesc print("\t".join(atoms[:4] + [newdesc] + atoms[4:]), file=fw)
def fix(args)
%prog fix ahrd.csv > ahrd.fixed.csv Fix ugly names from Uniprot.
5.867056
5.490234
1.068635
p = OptionParser(merge.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) csvfiles = args cf = csvfiles[0] fp = open(cf) for row in fp: if row.startswith("Protein"): break header = row.rstrip() print(header) seen = set() for cf in csvfiles: fp = open(cf) for row in fp: if row[0] == '#': continue if row.strip() == "": continue if row.strip() == header: continue atoms = row.rstrip().split("\t") id = atoms[0] if id in seen: logging.error("ID `{0}` ignored.".format(id)) continue seen.add(id) print(row.strip())
def merge(args)
%prog merge output/*.csv > ahrd.csv Merge AHRD results, remove redundant headers, empty lines, etc. If there are multiple lines containing the same ID (first column), whichever comes first is retained.
2.830992
2.734676
1.03522
p = OptionParser(batch.__doc__) ahrd_weights = { "blastp": [0.5, 0.3, 0.2], "blastx": [0.6, 0.4, 0.0] } blast_progs = tuple(ahrd_weights.keys()) p.add_option("--path", default="~/code/AHRD/", help="Path where AHRD is installed [default: %default]") p.add_option("--blastprog", default="blastp", choices=blast_progs, help="Specify the blast program being run. Based on this option," \ + " the AHRD parameters (score_weights) will be modified." \ + " [default: %default]") p.add_option("--iprscan", default=None, help="Specify path to InterProScan results file if available." \ + " If specified, the yml conf file will be modified" \ + " appropriately. [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) splits, output = args mkdir(output) bit_score, db_score, ovl_score = ahrd_weights[opts.blastprog] for f in glob("{0}/*.fa*".format(splits)): fb = op.basename(f).rsplit(".", 1)[0] fw = open(op.join(output, fb + ".yml"), "w") path = op.expanduser(opts.path) dir = op.join(path, "test/resources") outfile = op.join(output, fb + ".csv") interpro = iprscanTemplate.format(opts.iprscan) if opts.iprscan else "" print(Template.format(dir, fb, f, outfile, bit_score, db_score, ovl_score, interpro), file=fw) if opts.iprscan: if not op.lexists("interpro.xml"): symlink(op.join(iprscan_datadir, "interpro.xml"), "interpro.xml") if not op.lexists("interpro.dtd"): symlink(op.join(iprscan_datadir, "interpro.dtd"), "interpro.dtd")
def batch(args)
%prog batch splits output The arguments are two folders. Input FASTA sequences are in splits/. Output csv files are in output/. Must have folders swissprot/, tair/, trembl/ that contain the respective BLAST output. Once finished, you can run, for example: $ parallel java -Xmx2g -jar ~/code/AHRD/dist/ahrd.jar {} ::: output/*.yml
3.946648
3.587261
1.100184
if gff3: s = s.replace('+', 'PlusSign') d = parse_qs(s, keep_attr_order=keep_attr_order) for key in d: d[key][0] = unquote(d[key][0].replace('PlusSign', '+').replace('"', '')) else: attributes = s.split(";") d = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list) for a in attributes: a = a.strip() if ' ' not in a: continue key, val = a.split(' ', 1) val = unquote(val.replace('"', '').replace('=', ' ').strip()) d[key].append(val) for key, val in d.items(): d[key] = list(flatten([v.split(",") for v in val])) return d
def make_attributes(s, gff3=True, keep_attr_order=True)
In GFF3, the last column is typically: ID=cds00002;Parent=mRNA00002; In GFF2, the last column is typically: Gene 22240.t000374; Note "Carbonic anhydrase"
3.084357
3.37514
0.913846
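The GFF2 branch described above boils down to splitting on ';', then on the first space. A simplified stand-alone sketch using only the standard library (the real make_attributes() additionally handles '+' escaping in GFF3 mode, '=' replacement and ordered dicts):

from urllib.parse import unquote

s = 'Gene 22240.t000374; Note "Carbonic anhydrase"'
d = {}
for a in s.split(";"):
    a = a.strip()
    if " " not in a:
        continue
    key, val = a.split(" ", 1)
    d.setdefault(key, []).append(unquote(val.replace('"', "").strip()))
print(d)  # {'Gene': ['22240.t000374'], 'Note': ['Carbonic anhydrase']}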
from jcvi.utils.range import Range if score or id: _score = score if score else obj.score _id = id if id else obj.id return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \ score=_score, id=_id) elif strand: return (obj.seqid, obj.start, obj.end, obj.strand) return (obj.seqid, obj.start, obj.end)
def to_range(obj, score=None, id=None, strand=None)
Given a gffutils object, convert it to a range object
2.339034
2.341828
0.998807
p = OptionParser(addparent.__doc__) p.add_option("--childfeat", default="CDS", help="Type of children feature") p.add_option("--parentfeat", default="mRNA", help="Type of merged feature") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args gff = Gff(gff_file) data = defaultdict(list) for g in gff: if g.type != opts.childfeat: continue data[g.parent].append(g) logging.debug("A total of {0} {1} features clustered".\ format(len(data), opts.childfeat)) parents = [] for parent, dd in data.items(): d = dd[0] start, end = min(x.start for x in dd), max(x.end for x in dd) gffline = "\t".join(str(x) for x in \ (d.seqid, d.source, opts.parentfeat, start, end, ".", d.strand, ".", "ID={0};Name={0}".format(parent))) parents.append(GffLine(gffline)) parents.sort(key=lambda x: (x.seqid, x.start)) logging.debug("Merged feature sorted") fw = must_open(opts.outfile, "w") for parent in parents: print(parent, file=fw) parent_id = parent.id for d in data[parent_id]: if d.accn == parent_id: new_id = "{0}.{1}1".format(parent_id, opts.childfeat) d.set_attr("ID", new_id) d.set_attr("Name", new_id, update=True) print(d, file=fw) fw.close()
def addparent(args)
%prog addparent file.gff Merge sister features and infer parents.
2.512474
2.440731
1.029394
_strand = 1 if strand == '+' else -1 return fasta.sequence({'chr': seqid, 'start': start, 'stop': stop, \ 'strand': _strand})
def _fasta_slice(fasta, seqid, start, stop, strand)
Return slice of fasta, given (seqid, start, stop, strand)
4.024476
3.937519
1.022084
if len(codon) != 3: return False if type == 'start': if codon != 'ATG': return False elif type == 'stop': if not any(_codon == codon for _codon in ('TGA', 'TAG', 'TAA')): return False else: logging.error("`{0}` is not a valid codon type. ".format(type) + \ "Should be one of (`start` or `stop`)") sys.exit() return True
def is_valid_codon(codon, type='start')
Given a codon sequence, check if it is a valid start/stop codon
2.671605
2.659463
1.004566
s, e = codon_span[0], codon_span[1] while True: if (type == 'start' and strand == '+') or \ (type == 'stop' and strand == '-'): s, e = s - 3, e - 3 else: s, e = s + 3, e + 3 codon = _fasta_slice(genome, seqid, s, e, strand) is_valid = is_valid_codon(codon, type=type) if not is_valid: if type == 'start': ## if we are scanning upstream for a valid start codon, ## stop scanning when we encounter a stop if is_valid_codon(codon, type='stop'): return (None, None) elif type == 'stop': ## if we are scanning downstream for a valid stop codon, ## stop scanning when we encounter a start if is_valid_codon(codon, type='start'): return (None, None) continue break return (s, e)
def scan_for_valid_codon(codon_span, strand, seqid, genome, type='start')
Given a codon span, strand and reference seqid, scan upstream/downstream to find a valid in-frame start/stop codon
2.222182
2.146001
1.035499
p = OptionParser(sizes.__doc__) p.set_outfile() p.add_option("--parents", dest="parents", default="mRNA", help="parent feature(s) for which size is to be calculated") p.add_option("--child", dest="child", default="CDS", help="child feature to use for size calculations") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args parents, cftype = set(opts.parents.split(",")), opts.child gff = make_index(gffile) fw = must_open(opts.outfile, "w") for parent in parents: for feat in gff.features_of_type(parent, order_by=('seqid', 'start')): fsize = 0 fsize = feat.end - feat.start + 1 \ if cftype == parent else \ gff.children_bp(feat, child_featuretype=cftype) print("\t".join(str(x) for x in (feat.id, fsize)), file=fw) fw.close()
def sizes(args)
%prog sizes gffile Given a gff file of features, calculate the sizes of the chosen parent features based on summation of sizes of their child features. For example, for parent 'mRNA' and child 'CDS' feature types, calculate sizes of mRNA by summing the sizes of the disjoint CDS parts.
3.274271
2.877501
1.137887
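The size computation in sizes() delegates to gffutils' children_bp(), which totals the bases covered by the child features; conceptually it is just a sum over the child intervals. A minimal stand-alone sketch with invented coordinates (1-based, inclusive, as in GFF):

# Hypothetical CDS parts of one mRNA as (start, end) pairs.
cds_parts = [(101, 200), (301, 450), (601, 650)]

# Size of the mRNA based on its CDS children, as --parents=mRNA --child=CDS would report.
mrna_size = sum(end - start + 1 for start, end in cds_parts)
print(mrna_size)  # 300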
from jcvi.utils.grouper import Grouper from itertools import combinations p = OptionParser(cluster.__doc__) p.add_option("--slop", default=False, action="store_true", help="allow minor variation in terminal 5'/3' UTR" + \ " start/stop position [default: %default]") p.add_option("--inferUTR", default=False, action="store_true", help="infer presence of UTRs from exon coordinates") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args slop = opts.slop inferUTR = opts.inferUTR gff = make_index(gffile) fw = must_open(opts.outfile, "w") print("##gff-version 3", file=fw) seen = {} for gene in gff.features_of_type('gene', order_by=('seqid', 'start')): g = Grouper() mrnas = list(combinations([mrna for mrna in gff.children(gene, featuretype='mRNA', order_by=('start'))], 2)) if len(mrnas) > 0: for mrna1, mrna2 in mrnas: mrna1s, mrna2s = gff.children_bp(mrna1, child_featuretype='exon'), \ gff.children_bp(mrna2, child_featuretype='exon') g.join((mrna1.id, mrna1s)) g.join((mrna2.id, mrna2s)) if match_subfeats(mrna1, mrna2, gff, gff, featuretype='CDS'): res = [] ftypes = ['exon'] if inferUTR else ['five_prime_UTR', 'three_prime_UTR'] for ftype in ftypes: res.append(match_subfeats(mrna1, mrna2, gff, gff, featuretype=ftype, slop=slop)) if all(r == True for r in res): g.join((mrna1.id, mrna1s), (mrna2.id, mrna2s)) else: for mrna1 in gff.children(gene, featuretype='mRNA', order_by=('start')): mrna1s = gff.children_bp(mrna1, child_featuretype='exon') g.join((mrna1.id, mrna1s)) print(gene, file=fw) for group in g: group.sort(key=lambda x: x[1], reverse=True) mrnas = [el[0] for el in group] m = mrnas[0] _mrnaid = [] for x in mrnas: if x not in _mrnaid: _mrnaid.append(x) mrnaid = "{0}".format("-".join(_mrnaid)) if mrnaid not in seen: seen[mrnaid] = 0 else: seen[mrnaid] += 1 mrnaid = "{0}-{1}".format(mrnaid, seen[mrnaid]) _mrna = gff[m] _mrna.attributes['ID'] = [mrnaid] _mrna.attributes['Parent'] = [gene.id] children = gff.children(m, order_by='start') print(_mrna, file=fw) for child in children: child.attributes['ID'] = ["{0}".format(child.id)] child.attributes['Parent'] = [mrnaid] print(child, file=fw) fw.close()
def cluster(args)
%prog cluster gffile Given a gff file of gene structures (multiple transcripts per gene locus), cluster/consolidate all transcripts based on shared splicing structure. If `slop` is enabled, clustering/consolidation will collapse any variation in terminal UTR lengths, keeping only the longest as representative.
2.358458
2.27633
1.036079
from jcvi.formats.base import SetFile from jcvi.formats.bed import BedSummary from jcvi.utils.table import tabulate p = OptionParser(summary.__doc__) p.add_option("--isoform", default=False, action="store_true", help="Find longest isoform of each id") p.add_option("--ids", help="Only include features from certain IDs") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args ids = opts.ids if ids: ids = SetFile(ids) logging.debug("Total ids loaded: {0}".format(len(ids))) if opts.isoform: pids = set() gff = Gff(gff_file) for g in gff: if g.type != "mRNA": continue if g.parent not in ids: continue if "longest" not in g.attributes: pids = set(x + ".1" for x in ids) break if g.attributes["longest"][0] == "0": continue pids.add(g.id) ids = pids logging.debug("After checking longest: {0}".format(len(ids))) # Collects aliases gff = Gff(gff_file) for g in gff: if g.name in ids: ids.add(g.id) logging.debug("Total ids including aliases: {0}".format(len(ids))) gff = Gff(gff_file) beds = defaultdict(list) for g in gff: if ids and not (g.id in ids or g.name in ids or g.parent in ids): continue beds[g.type].append(g.bedline) table = {} for type, bb in sorted(beds.items()): bs = BedSummary(bb) table[(type, "Features")] = bs.nfeats table[(type, "Unique bases")] = bs.unique_bases table[(type, "Total bases")] = bs.total_bases print(tabulate(table), file=sys.stdout)
def summary(args)
%prog summary gffile Print summary stats for features of different types.
2.746888
2.674602
1.027027
from Bio.Alphabet import generic_dna try: from BCBio import GFF except ImportError: print("You need to install dep first: $ easy_install bcbio-gff", file=sys.stderr) p = OptionParser(gb.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, fasta_file = args pf = op.splitext(gff_file)[0] out_file = pf + ".gb" fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna)) gff_iter = GFF.parse(gff_file, fasta_input) SeqIO.write(gff_iter, out_file, "genbank")
def gb(args)
%prog gb gffile fastafile Convert GFF3 to Genbank format. Recipe taken from: <http://www.biostars.org/p/2492/>
2.598323
2.422902
1.072401
from jcvi.formats.fasta import longestorf p = OptionParser(orient.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, fastafile = args idsfile = fastafile.rsplit(".", 1)[0] + ".orf.ids" if need_update(fastafile, idsfile): longestorf([fastafile, "--ids"]) orientations = DictFile(idsfile) gff = Gff(ingff3) flipped = 0 for g in gff: id = None for tag in ("ID", "Parent"): if tag in g.attributes: id, = g.attributes[tag] break assert id orientation = orientations.get(id, "+") if orientation == '-': g.strand = {"+": "-", "-": "+"}[g.strand] flipped += 1 print(g) logging.debug("A total of {0} features flipped.".format(flipped))
def orient(args)
%prog orient in.gff3 features.fasta > out.gff3 Change the feature orientations based on translation. This script is often needed in fixing the strand information after mapping RNA-seq transcripts. You can generate the features.fasta similar to this command: $ %prog load --parents=EST_match --children=match_part clc.JCVIv4a.gff JCVI.Medtr.v4.fasta -o features.fasta
3.276163
3.210238
1.020536
p = OptionParser(rename.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, switch = args switch = DictFile(switch) gff = Gff(ingff3) for g in gff: id, = g.attributes["ID"] newname = switch.get(id, id) g.attributes["ID"] = [newname] if "Parent" in g.attributes: parents = g.attributes["Parent"] g.attributes["Parent"] = [switch.get(x, x) for x in parents] g.update_attributes() print(g)
def rename(args)
%prog rename in.gff3 switch.ids > reindexed.gff3 Change the IDs within the gff3.
2.848674
2.359897
1.207118
p = OptionParser(parents.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, idsfile = args g = make_index(gff_file) fp = open(idsfile) for row in fp: cid = row.strip() b = next(g.parents(cid, 1)) print("\t".join((cid, b.id)))
def parents(args)
%prog parents gffile models.ids Find the parents given a list of IDs in "models.ids".
3.82469
3.235641
1.18205
p = OptionParser(filter.__doc__) p.add_option("--type", default="mRNA", help="The feature to scan for the attributes [default: %default]") g1 = OptionGroup(p, "Filter by identity/coverage attribute values") g1.add_option("--id", default=95, type="float", help="Minimum identity [default: %default]") g1.add_option("--coverage", default=90, type="float", help="Minimum coverage [default: %default]") g1.add_option("--nocase", default=False, action="store_true", help="Case insensitive lookup of attribute names [default: %default]") p.add_option_group(g1) g2 = OptionGroup(p, "Filter by child feature bp length") g2.add_option("--child_ftype", default=None, type="str", help="Child featuretype to consider") g2.add_option("--child_bp", default=None, type="int", help="Filter by total bp of children of chosen ftype") p.add_option_group(g2) p.set_outfile() opts, args = p.parse_args(args) otype, oid, ocov = opts.type, opts.id, opts.coverage cftype, clenbp = opts.child_ftype, opts.child_bp id_attr, cov_attr = "Identity", "Coverage" if opts.nocase: id_attr, cov_attr = id_attr.lower(), cov_attr.lower() if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gffdb = make_index(gffile) bad = set() ptype = None for g in gffdb.features_of_type(otype, order_by=('seqid', 'start')): if not ptype: parent = list(gffdb.parents(g)) ptype = parent[0].featuretype \ if len(parent) > 0 else otype if cftype and clenbp: if gffdb.children_bp(g, child_featuretype=cftype) < clenbp: bad.add(g.id) elif oid and ocov: identity = float(g.attributes[id_attr][0]) coverage = float(g.attributes[cov_attr][0]) if identity < oid or coverage < ocov: bad.add(g.id) logging.debug("{0} bad accns marked.".format(len(bad))) fw = must_open(opts.outfile, "w") for g in gffdb.features_of_type(ptype, order_by=('seqid', 'start')): if ptype != otype: feats = list(gffdb.children(g, featuretype=otype, order_by=('start'))) ok_feats = [f for f in feats if f.id not in bad] if len(ok_feats) > 0: print(g, file=fw) for feat in ok_feats: print(feat, file=fw) for child in gffdb.children(feat, order_by=('start')): print(child, file=fw) else: if g.id not in bad: print(g, file=fw) for child in gffdb.children(g, order_by=('start')): print(child, file=fw) fw.close()
def filter(args)
%prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features
2.386064
2.303233
1.035963
p = OptionParser(gapsplit.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) for g in gff: if re.match("EST_match", g.type): match = re.search(r'\S+ (\d+) \d+ ([\s{1}\-])', g.attributes["Target"][0]) if match.group(2) == "-": strand = match.group(2) else: strand = "+" g.attributes["Target"][0] = " ".join(str(x) \ for x in [g.attributes["Target"][0].rstrip(), strand]) if g.strand == "?": g.strand = strand else: match = re.match(r'\S+ (\d+) \d+', g.attributes["Target"][0]) target_start = int(match.group(1)) re_cigar = re.compile(r'(\D+)(\d+)'); cigar = g.attributes["Gap"][0].split(" ") g.attributes["Gap"] = None parts = [] if g.strand == "+": for event in cigar: match = re_cigar.match(event) op, count = match.group(1), int(match.group(2)) if op in "IHS": target_start += count elif op in "DN": g.start += count elif op == "P": continue else: parts.append([g.start, g.start + count - 1, \ target_start, target_start + count - 1]) g.start += count target_start += count else: for event in cigar: match = re_cigar.match(event) op, count = match.group(1), int(match.group(2)) if op in "IHS": target_start += count elif op in "DN": g.end -= count elif op == "P": continue else: parts.append([g.end - count + 1, g.end, \ target_start, target_start + count - 1]) g.end -= count target_start += count g.update_attributes() print(g) parent = g.attributes["Name"][0] g.type = "match_part" g.attributes.clear() for part in parts: g.start, g.end = part[0], part[1] g.score, g.strand, g.phase = ".", g.strand, "." if re.match("EST", g.type): target_list = [parent, part[2], part[3], g.strand] else: target_list = [parent, part[2], part[3]] target = " ".join(str(x) for x in target_list) g.attributes["Parent"] = [parent] g.attributes["Target"] = [target] g.update_attributes() print(g)
def gapsplit(args)
%prog gapsplit gffile > split.gff Read in the gff (normally generated by GMAP) and print it out after splitting each feature into one parent and multiple child features based on alignment information encoded in CIGAR string.
2.458143
2.390766
1.028182
p = OptionParser(fixboundaries.__doc__) p.add_option("--type", default="gene", type="str", help="Feature type for which to adjust boundaries") p.add_option("--child_ftype", default="mRNA", type="str", help="Child featuretype(s) to use for identifying boundaries") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gffdb = make_index(gffile) fw = must_open(opts.outfile, "w") for f in gffdb.all_features(order_by=('seqid', 'start')): if f.featuretype == opts.type: child_coords = [] for cftype in opts.child_ftype.split(","): for c in gffdb.children(f, featuretype=cftype, order_by=('start')): child_coords.append((c.start, c.stop)) f.start, f.stop = range_minmax(child_coords) print(f, file=fw) fw.close()
def fixboundaries(args)
%prog fixboundaries gffile --type="gene" --child_ftype="mRNA" > gffile.fixed Adjust the boundary coordinates of parent features based on the range chain of their child features, extracting the min and max values.
2.792766
2.356482
1.185142
p = OptionParser(liftover.__doc__) p.add_option("--tilesize", default=50000, type="int", help="The size for each tile [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) for g in gff: seqid = g.seqid seqid, tilenum = seqid.rsplit(".", 1) tilenum = int(tilenum) g.seqid = seqid offset = tilenum * opts.tilesize g.start += offset g.end += offset print(g)
def liftover(args)
%prog liftover gffile > liftover.gff Adjust gff coordinates based on tile number. For example, "gannotation.asmbl.000095.7" is the 8-th tile on asmbl.000095.
2.474503
2.16694
1.141934
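The lift-over above is a fixed offset: tile index times tile size, recovered from the trailing field of the seqid. Using the docstring's example seqid and the default --tilesize of 50000 (the feature coordinates below are invented):

# "gannotation.asmbl.000095.7" is the 8th tile (index 7) of asmbl.000095.
seqid, tilenum = "gannotation.asmbl.000095.7".rsplit(".", 1)
offset = int(tilenum) * 50000
start, end = 1200, 3400   # local coordinates on the tile, invented
print(seqid, start + offset, end + offset)
# gannotation.asmbl.000095 351200 353400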
from jcvi.utils.range import Range, range_piles ranges = [Range(a.seqid, a.start, a.end, 0, i) \ for i, a in enumerate(allgenes)] for pile in range_piles(ranges): yield [allgenes[x] for x in pile]
def get_piles(allgenes)
Before running uniq, we need to compute all the piles. The piles are a set of redundant features we want to get rid of. Input is a list of GffLine features. Output is a list of lists of features, i.e. the distinct "piles".
4.430434
4.237797
1.045457
f1c, f2c = list(dbx1.children(f1, featuretype=featuretype, order_by='start')), \ list(dbx2.children(f2, featuretype=featuretype, order_by='start')) lf1c, lf2c = len(f1c), len(f2c) if match_nchildren(f1c, f2c): if lf1c > 0 and lf2c > 0: exclN = set() if featuretype.endswith('UTR') or featuretype == 'exon': N = [] if featuretype.startswith('five_prime'): N = [1] if f1.strand == "+" else [lf1c] elif featuretype.startswith('three_prime'): N = [lf1c] if f1.strand == "+" else [1] else: # infer UTR from exon collection N = [1] if 1 == lf1c else [1, lf1c] for n in N: if match_Nth_child(f1c, f2c, N=n, slop=slop): exclN.add(n-1) else: return False for i, (cf1, cf2) in enumerate(zip(f1c, f2c)): if i in exclN: continue if not match_span(cf1, cf2): return False else: if (lf1c, lf2c) in [(0, 1), (1, 0)] and slop \ and featuretype.endswith('UTR'): return True return False return True
def match_subfeats(f1, f2, dbx1, dbx2, featuretype=None, slop=False)
Given 2 gffutils features located in 2 separate gffutils databases, iterate through all subfeatures of a certain type and check whether they are identical or not. The `slop` parameter allows for variation in the terminal UTR region.
2.889957
2.848741
1.014468
supported_modes = ("span", "score") p = OptionParser(uniq.__doc__) p.add_option("--type", default="gene", help="Types of features to non-redundify [default: %default]") p.add_option("--mode", default="span", choices=supported_modes, help="Pile mode [default: %default]") p.add_option("--best", default=1, type="int", help="Use best N features [default: %default]") p.add_option("--name", default=False, action="store_true", help="Non-redundify Name attribute [default: %default]") p.add_option("--iter", default="2", choices=("1", "2"), help="Number of iterations to grab children [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args mode = opts.mode bestn = opts.best allgenes = import_feats(gffile, opts.type) g = get_piles(allgenes) bestids = set() for group in g: if mode == "span": scores_group = [(- x.span, x) for x in group] else: scores_group = [(- float(x.score), x) for x in group] scores_group.sort() seen = set() for score, x in scores_group: if len(seen) >= bestn: break name = x.attributes["Name"][0] if opts.name else x.accn if name in seen: continue seen.add(name) bestids.add(x.accn) populate_children(opts.outfile, bestids, gffile, iter=opts.iter)
def uniq(args)
%prog uniq gffile > uniq.gff Remove redundant gene models. For overlapping gene models, take the longest gene. A second scan takes only the genes selected. --mode controls whether you want the larger feature or the higher-scoring feature. --best controls how many redundant features to keep, e.g. 10 for est2genome.
3.105157
2.960523
1.048854
valid_sort_methods = ("unix", "topo") p = OptionParser(sort.__doc__) p.add_option("--method", default="unix", choices=valid_sort_methods, help="Specify sort method [default: %default]") p.add_option("-i", dest="inplace", default=False, action="store_true", help="If doing a unix sort, perform sort inplace [default: %default]") p.set_tmpdir() p.set_outfile() p.set_home("gt") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args sortedgff = opts.outfile if opts.inplace: if opts.method == "topo" or (opts.method == "unix" and gffile in ("-", "stdin")): logging.error("Cannot perform inplace sort when method is `topo`" + \ " or method is `unix` and input is `stdin` stream") sys.exit() if opts.method == "unix": cmd = "sort" cmd += " -k1,1 -k4,4n {0}".format(gffile) if opts.tmpdir: cmd += " -T {0}".format(opts.tmpdir) if opts.inplace: cmd += " -o {0}".format(gffile) sortedgff = None sh(cmd, outfile=sortedgff) elif opts.method == "topo": GT_HOME = opts.gt_home if not op.isdir(GT_HOME): logging.error("GT_HOME={0} directory does not exist".format(GT_HOME)) sys.exit() cmd = "{0}".format(op.join(GT_HOME, "bin", "gt")) cmd += " gff3 -sort -tidy -retainids -addids no {0}".format(gffile) sh(cmd, outfile=sortedgff)
def sort(args)
%prog sort gffile Sort gff file using plain old unix sort based on [chromosome, start coordinate], or topologically based on the hierarchy of features using the gt (genometools) toolkit.
3.110766
2.903384
1.071428
p = OptionParser(fromgtf.__doc__) p.add_option("--transcript_id", default="transcript_id", help="Field name for transcript [default: %default]") p.add_option("--gene_id", default="gene_id", help="Field name for gene [default: %default]") p.add_option("--augustus", default=False, action="store_true", help="Input is AUGUSTUS gtf [default: %default]") p.set_home("augustus") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gtffile, = args outfile = opts.outfile if opts.augustus: ahome = opts.augustus_home s = op.join(ahome, "scripts/gtf2gff.pl") cmd = "{0} --gff3 < {1} --out={2}".format(s, gtffile, outfile) sh(cmd) return gff = Gff(gtffile) fw = must_open(outfile, "w") transcript_id = opts.transcript_id gene_id = opts.gene_id nfeats = 0 for g in gff: if g.type in ("transcript", "mRNA"): g.type = "mRNA" g.update_tag(transcript_id, "ID") g.update_tag("mRNA", "ID") g.update_tag(gene_id, "Parent") g.update_tag("Gene", "Parent") elif g.type in ("exon", "CDS") or "UTR" in g.type: g.update_tag("transcript_id", "Parent") g.update_tag(g.type, "Parent") elif g.type == "gene": g.update_tag(gene_id, "ID") g.update_tag("Gene", "ID") else: assert 0, "Don't know how to deal with {0}".format(g.type) g.update_attributes() print(g, file=fw) nfeats += 1 logging.debug("A total of {0} features written.".format(nfeats))
def fromgtf(args)
%prog fromgtf gtffile Convert gtf to gff file. In gtf, the "transcript_id" of the transcript/mRNA feature is converted to "ID=", and the "transcript_id" of exon/CDS features is converted to "Parent=".
2.229429
2.170307
1.027241
p = OptionParser(frombed.__doc__) p.add_option("--type", default="match", help="GFF feature type [default: %default]") p.add_option("--source", default="default", help="GFF source qualifier [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args bed = Bed(bedfile) for b in bed: print(b.gffline(type=opts.type, source=opts.source))
def frombed(args)
%prog frombed bed_file [--options] > gff_file Convert bed to gff file. In bed, the accn is converted to the 'ID' attribute. Default type is `match` and default source is `default`.
2.65457
2.154772
1.231949
p = OptionParser(fromsoap.__doc__) p.add_option("--type", default="nucleotide_match", help="GFF feature type [default: %default]") p.add_option("--source", default="soap", help="GFF source qualifier [default: %default]") p.set_fixchrnames(orgn="maize") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) soapfile, = args pad0 = len(str(sum(1 for line in open(soapfile)))) fw = must_open(opts.outfile, "w") fp = must_open(soapfile) for idx, line in enumerate(fp): if opts.fix_chr_name: from jcvi.utils.cbook import fixChromName line = fixChromName(line, orgn=opts.fix_chr_name) atoms = line.strip().split("\t") attributes = "ID=match{0};Name={1}".format(str(idx).zfill(pad0), atoms[0]) start, end = int(atoms[8]), int(atoms[5]) + int(atoms[8]) - 1 seqid = atoms[7] print("\t".join(str(x) for x in (seqid, opts.source, opts.type, \ start, end, ".", atoms[6], ".", attributes)), file=fw)
def fromsoap(args)
%prog fromsoap soapfile > gff_file
3.687697
3.341352
1.103654
p = OptionParser(gtf.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) transcript_info = AutoVivification() for g in gff: if g.type.endswith(("RNA", "transcript")): if "ID" in g.attributes and "Parent" in g.attributes: transcript_id = g.get_attr("ID") gene_id = g.get_attr("Parent") elif "mRNA" in g.attributes and "Gene" in g.attributes: transcript_id = g.get_attr("mRNA") gene_id = g.get_attr("Gene") else: transcript_id = g.get_attr("ID") gene_id = transcript_id transcript_info[transcript_id]["gene_id"] = gene_id transcript_info[transcript_id]["gene_type"] = g.type continue if g.type not in valid_gff_to_gtf_type.keys(): continue try: transcript_id = g.get_attr("Parent", first=False) except IndexError: transcript_id = g.get_attr("mRNA", first=False) g.type = valid_gff_to_gtf_type[g.type] for tid in transcript_id: if tid not in transcript_info: continue gene_type = transcript_info[tid]["gene_type"] if not gene_type.endswith("RNA") and not gene_type.endswith("transcript"): continue gene_id = transcript_info[tid]["gene_id"] g.attributes = dict(gene_id=[gene_id], transcript_id=[tid]) g.update_attributes(gtf=True, urlquote=False) print(g)
def gtf(args)
%prog gtf gffile Convert gff to gtf file. In gtf, only exon/CDS features are important. The first 8 columns are the same as gff, but in the attributes field, we need to specify "gene_id" and "transcript_id".
2.350284
2.284036
1.029005
p = OptionParser(merge.__doc__) p.add_option("--seq", default=False, action="store_true", help="Print FASTA sequences at the end") p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) if nargs < 1: sys.exit(not p.print_help()) if nargs == 1: listfile, = args fp = open(listfile) gffiles = [x.strip() for x in fp] else: gffiles = args outfile = opts.outfile deflines = set() fw = must_open(outfile, "w") fastarecs = {} for gffile in natsorted(gffiles, key=lambda x: op.basename(x)): logging.debug(gffile) fp = open(gffile) for row in fp: row = row.rstrip() if not row or row[0] == '#': if row == FastaTag: break if row in deflines: continue else: deflines.add(row) print(row, file=fw) if not opts.seq: continue f = Fasta(gffile, lazy=True) for key, rec in f.iteritems_ordered(): if key in fastarecs: continue fastarecs[key] = rec if opts.seq: print(FastaTag, file=fw) SeqIO.write(fastarecs.values(), fw, "fasta") fw.close()
def merge(args)
%prog merge gffiles Merge several gff files into one. When only one file is given, it is assumed to be a file with a list of gff files.
2.880445
2.677744
1.075698
p = OptionParser(extract.__doc__) p.add_option("--contigs", help="Extract features from certain contigs [default: %default]") p.add_option("--names", help="Extract features with certain names [default: %default]") p.add_option("--types", type="str", default=None, help="Extract features of certain feature types [default: %default]") p.add_option("--children", default=0, choices=["1", "2"], help="Specify number of iterations: `1` grabs children, " + \ "`2` grabs grand-children [default: %default]") p.add_option("--tag", default="ID", help="Scan the tags for the names [default: %default]") p.add_option("--fasta", default=False, action="store_true", help="Write FASTA if available [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args contigfile = opts.contigs namesfile = opts.names typesfile = opts.types nametag = opts.tag contigID = parse_multi_values(contigfile) names = parse_multi_values(namesfile) types = parse_multi_values(typesfile) outfile = opts.outfile if opts.children: assert types is not None or names is not None, "Must set --names or --types" if names == None: names = list() populate_children(outfile, names, gffile, iter=opts.children, types=types) return fp = must_open(gffile) fw = must_open(opts.outfile, "w") for row in fp: atoms = row.split() if len(atoms) == 0: continue tag = atoms[0] if row[0] == "#": if row.strip() == "###": continue if not (tag == RegionTag and contigID and atoms[1] not in contigID): print(row.rstrip(), file=fw) if tag == FastaTag: break continue b = GffLine(row) attrib = b.attributes if contigID and tag not in contigID: continue if types and b.type in types: _id = b.accn if _id not in names: names.append(_id) if names is not None: if nametag not in attrib: continue if attrib[nametag][0] not in names: continue print(row.rstrip(), file=fw) if not opts.fasta: return f = Fasta(gffile) for s in contigID: if s in f: SeqIO.write([f[s]], fw, "fasta")
def extract(args)
%prog extract gffile --contigs: Extract particular contig(s) from the gff file. If multiple contigs are involved, use "," to separate, e.g. "contig_12,contig_150"; or provide a file with multiple contig IDs, one per line --names: Process particular ID(s) from the gff file. If multiple IDs are involved, use "," to separate; or provide a file with multiple IDs, one per line
3.188984
2.936364
1.086031
p = OptionParser(split.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gffile, outdir = args mkdir(outdir) g = Gff(gffile) seqids = g.seqids for s in seqids: outfile = op.join(outdir, s + ".gff") extract([gffile, "--contigs=" + s, "--outfile=" + outfile])
def split(args)
%prog split gffile outdir Split the gff into one contig per file. Will also take sequences if the file contains FASTA sequences.
3.162964
2.681286
1.179644
p = OptionParser(note.__doc__) p.add_option("--type", default=None, help="Only process certain types, multiple types allowed with comma") p.add_option("--attribute", default="Parent,Note", help="Attribute field to extract, multiple fields allowd with comma") p.add_option("--AED", type="float", help="Only extract lines with AED score <=") p.add_option("--exoncount", default=False, action="store_true", help="Get the exon count for each mRNA feat") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args type = opts.type if type: type = type.split(",") g = make_index(gffile) exoncounts = {} if opts.exoncount: for feat in g.features_of_type("mRNA"): nexons = 0 for c in g.children(feat.id, 1): if c.featuretype != "exon": continue nexons += 1 exoncounts[feat.id] = nexons attrib = opts.attribute.split(",") gff = Gff(gffile) seen = set() AED = opts.AED for g in gff: if type and g.type not in type: continue if AED is not None and float(g.attributes["_AED"][0]) > AED: continue keyval = [g.accn] + [",".join(g.attributes[x]) \ for x in attrib if x in g.attributes] if exoncounts: nexons = exoncounts.get(g.accn, 0) keyval.append(str(nexons)) keyval = tuple(keyval) if keyval not in seen: print("\t".join(keyval)) seen.add(keyval)
def note(args)
%prog note gffile > tabfile Extract certain attribute field for each feature.
3.046747
2.753163
1.106635
from tempfile import mkstemp from pybedtools import BedTool from jcvi.utils.cbook import SummaryStats p = OptionParser(splicecov.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gfffile, juncsbed, = args tagged = "{0}.{1}.gff3".format(gfffile.rsplit(".", 1)[0], "tag_introns") gff3, junc = BedTool(gfffile), BedTool(juncsbed) ab = gff3.intersect(junc, wao=True, f=1.0, s=True) abfh = must_open(ab.fn) seen = set() scov = AutoVivification() fh, tmpgff = mkstemp(suffix=".gff3") fw = must_open(tmpgff, "w") for line in abfh: args = line.strip().split("\t") g = GffLine("\t".join(str(x) for x in args[:9])) if g.type == "intron" and args[10] != -1: ispan, jspan = g.span, int(args[11]) - int(args[10]) if ispan == jspan: g.set_attr("ID", args[12], update=True) g.score = int(args[13]) pparts = g.get_attr("Parent").split(".") locus, iso = pparts[0], ".".join(pparts[1:]) seen.add(iso) if not scov[locus][iso]: scov[locus][iso] = [] scov[locus][iso].append(g.score) else: continue print(g, file=fw) fw.close() format([tmpgff, "--unique", "-o", tagged]) os.unlink(tmpgff) isos = sorted(list(seen)) fw = must_open(opts.outfile, "w") h1, h2, stats = ["#"], ["#locus"], ["N", "mean", "median", "min", "max"] for iso in isos: h1.extend([str(iso)] + [""] * (len(stats) - 1)) h2.extend(stats) print("\t".join(str(x) for x in h1), file=fw) print("\t".join(str(x) for x in h2), file=fw) for locus in scov.keys(): out = [locus] for iso in isos: if iso in scov[locus].keys(): juncs = scov[locus][iso] jstats = SummaryStats(juncs, dtype="int") out.extend([jstats.size, jstats.mean, jstats.median, \ jstats.min, jstats.max]) else: out.extend(["-"] * len(stats)) print("\t".join(str(x) for x in out), file=fw) fw.close()
def splicecov(args)
%prog splicecov annotation.gff3 junctions.bed Given an annotation GFF file (containing introns) and a TopHat junctions.bed file (preprocessed using formats.bed.juncs()), each intron gets tagged with the JUNC identifier and read coverage. Output is a summary table listing, for each gene locus, the isoform number, the number of splice junctions, and the {average, median, min & max} read coverage across the junctions.
2.786511
2.699027
1.032413
''' %prog bed gff_file [--options] Parses the start, stop locations of the selected features out of GFF and generate a bed file ''' from jcvi.utils.cbook import gene_name p = OptionParser(bed.__doc__) p.add_option("--type", dest="type", default="gene", help="Feature type to extract, use comma for multiple [default: %default]") p.add_option("--key", default="ID", help="Key in the attributes to extract") p.add_option("--source", help="Source to extract from, use comma for multiple [default: %default]") p.add_option("--span", default=False, action="store_true", help="Use feature span in the score column") p.add_option("--score_attrib", dest="score_attrib", default=False, help="Attribute whose value is to be used as score in `bedline` [default: %default]") p.add_option("--append_source", default=False, action="store_true", help="Append GFF source name to extracted key value") p.add_option("--append_ftype", default=False, action="store_true", help="Append GFF feature type to extracted key value") p.add_option("--nosort", default=False, action="store_true", help="Do not sort the output bed file [default: %default]") p.set_stripnames(default=False) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args key = opts.key or None type = opts.type or set() source = opts.source or set() strip_names = opts.strip_names span = opts.span if opts.type: type = set(x.strip() for x in opts.type.split(",")) if opts.source: source = set(x.strip() for x in opts.source.split(",")) gff = Gff(gffile, key=key, append_source=opts.append_source, \ append_ftype=opts.append_ftype, score_attrib=opts.score_attrib) b = Bed() for g in gff: if type and g.type not in type: continue if source and g.source not in source: continue bl = g.bedline if strip_names: bl.accn = gene_name(bl.accn) if span: bl.score = bl.span b.append(bl) sorted = not opts.nosort b.print_to_file(opts.outfile, sorted=sorted) logging.debug("Extracted {0} features (type={1} id={2})".\ format(len(b), ",".join(type), key))
def bed(args)
%prog bed gff_file [--options] Parses the start, stop locations of the selected features out of GFF and generate a bed file
2.638214
2.374987
1.110833
import gffutils db_file = gff_file + ".db" if need_update(gff_file, db_file): if op.exists(db_file): os.remove(db_file) logging.debug("Indexing `{0}`".format(gff_file)) gffutils.create_db(gff_file, db_file, merge_strategy="create_unique") else: logging.debug("Load index `{0}`".format(gff_file)) return gffutils.FeatureDB(db_file)
def make_index(gff_file)
Make a sqlite database for fast retrieval of features.
2.340239
2.372663
0.986334
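A minimal usage sketch of the index returned by make_index() above; it assumes gffutils is installed, the make_index() helper is importable, and "sample.gff" is a hypothetical input file with gene/mRNA features.

db = make_index("sample.gff")  # builds sample.gff.db on first call, reuses it afterwards
for gene in db.features_of_type("gene"):
    # level 1 returns direct children only (e.g. mRNA features under a gene)
    mrnas = [c.id for c in db.children(gene.id, 1)]
    print(gene.id, len(mrnas))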
p = OptionParser(children.__doc__) p.add_option("--parents", default="gene", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args g = make_index(gff_file) parents = set(opts.parents.split(',')) for feat in get_parents(gff_file, parents): cc = [c.id for c in g.children(feat.id, 1)] if len(cc) <= 1: continue print("\t".join(str(x) for x in \ (feat.id, feat.start, feat.stop, "|".join(cc))))
def children(args)
%prog children gff_file Get the children that have the same parent.
3.408213
3.064181
1.112275
# can request upstream sequence only from the following valid sites valid_upstream_sites = ["TSS", "TrSS"] upstream_site, upstream_len = None, None flag, error_msg = None, None parents, children = None, None if re.match(r'upstream', feature): parents, children = "mRNA", "CDS" feature, upstream_site, upstream_len = re.search(r'([A-Za-z]+):([A-Za-z]+):(\S+)', \ feature).groups() if not is_number(upstream_len): flag, error_msg = 1, "Error: upstream len `" + upstream_len + "` should be an integer" upstream_len = int(upstream_len) if upstream_len < 0: flag, error_msg = 1, "Error: upstream len `" + str(upstream_len) + "` should be > 0" if upstream_site not in valid_upstream_sites: flag, error_msg = 1, "Error: upstream site `" + upstream_site + "` not valid." + \ " Please choose from " + ", ".join(valid_upstream_sites) elif feature == "CDS": parents, children = "mRNA", "CDS" else: flag, error_msg = 1, "Error: unrecognized option --feature=" + feature return feature, parents, children, upstream_site, upstream_len, flag, error_msg
def parse_feature_param(feature)
Take the --feature param (coming from gff.load()) and parse it. Returns feature, parents and children terms. Also returns the length of upstream sequence (and start site) requested. If erroneous, returns a flag and an error message to be displayed on exit.
3.330456
2.988718
1.114343
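A quick illustration of the --feature syntax parsed above; the value "upstream:TSS:500" is a hypothetical example meaning 500 bp upstream of the transcription start site.

import re

feature, site, length = re.search(r'([A-Za-z]+):([A-Za-z]+):(\S+)', "upstream:TSS:500").groups()
print(feature, site, length)  # upstream TSS 500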
if uSite == "TSS": (upstream_start, upstream_stop) = \ (feat.start - uLen, feat.start - 1) \ if feat.strand == "+" else \ (feat.end + 1, feat.end + uLen) elif uSite == "TrSS": children = [] for c in gffdb.children(feat.id, 1): if c.featuretype not in children_list: continue children.append((c.start, c.stop)) if not children: print("[warning] %s has no children with type %s" \ % (feat.id, ','.join(children_list)), file=sys.stderr) return None, None cds_start, cds_stop = range_minmax(children) (upstream_start, upstream_stop) = \ (cds_start - uLen, cds_start - 1) \ if feat.strand == "+" else \ (cds_stop + 1, cds_stop + uLen) if feat.strand == "+" and upstream_start < 1: upstream_start = 1 elif feat.strand == "-" and upstream_stop > seqlen: upstream_stop = seqlen actual_uLen = upstream_stop - upstream_start + 1 if actual_uLen < uLen: print("[warning] sequence upstream of {0} ({1} bp) is less than upstream length {2}" \ .format(feat.id, actual_uLen, uLen), file=sys.stderr) return None, None return upstream_start, upstream_stop
def get_upstream_coords(uSite, uLen, seqlen, feat, children_list, gffdb)
Subroutine takes upstream site, length, reference sequence length, parent mRNA feature (GffLine object), list of child feature types and a GFFutils.GFFDB object as the input If upstream of TSS is requested, use the parent feature coords to extract the upstream sequence If upstream of TrSS is requested, iterates through all the children (CDS features stored in the sqlite GFFDB) and use child feature coords to extract the upstream sequence If success, returns the upstream start and stop coordinates else, returns None
2.406862
2.239722
1.074625
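A tiny arithmetic check of the TSS window computed above: for a hypothetical + strand mRNA starting at 10,001 with uLen=500, the upstream window is [9501, 10000]; on the - strand the window would instead sit past feat.end.

feat_start, uLen = 10001, 500          # hypothetical + strand feature
upstream_start = feat_start - uLen     # 9501
upstream_stop = feat_start - 1         # 10000
print(upstream_start, upstream_stop)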
p = OptionParser(bed12.__doc__) p.add_option("--parent", default="mRNA", help="Top feature type [default: %default]") p.add_option("--block", default="exon", help="Feature type for regular blocks [default: %default]") p.add_option("--thick", default="CDS", help="Feature type for thick blocks [default: %default]") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args parent, block, thick = opts.parent, opts.block, opts.thick outfile = opts.outfile g = make_index(gffile) fw = must_open(outfile, "w") for f in g.features_of_type(parent): chrom = f.chrom chromStart = f.start - 1 chromEnd = f.stop name = f.id score = 0 strand = f.strand thickStart = 1e15 thickEnd = 0 blocks = [] for c in g.children(name, 1): cstart, cend = c.start - 1, c.stop if c.featuretype == block: blockStart = cstart - chromStart blockSize = cend - cstart blocks.append((blockStart, blockSize)) elif c.featuretype == thick: thickStart = min(thickStart, cstart) thickEnd = max(thickEnd, cend) blocks.sort() blockStarts, blockSizes = zip(*blocks) blockCount = len(blocks) blockSizes = ",".join(str(x) for x in blockSizes) + "," blockStarts = ",".join(str(x) for x in blockStarts) + "," itemRgb = 0 print("\t".join(str(x) for x in (chrom, chromStart, chromEnd, \ name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts)), file=fw)
def bed12(args)
%prog bed12 gffile > bedfile Produce bed12 file for coding features. The exons will be converted to blocks. The CDS range will be shown between thickStart to thickEnd. For reference, bed format consists of the following fields: 1. chrom 2. chromStart 3. chromEnd 4. name 5. score 6. strand 7. thickStart 8. thickEnd 9. itemRgb 10. blockCount 11. blockSizes 12. blockStarts
2.177887
2.009732
1.08367
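A toy worked example of the BED12 field layout produced above, for a hypothetical + strand mRNA with two exons and one CDS span (coordinates are invented; GFF is 1-based inclusive, BED is 0-based half-open).

chrom, strand = "chr1", "+"
exons = [(1001, 1200), (1501, 1800)]   # GFF coordinates of the exon features
cds_start, cds_end = 1101, 1650        # GFF coordinates of the CDS span

chromStart, chromEnd = exons[0][0] - 1, exons[-1][1]
blockSizes = ",".join(str(e - s + 1) for s, e in exons) + ","
blockStarts = ",".join(str(s - 1 - chromStart) for s, e in exons) + ","
fields = (chrom, chromStart, chromEnd, "mRNA0001", 0, strand,
          cds_start - 1, cds_end, 0, len(exons), blockSizes, blockStarts)
print("\t".join(str(x) for x in fields))
# chr1  1000  1800  mRNA0001  0  +  1100  1650  0  2  200,300,  0,500,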
sig_elems = [self.seqid, self.source, self.type, \ self.start, self.end, self.strand, \ self.phase] if re.search("exon|CDS|UTR", self.type): parent = self.get_attr("Parent") if parent: (locus, iso) = atg_name(parent, retval="locus,iso", \ trimpad0=False) if locus: sig_elems.append(locus) else: sig_elems.extend([self.accn]) return ",".join(str(elem) for elem in sig_elems)
def signature(self)
create a unique signature for any GFF line based on joining columns 1,2,3,4,5,7,8 (into a comma separated string)
4.999623
4.476635
1.116826
p = OptionParser(main.__doc__) p.add_option("--refids", help="Use subset of contigs in the ref") p.add_option("--refcov", default=.01, type="float", help="Minimum reference coverage [default: %default]") p.add_option("--all", default=False, action="store_true", help="Plot one pdf file per ref in refidsfile [default: %default]") p.add_option("--color", default="similarity", choices=("similarity", "direction", "none"), help="Color the dots based on") p.add_option("--nolayout", default=False, action="store_true", help="Do not rearrange contigs") p.set_align(pctid=0, hitlen=0) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) deltafile, = args reffasta, queryfasta = open(deltafile).readline().split() color = opts.color layout = not opts.nolayout prefix = op.basename(deltafile).split(".")[0] qsizes = Sizes(queryfasta).mapping rsizes = Sizes(reffasta).mapping refs = SetFile(opts.refids) if opts.refids else set(rsizes.keys()) refcov = opts.refcov pctid = opts.pctid hitlen = opts.hitlen deltafile = filter([deltafile, "--pctid={0}".format(pctid), "--hitlen={0}".format(hitlen)]) if opts.all: for r in refs: pdffile = plot_some_queries([r], qsizes, rsizes, deltafile, refcov, prefix=prefix, color=color, layout=layout) if pdffile: sh("mv {0} {1}.pdf".format(pdffile, r)) else: plot_some_queries(refs, qsizes, rsizes, deltafile, refcov, prefix=prefix, color=color, layout=layout)
def main(args)
%prog deltafile Plot one query. Extract the references that have major matches to this query. Control "major" by option --refcov.
3.481893
3.206017
1.086049
p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) flist = args prefix = flist[0].split(".")[0] j = 0 for f in flist: reader = Maf(f).reader for rec in reader: a, b = rec.components for a, tag in zip((a, b), "ab"): name = "{0}_{1:07d}{2}".format(prefix, j, tag) print("\t".join(str(x) for x in (a.src, a.forward_strand_start, \ a.forward_strand_end, name))) j += 1
def bed(args)
%prog bed maffiles > out.bed Convert a folder of maf alignments to bed features, which are then useful for checking coverage, etc.
3.893902
3.707149
1.050377
''' %prog blast maffiles > out.blast From a folder of .maf files, generate .blast file with tabular format. ''' p = OptionParser(blast.__doc__) opts, args = p.parse_args(args) if len(args) == 0: sys.exit(p.print_help()) flist = args for f in flist: maf_to_blast8(f)
def blast(args)
%prog blast maffiles > out.blast From a folder of .maf files, generate .blast file with tabular format.
4.370489
2.25994
1.933896
indexes = interval_index_file.Indexes() in_handle = open(filename) reader = maf.Reader(in_handle) while True: pos = reader.file.tell() rec = next(reader) if rec is None: break for c in rec.components: indexes.add(c.src, c.forward_strand_start, c.forward_strand_end, pos, max=c.src_size ) index_handle = open(indexfile, "w") indexes.write(index_handle) index_handle.close()
def build_index(self, filename, indexfile)
Recipe from Brad Chapman's blog <http://bcbio.wordpress.com/2009/07/26/sorting-genomic-alignments-using-python/>
4.716141
4.494425
1.049331
size = len(candidate) prob = random.random() if prob > .5: # Inversion p = random.randint(0, size-1) q = random.randint(0, size-1) if p > q: p, q = q, p q += 1 s = candidate[p:q] x = candidate[:p] + s[::-1] + candidate[q:] return creator.Individual(x), else: # Insertion p = random.randint(0, size-1) q = random.randint(0, size-1) cq = candidate.pop(q) candidate.insert(p, cq) return candidate,
def genome_mutation(candidate)
Return the mutant created by mutating the candidate. This function performs either inversion or insertion. Inversion randomly chooses two locations along the candidate and reverses the values within that slice. Insertion is done by popping one item and inserting it back at a random position.
2.829352
2.634816
1.073833
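A standalone sketch of the inversion half of the mutation above, operating on a plain Python list rather than a DEAP creator.Individual; the fixed slice endpoints here are illustrative stand-ins for the random choices.

def invert_slice(candidate, p, q):
    # reverse candidate[p:q+1], keeping both flanks intact (mirrors the
    # inversion branch above with p, q chosen up front instead of randomly)
    return candidate[:p] + candidate[p:q + 1][::-1] + candidate[q + 1:]

print(invert_slice(list("ABCDEFG"), 2, 4))  # ['A', 'B', 'E', 'D', 'C', 'F', 'G']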
# Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in population if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit if halloffame is not None: halloffame.update(population) record = stats.compile(population) if stats else {} # Begin the generational process gen = 1 best = (0,) while True: # Select the next generation individuals offspring = toolbox.select(population, len(population)) # Vary the pool of individuals offspring = varAnd(offspring, toolbox, cxpb, mutpb) # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in offspring if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit # Update the hall of fame with the generated individuals if halloffame is not None: halloffame.update(offspring) if callback is not None: callback(halloffame[0], gen) # Replace the current population by the offspring population[:] = offspring # Append the current generation statistics to the logbook record = stats.compile(population) if stats else {} current_best = record['max'] if gen % 20 == 0 and verbose: print("Current iteration {0}: max_score={1}". format(gen, current_best), file=sys.stderr) if current_best > best: best = current_best updated = gen gen += 1 if gen - updated > ngen: break return population
def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None, halloffame=None, callback=None, verbose=True)
This algorithm reproduces the simplest evolutionary algorithm as presented in chapter 7 of [Back2000]_. Modified to allow checking whether there has been no change for ngen generations, as a simple rule for convergence. The interface is similar to eaSimple(). However, in eaSimple, ngen is the total number of iterations; in eaSimpleConverge, we terminate only when the best is NOT updated for ngen iterations.
1.693926
1.776109
0.953729
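A minimal, DEAP-free sketch of the stopping rule used above: keep producing generations until the best score has not improved for ngen consecutive generations. The propose() callable is a hypothetical stand-in for one round of selection, variation and evaluation.

import random

def run_until_converged(propose, ngen=20):
    best, updated, gen = float("-inf"), 0, 0
    while gen - updated <= ngen:      # stop once best is stale for ngen generations
        gen += 1
        score = propose()
        if score > best:
            best, updated = score, gen
    return best, gen

random.seed(0)
best, gen = run_until_converged(random.random)
print(best, gen)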
from jcvi.formats.fasta import Fasta from jcvi.formats.bed import Bed from jcvi.utils.cbook import fill p = OptionParser(frombed.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, contigfasta, readfasta = args prefix = bedfile.rsplit(".", 1)[0] contigfile = prefix + ".contig" idsfile = prefix + ".ids" contigfasta = Fasta(contigfasta) readfasta = Fasta(readfasta) bed = Bed(bedfile) checksum = "00000000 checksum." fw_ids = open(idsfile, "w") fw = open(contigfile, "w") for ctg, reads in bed.sub_beds(): ctgseq = contigfasta[ctg] ctgline = "##{0} {1} {2} bases, {3}".format(\ ctg, len(reads), len(ctgseq), checksum) print(ctg, file=fw_ids) print(ctgline, file=fw) print(fill(ctgseq.seq), file=fw) for b in reads: read = b.accn strand = b.strand readseq = readfasta[read] rc = " [RC]" if strand == "-" else "" readlen = len(readseq) rstart, rend = 1, readlen if strand == "-": rstart, rend = rend, rstart readrange = "{{{0} {1}}}".format(rstart, rend) conrange = "<{0} {1}>".format(b.start, b.end) readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\ read, rc, readlen, checksum, readrange, conrange) print(readline, file=fw) print(fill(readseq.seq), file=fw) logging.debug("Mapped contigs written to `{0}`.".format(contigfile)) logging.debug("Contig IDs written to `{0}`.".format(idsfile))
def frombed(args)
%prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS.
2.999459
2.804577
1.069487
p = OptionParser(main.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) contigfile, = args bedfile = contigfile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") c = ContigFile(contigfile) for rec in c.iter_records(): for r in rec.reads: print(r.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
def bed(args)
%prog bed contigfile Prints out the contigs and their associated reads.
3.03594
2.800821
1.083947
''' runs the command: `tesseract_cmd` `input_filename` `output_filename_base` returns the exit status of tesseract, as well as tesseract's stderr output ''' command = [tesseract_cmd, input_filename, output_filename_base] if lang is not None: command += ['-l', lang] if boxes: command += ['batch.nochop', 'makebox'] proc = subprocess.Popen(command, stderr=subprocess.PIPE) return (proc.wait(), proc.stderr.read())
def run_tesseract(input_filename, output_filename_base, lang=None, boxes=False)
runs the command: `tesseract_cmd` `input_filename` `output_filename_base` returns the exit status of tesseract, as well as tesseract's stderr output
3.760585
2.402981
1.564966
''' returns all lines in the error_string that contain the string "Error" ''' lines = error_string.splitlines() error_lines = tuple(line for line in lines if line.find('Error') >= 0) if len(error_lines) > 0: return '\n'.join(error_lines) else: return error_string.strip()
def get_errors(error_string)
returns all lines in the error_string that contain the string "Error"
3.510636
2.564074
1.369163
''' returns a temporary file-name ''' # prevent os.tmpname from printing an error... stderr = sys.stderr try: sys.stderr = cStringIO.StringIO() return os.tempnam(None, 'tess_') finally: sys.stderr = stderr
def tempnam()
returns a temporary file-name
7.023331
7.057039
0.995223
''' Runs tesseract on the specified image. First, the image is written to disk, and then the tesseract command is run on the image. Resseract's result is read, and the temporary files are erased. ''' input_file_name = '%s.bmp' % tempnam() output_file_name_base = tempnam() if not boxes: output_file_name = '%s.txt' % output_file_name_base else: output_file_name = '%s.box' % output_file_name_base try: image.save(input_file_name) status, error_string = run_tesseract(input_file_name, output_file_name_base, lang=lang, boxes=boxes) if status: errors = get_errors(error_string) raise TesseractError(status, errors) f = file(output_file_name) try: return f.read().strip() finally: f.close() finally: cleanup(input_file_name) cleanup(output_file_name)
def image_to_string(image, lang=None, boxes=False)
Runs tesseract on the specified image. First, the image is written to disk, and then the tesseract command is run on the image. Tesseract's result is read, and the temporary files are erased.
3.181112
2.198546
1.446916
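A hedged usage sketch of the wrapper above; it assumes PIL/Pillow is installed, the tesseract binary (with the "eng" language pack) is on PATH, and "receipt.png" is a hypothetical input image.

from PIL import Image

img = Image.open("receipt.png")
print(image_to_string(img))                # plain OCR text
print(image_to_string(img, lang="eng"))    # force the English model
print(image_to_string(img, boxes=True))    # per-character box coordinates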
import pandas as pd p = OptionParser(mitosomatic.__doc__) p.add_option("--minaf", default=.005, type="float", help="Minimum allele fraction") p.add_option("--maxaf", default=.1, type="float", help="Maximum allele fraction") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) df, = args af_file = df.rsplit(".", 1)[0] + ".af" fw = open(af_file, "w") df = pd.read_csv(df, sep="\t") for i, row in df.iterrows(): na = row["num_A"] nt = row["num_T"] nc = row["num_C"] ng = row["num_G"] nd = row["num_D"] ni = row["num_I"] depth = row["depth"] #major, minor = sorted([na, nt, nc, ng], reverse=True)[:2] #af = minor * 1. / (major + minor) af = (nd + ni) * 1. / depth if not (opts.minaf <= af <= opts.maxaf): continue print("{}\t{}\t{:.6f}".format(row["chrom"], row["start"], af), file=fw) fw.close() logging.debug("Allele freq written to `{}`".format(af_file))
def mitosomatic(args)
%prog mitosomatic t.piledriver Find mito mosaic somatic mutations in piledriver results.
2.42432
2.446278
0.991024
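A small arithmetic check of the allele-fraction filter above: with, hypothetically, 3 deletion-supporting and 1 insertion-supporting reads at depth 500, af = 4/500 = 0.008, which falls inside the default 0.005 <= af <= 0.1 window.

nd, ni, depth = 3, 1, 500        # hypothetical piledriver counts
af = (nd + ni) * 1. / depth
print(af, 0.005 <= af <= 0.1)    # 0.008 True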
p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) delt, = args dt = Delly(delt) dt.write_bed("del.bed")
def bed(args)
%prog bed del.txt Convert `del.txt` to BED format. DELLY manual here: <http://www.embl.de/~rausch/delly.html> Deletion: chr, start, end, size, #supporting_pairs, avg._mapping_quality, deletion_id chr1, 10180, 10509, 329, 75, 15.8667, Deletion_Sample_00000000
3.998654
3.428037
1.166456
from jcvi.formats.vcf import VcfLine from six.moves.urllib.parse import parse_qsl p = OptionParser(mitocompile.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) vcfs = args print("\t".join("vcf samplekey depth seqid pos alt svlen pe sr".split())) for i, vcf in enumerate(vcfs): if (i + 1) % 100 == 0: logging.debug("Process `{}` [{}]". format(vcf, percentage(i + 1, len(vcfs)))) depthfile = vcf.replace(".sv.vcf.gz", ".depth") fp = must_open(depthfile) chrm, depth = fp.next().split() depth = int(float(depth)) samplekey = op.basename(vcf).split("_")[0] fp = must_open(vcf) for row in fp: if row[0] == '#': continue v = VcfLine(row) info = dict(parse_qsl(v.info)) print("\t".join(str(x) for x in (vcf, samplekey, depth, v.seqid, v.pos, v.alt, info.get("SVLEN"), info["PE"], info["SR"])))
def mitocompile(args)
%prog mitocompile *.vcf.gz Extract information about deletions in vcf file.
3.638021
3.526044
1.031757
p = OptionParser(mito.__doc__) p.set_aws_opts(store="hli-mv-data-science/htang/mito-deletions") p.add_option("--realignonly", default=False, action="store_true", help="Realign only") p.add_option("--svonly", default=False, action="store_true", help="Run Realign => SV calls only") p.add_option("--support", default=1, type="int", help="Minimum number of supporting reads") p.set_home("speedseq", default="/mnt/software/speedseq/bin") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) chrMfa, bamfile = args store = opts.output_path cleanup = not opts.nocleanup if not op.exists(chrMfa): logging.debug("File `{}` missing. Exiting.".format(chrMfa)) return chrMfai = chrMfa + ".fai" if not op.exists(chrMfai): cmd = "samtools index {}".format(chrMfa) sh(cmd) if not bamfile.endswith(".bam"): bamfiles = [x.strip() for x in open(bamfile)] else: bamfiles = [bamfile] if store: computed = ls_s3(store) computed = [op.basename(x).split('.')[0] for x in computed if x.endswith(".depth")] remaining_samples = [x for x in bamfiles if op.basename(x).split(".")[0] not in computed] logging.debug("Already computed on `{}`: {}". format(store, len(bamfiles) - len(remaining_samples))) bamfiles = remaining_samples logging.debug("Total samples: {}".format(len(bamfiles))) for bamfile in bamfiles: run_mito(chrMfa, bamfile, opts, realignonly=opts.realignonly, svonly=opts.svonly, store=store, cleanup=cleanup)
def mito(args)
%prog mito chrM.fa input.bam Identify mitochondrial deletions.
3.545511
3.317906
1.068599
p = OptionParser(fcs.__doc__) p.add_option("--cutoff", default=200, help="Skip small components less than [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fcsfile, = args cutoff = opts.cutoff fp = open(fcsfile) for row in fp: if row[0] == "#": continue sep = "\t" if "\t" in row else None atoms = row.rstrip().split(sep, 3) contig, length = atoms[:2] length = int(length) label = atoms[-1] label = label.replace(" ", "_") if len(atoms) == 3: ranges = "{0}..{1}".format(1, length) else: assert len(atoms) == 4 ranges = atoms[2] for ab in ranges.split(","): a, b = ab.split("..") a, b = int(a), int(b) assert a <= b ahang = a - 1 bhang = length - b if ahang < cutoff: a = 1 if bhang < cutoff: b = length print("\t".join(str(x) for x in (contig, a - 1, b, label)))
def fcs(args)
%prog fcs fcsfile Process the results from Genbank contaminant screen. An example of the file looks like: contig name, length, span(s), apparent source contig0746 11760 1..141 vector contig0751 14226 13476..14226 vector contig0800 124133 30512..30559 primer/adapter
2.88502
2.814128
1.025191
from jcvi.formats.base import must_open p = OptionParser(asn.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fw = must_open(opts.outfile, "w") for asnfile in args: fp = open(asnfile) ingeneralblock = False ingenbankblock = False gb, name = None, None for row in fp: if row.strip() == "": continue tag = row.split()[0] if tag == "general": ingeneralblock = True if ingeneralblock and tag == "str": if name is None: # Only allow first assignment name = row.split("\"")[1] ingeneralblock = False if tag == "genbank": ingenbankblock = True if ingenbankblock and tag == "accession": if gb is None: gb = row.split("\"")[1] ingenbankblock = False assert gb and name print("{0}\t{1}".format(gb, name), file=fw)
def asn(args)
%prog asn asnfiles Mainly to get this block, and extract `str` field: general { db "TIGR" , tag str "mtg2_12952" } , genbank { accession "AC148996" ,
2.84407
2.612783
1.088521
from jcvi.formats.fasta import sequin p = OptionParser(htgnew.__doc__) p.add_option("--comment", default="", help="Comments for this submission [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) fastafile, phasefile, sbtfile = args comment = opts.comment fastadir = "fasta" sqndir = "sqn" mkdir(fastadir) mkdir(sqndir) cmd = "faSplit byname {0} {1}/".format(fastafile, fastadir) sh(cmd, outfile="/dev/null", errfile="/dev/null") acmd = 'tbl2asn -a z -p fasta -r {sqndir}' acmd += ' -i {splitfile} -t {sbtfile} -C tigr' acmd += ' -j "[tech=htgs {phase}] [organism=Medicago truncatula] [strain=A17]"' acmd += ' -o {sqndir}/{accession_nv}.sqn -V Vbr' acmd += ' -y "{comment}" -W T -T T' nupdated = 0 for row in open(phasefile): name, phase = row.split()[:2] fafile = op.join(fastadir, name + ".fa") cloneopt = "--clone={0}".format(name) splitfile, gaps = sequin([fafile, cloneopt]) splitfile = op.basename(splitfile) accession = accession_nv = name phase = int(phase) assert phase in (1, 2, 3) cmd = acmd.format(accession_nv=accession_nv, sqndir=sqndir, sbtfile=sbtfile, splitfile=splitfile, phase=phase, comment=comment) sh(cmd) verify_sqn(sqndir, accession) nupdated += 1 print("A total of {0} records updated.".format(nupdated), file=sys.stderr)
def htgnew(args)
%prog htgnew fastafile phasefile template.sbt Prepare sqnfiles for submitting new Genbank HTG records. `fastafile` contains the sequences. `phasefile` contains the phase information; it is a two-column file: mth2-45h12 3 `template.sbt` is the Genbank submission template. This function is simpler than htg, since the record names have not been assigned yet (so less bookkeeping).
4.904938
4.534799
1.081622
rows, cols = get_rows_cols() plate, splate = get_plate() n96 = rows.index(c96[0]) * ncols // 2 + int(c96[1:]) q = "{0:02d}{1}".format(n96, "ABCD"[quad - 1]) return splate[q]
def convert_96_to_384(c96, quad, nrows=Nrows, ncols=Ncols)
Convert the 96-well number and quad number to 384-well number >>> convert_96_to_384("B02", 1) 'C3' >>> convert_96_to_384("H09", 4) 'P18'
7.721169
8.131645
0.949521
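A self-contained sketch of the interleaved quadrant mapping that the doctests above imply, written without get_plate(); quadrants 1-4 land on the (odd,odd), (odd,even), (even,odd), (even,even) row/column interleaves of the 384-well plate. The layout is an assumption, checked only against the two doctest cases.

import string

def convert_96_to_384_sketch(c96, quad):
    row96 = string.ascii_uppercase.index(c96[0])   # A..H -> 0..7
    col96 = int(c96[1:]) - 1                       # 01..12 -> 0..11
    row384 = 2 * row96 + (quad - 1) // 2           # quads 3,4 shift down one row
    col384 = 2 * col96 + (quad - 1) % 2            # quads 2,4 shift right one column
    return "{0}{1}".format(string.ascii_uppercase[row384], col384 + 1)

print(convert_96_to_384_sketch("B02", 1))  # C3
print(convert_96_to_384_sketch("H09", 4))  # P18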
p = OptionParser(t384.__doc__) opts, args = p.parse_args(args) plate, splate = get_plate() fw = sys.stdout for i in plate: for j, p in enumerate(i): if j != 0: fw.write('|') fw.write(p) fw.write('\n')
def t384(args)
%prog t384 Print out a table converting between 96-well and 384-well plate positions
4.8772
4.384521
1.112368
s = "".join(s.split()[1:]).replace("/", ";") a = parse_qs(s) return a
def parse_description(s)
Returns a dictionary based on the FASTA header, assuming JCVI data
11.196808
9.586869
1.167932
from jcvi.formats.base import FileMerger from jcvi.formats.bed import mates from jcvi.formats.contig import frombed from jcvi.formats.fasta import join from jcvi.utils.iter import grouper p = OptionParser(scaffold.__doc__) p.set_rclip(rclip=1) p.add_option("--conf", help="BAMBUS configuration file [default: %default]") p.add_option("--prefix", default=False, action="store_true", help="Only keep links between IDs with same prefix [default: %default]") opts, args = p.parse_args(args) nargs = len(args) if nargs < 3 or nargs % 2 != 1: sys.exit(not p.print_help()) rclip = opts.rclip ctgfasta = args[0] duos = list(grouper(args[1:], 2)) trios = [] for fastafile, bedfile in duos: prefix = bedfile.rsplit(".", 1)[0] matefile = prefix + ".mates" matebedfile = matefile + ".bed" if need_update(bedfile, [matefile, matebedfile]): matesopt = [bedfile, "--lib", "--nointra", "--rclip={0}".format(rclip), "--cutoff={0}".format(opts.cutoff)] if opts.prefix: matesopt += ["--prefix"] matefile, matebedfile = mates(matesopt) trios.append((fastafile, matebedfile, matefile)) # Merge the readfasta, bedfile and matefile bbfasta, bbbed, bbmate = "bambus.reads.fasta", "bambus.bed", "bambus.mates" for files, outfile in zip(zip(*trios), (bbfasta, bbbed, bbmate)): FileMerger(files, outfile=outfile).merge(checkexists=True) ctgfile = "bambus.contig" idsfile = "bambus.ids" frombedInputs = [bbbed, ctgfasta, bbfasta] if need_update(frombedInputs, ctgfile): frombed(frombedInputs) inputfasta = "bambus.contigs.fasta" singletonfasta = "bambus.singletons.fasta" cmd = "faSomeRecords {0} {1} ".format(ctgfasta, idsfile) sh(cmd + inputfasta) sh(cmd + singletonfasta + " -exclude") # Run bambus prefix = "bambus" cmd = "goBambus -c {0} -m {1} -o {2}".format(ctgfile, bbmate, prefix) if opts.conf: cmd += " -C {0}".format(opts.conf) sh(cmd) cmd = "untangle -e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml".\ format(prefix) sh(cmd) final = "final" cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " \ "-merge -detail -oo -sum -o {1}".format(prefix, final) sh(cmd) oofile = final + ".oo" join([inputfasta, "--oo={0}".format(oofile)])
def scaffold(args)
%prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings.
3.93345
3.759332
1.046316
from jcvi.formats.fastq import shuffle, pairinplace, split from jcvi.apps.base import getfilesize p = OptionParser(diginorm.__doc__) p.add_option("--single", default=False, action="store_true", help="Single end reads") p.add_option("--tablesize", help="Memory size") p.add_option("--npass", default="1", choices=("1", "2"), help="How many passes of normalization") p.set_depth(depth=50) p.set_home("khmer", default="/usr/local/bin/") opts, args = p.parse_args(args) if len(args) not in (1, 2): sys.exit(not p.print_help()) if len(args) == 2: fastq = shuffle(args + ["--tag"]) else: fastq, = args kh = opts.khmer_home depth = opts.depth PE = not opts.single sys.path.insert(0, op.join(kh, "python")) pf = fastq.rsplit(".", 1)[0] keepfile = fastq + ".keep" hashfile = pf + ".kh" mints = 10000000 ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints) norm_cmd = op.join(kh, "normalize-by-median.py") filt_cmd = op.join(kh, "filter-abund.py") if need_update(fastq, (hashfile, keepfile)): cmd = norm_cmd cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts) if PE: cmd += " -p" cmd += " -s {0} {1}".format(hashfile, fastq) sh(cmd) abundfiltfile = keepfile + ".abundfilt" if need_update((hashfile, keepfile), abundfiltfile): cmd = filt_cmd cmd += " {0} {1}".format(hashfile, keepfile) sh(cmd) if opts.npass == "1": seckeepfile = abundfiltfile else: seckeepfile = abundfiltfile + ".keep" if need_update(abundfiltfile, seckeepfile): cmd = norm_cmd cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2) cmd += " {0}".format(abundfiltfile) sh(cmd) if PE: pairsfile = pairinplace([seckeepfile, "--base={0}".format(pf + "_norm"), "--rclip=2"]) split([pairsfile])
def diginorm(args)
%prog diginorm fastqfile Run K-mer based normalization. Based on tutorial: <http://ged.msu.edu/angus/diginorm-2012/tutorial.html> Assume input is either an interleaved pairs file, or two separate files. To set up khmer: $ git clone git://github.com/ged-lab/screed.git $ git clone git://github.com/ged-lab/khmer.git $ cd screed $ python setup.py install $ cd ../khmer $ make test $ export PYTHONPATH=~/export/khmer
3.442291
3.438977
1.000963
import math from jcvi.formats.fasta import Fasta, SeqIO from jcvi.formats.fastq import readlen, first, fasta from jcvi.formats.blast import Blast from jcvi.formats.base import FileShredder from jcvi.apps.bowtie import align, get_samfile from jcvi.apps.align import blast p = OptionParser(expand.__doc__) p.set_depth(depth=200) p.set_firstN() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bes, reads = args size = Fasta(bes).totalsize rl = readlen([reads]) expected_size = size + 2 * rl nreads = expected_size * opts.depth / rl nreads = int(math.ceil(nreads / 1000.)) * 1000 # Attract reads samfile, logfile = align([bes, reads, "--reorder", "--mapped", "--firstN={0}".format(opts.firstN)]) samfile, mapped, _ = get_samfile(reads, bes, bowtie=True, mapped=True) logging.debug("Extract first {0} reads from `{1}`.".format(nreads, mapped)) pf = mapped.split(".")[0] pf = pf.split("-")[0] bespf = bes.split(".")[0] reads = pf + ".expand.fastq" first([str(nreads), mapped, "-o", reads]) # Perform mini-assembly fastafile = reads.rsplit(".", 1)[0] + ".fasta" qualfile = "" if need_update(reads, fastafile): fastafile, qualfile = fasta([reads]) contigs = op.join(pf, "454LargeContigs.fna") if need_update(fastafile, contigs): cmd = "runAssembly -o {0} -cpu 8 {1}".format(pf, fastafile) sh(cmd) assert op.exists(contigs) # Annotate contigs blastfile = blast([bes, contigs]) mapping = {} for query, b in Blast(blastfile).iter_best_hit(): mapping[query] = b f = Fasta(contigs, lazy=True) annotatedfasta = ".".join((pf, bespf, "fasta")) fw = open(annotatedfasta, "w") keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list recs = [] for key, v in f.iteritems_ordered(): vid = v.id if vid not in mapping: continue b = mapping[vid] subject = b.subject rec = v.reverse_complement() if b.orientation == '-' else v rec.id = rid = "_".join((pf, vid, subject)) rec.description = "" recs.append((keys.index(subject), rid, rec)) recs = [x[-1] for x in sorted(recs)] SeqIO.write(recs, fw, "fasta") fw.close() FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf]) logging.debug("Annotated seqs (n={0}) written to `{1}`.".\ format(len(recs), annotatedfasta)) return annotatedfasta
def expand(args)
%prog expand bes.fasta reads.fastq Expand sequences using short reads. Useful, for example, for getting BAC-end sequences. The template to use, in `bes.fasta`, may just contain the junction sequences; the reads are then aligned to get the 'flanks' for such sequences.
4.4566
4.348136
1.024945
from jcvi.apps.bowtie import BowtieLogFile, align p = OptionParser(contamination.__doc__) p.set_firstN() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ecoli, genome, fq = args firstN_opt = "--firstN={0}".format(opts.firstN) samfile, logfile = align([ecoli, fq, firstN_opt]) bl = BowtieLogFile(logfile) lowerbound = bl.rate samfile, logfile = align([genome, fq, firstN_opt]) bl = BowtieLogFile(logfile) upperbound = 100 - bl.rate median = (lowerbound + upperbound) / 2 clogfile = fq + ".Ecoli" fw = open(clogfile, "w") lowerbound = "{0:.1f}".format(lowerbound) upperbound = "{0:.1f}".format(upperbound) median = "{0:.1f}".format(median) print("\t".join((fq, lowerbound, median, upperbound)), file=fw) print("{0}: Ecoli contamination rate {1}-{2}".\ format(fq, lowerbound, upperbound), file=sys.stderr) fw.close()
def contamination(args)
%prog contamination Ecoli.fasta genome.fasta read.fastq Check read contamination on a folder of paired reads. Use bowtie2 to compare the reads against: 1. Ecoli.fasta - this will tell us the lower bound of contamination 2. genome.fasta - this will tell us the upper bound of contamination
3.443541
3.246171
1.060801
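A worked example of the bounds reported above: if, hypothetically, 0.8% of reads align to E. coli and 96.0% align to the target genome, the reported contamination range is 0.8-4.0% with midpoint 2.4%.

lowerbound = 0.8          # % of reads mapping to Ecoli.fasta
upperbound = 100 - 96.0   # % of reads NOT mapping to genome.fasta
median = (lowerbound + upperbound) / 2
print("{0:.1f}-{1:.1f} (mid {2:.1f})".format(lowerbound, upperbound, median))
# 0.8-4.0 (mid 2.4)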
choices = "prepare,align,filter,rmdup,genreads".split(",") p = OptionParser(alignextend.__doc__) p.add_option("--nosuffix", default=False, action="store_true", help="Do not add /1/2 suffix to the read [default: %default]") p.add_option("--rc", default=False, action="store_true", help="Reverse complement the reads before alignment") p.add_option("--len", default=100, type="int", help="Extend to this length") p.add_option("--stage", default="prepare", choices=choices, help="Start from certain stage") p.add_option("--dup", default=10, type="int", help="Filter duplicates with coordinates within this distance") p.add_option("--maxdiff", default=1, type="int", help="Maximum number of differences") p.set_home("amos") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) ref, r1, r2 = args pf = op.basename(r1).split(".")[0] cmd = op.join(opts.amos_home, "src/Experimental/alignextend.pl") if not opts.nosuffix: cmd += " -suffix" bwa_idx = "{0}.ref.fa.sa".format(pf) if not need_update(ref, bwa_idx): cmd += " -noindex" cmd += " -threads {0}".format(opts.cpus) offset = guessoffset([r1]) if offset == 64: cmd += " -I" if opts.rc: cmd += " -rc" cmd += " -allow -len {0} -dup {1}".format(opts.len, opts.dup) cmd += " -min {0} -max {1}".format(2 * opts.len, 20 * opts.len) cmd += " -maxdiff {0}".format(opts.maxdiff) cmd += " -stage {0}".format(opts.stage) cmd += " ".join(("", pf, ref, r1, r2)) sh(cmd)
def alignextend(args)
%prog alignextend ref.fasta read.1.fastq read.2.fastq Wrapper around AMOS alignextend.
3.195261
3.073931
1.039471
from jcvi.utils.table import loadtable, write_csv p = OptionParser(count.__doc__) p.add_option("--dir", help="Sub-directory where FASTQC was run [default: %default]") p.add_option("--human", default=False, action="store_true", help="Human friendly numbers [default: %default]") p.set_table() p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) filenames = args subdir = opts.dir header = "Filename|Total Sequences|Sequence length|Total Bases".split("|") rows = [] human = opts.human for f in filenames: folder = f.replace(".gz", "").rsplit(".", 1)[0] + "_fastqc" if subdir: folder = op.join(subdir, folder) summaryfile = op.join(folder, "fastqc_data.txt") fqcdata = FastQCdata(summaryfile, human=human) row = [fqcdata[x] for x in header] rows.append(row) print(loadtable(header, rows), file=sys.stderr) write_csv(header, rows, sep=opts.sep, filename=opts.outfile, align=opts.align)
def count(args)
%prog count *.gz Count reads based on FASTQC results. FASTQC needs to be run on all the input data given before running this command.
3.325669
3.271976
1.01641
p = OptionParser(hetsmooth.__doc__) p.add_option("-K", default=23, type="int", help="K-mer size [default: %default]") p.add_option("-L", type="int", help="Bottom threshold, first min [default: %default]") p.add_option("-U", type="int", help="Top threshold, second min [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) reads1fq, reads2fq, jfdb = args K = opts.K L = opts.L U = opts.U assert L is not None and U is not None, "Please specify -L and -U" cmd = "het-smooth --kmer-len={0}".format(K) cmd += " --bottom-threshold={0} --top-threshold={1}".format(L, U) cmd += " --no-multibase-replacements --jellyfish-hash-file={0}".format(jfdb) cmd += " --no-reads-log" cmd += " " + " ".join((reads1fq, reads2fq)) sh(cmd)
def hetsmooth(args)
%prog hetsmooth reads_1.fq reads_2.fq jf-23_0 Wrapper around het-smooth. Below is the command used in the het-smooth manual. $ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220 --no-multibase-replacements --jellyfish-hash-file=23-mers.jf reads_1.fq reads_2.fq
3.177659
2.148694
1.478879
from jcvi.assembly.allpaths import prepare from jcvi.assembly.base import FastqNamings p = OptionParser(correct.__doc__ + FastqNamings) p.add_option("--dir", default="data", help="Working directory [default: %default]") p.add_option("--fragsdedup", default=False, action="store_true", help="Don't deduplicate the fragment reads [default: %default]") p.add_option("--ploidy", default="2", choices=("1", "2"), help="Ploidy [default: %default]") p.add_option("--haploidify", default=False, action="store_true", help="Set HAPLOIDIFY=True [default: %default]") p.add_option("--suffix", default=False, action="store_true", help="Add suffix /1, /2 to read names") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fastq = args tag, tagj, taglj = "frag_reads", "jump_reads", "long_jump_reads" ploidy = opts.ploidy haploidify = opts.haploidify suffix = opts.suffix assert (not haploidify) or (haploidify and ploidy == '2') prepare(["Unknown"] + fastq + ["--norun"]) datadir = opts.dir mkdir(datadir) fullpath = op.join(os.getcwd(), datadir) nthreads = " NUM_THREADS={0}".format(opts.cpus) phred64 = (guessoffset([args[0]]) == 64) orig = datadir + "/{0}_orig".format(tag) origfastb = orig + ".fastb" if need_update(fastq, origfastb): cmd = "PrepareAllPathsInputs.pl DATA_DIR={0} HOSTS='{1}' PLOIDY={2}".\ format(fullpath, opts.cpus, ploidy) if phred64: cmd += " PHRED_64=True" sh(cmd) if op.exists(origfastb): correct_frag(datadir, tag, origfastb, nthreads, dedup=opts.fragsdedup, haploidify=haploidify, suffix=suffix) origj = datadir + "/{0}_orig".format(tagj) origjfastb = origj + ".fastb" if op.exists(origjfastb): correct_jump(datadir, tagj, origjfastb, nthreads, suffix=suffix) origlj = datadir + "/{0}_orig".format(taglj) origljfastb = origlj + ".fastb" if op.exists(origljfastb): correct_jump(datadir, taglj, origljfastb, nthreads, suffix=suffix)
def correct(args)
%prog correct *.fastq Correct the fastqfiles and generate corrected fastqfiles. This calls assembly.allpaths.prepare() to generate input files for ALLPATHS-LG. The naming convention for your fastqfiles is important, and is listed below. By default, this will correct all PE reads, and remove duplicates of all MP reads, and results will be placed in `frag_reads.corr.{pairs,frags}.fastq` and `jump_reads.corr.{pairs,frags}.fastq`.
3.256847
3.027355
1.075806
import re import csv p = OptionParser(fetch.__doc__) p.add_option("--format", default="tab", choices=valid_formats, help="download format [default: %default]") p.add_option("--columns", default="entry name, protein names, genes,organism", help="columns to download, if --format is `tab` or `xls`." + " [default: %default]") p.add_option("--include", default=False, action="store_true", help="Include isoforms when --format is `fasta` or include `description` when" + " --format is `rdf`. [default: %default]") p.add_option("--limit", default=10, type="int", help="Max number of results to retrieve [default: %default]") p.add_option("--offset", default=0, type="int", help="Offset of first result, used with --limit [default: %default]") p.add_option("--skipcheck", default=False, action="store_true", help="turn off prompt to check file existence [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) query, = args url_params = {} if op.exists(query): pf = query.rsplit(".", 1)[0] list_of_queries = [row.strip() for row in open(query)] else: # the query is the search term pf = query.strip().strip('\"') list_of_queries = [pf] pf = re.sub(r"\s+", '_', pf) assert len(list_of_queries) > 0, \ "Please provide atleast one input query" url_params['format'] = opts.format if opts.columns and opts.format in valid_column_formats: reader = csv.reader([opts.columns], skipinitialspace=True) cols = [col for r in reader for col in r] for col in cols: assert col in valid_columns, \ "Column '{0}' is not a valid. Allowed options are {1}".\ format(col, valid_columns) url_params['columns'] = ",".join(cols) if opts.include and opts.format in valid_include_formats: url_params['include'] = "yes" url_params['limit'] = opts.limit url_params['offset'] = opts.offset outfile = "{0}.{1}".format(pf, opts.format) # If noprompt, will not check file existence fw = must_open(outfile, "w", checkexists=True, skipcheck=opts.skipcheck) if fw is None: return seen = set() for query in list_of_queries: if query in seen: logging.error("Duplicate query ({0}) found".format(query)) continue url_params['query'] = query data = urlencode(url_params) try: request = Request(uniprot_url, data) response = urlopen(request) except (HTTPError, URLError, RuntimeError, KeyError) as e: logging.error(e) logging.debug("wait 5 seconds to reconnect...") time.sleep(5) page = response.read() if not page: logging.error("query `{0}` yielded no results".format(query)) continue print(page, file=fw) seen.add(query) if seen: print("A total of {0} out of {1} queries returned results.". format(len(seen), len(list_of_queries)), file=sys.stderr)
def fetch(args)
%prog fetch "query" OR %prog fetch queries.txt Please provide a UniProt compatible `query` to retrieve data. If `query` contains spaces, please remember to "quote" it. You can also specify a `filename` which contains queries, one per line. Follow this syntax <http://www.uniprot.org/help/text-search#text-search-syntax> to query any of the documented fields <http://www.uniprot.org/help/query-fields>
3.110633
3.037267
1.024155
chr, se = s.split(":") start, end = se.split("-") start, end = int(start), int(end) if start > end: start, end = end, start return Range(chr, start, end, 0, 0)
def range_parse(s)
>>> range_parse("chr1:1000-1") Range(seqid='chr1', start=1, end=1000, score=0, id=0)
3.141739
2.639445
1.190303
a_min, a_max = a if a_min > a_max: a_min, a_max = a_max, a_min b_min, b_max = b if b_min > b_max: b_min, b_max = b_max, b_min if a_max + extend < b_min or b_max + extend < a_min: return None i_min = max(a_min, b_min) i_max = min(a_max, b_max) if i_min > i_max + extend: return None return [i_min, i_max]
def range_intersect(a, b, extend=0)
Returns the intersection between two ranges. >>> range_intersect((30, 45), (55, 65)) >>> range_intersect((48, 65), (45, 55)) [48, 55]
1.632901
1.791242
0.911602
if not rset: return None a = rset[0] for b in rset[1:]: if not a: return None a = range_intersect(a, b) return a
def ranges_intersect(rset)
Calls range_intersect() pairwise across the set of ranges. >>> ranges_intersect([(48, 65), (45, 55), (50, 56)]) [50, 55]
2.908381
3.328662
0.873739
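One more illustrative call, in addition to the doctest above: if any pairwise intersection is empty, the whole result collapses to None.

print(ranges_intersect([(48, 65), (45, 55), (50, 56)]))  # [50, 55]
print(ranges_intersect([(10, 20), (30, 40)]))            # None, the two ranges are disjoint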