field                     dtype
code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
p = OptionParser(multireport.__doc__)
p.set_outfile(outfile="Ks_plot.pdf")
add_plot_options(p)
opts, args, iopts = p.set_image_options(args, figsize="5x5")

if len(args) != 1:
    sys.exit(not p.print_help())

layoutfile, = args
ks_min = opts.vmin
ks_max = opts.vmax
bins = opts.bins
fill = opts.fill
layout = Layout(layoutfile)
print(layout, file=sys.stderr)

fig = plt.figure(1, (iopts.w, iopts.h))
ax = fig.add_axes([.12, .1, .8, .8])
kp = KsPlot(ax, ks_max, bins, legendp=opts.legendp)
for lo in layout:
    data = KsFile(lo.ksfile)
    data = [x.ng_ks for x in data]
    data = [x for x in data if ks_min <= x <= ks_max]
    kp.add_data(data, lo.components, label=lo.label,
                color=lo.color, marker=lo.marker,
                fill=fill, fitted=opts.fit)
kp.draw(title=opts.title, filename=opts.outfile)
def multireport(args)
%prog multireport layoutfile

Generate several Ks value distributions in the same figure. If the layout
file is missing, a template listing all Ks files will be written.

The layout file contains the Ks file, number of components, label, color,
and marker:

# Ks file, ncomponents, label, color, marker
LAP.sorghum.ks, 1, LAP-sorghum, r, o
SES.sorghum.ks, 1, SES-sorghum, g, +
MOL.sorghum.ks, 1, MOL-sorghum, m, ^

If color or marker is missing, a random one will be assigned.
3.496681
3.184669
1.097973
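A minimal sketch of the layout-file parsing that multireport relies on. The field order follows the columns shown in the docstring above; the function name, the random fallback for a missing color/marker, and the candidate palettes are illustrative assumptions, not the Layout class's actual implementation:

import random

def parse_layout_line(line):
    # "LAP.sorghum.ks, 1, LAP-sorghum, r, o" ->
    # (ksfile, ncomponents, label, color, marker)
    atoms = [x.strip() for x in line.split(",")]
    ksfile, ncomponents, label = atoms[0], int(atoms[1]), atoms[2]
    color = atoms[3] if len(atoms) > 3 and atoms[3] else random.choice("rgbcmyk")
    marker = atoms[4] if len(atoms) > 4 and atoms[4] else random.choice("o^+sx")
    return ksfile, ncomponents, label, color, marker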
p = OptionParser(gc3.__doc__)
p.add_option("--plot", default=False, action="store_true",
             help="Also plot the GC3 histogram [default: %default]")
p.set_outfile()

opts, args = p.parse_args(args)
outfile = opts.outfile
plot = opts.plot

if not 1 < len(args) < 4:
    sys.exit(not p.print_help())

ks_file, cdsfile = args[:2]
GC3 = get_GC3(cdsfile)
if plot:
    plot_GC3(GC3, cdsfile, fill="green")

if len(args) == 3:
    cdsfile2 = args[2]
    GC3_2 = get_GC3(cdsfile2)
    GC3.update(GC3_2)
    if plot:
        plot_GC3(GC3_2, cdsfile2, fill="lightgreen")

data = KsFile(ks_file)
noriginals = len(data)

fw = must_open(outfile, "w")
writer = csv.writer(fw)
writer.writerow(fields.split(","))
nlines = 0
cutoff = .75
for d in data:
    a, b = d.name.split(";")
    aratio, bratio = GC3[a], GC3[b]
    if (aratio + bratio) / 2 > cutoff:
        continue
    writer.writerow(d)
    nlines += 1
logging.debug("{0} records written (from {1}).".format(nlines, noriginals))
def gc3(args)
%prog gc3 ksfile cdsfile [cdsfile2] -o newksfile

Filter the Ks results to remove high-GC3 genes. High-GC3 genes are
problematic in Ks calculation - see Tang et al. 2010 PNAS. Specifically,
the two calculation methods produce drastically different results for
these pairs, so we advise removing these high-GC3 genes. This is often
the case when studying cereal genes.

If two genomes are involved, the cdsfile of the second genome can be
provided either concatenated or separately.
3.200287
2.896285
1.104963
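get_GC3() itself is not shown in this section. As a reference point, GC3 is the fraction of third-codon-position bases that are G or C; a minimal sketch of that computation (the function name is an assumption, not the jcvi helper):

def gc3_of_cds(seq):
    # take every third base, starting at codon position 3 (index 2)
    third = seq[2::3].upper()
    return sum(1 for x in third if x in "GC") / float(len(third))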
agenome = op.basename(abed.filename).split(".")[0]
bgenome = op.basename(bbed.filename).split(".")[0]
aorder = abed.order
border = bbed.order
pairsfile = "{0}.{1}.pairs".format(agenome, bgenome)
fw = open(pairsfile, "w")

is_self = abed.filename == bbed.filename
npairs = 0
for group in groups:
    # combinations() for within-genome pairs, product() across genomes
    it = combinations(group, 2) if is_self else product(group, repeat=2)
    for a, b in it:  # renamed from `iter`, which shadowed the builtin
        if a not in aorder or b not in border:
            continue
        print("\t".join((a, b)), file=fw)
        npairs += 1
logging.debug("File `{0}` written with {1} pairs.".format(pairsfile, npairs))
def extract_pairs(abed, bbed, groups)
Called by fromgroups(); extracts pairs specific to a pair of species.
3.224095
3.154078
1.022199
from jcvi.formats.bed import Bed

p = OptionParser(fromgroups.__doc__)
opts, args = p.parse_args(args)

if len(args) < 2:
    sys.exit(not p.print_help())

groupsfile = args[0]
bedfiles = args[1:]
beds = [Bed(x) for x in bedfiles]
fp = open(groupsfile)
groups = [row.strip().split(",") for row in fp]
for b1, b2 in product(beds, repeat=2):
    extract_pairs(b1, b2, groups)
def fromgroups(args)
%prog fromgroups groupsfile a.bed b.bed ...

Flatten the gene families into pairs. The groupsfile is a file with each
line containing the members, separated by commas. The command also
requires several BED files in order to sort the pairs into different
piles (e.g. pairs of species in comparison).
2.570176
2.504405
1.026262
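The flattening step in fromgroups is simply "each family row becomes all unordered member pairs". A standalone sketch of that core idea (the function name and gene IDs are illustrative):

from itertools import combinations

def family_to_pairs(row):
    members = row.strip().split(",")
    return list(combinations(members, 2))

# family_to_pairs("geneA,geneB,geneC")
# -> [('geneA', 'geneB'), ('geneA', 'geneC'), ('geneB', 'geneC')]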
from jcvi.formats.fasta import Fasta

p = OptionParser(prepare.__doc__)
p.set_outfile()

opts, args = p.parse_args(args)
outfile = opts.outfile

if len(args) == 2:
    pairsfile, cdsfile = args
    pepfile = None
elif len(args) == 3:
    pairsfile, cdsfile, pepfile = args
else:
    sys.exit(not p.print_help())

f = Fasta(cdsfile)
fp = open(pairsfile)
fw = must_open(outfile, "w")
if pepfile:
    assert outfile != "stdout", "Please specify outfile name."
    f2 = Fasta(pepfile)
    fw2 = must_open(outfile + ".pep", "w")

for row in fp:
    if row[0] == '#':
        continue
    a, b = row.split()[:2]
    if a == b:
        logging.debug("Self pairs found: {0} - {1}. Ignored".format(a, b))
        continue

    if a not in f:
        a = find_first_isoform(a, f)
        assert a, a
    if b not in f:
        b = find_first_isoform(b, f)
        assert b, b

    acds = f[a]
    bcds = f[b]
    SeqIO.write((acds, bcds), fw, "fasta")
    if pepfile:
        apep = f2[a]
        bpep = f2[b]
        SeqIO.write((apep, bpep), fw2, "fasta")
fw.close()
if pepfile:
    fw2.close()
def prepare(args)
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta

Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
2.372474
2.190009
1.083317
from jcvi.formats.fasta import translate

p = OptionParser(calc.__doc__)
p.add_option("--longest", action="store_true",
             help="Get longest ORF, only works if no pep file, "
                  "e.g. ESTs [default: %default]")
p.add_option("--msa", default="clustalw", choices=("clustalw", "muscle"),
             help="software used to align the proteins [default: %default]")
p.add_option("--workdir", default=os.getcwd(), help="Work directory")
p.set_outfile()

opts, args = p.parse_args(args)

if len(args) == 1:
    protein_file, dna_file = None, args[0]
elif len(args) == 2:
    protein_file, dna_file = args
else:
    print("Incorrect arguments", file=sys.stderr)
    sys.exit(not p.print_help())

output_h = must_open(opts.outfile, "w")
print(fields, file=output_h)
work_dir = op.join(opts.workdir, "syn_analysis")
mkdir(work_dir)

if not protein_file:
    protein_file = dna_file + ".pep"
    translate_args = [dna_file, "--outfile=" + protein_file]
    if opts.longest:
        translate_args += ["--longest"]
    dna_file, protein_file = translate(translate_args)

prot_iterator = SeqIO.parse(open(protein_file), "fasta")
dna_iterator = SeqIO.parse(open(dna_file), "fasta")
for p_rec_1, p_rec_2, n_rec_1, n_rec_2 in zip(prot_iterator, prot_iterator,
                                              dna_iterator, dna_iterator):
    print("--------", p_rec_1.name, p_rec_2.name, file=sys.stderr)
    if opts.msa == "clustalw":
        align_fasta = clustal_align_protein((p_rec_1, p_rec_2), work_dir)
    elif opts.msa == "muscle":
        align_fasta = muscle_align_protein((p_rec_1, p_rec_2), work_dir)
    mrtrans_fasta = run_mrtrans(align_fasta, (n_rec_1, n_rec_2), work_dir)
    if mrtrans_fasta:
        ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng = \
            find_synonymous(mrtrans_fasta, work_dir)
        if ds_subs_yn is not None:
            pair_name = "%s;%s" % (p_rec_1.name, p_rec_2.name)
            output_h.write("%s\n" % (",".join(str(x) for x in (
                pair_name, ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng))))
            output_h.flush()

# Clean-up
sh("rm -rf 2YN.t 2YN.dN 2YN.dS rst rub rst1 syn_analysis")
def calc(args)
%prog calc [prot.fasta] cds.fasta > out.ks

Protein file is optional. If only one file is given, it is assumed to be
CDS sequences with correct frame (frame 0). Results will be written to
stdout. Both protein and nucleotide files are assumed to be in FASTA
format, with adjacent records being the pairs to compare.

Author: Haibao Tang <[email protected]>, Brad Chapman, Jingping Li

Calculate synonymous mutation rates for gene pairs. This does the
following:
1. Fetch a protein pair.
2. Align the protein pair with clustalw (default) or muscle.
3. Convert the output to FASTA format.
4. Use this alignment info to align gene sequences using PAL2NAL.
5. Run PAML yn00 to calculate synonymous mutation rates.
3.051007
2.903537
1.05079
cwd = os.getcwd()
os.chdir(work_dir)
# create the .ctl file
ctl_file = "yn-input.ctl"
output_file = "nuc-subs.yn"
ctl_h = open(ctl_file, "w")
ctl_h.write("seqfile = %s\noutfile = %s\nverbose = 0\n" %
            (op.basename(input_file), output_file))
ctl_h.write("icode = 0\nweighting = 0\ncommonf3x4 = 0\n")
ctl_h.close()

cl = YnCommandline(ctl_file)
print("\tyn00:", cl, file=sys.stderr)
r, e = cl.run()
ds_value_yn = None
ds_value_ng = None
dn_value_yn = None
dn_value_ng = None

# Nei-Gojobori
output_h = open(output_file)
row = output_h.readline()
while row:
    if row.find("Nei & Gojobori") >= 0:
        for x in range(5):  # was xrange() (Python 2)
            row = next(output_h)
        dn_value_ng, ds_value_ng = row.split('(')[1].split(')')[0].split()
        break
    row = output_h.readline()
output_h.close()

# Yang
output_h = open(output_file)
for line in output_h:
    if line.find("+-") >= 0 and line.find("dS") == -1:
        parts = line.split(" +-")
        ds_value_yn = extract_subs_value(parts[1])
        dn_value_yn = extract_subs_value(parts[0])

if ds_value_yn is None or ds_value_ng is None:
    h = open(output_file)
    print("yn00 didn't work: \n%s" % h.read(), file=sys.stderr)

os.chdir(cwd)
return ds_value_yn, dn_value_yn, ds_value_ng, dn_value_ng
def find_synonymous(input_file, work_dir)
Run yn00 to find the synonymous substitution rate for the alignment.
3.955684
3.681017
1.074617
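For reference, the control file that find_synonymous() writes for yn00 looks like the following (with the basename of the PAL2NAL output substituted in as seqfile):

seqfile = nuc-align.mrtrans
outfile = nuc-subs.yn
verbose = 0
icode = 0
weighting = 0
commonf3x4 = 0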
align_file = op.join(work_dir, "prot-align.fasta") nuc_file = op.join(work_dir, "nuc.fasta") output_file = op.join(work_dir, "nuc-align.mrtrans") # make the prot_align file and nucleotide file align_h0 = open(align_file + "0", "w") align_h0.write(str(align_fasta)) align_h0.close() prot_seqs = {} i = 0 for rec in SeqIO.parse(align_h0.name, "fasta"): prot_seqs[i] = rec.seq i += 1 align_h = open(align_file, "w") for i, rec in enumerate(recs): if len(rec.id) > 30: rec.id = rec.id[:28] + "_" + str(i) rec.description = "" print(">{0}\n{1}".format(rec.id, prot_seqs[i]), file=align_h) align_h.close() SeqIO.write(recs, file(nuc_file, "w"), "fasta") # run the program cl = MrTransCommandline(align_file, nuc_file, output_file, outfmt=outfmt) r, e = cl.run() if e is None: print("\tpal2nal:", cl, file=sys.stderr) return output_file elif e.read().find("could not translate") >= 0: print("***pal2nal could not translate", file=sys.stderr) return None
def run_mrtrans(align_fasta, recs, work_dir, outfmt="paml")
Align nucleotide sequences with mrtrans, using the protein alignment as a guide.
3.005509
2.97525
1.01017
fasta_file = op.join(work_dir, "prot-start.fasta")
align_file = op.join(work_dir, "prot.aln")
SeqIO.write(recs, open(fasta_file, "w"), "fasta")  # file() was Python 2 only

clustal_cl = ClustalwCommandline(cmd=CLUSTALW_BIN("clustalw2"),
                                 infile=fasta_file, outfile=align_file,
                                 outorder="INPUT", type="PROTEIN")
stdout, stderr = clustal_cl()

aln_file = open(clustal_cl.outfile)
alignment = AlignIO.read(aln_file, "clustal")
print("\tDoing clustalw alignment: %s" % clustal_cl, file=sys.stderr)
if outfmt == "fasta":
    return alignment.format("fasta")
if outfmt == "clustal":
    return alignment
def clustal_align_protein(recs, work_dir, outfmt="fasta")
Align given proteins with clustalw. recs is an iterable of Biopython SeqRecord objects.
3.278415
3.288432
0.996954
fasta_file = op.join(work_dir, "prot-start.fasta")
align_file = op.join(work_dir, "prot.aln")
SeqIO.write(recs, open(fasta_file, "w"), "fasta")  # file() was Python 2 only

muscle_cl = MuscleCommandline(cmd=MUSCLE_BIN("muscle"),
                              input=fasta_file, out=align_file,
                              seqtype="protein", clwstrict=True)
stdout, stderr = muscle_cl()
alignment = AlignIO.read(muscle_cl.out, "clustal")

if inputorder:
    try:
        muscle_inputorder(muscle_cl.input, muscle_cl.out)
    except ValueError:
        return ""
    alignment = AlignIO.read(muscle_cl.out, "fasta")

print("\tDoing muscle alignment: %s" % muscle_cl, file=sys.stderr)
if outfmt == "fasta":
    return alignment.format("fasta")
if outfmt == "clustal":
    return alignment.format("clustal")
def muscle_align_protein(recs, work_dir, outfmt="fasta", inputorder=True)
Align given proteins with muscle. recs is an iterable of Biopython SeqRecord objects.
3.183319
3.283579
0.969466
sh("cp {0} {0}.old".format(alnfile), log=False) maxi = 30 if trunc_name else 1000 aa = AlignIO.read(alnfile, "clustal") alignment = dict((a.id[:maxi], a) for a in aa) if trunc_name and len(alignment) < len(aa): raise ValueError\ ("ERROR: The first 30 chars of your seq names are not unique") fw = must_open(alnfile, "w") for rec in SeqIO.parse(inputfastafile, "fasta"): a = alignment[rec.id[:maxi]] fw.write(">{0}\n{1}\n".format(a.id[:maxi], a.seq)) fw.close() sh("rm {0}.old".format(alnfile), log=False)
def muscle_inputorder(inputfastafile, alnfile, trunc_name=True)
Fix for the muscle -stable option, following: http://drive5.com/muscle/stable.html
3.647979
3.746166
0.97379
p = OptionParser(subset.__doc__)
p.add_option("--noheader", action="store_true",
             help="don't write ksfile header line [default: %default]")
p.add_option("--block", action="store_true",
             help="preserve block structure in input [default: %default]")
p.set_stripnames()
p.set_outfile()

opts, args = p.parse_args(args)

if len(args) < 2:  # multiple ksfiles are allowed, so "!= 2" was too strict
    sys.exit(not p.print_help())

pairsfile, ksfiles = args[0], args[1:]
noheader = opts.noheader
block = opts.block
if block:
    noheader = True
outfile = opts.outfile

ksvals = {}
for ksfile in ksfiles:
    ksvals.update(dict((line.name, line) for line in
                       KsFile(ksfile, strip_names=opts.strip_names)))

fp = open(pairsfile)
fw = must_open(outfile, "w")

if not noheader:
    print(fields, file=fw)

i = j = 0
for row in fp:
    if row[0] == '#':
        if block:
            print(row.strip(), file=fw)
        continue
    a, b = row.split()[:2]
    name = ";".join((a, b))
    if name not in ksvals:
        name = ";".join((b, a))
        if name not in ksvals:
            j += 1
            print("\t".join((a, b, ".", ".")), file=fw)
            continue
    ksline = ksvals[name]
    if block:
        print("\t".join(str(x) for x in (a, b, ksline.ks)), file=fw)
    else:
        ksline.name = ";".join((a, b))
        print(ksline, file=fw)
    i += 1
fw.close()

logging.debug("{0} pairs not found in ksfiles".format(j))
logging.debug("{0} ks records written to `{1}`".format(i, outfile))
return outfile
def subset(args)
%prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks

Subset pre-calculated Ks/Ka values (in the ksfiles) according to pairs in
the tab-delimited pairsfile/anchorfile.
2.709919
2.573107
1.05317
from jcvi.apps.base import popen

probs, mus, sigmas = [], [], []
fw = must_open("tmp", "w")
log_data = [log(x) for x in data if x > .05]
data = "\n".join(["%.4f" % x for x in log_data]).replace("inf\n", "")
fw.write(data)
fw.close()

cmd = "gmm-bic {0} {1} {2}".format(components, len(log_data), fw.name)
pipe = popen(cmd)

for row in pipe:
    if row[0] != '#':
        continue

    atoms = row.split(",")
    a, b, c = atoms[1:4]
    a = float(a)
    b = float(b)
    c = float(c)

    mus.append(a)
    sigmas.append(b)
    probs.append(c)

os.remove(fw.name)
return probs, mus, sigmas
def get_mixture(data, components)
Fit a mixture model to the log-transformed data via the external `gmm-bic`
program. Example return values:

probs = [.476, .509]
mus = [.69069, -.15038]
variances = [.468982e-1, .959052e-1]
3.271788
3.239517
1.009962
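get_mixture() shells out to an external gmm-bic binary. If that tool is unavailable, an in-process alternative is scikit-learn's GaussianMixture; a sketch under that assumption (this is a substitute, not the author's method, and omits the BIC-based model selection gmm-bic may perform):

import numpy as np
from sklearn.mixture import GaussianMixture

def get_mixture_sklearn(data, components):
    # log-transform, mirroring the preprocessing in get_mixture() above
    log_data = np.log([x for x in data if x > .05]).reshape(-1, 1)
    gmm = GaussianMixture(n_components=components).fit(log_data)
    probs = gmm.weights_.tolist()
    mus = gmm.means_.ravel().tolist()
    sigmas = np.sqrt(gmm.covariances_).ravel().tolist()
    return probs, mus, sigmas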
from jcvi.utils.cbook import SummaryStats
from jcvi.graphics.histogram import stem_leaf_plot

p = OptionParser(report.__doc__)
p.add_option("--pdf", default=False, action="store_true",
             help="Generate graphic output for the histogram [default: %default]")
p.add_option("--components", default=1, type="int",
             help="Number of components to decompose peaks [default: %default]")
add_plot_options(p)
opts, args, iopts = p.set_image_options(args, figsize="5x5")

if len(args) != 1:
    sys.exit(not p.print_help())

ks_file, = args
data = KsFile(ks_file)
ks_min = opts.vmin
ks_max = opts.vmax
bins = opts.bins
for f in fields.split(",")[1:]:
    columndata = [getattr(x, f) for x in data]
    ks = ("ks" in f)
    if not ks:
        continue

    columndata = [x for x in columndata if ks_min <= x <= ks_max]

    st = SummaryStats(columndata)
    title = "{0} ({1}): ".format(descriptions[f], ks_file)
    title += "Median:{0:.3f} (1Q:{1:.3f}|3Q:{2:.3f}||".\
        format(st.median, st.firstq, st.thirdq)
    title += "Mean:{0:.3f}|Std:{1:.3f}||N:{2})".\
        format(st.mean, st.sd, st.size)

    tbins = (0, ks_max, bins) if ks else (0, .6, 10)
    digit = 2 if (ks_max * 1. / bins) < .1 else 1
    stem_leaf_plot(columndata, *tbins, digit=digit, title=title)

if not opts.pdf:
    return

components = opts.components
data = [x.ng_ks for x in data]
data = [x for x in data if ks_min <= x <= ks_max]

fig = plt.figure(1, (iopts.w, iopts.h))
ax = fig.add_axes([.12, .1, .8, .8])
kp = KsPlot(ax, ks_max, opts.bins, legendp=opts.legendp)
kp.add_data(data, components, fill=opts.fill, fitted=opts.fit)
kp.draw(title=opts.title)
def report(args)
%prog report ksfile

Generate a report given a Ks result file (as produced by
synonymous_calc.py). Describe the median Ks and Ka values, as well as the
distribution in a stem-and-leaf plot.
3.796185
3.03931
1.249028
p = OptionParser(passthrough.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

vcffile, newvcffile = args
fp = open(vcffile)
fw = open(newvcffile, "w")
gg = ["0/0", "0/1", "1/1"]
for row in fp:
    if row[0] == "#":
        print(row.strip(), file=fw)
        continue
    v = VcfLine(row)
    v.filter = "PASS"
    v.format = "GT:GP"
    probs = [0] * 3
    probs[gg.index(v.genotype)] = 1
    v.genotype = v.genotype.replace("/", "|") + \
        ":{0}".format(",".join("{0:.3f}".format(x) for x in probs))
    print(v, file=fw)
fw.close()
def passthrough(args)
%prog passthrough chrY.vcf chrY.new.vcf Pass through Y and MT vcf.
2.850423
2.547111
1.119081
p = OptionParser(validate.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

imputed, withheld = args
register = {}
fp = open(withheld)
for row in fp:
    if row[0] == "#":
        continue
    v = VcfLine(row)
    register[(v.seqid, v.pos)] = v.genotype

logging.debug("Imported {0} records from `{1}`".
              format(len(register), withheld))

fp = must_open(imputed)
hit = concordant = 0
seen = set()
for row in fp:
    if row[0] == "#":
        continue
    v = VcfLine(row)
    chr, pos, genotype = v.seqid, v.pos, v.genotype
    if (chr, pos) in seen:
        continue
    seen.add((chr, pos))
    if (chr, pos) not in register:
        continue
    truth = register[(chr, pos)]
    imputed = genotype.split(":")[0]
    if "|" in imputed:
        imputed = "/".join(sorted(genotype.split(":")[0].split("|")))
    #probs = [float(x) for x in genotype.split(":")[-1].split(",")]
    #imputed = max(zip(probs, ["0/0", "0/1", "1/1"]))[-1]
    hit += 1
    if truth == imputed:
        concordant += 1
    else:
        print(row.strip(), "truth={0}".format(truth), file=sys.stderr)
logging.debug("Total concordant: {0}".
              format(percentage(concordant, hit)))
def validate(args)
%prog validate imputed.vcf withheld.vcf Validate imputation against withheld variants.
3.010177
2.656217
1.133257
p = OptionParser(minimac.__doc__)
p.set_home("shapeit")
p.set_home("minimac")
p.set_outfile()
p.set_chr()
p.set_ref()
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

txtfile, = args
ref = opts.ref
mm = MakeManager()
pf = txtfile.split(".")[0]
allrawvcf = []
alloutvcf = []
chrs = opts.chr.split(",")
for x in chrs:
    px = CM[x]
    chrvcf = pf + ".{0}.vcf".format(px)
    if txtfile.endswith(".vcf"):
        cmd = "vcftools --vcf {0} --chr {1}".format(txtfile, x)
        cmd += " --out {0}.{1} --recode".format(pf, px)
        cmd += " && mv {0}.{1}.recode.vcf {2}".format(pf, px, chrvcf)
    else:  # 23andme
        cmd = "python -m jcvi.formats.vcf from23andme {0} {1}".format(txtfile, x)
        cmd += " --ref {0}".format(ref)
    mm.add(txtfile, chrvcf, cmd)

    chrvcf_hg38 = pf + ".{0}.23andme.hg38.vcf".format(px)
    minimac_liftover(mm, chrvcf, chrvcf_hg38, opts)
    allrawvcf.append(chrvcf_hg38)

    minimacvcf = "{0}.{1}.minimac.dose.vcf".format(pf, px)
    if x == "X":
        minimac_X(mm, x, chrvcf, opts)
    elif x in ["Y", "MT"]:
        cmd = "python -m jcvi.variation.impute passthrough"
        cmd += " {0} {1}".format(chrvcf, minimacvcf)
        mm.add(chrvcf, minimacvcf, cmd)
    else:
        minimac_autosome(mm, x, chrvcf, opts)

    # keep the best line for multi-allelic markers
    uniqvcf = "{0}.{1}.minimac.uniq.vcf".format(pf, px)
    cmd = "python -m jcvi.formats.vcf uniq {0} > {1}".\
        format(minimacvcf, uniqvcf)
    mm.add(minimacvcf, uniqvcf, cmd)

    minimacvcf_hg38 = "{0}.{1}.minimac.hg38.vcf".format(pf, px)
    minimac_liftover(mm, uniqvcf, minimacvcf_hg38, opts)
    alloutvcf.append(minimacvcf_hg38)

if len(allrawvcf) > 1:
    rawhg38vcfgz = pf + ".all.23andme.hg38.vcf.gz"
    cmd = "vcf-concat {0} | bgzip > {1}".format(" ".join(allrawvcf), rawhg38vcfgz)
    mm.add(allrawvcf, rawhg38vcfgz, cmd)

if len(alloutvcf) > 1:
    outhg38vcfgz = pf + ".all.minimac.hg38.vcf.gz"
    cmd = "vcf-concat {0} | bgzip > {1}".format(" ".join(alloutvcf), outhg38vcfgz)
    mm.add(alloutvcf, outhg38vcfgz, cmd)

mm.write()
def minimac(args)
%prog minimac input.txt

Use MINIMAC3 to impute vcf on all chromosomes.
2.48657
2.460213
1.010713
pf = vcffile.rsplit(".", 1)[0]
ranges = [(1, 2699519), (2699520, 154931043), (154931044, 155270560)]
tags = ["PAR1", "NONPAR", "PAR2"]
Xvcf = []
phasedfiles = []
for tag, (start, end) in zip(tags, ranges):
    recodefile = pf + "_{0}.recode.vcf".format(tag)
    cmd = "vcftools --vcf {0} --out {1}_{2}".format(vcffile, pf, tag)
    cmd += " --chr X --from-bp {0} --to-bp {1} --recode".format(start, end)
    mm.add(vcffile, recodefile, cmd)

    phasedfile = shapeit_phasing(mm, chr + "_{0}".format(tag), recodefile, opts)
    phasedfiles.append(phasedfile)

pars = [x for x in phasedfiles if "_PAR" in x]
parfile = pf + "_PAR.recode.phased.vcf"
nonparfile = pf + "_NONPAR.recode.phased.vcf"
cmd = "vcf-concat {0} > {1}".format(" ".join(pars), parfile)
mm.add(pars, parfile, cmd)

for phasedfile in (parfile, nonparfile):
    outvcf = minimac_autosome(mm, chr, phasedfile, opts, phasing=False)
    Xvcf.append(outvcf)

minimacvcf = pf + ".minimac.dose.vcf"
cmd = "vcf-concat {0} | vcf-sort -c > {1}".format(" ".join(Xvcf), minimacvcf)
mm.add(Xvcf, minimacvcf, cmd)
def minimac_X(mm, chr, vcffile, opts)
See details here: http://genome.sph.umich.edu/wiki/Minimac3_Cookbook_:_Chromosome_X_Imputation
3.46866
3.427944
1.011878
p = OptionParser(beagle.__doc__)
p.set_home("beagle")
p.set_ref()
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

vcffile, chr = args
pf = vcffile.rsplit(".", 1)[0]
outpf = pf + ".beagle"
outfile = outpf + ".vcf.gz"

mm = MakeManager()
beagle_cmd = opts.beagle_home
kg = op.join(opts.ref, "1000GP_Phase3")
cmd = beagle_cmd + " gt={0}".format(vcffile)
cmd += " ref={0}/chr{1}.1kg.phase3.v5a.bref".format(kg, chr)
cmd += " map={0}/plink.chr{1}.GRCh37.map".format(kg, chr)
cmd += " out={0}".format(outpf)
cmd += " nthreads=16 gprobs=true"
mm.add(vcffile, outfile, cmd)

mm.write()
def beagle(args)
%prog beagle input.vcf 1 Use BEAGLE4.1 to impute vcf on chromosome 1.
4.745138
4.313373
1.100099
from pyfaidx import Fasta

p = OptionParser(impute.__doc__)
p.set_home("shapeit")
p.set_home("impute")
p.set_ref()
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

vcffile, fastafile, chr = args
mm = MakeManager()
pf = vcffile.rsplit(".", 1)[0]
hapsfile = pf + ".haps"
kg = op.join(opts.ref, "1000GP_Phase3")
shapeit_phasing(mm, chr, vcffile, opts)

fasta = Fasta(fastafile)
size = len(fasta[chr])
binsize = 5000000
bins = size // binsize  # 5Mb bins; integer division (was "/", Python 2)
if size % binsize:
    bins += 1
impute_cmd = op.join(opts.impute_home, "impute2")
chunks = []
for x in range(bins + 1):  # was xrange() (Python 2)
    chunk_start = x * binsize + 1
    chunk_end = min(chunk_start + binsize - 1, size)
    outfile = pf + ".chunk{0:02d}.impute2".format(x)
    mapfile = "{0}/genetic_map_chr{1}_combined_b37.txt".format(kg, chr)
    rpf = "{0}/1000GP_Phase3_chr{1}".format(kg, chr)
    cmd = impute_cmd + " -m {0}".format(mapfile)
    cmd += " -known_haps_g {0}".format(hapsfile)
    cmd += " -h {0}.hap.gz -l {0}.legend.gz".format(rpf)
    cmd += " -Ne 20000 -int {0} {1}".format(chunk_start, chunk_end)
    cmd += " -o {0} -allow_large_regions -seed 367946".format(outfile)
    cmd += " && touch {0}".format(outfile)
    mm.add(hapsfile, outfile, cmd)
    chunks.append(outfile)

# Combine all the files
imputefile = pf + ".impute2"
cmd = "cat {0} > {1}".format(" ".join(chunks), imputefile)
mm.add(chunks, imputefile, cmd)

# Convert to vcf
vcffile = pf + ".impute2.vcf"
cmd = "python -m jcvi.formats.vcf fromimpute2 {0} {1} {2} > {3}".\
    format(imputefile, fastafile, chr, vcffile)
mm.add(imputefile, vcffile, cmd)
mm.write()
def impute(args)
%prog impute input.vcf hs37d5.fa 1 Use IMPUTE2 to impute vcf on chromosome 1.
3.390382
3.265167
1.038349
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

gff_file, ref = args
s = Fasta(ref)
g = make_index(gff_file)
geneseqs, exonseqs, intronseqs = [], [], []  # Calc % GC
for f in g.features_of_type("gene"):
    fid = f.id
    fseq = s.sequence({'chr': f.chrom, 'start': f.start, 'stop': f.stop})
    geneseqs.append(fseq)
    exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2)
                if c.featuretype == "exon")
    exons = list(exons)
    for chrom, start, stop in exons:
        fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
        exonseqs.append(fseq)
    introns = range_interleave(exons)
    for chrom, start, stop in introns:
        fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
        intronseqs.append(fseq)

r = {}  # Report
for t, tseqs in zip(("Gene", "Exon", "Intron"),
                    (geneseqs, exonseqs, intronseqs)):
    tsizes = [len(x) for x in tseqs]
    tsummary = SummaryStats(tsizes, dtype="int")
    r[t, "Number"] = tsummary.size
    r[t, "Average size (bp)"] = tsummary.mean
    r[t, "Median size (bp)"] = tsummary.median
    r[t, "Total length (Mb)"] = human_size(tsummary.sum,
                                           precision=0, target="Mb")
    r[t, "% of genome"] = percentage(tsummary.sum, s.totalsize,
                                     precision=0, mode=-1)
    r[t, "% GC"] = gc(tseqs)

print(tabulate(r), file=sys.stderr)
def summary(args)
%prog summary gffile fastafile

Print summary stats for Gene/Exon/Intron, including:
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC
3.071739
2.65697
1.156106
p = OptionParser(statstable.__doc__)
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

gff_files = args
for metric in metrics:
    logging.debug("Parsing files in `{0}`..".format(metric))

    table = {}
    for x in gff_files:
        pf = op.basename(x).split(".")[0]
        numberfile = op.join(metric, pf + ".txt")
        ar = [int(x.strip()) for x in open(numberfile)]
        items = SummaryStats(ar).todict().items()  # renamed from `sum`, which shadowed the builtin
        keys, vals = zip(*items)
        keys = [(pf, x) for x in keys]
        table.update(dict(zip(keys, vals)))

    print(tabulate(table), file=sys.stderr)
def statstable(args)
%prog statstable *.gff Print gene statistics table.
4.154409
3.874067
1.072364
from jcvi.graphics.histogram import histogram_multiple

p = OptionParser(histogram.__doc__)
p.add_option("--bins", dest="bins", default=40, type="int",
             help="number of bins to plot in the histogram [default: %default]")
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

gff_files = args
# metrics = ("Exon_Length", "Intron_Length", "Gene_Length", "Exon_Count")
colors = ("red", "green", "blue", "black")
vmaxes = (1000, 1000, 4000, 20)
xlabels = ("bp", "bp", "bp", "number")
for metric, color, vmax, xlabel in zip(metrics, colors, vmaxes, xlabels):
    logging.debug("Parsing files in `{0}`..".format(metric))
    numberfiles = [op.join(metric, op.basename(x).split(".")[0] + ".txt")
                   for x in gff_files]
    histogram_multiple(numberfiles, 0, vmax, xlabel, metric,
                       bins=opts.bins, facet=True, fill=color,
                       prefix=metric + ".")
def histogram(args)
%prog histogram *.gff

Plot gene statistics based on the output of stats. For each GFF file, look
to see if the metrics folder (e.g. Exon_Length) contains the data, and
plot them.
3.507729
3.259982
1.075997
p = OptionParser(stats.__doc__)
p.add_option("--gene", default="mRNA",
             help="The gene type [default: %default]")
p.add_option("--exon", default="CDS",
             help="The exon type [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

gff_file, = args
g = make_index(gff_file)
exon_lengths = []
intron_lengths = []
gene_lengths = []
exon_counts = []
for feat in g.features_of_type(opts.gene):
    exons = []
    for c in g.children(feat.id, 1):
        if c.featuretype != opts.exon:
            continue
        exons.append((c.chrom, c.start, c.stop))
    introns = range_interleave(exons)
    feat_exon_lengths = [(stop - start + 1) for (chrom, start, stop) in exons]
    feat_intron_lengths = [(stop - start + 1) for (chrom, start, stop) in introns]
    exon_lengths += feat_exon_lengths
    intron_lengths += feat_intron_lengths
    gene_lengths.append(sum(feat_exon_lengths))
    exon_counts.append(len(feat_exon_lengths))

a = SummaryStats(exon_lengths)
b = SummaryStats(intron_lengths)
c = SummaryStats(gene_lengths)
d = SummaryStats(exon_counts)
for x, title in zip((a, b, c, d), metrics):
    x.title = title
    print(x, file=sys.stderr)

prefix = gff_file.split(".")[0]
for x in (a, b, c, d):
    dirname = x.title
    mkdir(dirname)
    txtfile = op.join(dirname, prefix + ".txt")
    x.tofile(txtfile)
def stats(args)
%prog stats infile.gff

Collect gene statistics based on a GFF file. There are some terminology
issues: what we call "gene" here is actually mRNA, and "exon" is often
CDS, but both are configurable.

The numbers are written to text files in four separate folders,
corresponding to the four metrics: Exon length, Intron length, Gene
length, Exon count.

With the data written to disk, you can then run %prog histogram.
2.200162
2.096383
1.049504
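range_interleave() is not defined in this section; conceptually it returns the gaps between ranges, which is how introns are derived from exons above. A simplified sketch of the idea for same-chromosome, sorted, non-overlapping exon tuples (the function name here is illustrative, not the jcvi.utils.range implementation):

def interleave(ranges):
    # [(chrom, 1, 100), (chrom, 201, 300)] -> [(chrom, 101, 200)]
    introns = []
    for (chrom, _, astop), (_, bstart, _) in zip(ranges, ranges[1:]):
        if bstart > astop + 1:
            introns.append((chrom, astop + 1, bstart - 1))
    return introns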
p = OptionParser(rdotplot.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

rdotplotfile, = args
assert rdotplotfile.endswith(".rdotplot")

pngfile = rdotplotfile.replace(".rdotplot", ".png")
# dotplot_template: module-level R script template
rtemplate = RTemplate(dotplot_template, locals())
rtemplate.run()
def rdotplot(args)
%prog rdotplot rdotplotfile

Dot plot to visualize the relationship between two sequences, by plotting
a .rdotplot file (often generated by LASTZ).
3.27188
3.205228
1.020795
template = self.template
parameters = self.parameters

# write to a temporary R script
fw = must_open("tmp", "w")
path = fw.name
fw.write(template.safe_substitute(**parameters))
fw.close()

sh("Rscript %s" % path)
if clean:
    os.remove(path)

# I have no idea why, with ggsave, one extra image (Rplots.pdf)
# is generated; remove it here
rplotspdf = "Rplots.pdf"
if op.exists(rplotspdf):
    os.remove(rplotspdf)
def run(self, clean=True)
Write the template to a temporary R script and run it with Rscript.
8.266833
8.018145
1.031016
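The key mechanism in run() is string.Template.safe_substitute(), which fills in $placeholders and leaves any unknown ones intact instead of raising. A tiny self-contained demo (the placeholder names are illustrative):

from string import Template

t = Template("png('$pngfile')\nplot(read.table('$rdotplotfile'))")
print(t.safe_substitute(pngfile="out.png"))  # $rdotplotfile survives unfilled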
spurs = []
path_nodes = []
for k, d in G.degree():  # .degree().iteritems() was Python 2 / networkx 1.x
    if d == 1:
        spurs.append(k)
    elif d == 2:
        path_nodes.append(k)

logging.debug("Remove {0} spurs.".format(len(spurs)))
G.remove_nodes_from(spurs)

SG = G.subgraph(path_nodes)
cc = list(nx.connected_components(SG))  # materialize; a generator in networkx >= 2
for c in cc:
    if len(c) == 1:
        continue
    c = set(c)
    neighbors = set()
    for x in c:
        neighbors |= set(G.neighbors(x))
    neighbors -= c
    newtag = list(c)[0] + "*"
    for n in neighbors:
        G.add_edge(newtag, n)
    G.remove_nodes_from(c)
logging.debug("Contract {0} path nodes into {1} nodes.".
              format(len(path_nodes), len(cc)))
def graph_simplify(G)
Simplify big graphs: remove spurs and contract unique paths.
2.956044
2.657378
1.112391
npaths = len(paths)
weights = weights or [1] * npaths
assert len(paths) == len(weights)

G = nx.DiGraph()
for path, w in zip(paths, weights):
    for a, b in pairwise(path):
        update_weight(G, a, b, w)
return G
def make_paths(paths, weights=None)
Zip together paths. Called by merge_paths().
2.579926
2.601195
0.991823
from jcvi.algorithms.lpsolve import min_feedback_arc_set

while not nx.is_directed_acyclic_graph(G):
    edges = []
    for a, b, w in G.edges(data=True):  # edges_iter() was networkx 1.x
        w = w['weight']
        edges.append((a, b, w))
    mf, mf_score = min_feedback_arc_set(edges)
    for a, b, w in mf:
        G.remove_edge(a, b)

assert nx.is_directed_acyclic_graph(G)
G = transitive_reduction(G)
return G
def reduce_paths(G)
Make graph into a directed acyclic graph (DAG).
3.779235
3.674768
1.028428
H = G.copy()
for a, b, w in G.edges(data=True):  # edges_iter() was networkx 1.x
    # Try deleting the edge, see if we still have a path
    # between the vertices
    H.remove_edge(a, b)
    if not nx.has_path(H, a, b):  # we shouldn't have deleted it
        H.add_edge(a, b, **w)  # re-attach with the original edge data
return H
def transitive_reduction(G)
Returns a transitive reduction of a graph. The original graph is not
modified.

A transitive reduction H of G has a path from x to y if and only if there
was a path from x to y in G. Deleting any edge of H destroys this
property. A transitive reduction is not unique in general. A transitive
reduction has the same transitive closure as the original graph.

A transitive reduction of a complete graph is a tree. A transitive
reduction of a tree is itself.

>>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
>>> H = transitive_reduction(G)
>>> H.edges()
[(1, 2), (2, 3), (3, 4)]
3.907531
4.379351
0.892263
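Recent networkx releases (2.x and later) ship an equivalent built-in, nx.transitive_reduction(), which likewise requires a DAG, so the hand-rolled version above can plausibly be replaced on modern networkx (note the built-in returns a graph without the original edge data):

import networkx as nx

G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
H = nx.transitive_reduction(G)
print(sorted(H.edges()))  # [(1, 2), (2, 3), (3, 4)]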
G = make_paths(paths, weights=weights)
G = reduce_paths(G)
return G
def merge_paths(paths, weights=None)
Zip together sorted lists.

>>> paths = [[1, 2, 3], [1, 3, 4], [2, 4, 5]]
>>> G = merge_paths(paths)
>>> nx.topological_sort(G)
[1, 2, 3, 4, 5]
>>> paths = [[1, 2, 3, 4], [1, 2, 3, 2, 4]]
>>> G = merge_paths(paths, weights=(1, 2))
>>> nx.topological_sort(G)
[1, 2, 3, 4]
6.195173
9.1671
0.675805
assert nx.is_directed_acyclic_graph(G)

tree = list(nx.topological_sort(G))  # a generator in networkx >= 2
node_to_index = dict((t, i) for i, t in enumerate(tree))

nnodes = len(tree)
weights = [weights.get(x, 1) for x in tree] if weights else [1] * nnodes
score, fromc = weights[:], [-1] * nnodes
si = node_to_index[source]
ti = node_to_index[target]
for a in tree[si: ti]:
    ai = node_to_index[a]
    for b, w in G[a].items():
        bi = node_to_index[b]
        w = w.get('weight', 1)
        d = score[ai] + weights[bi] * w  # Favor heavier edges
        if d <= score[bi]:
            continue
        score[bi] = d  # Update longest distance so far
        fromc[bi] = ai

# Backtracking
path = []
tscore = score[ti]  # grab the target's score before ti is walked back to -1
while ti != -1:
    path.append(ti)
    ti = fromc[ti]

path = [tree[x] for x in path[::-1]]
return path, tscore
def longest_path_weighted_nodes(G, source, target, weights=None)
The longest path problem is the problem of finding a simple path of
maximum length in a given graph. While this problem is NP-hard for general
graphs, if G is a directed acyclic graph (DAG), longest paths in G can be
found in linear time with dynamic programming.

>>> G = nx.DiGraph([(1, 2), (1, 3), (2, "M"), (3, "M")])
>>> longest_path_weighted_nodes(G, 1, "M", weights={1: 1, 2: 1, 3: 2, "M": 1})
([1, 3, 'M'], 4)
3.054782
3.264852
0.935657
next, ntag = None, None

L = self.outs if tag == "<" else self.ins
if len(L) == 1:
    e, = L
    if e.v1.v == self.v:
        next, ntag = e.v2, e.o2
        ntag = "<" if ntag == ">" else ">"  # Flip tag if on other end
    else:
        next, ntag = e.v1, e.o1

if next:  # Validate the next vertex
    B = next.ins if ntag == "<" else next.outs
    if len(B) > 1:
        return None, None

return next, ntag
def get_next(self, tag="<")
This function is tricky and took me a while to figure out.

The tag specifies the direction where the current edge came from.

 tag     ntag
---> V >----> U
     cur      next

This means the next vertex should follow the outs since this tag is inward
'<'. Check whether there are multiple branches with len(L) == 1, and also
check whether the next vertex has multiple incoming edges with
len(B) == 1.
4.698082
3.618507
1.298348
from jcvi.formats.base import read_block

p = OptionParser(dust2bed.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

fastafile, = args
interval = fastafile + ".iv"
if need_update(fastafile, interval):
    cmd = "dustmasker -in {0}".format(fastafile)
    sh(cmd, outfile=interval)

fp = open(interval)
bedfile = fastafile.rsplit(".", 1)[0] + ".dust.bed"
fw = must_open(bedfile, "w")
nlines = 0
nbases = 0
for header, block in read_block(fp, ">"):
    header = header.strip(">")
    for b in block:
        start, end = b.split(" - ")
        start, end = int(start), int(end)
        print("\t".join(str(x) for x in (header, start, end)), file=fw)
        nlines += 1
        nbases += end - start
logging.debug("A total of {0} DUST intervals ({1} bp) exported to `{2}`".
              format(nlines, nbases, bedfile))
def dust2bed(args)
%prog dust2bed fastafile Use dustmasker to find low-complexity regions (LCRs) in the genome.
2.935234
2.700697
1.086843
dustfasta = fastafile.rsplit(".", 1)[0] + ".dust.fasta"
for name, seq in parse_fasta(dustfasta):
    for islower, ss in groupby(enumerate(seq),
                               key=lambda x: x[-1].islower()):
        if not islower:
            continue
        ss = list(ss)
        ms, mn = min(ss)  # first (index, base) of the lowercase run
        xs, xn = max(ss)  # last (index, base) of the lowercase run
        print("\t".join(str(x) for x in (name, ms, xs)))
def fasta2bed(fastafile)
Alternative BED generation from FASTA file. Used for sanity check.
4.749977
4.767151
0.996397
from jcvi.assembly.goldenpath import overlap

p = OptionParser(circular.__doc__)
p.add_option("--flip", default=False, action="store_true",
             help="Reverse complement the sequence")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

fastafile, startpos = args
startpos = int(startpos)
key, seq = next(parse_fasta(fastafile))
aseq = seq[startpos:]
bseq = seq[:startpos]
aseqfile, bseqfile = "a.seq", "b.seq"

for f, s in zip((aseqfile, bseqfile), (aseq, bseq)):
    fw = must_open(f, "w")
    print(">{0}\n{1}".format(f, s), file=fw)
    fw.close()

o = overlap([aseqfile, bseqfile])
seq = aseq[:o.qstop] + bseq[o.sstop:]
seq = Seq(seq)
if opts.flip:
    seq = seq.reverse_complement()

for f in (aseqfile, bseqfile):
    os.remove(f)

fw = must_open(opts.outfile, "w")
rec = SeqRecord(seq, id=key, description="")
SeqIO.write([rec], fw, "fasta")
fw.close()
def circular(args)
%prog circular fastafile startpos

Make a circular genome; startpos is the position (1-based) at which to
start the sequence. This can be determined by mapping to a reference. Self
overlaps are then resolved.
2.497519
2.33134
1.071281
p = OptionParser(dust.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

fastafile, = args
dustfastafile = fastafile.rsplit(".", 1)[0] + ".dust.fasta"
if need_update(fastafile, dustfastafile):
    cmd = "dustmasker -in {0}".format(fastafile)
    cmd += " -out {0} -outfmt fasta".format(dustfastafile)
    sh(cmd)

for name, seq in parse_fasta(dustfastafile):
    nlow = sum(1 for x in seq if x in "acgtnN")
    pctlow = nlow * 100. / len(seq)
    if pctlow < 98:
        continue
    #print "{0}\t{1:.1f}".format(name, pctlow)
    print(name)
def dust(args)
%prog dust assembly.fasta Remove low-complexity contigs within assembly.
2.919239
2.760217
1.057612
from jcvi.formats.blast import BlastLine

p = OptionParser(dedup.__doc__)
p.set_align(pctid=0, pctcov=98)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

blastfile, fastafile = args
cov = opts.pctcov / 100.
sizes = Sizes(fastafile).mapping
fp = open(blastfile)
removed = set()
for row in fp:
    b = BlastLine(row)
    query, subject = b.query, b.subject
    if query == subject:
        continue
    qsize, ssize = sizes[query], sizes[subject]
    qspan = abs(b.qstop - b.qstart)
    if qspan < qsize * cov:
        continue
    if (qsize, query) < (ssize, subject):
        removed.add(query)

print("\n".join(sorted(removed)))
def dedup(args)
%prog dedup assembly.assembly.blast assembly.fasta Remove duplicate contigs within assembly.
3.196972
3.007254
1.063087
from jcvi.apps.cdhit import deduplicate
from jcvi.apps.vecscreen import mask
from jcvi.formats.fasta import sort

p = OptionParser(build.__doc__)
p.add_option("--nodedup", default=False, action="store_true",
             help="Do not deduplicate [default: deduplicate]")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

fastafile, bacteria, pf = args
dd = deduplicate([fastafile, "--pctid=100"]) \
    if not opts.nodedup else fastafile
screenfasta = screen([dd, bacteria])
tidyfasta = mask([screenfasta])
sortedfasta = sort([tidyfasta, "--sizes"])
scaffoldfasta = pf + ".assembly.fasta"
format([sortedfasta, scaffoldfasta, "--prefix=scaffold_", "--sequential"])
gapsplitfasta = pf + ".gapSplit.fasta"
cmd = "gapSplit -minGap=10 {0} {1}".format(scaffoldfasta, gapsplitfasta)
sh(cmd)
contigsfasta = pf + ".contigs.fasta"
format([gapsplitfasta, contigsfasta, "--prefix=contig_", "--sequential"])
def build(args)
%prog build current.fasta Bacteria_Virus.fasta prefix

Build assembly files after a set of clean-ups:
1. Use cdhit (100%) to remove duplicate scaffolds
2. Screen against the bacteria and virus database (remove scaffolds with
   95% id, 50% cov)
3. Mask matches to UniVec_Core
4. Sort by decreasing scaffold sizes
5. Rename the scaffolds sequentially
6. Build the contigs by splitting scaffolds at gaps
7. Rename the contigs sequentially
4.593752
3.925843
1.170131
from jcvi.apps.align import blast
from jcvi.formats.blast import covfilter

p = OptionParser(screen.__doc__)
p.set_align(pctid=95, pctcov=50)
p.add_option("--best", default=1, type="int",
             help="Get the best N hit [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

scaffolds, library = args
pctidflag = "--pctid={0}".format(opts.pctid)
blastfile = blast([library, scaffolds, pctidflag,
                   "--best={0}".format(opts.best)])

idsfile = blastfile.rsplit(".", 1)[0] + ".ids"
covfilter([blastfile, scaffolds, "--ids=" + idsfile,
           pctidflag, "--pctcov={0}".format(opts.pctcov)])

pf = scaffolds.rsplit(".", 1)[0]
nf = pf + ".screen.fasta"
cmd = "faSomeRecords {0} -exclude {1} {2}".format(scaffolds, idsfile, nf)
sh(cmd)

logging.debug("Screened FASTA written to `{0}`.".format(nf))
return nf
def screen(args)
%prog screen scaffolds.fasta library.fasta Screen sequences against FASTA library. Sequences that have 95% id and 50% cov will be removed by default.
3.610709
3.208077
1.125506
from jcvi.formats.agp import bed, order_to_agp, build
from jcvi.formats.bed import Bed

p = OptionParser(scaffold.__doc__)
p.add_option("--prefix", default=False, action="store_true",
             help="Keep IDs with same prefix together [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

ctgfasta, agpfile = args
sizes = Sizes(ctgfasta).mapping

pf = ctgfasta.rsplit(".", 1)[0]
phasefile = pf + ".phases"
fwphase = open(phasefile, "w")
newagpfile = pf + ".new.agp"
fwagp = open(newagpfile, "w")

scaffoldbuckets = defaultdict(list)

bedfile = bed([agpfile, "--nogaps", "--outfile=tmp"])
bb = Bed(bedfile)
for s, partialorder in bb.sub_beds():
    name = partialorder[0].accn
    bname = name.rsplit("_", 1)[0] if opts.prefix else s
    scaffoldbuckets[bname].append([(b.accn, b.strand) for b in partialorder])

# Now the buckets contain a mixture of singletons and partially resolved
# scaffolds. Print the scaffolds first then remaining singletons.
for bname, scaffolds in sorted(scaffoldbuckets.items()):
    ctgorder = []
    singletons = set()
    for scaf in sorted(scaffolds):
        for node, orientation in scaf:
            ctgorder.append((node, orientation))
        if len(scaf) == 1:
            singletons.add(node)
    nscaffolds = len(scaffolds)
    nsingletons = len(singletons)
    if nsingletons == 1 and nscaffolds == 0:
        phase = 3
    elif nsingletons == 0 and nscaffolds == 1:
        phase = 2
    else:
        phase = 1

    msg = "{0}: Scaffolds={1} Singletons={2} Phase={3}".\
        format(bname, nscaffolds, nsingletons, phase)
    print(msg, file=sys.stderr)
    print("\t".join((bname, str(phase))), file=fwphase)

    order_to_agp(bname, ctgorder, sizes, fwagp)

fwagp.close()
os.remove(bedfile)

fastafile = "final.fasta"
build([newagpfile, ctgfasta, fastafile])
tidy([fastafile])
def scaffold(args)
%prog scaffold ctgfasta agpfile Build scaffolds based on ordering in the AGP file.
3.560594
3.391075
1.04999
p = OptionParser(overlapbatch.__doc__)  # was overlap.__doc__, a copy-paste slip
opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())

ctgfasta, poolfasta = args
f = Fasta(ctgfasta)
for k, rec in f.iteritems_ordered():
    fastafile = k + ".fasta"
    fw = open(fastafile, "w")
    SeqIO.write([rec], fw, "fasta")
    fw.close()

    overlap([fastafile, poolfasta])
def overlapbatch(args)
%prog overlapbatch ctgfasta poolfasta Fish out the sequences in `poolfasta` that overlap with `ctgfasta`. Mix and combine using `minimus2`.
3.338158
2.875571
1.160868
p = OptionParser(array.__doc__)
p.set_grid_opts(array=True)
p.set_params(prog="grid")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

cmds, = args
fp = open(cmds)
N = sum(1 for x in fp)
fp.close()

pf = cmds.rsplit(".", 1)[0]
runfile = pf + ".sh"
assert runfile != cmds, \
    "Commands list file should not have a `.sh` extension"

engine = get_grid_engine()
threaded = opts.threaded or 1
contents = arraysh.format(cmds) if engine == "SGE" \
    else arraysh_ua.format(N, threaded, cmds)
write_file(runfile, contents)

if engine == "PBS":
    return

outfile = "{0}.{1}.out".format(pf, "\$TASK_ID")
errfile = "{0}.{1}.err".format(pf, "\$TASK_ID")
p = GridProcess("sh {0}".format(runfile),
                outfile=outfile, errfile=errfile,
                arr=N, extra_opts=opts.extra, grid_opts=opts)
p.start()
def array(args)
%prog array commands.list Parallelize a set of commands on grid using array jobs.
4.796079
4.586178
1.045768
p = OptionParser(run.__doc__)
p.set_grid_opts()
p.set_params(prog="grid")
opts, args = p.parse_args(args)

if len(args) == 0:
    sys.exit(not p.print_help())

sep = ":::"
if sep in args:
    sepidx = args.index(sep)
    filenames = args[sepidx + 1:]
    args = args[:sepidx]
    if not filenames:
        filenames = [""]
else:
    filenames = sys.stdin if not sys.stdin.isatty() else [""]

cmd = " ".join(args)

cmds = [] if filenames else [(cmd, None)]
for i, filename in enumerate(filenames):
    filename = filename.strip()
    noextname = filename.rsplit(".", 1)[0]
    prefix, basename = op.split(filename)
    basenoextname = basename.rsplit(".", 1)[0]
    basefirstname = basename.split(".")[0]
    firstname = op.join(prefix, basefirstname)
    ncmd = cmd

    if "{" in ncmd:
        ncmd = ncmd.replace("{}", filename)
    else:
        ncmd += " " + filename

    ncmd = ncmd.replace("{.}", noextname)
    ncmd = ncmd.replace("{_}", firstname)
    ncmd = ncmd.replace("{/}", basename)
    ncmd = ncmd.replace("{/.}", basenoextname)
    ncmd = ncmd.replace("{/_}", basefirstname)
    ncmd = ncmd.replace("{#}", str(i))

    outfile = None
    if ">" in ncmd:
        ncmd, outfile = ncmd.split(">", 1)
        ncmd, outfile = ncmd.strip(), outfile.strip()

    ncmd = ncmd.strip()
    cmds.append((ncmd, outfile))

for ncmd, outfile in cmds:
    p = GridProcess(ncmd, outfile=outfile,
                    extra_opts=opts.extra, grid_opts=opts)
    p.start()
def run(args)
%prog run command ::: file1 file2

Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>

{}   - input line
{.}  - input line without extension
{_}  - input line first part
{/}  - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#}  - sequence number of job to run
:::  - Use arguments from the command line as input source instead of
       stdin (standard input).

If file name is `t/example.tar.gz`, then:
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"

A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf  # use stdin
%prog run process {} {.}.pdf ::: *fastq       # use :::
%prog run "zcat {} > {.}" ::: *.gz            # quote redirection
%prog run < commands.list                     # run a list of commands
2.771632
2.602869
1.064837
import shlex
import xml.etree.ElementTree as ET
from subprocess import check_output, CalledProcessError
from jcvi.apps.base import sh, getusername

valid_methods = ("pattern", "jobid")
p = OptionParser(kill.__doc__)
p.add_option("--method", choices=valid_methods,
             help="Identify jobs based on [default: guess]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

username = getusername()
tag, = args
tag = tag.strip()

if tag == "all":
    sh("qdel -u {0}".format(username))
    return

valid_jobids = set()
method = opts.method or guess_method(tag)
if method == "jobid":
    jobids = tag.split(",")
    valid_jobids |= set(jobids)
elif method == "pattern":
    qsxmlcmd = 'qstat -u "{0}" -j "{1}" -nenv -njd -xml'.\
        format(username, tag)
    try:
        qsxml = check_output(shlex.split(qsxmlcmd)).strip()
    except CalledProcessError as e:
        qsxml = None
        logging.debug('No jobs matching the pattern "{0}"'.format(tag))

    if qsxml is not None:
        for job in ET.fromstring(qsxml).findall("djob_info"):
            for elem in job.findall("element"):
                jobid = elem.find("JB_job_number").text
                valid_jobids.add(jobid)

if valid_jobids:
    sh("qdel {0}".format(",".join(valid_jobids)))
def kill(args)
%prog kill [options] JOBNAMEPAT/JOBIDs

Kill jobs based on JOBNAME pattern matching (case-sensitive) or on a list
of JOBIDs (comma separated).

Examples:
%prog kill "pyth*"                 # Use a name pattern
%prog kill 160253,160245,160252    # Use a list of job ids
%prog kill all                     # Everything
3.210731
3.261052
0.984569
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile

p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
p.add_option("--maxcov", default=100, type="int", help="Max contig coverage")
p.add_option("--color", default='m', help="Color of the data points")
p.add_option("--kind", default="scatter",
             choices=("scatter", "reg", "resid", "kde", "hex"),
             help="Kind of plot to draw")
opts, args, iopts = p.set_image_options(args, figsize="8x8")

if len(args) != 2:
    sys.exit(not p.print_help())

covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
for ctg, size in s.iter_sizes():
    c = cov.get(ctg, 0)
    if size > maxsize:
        continue
    if c > maxcov:
        continue
    data.append((size, c))

x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))

df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
              xlim=(0, maxsize), ylim=(0, maxcov),
              stat_func=None, edgecolor="w", color=opts.color)

figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts)
def covlen(args)
%prog covlen covfile fastafile Plot coverage vs length. `covfile` is two-column listing contig id and depth of coverage.
2.355743
2.312276
1.018798
from jcvi.formats.bed import mates, bedpe

p = OptionParser(coverage.__doc__)
p.add_option("--ymax", default=None, type="int",
             help="Limit ymax [default: %default]")
p.add_option("--spans", default=False, action="store_true",
             help="BED files already contain clone spans [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="8x5")

if len(args) < 3:
    sys.exit(not p.print_help())

fastafile, ctg = args[0:2]
bedfiles = args[2:]

sizes = Sizes(fastafile)
size = sizes.mapping[ctg]

plt.figure(1, (iopts.w, iopts.h))
ax = plt.gca()

bins = 100  # smooth the curve
lines = []
legends = []
not_covered = []
yy = .9
for bedfile, c in zip(bedfiles, "rgbcky"):
    if not opts.spans:
        pf = bedfile.rsplit(".", 1)[0]
        matesfile = pf + ".mates"
        if need_update(bedfile, matesfile):
            matesfile, matesbedfile = mates([bedfile, "--lib"])

        bedspanfile = pf + ".spans.bed"
        if need_update(matesfile, bedspanfile):
            bedpefile, bedspanfile = bedpe([bedfile, "--span",
                                            "--mates={0}".format(matesfile)])
        bedfile = bedspanfile

    bedsum = Bed(bedfile).sum(seqid=ctg)
    notcoveredbases = size - bedsum

    legend = bedfile.split(".")[0]
    msg = "{0}: {1} bp not covered".format(legend, thousands(notcoveredbases))
    not_covered.append(msg)
    print(msg, file=sys.stderr)
    ax.text(.1, yy, msg, color=c, size=9, transform=ax.transAxes)
    yy -= .08

    cov = Coverage(bedfile, sizes.filename)
    x, y = cov.get_plot_data(ctg, bins=bins)
    line, = ax.plot(x, y, '-', color=c, lw=2, alpha=.5)
    lines.append(line)
    legends.append(legend)

leg = ax.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(.5)

ylabel = "Average depth per {0}Kb".format(size / bins / 1000)
ax.set_xlim(0, size)
ax.set_ylim(0, opts.ymax)
ax.set_xlabel(ctg)
ax.set_ylabel(ylabel)
set_human_base_axis(ax)

figname = "{0}.{1}.pdf".format(fastafile, ctg)
savefig(figname, dpi=iopts.dpi, iopts=iopts)
def coverage(args)
%prog coverage fastafile ctg bedfile1 bedfile2 ..

Plot coverage from a set of BED files that contain the read mappings. The
paired read spans will be converted to a new bedfile that contains the
happy mates. ctg is the chr/scf/ctg on which you want to plot the
histogram. If the bedfiles already contain the clone spans, turn on
--spans.
3.313109
3.080179
1.075622
from jcvi.utils.iter import grouper

p = OptionParser(scaffold.__doc__)
p.add_option("--cutoff", type="int", default=1000000,
             help="Plot scaffolds with size larger than [default: %default]")
p.add_option("--highlights",
             help="A set of regions in BED format to highlight [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="14x8", dpi=150)

if len(args) < 4 or len(args) % 3 != 1:
    sys.exit(not p.print_help())

highlights = opts.highlights
scafsizes = Sizes(args[0])
trios = list(grouper(args[1:], 3))
trios = [(a, Sizes(b), Bed(c)) for a, b, c in trios]
if highlights:
    hlbed = Bed(highlights)

for scaffoldID, scafsize in scafsizes.iter_sizes():
    if scafsize < opts.cutoff:
        continue
    logging.debug("Loading {0} (size={1})".format(scaffoldID,
                                                  thousands(scafsize)))

    tmpname = scaffoldID + ".sizes"
    tmp = open(tmpname, "w")
    tmp.write("{0}\t{1}".format(scaffoldID, scafsize))
    tmp.close()

    tmpsizes = Sizes(tmpname)
    tmpsizes.close(clean=True)

    subhighlights = None  # avoid a NameError when no --highlights given
    if highlights:
        subhighlights = list(hlbed.sub_bed(scaffoldID))

    imagename = ".".join((scaffoldID, opts.format))
    plot_one_scaffold(scaffoldID, tmpsizes, None, trios, imagename, iopts,
                      highlights=subhighlights)
def scaffold(args)
%prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
physicalmap.blast physicalmap.sizes physicalmap.bed

As an evaluation of scaffolding, visualize external lines of evidence:
* Plot synteny to an external genome
* Plot alignments to a physical map
* Plot alignments to a genetic map (TODO)

Each trio defines one panel to be plotted. The blastfile defines the
matchings between the evidence and the scaffolds, followed by the evidence
sizes and the evidence bed used to make the dot plots. This script plots a
dot at each corresponding location; the plots are one contig/scaffold per
plot.
3.584146
3.466348
1.033983
p = OptionParser(A50.__doc__)
p.add_option("--overwrite", default=False, action="store_true",
             help="overwrite .rplot file if exists [default: %default]")
p.add_option("--cutoff", default=0, type="int", dest="cutoff",
             help="use contigs above certain size [default: %default]")
p.add_option("--stepsize", default=10, type="int", dest="stepsize",
             help="stepsize for the distribution [default: %default]")
opts, args = p.parse_args(args)
if not args:
    sys.exit(p.print_help())

import numpy as np
from jcvi.utils.table import loadtable

stepsize = opts.stepsize  # use stepsize to speed up drawing
rplot = "A50.rplot"
if not op.exists(rplot) or opts.overwrite:
    fw = open(rplot, "w")
    header = "\t".join(("index", "cumsize", "fasta"))
    statsheader = ("Fasta", "L50", "N50", "Min", "Max", "Average", "Sum",
                   "Counts")
    statsrows = []
    print(header, file=fw)
    for fastafile in args:
        f = Fasta(fastafile, index=False)
        ctgsizes = [length for k, length in f.itersizes()]
        ctgsizes = np.array(ctgsizes)

        a50, l50, n50 = calculate_A50(ctgsizes, cutoff=opts.cutoff)
        cmin, cmax, cmean = min(ctgsizes), max(ctgsizes), np.mean(ctgsizes)
        csum, counts = np.sum(ctgsizes), len(ctgsizes)
        cmean = int(round(cmean))
        statsrows.append((fastafile, l50, n50, cmin, cmax, cmean, csum,
                          counts))

        logging.debug("`{0}` ctgsizes: {1}".format(fastafile, ctgsizes))

        tag = "{0} (L50={1})".format(
            op.basename(fastafile).rsplit(".", 1)[0], l50)
        logging.debug(tag)

        for i, s in zip(range(0, len(a50), stepsize), a50[::stepsize]):  # was xrange()
            print("\t".join((str(i), str(s / 1000000.), tag)), file=fw)
    fw.close()

    table = loadtable(statsheader, statsrows)
    print(table, file=sys.stderr)

generate_plot(rplot)
def A50(args)
%prog A50 contigs_A.fasta contigs_B.fasta ... Plots A50 graphics, see blog post (http://blog.malde.org/index.php/a50/)
3.059055
2.997527
1.020526
p = OptionParser(merge.__doc__)
p.set_outfile(outfile="merged_results.delta")
opts, args = p.parse_args(args)

if len(args) < 3:
    sys.exit(not p.print_help())

ref, query = args[:2]
deltafiles = args[2:]
outfile = opts.outfile

ref = get_abs_path(ref)
query = get_abs_path(query)
fw = must_open(outfile, "w")
print(" ".join((ref, query)), file=fw)
print("NUCMER", file=fw)
fw.close()

for d in deltafiles:
    cmd = "awk 'NR > 2 {{print $0}}' {0}".format(d)
    sh(cmd, outfile=outfile, append=True)
def merge(args)
%prog merge ref.fasta query.fasta *.delta Merge delta files into a single delta.
3.522174
2.914587
1.208464
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

deltafile, = args
blastfile = deltafile.rsplit(".", 1)[0] + ".blast"

if need_update(deltafile, blastfile):
    coords = Coords(deltafile)
    fw = open(blastfile, "w")
    for c in coords:
        print(c.blastline, file=fw)
    fw.close()  # flush the converted records to disk
def blast(args)
%prog blast <deltafile|coordsfile>

Convert delta or coords file to BLAST tabular output.
2.898485
2.431979
1.191822
p = OptionParser(fromdelta.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

deltafile, = args
coordsfile = deltafile.rsplit(".", 1)[0] + ".coords"
cmd = "show-coords -rclH {0}".format(deltafile)
sh(cmd, outfile=coordsfile)

return coordsfile
def fromdelta(args)
%prog fromdelta deltafile Convert deltafile to coordsfile.
3.641561
2.929904
1.242894
import jcvi.formats.blast return jcvi.formats.blast.sort(args + ["--coords"])
def sort(args)
%prog sort coordsfile Sort coordsfile based on query or ref.
12.315608
12.723311
0.967956
p = OptionParser(coverage.__doc__)
p.add_option("-c", dest="cutoff", default=0.5, type="float",
        help="only report query with coverage greater than [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

coordsfile, = args
fp = open(coordsfile)

coords = []
for row in fp:
    try:
        c = CoordsLine(row)
    except AssertionError:
        continue
    coords.append(c)

coords.sort(key=lambda x: x.query)

coverages = []
for query, lines in groupby(coords, key=lambda x: x.query):
    cumulative_cov = sum(x.querycov for x in lines)
    coverages.append((query, cumulative_cov))

coverages.sort(key=lambda x: (-x[1], x[0]))
for query, cumulative_cov in coverages:
    if cumulative_cov < opts.cutoff:
        break
    print("{0}\t{1:.2f}".format(query, cumulative_cov))
def coverage(args)
%prog coverage coordsfile Report the coverage per query record, useful to see which query matches reference. The coords file MUST be filtered with supermap:: jcvi.algorithms.supermap --filter query
2.609952
2.490458
1.047981
p = OptionParser(annotate.__doc__.format(", ".join(Overlap_types))) p.add_option("--maxhang", default=100, type="int", help="Max hang to call dovetail overlap [default: %default]") p.add_option("--all", default=False, action="store_true", help="Output all lines [default: terminal/containment]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) coordsfile, = args fp = open(coordsfile) for row in fp: try: c = CoordsLine(row) except AssertionError: continue ov = c.overlap(opts.maxhang) if not opts.all and ov == 0: continue print("{0}\t{1}".format(row.strip(), Overlap_types[ov]))
def annotate(args)
%prog annotate coordsfile Annotate coordsfile to append an additional column, with the following overlaps: {0}.
3.934021
3.514314
1.119428
from jcvi.formats.blast import AlignStats p = OptionParser(summary.__doc__) p.add_option("-s", dest="single", default=False, action="store_true", help="provide stats per reference seq") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) coordsfile, = args alignstats = get_stats(coordsfile) alignstats.print_stats()
def summary(args)
%prog summary coordsfile provide summary on id% and cov%, for both query and reference
4.253542
3.345288
1.271503
p = OptionParser(filter.__doc__) p.set_align(pctid=0, hitlen=0) p.add_option("--overlap", default=False, action="store_true", help="Print overlap status (e.g. terminal, contained)") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) pctid = opts.pctid hitlen = opts.hitlen filename, = args if pctid == 0 and hitlen == 0: return filename pf, suffix = filename.rsplit(".", 1) outfile = "".join((pf, ".P{0}L{1}.".format(int(pctid), int(hitlen)), suffix)) if not need_update(filename, outfile): return outfile if suffix == "delta": cmd = "delta-filter -i {0} -l {1} {2}".format(pctid, hitlen, filename) sh(cmd, outfile=outfile) return outfile fp = open(filename) fw = must_open(outfile, "w") for row in fp: try: c = CoordsLine(row) except AssertionError: continue if c.identity < pctid: continue if c.len2 < hitlen: continue if opts.overlap and not c.overlap: continue outrow = row.rstrip() if opts.overlap: ov = Overlap_types[c.overlap] outrow += "\t" + ov print(outrow, file=fw) return outfile
def filter(args)
%prog filter <deltafile|coordsfile> Produce a new delta/coords file and filter based on id% or cov%. Use `delta-filter` for .delta file.
3.515422
3.235806
1.086413
p = OptionParser(bed.__doc__) p.add_option("--query", default=False, action="store_true", help="print out query intervals rather than ref [default: %default]") p.add_option("--pctid", default=False, action="store_true", help="use pctid in score [default: %default]") p.add_option("--cutoff", dest="cutoff", default=0, type="float", help="get all the alignments with quality above threshold " +\ "[default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) coordsfile, = args query = opts.query pctid = opts.pctid quality_cutoff = opts.cutoff coords = Coords(coordsfile) for c in coords: if c.quality < quality_cutoff: continue line = c.qbedline(pctid=pctid) if query else c.bedline(pctid=pctid) print(line)
def bed(args)
%prog bed coordsfile

Produce a bed list of the mapped positions and orientations (only alignments
beyond the quality cutoff, say 50) in bed format.
3.0318
2.886883
1.050198
aL, aR = 1, self.reflen bL, bR = 1, self.querylen aLhang, aRhang = self.start1 - aL, aR - self.end1 bLhang, bRhang = self.start2 - bL, bR - self.end2 if self.orientation == '-': bLhang, bRhang = bRhang, bLhang s1 = aLhang + bRhang s2 = aRhang + bLhang s3 = aLhang + aRhang s4 = bLhang + bRhang # Dovetail (terminal) overlap if s1 < max_hang: type = 2 # b ~ a elif s2 < max_hang: type = 1 # a ~ b # Containment overlap elif s3 < max_hang: type = 3 # a in b elif s4 < max_hang: type = 4 # b in a else: type = 0 return type
def overlap(self, max_hang=100)
Determine the type of overlap given query, ref alignment coordinates

Consider the following alignment between sequence a and b:

aLhang \              / aRhang
        \------------/
        /------------\
bLhang /              \ bRhang

Terminal overlap: a before b, b before a
Containment overlap: a in b, b in a
2.721296
2.381608
1.14263
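A worked sketch of the hang arithmetic in overlap() above, on hypothetical
coordinates (the lengths and positions here are made up, not from the source):

# ref (a) is 1000 bp, query (b) is 800 bp; the alignment joins the right
# end of a to the left end of b on the '+' strand.
max_hang = 100
reflen, querylen = 1000, 800
start1, end1 = 950, 1000      # alignment span on a
start2, end2 = 1, 50          # alignment span on b

aLhang, aRhang = start1 - 1, reflen - end1    # 949, 0
bLhang, bRhang = start2 - 1, querylen - end2  # 0, 750

s1 = aLhang + bRhang  # 1699, exceeds max_hang: not "b ~ a"
s2 = aRhang + bLhang  # 0 < max_hang: dovetail overlap, a before b (type 1)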
self.quality_sort() hits = dict((query, list(blines)) for (query, blines) in \ groupby(self, lambda x: x.query)) self.ref_sort() return hits
def hits(self)
returns a dict with query => list of alignment lines
10.198977
6.792256
1.501559
self.quality_sort() best_hits = dict((query, next(blines)) for (query, blines) in \ groupby(self, lambda x: x.query)) self.ref_sort() return best_hits
def best_hits(self)
returns a dict with query => best mapped position
9.35479
6.966965
1.342735
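Both hits() and best_hits() above rely on the same sort-then-groupby idiom:
itertools.groupby() only merges adjacent keys, so the records must be
quality-sorted (grouped by query, best first) beforehand. A minimal sketch on
plain tuples (illustrative data, not CoordsLine objects):

from itertools import groupby

alignments = [("q1", 90), ("q2", 75), ("q1", 60), ("q2", 95)]
# Sort by query, then by descending score, so the first record in each
# group is the best hit for that query.
alignments.sort(key=lambda x: (x[0], -x[1]))

best = dict((q, next(hits)) for q, hits in \
        groupby(alignments, key=lambda x: x[0]))
# best == {'q1': ('q1', 90), 'q2': ('q2', 95)}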
from jcvi.formats.fastq import guessoffset p = OptionParser(align.__doc__) p.set_firstN(firstN=0) p.add_option("--full", default=False, action="store_true", help="Enforce end-to-end alignment [default: local]") p.add_option("--reorder", default=False, action="store_true", help="Keep the input read order [default: %default]") p.add_option("--null", default=False, action="store_true", help="Do not write to SAM/BAM output") p.add_option("--fasta", default=False, action="store_true", help="Query reads are FASTA") p.set_cutoff(cutoff=800) p.set_mateorientation(mateorientation="+-") p.set_sam_options(bowtie=True) opts, args = p.parse_args(args) extra = opts.extra mo = opts.mateorientation if mo == '+-': extra += "" elif mo == '-+': extra += "--rf" else: extra += "--ff" PE = True if len(args) == 2: logging.debug("Single-end alignment") PE = False elif len(args) == 3: logging.debug("Paired-end alignment") else: sys.exit(not p.print_help()) firstN = opts.firstN mapped = opts.mapped unmapped = opts.unmapped fasta = opts.fasta gl = "--end-to-end" if opts.full else "--local" dbfile, readfile = args[0:2] dbfile = check_index(dbfile) prefix = get_prefix(readfile, dbfile) samfile, mapped, unmapped = get_samfile(readfile, dbfile, bowtie=True, mapped=mapped, unmapped=unmapped, bam=opts.bam) logfile = prefix + ".log" if not fasta: offset = guessoffset([readfile]) if not need_update(dbfile, samfile): logging.error("`{0}` exists. `bowtie2` already run.".format(samfile)) return samfile, logfile cmd = "bowtie2 -x {0}".format(dbfile) if PE: r1, r2 = args[1:3] cmd += " -1 {0} -2 {1}".format(r1, r2) cmd += " --maxins {0}".format(opts.cutoff) mtag, utag = "--al-conc", "--un-conc" else: cmd += " -U {0}".format(readfile) mtag, utag = "--al", "--un" if mapped: cmd += " {0} {1}".format(mtag, mapped) if unmapped: cmd += " {0} {1}".format(utag, unmapped) if firstN: cmd += " --upto {0}".format(firstN) cmd += " -p {0}".format(opts.cpus) if fasta: cmd += " -f" else: cmd += " --phred{0}".format(offset) cmd += " {0}".format(gl) if opts.reorder: cmd += " --reorder" cmd += " {0}".format(extra) # Finally the log cmd += " 2> {0}".format(logfile) if opts.null: samfile = "/dev/null" cmd = output_bam(cmd, samfile) sh(cmd) print(open(logfile).read(), file=sys.stderr) return samfile, logfile
def align(args)
%prog align database.fasta read1.fq [read2.fq] Wrapper for `bowtie2` single-end or paired-end, depending on the number of args.
3.193035
3.127142
1.021071
cfg = {}
options = Config.options(section)
for option in options:
    try:
        cfg[option] = Config.get(section, option)
        if cfg[option] == -1:
            logging.debug("skip: {0}".format(option))
    except Exception:
        logging.debug("exception on {0}!".format(option))
        cfg[option] = None
return cfg
def ConfigSectionMap(Config, section)
Read a specific section from a ConfigParser() object and return a dict() of all key-value pairs in that section
1.536069
1.707262
0.899727
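A short usage sketch for ConfigSectionMap(), reading the [pushover] section of
the pushnotify.ini layout described later in this module (the file path here
is illustrative):

from ConfigParser import ConfigParser  # "configparser" on Python 3

Config = ConfigParser()
Config.read("pushnotify.ini")
cfg = ConfigSectionMap(Config, "pushover")
token, user = cfg["token"], cfg["user"]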
"Get module-level docstring of Python module at filepath, e.g. 'path/to/file.py'." co = compile(open(filepath).read(), filepath, 'exec') if co.co_consts and isinstance(co.co_consts[0], six.string_types): docstring = co.co_consts[0] else: docstring = None return docstring
def get_module_docstring(filepath)
Get module-level docstring of Python module at filepath, e.g. 'path/to/file.py'.
3.042277
2.017523
1.507926
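get_module_docstring() compiles the file instead of importing it, so the
module's top-level code never runs. A small usage sketch (the path is
illustrative):

doc = get_module_docstring("jcvi/apps/base.py")
if doc:
    print(doc.splitlines()[0])  # e.g. the one-line module summary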
if not cmd: return 1 if silent: outfile = errfile = "/dev/null" if grid: from jcvi.apps.grid import GridProcess pr = GridProcess(cmd, infile=infile, outfile=outfile, errfile=errfile, threaded=threaded, grid_opts=grid_opts) pr.start() return pr.jobid else: if infile: cat = "cat" if infile.endswith(".gz"): cat = "zcat" cmd = "{0} {1} |".format(cat, infile) + cmd if outfile and outfile != "stdout": if outfile.endswith(".gz"): cmd += " | gzip" tag = ">" if append: tag = ">>" cmd += " {0}{1}".format(tag, outfile) if errfile: if errfile == outfile: errfile = "&1" cmd += " 2>{0}".format(errfile) if background: cmd += " &" if log: logging.debug(cmd) call_func = check_call if check else call return call_func(cmd, shell=True, executable=shell)
def sh(cmd, grid=False, infile=None, outfile=None, errfile=None, append=False, background=False, threaded=None, log=True, grid_opts=None, silent=False, shell="/bin/bash", check=False)
simple wrapper for system calls
2.677198
2.694782
0.993475
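To make the string assembly in sh() above concrete, here is roughly what one
call expands to, following the infile/outfile/append branches (the file names
are hypothetical):

sh("sort -k1,1", infile="hits.blast.gz", outfile="sorted.blast", append=True)
# The gzipped infile prepends "zcat hits.blast.gz |", and append=True
# selects the ">>" tag, so the shell receives:
#   zcat hits.blast.gz |sort -k1,1 >>sorted.blast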
from subprocess import Popen as P if debug: logging.debug(cmd) # See: <https://blog.nelhage.com/2010/02/a-very-subtle-bug/> proc = P(cmd, bufsize=1, stdin=stdin, stdout=stdout, \ shell=True, executable=shell) return proc
def Popen(cmd, stdin=None, stdout=PIPE, debug=False, shell="/bin/bash")
Capture the cmd stdout output to a file handle.
5.722585
5.654891
1.011971
import glob as gl if pattern: pathname = op.join(pathname, pattern) return natsorted(gl.glob(pathname))
def glob(pathname, pattern=None)
Wraps around glob.glob(), but returns a sorted list.
4.802586
4.165473
1.152951
matches = [] patterns = patterns.split(",") if "," in patterns else listify(patterns) for root, dirnames, filenames in os.walk(pathname): matching = [] for pattern in patterns: matching.extend(fnmatch.filter(filenames, pattern)) for filename in matching: matches.append(op.join(root, filename)) return natsorted(matches)
def iglob(pathname, patterns)
Allow multiple file formats. This is also recursive. For example: >>> iglob("apps", "*.py,*.pyc")
2.712045
3.383454
0.801561
if op.isdir(dirname):
    if overwrite:
        shutil.rmtree(dirname)
        os.mkdir(dirname)
        logging.debug("Overwrite folder `{0}`.".format(dirname))
    else:
        return False  # Nothing is changed
else:
    try:
        os.mkdir(dirname)
    except OSError:  # missing parent directories
        os.makedirs(dirname)
    logging.debug("`{0}` not found. Creating new.".format(dirname))

return True
def mkdir(dirname, overwrite=False)
Wraps around os.mkdir(), but checks for existence first.
3.724886
3.794603
0.981627
if not (op.exists(a) and op.exists(b)): return False am = os.stat(a).st_mtime bm = os.stat(b).st_mtime return am > bm
def is_newer_file(a, b)
Check if file a is newer than file b
2.260633
2.221741
1.017505
a = listify(a) b = listify(b) return any((not op.exists(x)) for x in b) or \ all((os.stat(x).st_size == 0 for x in b)) or \ any(is_newer_file(x, y) for x in a for y in b)
def need_update(a, b)
Check if file a is newer than file b and decide whether or not to update file b. Can generalize to two lists.
4.051657
3.694931
1.096545
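need_update() returns True when any output in b is missing, empty, or older
than any input in a; this is what drives the regenerate-only-when-stale guards
used throughout this module. A typical sketch (file names are illustrative):

# Rerun the conversion only if the .coords output is stale.
if need_update("out.delta", "out.coords"):
    sh("show-coords -rclH out.delta", outfile="out.coords")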
from jcvi.apps.console import magenta, yellow format = yellow("%(asctime)s [%(module)s]") format += magenta(" %(message)s") logging.basicConfig(level=level, format=format, datefmt="%H:%M:%S")
def debug(level=logging.DEBUG)
Turn on debugging
4.863702
5.177522
0.939388
from jcvi.apps.grid import Jobs p = OptionParser(mdownload.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) linksfile, = args links = [(x.strip(),) for x in open(linksfile)] j = Jobs(download, links) j.run()
def mdownload(args)
%prog mdownload links.txt

Download a list of files in parallel. Use formats.html.links() to extract
the links file.
3.145377
2.924526
1.075517
p = OptionParser(expand.__doc__) p.add_option("--symlink", default=False, action="store_true", help="Create symbolic link [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) seen = set() for a in args: oa = a.replace("/", "_") if oa in seen: logging.debug("Name collision `{0}`, ignored.".format(oa)) continue cmd = "cp -s" if opts.symlink else "mv" cmd += " {0} {1}".format(a, oa) sh(cmd) seen.add(oa)
def expand(args)
%prog expand */* Move files in subfolders into the current folder. Use --symlink to create a link instead.
3.044801
2.883607
1.0559
p = OptionParser(timestamp.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) path, = args for root, dirs, files in os.walk(path): for f in files: filename = op.join(root, f) atime, mtime = get_times(filename) print(filename, atime, mtime)
def timestamp(args)
%prog timestamp path > timestamp.info Record the timestamps for all files in the current folder. filename atime mtime This file can be used later to recover previous timestamps through touch().
2.257618
2.085539
1.08251
from time import ctime p = OptionParser(touch.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) info, = args fp = open(info) for row in fp: path, atime, mtime = row.split() atime = float(atime) mtime = float(mtime) current_atime, current_mtime = get_times(path) # Check if the time has changed, with resolution up to 1 sec if int(atime) == int(current_atime) and \ int(mtime) == int(current_mtime): continue times = [ctime(x) for x in (current_atime, current_mtime, atime, mtime)] msg = "{0} : ".format(path) msg += "({0}, {1}) => ({2}, {3})".format(*times) print(msg, file=sys.stderr) os.utime(path, (atime, mtime))
def touch(args)
%prog touch timestamp.info Recover timestamps for files in the current folder. CAUTION: you must execute this in the same directory as timestamp().
2.779766
2.815634
0.987261
from jcvi.formats.base import must_open p = OptionParser(less.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) filename, pos = args fsize = getfilesize(filename) if pos == "all": pos = [x / 10. for x in range(0, 10)] else: pos = [float(x) for x in pos.split(",")] if pos[0] > 1: pos = [x / fsize for x in pos] if len(pos) > 1: counts = 20 else: counts = None fp = must_open(filename) for p in pos: snapshot(fp, p, fsize, counts=counts)
def less(args)
%prog less filename position | less

Enhance the unix `less` command by seeking to a file location first. This is
useful to browse big files. Position is relative 0.00 - 1.00, or bytenumber.

$ %prog less myfile 0.1      # Go to 10% of the file and stream from there
$ %prog less myfile 0.1,0.2  # Stream at several positions
$ %prog less myfile 100      # Go to a certain byte number and stream from there
$ %prog less myfile 100,200  # Stream at several positions
$ %prog less myfile all      # Generate a snapshot every 10% (10%, 20%, ..)
2.795768
2.521596
1.10873
assert -1 <= priority <= 2, \
        "Priority should be an int() between -1 and 2"

if timestamp is None:
    from time import time
    timestamp = int(time())

retry, expire = (300, 3600) if priority == 2 \
        else (None, None)

conn = HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
        urlencode({
            "token": token,
            "user": user,
            "message": message,
            "title": title,
            "priority": priority,
            "timestamp": timestamp,
            "retry": retry,
            "expire": expire,
        }), {"Content-type": "application/x-www-form-urlencoded"})
conn.getresponse()
def pushover(message, token, user, title="JCVI: Job Monitor", \ priority=0, timestamp=None)
pushover.net python API <https://pushover.net/faq#library-python>
2.318748
2.374065
0.976699
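A minimal call sketch for pushover() above; the token and user key are
placeholders, with real values coming from the pushnotify.ini config consumed
by pushnotify() below:

pushover("Job finished", token="<app-token>", user="<user-key>",
         title="JCVI: Job Monitor", priority=0)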
assert -2 <= priority <= 2, \ "Priority should be an int() between -2 and 2" conn = HTTPSConnection("www.notifymyandroid.com") conn.request("POST", "/publicapi/notify", urlencode({ "apikey": apikey, "application": "python notify", "event": event, "description": description, "priority": priority, }), { "Content-type": "application/x-www-form-urlencoded" }) conn.getresponse()
def nma(description, apikey, event="JCVI: Job Monitor", priority=0)
notifymyandroid.com API <http://www.notifymyandroid.com/api.jsp>
3.432786
3.46032
0.992043
import base64

headers = {}
auth = base64.encodestring("{0}:".format(apikey)).strip()
headers['Authorization'] = "Basic {0}".format(auth)
headers['Content-type'] = "application/x-www-form-urlencoded"

conn = HTTPSConnection("api.pushbullet.com")
conn.request("POST", "/api/pushes",
        urlencode({
            "iden": device,
            "type": "note",
            "title": title,
            "body": body,
        }), headers)
conn.getresponse()
def pushbullet(body, apikey, device, title="JCVI: Job Monitor", type="note")
pushbullet.com API <https://www.pushbullet.com/api>
2.570354
2.570622
0.999896
assert type(priority) is int and -1 <= priority <= 2, \
        "Priority should be an int() between -1 and 2"

cfgfile = op.join(op.expanduser("~"), "pushnotify.ini")
Config = ConfigParser()
if op.exists(cfgfile):
    Config.read(cfgfile)
else:
    sys.exit("Push notification config file `{0}`".format(cfgfile) + \
             " does not exist!")

if api == "pushover":
    cfg = ConfigSectionMap(Config, api)
    token, key = cfg["token"], cfg["user"]
    pushover(message, token, key, title=subject, \
             priority=priority, timestamp=timestamp)
elif api == "nma":
    cfg = ConfigSectionMap(Config, api)
    apikey = cfg["apikey"]
    nma(message, apikey, event=subject, \
        priority=priority)
elif api == "pushbullet":
    cfg = ConfigSectionMap(Config, api)
    apikey, iden = cfg["apikey"], cfg["iden"]
    pushbullet(message, apikey, iden, title=subject, \
               type="note")
def pushnotify(subject, message, api="pushover", priority=0, timestamp=None)
Send push notifications using pre-existing APIs Requires a config `pushnotify.ini` file in the user home area containing the necessary api tokens and user keys. Default API: "pushover" Config file format: ------------------- [pushover] token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx user: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy [nma] apikey: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz [pushbullet] apikey: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb iden: dddddddddddddddddddddddddddddddddddd
3.358765
2.980736
1.126824
from smtplib import SMTP from email.mime.text import MIMEText SERVER = "localhost" _message = MIMEText(message) _message['Subject'] = subject _message['From'] = fromaddr _message['To'] = ", ".join(toaddr) server = SMTP(SERVER) server.sendmail(fromaddr, toaddr, _message.as_string()) server.quit()
def send_email(fromaddr, toaddr, subject, message)
Send an email message
1.991596
2.037238
0.977596
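A usage sketch for send_email(); note that toaddr must be a list, since the
recipients are joined for the To: header and passed as-is to sendmail(). The
addresses here are illustrative; get_email_address() below derives real ones:

send_email("notifier-donotreply@example.org", ["user@example.org"],
           "JCVI: job monitor", "Assembly job finished")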
if whoami == "user": username = getusername() domain = getdomainname() myemail = "{0}@{1}".format(username, domain) return myemail else: fromaddr = "notifier-donotreply@{0}".format(getdomainname()) return fromaddr
def get_email_address(whoami="user")
Auto-generate the FROM and TO email address
4.465785
4.214103
1.059724
import re qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]' dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]' atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40\\x5b-\\x5d\\x7f-\\xff]+' quoted_pair = '\\x5c[\\x00-\\x7f]' domain_literal = "\\x5b(?:%s|%s)*\\x5d" % (dtext, quoted_pair) quoted_string = "\\x22(?:%s|%s)*\\x22" % (qtext, quoted_pair) domain_ref = atom sub_domain = "(?:%s|%s)" % (domain_ref, domain_literal) word = "(?:%s|%s)" % (atom, quoted_string) domain = "%s(?:\\x2e%s)*" % (sub_domain, sub_domain) local_part = "%s(?:\\x2e%s)*" % (word, word) addr_spec = "%s\\x40%s" % (local_part, domain) email_address = re.compile('\A%s\Z' % addr_spec) if email_address.match(email): return True return False
def is_valid_email(email)
RFC822 Email Address Regex -------------------------- Originally written by Cal Henderson c.f. http://iamcal.com/publish/articles/php/parsing_email/ Translated to Python by Tim Fletcher, with changes suggested by Dan Kubb. Licensed under a Creative Commons Attribution-ShareAlike 2.5 License http://creativecommons.org/licenses/by-sa/2.5/
1.685787
1.674306
1.006857
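A quick sanity check for the RFC822 regex above (a sketch; the addresses are
illustrative):

assert is_valid_email("user@example.org")
assert not is_valid_email("missing-the-at-sign")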
from jcvi.utils.iter import flatten valid_notif_methods.extend(available_push_api.keys()) fromaddr = get_email_address(whoami="notifier") p = OptionParser(notify.__doc__) p.add_option("--method", default="email", choices=valid_notif_methods, help="Specify the mode of notification [default: %default]") p.add_option("--subject", default="JCVI: job monitor", help="Specify the subject of the notification message") p.set_email() g1 = OptionGroup(p, "Optional `push` parameters") g1.add_option("--api", default="pushover", \ choices=list(flatten(available_push_api.values())), help="Specify API used to send the push notification") g1.add_option("--priority", default=0, type="int", help="Message priority (-1 <= p <= 2) [default: %default]") g1.add_option("--timestamp", default=None, type="int", \ dest="timestamp", \ help="Message timestamp in unix format [default: %default]") p.add_option_group(g1) opts, args = p.parse_args(args) if len(args) == 0: logging.error("Please provide a brief message to be sent") sys.exit(not p.print_help()) subject = opts.subject message = " ".join(args).strip() if opts.method == "email": toaddr = opts.email.split(",") # TO address should be in a list for addr in toaddr: if not is_valid_email(addr): logging.debug("Email address `{0}` is not valid!".format(addr)) sys.exit() send_email(fromaddr, toaddr, subject, message) else: pushnotify(subject, message, api=opts.api, priority=opts.priority, \ timestamp=opts.timestamp)
def notify(args)
%prog notify "Message to be sent" Send a message via email/push notification. Email notify: Recipient email address is constructed by joining the login `username` and `dnsdomainname` of the server Push notify: Uses available API
3.347692
3.254762
1.028552
import shlex from jcvi.utils.iter import flatten valid_notif_methods.extend(list(flatten(available_push_api.values()))) p = OptionParser(waitpid.__doc__) p.add_option("--notify", default="email", choices=valid_notif_methods, help="Specify type of notification to be sent after waiting") p.add_option("--interval", default=120, type="int", help="Specify PID polling interval in seconds") p.add_option("--message", help="Specify notification message [default: %default]") p.set_email() p.set_grid() opts, args = p.parse_args(args) if len(args) == 0: sys.exit(not p.print_help()) if not opts.message: from subprocess import check_output sep = ":::" cmd = None if sep in args: sepidx = args.index(sep) cmd = " ".join(args[sepidx + 1:]).strip() args = args[:sepidx] pid = int(" ".join(args).strip()) status = pid_exists(pid) if status: if opts.message: msg = opts.message else: get_origcmd = "ps -p {0} -o cmd h".format(pid) msg = check_output(shlex.split(get_origcmd)).strip() _waitpid(pid, interval=opts.interval) else: logging.debug("Process with PID {0} does not exist".format(pid)) sys.exit() if opts.notify: notifycmd = ["[{0}] `{1}`".format(gethostname(), msg)] if opts.notify != "email": notifycmd.append("--method={0}".format("push")) notifycmd.append("--api={0}".format(opts.notify)) else: notifycmd.append('--email={0}'.format(opts.email)) notify(notifycmd) if cmd is not None: bg = False if opts.grid else True sh(cmd, grid=opts.grid, background=bg)
def waitpid(args)
%prog waitpid PID ::: "./command_to_run param1 param2 ...."

Given a PID, this script will wait for the PID to finish running and
then perform a desired action (notify user and/or execute a new command)

Specify `--notify=METHOD` to send the user a notification after waiting for PID
Specify `--grid` option to send the new process to the grid after waiting for PID
3.68101
3.305544
1.113587
p = which(cmd)  # if in PATH, just returns it
if p:
    return p

PATH = "Path"
config = RawConfigParser()
cfg = op.expanduser(cfg)
changed = False

if op.exists(cfg):
    config.read(cfg)

assert name is not None, "Need a program name"

try:
    fullpath = config.get(PATH, name)
except NoSectionError:
    config.add_section(PATH)
    changed = True
except NoOptionError:
    pass

try:
    fullpath = config.get(PATH, name)
except NoOptionError:
    msg = "=== Configure path for {0} ===\n".format(name)
    if url:
        msg += "URL: {0}\n".format(url)
    msg += "[Directory that contains `{0}`]: ".format(cmd)
    fullpath = input(msg).strip()
    config.set(PATH, name, fullpath)
    changed = True

path = op.join(op.expanduser(fullpath), cmd)
if warn == "exit":
    try:
        assert is_exe(path), \
                "***ERROR: Cannot execute binary `{0}`. ".format(path)
    except AssertionError as e:
        sys.exit("{0!s}Please verify and rerun.".format(e))

if changed:
    configfile = open(cfg, "w")
    config.write(configfile)
    logging.debug("Configuration written to `{0}`.".format(cfg))

return path
def getpath(cmd, name=None, url=None, cfg="~/.jcvirc", warn="exit")
Get install locations of common binaries First, check ~/.jcvirc file to get the full path If not present, ask on the console and store
3.676061
3.586463
1.024982
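A call sketch for getpath(); the program name and URL here are illustrative.
On first use it prompts for the install directory and caches the answer under
the [Path] section of ~/.jcvirc:

NUCMER = getpath("nucmer", name="MUMMER", url="http://mummer.sourceforge.net/")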
for k in dir(object): try: details = getattr(object, k) except Exception as e: details = e try: details = str(details) except Exception as e: details = e print("{}: {}".format(k, details), file=sys.stderr)
def inspect(object)
A better dir() showing attributes and values
2.8129
2.630012
1.069539
import random

if N <= len(a):
    return random.sample(a, N)
return [random.choice(a) for _ in range(N)]
def sample_N(a, N)
When size of N is > size of a, random.sample() will emit an error:
ValueError: sample larger than population

This method handles such restrictions by sampling with replacement
when N exceeds the size of a.

Examples:
>>> sample_N([1, 2, 3], 2)
>>> sample_N([1, 2, 3], 3)
>>> sample_N([1, 2, 3], 4)
3.467086
4.140576
0.837344
dest_prog = "to {0}".format(prog) if prog else "" self.add_option("--params", dest="extra", default=params, help="Extra parameters to pass {0}".format(dest_prog) + \ " (these WILL NOT be validated)")
def set_params(self, prog=None, params="")
Add --params options for given command line programs
9.358276
8.529865
1.097119
from multiprocessing import cpu_count max_cpus = cpu_count() if not 0 < cpus < max_cpus: cpus = max_cpus self.add_option("--cpus", default=cpus, type="int", help="Number of CPUs to use, 0=unlimited [default: %default]")
def set_cpus(self, cpus=0)
Add --cpus option to specify how many threads to use.
3.592846
3.366733
1.067161
from jcvi.utils.db import valid_dbconn, get_profile self.add_option("--db", default=dbname, dest="dbname", help="Specify name of database to query [default: %default]") self.add_option("--connector", default="Sybase", dest="dbconn", choices=valid_dbconn.keys(), help="Specify database connector [default: %default]") hostname, username, password = get_profile() if credentials: self.add_option("--hostname", default=hostname, help="Specify hostname [default: %default]") self.add_option("--username", default=username, help="Username to connect to database [default: %default]") self.add_option("--password", default=password, help="Password to connect to database [default: %default]") self.add_option("--port", type="int", help="Specify port number [default: %default]")
def set_db_opts(self, dbname="mta4", credentials=True)
Add db connection specific attributes
2.665707
2.618312
1.018102
from jcvi.graphics.base import ImageOptions, setup_theme allowed_format = ("emf", "eps", "pdf", "png", "ps", \ "raw", "rgba", "svg", "svgz") allowed_fonts = ("Helvetica", "Palatino", "Schoolbook", "Arial") allowed_styles = ("darkgrid", "whitegrid", "dark", "white", "ticks") allowed_diverge = ("BrBG", "PiYG", "PRGn", "PuOr", "RdBu", \ "RdGy", "RdYlBu", "RdYlGn", "Spectral") group = OptionGroup(self, "Image options") self.add_option_group(group) group.add_option("--figsize", default=figsize, help="Figure size `width`x`height` in inches [default: %default]") group.add_option("--dpi", default=dpi, type="int", help="Physical dot density (dots per inch) [default: %default]") group.add_option("--format", default=format, choices=allowed_format, help="Generate image of format [default: %default]") group.add_option("--font", default=font, choices=allowed_fonts, help="Font name") group.add_option("--style", default=style, choices=allowed_styles, help="Axes background") group.add_option("--diverge", default="PiYG", choices=allowed_diverge, help="Contrasting color scheme") group.add_option("--cmap", default=cmap, help="Use this color map") group.add_option("--notex", default=False, action="store_true", help="Do not use tex") if args is None: args = sys.argv[1:] opts, args = self.parse_args(args) assert opts.dpi > 0 assert "x" in opts.figsize setup_theme(style=opts.style, font=opts.font, usetex=(not opts.notex)) return opts, args, ImageOptions(opts)
def set_image_options(self, args=None, figsize="6x6", dpi=300, format="pdf", font="Helvetica", palette="deep", style="darkgrid", cmap="jet")
Add image format options for given command line programs.
2.579189
2.567416
1.004585
self.set_usage(self.set_pairs.__doc__) self.add_option("--pairsfile", default=None, help="Write valid pairs to pairsfile [default: %default]") self.add_option("--nrows", default=200000, type="int", help="Only use the first n lines [default: %default]") self.set_mates() self.add_option("--pdf", default=False, action="store_true", help="Print PDF instead ASCII histogram [default: %default]") self.add_option("--bins", default=20, type="int", help="Number of bins in the histogram [default: %default]") self.add_option("--distmode", default="ss", choices=("ss", "ee"), help="Distance mode between paired reads, ss is outer distance, " \ "ee is inner distance [default: %default]")
def set_pairs(self)
%prog pairs <blastfile|samfile|bedfile> Report how many paired ends mapped, avg distance between paired ends, etc. Paired reads must have the same prefix, use --rclip to remove trailing part, e.g. /1, /2, or .f, .r, default behavior is to truncate until last char.
4.168796
3.580042
1.164455
from jcvi.formats.fasta import gaps from jcvi.apps.cdhit import deduplicate, ids p = OptionParser(dedup.__doc__) p.set_align(pctid=GoodPct) p.set_mingap(default=10) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) scaffolds, = args mingap = opts.mingap splitfile, oagpfile, cagpfile = gaps([scaffolds, "--split", "--mingap={0}".format(mingap)]) dd = splitfile + ".cdhit" clstrfile = dd + ".clstr" idsfile = dd + ".ids" if need_update(splitfile, clstrfile): deduplicate([splitfile, "--pctid={0}".format(opts.pctid)]) if need_update(clstrfile, idsfile): ids([clstrfile]) agp = AGP(cagpfile) reps = set(x.split()[-1] for x in open(idsfile)) pf = scaffolds.rsplit(".", 1)[0] dedupagp = pf + ".dedup.agp" fw = open(dedupagp, "w") ndropped = ndroppedbases = 0 for a in agp: if not a.is_gap and a.component_id not in reps: span = a.component_span logging.debug("Drop component {0} ({1})".\ format(a.component_id, span)) ndropped += 1 ndroppedbases += span continue print(a, file=fw) fw.close() logging.debug("Dropped components: {0}, Dropped bases: {1}".\ format(ndropped, ndroppedbases)) logging.debug("Deduplicated file written to `{0}`.".format(dedupagp)) tidyagp = tidy([dedupagp, splitfile]) dedupfasta = pf + ".dedup.fasta" build([tidyagp, dd, dedupfasta]) return dedupfasta
def dedup(args)
%prog dedup scaffolds.fasta Remove redundant contigs with CD-HIT. This is run prior to assembly.sspace.embed().
3.889416
3.784457
1.027734
try:
    parts = id.split(".")
    aid = ".".join(parts[:2])
    fid = int(parts[2].replace("frag", ""))
except Exception:
    aid, fid = None, None

return aid, fid
def get_shred_id(id)
>>> get_shred_id("ca-bacs.5638.frag11.22000-23608")
('ca-bacs.5638', 11)
4.755158
3.089341
1.539214
from jcvi.apps.align import run_megablast p = OptionParser(blast.__doc__) p.add_option("-n", type="int", default=2, help="Take best N hits [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) allfasta, clonename = args fastadir = "fasta" infile = op.join(fastadir, clonename + ".fasta") if not op.exists(infile): entrez([clonename, "--skipcheck", "--outdir=" + fastadir]) outfile = "{0}.{1}.blast".format(clonename, allfasta.split(".")[0]) run_megablast(infile=infile, outfile=outfile, db=allfasta, \ pctid=GoodPct, hitlen=GoodOverlap) blasts = [BlastLine(x) for x in open(outfile)] besthits = [] for b in blasts: if b.query.count("|") >= 3: b.query = b.query.split("|")[3] if b.subject.count("|") >= 3: b.subject = b.subject.split("|")[3] b.query = b.query.rsplit(".", 1)[0] b.subject = b.subject.rsplit(".", 1)[0] if b.query == b.subject: continue if b.subject not in besthits: besthits.append(b.subject) if len(besthits) == opts.n: break for b in besthits: overlap([clonename, b, "--dir=" + fastadir])
def blast(args)
%prog blast allfasta clonename

Insert a component into the agpfile by aligning to the best hit in the pool
and checking whether they have good overlaps.
2.964575
2.768691
1.07075
from jcvi.apps.align import run_blat p = OptionParser(bes.__doc__) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bacfasta, clonename = args entrez([clonename, "--database=nucgss", "--skipcheck"]) besfasta = clonename + ".fasta" blatfile = clonename + ".bes.blat" run_blat(infile=besfasta, outfile=blatfile, db=bacfasta, \ pctid=95, hitlen=100, cpus=opts.cpus) aid, asize = next(Fasta(bacfasta).itersizes()) width = 50 msg = "=" * width msg += " " + aid print(msg, file=sys.stderr) ratio = width * 1. / asize _ = lambda x: int(round(x * ratio, 0)) blasts = [BlastLine(x) for x in open(blatfile)] for b in blasts: if b.orientation == '+': msg = " " * _(b.sstart) + "->" else: msg = " " * (_(b.sstop) - 2) + "<-" msg += " " * (width - len(msg) + 2) msg += b.query if b.orientation == '+': msg += " (hang={0})".format(b.sstart - 1) else: msg += " (hang={0})".format(asize - b.sstop) print(msg, file=sys.stderr)
def bes(args)
%prog bes bacfasta clonename

Use the clone name to download BES GSS sequences from GenBank, then map and
visualize them.
4.371561
3.936138
1.110622
p = OptionParser(flip.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args outfastafile = fastafile.rsplit(".", 1)[0] + ".flipped.fasta" fo = open(outfastafile, "w") f = Fasta(fastafile, lazy=True) for name, rec in f.iteritems_ordered(): tmpfasta = "a.fasta" fw = open(tmpfasta, "w") SeqIO.write([rec], fw, "fasta") fw.close() o = overlap([tmpfasta, name]) if o.orientation == '-': rec.seq = rec.seq.reverse_complement() SeqIO.write([rec], fo, "fasta") os.remove(tmpfasta)
def flip(args)
%prog flip fastafile

Go through each FASTA record, check against the GenBank file, and determine
whether or not to flip the sequence. This is useful before updating the
sequences, to make sure the same orientation is used.
2.725869
2.625648
1.03817
p = OptionParser(batchoverlap.__doc__) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) pairsfile, outdir = args fp = open(pairsfile) cmds = [] mkdir("overlaps") for row in fp: a, b = row.split()[:2] oa = op.join(outdir, a + ".fa") ob = op.join(outdir, b + ".fa") cmd = "python -m jcvi.assembly.goldenpath overlap {0} {1}".format(oa, ob) cmd += " -o overlaps/{0}_{1}.ov".format(a, b) cmds.append(cmd) print("\n".join(cmds))
def batchoverlap(args)
%prog batchoverlap pairs.txt outdir Check overlaps between pairs of sequences.
2.802975
2.633549
1.064334