Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
from jcvi.assembly.soap import prepare logging.debug("Work on {0} ({1})".format(pf, ','.join(p))) asm = "{0}.closed.scafSeq".format(pf) if not need_update(p, asm): logging.debug("Assembly found: {0}. Skipped.".format(asm)) return slink(p, pf, tag, extra) cwd = os.getcwd() os.chdir(pf) prepare(sorted(glob("*.fastq") + glob("*.fastq.gz")) + \ ["--assemble_1st_rank_only", "-K 31"]) sh("./run.sh") sh("cp asm31.closed.scafSeq ../{0}".format(asm)) logging.debug("Assembly finished: {0}".format(asm)) os.chdir(cwd)
def soap_trios(p, pf, tag, extra)
Take one pair of reads, together with the 'widow' (unpaired) reads left after correction, and run SOAP.
6.864745
6.688527
1.026346
p = OptionParser(soapX.__doc__) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) folder, tag = args[:2] extra = args[2:] extra = [get_abs_path(x) for x in extra] tag = tag.split(",") for p, pf in iter_project(folder, n=3): soap_trios(p, pf, tag, extra)
def soapX(args)
%prog soapX folder tag [*.fastq] Run SOAP on a folder of paired reads and apply tag before assembly. Optional *.fastq in the argument list will be symlinked in each folder and co-assembled.
4.510181
3.695247
1.220536
p = OptionParser(correctX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): correct_pairs(p, pf, tag)
def correctX(args)
%prog correctX folder tag Run ALLPATHS correction on a folder of paired reads and apply tag.
3.86094
2.920025
1.322228
p = OptionParser(allpathsX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): assemble_pairs(p, pf, tag)
def allpathsX(args)
%prog allpathsX folder tag Run assembly on a folder of paired reads and apply tag (PE-200, PE-500). Allow multiple tags separated by comma, e.g. PE-350,TT-1050
3.991196
3.081041
1.295405
p = OptionParser(stats.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args statsfiles = iglob(folder, "*.stats") after_equal = lambda x: x.split("=")[-1] header = "Library Assembled_reads Contigs".split() contents = [] # label=M0096 total=7443 cnts=948 mean=7.851 std=35.96 for statsfile in statsfiles: fp = open(statsfile) for row in fp: if row.startswith("label="): break label, total, cnts = row.split()[:3] label = after_equal(label) reads = int(after_equal(total)) contigs = int(after_equal(cnts)) contents.append((label, reads, contigs)) all_labels, all_reads, all_contigs = zip(*contents) contents.append(("SUM", sum(all_reads), sum(all_contigs))) contents.append(("AVERAGE (per sample)", \ int(np.mean(all_reads)), int(np.mean(all_contigs)))) contents.append(("MEDIAN (per sample)", \ int(np.median(all_reads)), int(np.median(all_contigs)))) write_csv(header, contents, filename=opts.outfile)
def stats(args)
%prog stats folder Generate table summarizing .stats files.
3.521761
3.418794
1.030118
p = OptionParser(mcluster.__doc__) add_consensus_options(p) p.set_cpus() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) consensusfiles = args minlength = opts.minlength cpus = opts.cpus pf = opts.prefix pctid = find_pctid(consensusfiles) pf += ".P{0}".format(pctid) consensusfile = pf + ".consensus.fasta" if need_update(consensusfiles, consensusfile): fw_cons = must_open(consensusfile, "w") totalseqs = 0 for cf in consensusfiles: nseqs = 0 s = op.basename(cf).split(".")[0] for name, seq in parse_fasta(cf): name = '.'.join((s, name)) print(">{0}\n{1}".format(name, seq), file=fw_cons) nseqs += 1 logging.debug("Read `{0}`: {1} seqs".format(cf, nseqs)) totalseqs += nseqs logging.debug("Total: {0} seqs".format(totalseqs)) fw_cons.close() userfile = pf + ".u" notmatchedfile = pf + ".notmatched" if need_update(consensusfile, userfile): cluster_smallmem(consensusfile, userfile, notmatchedfile, minlength, pctid, cpus) clustfile = pf + ".clust" if need_update((consensusfile, userfile, notmatchedfile), clustfile): makeclust(consensusfile, userfile, notmatchedfile, clustfile) clustSfile = pf + ".clustS" if need_update(clustfile, clustSfile): parallel_musclewrap(clustfile, cpus, minsamp=opts.minsamp)
def mcluster(args)
%prog mcluster *.consensus Cluster across samples using consensus sequences.
3.205901
3.124623
1.026012
p = OptionParser(mconsensus.__doc__) p.add_option("--allele_counts", default="allele_counts", help="Directory to generate allele counts") add_consensus_options(p) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) consensusfiles = args prefix = opts.prefix acdir = opts.allele_counts store = ClustStores(consensusfiles) pctid = find_pctid(consensusfiles) pf = prefix + ".P{0}".format(pctid) clustSfile = pf + ".clustS" AC = makeloci(clustSfile, store, prefix, minsamp=opts.minsamp, pctid=pctid) mkdir(acdir) acfile = pf + ".allele_counts" fw = open(acfile, "w") seen = DefaultOrderedDict(list) # chr, pos => taxa print("# " + "\t".join(ACHEADER), file=fw) # Sort allele counts into separate files for ac in AC: chrpos = ac.chr, ac.pos seen[chrpos].append(ac) print(ac.tostring(taxon=True), file=fw) fw.close() logging.debug("Populate all taxa and instantiate empty vector if missing") all_taxa = set([op.basename(x).split(".")[0] for x in consensusfiles]) taxon_to_ac = defaultdict(list) for chrpos, aclist in seen.items(): included_taxa = set([x.taxon for x in aclist]) missing_taxa = all_taxa - included_taxa template = deepcopy(aclist[0]) template.clear() for ac in aclist: taxon_to_ac[ac.taxon].append(ac) for tx in missing_taxa: taxon_to_ac[tx].append(template) logging.debug("Write allele counts for all taxa") for tx, aclist in sorted(taxon_to_ac.items()): tx_acfile = op.join(acdir, tx + ".allele_counts") fw = open(tx_acfile, "w") print("# " + "\t".join(ACHEADER_NO_TAXON), file=fw) for ac in aclist: print(ac.tostring(), file=fw) fw.close() logging.debug("Written {0} sites in `{1}`".\ format(len(aclist), tx_acfile))
def mconsensus(args)
%prog mconsensus *.consensus Call consensus along the stacks from cross-sample clustering.
3.582958
3.552646
1.008532
p = OptionParser(consensus.__doc__) p.add_option("--ploidy", default=2, type="int", help="Number of haplotypes per locus") add_consensus_options(p) p.set_verbose() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) clustSfile, = args pf = clustSfile.rsplit(".", 1)[0] mindepth = opts.mindepth minlength = opts.minlength verbose = opts.verbose C = ClustFile(clustSfile) output = [] bins = [] indices = [] start = end = 0 # Index into base count array for data in C: names, seqs, nreps = zip(*data) total_nreps = sum(nreps) # Depth filter if total_nreps < mindepth: continue first_name, first_seq, first_nrep = data[0] fname = first_name.split(";")[0] + ";size={0};".format(total_nreps) cons_name, cons_seq, cons_nrep = get_seed(data) if len(data) > 1 and cons_name != CONSTAG: logging.debug("Tag {0} not found in cluster {1}".\ format(CONSTAG, cons_name)) # List for sequence data S = [(seq, nrep) for name, seq, nrep in data if nrep] # Pileups for base counting RAD = stack(S) if len(data) == 1: # No computation needed output.append((fname, seq)) bins.extend(RAD) start = end end += len(seq) indices.append((fname, start, end)) continue shortcon, shortRAD = compute_consensus(fname, cons_seq, \ RAD, S, total_nreps, mindepth=mindepth, verbose=verbose) if len(shortcon) < minlength: cons_seq = seq shortcon, shortRAD = compute_consensus(fname, first_seq,\ RAD, S, total_nreps, mindepth=mindepth, verbose=verbose) if len(shortcon) < minlength: # Stop trying continue output.append((fname, shortcon)) bins.extend(shortRAD) start = end end += len(shortcon) indices.append((fname, start, end)) consensfile = pf + ".consensus" consens = open(consensfile, 'w') for k, v in output: print("\n".join((k, v)), file=consens) consens.close() logging.debug("Consensus sequences written to `{0}`".format(consensfile)) binfile = consensfile + ".bin" bins = np.array(bins, dtype=np.uint32) ulimit = 65535 bins[bins > ulimit] = ulimit bins = np.array(bins, dtype=np.uint16) # Compact size bins.tofile(binfile) logging.debug("Allele counts written to `{0}`".format(binfile)) idxfile = consensfile + ".idx" fw = open(idxfile, "w") for fname, start, end in indices: print("\t".join(str(x) for x in (fname, start, end)), file=fw) fw.close() logging.debug("Serializing indices to `{0}`".format(idxfile)) return consensfile, binfile, idxfile
def consensus(args)
%prog consensus clustSfile Call consensus along the stacks. Tabulates bases at each site, tests for errors according to the error rate, and calls the consensus.
3.478853
3.35002
1.038458
S, nreps = zip(*S)
S = np.array([list(x) for x in S])
rows, cols = S.shape
counts = []
for c in xrange(cols):
    freq = [0] * NBASES
    for b, nrep in zip(S[:, c], nreps):
        freq[BASES.index(b)] += nrep
    counts.append(freq)
return counts
def stack(S)
From a list of (sequence, nrep) pairs, make counts of bases at each site.
4.259558
3.903899
1.091103
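A minimal usage sketch for stack() with hypothetical input; it assumes BASES/NBASES are the module-level base-alphabet constants that define the order of each count vector:
S = [("AT", 3), ("AC", 1)]  # "AT" seen in 3 reads, "AC" in 1 read
counts = stack(S)
# counts[0] tallies site 0 ('A' counted 4 times); counts[1] tallies site 1
# ('T' counted 3 times, 'C' once); each vector follows the order of BASES.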
cseq = seq.strip(GAPS)
leftjust = seq.index(cseq[0])
rightjust = seq.rindex(cseq[-1])
return leftjust, rightjust
def get_left_right(seq)
Find the positions of the first and last non-gap bases.
4.899249
4.555274
1.075511
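A short sketch of get_left_right(), assuming GAPS contains the gap character '-':
seq = "--ACGT--"
get_left_right(seq)  # -> (2, 5): indices of the first and last non-gap base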
C = ClustFile(f) for data in C: names, seqs, nreps = zip(*data) total_nreps = sum(nreps) # Depth filter if total_nreps < mindepth: continue S = [] for name, seq, nrep in data: # Append sequence * number of dereps S.append([seq, nrep]) # Make list for each site in sequences res = stack(S) yield [x[:4] for x in res if sum(x[:4]) >= mindepth]
def cons(f, mindepth)
For each cluster passing the depth filter, yield the list of per-site base counts.
8.61644
7.891144
1.091913
C = defaultdict(int)
for d in N:
    C[tuple(d)] += 1
return [i for i in C.items() if (0, 0, 0, 0) not in i]
def makeC(N)
Makes a dictionary mapping each base-count vector [x,x,x,x] to its frequency, which speeds up the likelihood calculation.
4.219463
4.384814
0.96229
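A small worked example of makeC() on hypothetical per-site base counts (item order may vary):
N = [[5, 0, 0, 0], [5, 0, 0, 0], [2, 3, 0, 0], [0, 0, 0, 0]]
makeC(N)
# -> [((5, 0, 0, 0), 2), ((2, 3, 0, 0), 1)]; the all-zero vector is dropped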
p = OptionParser(estimateHE.__doc__) add_consensus_options(p) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) clustSfile, = args HEfile = clustSfile.rsplit(".", 1)[0] + ".HE" if not need_update(clustSfile, HEfile): logging.debug("File `{0}` found. Computation skipped.".format(HEfile)) return HEfile D = [] for d in cons(clustSfile, opts.mindepth): D.extend(d) logging.debug("Computing base frequencies ...") P = makeP(D) C = makeC(D) logging.debug("Solving log-likelihood function ...") x0 = [.01, .001] # initital values H, E = scipy.optimize.fmin(LL, x0, args=(P, C)) fw = must_open(HEfile, "w") print(H, E, file=fw) fw.close() return HEfile
def estimateHE(args)
%prog estimateHE clustSfile Estimate heterozygosity (H) and error rate (E). Idea borrowed heavily from the PyRad paper.
4.077169
3.660784
1.113742
matfile = op.join(datadir, "blosum80.mat") cmd = "poa -read_fasta - -pir stdout {0} -tolower -silent -hb -fuse_all".format(matfile) p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) s = "" for i, j in zip(names, seqs): s += "\n".join((i, j)) + "\n" return p.communicate(s)[0]
def alignfast(names, seqs)
Performs POA (partial order alignment) on the cluster and returns the output as a string.
5.51488
5.646791
0.97664
seen = set()
filtered_names, filtered_seqs = [], []
for name, seq in zip(names, seqs):
    samp = name.split(sep, 1)[0]
    if samp in seen:
        continue
    seen.add(samp)
    filtered_names.append(name)
    filtered_seqs.append(seq)
nfiltered, nnames = len(filtered_names), len(names)
assert nfiltered == len(seen)
return filtered_names, filtered_seqs, seen
def filter_samples(names, seqs, sep='.')
When there are uncollapsed contigs within the same sample, only retain the first seq, or the seq that is most abundant (with cluster_size).
2.492549
2.494484
0.999224
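A quick sketch of filter_samples() with hypothetical read names, keeping only the first contig per sample prefix:
names = ["S1.ctg1;size=10;", "S1.ctg2;size=3;", "S2.ctg1;size=8;"]
seqs = ["AAAA", "AAAT", "CCCC"]
filter_samples(names, seqs)
# -> (["S1.ctg1;size=10;", "S2.ctg1;size=8;"], ["AAAA", "CCCC"], {"S1", "S2"})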
p = OptionParser(cluster.__doc__) add_consensus_options(p) p.set_align(pctid=95) p.set_outdir() p.set_cpus() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) prefix = args[0] fastqfiles = args[1:] cpus = opts.cpus pctid = opts.pctid mindepth = opts.mindepth minlength = opts.minlength fastafile, qualfile = fasta(fastqfiles + ["--seqtk", "--outdir={0}".format(opts.outdir), "--outfile={0}".format(prefix + ".fasta")]) prefix = op.join(opts.outdir, prefix) pf = prefix + ".P{0}".format(pctid) derepfile = prefix + ".derep" if need_update(fastafile, derepfile): derep(fastafile, derepfile, minlength, cpus) userfile = pf + ".u" notmatchedfile = pf + ".notmatched" if need_update(derepfile, userfile): cluster_smallmem(derepfile, userfile, notmatchedfile, minlength, pctid, cpus) clustfile = pf + ".clust" if need_update((derepfile, userfile, notmatchedfile), clustfile): makeclust(derepfile, userfile, notmatchedfile, clustfile, mindepth=mindepth) clustSfile = pf + ".clustS" if need_update(clustfile, clustSfile): parallel_musclewrap(clustfile, cpus) statsfile = pf + ".stats" if need_update(clustSfile, statsfile): makestats(clustSfile, statsfile, mindepth=mindepth)
def cluster(args)
%prog cluster prefix fastqfiles Use `vsearch` to remove duplicate reads. This routine is heavily influenced by PyRAD: <https://github.com/dereneaton/pyrad>.
3.203557
3.118106
1.027405
p = OptionParser(align.__doc__) p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) clustfile, = args parallel_musclewrap(clustfile, opts.cpus)
def align(args)
%prog align clustfile Align clustfile to clustSfile. Useful for benchmarking aligners.
3.97606
3.353732
1.185563
ax = np.array(x)
ay = np.array(y)
if title:
    print(dark(title), file=sys.stderr)
az = ay * width / ay.max()
tx = [asciiaxis(x, digit=digit) for x in ax]
rjust = max([len(x) for x in tx]) + 1
for x, y, z in zip(tx, ay, az):
    x = x.rjust(rjust)
    y = y or ""
    z = green(char * z)
    print("{0} |{1} {2}".format(x, z, y), file=sys.stderr)
def asciiplot(x, y, digit=1, width=50, title=None, char="=")
Print out a horizontal plot using ASCII chars. width is the maximum bar length of the plot, in characters.
4.347342
4.389501
0.990396
fig = plt.figure() ax = fig.add_subplot(111) xmax = 20 * (len(palette) + 1) x1s = np.arange(0, xmax, 20) xintervals = [10] * len(palette) xx = zip(x1s, xintervals) ax.broken_barh(xx, (5, 10), facecolors=palette) ax.set_ylim(0, 20) ax.set_xlim(0, xmax) ax.set_axis_off() savefig(outfile)
def print_colors(palette, outfile="Palette.png")
Print a color palette (a tuple of colors) to a PNG file for a quick check.
2.762453
2.736808
1.00937
import random from scipy import interpolate if usepreset: if 0 < N <= 5: cmap = cm.gist_rainbow elif N <= 20: cmap = cm.Set1 else: sys.exit(discrete_rainbow.__doc__) cdict = cmap._segmentdata.copy() # N colors colors_i = np.linspace(0,1.,N) # N+1 indices indices = np.linspace(0,1.,N+1) rgbs = [] for key in ('red','green','blue'): # Find the N colors D = np.array(cdict[key]) I = interpolate.interp1d(D[:,0], D[:,1]) colors = I(colors_i) rgbs.append(colors) # Place these colors at the correct indices. A = np.zeros((N+1,3), float) A[:,0] = indices A[1:,1] = colors A[:-1,2] = colors # Create a tuple for the dictionary. L = [] for l in A: L.append(tuple(l)) cdict[key] = tuple(L) palette = zip(*rgbs) if shuffle: random.shuffle(palette) if plot: print_colors(palette) # Return (colormap object, RGB tuples) return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024), palette
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \ plot=False)
Return a discrete colormap and the set of colors. Modified from <http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations> cmap: colormap instance, e.g. cm.jet. N: number of colors. Example >>> x = resize(arange(100), (5,100)) >>> djet = cmap_discretize(cm.jet, 5) >>> imshow(x, cmap=djet) See available matplotlib colormaps at: <http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/> If N > 20 the sampled colors might not be very distinctive; with usepreset=True this errors out, so set usepreset=False to try anyway.
3.428185
3.527971
0.971716
tc = "gray" axt = ax.transAxes yy = .95 for msg in messages: ax.text(.95, yy, msg, color=tc, transform=axt, ha="right") yy -= .05
def write_messages(ax, messages)
Write text on canvas, usually on the top right corner.
4.923167
4.442501
1.108197
''' TODO: redundant with quickplot(), need to be refactored. ''' if percentage: total_length = sum(data.values()) data = dict((k, v * 100. / total_length) for (k, v) in data.items()) left, height = zip(*sorted(data.items())) pad = max(height) * .01 c1, c2 = "darkslategray", "tomato" if counts: for l, h in zip(left, height): if xmax and l > xmax: break tag = str(int(h)) rotation = 90 if percentage: tag = append_percentage(tag) if int(tag) > 0 else "" rotation = 0 color = c1 if highlight is not None and l in highlight: color = c2 ax.text(l, h + pad, tag, color=color, size=8, ha="center", va="bottom", rotation=rotation) if xmax is None: xmax = max(left) ax.bar(left, height, align="center", fc=c1) if highlight: for h in highlight: ax.bar([h], [data[h]], align="center", ec=c2, fc=c2) ax.set_xlabel(markup(xlabel)) if ylabel: ax.set_ylabel(markup(ylabel)) if title: ax.set_title(markup(title)) ax.set_xlim((xmin - .5, xmax + .5)) if percentage: ax.set_ylim(0, 100)
def quickplot_ax(ax, data, xmin, xmax, xlabel, title=None, ylabel="Counts", counts=True, percentage=True, highlight=None)
TODO: redundant with quickplot(), needs to be refactored.
3.137947
2.916985
1.07575
plt.figure(1, (6, 6)) left, height = zip(*sorted(data.items())) pad = max(height) * .01 if counts: for l, h in zip(left, height): if xmax and l > xmax: break plt.text(l, h + pad, str(h), color="darkslategray", size=8, ha="center", va="bottom", rotation=90) if xmax is None: xmax = max(left) plt.bar(left, height, align="center") plt.xlabel(markup(xlabel)) plt.ylabel(markup(ylabel)) plt.title(markup(title)) plt.xlim((xmin - .5, xmax + .5)) # Basic statistics messages = [] counts_over_xmax = sum([v for k, v in data.items() if k > xmax]) if counts_over_xmax: messages += ["Counts over xmax({0}): {1}".format(xmax, counts_over_xmax)] kk = [] for k, v in data.items(): kk += [k] * v messages += ["Total: {0}".format(np.sum(height))] messages += ["Maximum: {0}".format(np.max(kk))] messages += ["Minimum: {0}".format(np.min(kk))] messages += ["Average: {0:.2f}".format(np.mean(kk))] messages += ["Median: {0}".format(np.median(kk))] ax = plt.gca() if print_stats: write_messages(ax, messages) set_human_axis(ax) set_ticklabels_helvetica(ax) savefig(figname)
def quickplot(data, xmin, xmax, xlabel, title, ylabel="Counts", figname="plot.pdf", counts=True, print_stats=True)
Simple plotting function - given a dictionary of data, produce a bar plot with the counts shown on the plot.
2.888687
2.865021
1.00826
parts = au.split()
first = parts[0]
middle = [x for x in parts if x[-1] == '.']
middle = "".join(middle)
last = [x for x in parts[1:] if x[-1] != '.']
last = " ".join(last)
initials = "{0}.{1}".format(first[0], middle)
if first[-1] == '.':  # Some people use full middle name
    middle, last = last.split(None, 1)
    initials = "{0}.{1}.".format(first[0], middle)
return last, first, initials
def get_name_parts(au)
Fares Z. Najar => last, first, initials >>> get_name_parts("Fares Z. Najar") ('Najar', 'Fares', 'F.Z.')
3.601522
3.718673
0.968496
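Another call sketch for get_name_parts(), complementing the doctest above, for a name without a middle initial:
get_name_parts("Haibao Tang")  # -> ('Tang', 'Haibao', 'H.')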
from jcvi.formats.base import read_block fp = open(lstfile) all_authors = [] for header, seq in read_block(fp, "["): seq = " ".join(seq) authors = [] for au in seq.split(","): au = au.strip() if not au: continue au = string.translate(au, None, string.digits) #au = au.replace("-", '') authors.append(au) all_authors.append(authors) out = [] for authors in all_authors: blocks = [] for au in authors: last, first, initials = get_name_parts(au) suffix = "" nameblock = NameTemplate.format(last=last, first=first, initials=initials, suffix=suffix) blocks.append(nameblock) bigblock = ",\n".join(blocks) out.append(bigblock) return out
def parse_names(lstfile)
This is the alternative `lstfile` format. In this format there are two sections, starting with [Sequence] and [Manuscript] respectively, each followed by authors separated by commas.
3.367142
3.314739
1.015809
p = OptionParser(names.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) namelist, templatefile = args # First check the alternative format if open(namelist).read()[0] == '[': out = parse_names(namelist) make_template(templatefile, out) return reader = csv.reader(open(namelist), delimiter="\t") header = next(reader) ncols = len(header) assert ncols > 3 nextras = ncols - 3 blocks = [] bools = [] for row in reader: first, middle, last = row[:3] extras = row[3:] bools.append([(x.upper() == 'Y') for x in extras]) middle = middle.strip() if middle != "": middle = middle.rstrip('.') + '.' initials = "{0}.{1}".format(first[0], middle) suffix = "" nameblock = NameTemplate.format(last=last, first=first, initials=initials, suffix=suffix) blocks.append(nameblock) selected_idx = zip(*bools) out = [] * nextras for i, sbools in enumerate(selected_idx): selected = [] for b, ss in zip(blocks, sbools): if ss: selected.append(b) bigblock = ",\n".join(selected) out.append(bigblock) logging.debug("List N{0} contains a total of {1} names.".format(i, len(selected))) make_template(templatefile, out)
def names(args)
%prog names namelist templatefile Generate name blocks from the `namelist` file. The `namelist` file is tab-delimited and contains >=4 columns of data. Three columns are mandatory: first name, middle initial and last name. The first row is the table header. For the extra columns, the first extra column will go in the `$N0` field in the template file, the second in the `$N1` field, etc. In the alternative mode, the namelist just contains several sections. The first section will go in the `$N0` field in the template file, the second in the `$N1` field. The namelist may look like: [Sequence] Bruce A. Roe, Frederic Debelle, Giles Oldroyd, Rene Geurts [Manuscript] Haibao Tang1, Vivek Krishnakumar1, Shelby Bidwell1, Benjamin Rosen1 In this example the Sequence section goes into N0 and the Manuscript section goes into N1. Useful hints for constructing the template file can be found at: <http://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/asn_spec/seq.asn.html> Often the template file can be retrieved from the web form: <http://www.ncbi.nlm.nih.gov/WebSub/template.cgi>
3.867542
3.425513
1.12904
p = OptionParser(main.__doc__) p.add_option("-g", "--graphic", default=False, action="store_true", help="Create boilerplate for a graphic script") opts, args = p.parse_args() if len(args) != 1: sys.exit(not p.print_help()) script, = args imports = graphic_imports if opts.graphic else default_imports app = graphic_app if opts.graphic else default_app template = default_template.format(imports, app) write_file(script, template) message = "template writes to `{0}`".format(script) if opts.graphic: message = "graphic " + message message = message[0].upper() + message[1:] logging.debug(message)
def main()
%prog scriptname.py create a minimal boilerplate for a new script
3.494385
3.375291
1.035284
sd = [ambiguous_dna_values[x] for x in s]
return ["".join(x) for x in list(product(*sd))]
def unpack_ambiguous(s)
Expand a sequence containing ambiguous bases into all possible unambiguous sequences.
5.841689
4.912655
1.18911
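A short sketch of unpack_ambiguous(); R is the IUPAC code for A/G, and ambiguous_dna_values is presumably the Biopython lookup table (Bio.Data.IUPACData):
unpack_ambiguous("AR")  # -> ['AA', 'AG']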
p = OptionParser(split.__doc__) p.set_outdir(outdir="deconv") p.add_option("--nocheckprefix", default=False, action="store_true", help="Don't check shared prefix [default: %default]") p.add_option("--paired", default=False, action="store_true", help="Paired-end data [default: %default]") p.add_option("--append", default=False, action="store_true", help="Append barcode to 2nd read [default: %default]") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) barcodefile = args[0] fastqfile = args[1:] paired = opts.paired append = opts.append if append: assert paired, "--append only works with --paired" nfiles = len(fastqfile) barcodes = [] fp = open(barcodefile) for row in fp: id, seq = row.split() for s in unpack_ambiguous(seq): barcodes.append(BarcodeLine._make((id, s))) nbc = len(barcodes) logging.debug("Imported {0} barcodes (ambiguous codes expanded).".format(nbc)) checkprefix = not opts.nocheckprefix if checkprefix: # Sanity check of shared prefix excludebarcodes = [] for bc in barcodes: exclude = [] for s in barcodes: if bc.id == s.id: continue assert bc.seq != s.seq if s.seq.startswith(bc.seq) and len(s.seq) > len(bc.seq): logging.error("{0} shares same prefix as {1}.".format(s, bc)) exclude.append(s) excludebarcodes.append(exclude) else: excludebarcodes = nbc * [[]] outdir = opts.outdir mkdir(outdir) cpus = opts.cpus logging.debug("Create a pool of {0} workers.".format(cpus)) pool = Pool(cpus) if paired: assert nfiles == 2, "You asked for --paired, but sent in {0} files".\ format(nfiles) split_fun = append_barcode_paired if append else split_barcode_paired mode = "paired" else: split_fun = split_barcode mode = "single" logging.debug("Mode: {0}".format(mode)) pool.map(split_fun, \ zip(barcodes, excludebarcodes, nbc * [outdir], nbc * [fastqfile]))
def split(args)
%prog split barcodefile fastqfile1 .. Deconvolute fastq files into subsets of fastq reads, based on the barcodes in the barcodefile, which is a two-column file like: ID01 AGTCCAG Input fastqfiles can be several files. Output files are ID01.fastq, ID02.fastq, one file per line in barcodefile. When --paired is set, the number of input fastqfiles must be two. Output file (the deconvoluted reads) will be in interleaved format.
3.162382
3.047901
1.03756
p = OptionParser(merge.__doc__) p.set_outdir(outdir="outdir") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) folders = args outdir = opts.outdir mkdir(outdir) files = flatten(glob("{0}/*.*.fastq".format(x)) for x in folders) files = list(files) key = lambda x: op.basename(x).split(".")[0] files.sort(key=key) for id, fns in groupby(files, key=key): fns = list(fns) outfile = op.join(outdir, "{0}.fastq".format(id)) FileMerger(fns, outfile=outfile).merge(checkexists=True)
def merge(args)
%prog merge folder1 ... Consolidate split contents in the folders. The folders can be generated by the split() process and several samples may be in separate fastq files. This program merges them.
3.191673
2.864713
1.114134
_p = set()
for x in p:
    _p |= set(range(x - tolerance, x + tolerance + 1))
return _p
def expand_alleles(p, tolerance=0)
Returns expanded allele set given the tolerance.
3.958681
3.49059
1.134101
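A quick sketch of expand_alleles() with a hypothetical allele pair:
expand_alleles({20, 40}, tolerance=1)
# -> {19, 20, 21, 39, 40, 41}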
_p1 = expand_alleles(p1, tolerance=tolerance)
_p2 = expand_alleles(p2, tolerance=tolerance)
possible_progenies = set(tuple(sorted(x)) for x in product(_p1, _p2))
if x_linked:  # Add all hemizygotes
    possible_progenies |= set((x, x) for x in (set(_p1) | set(_p2)))
return possible_progenies
def get_progenies(p1, p2, x_linked=False, tolerance=0)
Returns possible progenies in a trio.
3.126399
2.886915
1.082955
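A small sketch of get_progenies() for hypothetical parental calls:
get_progenies({20, 40}, {20}, tolerance=0)
# -> {(20, 20), (20, 40)}
get_progenies({20, 40}, {20}, x_linked=True, tolerance=0)
# with x_linked=True the hemizygotes (20, 20) and (40, 40) are also included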
p = OptionParser(mendelian_errors2.__doc__) opts, args, iopts = p.set_image_options(args, figsize="7x7", format="png") if len(args) != 1: sys.exit(not p.print_help()) csvfile, = args fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) ymin = -.2 df = pd.read_csv(csvfile) data = [] for i, d in df.iterrows(): tred = d['Name'] motif = d['Motif'] if tred in ignore: logging.debug("Ignore {}".format(d['TRED'])) continue if len(motif) > 6: if "/" in motif: # CTG/CAG motif = motif.split("/")[0] else: motif = motif[:6] + ".." xtred = "{} {}".format(tred, motif) accuracy = d[-1] data.append((xtred, accuracy)) key = lambda x: float(x.rstrip('%')) data.sort(key=lambda x: key(x[-1])) print(data) treds, accuracies = zip(*data) ntreds = len(treds) ticks = range(ntreds) accuracies = [key(x) for x in accuracies] for tick, accuracy in zip(ticks, accuracies): ax.plot([tick, tick], [ymin, accuracy], "-", lw=2, color='lightslategray') trios, = ax.plot(accuracies, "o", mfc='w', mec='b') ax.set_title("Mendelian errors based on STR calls in trios in HLI samples") ntrios = "Mendelian errors in 802 trios" ax.legend([trios], [ntrios], loc='best') ax.set_xticks(ticks) ax.set_xticklabels(treds, rotation=45, ha="right", size=8) ax.set_yticklabels([int(x) for x in ax.get_yticks()], family='Helvetica') ax.set_ylabel("Mendelian errors (\%)") ax.set_ylim(ymin, 100) normalize_axes(root) image_name = "mendelian_errors2." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def mendelian_errors2(args)
%prog mendelian_errors2 Trios.summary.csv Plot Mendelian errors as calculated by mendelian(). File `Trios.summary.csv` looks like: Name,Motif,Inheritance,N_Correct,N_Error,N_missing,ErrorRate [N_Error / (N_Correct + N_Error)] DM1,CTG,AD,790,12,0,1.5% DM2,CCTG,AD,757,45,0,5.6% DRPLA,CAG,AD,791,11,0,1.4%
3.437916
3.297673
1.042528
call_to_ints = lambda x: tuple(int(_) for _ in x.split("|") if _ != ".")
tp1_sex, tp1_call = tp1[:2]
tp2_sex, tp2_call = tp2[:2]
tpp_sex, tpp_call = tpp[:2]
# tp1_evidence = sum(int(x) for x in tp1[2:])
# tp2_evidence = sum(int(x) for x in tp2[2:])
# tpp_evidence = sum(int(x) for x in tpp[2:])
tp1_call = call_to_ints(tp1_call)
tp2_call = call_to_ints(tp2_call)
tpp_call = call_to_ints(tpp_call)
possible_progenies = set(tuple(sorted(x)) for x in product(tp1_call, tp2_call))
if is_xlinked and tpp_sex == "Male":
    possible_progenies = set(tuple((x,)) for x in tp1_call)
if -1 in tp1_call or -1 in tp2_call or -1 in tpp_call:
    tag = "Missing"
# elif tp1_evidence < 2 or tp2_evidence < 2 or tpp_evidence < 2:
#     tag = "Missing"
else:
    tag = "Correct" if tpp_call in possible_progenies else "Error"
return tag
def mendelian_check(tp1, tp2, tpp, is_xlinked=False)
Compare TRED calls for Parent1, Parent2 and Proband.
2.425333
2.359935
1.027712
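A call sketch for mendelian_check() with hypothetical (sex, call) tuples in the TRED table format:
tp1 = ("Female", "20|40")  # Parent 1
tp2 = ("Male", "20|20")    # Parent 2
mendelian_check(tp1, tp2, ("Male", "20|40"))   # -> "Correct"
mendelian_check(tp1, tp2, ("Male", "20|41"))   # -> "Error"
mendelian_check(tp1, tp2, ("Male", "-1|-1"))   # -> "Missing"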
return (rname == target_chr) and (target_start <= rstart <= target_end)
def in_region(rname, rstart, target_chr, target_start, target_end)
Quick check if a point is within the target region.
2.752519
2.604005
1.057033
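A one-line sketch of in_region() with hypothetical coordinates:
in_region("chr4", 3075000, "chr4", 3074000, 3080000)  # -> True
in_region("chr5", 3075000, "chr4", 3074000, 3080000)  # -> False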
import pysam from jcvi.utils.iter import pairwise from jcvi.utils.grouper import Grouper p = OptionParser(alts.__doc__) p.set_outfile(outfile="TREDs.alts.csv") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) treds = args repo = TREDsRepo() if "all" in treds: treds = repo.names pad_left, pad_right = 1000, 10000 READLEN = 150 fw = must_open(opts.outfile, "w") print("TRED,alts,alts.hg19", file=fw) # Header for tred in treds: ref_regions = [] # Simulate a depth 1000 BAM with 300 repeats for ref in ("hg38", "hg19"): # This is the region that involves the TRED locus repo = TREDsRepo(ref=ref) t = repo[tred] chr, start, end = t.chr, t.repeat_start, t.repeat_end start -= pad_left end += pad_right tred_ref = "{}_{}".format(tred, ref) if not op.isdir(tred_ref): simulate([tred_ref, "300", "300", "--depth=1000", "--ref={}".format(ref), "--tred={}".format(tred)]) bamfile = op.join(tred_ref, "300.bam") # Parse the BAM file, retrieve all regions bamfile = pysam.AlignmentFile(bamfile, "rb") nreads = altreads = 0 alt_points = set() for read in bamfile.fetch(): fname, fstart = bamfile.getrname(read.reference_id), read.reference_start rname, rstart = bamfile.getrname(read.next_reference_id), read.next_reference_start f_in_region = in_region(fname, fstart, chr, start, end) r_in_region = in_region(rname, rstart, chr, start, end) if (not f_in_region) and r_in_region: alt_points.add((fname, fstart)) altreads += 1 if (not r_in_region) and f_in_region: alt_points.add((rname, rstart)) altreads += 1 nreads += 1 logging.debug("A total of {} reads ({} alts) processed".\ format(nreads, altreads)) alt_points = natsorted(alt_points) # Chain these points together into regions g = Grouper() for a in alt_points: g.join(a) for a, b in pairwise(alt_points): achr, apos = a bchr, bpos = b if achr != bchr: continue if (bpos - apos) > READLEN: continue g.join(a, b) # All regions that contain ALT alt_sum = 0 regions = [] for c in g: chr_min, pos_min = min(c) chr_max, pos_max = max(c) assert chr_min, chr_max pos_min -= READLEN pos_max += READLEN regions.append((chr_min, pos_min, pos_max)) alt_sum += pos_max - pos_min regions = "|".join(["{}:{}-{}".format(c, start, end) \ for c, start, end in natsorted(regions)]) ref_regions.append(regions) line = ",".join([tred] + ref_regions) print(line, file=sys.stderr) print(line, file=fw) logging.debug("Alternative region sum: {} bp".format(alt_sum)) fw.close()
def alts(args)
%prog alts HD Build alternative loci based on simulation data.
3.054109
3.034291
1.006531
import seaborn as sns p = OptionParser(depth.__doc__) opts, args, iopts = p.set_image_options(args, figsize="14x14") if len(args) != 1: sys.exit(not p.print_help()) tsvfile, = args fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=6) data = pd.read_csv(tsvfile, sep="\t", low_memory=False) ids, treds = read_treds() for (dp, ax, title) in zip(("FDP", "PDP", "RDP", "PEDP"), (ax1, ax2, ax3, ax4), ("Spanning reads", "Partial reads", "Repeat-only reads", "Paired-end reads")): logging.debug("Build {}".format(title)) # Construct related data structure xd = [] # (tred, dp) mdp = [] # (tred, median_dp) for tred, motif in zip(treds["abbreviation"], treds["motif"]): if tred in ignore: logging.debug("Ignore {}".format(tred)) continue if len(motif) > 4: if "/" in motif: # CTG/CAG motif = motif.split("/")[0] else: motif = motif[:4] + ".." xtred = "{} {}".format(tred, motif) md = [x for x in data[tred + '.' + dp] if x >= 0] subsample = 10000 if dp == "RDP" else 1000 md = sample(md, subsample) pmd = [x for x in md if x > 0] median = np.median(pmd) if pmd else 0 mdp.append((xtred, median)) for d in md: xd.append((xtred, d)) # Determine order mdp.sort(key=lambda x: x[1]) order, mdp = zip(*mdp) # OK, now plot xt, xd = zip(*xd) sns.boxplot(xt, xd, ax=ax, order=order, fliersize=2) xticklabels = ax.get_xticklabels() ax.set_xticklabels(xticklabels, rotation=45, ha="right") ax.set_title("Number of {} per locus".format(title), size=18) ylim = 30 if dp == "RDP" else 100 ax.set_ylim(0, ylim) yticklabels = [int(x) for x in ax.get_yticks()] ax.set_yticklabels(yticklabels, family='Helvetica', size=14) root = fig.add_axes([0, 0, 1, 1]) pad = .04 panel_labels(root, ((pad, 1 - pad, "A"), (1 / 2. + pad / 2, 1 - pad, "B"), (pad, .5 - pad / 2, "C"), (1 / 2. + pad / 2, .5 - pad / 2, "D"))) normalize_axes(root) image_name = "depth." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def depth(args)
%prog depth DP.tsv Plot read depths across all TREDs.
3.201663
3.04475
1.051536
p = OptionParser(mendelian_errors.__doc__) opts, args, iopts = p.set_image_options(args, figsize="6x6") if len(args) != 1: sys.exit(not p.print_help()) csvfile, = args fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) ymin = -.2 df = pd.read_csv(csvfile) data = [] for i, d in df.iterrows(): if d['TRED'].split()[0] in ignore: logging.debug("Ignore {}".format(d['TRED'])) continue data.append(d) treds, duos, trios = zip(*data) ntreds = len(treds) ticks = range(ntreds) treds = [x.split()[0] for x in treds] duos = [float(x.rstrip('%')) for x in duos] trios = [float(x.rstrip('%')) for x in trios] for tick, duo, trio in zip(ticks, duos, trios): m = max(duo, trio) ax.plot([tick, tick], [ymin, m], "-", lw=2, color='lightslategray') duos, = ax.plot(duos, "o", mfc='w', mec='g') trios, = ax.plot(trios, "o", mfc='w', mec='b') ax.set_title("Mendelian errors based on trios and duos in HLI samples") nduos = "Mendelian errors in 362 duos" ntrios = "Mendelian errors in 339 trios" ax.legend([trios, duos], [ntrios, nduos], loc='best') ax.set_xticks(ticks) ax.set_xticklabels(treds, rotation=45, ha="right", size=8) yticklabels = [int(x) for x in ax.get_yticks()] ax.set_yticklabels(yticklabels, family='Helvetica') ax.set_ylabel("Mendelian errors (\%)") ax.set_ylim(ymin, 20) normalize_axes(root) image_name = "mendelian_errors." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def mendelian_errors(args)
%prog mendelian_errors STR-Mendelian-errors.csv Plot Mendelian errors as calculated by mendelian(). File `STR-Mendelian-errors.csv` looks like: ,Duos - Mendelian errors,Trios - Mendelian errors SCA36,1.40%,0.60% ULD,0.30%,1.50% BPES,0.00%,1.80% One TRED disease per line, followed by duo errors and trio errors.
2.710426
2.58465
1.048663
self_key = ["ChildSelf"] keys = family.keys() spouse_key = [x for x in keys if ("spouse" in x.lower())] assert len(spouse_key) <= 1 parent_keys = [x for x in keys if \ ("parent" in x.lower()) and ("grand" not in x.lower())] sib_keys = [x for x in keys if ("sibling" in x.lower()) \ or ("twin" in x.lower())] + self_key child_keys = [x for x in keys if \ ("child" in x.lower()) and ("grand" not in x.lower()) \ and ("self" not in x.lower())] for sk in sib_keys: yield TrioOrDuo(parent_keys, [sk], family) for ck in child_keys: yield TrioOrDuo(self_key + spouse_key, [ck], family)
def extract_trios(family)
Identify all trios/duos inside a family, where a family contains dictionary of relationship: individual, for example: { "ChildSelf": "176531498", "DzTwin": "176531497", "Parent": "176449143" }
3.457097
3.008599
1.149072
df = pd.read_csv(tsvfile, sep="\t", index_col=0, dtype={"SampleKey": str})
return df
def read_tred_tsv(tsvfile)
Read the TRED table into a dataframe.
3.893492
3.781091
1.029727
p = OptionParser(mendelian.__doc__) p.add_option("--tolerance", default=0, type="int", help="Tolernace for differences") p.set_verbose() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) triosjson, tredtsv = args verbose = opts.verbose tolerance = opts.tolerance js = json.load(open(triosjson)) allterms = set() duos = set() trios = set() for v in js: allterms |= set(v.keys()) for trio_or_duo in extract_trios(v): assert len(trio_or_duo) in (2, 3) if len(trio_or_duo) == 2: duos.add(trio_or_duo) else: trios.add(trio_or_duo) # print "\n".join(allterms) print("A total of {} families imported".format(len(js))) # Read in all data df = read_tred_tsv(tredtsv) ids, treds = read_treds() table = {} for tred, inheritance in zip(treds["abbreviation"], treds["inheritance"]): x_linked = inheritance[0] == 'X' # X-linked name = tred if x_linked: name += " (X-linked)" print("[TRED] {}".format(name)) n_total = len(duos) n_error = 0 for duo in duos: n_error += duo.check_mendelian(df, tred, tolerance=tolerance, x_linked=x_linked, verbose=verbose) tag = "Duos - Mendelian errors" print("{}: {}".format(tag, percentage(n_error, n_total))) duo_error = percentage(n_error, n_total, mode=2) table[(name, tag)] = "{0:.1f}%".format(duo_error) n_total = len(trios) n_error = 0 for trio in trios: n_error += trio.check_mendelian(df, tred, tolerance=tolerance, x_linked=x_linked, verbose=verbose) tag = "Trios - Mendelian errors" print("{}: {}".format(tag, percentage(n_error, n_total))) trio_error = percentage(n_error, n_total, mode=2) table[(name, tag)] = "{0:.1f}%".format(trio_error) # Summarize print(tabulate(table))
def mendelian(args)
%prog mendelian trios_candidate.json hli.20170424.tred.tsv Calculate Mendelian errors based on trios and duos.
2.93967
2.717172
1.081886
p = OptionParser(mini.__doc__)
p.add_option("--pad", default=20000, type="int",
             help="Add padding to the STR regions")
p.add_option("--treds", default=None,
             help="Extract specific treds, use comma to separate")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
    sys.exit(not p.print_help())

bamfile, minibam = args
treds = opts.treds.split(",") if opts.treds else None
pad = opts.pad
bedfile = make_STR_bed(pad=pad, treds=treds)
get_minibam_bed(bamfile, bedfile, minibam)
logging.debug("Mini-BAM written to `{}`".format(minibam))
def mini(args)
%prog mini bamfile minibamfile Prepare mini-BAMs that contain only the STR loci.
3.494931
2.879089
1.213902
p = OptionParser(likelihood.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x5", style="white", cmap="coolwarm") if len(args) != 0: sys.exit(not p.print_help()) fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=4) # Haploid model LL, CI_h1, CI_h2, MLE = parse_log("100_100.log") data = [] for k, v in LL.items(): data.append((k[0], v)) data.sort() x, y = zip(*data) x = np.array(x) curve, = ax1.plot(x, y, "-", color=lsg, lw=2) ax1.set_title("Simulated haploid ($h^{truth}=100$)") h_hat, max_LL = max(data, key=lambda x: x[-1]) _, min_LL = min(data, key=lambda x: x[-1]) ymin, ymax = ax1.get_ylim() ax1.set_ylim([ymin, ymax + 30]) LL_label = "log(Likelihood)" ax1.plot([h_hat, h_hat], [ymin, max_LL], ":", color=lsg, lw=2) ax1.text(h_hat, max_LL + 10, r"$\hat{h}=93$", color=lsg) ax1.set_xlabel(r"$h$") ax1.set_ylabel(LL_label) a, b = CI_h1 ci = ax1.fill_between(x, [ymin] * len(x), y, where=(x >= a) & (x <= b), color=lsg, alpha=.5) ax1.legend([curve, ci], ["Likelihood curve", r'95$\%$ CI'], loc='best') # Diploid model LL, CI_h1, CI_h2, MLE = parse_log("100_20.log") h_hat, max_LL = max(data, key=lambda x: x[-1]) _, min_LL = min(data, key=lambda x: x[-1]) data = np.ones((301, 301)) * min_LL for k, v in LL.items(): a, b = k data[a, b] = v data[b, a] = v data = mask_upper_triangle(data) ax_imshow(ax2, data, opts.cmap, LL_label, 20, 104) root = fig.add_axes([0, 0, 1, 1]) pad = .04 panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"))) normalize_axes(root) image_name = "likelihood." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def likelihood(args)
%prog likelihood Plot likelihood surface. Look for two files in the current folder: - 100_100.log, haploid model - 100_20.log, diploid model
3.072207
2.815662
1.091114
from matplotlib import gridspec p = OptionParser(likelihood2.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x5", style="white", cmap="coolwarm") if len(args) != 1: sys.exit(not p.print_help()) jsonfile, = args fig = plt.figure(figsize=(iopts.w, iopts.h)) gs = gridspec.GridSpec(2, 2) ax1 = fig.add_subplot(gs[:, 0]) ax2 = fig.add_subplot(gs[0, 1]) ax3 = fig.add_subplot(gs[1, 1]) plt.tight_layout(pad=3) pf = plot_panel(jsonfile, ax1, ax2, ax3, opts.cmap) root = fig.add_axes([0, 0, 1, 1]) normalize_axes(root) image_name = "likelihood2.{}.".format(pf) + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def likelihood2(args)
%prog likelihood2 100_20.json Plot the likelihood surface and marginal distributions.
2.750187
2.524802
1.089268
from matplotlib import gridspec p = OptionParser(likelihood3.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x10", style="white", cmap="coolwarm") if len(args) != 2: sys.exit(not p.print_help()) jsonfile1, jsonfile2 = args fig = plt.figure(figsize=(iopts.w, iopts.h)) gs = gridspec.GridSpec(9, 2) ax1 = fig.add_subplot(gs[:4, 0]) ax2 = fig.add_subplot(gs[:2, 1]) ax3 = fig.add_subplot(gs[2:4, 1]) ax4 = fig.add_subplot(gs[5:, 0]) ax5 = fig.add_subplot(gs[5:7, 1]) ax6 = fig.add_subplot(gs[7:, 1]) plt.tight_layout(pad=2) plot_panel(jsonfile1, ax1, ax2, ax3, opts.cmap) plot_panel(jsonfile2, ax4, ax5, ax6, opts.cmap) root = fig.add_axes([0, 0, 1, 1]) pad = .02 panel_labels(root, ((pad, 1 - pad, "A"), (pad, 4. / 9, "B"))) normalize_axes(root) image_name = "likelihood3." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def likelihood3(args)
%prog likelihood3 140_20.json 140_70.json Plot the likelihood surface and marginal distributions for two settings.
2.457937
2.289191
1.073714
p = OptionParser(diagram.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x4", format="png") if len(args) != 0: sys.exit(not p.print_help()) fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) # Gauge on top, this is log-scale yy = .7 yinterval = .1 height = .05 yp = yy - yinterval - height canvas = .95 xstart = .025 convert = lambda x: xstart + x * canvas / 600 # Symbols root.text(.5, .9, r"$L$: Read length, $F$: Flank size, $V$: Pair distance", ha="center") root.text(.5, .85, r"ex. $L=150bp, F=9bp, V=500bp$", ha="center") root.text(xstart + canvas, yy - height, "STR repeat length", ha="center", color=lsg, size=10) # Mark the key events pad = .02 arrowlen = canvas * 1.05 arrowprops = dict(length_includes_head=True, width=.01, fc=lsg, lw=0, head_length=arrowlen * .12, head_width=.04) p = FancyArrow(xstart, yy, arrowlen, 0, shape="right", **arrowprops) root.add_patch(p) ppad = 30 keyevents = (( 0, 0, -1, r"$0$"), (150 - 18, 150 - 18 - ppad, 0, r"$L - 2F$"), (150 - 9, 150 - 9, 1, r"$L - F$"), (150, 150 + ppad, 2, r"$L$"), (500 - 9, 500 - 9, 3, r"$V - F$"), ) for event, pos, i, label in keyevents: _event = convert(event) _pos = convert(pos) root.plot((_event, _event), (yy - height / 4, yy + height / 4), '-', color='k') root.text(_pos, yy + pad, label, rotation=45, va="bottom", size=8) if i < 0: continue ystart = yp - i * yinterval root.plot((_event, _event), (ystart, yy - height / 4), ':', color=lsg) # Range on bottom. These are simple 4 rectangles, with the range indicating # the predictive range. CLOSED, OPEN = range(2) ranges = ((0, 150 - 18, CLOSED, "Spanning reads"), (9, 150 - 9, OPEN, "Partial reads"), (150, 500 - 9, CLOSED, "Repeat reads"), (0, 500 - 9, CLOSED, "Paired-end reads"), ) for start, end, starttag, label in ranges: _start = convert(start) _end = convert(end) data = [[0., 1.], [0., 1.]] if starttag == OPEN else \ [[1., 0.], [1., 0.]] root.imshow(data, interpolation='bicubic', cmap=plt.cm.Greens, extent=[_start, _end, yp, yp + height]) root.text(_end + pad, yp + height / 2, label, va="center") yp -= yinterval normalize_axes(root) image_name = "diagram." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def diagram(args)
%prog diagram Plot the predictive power of various evidences.
3.933568
3.89674
1.009451
p = OptionParser(allelefreqall.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) reportfile, = args treds, df = read_treds(reportfile) # Prepare 5 pages, each page with 6 distributions treds = sorted(treds) count = 6 pdfs = [] for page in xrange(len(treds) / count + 1): start = page * count page_treds = treds[start: start + count] if not page_treds: break allelefreq([",".join(page_treds), "--usereport", reportfile, "--nopanels", "--figsize", "12x16"]) outpdf = "allelefreq.{}.pdf".format(page) sh("mv allelefreq.pdf {}".format(outpdf)) pdfs.append(outpdf) from jcvi.formats.pdf import cat pf = op.basename(reportfile).split(".")[0] finalpdf = pf + ".allelefreq.pdf" logging.debug("Merging pdfs into `{}`".format(finalpdf)) cat(pdfs + ["-o", finalpdf, "--cleanup"])
def allelefreqall(args)
%prog allelefreqall HN_Platinum_Gold.20180525.tsv.report.txt Plot all 30 STR allele frequencies.
4.114099
4.084572
1.007229
p = OptionParser(allelefreq.__doc__) p.add_option("--nopanels", default=False, action="store_true", help="No panel labels A, B, ...") p.add_option("--usereport", help="Use allele frequency in report file") opts, args, iopts = p.set_image_options(args, figsize="9x13") if len(args) != 1: sys.exit(not p.print_help()) loci, = args fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(ncols=2, nrows=3, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=4) if opts.usereport: treds, df = read_treds(tredsfile=opts.usereport) else: treds, df = read_treds() df = df.set_index(["abbreviation"]) axes = (ax1, ax2, ax3, ax4, ax5, ax6) loci = loci.split(",") for ax, locus in zip(axes, loci): plot_allelefreq(ax, df, locus) # Delete unused axes for ax in axes[len(loci):]: ax.set_axis_off() root = fig.add_axes([0, 0, 1, 1]) pad = .03 if not opts.nopanels: panel_labels(root, ((pad / 2, 1 - pad, "A"), (.5 + pad, 1 - pad, "B"), (pad / 2, 2 / 3. - pad / 2, "C"), (.5 + pad, 2 / 3. - pad / 2, "D"), (pad / 2, 1 / 3. , "E"), (.5 + pad, 1 / 3. , "F"), )) normalize_axes(root) image_name = "allelefreq." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def allelefreq(args)
%prog allelefreq HD,DM1,SCA1,SCA17,FXTAS,FRAXE Plot the allele frequencies of some STRs.
2.65593
2.625992
1.011401
p = OptionParser(simulate.__doc__) p.add_option("--method", choices=("wgsim", "eagle"), default="eagle", help="Read simulator") p.add_option("--ref", default="hg38", choices=("hg38", "hg19"), help="Reference genome version") p.add_option("--tred", default="HD", help="TRED locus") add_simulate_options(p) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) rundir, startunits, endunits = args ref = opts.ref ref_fasta = "/mnt/ref/{}.upper.fa".format(ref) startunits, endunits = int(startunits), int(endunits) basecwd = os.getcwd() mkdir(rundir) os.chdir(rundir) cwd = os.getcwd() # TRED region (e.g. Huntington) pad_left, pad_right = 1000, 10000 repo = TREDsRepo(ref=ref) tred = repo[opts.tred] chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end logging.debug("Simulating {}".format(tred)) fasta = Fasta(ref_fasta) seq_left = fasta[chr][start - pad_left:start - 1] seq_right = fasta[chr][end: end + pad_right] motif = tred.repeat simulate_method = wgsim if opts.method == "wgsim" else eagle # Write fake sequence for units in range(startunits, endunits + 1): pf = str(units) mkdir(pf) os.chdir(pf) seq = str(seq_left) + motif * units + str(seq_right) fastafile = pf + ".fasta" make_fasta(seq, fastafile, id=chr.upper()) # Simulate reads on it simulate_method([fastafile, "--depth={}".format(opts.depth), "--readlen={}".format(opts.readlen), "--distance={}".format(opts.distance), "--outfile={}".format(pf)]) read1 = pf + ".bwa.read1.fastq" read2 = pf + ".bwa.read2.fastq" samfile, _ = align([ref_fasta, read1, read2]) indexed_samfile = index([samfile]) sh("mv {} ../{}.bam".format(indexed_samfile, pf)) sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf)) os.chdir(cwd) shutil.rmtree(pf) os.chdir(basecwd)
def simulate(args)
%prog simulate run_dir 1 300 Simulate BAMs with varying numbers of repeat units, using wgsim or eagle. The above command will simulate between 1 and 300 CAGs in the HD region, in a directory called `run_dir`.
3.401989
3.316764
1.025695
p = OptionParser(mergebam.__doc__) p.set_cpus() opts, args = p.parse_args(args) if len(args) not in (2, 3): sys.exit(not p.print_help()) if len(args) == 2: idir1, outdir = args dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam") logging.debug("Homozygous mode") dir2 = [""] * len(dir1) elif len(args) == 3: idir1, idir2, outdir = args dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam") dir2 = [idir2] if idir2.endswith(".bam") else iglob(idir2, "*.bam") assert len(dir2) == 1, "Second pile must contain a single bam" dir2 = [idir2] * len(dir1) assert len(dir1) == len(dir2), "Two piles must contain same number of bams" cmd = "samtools merge {} {} {} && samtools index {}" cmds = [] mkdir(outdir) for a, b in zip(dir1, dir2): ia = op.basename(a).split(".")[0] ib = op.basename(b).split(".")[0] if b else ia outfile = op.join(outdir, "{}_{}.bam".format(ia, ib)) cmds.append(cmd.format(outfile, a, b, outfile)) p = Parallel(cmds, cpus=opts.cpus) p.run()
def mergebam(args)
%prog mergebam dir1 homo_outdir or %prog mergebam dir1 dir2/20.bam het_outdir Merge sets of BAMs to make diploid. Two modes: - Homozygous mode: pair-up the bams in the two folders and merge - Heterozygous mode: pair the bams in first folder with a particular bam
2.362583
2.285603
1.033681
p = OptionParser(batchlobstr.__doc__) p.add_option("--haploid", default="chrY,chrM", help="Use haploid model for these chromosomes") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bamlist, = args cmd = "python -m jcvi.variation.str lobstr TREDs" cmd += " --input_bam_path {}" cmd += " --haploid {}".format(opts.haploid) cmd += " --simulation" cmds = [cmd.format(x.strip()) for x in open(bamlist).readlines()] p = Parallel(cmds, cpus=opts.cpus) p.run()
def batchlobstr(args)
%prog batchlobstr bamlist Run lobSTR on a list of BAMs. The corresponding batch command for TREDPARSE: $ tred.py bamlist --haploid chr4 --workdir tredparse_results
3.976732
3.606621
1.10262
from jcvi.variation.str import LobSTRvcf p = OptionParser(compilevcf.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args vcf_files = iglob(folder, "*.vcf,*.vcf.gz") for vcf_file in vcf_files: try: p = LobSTRvcf(columnidsfile=None) p.parse(vcf_file, filtered=False) res = p.items() if res: k, v = res[0] res = v.replace(',', '/') else: res = "-1/-1" num = op.basename(vcf_file).split(".")[0] print(num, res) except (TypeError, AttributeError) as e: p = TREDPARSEvcf(vcf_file) continue
def compilevcf(args)
%prog compilevcf dir Compile vcf outputs into lists.
4.953265
4.859082
1.019383
p = OptionParser(evidences.__doc__) p.add_option("--csv", default="hli.20170328.tred.tsv", help="TRED csv output to plot") opts, args, iopts = p.set_image_options(args, format="pdf") if len(args) != 0: sys.exit(not p.print_help()) format = iopts.format # Extract sample coverage first df = pd.read_csv("qc-export-MeanCoverage.csv", header=None, names=["Samplekey", "MeanCoverage"], index_col=0) # Find coverage for HD xf = pd.read_csv(opts.csv, sep="\t", index_col=0) dp = {} tred = "HD" for sk, row in xf.iterrows(): sk = str(sk) a1 = row[tred + ".1"] a2 = row[tred + ".2"] fdp = row[tred + ".FDP"] pdp = row[tred + ".PDP"] pedp = row[tred + ".PEDP"] dp[sk] = (a1, a2, fdp, pdp, pedp) # Build a consolidated dataframe ef = pd.DataFrame.from_dict(dp, orient="index") ef.columns = [tred + ".1", tred + ".2", tred + ".FDP", tred + ".PDP", tred + ".PEDP"] ef.index.name = "SampleKey" mf = df.merge(ef, how="right", left_index=True, right_index=True) # Plot a bunch of figures outdir = "output" mkdir(outdir) xlim = ylim = (0, 100) draw_jointplot(outdir + "/A", "MeanCoverage", "HD.FDP", data=mf, xlim=xlim, ylim=ylim, format=format) draw_jointplot(outdir + "/B", "MeanCoverage", "HD.PDP", data=mf, color='g', xlim=xlim, ylim=ylim, format=format) draw_jointplot(outdir + "/C", "MeanCoverage", "HD.PEDP", data=mf, color='m', xlim=xlim, ylim=ylim, format=format) xlim = (0, 50) draw_jointplot(outdir + "/D", "HD.2", "HD.FDP", data=mf, xlim=xlim, ylim=ylim, format=format) draw_jointplot(outdir + "/E", "HD.2", "HD.PDP", data=mf, color='g', xlim=xlim, ylim=ylim, format=format) draw_jointplot(outdir + "/F", "HD.2", "HD.PEDP", data=mf, color='m', xlim=xlim, ylim=ylim, format=format)
def evidences(args)
%prog evidences Plot distribution of evidences against two factors: - Sample mean coverage - Longer allele
2.590734
2.572255
1.007184
import seaborn as sns sns.set_context('talk') plt.clf() register = {"MeanCoverage": "Sample Mean Coverage", "HD.FDP": "Depth of full spanning reads", "HD.PDP": "Depth of partial spanning reads", "HD.PEDP": "Depth of paired-end reads", "HD.2": "Repeat size of the longer allele"} g = sns.jointplot(x, y, data=data, kind=kind, color=color, xlim=xlim, ylim=ylim) g.ax_joint.set_xlabel(register.get(x, x)) g.ax_joint.set_ylabel(register.get(y, y)) savefig(figname + "." + format, cleanup=False)
def draw_jointplot(figname, x, y, data=None, kind="reg", color=None, xlim=None, ylim=None, format="pdf")
Wraps around sns.jointplot
4.202741
4.260574
0.986426
a, b = s.split("|")
ai, aj = a.split("-")
bi, bj = b.split("-")
los = [int(ai), int(bi)]
his = [int(aj), int(bj)]
if exclude and exclude in los:
    los.remove(exclude)
if exclude and exclude in his:
    his.remove(exclude)
return max(los), max(his)
def get_lo_hi_from_CI(s, exclude=None)
Parse the confidence interval from CI. >>> get_lo_hi_from_CI("20-20|40-60") (40, 60)
2.461694
2.867074
0.858608
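A sketch showing the effect of `exclude`, used when one simulated allele is fixed (hypothetical CI string):
get_lo_hi_from_CI("20-20|5-8")              # -> (20, 20)
get_lo_hi_from_CI("20-20|5-8", exclude=20)  # -> (5, 8)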
p = OptionParser(compare.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x10") if len(args) != 1: sys.exit(not p.print_help()) datafile, = args pf = datafile.rsplit(".", 1)[0] fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=3) bbox = {'facecolor': 'tomato', 'alpha': .2, 'ec': 'w'} pad = 2 # Read benchmark data df = pd.read_csv("Evaluation.csv") truth = df["Truth"] axes = (ax1, ax2, ax3, ax4) progs = ("Manta", "Isaac", "GATK", "lobSTR") markers = ("bx-", "yo-", "md-", "c+-") for ax, prog, marker in zip(axes, progs, markers): ax.plot(truth, df[prog], marker) ax.plot(truth, truth, 'k--') # to show diagonal ax.axhline(infected_thr, color='tomato') ax.text(max(truth) - pad, infected_thr + pad, 'Risk threshold', bbox=bbox, ha="right") ax.axhline(ref_thr, color='tomato') ax.text(max(truth) - pad, ref_thr - pad, 'Reference repeat count', bbox=bbox, ha="right", va="top") ax.set_title(SIMULATED_HAPLOID) ax.set_xlabel(r'Num of CAG repeats inserted ($\mathit{h}$)') ax.set_ylabel('Num of CAG repeats called') ax.legend([prog, 'Truth'], loc='best') root = fig.add_axes([0, 0, 1, 1]) pad = .03 panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"), (pad / 2, 1 / 2., "C"), (1 / 2., 1 / 2. , "D"))) normalize_axes(root) image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def compare(args)
%prog compare Evaluation.csv Compare performances of various variant callers on simulated STR datasets.
3.800547
3.429311
1.108254
p = OptionParser(compare2.__doc__) p.add_option('--maxinsert', default=300, type="int", help="Maximum number of repeats") add_simulate_options(p) opts, args, iopts = p.set_image_options(args, figsize="10x5") if len(args) != 0: sys.exit(not p.print_help()) depth = opts.depth readlen = opts.readlen distance = opts.distance max_insert = opts.maxinsert fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=2) # ax1: lobSTR vs TREDPARSE with haploid model lobstr_results = parse_results("lobstr_results_homo.txt") tredparse_results = parse_results("tredparse_results_homo.txt") title = SIMULATED_HAPLOID + \ r" ($D=%s\times, L=%dbp, V=%dbp$)" % (depth, readlen, distance) plot_compare(ax1, title, tredparse_results, lobstr_results, max_insert=max_insert) # ax2: lobSTR vs TREDPARSE with diploid model lobstr_results = parse_results("lobstr_results_het.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het.txt", exclude=20) title = SIMULATED_DIPLOID + \ r" ($D=%s\times, L=%dbp, V=%dbp$)" % (depth, readlen, distance) plot_compare(ax2, title, tredparse_results, lobstr_results, max_insert=max_insert) for ax in (ax1, ax2): ax.set_xlim(0, max_insert) ax.set_ylim(0, max_insert) root = fig.add_axes([0, 0, 1, 1]) pad = .03 panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"))) normalize_axes(root) image_name = "tredparse." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def compare2(args)
%prog compare2 Compare performances of various variant callers on simulated STR datasets.
2.679372
2.579708
1.038634
p = OptionParser(power.__doc__) p.add_option('--maxinsert', default=300, type="int", help="Maximum number of repeats") add_simulate_options(p) opts, args, iopts = p.set_image_options(args, figsize="10x10", format="png") if len(args) != 0: sys.exit(not p.print_help()) max_insert = opts.maxinsert fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=3) color = "lightslategray" # ax1: Spanning tredparse_results = parse_results("tredparse_results_het-spanning.txt") title = SIMULATED_DIPLOID + " (Sub-model 1: Spanning reads)" plot_compare(ax1, title, tredparse_results, None, color=color, max_insert=max_insert, risk=False) # ax2: Partial tredparse_results = parse_results("tredparse_results_het-partial.txt", exclude=20) title = SIMULATED_DIPLOID + " (Sub-model 2: Partial reads)" plot_compare(ax2, title, tredparse_results, None, color=color, max_insert=max_insert, risk=False) # ax3: Repeat tredparse_results = parse_results("tredparse_results_het-repeat.txt", exclude=20) # HACK (repeat reads won't work under 50) tredparse_results = [x for x in tredparse_results if x[0] > 50] title = SIMULATED_DIPLOID + " (Sub-model 3: Repeat-only reads)" plot_compare(ax3, title, tredparse_results, None, color=color, max_insert=max_insert, risk=False) # ax4: Pair tredparse_results = parse_results("tredparse_results_het-pair.txt", exclude=20) title = SIMULATED_DIPLOID + " (Sub-model 4: Paired-end reads)" plot_compare(ax4, title, tredparse_results, None, color=color, max_insert=max_insert, risk=False) for ax in (ax1, ax2, ax3, ax4): ax.set_xlim(0, max_insert) ax.set_ylim(0, max_insert) root = fig.add_axes([0, 0, 1, 1]) pad = .03 panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"), (pad / 2, 1 / 2. , "C"), (1 / 2., 1 / 2. , "D"))) normalize_axes(root) image_name = "power." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def power(args)
%prog power Compare performances of various variant callers on simulated STR datasets. This compares the power of various evidence types.
2.410269
2.356353
1.022881
p = OptionParser(tredparse.__doc__) p.add_option('--maxinsert', default=300, type="int", help="Maximum number of repeats") add_simulate_options(p) opts, args, iopts = p.set_image_options(args, figsize="10x10") if len(args) != 0: sys.exit(not p.print_help()) depth = opts.depth max_insert = opts.maxinsert fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=3) # ax1: lobSTR vs TREDPARSE with haploid model lobstr_results = parse_results("lobstr_results_homo-20x-150bp-500bp.txt") tredparse_results = parse_results("tredparse_results_homo-20x-150bp-500bp.txt") title = SIMULATED_HAPLOID + r" (Depth=$%s\times$)" % depth plot_compare(ax1, title, tredparse_results, lobstr_results, max_insert=max_insert) # ax2: lobSTR vs TREDPARSE with diploid model (depth=20x) lobstr_results = parse_results("lobstr_results_het-20x-150bp-500bp.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het-20x-150bp-500bp.txt", exclude=20) title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % depth plot_compare(ax2, title, tredparse_results, lobstr_results, max_insert=max_insert) # ax3: lobSTR vs TREDPARSE with diploid model (depth=5x) lobstr_results = parse_results("lobstr_results_het-5x-150bp-500bp.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het-5x-150bp-500bp.txt", exclude=20) title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % 5 plot_compare(ax3, title, tredparse_results, lobstr_results, max_insert=max_insert) # ax4: lobSTR vs TREDPARSE with diploid model (depth=80x) lobstr_results = parse_results("lobstr_results_het-80x-150bp-500bp.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het-80x-150bp-500bp.txt", exclude=20) title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % 80 plot_compare(ax4, title, tredparse_results, lobstr_results, max_insert=max_insert) for ax in (ax1, ax2, ax3, ax4): ax.set_xlim(0, max_insert) ax.set_ylim(0, max_insert) root = fig.add_axes([0, 0, 1, 1]) pad = .03 panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"), (pad / 2, 1 / 2. , "C"), (1 / 2., 1 / 2. , "D"))) normalize_axes(root) image_name = "tredparse." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def tredparse(args)
%prog tredparse Compare performances of various variant callers on simulated STR datasets. Adds coverage comparisons as panel C and D.
1.910129
1.86906
1.021973
from jcvi.utils.cbook import percentage

if summary:
    unique = len(data)
    total = sum(data)
    # Print out a distribution
    print("Unique: {0}".format(percentage(unique, total)), file=sys.stderr)

bins = defaultdict(int)
for d in data:
    logd = int(log(d, base))
    bins[logd] += 1

x, y = [], []
for size, number in sorted(bins.items()):
    lb, ub = base ** size, base ** (size + 1)
    x.append((lb, ub))
    y.append(number)

asciiplot(x, y, title=title)
def loghistogram(data, base=2, ascii=True, title="Counts", summary=False)
Generate a log-scale histogram of the data as an ASCII plot; bins is a dictionary with key: log(x, base), value: counts.
3.783378
3.657698
1.03436
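As a worked illustration of the binning above (the data values here are invented), each value lands in the bin indexed by floor(log(x, base)), so with base=2 the bin [2**k, 2**(k+1)) collects everything in that range.

# Minimal sketch of the log-base-2 binning used by loghistogram.
from collections import defaultdict
from math import log

data = [1, 3, 5, 6, 7, 12, 100]
base = 2
bins = defaultdict(int)
for d in data:
    bins[int(log(d, base))] += 1              # floor of log2(d)

for size, number in sorted(bins.items()):
    lb, ub = base ** size, base ** (size + 1)
    print("[{0}, {1}): {2}".format(lb, ub, number))
# [1, 2): 1, [2, 4): 1, [4, 8): 3, [8, 16): 1, [64, 128): 1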
''' Generate stem and leaf plot given a collection of numbers ''' assert bins > 0 range = vmax - vmin step = range * 1. / bins if isinstance(range, int): step = int(ceil(step)) step = step or 1 bins = np.arange(vmin, vmax + step, step) hist, bin_edges = np.histogram(data, bins=bins) # By default, len(bin_edges) = len(hist) + 1 bin_edges = bin_edges[:len(hist)] asciiplot(bin_edges, hist, digit=digit, title=title) print("Last bin ends in {0}, inclusive.".format(vmax), file=sys.stderr) return bin_edges, hist
def stem_leaf_plot(data, vmin, vmax, bins, digit=1, title=None)
Generate stem and leaf plot given a collection of numbers
4.550087
3.976255
1.144315
if ascii: return texthistogram([numberfile], vmin, vmax, title=title, bins=bins, skip=skip, col=col, base=base) data, vmin, vmax = get_data(numberfile, vmin, vmax, skip=skip, col=col) outfile = numberfile + ".base{0}.{1}".format(base, outfmt) \ if base else numberfile + ".pdf" template = histogram_log_template if base else histogram_template rtemplate = RTemplate(template, locals()) rtemplate.run()
def histogram(numberfile, vmin, vmax, xlabel, title, outfmt="pdf", bins=50, skip=0, col=0, ascii=False, base=0, fill="white")
Generate histogram using number from numberfile, and only numbers in the range of (vmin, vmax)
4.436201
4.62039
0.960136
if ascii: return texthistogram(numberfiles, vmin, vmax, title=title, bins=bins, skip=skip) newfile = "_".join(op.basename(x).split(".")[0] for x in numberfiles) fw = open(newfile, "w") print("{0}\tgrp".format(xlabel), file=fw) if tags: tags = tags.split(",") for i, f in enumerate(numberfiles): data, va, vb = get_data(f, vmin, vmax, skip=skip) vmin = min(vmin, va) vmax = max(vmax, vb) fp = open(f) if tags: tag = tags[i] else: tag = op.basename(f).rsplit(".", 1)[0] for row in fp: val = row.strip() print("\t".join((val, tag)), file=fw) fw.close() numberfile = newfile outfile = numberfile + '.' + outfmt if prefix: outfile = prefix + outfile htemplate = histogram_multiple_template_b \ if facet else histogram_multiple_template_a rtemplate = RTemplate(htemplate, locals()) rtemplate.run()
def histogram_multiple(numberfiles, vmin, vmax, xlabel, title, outfmt="pdf", tags=None, bins=20, skip=0, ascii=False, facet=False, fill="white", prefix="")
Generate histogram using number from numberfile, and only numbers in the range of (vmin, vmax). First combining multiple files.
3.785789
3.727287
1.015696
allowed_format = ("emf", "eps", "pdf", "png", "ps", \ "raw", "rgba", "svg", "svgz") p = OptionParser(main.__doc__) p.add_option("--skip", default=0, type="int", help="skip the first several lines [default: %default]") p.add_option("--col", default=0, type="int", help="Get the n-th column") p.set_histogram() p.add_option("--tags", dest="tags", default=None, help="tags for data if multiple input files, comma sep") p.add_option("--ascii", default=False, action="store_true", help="print ASCII text stem-leaf plot [default: %default]") p.add_option("--base", default="0", choices=("0", "2", "10"), help="use logarithm axis with base, 0 to disable [default: %default]") p.add_option("--facet", default=False, action="store_true", help="place multiple histograms side-by-side [default: %default]") p.add_option("--fill", default="white", help="color of the bin [default: %default]") p.add_option("--format", default="pdf", choices=allowed_format, help="Generate image of format [default: %default]") p.add_option("--quick", default=False, action="store_true", help="Use quick plot, assuming bins are already counted") p.add_option("--noprintstats", default=False, action="store_true", help="Write basic stats when using --quick") opts, args = p.parse_args() if len(args) < 1: sys.exit(not p.print_help()) skip = opts.skip vmin, vmax = opts.vmin, opts.vmax bins = opts.bins xlabel, title = opts.xlabel, opts.title title = title or args[0] base = int(opts.base) fileno = len(args) if opts.quick: assert fileno == 1, "Single input file expected using --quick" filename = args[0] figname = filename.rsplit(".", 1)[0] + ".pdf" data = DictFile(filename, keycast=int, cast=int) quickplot(data, vmin, vmax, xlabel, title, figname=figname, print_stats=(not opts.noprintstats)) return if fileno == 1: histogram(args[0], vmin, vmax, xlabel, title, outfmt=opts.format, bins=bins, skip=skip, ascii=opts.ascii, base=base, fill=opts.fill, col=opts.col) else: histogram_multiple(args, vmin, vmax, xlabel, title, outfmt=opts.format, tags=opts.tags, bins=bins, skip=skip, ascii=opts.ascii, facet=opts.facet, fill=opts.fill)
def main()
%prog numbers1.txt number2.txt ... Print histogram of the data files. The data files contain one number per line. If more than one file is inputted, the program will combine the histograms into the same plot.
3.243381
3.203633
1.012407
tlx, brx = [canvas2px(x, w, dpi) for x in (tlx, brx)]
tly, bry = [canvas2px(y, h, dpi) for y in (tly, bry)]
chr, bac_list = chr.split(':')
return '<area shape="rect" coords="' + \
       ",".join(str(x) for x in (tlx, tly, brx, bry)) + \
       '" href="#' + chr + '"' + \
       ' title="' + chr + ':' + str(segment_start) + '..' + str(segment_end) + '"' + \
       ' />'
def write_ImageMapLine(tlx, tly, brx, bry, w, h, dpi, chr, segment_start, segment_end)
Write out an image map area line with the coordinates passed to this function <area shape="rect" coords="tlx,tly,brx,bry" href="#chr7" title="chr7:100001..500001">
2.795164
2.469708
1.131779
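A small sketch of the coordinate conversion feeding the <area> line above, assuming canvas2px simply scales a 0-1 canvas coordinate by the figure dimension and dpi; the stand-in below is hypothetical, the real helper lives elsewhere in jcvi.graphics.

# Hypothetical stand-in for canvas2px: map a 0-1 canvas coordinate to pixels.
def canvas2px(coord, dim, dpi):
    return int(round(coord * dim * dpi))

w, h, dpi = 8, 5, 100                         # an 800 x 500 pixel image
tlx, brx = canvas2px(0.1, w, dpi), canvas2px(0.4, w, dpi)
tly, bry = canvas2px(0.2, h, dpi), canvas2px(0.3, h, dpi)
print('<area shape="rect" coords="{0},{1},{2},{3}" href="#chr7" '
      'title="chr7:100001..500001" />'.format(tlx, tly, brx, bry))
# <area shape="rect" coords="80,100,320,150" href="#chr7" title="chr7:100001..500001" />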
p = OptionParser(batch.__doc__)
set_align_options(p)
p.set_sam_options()
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

ref_fasta, proj_dir, outdir = args
outdir = outdir.rstrip("/")
s3dir = None
if outdir.startswith("s3://"):
    s3dir = outdir
    outdir = op.basename(outdir)
mkdir(outdir)

mm = MakeManager()
for p, pf in iter_project(proj_dir):
    targs = [ref_fasta] + p
    cmd1, bamfile = mem(targs, opts)
    if cmd1:
        cmd1 = output_bam(cmd1, bamfile)
    nbamfile = op.join(outdir, bamfile)
    cmd2 = "mv {} {}".format(bamfile, nbamfile)
    cmds = [cmd1, cmd2]
    if s3dir:
        cmd = "aws s3 cp {} {} --sse".format(nbamfile, op.join(s3dir, bamfile))
        cmds.append(cmd)
    mm.add(p, nbamfile, cmds)

mm.write()
def batch(args)
%prog batch database.fasta project_dir output_dir Run bwa in batch mode.
3.554724
3.451203
1.029995
p = OptionParser(index.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

dbfile, = args
check_index(dbfile)
def index(args)
%prog index database.fasta Wrapper for `bwa index`. Same interface.
2.642164
2.345026
1.12671
p.add_option("--bwa", default="bwa", help="Run bwa at this path") p.add_option("--rg", help="Read group") p.add_option("--readtype", choices=("pacbio", "pbread", "ont2d", "intractg"), help="Read type in bwa-mem") p.set_cutoff(cutoff=800)
def set_align_options(p)
Used in align() and batch()
7.068439
6.908607
1.023135
valid_modes = ("bwasw", "aln", "mem") p = OptionParser(align.__doc__) p.add_option("--mode", default="mem", choices=valid_modes, help="BWA mode") set_align_options(p) p.set_sam_options() opts, args = p.parse_args(args) mode = opts.mode nargs = len(args) if nargs not in (2, 3): sys.exit(not p.print_help()) tag = "bwa-{0}: ".format(mode) c = mem if nargs == 2: tag += "Single-end alignment" if mode == "bwasw": c = bwasw elif mode == "aln": c = samse else: assert mode != "bwasw", "Cannot use --bwasw with paired-end mode" tag += "Paired-end alignment" if mode == "aln": c = sampe logging.debug(tag) cmd, samfile = c(args, opts) if cmd: cmd = output_bam(cmd, samfile) bam = opts.bam unmapped = opts.unmapped sh(cmd) if unmapped: dbfile, readfile = args[:2] mopts = [samfile, "--unmapped"] if not bam: mopts += ["--sam"] mapped(mopts) FileShredder([samfile]) return samfile, None
def align(args)
%prog align database.fasta read1.fq [read2.fq] Wrapper for three modes of BWA - mem (default), aln, bwasw (long reads).
4.495648
4.058228
1.107786
dbfile, readfile = args
dbfile = check_index(dbfile)
saifile = check_aln(dbfile, readfile, cpus=opts.cpus)

samfile, _, unmapped = get_samfile(readfile, dbfile,
                                   bam=opts.bam, unmapped=opts.unmapped)
if not need_update((dbfile, saifile), samfile):
    logging.error("`{0}` exists. `bwa samse` already run.".format(samfile))
    return "", samfile

cmd = "bwa samse {0} {1} {2}".format(dbfile, saifile, readfile)
cmd += " " + opts.extra
if opts.uniq:
    cmd += " -n 1"
return cmd, samfile
def samse(args, opts)
%prog samse database.fasta short_read.fastq Wrapper for `bwa samse`. Output will be short_read.sam.
5.516506
5.280905
1.044614
dbfile, read1file, read2file = args
dbfile = check_index(dbfile)
sai1file = check_aln(dbfile, read1file, cpus=opts.cpus)
sai2file = check_aln(dbfile, read2file, cpus=opts.cpus)

samfile, _, unmapped = get_samfile(read1file, dbfile,
                                   bam=opts.bam, unmapped=opts.unmapped)
if not need_update((dbfile, sai1file, sai2file), samfile):
    logging.error("`{0}` exists. `bwa sampe` already run.".format(samfile))
    return "", samfile

cmd = "bwa sampe " + " ".join((dbfile, sai1file, sai2file,
                               read1file, read2file))
cmd += " " + opts.extra
if opts.cutoff:
    cmd += " -a {0}".format(opts.cutoff)
if opts.uniq:
    cmd += " -n 1"
return cmd, samfile
def sampe(args, opts)
%prog sampe database.fasta read1.fq read2.fq Wrapper for `bwa sampe`. Output will be read1.sam.
4.284782
3.892195
1.100865
dbfile, read1file = args[:2]
readtype = opts.readtype
pl = readtype or "illumina"

pf = op.basename(read1file).split(".")[0]
rg = opts.rg or r"@RG\tID:{0}\tSM:sm\tLB:lb\tPL:{1}".format(pf, pl)
dbfile = check_index(dbfile)
args[0] = dbfile
samfile, _, unmapped = get_samfile(read1file, dbfile,
                                   bam=opts.bam, unmapped=opts.unmapped)
if not need_update(read1file, samfile):
    logging.error("`{0}` exists. `bwa mem` already run.".format(samfile))
    return "", samfile

cmd = "{} mem".format(opts.bwa)
# -M: mark shorter split hits as secondary (for Picard compatibility)
cmd += " -M -t {0}".format(opts.cpus)
cmd += ' -R "{0}"'.format(rg)
if readtype:
    cmd += " -x {0}".format(readtype)
cmd += " " + opts.extra
cmd += " " + " ".join(args)
return cmd, samfile
def mem(args, opts)
%prog mem database.fasta read1.fq [read2.fq] Wrapper for `bwa mem`. Output will be read1.sam.
5.403132
5.113921
1.056554
dbfile, readfile = args
dbfile = check_index(dbfile)

samfile, _, unmapped = get_samfile(readfile, dbfile,
                                   bam=opts.bam, unmapped=opts.unmapped)
if not need_update(dbfile, samfile):
    logging.error("`{0}` exists. `bwa bwasw` already run.".format(samfile))
    return "", samfile

cmd = "bwa bwasw " + " ".join(args)
cmd += " -t {0}".format(opts.cpus)
cmd += " " + opts.extra
return cmd, samfile
def bwasw(args, opts)
%prog bwasw database.fasta long_read.fastq Wrapper for `bwa bwasw`. Output will be long_read.sam.
5.497244
4.815858
1.141488
from jcvi.apps.base import mkdir

p = OptionParser(link.__doc__)
p.add_option("--dir", help="Place links in a subdirectory [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

meta, = args
d = opts.dir
if d:
    mkdir(d)

fp = open(meta)
cwd = op.dirname(get_abs_path(meta))
for row in fp:
    source, target = row.split()
    source = op.join(cwd, source)
    if d:
        target = op.join(d, target)
    lnsf(source, target, log=True)
def link(args)
%prog link metafile Link source to target based on a tabular file.
3.16386
2.964539
1.067235
p = OptionParser(touch.__doc__)
opts, args = p.parse_args(args)
fp = sys.stdin

for link_name in fp:
    link_name = link_name.strip()
    if not op.islink(link_name):
        continue
    if not op.exists(link_name):
        continue

    source = get_abs_path(link_name)
    lnsf(source, link_name)
def touch(args)
find . -type l | %prog touch The Linux command `touch` does not modify mtime for symlinks; this script does. Use find to pipe in all the symlinks.
4.039889
3.726762
1.084021
p = OptionParser(clean.__doc__)
opts, args = p.parse_args(args)

for link_name in os.listdir(os.getcwd()):
    if not op.islink(link_name):
        continue
    logging.debug("remove symlink `{0}`".format(link_name))
    os.unlink(link_name)
def clean(args)
%prog clean Removes all symlinks from current folder
3.776274
3.18476
1.185733
p = OptionParser(cp.__doc__)
fp = sys.stdin

for link_name in fp:
    link_name = link_name.strip()
    if not op.exists(link_name):
        continue

    source = get_abs_path(link_name)
    link_name = op.basename(link_name)
    if not op.exists(link_name):
        os.symlink(source, link_name)
    logging.debug(" => ".join((source, link_name)))
def cp(args)
find folder -type l | %prog cp Copy all the softlinks to the current folder, using absolute paths
4.025498
3.663697
1.098753
from jcvi.utils.cbook import human_size

p = OptionParser(size.__doc__)
fp = sys.stdin

results = []
for link_name in fp:
    link_name = link_name.strip()
    if not op.islink(link_name):
        continue

    source = get_abs_path(link_name)
    link_name = op.basename(link_name)
    filesize = op.getsize(source)
    results.append((filesize, link_name))

# sort by descending file size
for filesize, link_name in sorted(results, reverse=True):
    filesize = human_size(filesize, a_kilobyte_is_1024_bytes=True)
    print("%10s\t%s" % (filesize, link_name), file=sys.stderr)
def size(args)
find folder -type l | %prog size Get the size for all the paths that are pointed by the links
3.46027
3.501798
0.988141
p = OptionParser(nucmer.__doc__) opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) mapbed, mtrfasta, asmfasta, chr, idx = args idx = int(idx) m1 = 1000000 bedfile = "sample.bed" bed = Bed() bed.add("\t".join(str(x) for x in (chr, (idx - 1) * m1, idx * m1))) bed.print_to_file(bedfile) cmd = "intersectBed -a {0} -b {1} -nonamecheck -sorted | cut -f4".\ format(mapbed, bedfile) idsfile = "query.ids" sh(cmd, outfile=idsfile) sfasta = fastaFromBed(bedfile, mtrfasta) qfasta = "query.fasta" cmd = "faSomeRecords {0} {1} {2}".format(asmfasta, idsfile, qfasta) sh(cmd) cmd = "nucmer {0} {1}".format(sfasta, qfasta) sh(cmd) mummerplot_main(["out.delta", "--refcov=0"]) sh("mv out.pdf {0}.{1}.pdf".format(chr, idx))
def nucmer(args)
%prog nucmer mappings.bed MTR.fasta assembly.fasta chr1 3 Select specific chromosome region based on MTR mapping. The above command will extract chr1:2,000,001-3,000,000.
4.470879
3.997421
1.118441
from jcvi.apps.base import download so_file_url = "http://obo.cvs.sourceforge.net/viewvc/obo/obo/ontology/genomic-proteomic/so.obo" so_file = download(so_file_url, debug=False) return GODag(so_file)
def load_GODag()
OBO file retrieved from http://obo.cvs.sourceforge.net/viewvc/obo/obo/ontology/genomic-proteomic/so.obo
4.203075
2.188285
1.920716
if so is None: so = load_GODag() oterm = term if term not in so.valid_names: if "resolve" in method: if "_" in term: tparts = deque(term.split("_")) tparts.pop() if "prefix" in method else tparts.popleft() nterm = "_".join(tparts).strip() term = validate_term(nterm, so=so, method=method) if term is None: return None else: logging.error("Term `{0}` does not exist".format(term)) sys.exit(1) if oterm != term: logging.debug("Resolved term `{0}` to `{1}`".format(oterm, term)) return term
def validate_term(term, so=None, method="verify")
Validate an SO term against so.obo
3.740386
3.688168
1.014158
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

tablefile, = args
fp = open(tablefile)
for row in fp:
    atoms = row.split()
    hr = atoms[0]
    scaf = atoms[1]
    scaf_start = int(atoms[2]) + 1
    scaf_end = int(atoms[3])
    strand = atoms[4]
    hr_start = int(atoms[5]) + 1
    hr_end = int(atoms[6])
    print("\t".join(str(x) for x in
          (hr, hr_start, hr_end, 1, 'W',
           scaf, scaf_start, scaf_end, strand)))
def agp(args)
%prog agp Siirt_Female_pistachio_23May2017_table.txt The table file, as prepared by Dovetail Genomics, is not immediately useful to convert gene model coordinates, as assumed by formats.chain.fromagp(). This is a quick script to do such conversion. The file structure of this table file is described in the .manifest file shipped in the same package:: pistachio_b_23May2017_MeyIy.table.txt Tab-delimited table describing positions of input assembly scaffolds in the Hirise scaffolds. The table has the following format: 1. HiRise scaffold name 2. Input sequence name 3. Starting base (zero-based) of the input sequence 4. Ending base of the input sequence 5. Strand (- or +) of the input sequence in the scaffold 6. Starting base (zero-based) in the HiRise scaffold 7. Ending base in the HiRise scaffold where '-' in the strand column indicates that the sequence is reverse complemented relative to the input assembly. CAUTION: This is NOT a proper AGP format since it does not have gaps in them.
2.531113
2.428292
1.042343
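A worked example of the column shuffle performed above, on an invented Dovetail table row: both start columns are zero-based in the input, so 1 is added to each to produce the 1-based, AGP-like W line.

# Convert one hypothetical Dovetail table row to the 9-column output used above.
row = "Scaffold_1\tcontig_42\t0\t15000\t-\t200000\t215000"
hr, scaf, s0, s1, strand, h0, h1 = row.split("\t")
out = (hr, int(h0) + 1, int(h1), 1, "W", scaf, int(s0) + 1, int(s1), strand)
print("\t".join(str(x) for x in out))
# Scaffold_1  200001  215000  1  W  contig_42  1  15000  -   (tab-separated)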
p = OptionParser(traits.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) samples = [] for folder in args: targets = iglob(folder, "*-traits.json") if not targets: continue filename = targets[0] js = json.load(open(filename)) js["skin_rgb"] = make_rgb( js["traits"]["skin-color"]["L"], js["traits"]["skin-color"]["A"], js["traits"]["skin-color"]["B"]) js["eye_rgb"] = make_rgb( js["traits"]["eye-color"]["L"], js["traits"]["eye-color"]["A"], js["traits"]["eye-color"]["B"]) samples.append(js) template = Template(traits_template) fw = open("report.html", "w") print(template.render(samples=samples), file=fw) logging.debug("Report written to `{}`".format(fw.name)) fw.close()
def traits(args)
%prog traits directory Make HTML page that reports eye and skin color.
2.771375
2.53951
1.091303
p = OptionParser(regression.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x8") if len(args) != 1: sys.exit(not p.print_help()) tsvfile, = args df = pd.read_csv(tsvfile, sep="\t") chrono = "Chronological age (yr)" pred = "Predicted age (yr)" resdf = pd.DataFrame({chrono: df["hli_calc_age_sample_taken"], pred: df["Predicted Age"]}) g = sns.jointplot(chrono, pred, resdf, joint_kws={"s": 6}, xlim=(0, 100), ylim=(0, 80)) g.fig.set_figwidth(iopts.w) g.fig.set_figheight(iopts.h) outfile = tsvfile.rsplit(".", 1)[0] + ".regression.pdf" savefig(outfile)
def regression(args)
%prog regression postgenomic-s.tsv Plot chronological vs. predicted age.
3.351095
2.852391
1.174837
fig = plt.figure(1, size) ax1 = plt.subplot2grid((2, 2), (0, 0)) ax2 = plt.subplot2grid((2, 2), (0, 1)) ax3 = plt.subplot2grid((2, 2), (1, 0)) ax4 = plt.subplot2grid((2, 2), (1, 1)) chemistry = ["V1", "V2", "V2.5", float("nan")] colors = sns.color_palette("Set2", 8) color_map = dict(zip(chemistry, colors)) age_label = "Chronological age (yr)" ax1.scatter(df["hli_calc_age_sample_taken"], df["teloLength"], s=10, marker='.', color=df["Chemistry"].map(color_map)) ax1.set_ylim(0, 15) ax1.set_ylabel("Telomere length (Kb)") ax2.scatter(df["hli_calc_age_sample_taken"], df["ccn.chrX"], s=10, marker='.', color=df["Chemistry"].map(color_map)) ax2.set_ylim(1.8, 2.1) ax2.set_ylabel("ChrX copy number") ax4.scatter(df["hli_calc_age_sample_taken"], df["ccn.chrY"], s=10, marker='.', color=df["Chemistry"].map(color_map)) ax4.set_ylim(0.8, 1.1) ax4.set_ylabel("ChrY copy number") ax3.scatter(df["hli_calc_age_sample_taken"], df["TRA.PPM"], s=10, marker='.', color=df["Chemistry"].map(color_map)) ax3.set_ylim(0, 250) ax3.set_ylabel("$TCR-\\alpha$ deletions (count per million reads)") from matplotlib.lines import Line2D legend_elements = [Line2D([0], [0], marker='.', color='w', label=chem, markerfacecolor=color, markersize=16) \ for (chem, color) in zip(chemistry, colors)[:3]] for ax in (ax1, ax2, ax3, ax4): ax.set_xlabel(age_label) ax.legend(handles=legend_elements, loc="upper right") plt.tight_layout() root = fig.add_axes((0, 0, 1, 1)) labels = ((.02, .98, "A"), (.52, .98, "B"), (.02, .5, "C"), (.52, .5, "D")) panel_labels(root, labels) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off()
def composite_correlation(df, size=(12, 8))
Plot composite correlation figure
2.47629
2.4897
0.994613
p = OptionParser(correlation.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x8") if len(args) != 1: sys.exit(not p.print_help()) tsvfile, = args df = pd.read_csv(tsvfile, sep="\t") composite_correlation(df, size=(iopts.w, iopts.h)) outfile = tsvfile.rsplit(".", 1)[0] + ".correlation.pdf" savefig(outfile)
def correlation(args)
%prog correlation postgenomic-s.tsv Plot correlation of age vs. postgenomic features.
2.920494
2.444711
1.194617
df = df_orig.rename(columns={"hli_calc_age_sample_taken": "Age", "hli_calc_gender": "Gender", "eth7_max": "Ethnicity", "MeanCoverage": "Mean coverage", "Chemistry": "Sequencing chemistry", "Release Client": "Cohort", }) fig = plt.figure(1, size) ax1 = plt.subplot2grid((2, 7), (0, 0), rowspan=1, colspan=2) ax2 = plt.subplot2grid((2, 7), (0, 2), rowspan=1, colspan=2) ax3 = plt.subplot2grid((2, 7), (0, 4), rowspan=1, colspan=3) ax4 = plt.subplot2grid((2, 7), (1, 0), rowspan=1, colspan=2) ax5 = plt.subplot2grid((2, 7), (1, 2), rowspan=1, colspan=2) ax6 = plt.subplot2grid((2, 7), (1, 4), rowspan=1, colspan=3) sns.distplot(df["Age"].dropna(), kde=False, ax=ax1) sns.countplot(x="Gender", data=df, ax=ax2) sns.countplot(x="Ethnicity", data=df, ax=ax3, order = df['Ethnicity'].value_counts().index) sns.distplot(df["Mean coverage"].dropna(), kde=False, ax=ax4) ax4.set_xlim(0, 100) sns.countplot(x="Sequencing chemistry", data=df, ax=ax5) sns.countplot(x="Cohort", data=df, ax=ax6, order = df['Cohort'].value_counts().index) # Anonymize the cohorts cohorts = ax6.get_xticklabels() newCohorts = [] for i, c in enumerate(cohorts): if c.get_text() == "Spector": c = "TwinsUK" elif c.get_text() != "Health Nucleus": c = "C{}".format(i + 1) newCohorts.append(c) ax6.set_xticklabels(newCohorts) for ax in (ax6,): ax.set_xticklabels(ax.get_xticklabels(), ha="right", rotation=30) for ax in (ax1, ax2, ax3, ax4, ax5, ax6): ax.set_title(ax.get_xlabel()) ax.set_xlabel("") plt.tight_layout() root = fig.add_axes((0, 0, 1, 1)) labels = ((.02, .96, "A"), (.3, .96, "B"), (.6, .96, "C"), (.02, .52, "D"), (.3, .52, "E"), (.6, .52, "F")) panel_labels(root, labels) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off()
def composite_qc(df_orig, size=(16, 12))
Plot composite QC figures
2.241422
2.233944
1.003347
# Construct the pairs of trait values
traitValuesAbsent = 0
nanValues = 0
genderSkipped = 0
twinValues = []
for a, b, t in triples:
    if gender is not None and t != gender:
        genderSkipped += 1
        continue
    if not (a in traits and b in traits):
        traitValuesAbsent += 1
        continue
    if np.isnan(traits[a]) or np.isnan(traits[b]):
        nanValues += 1
        continue
    twinValues.append((traits[a], traits[b]))

print("A total of {} pairs extracted ({} absent; {} nan; {} genderSkipped)"
      .format(len(twinValues), traitValuesAbsent, nanValues, genderSkipped))
return twinValues
def extract_twin_values(triples, traits, gender=None)
Extract paired trait values for twin pairs, used downstream to estimate heritability. Parameters ========== triples: (a, b, "Female/Male") triples. The sample IDs are then used to query the traits dictionary. traits: sample_id => value dictionary Returns ======= list of 2-tuples that contain the paired trait values of the twins
2.99838
3.107881
0.964767
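A minimal usage sketch with made-up inputs, mainly to show the expected shapes of the (sample_a, sample_b, gender) triples and the traits lookup; pairs with missing or NaN values are dropped, as in the function above.

# Toy inputs, invented for illustration.
from math import isnan

triples = [("S1", "S2", "Female"), ("S3", "S4", "Male"), ("S5", "S6", "Female")]
traits = {"S1": 7.1, "S2": 6.9, "S3": 8.0, "S4": float("nan"), "S5": 7.5}

pairs = []
for a, b, gender in triples:
    if a in traits and b in traits and not isnan(traits[a]) and not isnan(traits[b]):
        pairs.append((traits[a], traits[b]))  # keep only complete, non-NaN pairs
print(pairs)                                  # [(7.1, 6.9)]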
p = OptionParser(heritability.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x18") if len(args) != 3: sys.exit(not p.print_help()) combined, mz, dz = args # Prepare twins data def get_pairs(filename): with open(filename) as fp: for row in fp: yield row.strip().split(",") MZ = list(get_pairs(mz)) DZ = list(get_pairs(dz)) print(len(MZ), "monozygotic twins") print(len(DZ), "dizygotic twins") df = pd.read_csv(combined, sep="\t", index_col=0) df["Sample name"] = np.array(df["Sample name"], dtype=np.str) gender = extract_trait(df, "Sample name", "hli_calc_gender") sameGenderMZ = list(filter_same_gender(MZ, gender)) sameGenderDZ = list(filter_same_gender(DZ, gender)) composite(df, sameGenderMZ, sameGenderDZ, size=(iopts.w, iopts.h)) logging.getLogger().setLevel(logging.CRITICAL) savefig("heritability.pdf")
def heritability(args)
%prog heritability pg.tsv MZ-twins.csv DZ-twins.csv Plot composite figures ABCD on the absolute difference of 4 traits, EFGH on the heritability of 4 traits. The 4 traits are: telomere length, ccn.chrX, ccn.chrY, TRA.PPM
4.214524
3.593874
1.172696
p = OptionParser(compile.__doc__) p.set_outfile(outfile="age.tsv") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) dfs = [] for folder in args: ofolder = os.listdir(folder) # telomeres subdir = [x for x in ofolder if x.startswith("telomeres")][0] subdir = op.join(folder, subdir) filename = op.join(subdir, "tel_lengths.txt") df = pd.read_csv(filename, sep="\t") d1 = df.ix[0].to_dict() # ccn subdir = [x for x in ofolder if x.startswith("ccn")][0] subdir = op.join(folder, subdir) filename = iglob(subdir, "*.ccn.json")[0] js = json.load(open(filename)) d1.update(js) df = pd.DataFrame(d1, index=[0]) dfs.append(df) df = pd.concat(dfs, ignore_index=True) df.to_csv(opts.outfile, sep="\t", index=False)
def compile(args)
%prog compile directory Extract telomere length and ccn.
2.904848
2.554879
1.13698
from random import choice

seq = Seq(''.join(choice('ACGT') for _ in range(size)))
s = SeqRecord(seq, id=name, description="Fake sequence")
SeqIO.write([s], fw, "fasta")
def simulate_one(fw, name, size)
Simulate a random sequence with name and size
4.169788
3.774137
1.104832
p = OptionParser(simulate.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

idsfile, = args
fp = open(idsfile)
fw = must_open(opts.outfile, "w")
for row in fp:
    name, size = row.split()
    size = int(size)
    simulate_one(fw, name, size)
fp.close()
def simulate(args)
%prog simulate idsfile Simulate random FASTA file based on idsfile, which is a two-column tab-separated file with sequence name and size.
2.820316
2.221859
1.26935
p = OptionParser(gc.__doc__) p.add_option("--binsize", default=500, type="int", help="Bin size to use") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args binsize = opts.binsize allbins = [] for name, seq in parse_fasta(fastafile): for i in range(len(seq) / binsize): atcnt = gccnt = 0 for c in seq[i * binsize: (i + 1) * binsize].upper(): if c in "AT": atcnt += 1 elif c in "GC": gccnt += 1 totalcnt = atcnt + gccnt if totalcnt == 0: continue gcpct = gccnt * 100 / totalcnt allbins.append(gcpct) from jcvi.graphics.base import asciiplot from collections import Counter title = "Total number of bins={}".format(len(allbins)) c = Counter(allbins) x, y = zip(*sorted(c.items())) asciiplot(x, y, title=title)
def gc(args)
%prog gc fastafile Plot G+C content distribution.
2.657827
2.550005
1.042283
from jcvi.utils.cbook import SummaryStats p = OptionParser(trimsplit.__doc__) p.add_option("--minlength", default=1000, type="int", help="Min length of contigs to keep") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args minlength = opts.minlength fw = must_open(fastafile.rsplit(".", 1)[0] + ".split.fasta", "w") ntotal = 0 removed = [] Ns = [] for name, seq in parse_fasta(fastafile): stretches = [] ntotal += len(seq) for lower, stretch in groupby(seq, key=lambda x: x.islower()): stretch = "".join(stretch) if lower or len(stretch) < minlength: removed.append(len(stretch)) continue for isN, s in groupby(stretch, key=lambda x: x in "Nn"): s = "".join(s) if isN or len(s) < minlength: Ns.append(len(s)) continue stretches.append(s) for i, seq in enumerate(stretches): id = "{0}_{1}".format(name.split("|")[0], i) s = SeqRecord(Seq(seq), id=id, description="") SeqIO.write([s], fw, "fasta") fw.close() # Reporting if removed: logging.debug("Total bases removed: {0}".\ format(percentage(sum(removed), ntotal))) print(SummaryStats(removed), file=sys.stderr) if Ns: logging.debug("Total Ns removed: {0}".\ format(percentage(sum(Ns), ntotal))) print(SummaryStats(Ns), file=sys.stderr)
def trimsplit(args)
%prog trimsplit fastafile Split sequences at lower-cased letters and stretches of Ns. This is useful for cleaning up the low-quality bases in the QUIVER output.
2.472383
2.359091
1.048024
from jcvi.formats.sizes import Sizes p = OptionParser(qual.__doc__) p.add_option("--qv", default=31, type="int", help="Dummy qv score for extended bases") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args sizes = Sizes(fastafile) qvchar = str(opts.qv) fw = must_open(opts.outfile, "w") total = 0 for s, slen in sizes.iter_sizes(): print(">" + s, file=fw) print(" ".join([qvchar] * slen), file=fw) total += 1 fw.close() logging.debug("Written {0} records in `{1}`.".format(total, opts.outfile))
def qual(args)
%prog qual fastafile Generate dummy .qual file based on FASTA file.
3.199175
2.91505
1.097468
p = OptionParser(info.__doc__) p.add_option("--gaps", default=False, action="store_true", help="Count number of gaps [default: %default]") p.set_table() p.set_outfile() opts, args = p.parse_args(args) if len(args) == 0: sys.exit(not p.print_help()) fastafiles = args data = [] for f in fastafiles: s = SequenceInfo(f, gapstats=opts.gaps) data.append(s.data) write_csv(s.header, data, sep=opts.sep, filename=opts.outfile, align=opts.align)
def info(args)
%prog info *.fasta Run `sequence_info` on FASTA files. Generate a report per file.
3.0887
2.713368
1.138327
p = OptionParser(fromtab.__doc__)
p.set_sep(sep=None)
p.add_option("--noheader", default=False, action="store_true",
             help="Ignore first line")
p.add_option("--replace",
             help="Replace spaces in name to char [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

tabfile, fastafile = args
sep = opts.sep
replace = opts.replace
fp = must_open(tabfile)
fw = must_open(fastafile, "w")
nseq = 0
if opts.noheader:
    next(fp)
for row in fp:
    row = row.strip()
    if not row or row[0] == '#':
        continue

    name, seq = row.rsplit(sep, 1)
    if replace:
        name = name.replace(" ", replace)
    print(">{0}\n{1}".format(name, seq), file=fw)
    nseq += 1
fw.close()

logging.debug("A total of {0} sequences written to `{1}`.".format(nseq, fastafile))
def fromtab(args)
%prog fromtab tabfile fastafile Convert a 2-column sequence file to FASTA format. One use for this is to generate an `adapters.fasta` for TRIMMOMATIC.
2.520715
2.327374
1.083072
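For instance, a two-column tab-separated file like the invented one below becomes one FASTA record per row; spaces in the name can optionally be replaced.

# Sketch of the 2-column -> FASTA conversion on an in-memory example.
rows = ["TruSeq Adapter 1\tAGATCGGAAGAGC", "TruSeq Adapter 2\tGATCGTCGGACTG"]
for row in rows:
    name, seq = row.rsplit("\t", 1)
    print(">{0}\n{1}".format(name.replace(" ", "_"), seq))
# >TruSeq_Adapter_1
# AGATCGGAAGAGC
# ...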
p = OptionParser(longestorf.__doc__) p.add_option("--ids", action="store_true", help="Generate table with ORF info [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args pf = fastafile.rsplit(".", 1)[0] orffile = pf + ".orf.fasta" idsfile = None if opts.ids: idsfile = pf + ".orf.ids" fwids = open(idsfile, "w") f = Fasta(fastafile, lazy=True) fw = must_open(orffile, "w") before, after = 0, 0 for name, rec in f.iteritems_ordered(): cds = rec.seq before += len(cds) # Try all six frames orf = ORFFinder(cds) lorf = orf.get_longest_orf() newcds = Seq(lorf) after += len(newcds) newrec = SeqRecord(newcds, id=name, description=rec.description) SeqIO.write([newrec], fw, "fasta") if idsfile: print("\t".join((name, orf.info)), file=fwids) fw.close() if idsfile: fwids.close() logging.debug("Longest ORFs written to `{0}` ({1}).".\ format(orffile, percentage(after, before))) return orffile
def longestorf(args)
%prog longestorf fastafile Find longest ORF for each sequence in fastafile.
2.897203
2.803077
1.03358
from jcvi.utils.iter import grouper

p = OptionParser(ispcr.__doc__)
p.add_option("-r", dest="rclip", default=1, type="int",
             help="pair ID is derived from rstrip N chars [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

fastafile, = args
ispcrfile = fastafile + ".isPcr"
fw = open(ispcrfile, "w")

N = opts.rclip
# strip N trailing chars from the read name; fall back to identity when N == 0
strip_name = (lambda x: x[:-N]) if N else str

npairs = 0
fastaiter = SeqIO.parse(fastafile, "fasta")
for a, b in grouper(fastaiter, 2):
    aid, bid = [strip_name(x) for x in (a.id, b.id)]
    assert aid == bid, "Name mismatch {0}".format((aid, bid))
    print("\t".join((aid, str(a.seq), str(b.seq))), file=fw)
    npairs += 1

fw.close()
logging.debug("A total of {0} pairs written to `{1}`.".format(npairs, ispcrfile))
def ispcr(args)
%prog ispcr fastafile Reformat paired primers into the isPcr query format, which is a three-column format: name, forward, reverse
3.108526
3.072927
1.011585
'''
Parse a FASTA-formatted file and yield (header, sequence) tuples.
The file can contain multiple records.
'''
try:
    fp = must_open(infile)
except:
    fp = infile
# group lines by whether they are headers, keeping the header groups
fa_iter = (x[1] for x in groupby(fp, lambda row: row[0] == '>'))
for header in fa_iter:
    header = next(header)
    if header[0] != '>':
        continue
    # drop '>'
    header = header.strip()[1:]
    # stitch the sequence lines together (optionally upper case)
    seq = "".join(s.strip() for s in next(fa_iter))
    if upper:
        seq = seq.upper()
    yield header, seq
def parse_fasta(infile, upper=False)
Parse a FASTA-formatted file and yield (header, sequence) tuples. The file can contain multiple records.
4.364554
3.360394
1.298822
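As a quick illustration of the parsing above, here is a self-contained sketch of the same groupby-based logic on an in-memory file, showing how a header line and its wrapped sequence lines are stitched back together (the toy records are invented).

# Demonstrate the '>'-delimited grouping used by parse_fasta above.
from io import StringIO
from itertools import groupby

fasta = StringIO(">seq1 sample\nACGT\nacgt\n>seq2\nTTTT\nGGGG\n")
fa_iter = (x[1] for x in groupby(fasta, lambda row: row[0] == ">"))
for header in fa_iter:
    header = next(header).strip()[1:]         # drop '>' and the newline
    seq = "".join(s.strip() for s in next(fa_iter)).upper()
    print(header, len(seq), seq)
# seq1 sample 8 ACGTACGT
# seq2 8 TTTTGGGG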