code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if not HAVE_MPL:  # pragma: no cover
    raise RuntimeError('matplotlib not available.')
cmap = LinearSegmentedColormap.from_list(self.name, self.mpl_colors, **kwargs)
return cmap
def get_mpl_colormap(self, **kwargs)
A color map that can be used in matplotlib plots. Requires matplotlib to be importable. Keyword arguments are passed to `matplotlib.colors.LinearSegmentedColormap.from_list`.
4.205011
4.08694
1.02889
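As a quick illustration of the method above, a minimal self-contained sketch with a hypothetical palettable-style stand-in palette (the `_Palette` class and its colors are invented for the example; only the `name`/`mpl_colors` attributes and the `from_list` call mirror the record):

from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
import numpy as np

class _Palette:
    # Stand-in for a palettable-style palette object (hypothetical)
    name = "demo"
    mpl_colors = [(0.9, 0.9, 0.2), (0.2, 0.6, 0.3), (0.1, 0.1, 0.5)]

    def get_mpl_colormap(self, **kwargs):
        # kwargs (e.g. N=256) are forwarded to from_list()
        return LinearSegmentedColormap.from_list(self.name, self.mpl_colors, **kwargs)

cmap = _Palette().get_mpl_colormap(N=256)
plt.imshow(np.random.rand(10, 10), cmap=cmap)
plt.colorbar()
plt.show()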
from ipythonblocks import BlockGrid

grid = BlockGrid(self.number, 1, block_size=block_size)
for block, color in zip(grid, self.colors):
    block.rgb = color
grid.show()
def show_as_blocks(self, block_size=100)
Show colors in the IPython Notebook using ipythonblocks.

Parameters
----------
block_size : int, optional
    Size of displayed blocks.
6.412446
6.653701
0.963741
url = 'http://colorbrewer2.org/index.html?type={0}&scheme={1}&n={2}'
return url.format(self.type.lower(), self.name, self.number)
def colorbrewer2_url(self)
URL that can be used to view the color map at colorbrewer2.org.
2.987949
2.848918
1.048802
from jcvi.formats.fasta import summary as fsummary
from jcvi.utils.cbook import percentage, human_size

p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

chainfile, oldfasta, newfasta = args
chain = Chain(chainfile)
ungapped, dt, dq = chain.ungapped, chain.dt, chain.dq
print("File `{0}` contains {1} chains.".format(chainfile, len(chain)),
      file=sys.stderr)
print("ungapped={0} dt={1} dq={2}".format(
      human_size(ungapped), human_size(dt), human_size(dq)), file=sys.stderr)

oldreal, oldnn, oldlen = fsummary([oldfasta, "--outfile=/dev/null"])
print("Old fasta (`{0}`) mapped: {1}".format(
      oldfasta, percentage(ungapped, oldreal)), file=sys.stderr)

newreal, newnn, newlen = fsummary([newfasta, "--outfile=/dev/null"])
print("New fasta (`{0}`) mapped: {1}".format(
      newfasta, percentage(ungapped, newreal)), file=sys.stderr)
def summary(args)
%prog summary old.new.chain old.fasta new.fasta

Provide stats of the chain file.
2.761256
2.488626
1.109551
from jcvi.formats.agp import AGP
from jcvi.formats.sizes import Sizes

p = OptionParser(fromagp.__doc__)
p.add_option("--novalidate", default=False, action="store_true",
             help="Do not validate AGP")
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

agpfile, componentfasta, objectfasta = args
chainfile = agpfile.rsplit(".", 1)[0] + ".chain"
fw = open(chainfile, "w")
agp = AGP(agpfile, validate=(not opts.novalidate))
componentsizes = Sizes(componentfasta).mapping
objectsizes = Sizes(objectfasta).mapping
chain = "chain"
score = 1000
tStrand = "+"
id = 0
for a in agp:
    if a.is_gap:
        continue

    tName = a.component_id
    tSize = componentsizes[tName]
    tStart = a.component_beg
    tEnd = a.component_end
    tStart -= 1

    qName = a.object
    qSize = objectsizes[qName]
    qStrand = "-" if a.orientation == "-" else "+"
    qStart = a.object_beg
    qEnd = a.object_end
    if qStrand == '-':
        _qStart = qSize - qEnd + 1
        _qEnd = qSize - qStart + 1
        qStart, qEnd = _qStart, _qEnd
    qStart -= 1

    id += 1
    size = a.object_span
    headerline = "\t".join(str(x) for x in (
        chain, score, tName, tSize, tStrand, tStart, tEnd,
        qName, qSize, qStrand, qStart, qEnd, id
    ))
    alignmentline = size
    print(headerline, file=fw)
    print(alignmentline, file=fw)
    print(file=fw)

fw.close()
logging.debug("File written to `{0}`.".format(chainfile))
def fromagp(args)
%prog fromagp agpfile componentfasta objectfasta

Generate chain file from AGP format. The components represent the old
genome (target) and the objects represent the new genome (query).
2.717917
2.485977
1.093299
p = OptionParser(blat.__doc__)
p.add_option("--minscore", default=100, type="int",
             help="Matches minus mismatches gap penalty [default: %default]")
p.add_option("--minid", default=98, type="int",
             help="Minimum sequence identity [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

oldfasta, newfasta = args
twobitfiles = []
for fastafile in args:
    tbfile = faToTwoBit(fastafile)
    twobitfiles.append(tbfile)
oldtwobit, newtwobit = twobitfiles

cmd = "pblat -threads={0}".format(opts.cpus) if which("pblat") else "blat"
cmd += " {0} {1}".format(oldtwobit, newfasta)
cmd += " -tileSize=12 -minScore={0} -minIdentity={1} ".format(
       opts.minscore, opts.minid)
pslfile = "{0}.{1}.psl".format(*(op.basename(x).split('.')[0]
                                 for x in (newfasta, oldfasta)))
cmd += pslfile
sh(cmd)
def blat(args)
%prog blat old.fasta new.fasta

Generate psl file using blat.
3.308975
3.131968
1.056516
from jcvi.formats.sizes import Sizes

p = OptionParser(frompsl.__doc__)
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

pslfile, oldfasta, newfasta = args
pf = oldfasta.split(".")[0]

# Chain together alignments using axtChain
chainfile = pf + ".chain"
twobitfiles = []
for fastafile in (oldfasta, newfasta):
    tbfile = faToTwoBit(fastafile)
    twobitfiles.append(tbfile)
oldtwobit, newtwobit = twobitfiles

if need_update(pslfile, chainfile):
    cmd = "axtChain -linearGap=medium -psl {0}".format(pslfile)
    cmd += " {0} {1} {2}".format(oldtwobit, newtwobit, chainfile)
    sh(cmd)

# Sort chain files
sortedchain = chainfile.rsplit(".", 1)[0] + ".sorted.chain"
if need_update(chainfile, sortedchain):
    cmd = "chainSort {0} {1}".format(chainfile, sortedchain)
    sh(cmd)

# Make alignment nets from chains
netfile = pf + ".net"
oldsizes = Sizes(oldfasta).filename
newsizes = Sizes(newfasta).filename
if need_update((sortedchain, oldsizes, newsizes), netfile):
    cmd = "chainNet {0} {1} {2}".format(sortedchain, oldsizes, newsizes)
    cmd += " {0} /dev/null".format(netfile)
    sh(cmd)

# Create liftOver chain file
liftoverfile = pf + ".liftover.chain"
if need_update((netfile, sortedchain), liftoverfile):
    cmd = "netChainSubset {0} {1} {2}".format(
          netfile, sortedchain, liftoverfile)
    sh(cmd)
def frompsl(args)
%prog frompsl old.new.psl old.fasta new.fasta

Generate chain file from psl file. The pipeline is described in:
<http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver>
2.937448
2.777003
1.057776
atoms = row.strip().split("\t")
name1, name2, coverage, identity, nmismatch, ngap, \
    start1, end1, strand1, start2, end2, strand2, score = atoms
identity = identity.replace("%", "")
hitlen = coverage.split("/")[1]
score = float(score)

same_strand = (strand1 == strand2)
if not same_strand:
    start2, end2 = end2, start2

evalue = blastz_score_to_ncbi_expectation(score)
score = blastz_score_to_ncbi_bits(score)
evalue, score = "%.2g" % evalue, "%.1f" % score
return "\t".join((name1, name2, identity, hitlen, nmismatch, ngap,
                  start1, end1, start2, end2, evalue, score))
def lastz_to_blast(row)
Convert the LASTZ tabular output to BLAST tabular format; see headers
above. Obsolete after LASTZ version 1.02.40.
3.589094
3.519085
1.019894
bfasta_fn, afasta_fn, outfile, lastz_bin, extra, mask, format = t

ref_tags = [Darkspace]
qry_tags = [Darkspace]
ref_tags, qry_tags = add_mask(ref_tags, qry_tags, mask=mask)

lastz_cmd = Lastz_template.format(lastz_bin, bfasta_fn, ref_tags,
                                  afasta_fn, qry_tags)
if extra:
    lastz_cmd += " " + extra.strip()
lastz_cmd += " --format={0}".format(format)

proc = Popen(lastz_cmd)
out_fh = open(outfile, "w")

logging.debug("job <%d> started: %s" % (proc.pid, lastz_cmd))
for row in proc.stdout:
    out_fh.write(row)
    out_fh.flush()
logging.debug("job <%d> finished" % proc.pid)
def lastz_2bit(t)
Used for formats other than BLAST, e.g. lav, maf, etc., which require the
database file to contain a single FASTA record.
4.34037
4.445572
0.976336
p = OptionParser(main.__doc__)

supported_formats = tuple(x.strip() for x in
    "lav, lav+text, axt, axt+, maf, maf+, maf-, sam, softsam, "
    "sam-, softsam-, cigar, BLASTN, BLASTN-, differences, rdotplot, text"
    .split(','))

p.add_option("--format", default="BLASTN-", choices=supported_formats,
             help="Output format [default: %default]")
p.add_option("--path", dest="lastz_path", default=None,
             help="specify LASTZ path")
p.add_option("--mask", dest="mask", default=False, action="store_true",
             help="treat lower-case letters as mask info [default: %default]")
p.add_option("--similar", default=False, action="store_true",
             help="Use options tuned for close comparison [default: %default]")
p.set_cpus(cpus=32)
p.set_params()
p.set_outfile()
opts, args = p.parse_args()

if len(args) != 2:
    sys.exit(p.print_help())

bfasta_fn, afasta_fn = args
for fn in (afasta_fn, bfasta_fn):
    assert op.exists(fn)

afasta_fn = op.abspath(afasta_fn)
bfasta_fn = op.abspath(bfasta_fn)
out_fh = must_open(opts.outfile, "w")

extra = opts.extra
if opts.similar:
    extra += similarOptions

lastz_bin = opts.lastz_path or "lastz"
assert lastz_bin.endswith("lastz"), "You need to include lastz in your path"

mask = opts.mask
cpus = opts.cpus
logging.debug("Dispatch job to %d cpus" % cpus)
format = opts.format
blastline = (format == "BLASTN-")

# The axt, maf, etc. formats can only be run on a split database (i.e. one
# FASTA record per file). The split files are then parallelized for the
# computation, as opposed to splitting queries through "subsample".
outdir = "outdir"
if not blastline:
    from jcvi.formats.fasta import Fasta
    from jcvi.formats.chain import faToTwoBit

    mkdir(outdir)
    bfasta_2bit = faToTwoBit(bfasta_fn)
    bids = list(Fasta(bfasta_fn, lazy=True).iterkeys_ordered())

    apf = op.basename(afasta_fn).split(".")[0]
    args = []
    # bfasta_fn, afasta_fn, outfile, lastz_bin, extra, mask, format
    for id in bids:
        bfasta = "/".join((bfasta_2bit, id))
        outfile = op.join(outdir, "{0}.{1}.{2}".format(apf, id, format))
        args.append((bfasta, afasta_fn, outfile,
                     lastz_bin, extra, mask, format))

    p = Pool(cpus)
    p.map(lastz_2bit, args)

    return

lock = Lock()
args = [(k + 1, cpus, bfasta_fn, afasta_fn, out_fh, lock, lastz_bin,
         extra, mask) for k in xrange(cpus)]
g = Jobs(target=lastz, args=args)
g.run()
def main()
%prog database.fa query.fa [options]

Run LASTZ similar to the BLAST interface, and generate -m8 tabular format.
4.698961
4.608009
1.019738
p = OptionParser(augustus.__doc__)
p.add_option("--species", default="maize",
             help="Use species model for prediction")
p.add_option("--hintsfile", help="Hint-guided AUGUSTUS")
p.add_option("--nogff3", default=False, action="store_true",
             help="Turn --gff3=off")
p.set_home("augustus")
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

fastafile, = args
cpus = opts.cpus
mhome = opts.augustus_home
gff3 = not opts.nogff3
suffix = ".gff3" if gff3 else ".out"
cfgfile = op.join(mhome, "config/extrinsic/extrinsic.M.RM.E.W.cfg")

outdir = mkdtemp(dir=".")
fs = split([fastafile, outdir, str(cpus)])

augustuswrap_params = partial(augustuswrap, species=opts.species,
                              gff3=gff3, cfgfile=cfgfile,
                              hintsfile=opts.hintsfile)
g = Jobs(augustuswrap_params, fs.names)
g.run()

gff3files = [x.rsplit(".", 1)[0] + suffix for x in fs.names]
outfile = fastafile.rsplit(".", 1)[0] + suffix
FileMerger(gff3files, outfile=outfile).merge()
shutil.rmtree(outdir)

if gff3:
    from jcvi.annotation.reformat import augustus as reformat_augustus
    reformat_outfile = outfile.replace(".gff3", ".reformat.gff3")
    reformat_augustus([outfile, "--outfile={0}".format(reformat_outfile)])
def augustus(args)
%prog augustus fastafile

Run parallel AUGUSTUS. Final results can be reformatted using
annotation.reformat.augustus().
3.82969
3.634778
1.053624
p = OptionParser(star.__doc__)
p.add_option("--single", default=False, action="store_true",
             help="Single end mapping")
p.set_fastq_names()
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

folder, reference = args
cpus = opts.cpus
mm = MakeManager()
num = 1 if opts.single else 2

gd = "GenomeDir"
mkdir(gd)
STAR = "STAR --runThreadN {0} --genomeDir {1}".format(cpus, gd)

# Step 0: build genome index
genomeidx = op.join(gd, "Genome")
if need_update(reference, genomeidx):
    cmd = STAR + " --runMode genomeGenerate"
    cmd += " --genomeFastaFiles {0}".format(reference)
    mm.add(reference, genomeidx, cmd)

# Step 1: align
for p, prefix in iter_project(folder, opts.names, num):
    pf = "{0}_star".format(prefix)
    bamfile = pf + "Aligned.sortedByCoord.out.bam"
    cmd = STAR + " --readFilesIn {0}".format(" ".join(p))
    if p[0].endswith(".gz"):
        cmd += " --readFilesCommand zcat"
    cmd += " --outSAMtype BAM SortedByCoordinate"
    cmd += " --outFileNamePrefix {0}".format(pf)
    cmd += " --twopassMode Basic"
    # Compatibility for cufflinks
    cmd += " --outSAMstrandField intronMotif"
    cmd += " --outFilterIntronMotifs RemoveNoncanonical"
    mm.add(p, bamfile, cmd)

mm.write()
def star(args)
%prog star folder reference

Run STAR on a folder of reads.
3.29791
3.177873
1.037773
p = OptionParser(cufflinks.__doc__)
p.add_option("--gtf", help="Reference annotation [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

folder, reference = args
cpus = opts.cpus
gtf = opts.gtf
transcripts = "transcripts.gtf"

mm = MakeManager()
gtfs = []
for bam in iglob(folder, "*.bam"):
    pf = op.basename(bam).split(".")[0]
    outdir = pf + "_cufflinks"
    cmd = "cufflinks"
    cmd += " -o {0}".format(outdir)
    cmd += " -p {0}".format(cpus)
    if gtf:
        cmd += " -g {0}".format(gtf)
    cmd += " --frag-bias-correct {0}".format(reference)
    cmd += " --multi-read-correct"
    cmd += " {0}".format(bam)
    cgtf = op.join(outdir, transcripts)
    mm.add(bam, cgtf, cmd)
    gtfs.append(cgtf)

assemblylist = "assembly_list.txt"
cmd = 'find . -name "{0}" > {1}'.format(transcripts, assemblylist)
mm.add(gtfs, assemblylist, cmd)

mergedgtf = "merged/merged.gtf"
cmd = "cuffmerge"
cmd += " -o merged"
cmd += " -p {0}".format(cpus)
if gtf:
    cmd += " -g {0}".format(gtf)
cmd += " -s {0}".format(reference)
cmd += " {0}".format(assemblylist)
mm.add(assemblylist, mergedgtf, cmd)

mm.write()
def cufflinks(args)
%prog cufflinks folder reference

Run cufflinks on a folder containing tophat results.
2.506116
2.454472
1.021041
from jcvi.apps.bowtie import check_index
from jcvi.formats.fastq import guessoffset

p = OptionParser(tophat.__doc__)
p.add_option("--gtf", help="Reference annotation [default: %default]")
p.add_option("--single", default=False, action="store_true",
             help="Single end mapping")
p.add_option("--intron", default=15000, type="int",
             help="Max intron size [default: %default]")
p.add_option("--dist", default=-50, type="int",
             help="Mate inner distance [default: %default]")
p.add_option("--stdev", default=50, type="int",
             help="Mate standard deviation [default: %default]")
p.set_phred()
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

num = 1 if opts.single else 2
folder, reference = args
reference = check_index(reference)
for p, prefix in iter_project(folder, n=num):
    outdir = "{0}_tophat".format(prefix)
    outfile = op.join(outdir, "accepted_hits.bam")
    if op.exists(outfile):
        logging.debug("File `{0}` found. Skipping.".format(outfile))
        continue

    cmd = "tophat -p {0}".format(opts.cpus)
    if opts.gtf:
        cmd += " -G {0}".format(opts.gtf)
    cmd += " -o {0}".format(outdir)

    if num == 1:  # Single-end
        a, = p
    else:  # Paired-end
        a, b = p
        cmd += " --max-intron-length {0}".format(opts.intron)
        cmd += " --mate-inner-dist {0}".format(opts.dist)
        cmd += " --mate-std-dev {0}".format(opts.stdev)

    phred = opts.phred or str(guessoffset([a]))
    if phred == "64":
        cmd += " --phred64-quals"
    cmd += " {0} {1}".format(reference, " ".join(p))
    sh(cmd)
def tophat(args)
%prog tophat folder reference

Run tophat on a folder of reads.
2.595776
2.542535
1.02094
from scipy.stats import hmean

return int(round(hmean(np.clip(a, a_min, a_max))))
def hmean_int(a, a_min=5778, a_max=1149851)
Harmonic mean of an array; returns the closest int.
3.125829
2.778952
1.124823
counts = np.zeros(BB, dtype=int)
for x in a:
    c = int(round(math.log(x, phi)))
    if c < lb:
        c = lb
    if c > ub:
        c = ub
    counts[c - lb] += 1
return counts
def golden_array(a, phi=1.61803398875, lb=LB, ub=UB)
Given a list of ints, we aggregate similar values so that it becomes an
array of multiples of phi, where phi is the golden ratio.

phi ^ 14 = 843
phi ^ 33 = 7881196

So the array of counts goes between 843 and 7881196. One trivia is that
the exponents of phi get closer to integers as N grows. See the
interesting discussion here:
<https://www.johndcook.com/blog/2017/03/22/golden-powers-are-nearly-integers/>
3.142025
3.56808
0.880593
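A minimal sketch of the binning rule above: each value is mapped to the nearest integer exponent of phi by rounding its log base phi. The LB=14/UB=33 bounds and BB = UB - LB + 1 are assumptions inferred from the docstring (phi^14 ≈ 843, phi^33 ≈ 7881196):

import math

PHI = 1.61803398875
LB, UB = 14, 33       # exponent bounds, from the docstring (assumption)
BB = UB - LB + 1      # number of bins, assuming lb..ub inclusive

def nearest_phi_exponent(x):
    # Round log_phi(x), then clamp into [LB, UB]
    return min(max(int(round(math.log(x, PHI))), LB), UB)

print(nearest_phi_exponent(1000))  # 1000 ~ phi^14.36 -> bin 14
print(round(PHI ** 14))            # 843
print(round(PHI ** 33))            # 7881196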
from .chic import score_evaluate_M

t, stour, tour_score, active_sizes, M = arg
stour_score, = score_evaluate_M(stour, active_sizes, M)
delta_score = tour_score - stour_score
log10d = np.log10(delta_score) if delta_score > 1e-9 else -9
return t, log10d
def prune_tour_worker(arg)
Worker thread for CLMFile.prune_tour()
8.218111
8.308777
0.989088
p = OptionParser(heatmap.__doc__)
p.add_option("--resolution", default=500000, type="int",
             help="Resolution when counting the links")
p.add_option("--chr", help="Plot this contig/chr only")
p.add_option("--nobreaks", default=False, action="store_true",
             help="Do not plot breaks (esp. if contigs are small)")
opts, args, iopts = p.set_image_options(args, figsize="10x10",
                                        style="white", cmap="coolwarm",
                                        format="png", dpi=120)

if len(args) != 2:
    sys.exit(not p.print_help())

npyfile, jsonfile = args
contig = opts.chr

# Load contig/chromosome starts and sizes
header = json.loads(open(jsonfile).read())
resolution = header.get("resolution", opts.resolution)
logging.debug("Resolution set to {}".format(resolution))

# Load the matrix
A = np.load(npyfile)

# Select specific submatrix
if contig:
    contig_start = header["starts"][contig]
    contig_size = header["sizes"][contig]
    contig_end = contig_start + contig_size
    A = A[contig_start:contig_end, contig_start:contig_end]

# One practical concern: the diagonal counts may be too strong. This can
# be resolved either by masking them, or by performing a log transform on
# the entire heatmap.
B = A.astype("float64")
B += 1.0
B = np.log(B)
vmin, vmax = 1, 7
B[B < vmin] = vmin
B[B > vmax] = vmax
print(B)
logging.debug("Matrix log-transformation and thresholding ({}-{}) done"
              .format(vmin, vmax))

# Canvas
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])        # whole canvas
ax = fig.add_axes([.05, .05, .9, .9])    # just the heatmap

breaks = header["starts"].values()
breaks += [header["total_bins"]]  # This is actually discarded
breaks = sorted(breaks)[1:]
if contig or opts.nobreaks:
    breaks = []
plot_heatmap(ax, B, breaks, iopts, binsize=resolution)

# Title
pf = npyfile.rsplit(".", 1)[0]
title = pf
if contig:
    title += "-{}".format(contig)
root.text(.5, .98, title, color="darkslategray", size=18,
          ha="center", va="center")

normalize_axes(root)
image_name = title + "." + iopts.format
# macOS sometimes has way too verbose output
logging.getLogger().setLevel(logging.CRITICAL)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def heatmap(args)
%prog heatmap input.npy genome.json

Plot heatmap based on .npy data file. The .npy stores a square matrix with
bins of the genome, and the cells inside the matrix represent the number
of links between bin i and bin j. The `genome.json` contains the offsets
of each contig/chr so that we know where to draw boundary lines, or
extract a per contig/chromosome heatmap.
3.945331
3.710479
1.063294
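A small sketch of the log transform and clipping step above, on a synthetic count matrix (the vmin/vmax thresholds match the record; the data is made up):

import numpy as np

A = np.array([[900, 12], [12, 1500]])   # synthetic link counts
B = np.log(A.astype("float64") + 1.0)   # +1 keeps zero counts finite
B = np.clip(B, 1, 7)                    # same vmin/vmax thresholds as above
print(B.round(2))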
p = OptionParser(mergemat.__doc__)
p.set_outfile(outfile="out")
opts, args = p.parse_args(args)

if len(args) < 1:
    sys.exit(not p.print_help())

npyfiles = args
A = np.load(npyfiles[0])
logging.debug("Load `{}`: matrix of shape {}; sum={}"
              .format(npyfiles[0], A.shape, A.sum()))
for npyfile in npyfiles[1:]:
    B = np.load(npyfile)
    A += B
    logging.debug("Load `{}`: sum={}".format(npyfile, A.sum()))

pf = opts.outfile
np.save(pf, A)
logging.debug("Combined {} files into `{}.npy`".format(len(npyfiles), pf))
def mergemat(args)
%prog mergemat *.npy

Combine counts from multiple .npy data files.
3.029465
2.747011
1.102822
import pysam

bamfile = pysam.AlignmentFile(bamfile, "rb")
seqsize = {}
for kv in bamfile.header["SQ"]:
    if kv["LN"] < 10 * N:
        continue
    seqsize[kv["SN"]] = kv["LN"] / N + 1

allseqs = natsorted(seqsize.keys())
allseqsizes = np.array([seqsize[x] for x in allseqs])
seqstarts = np.cumsum(allseqsizes)
seqstarts = np.roll(seqstarts, 1)
total_bins = seqstarts[0]
seqstarts[0] = 0
seqstarts = dict(zip(allseqs, seqstarts))

return seqstarts, seqsize, total_bins
def get_seqstarts(bamfile, N)
Go through the SQ headers and pull out all sequences with size greater
than the resolution settings, i.e. those that contain at least a few
cells.
2.790582
2.814774
0.991405
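The cumsum-then-roll idiom above turns per-sequence bin counts into start offsets; a tiny sketch with made-up sizes:

import numpy as np

sizes = np.array([5, 3, 4])     # bins per sequence (synthetic)
starts = np.cumsum(sizes)       # [5, 8, 12]
starts = np.roll(starts, 1)     # [12, 5, 8]; the wrapped total lands first
total_bins = starts[0]          # 12
starts[0] = 0                   # first sequence starts at offset 0
print(starts, total_bins)       # [0 5 8] 12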
b = np.ones(bins, dtype="float64")
b[0] = start
for i in range(1, bins):
    b[i] = b[i - 1] * ratio
bins = np.around(b).astype(dtype="int")
binsizes = np.diff(bins)
return bins, binsizes
def get_distbins(start=100, bins=2500, ratio=1.01)
Get exponentially sized distance bins.
3.037666
3.003142
1.011496
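A quick check of the geometric bin spacing; ratio=1.5 is used here purely to make the growth visible (the function's default is 1.01):

import numpy as np

start, nbins, ratio = 100, 6, 1.5
b = start * ratio ** np.arange(nbins)   # geometric progression
bins = np.around(b).astype(int)
print(bins)            # [100 150 225 338 506 759]
print(np.diff(bins))   # bin widths grow as the ratio compounds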
p = OptionParser(simulate.__doc__)
p.add_option("--genomesize", default=10000000, type="int",
             help="Genome size")
p.add_option("--genes", default=1000, type="int", help="Number of genes")
p.add_option("--contigs", default=100, type="int", help="Number of contigs")
p.add_option("--coverage", default=10, type="int", help="Link coverage")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

pf, = args
GenomeSize = opts.genomesize
Genes = opts.genes
Contigs = opts.contigs
Coverage = opts.coverage
PE = 500
Links = int(GenomeSize * Coverage / PE)

# Simulate the contig sizes that sum to GenomeSize
# See also:
# <https://en.wikipedia.org/wiki/User:Skinnerd/Simplex_Point_Picking>
ContigSizes, = np.random.dirichlet([1] * Contigs, 1) * GenomeSize
ContigSizes = np.array(np.round_(ContigSizes, decimals=0), dtype=int)
ContigStarts = np.zeros(Contigs, dtype=int)
ContigStarts[1:] = np.cumsum(ContigSizes)[:-1]

# Write IDS file
idsfile = pf + ".ids"
fw = open(idsfile, "w")
print("#Contig\tRECounts\tLength", file=fw)
for i, s in enumerate(ContigSizes):
    print("tig{:04d}\t{}\t{}".format(i, s / (4 ** 4), s), file=fw)
fw.close()

# Simulate the gene positions
GenePositions = np.sort(np.random.random_integers(0, GenomeSize - 1,
                                                  size=Genes))
write_last_and_beds(pf, GenePositions, ContigStarts)

# Simulate links, uniform start, with link distances following 1/x, where x
# is the distance between the links. As an approximation, we have links
# between [1e3, 1e7], so we map from uniform [1e-7, 1e-3]
LinkStarts = np.sort(np.random.random_integers(0, GenomeSize - 1,
                                               size=Links))
a, b = 1e-7, 1e-3
LinkSizes = np.array(np.round_(1 / ((b - a) * np.random.rand(Links) + a),
                               decimals=0), dtype="int")
LinkEnds = LinkStarts + LinkSizes

# Find link to contig membership
LinkStartContigs = np.searchsorted(ContigStarts, LinkStarts) - 1
LinkEndContigs = np.searchsorted(ContigStarts, LinkEnds) - 1

# Extract inter-contig links
InterContigLinks = (LinkStartContigs != LinkEndContigs) & \
                   (LinkEndContigs != Contigs)
ICLinkStartContigs = LinkStartContigs[InterContigLinks]
ICLinkEndContigs = LinkEndContigs[InterContigLinks]
ICLinkStarts = LinkStarts[InterContigLinks]
ICLinkEnds = LinkEnds[InterContigLinks]

# Write CLM file
write_clm(pf, ICLinkStartContigs, ICLinkEndContigs,
          ICLinkStarts, ICLinkEnds, ContigStarts, ContigSizes)
def simulate(args)
%prog simulate test

Simulate CLM and IDS files with given names.

The simulator assumes several distributions:
- Links are distributed uniformly across genome
- Log10(link_size) are distributed normally
- Genes are distributed uniformly
3.020748
3.017743
1.000996
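A minimal sketch of the inverse-transform trick used above for the 1/x-distributed link sizes: sampling u uniformly on [1e-7, 1e-3] and taking 1/u yields sizes spanning [1e3, 1e7] with a heavy right tail:

import numpy as np

rng = np.random.default_rng(42)
a, b = 1e-7, 1e-3
u = (b - a) * rng.random(100000) + a   # uniform on [a, b]
sizes = 1.0 / u                        # spans [1/b, 1/a] = [1e3, 1e7]
print(sizes.min() >= 1e3, sizes.max() <= 1e7)
print(np.median(sizes))                # median near 2e3: heavy right tail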
qbedfile = pf + "tigs.bed"
sbedfile = pf + "chr.bed"
lastfile = "{}tigs.{}chr.last".format(pf, pf)
qbedfw = open(qbedfile, "w")
sbedfw = open(sbedfile, "w")
lastfw = open(lastfile, "w")

GeneContigs = np.searchsorted(ContigStarts, GenePositions) - 1
for i, (c, gstart) in enumerate(zip(GeneContigs, GenePositions)):
    gene = "gene{:05d}".format(i)
    tig = "tig{:04d}".format(c)
    start = ContigStarts[c]
    cstart = gstart - start
    print("\t".join(str(x) for x in (tig, cstart, cstart + 1, gene)),
          file=qbedfw)
    print("\t".join(str(x) for x in ("chr1", gstart, gstart + 1, gene)),
          file=sbedfw)
    lastatoms = [gene, gene, 100] + [0] * 8 + [100]
    print("\t".join(str(x) for x in lastatoms), file=lastfw)

qbedfw.close()
sbedfw.close()
lastfw.close()
def write_last_and_beds(pf, GenePositions, ContigStarts)
Write LAST file, query and subject BED files.
2.779079
2.666475
1.04223
clm = defaultdict(list)
for start, end, linkstart, linkend in \
        zip(ICLinkStartContigs, ICLinkEndContigs, ICLinkStarts, ICLinkEnds):
    start_a = ContigStarts[start]
    start_b = start_a + ContigSizes[start]
    end_a = ContigStarts[end]
    end_b = end_a + ContigSizes[end]
    if linkend >= end_b:
        continue
    clm[(start, end)].append((linkstart - start_a, start_b - linkstart,
                              linkend - end_a, end_b - linkend))

clmfile = pf + ".clm"
fw = open(clmfile, "w")

def format_array(a):
    return [str(x) for x in sorted(a) if x > 0]

for (start, end), links in sorted(clm.items()):
    start = "tig{:04d}".format(start)
    end = "tig{:04d}".format(end)
    nlinks = len(links)
    if not nlinks:
        continue
    ff = format_array([(b + c) for a, b, c, d in links])
    fr = format_array([(b + d) for a, b, c, d in links])
    rf = format_array([(a + c) for a, b, c, d in links])
    rr = format_array([(a + d) for a, b, c, d in links])
    print("{}+ {}+\t{}\t{}".format(start, end, nlinks, " ".join(ff)), file=fw)
    print("{}+ {}-\t{}\t{}".format(start, end, nlinks, " ".join(fr)), file=fw)
    print("{}- {}+\t{}\t{}".format(start, end, nlinks, " ".join(rf)), file=fw)
    print("{}- {}-\t{}\t{}".format(start, end, nlinks, " ".join(rr)), file=fw)
fw.close()
def write_clm(pf, ICLinkStartContigs, ICLinkEndContigs, ICLinkStarts, ICLinkEnds, ContigStarts, ContigSizes)
Write CLM file from simulated data.
1.920403
1.944493
0.987611
p = OptionParser(density.__doc__)
p.add_option("--save", default=False, action="store_true",
             help="Write log densities of contigs to file")
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

clmfile, = args
clm = CLMFile(clmfile)
pf = clmfile.rsplit(".", 1)[0]

if opts.save:
    logdensities = clm.calculate_densities()
    densityfile = pf + ".density"
    fw = open(densityfile, "w")
    for name, logd in logdensities.items():
        s = clm.tig_to_size[name]
        print("\t".join(str(x) for x in (name, s, logd)), file=fw)
    fw.close()
    logging.debug("Density written to `{}`".format(densityfile))

tourfile = clmfile.rsplit(".", 1)[0] + ".tour"
tour = clm.activate(tourfile=tourfile, backuptour=False)
clm.flip_all(tour)
clm.flip_whole(tour)
clm.flip_one(tour)
def density(args)
%prog density test.clm

Estimate link density of contigs.
3.549939
3.359465
1.056698
p = OptionParser(optimize.__doc__)
p.add_option("--skiprecover", default=False, action="store_true",
             help="Do not import 'recover' contigs")
p.add_option("--startover", default=False, action="store_true",
             help="Do not resume from existing tour file")
p.add_option("--skipGA", default=False, action="store_true",
             help="Skip GA step")
p.set_outfile(outfile=None)
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

clmfile, = args
startover = opts.startover
runGA = not opts.skipGA
cpus = opts.cpus

# Load contact map
clm = CLMFile(clmfile, skiprecover=opts.skiprecover)

tourfile = opts.outfile or clmfile.rsplit(".", 1)[0] + ".tour"
if startover:
    tourfile = None
tour = clm.activate(tourfile=tourfile)

fwtour = open(tourfile, "w")
# Store INIT tour
print_tour(fwtour, clm.tour, "INIT", clm.active_contigs, clm.oo,
           signs=clm.signs)

if runGA:
    for phase in range(1, 3):
        tour = optimize_ordering(fwtour, clm, phase, cpus)
        tour = clm.prune_tour(tour, cpus)

# Flip orientations
phase = 1
while True:
    tag1, tag2 = optimize_orientations(fwtour, clm, phase, cpus)
    if tag1 == REJECT and tag2 == REJECT:
        logging.debug("Terminating ... no more {}".format(ACCEPT))
        break
    phase += 1

fwtour.close()
def optimize(args)
%prog optimize test.clm

Optimize the contig order and orientation, based on CLM file.
4.116782
3.832673
1.074128
from .chic import score_evaluate_M

# Prepare input files
tour_contigs = clm.active_contigs
tour_sizes = clm.active_sizes
tour_M = clm.M
tour = clm.tour
signs = clm.signs
oo = clm.oo

def callback(tour, gen, phase, oo):
    fitness = tour.fitness if hasattr(tour, "fitness") else None
    label = "GA{}-{}".format(phase, gen)
    if fitness:
        fitness = "{0}".format(fitness).split(",")[0].replace("(", "")
        label += "-" + fitness
    if gen % 20 == 0:
        print_tour(fwtour, tour, label, tour_contigs, oo, signs=signs)
    return tour

callbacki = partial(callback, phase=phase, oo=oo)
toolbox = GA_setup(tour)
toolbox.register("evaluate", score_evaluate_M,
                 tour_sizes=tour_sizes, tour_M=tour_M)
tour, tour_fitness = GA_run(toolbox, ngen=1000, npop=100,
                            cpus=cpus, callback=callbacki)
clm.tour = tour

return tour
def optimize_ordering(fwtour, clm, phase, cpus)
Optimize the ordering of contigs by Genetic Algorithm (GA).
5.00914
4.835193
1.035975
# Prepare input files
tour_contigs = clm.active_contigs
tour = clm.tour
oo = clm.oo

print_tour(fwtour, tour, "FLIPALL{}".format(phase), tour_contigs,
           oo, signs=clm.signs)
tag1 = clm.flip_whole(tour)
print_tour(fwtour, tour, "FLIPWHOLE{}".format(phase), tour_contigs,
           oo, signs=clm.signs)
tag2 = clm.flip_one(tour)
print_tour(fwtour, tour, "FLIPONE{}".format(phase), tour_contigs,
           oo, signs=clm.signs)

return tag1, tag2
def optimize_orientations(fwtour, clm, phase, cpus)
Optimize the orientations of contigs by using heuristic flipping.
3.747446
3.658745
1.024243
qbedfile, sbedfile = get_bed_filenames(lastfile, p, opts)
qbedfile = op.abspath(qbedfile)
sbedfile = op.abspath(sbedfile)

qbed = Bed(qbedfile, sorted=False)
contig_to_beds = dict(qbed.sub_beds())

# Create a separate directory for the subplots and movie
mkdir(odir, overwrite=True)
os.chdir(odir)
logging.debug("Change into subdir `{}`".format(odir))

# Make anchorsfile
anchorsfile = ".".join(op.basename(lastfile).split(".", 2)[:2]) + ".anchors"
fw = open(anchorsfile, "w")
for b in Blast(lastfile):
    print("\t".join((gene_name(b.query), gene_name(b.subject),
                     str(int(b.score)))), file=fw)
fw.close()

# Symlink sbed
symlink(sbedfile, op.basename(sbedfile))

return anchorsfile, qbedfile, contig_to_beds
def prepare_synteny(tourfile, lastfile, odir, p, opts)
Prepare synteny plots for movie().
4.52211
4.314257
1.048178
tour = []
tour_o = []
for contig in row.split():
    if contig[-1] in ('+', '-', '?'):
        tour.append(contig[:-1])
        tour_o.append(contig[-1])
    else:  # Unoriented
        tour.append(contig)
        tour_o.append('?')
return tour, tour_o
def separate_tour_and_o(row)
The tour line typically contains a contig list like:

tig00044568+ tig00045748- tig00071055- tig00015093- tig00030900-

This function separates the names from the orientations.
3.267538
2.90459
1.124957
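A quick usage sketch, assuming the function above is in scope and using the docstring's example line:

row = "tig00044568+ tig00045748- tig00071055- tig00015093- tig00030900-"
tour, tour_o = separate_tour_and_o(row)
print(tour)    # ['tig00044568', 'tig00045748', 'tig00071055', ...]
print(tour_o)  # ['+', '-', '-', '-', '-']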
row = open(tourfile).readlines()[-1]
_tour, _tour_o = separate_tour_and_o(row)
tour = []
tour_o = []
for tc, to in zip(_tour, _tour_o):
    if tc not in clm.contigs:
        logging.debug("Contig `{}` in file `{}` not found in `{}`"
                      .format(tc, tourfile, clm.idsfile))
        continue
    tour.append(tc)
    tour_o.append(to)

return tour, tour_o
def iter_last_tour(tourfile, clm)
Extract last tour from tourfile. The clm instance is also passed in to see if any contig is covered in the clm.
3.557264
3.32237
1.070701
fp = open(tourfile)

i = 0
for row in fp:
    if row[0] == '>':
        label = row[1:].strip()
        if label.startswith("GA"):
            pf, j, score = label.split("-", 2)
            j = int(j)
        else:
            j = 0
        i += 1
    else:
        if j % frames != 0:
            continue
        tour, tour_o = separate_tour_and_o(row)
        yield i, label, tour, tour_o

fp.close()
def iter_tours(tourfile, frames=1)
Extract tours from tourfile. Tourfile contains a set of contig
configurations, generated at each iteration of the genetic algorithm. Each
configuration has two rows: the first row contains the iteration id and
score, the second row contains the list of contigs, separated by spaces.
4.213403
4.18301
1.007266
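A hypothetical tourfile fragment consistent with the parsing logic above (a '>' header carrying a GA{phase}-{generation}-{fitness} label, followed by the space-separated tour row); the contig names and scores are invented:

>GA1-20-52164.5
tig00044568+ tig00045748- tig00071055-
>GA1-40-52489.1
tig00044568+ tig00071055+ tig00045748-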
p = OptionParser(movie.__doc__)
p.add_option("--frames", default=500, type="int",
             help="Only plot every N frames")
p.add_option("--engine", default="ffmpeg", choices=("ffmpeg", "gifsicle"),
             help="Movie engine, output MP4 or GIF")
p.set_beds()
opts, args, iopts = p.set_image_options(args, figsize="16x8",
                                        style="white", cmap="coolwarm",
                                        format="png", dpi=300)

if len(args) != 3:
    sys.exit(not p.print_help())

tourfile, clmfile, lastfile = args
tourfile = op.abspath(tourfile)
clmfile = op.abspath(clmfile)
lastfile = op.abspath(lastfile)
cwd = os.getcwd()
odir = op.basename(tourfile).rsplit(".", 1)[0] + "-movie"
anchorsfile, qbedfile, contig_to_beds = \
    prepare_synteny(tourfile, lastfile, odir, p, opts)

args = []
for i, label, tour, tour_o in iter_tours(tourfile, frames=opts.frames):
    padi = "{:06d}".format(i)
    # Make sure the anchorsfile and bedfile have the serial number in,
    # otherwise parallelization may fail
    a, b = op.basename(anchorsfile).split(".", 1)
    ianchorsfile = a + "_" + padi + "." + b
    symlink(anchorsfile, ianchorsfile)

    # Make BED file with new order
    qb = Bed()
    for contig, o in zip(tour, tour_o):
        if contig not in contig_to_beds:
            continue
        bedlines = contig_to_beds[contig][:]
        if o == '-':
            bedlines.reverse()
        for x in bedlines:
            qb.append(x)

    a, b = op.basename(qbedfile).split(".", 1)
    ibedfile = a + "_" + padi + "." + b
    qb.print_to_file(ibedfile)

    # Plot dot plot, but do not sort contigs by name (otherwise losing
    # order)
    image_name = padi + "." + iopts.format
    tour = ",".join(tour)
    args.append([[tour, clmfile, ianchorsfile,
                  "--outfile", image_name, "--label", label]])

Jobs(movieframe, args).run()

os.chdir(cwd)
make_movie(odir, odir, engine=opts.engine, format=iopts.format)
def movie(args)
%prog movie test.tour test.clm ref.contigs.last

Plot optimization history.
4.673956
4.335537
1.078057
p = OptionParser(score.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 3:
    sys.exit(not p.print_help())

mdir, cdir, contigsfasta = args
orderingfiles = natsorted(iglob(mdir, "*.ordering"))
sizes = Sizes(contigsfasta)
contig_names = list(sizes.iter_names())
contig_ids = dict((name, i) for (i, name) in enumerate(contig_names))

oo = []
# Load contact matrix
glm = op.join(cdir, "all.GLM")
N = len(contig_ids)
M = np.zeros((N, N), dtype=int)
fp = open(glm)
for row in fp:
    if row[0] == '#':
        continue
    x, y, z = row.split()
    if x == 'X':
        continue
    M[int(x), int(y)] = int(z)

fwtour = open("tour", "w")

def callback(tour, gen, oo):
    fitness = tour.fitness if hasattr(tour, "fitness") else None
    label = "GA-{0}".format(gen)
    if fitness:
        fitness = "{0}".format(fitness).split(",")[0].replace("(", "")
        label += "-" + fitness
    print_tour(fwtour, tour, label, contig_names, oo)
    return tour

for ofile in orderingfiles:
    co = ContigOrdering(ofile)
    for x in co:
        contig_id = contig_ids[x.contig_name]
        oo.append(contig_id)
    pf = op.basename(ofile).split(".")[0]
    print(pf)
    print(oo)
    tour, tour_sizes, tour_M = prepare_ec(oo, sizes, M)

    # Store INIT tour
    print_tour(fwtour, tour, "INIT", contig_names, oo)

    # Faster Cython version for evaluation
    from .chic import score_evaluate_M
    callbacki = partial(callback, oo=oo)
    toolbox = GA_setup(tour)
    toolbox.register("evaluate", score_evaluate_M,
                     tour_sizes=tour_sizes, tour_M=tour_M)
    tour, tour.fitness = GA_run(toolbox, npop=100, cpus=opts.cpus,
                                callback=callbacki)
    print(tour, tour.fitness)
    break

fwtour.close()
def score(args)
%prog score main_results/ cached_data/ contigsfasta

Score the current LACHESIS CLM.
4.536965
4.356666
1.041385
tour = range(len(oo))
tour_sizes = np.array([sizes.sizes[x] for x in oo])
tour_M = M[oo, :][:, oo]
return tour, tour_sizes, tour_M
def prepare_ec(oo, sizes, M)
This prepares EC and converts from contig_id to an index.
3.866329
3.507558
1.102285
sizes_oo = np.array([tour_sizes[x] for x in tour])
sizes_cum = np.cumsum(sizes_oo) - sizes_oo / 2
s = 0
size = len(tour)
for ia in xrange(size):
    a = tour[ia]
    for ib in xrange(ia + 1, size):
        b = tour[ib]
        links = tour_M[a, b]
        dist = sizes_cum[ib] - sizes_cum[ia]
        if dist > 1e7:
            break
        s += links * 1. / dist
return s,
def score_evaluate(tour, tour_sizes=None, tour_M=None)
SLOW python version of the evaluation function. For benchmarking purposes only. Do not use in production.
3.3235
3.348678
0.992481
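A small worked example of the score above (links divided by midpoint distance, summed over contig pairs within the 10 Mb cap), on a toy three-contig tour:

import numpy as np

tour = [0, 1, 2]
tour_sizes = np.array([100, 200, 100])
tour_M = np.array([[0, 10, 2],
                   [10, 0, 4],
                   [2, 4, 0]])

mids = np.cumsum(tour_sizes) - tour_sizes / 2   # midpoints: [50, 200, 350]
score = sum(tour_M[a, b] / (mids[ib] - mids[ia])
            for ia, a in enumerate(tour)
            for ib, b in enumerate(tour[ia + 1:], ia + 1))
print(score)  # 10/150 + 2/300 + 4/150 ~ 0.1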
p = OptionParser(movieframe.__doc__)
p.add_option("--label", help="Figure title")
p.set_beds()
p.set_outfile(outfile=None)
opts, args, iopts = p.set_image_options(args, figsize="16x8",
                                        style="white", cmap="coolwarm",
                                        format="png", dpi=120)

if len(args) != 3:
    sys.exit(not p.print_help())

tour, clmfile, anchorsfile = args
tour = tour.split(",")
image_name = opts.outfile or ("movieframe." + iopts.format)
label = opts.label or op.basename(image_name).rsplit(".", 1)[0]

clm = CLMFile(clmfile)
totalbins, bins, breaks = make_bins(tour, clm.tig_to_size)
M = read_clm(clm, totalbins, bins)

fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])         # whole canvas
ax1 = fig.add_axes([.05, .1, .4, .8])     # heatmap
ax2 = fig.add_axes([.55, .1, .4, .8])     # dot plot
ax2_root = fig.add_axes([.5, 0, .5, 1])   # dot plot canvas

# Left axis: heatmap
plot_heatmap(ax1, M, breaks, iopts)

# Right axis: synteny
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts,
                                                 sorted=False)
dotplot(anchorsfile, qbed, sbed, fig, ax2_root, ax2, sep=False, title="")

root.text(.5, .98, clm.name, color="g", ha="center", va="center")
root.text(.5, .95, label, color="darkslategray", ha="center", va="center")
normalize_axes(root)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def movieframe(args)
%prog movieframe tour test.clm contigs.ref.anchors

Draw heatmap and synteny in the same plot.
4.014009
3.490465
1.149993
p = OptionParser(agp.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

odir, contigsfasta = args
fwagp = must_open(opts.outfile, 'w')
orderingfiles = natsorted(iglob(odir, "*.ordering"))
sizes = Sizes(contigsfasta).mapping
contigs = set(sizes.keys())
anchored = set()

for ofile in orderingfiles:
    co = ContigOrdering(ofile)
    anchored |= set([x.contig_name for x in co])
    obj = op.basename(ofile).split('.')[0]
    co.write_agp(obj, sizes, fwagp)

singletons = contigs - anchored
logging.debug('Anchored: {}, Singletons: {}'
              .format(len(anchored), len(singletons)))

for s in natsorted(singletons):
    order_to_agp(s, [(s, "?")], sizes, fwagp)
def agp(args)
%prog agp main_results/ contigs.fasta

Generate AGP file based on LACHESIS output.
4.272899
3.965281
1.077578
contigorder = [(x.contig_name, x.strand) for x in self]
order_to_agp(obj, contigorder, sizes, fw, gapsize=gapsize,
             gaptype=gaptype, evidence=evidence)
def write_agp(self, obj, sizes, fw=sys.stdout, gapsize=100, gaptype="contig", evidence="map")
Converts the ContigOrdering file into AGP format
6.359537
4.213562
1.509302
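For reference, a hypothetical pair of AGP lines that order_to_agp might emit for one oriented contig followed by a 100 bp gap, given the defaults in the signature (gapsize=100, gaptype="contig", evidence="map"); order_to_agp itself is not shown in this section, so the exact output is an assumption:

chr1    1       46912   1       W       tig00015093     1       46912   +
chr1    46913   47012   2       U       100     contig  yes     map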
idsfile = self.idsfile
logging.debug("Parse idsfile `{}`".format(idsfile))
fp = open(idsfile)
tigs = []
for row in fp:
    if row[0] == '#':  # Header
        continue
    atoms = row.split()
    tig, size = atoms[:2]
    size = int(size)
    if skiprecover and len(atoms) == 3 and atoms[2] == 'recover':
        continue
    tigs.append((tig, size))

# Arrange contig names and sizes
_tigs, _sizes = zip(*tigs)
self.contigs = set(_tigs)
self.sizes = np.array(_sizes)
self.tig_to_size = dict(tigs)

# Initially all contigs are considered active
self.active = set(_tigs)
def parse_ids(self, skiprecover)
IDS file has a list of contigs that need to be ordered. The 'recover'
keyword, if present in the third column, marks a less confident contig.

tig00015093  46912
tig00035238  46779  recover
tig00030900  119291
5.742777
2.573421
2.231573
active = self.active
densities = defaultdict(int)
for (at, bt), links in self.contacts.items():
    if not (at in active and bt in active):
        continue
    densities[at] += links
    densities[bt] += links

logdensities = {}
for x, d in densities.items():
    s = self.tig_to_size[x]
    logd = np.log10(d * 1. / min(s, 500000))
    logdensities[x] = logd

return logdensities
def calculate_densities(self)
Calculate the density of inter-contig links per base. Strong contigs are
considered to have a high level of inter-contig links in the current
partition.
4.135475
3.668313
1.127351
if tourfile and (not op.exists(tourfile)):
    logging.debug("Tourfile `{}` not found".format(tourfile))
    tourfile = None

if tourfile:
    logging.debug("Importing tourfile `{}`".format(tourfile))
    tour, tour_o = iter_last_tour(tourfile, self)
    self.active = set(tour)
    tig_to_idx = self.tig_to_idx
    tour = [tig_to_idx[x] for x in tour]
    signs = sorted([(x, FF[o]) for (x, o) in zip(tour, tour_o)])
    _, signs = zip(*signs)
    self.signs = np.array(signs, dtype=int)
    if backuptour:
        backup(tourfile)
    tour = array.array('i', tour)
else:
    self.report_active()
    while True:
        logdensities = self.calculate_densities()
        lb, ub = outlier_cutoff(logdensities.values())
        logging.debug("Log10(link_densities) ~ [{}, {}]".format(lb, ub))
        remove = set(x for x, d in logdensities.items()
                     if (d < lb and self.tig_to_size[x] < minsize * 10))
        if remove:
            self.active -= remove
            self.report_active()
        else:
            break

    logging.debug("Remove contigs with size < {}".format(minsize))
    self.active = set(x for x in self.active
                      if self.tig_to_size[x] >= minsize)
    tour = range(self.N)  # Use starting (random) order otherwise
    tour = array.array('i', tour)

    # Determine orientations
    self.flip_all(tour)

self.report_active()
self.tour = tour

return tour
def activate(self, tourfile=None, minsize=10000, backuptour=True)
Select contigs in the current partition. This is the setup phase of the
algorithm, and supports two modes:

- "de novo": This is useful at the start of a new run where no tours are
  available. We select the strong contigs that have a significant number
  of links to other contigs in the partition. We build a histogram of
  link density (# links per bp) and remove the contigs that appear as
  outliers. The orientations are derived from the matrix decomposition of
  the pairwise strandedness matrix O.

- "hotstart": This is useful when there was a past run, with a given
  tourfile. In this case, the active contig list and orientations are
  derived from the last tour in the file.
4.47128
4.32212
1.034511
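The docstring's "matrix decomposition of the pairwise strandedness matrix O" is implemented by get_signs, which is not shown in this section; as a purely hypothetical sketch, a common spectral approach recovers a consistent +1/-1 assignment from the signs of the leading eigenvector of O:

import numpy as np

# Hypothetical sketch (not necessarily what get_signs does): when O is
# close to the outer product s s^T of the true sign vector s, the leading
# eigenvector of O recovers the sign pattern.
O = np.array([[ 0,  5, -3],
              [ 5,  0, -4],
              [-3, -4,  0]])
w, v = np.linalg.eigh(O)
leading = v[:, np.argmax(w)]           # eigenvector of largest eigenvalue
signs = np.where(leading >= 0, 1, -1)
print(signs * signs[0])                # normalized so contig 0 is '+': [1, 1, -1]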
from .chic import score_evaluate_M

return score_evaluate_M(tour, self.active_sizes, self.M)
def evaluate_tour_M(self, tour)
Use Cythonized version to evaluate the score of a current tour
17.293421
14.16977
1.220445
from .chic import score_evaluate_P

return score_evaluate_P(tour, self.active_sizes, self.P)
def evaluate_tour_P(self, tour)
Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs.
18.051754
12.620217
1.430384
from .chic import score_evaluate_Q

return score_evaluate_Q(tour, self.active_sizes, self.Q)
def evaluate_tour_Q(self, tour)
Use Cythonized version to evaluate the score of a current tour, taking orientation into consideration. This may be the most accurate evaluation under the right condition.
18.32765
13.546539
1.35294
if self.signs is None:  # First run
    score = 0
else:
    old_signs = self.signs[:self.N]
    score, = self.evaluate_tour_Q(tour)

# Remember we cannot have ambiguous orientation code (0 or '?') here
self.signs = get_signs(self.O, validate=False, ambiguous=False)
score_flipped, = self.evaluate_tour_Q(tour)
if score_flipped >= score:
    tag = ACCEPT
else:
    self.signs = old_signs[:]
    tag = REJECT
self.flip_log("FLIPALL", score, score_flipped, tag)
return tag
def flip_all(self, tour)
Initialize the orientations based on pairwise O matrix.
7.615099
7.002648
1.08746
score, = self.evaluate_tour_Q(tour)
self.signs = -self.signs
score_flipped, = self.evaluate_tour_Q(tour)
if score_flipped > score:
    tag = ACCEPT
else:
    self.signs = -self.signs
    tag = REJECT
self.flip_log("FLIPWHOLE", score, score_flipped, tag)
return tag
def flip_whole(self, tour)
Test flipping all contigs at the same time to see if score improves.
4.928718
4.935844
0.998556
n_accepts = n_rejects = 0
any_tag_ACCEPT = False
for i, t in enumerate(tour):
    if i == 0:
        score, = self.evaluate_tour_Q(tour)

    self.signs[t] = -self.signs[t]
    score_flipped, = self.evaluate_tour_Q(tour)
    if score_flipped > score:
        n_accepts += 1
        tag = ACCEPT
    else:
        self.signs[t] = -self.signs[t]
        n_rejects += 1
        tag = REJECT
    self.flip_log("FLIPONE ({}/{})".format(i + 1, len(self.signs)),
                  score, score_flipped, tag)
    if tag == ACCEPT:
        any_tag_ACCEPT = True
        score = score_flipped

logging.debug("FLIPONE: N_accepts={} N_rejects={}"
              .format(n_accepts, n_rejects))
return ACCEPT if any_tag_ACCEPT else REJECT
def flip_one(self, tour)
Test flipping every single contig sequentially to see if score improves.
3.282876
3.237176
1.014117
while True:
    tour_score, = self.evaluate_tour_M(tour)
    logging.debug("Starting score: {}".format(tour_score))
    active_sizes = self.active_sizes
    M = self.M
    args = []
    for i, t in enumerate(tour):
        stour = tour[:i] + tour[i + 1:]
        args.append((t, stour, tour_score, active_sizes, M))

    # Parallel run
    p = Pool(processes=cpus)
    results = list(p.imap(prune_tour_worker, args))
    assert len(tour) == len(results), \
        "Array size mismatch, tour({}) != results({})" \
        .format(len(tour), len(results))

    # Identify outliers
    active_contigs = self.active_contigs
    idx, log10deltas = zip(*results)
    lb, ub = outlier_cutoff(log10deltas)
    logging.debug("Log10(delta_score) ~ [{}, {}]".format(lb, ub))

    remove = set(active_contigs[x] for (x, d) in results if d < lb)
    self.active -= remove
    self.report_active()

    tig_to_idx = self.tig_to_idx
    tour = [active_contigs[x] for x in tour]
    tour = array.array('i', [tig_to_idx[x] for x in tour
                             if x not in remove])
    if not remove:
        break

self.tour = tour
self.flip_all(tour)

return tour
def prune_tour(self, tour, cpus)
Test deleting each contig and check the delta_score; tour here must be an array of ints.
4.75538
4.558024
1.043299
N = self.N
tig_to_idx = self.tig_to_idx
M = np.zeros((N, N), dtype=int)
for (at, bt), links in self.contacts.items():
    if not (at in tig_to_idx and bt in tig_to_idx):
        continue
    ai = tig_to_idx[at]
    bi = tig_to_idx[bt]
    M[ai, bi] = M[bi, ai] = links
return M
def M(self)
Contact frequency matrix. Each cell contains the number of inter-contig
links between the i-th and j-th contigs.
2.853749
2.476113
1.152511
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
    if not (at in tig_to_idx and bt in tig_to_idx):
        continue
    ai = tig_to_idx[at]
    bi = tig_to_idx[bt]
    score = strandedness * md
    O[ai, bi] = O[bi, ai] = score
return O
def O(self)
Pairwise strandedness matrix. Each cell contains +1 if the i-th and j-th
contigs are in the same orientation, or -1 if they are in opposite
orientations.
3.519616
3.082044
1.141975
N = self.N
tig_to_idx = self.tig_to_idx
P = np.zeros((N, N, 2), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
    if not (at in tig_to_idx and bt in tig_to_idx):
        continue
    ai = tig_to_idx[at]
    bi = tig_to_idx[bt]
    P[ai, bi, 0] = P[bi, ai, 0] = md
    P[ai, bi, 1] = P[bi, ai, 1] = mh
return P
def P(self)
Contact frequency matrix with better precision on the distance between
contigs. In the matrix M, the distance is assumed to be the distance
between the mid-points of two contigs. Here, however, we compute the
harmonic mean of the links for the orientation configuration that is
shortest. This offers better precision for the distance between big
contigs.
2.902641
2.689924
1.079079
N = self.N
tig_to_idx = self.tig_to_idx
signs = self.signs
Q = np.ones((N, N, BB), dtype=int) * -1  # Use -1 as the sentinel
for (at, bt), k in self.contacts_oriented.items():
    if not (at in tig_to_idx and bt in tig_to_idx):
        continue
    ai = tig_to_idx[at]
    bi = tig_to_idx[bt]
    ao = signs[ai]
    bo = signs[bi]
    Q[ai, bi] = k[(ao, bo)]
return Q
def Q(self)
Contact frequency matrix when contigs are already oriented. This is a
similar matrix to M, but rather than holding the number of links in each
cell, it points to an array that has the actual distances.
4.355255
3.980851
1.094051
p = OptionParser(insertionpairs.__doc__)
p.add_option("--extend", default=10, type="int",
             help="Allow insertion sites to match up within distance")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
mergedbedfile = mergeBed(bedfile, d=opts.extend, nms=True)
bed = Bed(mergedbedfile)
fw = must_open(opts.outfile, "w")

support = lambda x: -x.reads
for b in bed:
    names = b.accn.split(",")
    ends = [EndPoint(x) for x in names]
    REs = sorted([x for x in ends if x.leftright == "RE"], key=support)
    LEs = sorted([x for x in ends if x.leftright == "LE"], key=support)
    if not (REs and LEs):
        continue
    mRE, mLE = REs[0], LEs[0]
    pRE, pLE = mRE.position, mLE.position
    if pLE < pRE:
        b.start, b.end = pLE - 1, pRE
    else:
        b.start, b.end = pRE - 1, pLE
    b.accn = "{0}|{1}".format(mRE.label, mLE.label)
    b.score = pLE - pRE - 1
    print(b, file=fw)
def insertionpairs(args)
%prog insertionpairs endpoints.bed

Pair up the candidate endpoints. A candidate excision point would contain
both left-end (LE) and right-end (RE) within a given distance.

-----------|   |------------
    -------|   |--------
  ---------|   |----------
       (RE)     (LE)
3.120748
2.965388
1.052391
p = OptionParser(insertion.__doc__)
p.add_option("--mindepth", default=6, type="int",
             help="Minimum depth to call an insertion")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

bedfile, = args
mindepth = opts.mindepth
bed = Bed(bedfile)
fw = must_open(opts.outfile, "w")
for seqid, feats in bed.sub_beds():
    left_ends = Counter([x.start for x in feats])
    right_ends = Counter([x.end for x in feats])
    selected = []
    for le, count in left_ends.items():
        if count >= mindepth:
            selected.append((seqid, le, "LE-{0}".format(le), count))
    for re, count in right_ends.items():
        if count >= mindepth:
            selected.append((seqid, re, "RE-{0}".format(re), count))
    selected.sort()
    for seqid, pos, label, count in selected:
        label = "{0}-r{1}".format(label, count)
        print("\t".join((seqid, str(pos - 1), str(pos), label)), file=fw)
def insertion(args)
%prog insertion mic.mac.bed

Find IES based on mapping MIC reads to MAC genome. Output a bedfile with
'lesions' (stack of broken reads) in the MAC genome.
2.395556
2.308559
1.037685
p.add_option("--distance", default=500, type="int", help="Outer distance between the two ends [default: %default]") p.add_option("--readlen", default=150, type="int", help="Length of the read") p.set_depth(depth=10) p.set_outfile(outfile=None)
def add_sim_options(p)
Add options shared by eagle or wgsim.
4.156691
3.976133
1.04541
p = OptionParser(wgsim.__doc__)
p.add_option("--erate", default=.01, type="float",
             help="Base error rate of the read [default: %default]")
p.add_option("--noerrors", default=False, action="store_true",
             help="Simulate reads with no errors [default: %default]")
p.add_option("--genomesize", type="int",
             help="Genome size in Mb [default: estimate from data]")
add_sim_options(p)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

fastafile, = args
pf = op.basename(fastafile).split(".")[0]

genomesize = opts.genomesize
size = genomesize * 1000000 if genomesize else Fasta(fastafile).totalsize
depth = opts.depth
readlen = opts.readlen
readnum = int(math.ceil(size * depth / (2 * readlen)))

distance = opts.distance
stdev = distance / 10
outpf = opts.outfile or "{0}.{1}bp.{2}x".format(pf, distance, depth)

logging.debug("Total genome size: {0} bp".format(size))
logging.debug("Target depth: {0}x".format(depth))
logging.debug("Number of read pairs (2x{0}): {1}".format(readlen, readnum))

if opts.noerrors:
    opts.erate = 0

cmd = "dwgsim -e {0} -E {0}".format(opts.erate)
if opts.noerrors:
    cmd += " -r 0 -R 0 -X 0 -y 0"
cmd += " -d {0} -s {1}".format(distance, stdev)
cmd += " -N {0} -1 {1} -2 {1}".format(readnum, readlen)
cmd += " {0} {1}".format(fastafile, outpf)
sh(cmd)
def wgsim(args)
%prog wgsim fastafile

Run dwgsim on fastafile.
2.83584
2.746142
1.032663
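A quick sanity check of the pair-count arithmetic above, using the defaults (10x depth, 150 bp reads) on a hypothetical 10 Mb genome:

import math

size, depth, readlen = 10_000_000, 10, 150
# Each pair contributes 2 * readlen bases, so pairs = size * depth / (2 * readlen)
readnum = int(math.ceil(size * depth / (2 * readlen)))
print(readnum)  # 333334 pairs ~ 100 Mb of sequence at 10x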
p = OptionParser(cov.__doc__)
p.add_option("--order",
             default="swede,kale,h165,yudal,aviso,abu,bristol,bzh",
             help="The order to plot the tracks, comma-separated")
p.add_option("--reverse", default=False, action="store_true",
             help="Plot the order in reverse")
p.add_option("--gauge_step", default=5000000, type="int",
             help="Step size for the base scale")
p.add_option("--hlsuffix", default="regions.forhaibao",
             help="Suffix for the filename to be used to highlight regions")
opts, args, iopts = p.set_image_options(args, figsize="11x8")

if len(args) != 4:
    sys.exit(not p.print_help())

chr1, chr2, sizesfile, datadir = args
chr1 = chr1.split(",")
chr2 = chr2.split(",")

order = opts.order
hlsuffix = opts.hlsuffix
if order:
    order = order.split(",")
    if opts.reverse:
        order.reverse()

sizes = Sizes(sizesfile).mapping
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])

chrs = (chr1, chr2)
chr_sizes, chr_sum_sizes, ratio = calc_ratio(chrs, sizes)
chr_size1, chr_size2 = chr_sum_sizes
chr_sizes1, chr_sizes2 = chr_sizes

w1_start, w1_end = center_panel(chr1, chr_size1, ratio)
w2_start, w2_end = center_panel(chr2, chr_size2, ratio)
w1s = w1_start
w2s = w2_start

dsg = "gray"
i = 0
for c1, s1 in zip(chr1, chr_sizes1):
    w1 = ratio * s1
    plot_label = i == 0
    i += 1
    canvas1 = (w1s, .6, w1, .3)
    Coverage(fig, root, canvas1, c1, (0, s1), datadir,
             order=order, gauge="top", plot_label=plot_label,
             gauge_step=opts.gauge_step, palette=dsg,
             cap=40, hlsuffix=hlsuffix)
    w1s += w1 + gap

i = 0
for c2, s2 in zip(chr2, chr_sizes2):
    w2 = ratio * s2
    plot_label = i == 0
    i += 1
    canvas2 = (w2s, .15, w2, .3)
    Coverage(fig, root, canvas2, c2, (0, s2), datadir,
             order=order, gauge="bottom", plot_label=plot_label,
             gauge_step=opts.gauge_step, palette=dsg,
             cap=40, hlsuffix=hlsuffix)
    w2s += w2 + gap

# Synteny panel
seqidsfile = make_seqids(chrs)
klayout = make_layout(chrs, chr_sum_sizes, ratio, template_cov)
Karyotype(fig, root, seqidsfile, klayout, gap=gap,
          generank=False, sizes=sizes)

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

chr2 = "_".join(chr2)
if opts.reverse:
    chr2 += ".reverse"
image_name = chr2 + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def cov(args)
%prog cov chrA01 chrC01 chr.sizes data AN.CN.1x1.lifted.anchors.simple

Plot coverage graphs between homeologs; the middle panel shows the
homeologous gene pairs. Allows multiple chromosomes to multiple
chromosomes.
3.554698
3.369807
1.054867
p = OptionParser(fig4.__doc__)
p.add_option("--gauge_step", default=200000, type="int",
             help="Step size for the base scale")
opts, args, iopts = p.set_image_options(args, figsize="9x7")

if len(args) != 2:
    sys.exit(not p.print_help())

layout, datadir = args
layout = F4ALayout(layout, datadir=datadir)

gs = opts.gauge_step
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])

block, napusbed, slayout = "r28.txt", "all.bed", "r28.layout"
s = Synteny(fig, root, block, napusbed, slayout, chr_label=False)
synteny_exts = [(x.xstart, x.xend) for x in s.rr]

h = .1
order = "bzh,yudal".split(",")
labels = (r"\textit{B. napus} A$\mathsf{_n}$2",
          r"\textit{B. rapa} A$\mathsf{_r}$2",
          r"\textit{B. oleracea} C$\mathsf{_o}$2",
          r"\textit{B. napus} C$\mathsf{_n}$2")
for t in layout:
    xstart, xend = synteny_exts[2 * t.i]
    canvas = [xstart, t.y, xend - xstart, h]
    root.text(xstart - h, t.y + h / 2, labels[t.i],
              ha="center", va="center")
    ch, ab = t.box_region.split(":")
    a, b = ab.split("-")
    vlines = [int(x) for x in (a, b)]
    Coverage(fig, root, canvas, t.seqid, (t.start, t.end), datadir,
             order=order, gauge="top", plot_chr_label=False,
             gauge_step=gs, palette="gray", cap=40,
             hlsuffix="regions.forhaibao", vlines=vlines)

# Highlight GSL biosynthesis genes
a, b = (3, "Bra029311"), (5, "Bo2g161590")
for gid in (a, b):
    start, end = s.gg[gid]
    xstart, ystart = start
    xend, yend = end
    x = (xstart + xend) / 2
    arrow = FancyArrowPatch(posA=(x, ystart - .04),
                            posB=(x, ystart - .005),
                            arrowstyle="fancy,head_width=6,head_length=8",
                            lw=3, fc='k', ec='k', zorder=20)
    root.add_patch(arrow)

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

image_name = "napus-fig4." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def fig4(args)
%prog fig4 layout data

Napus Figure 4A displays an example deleted region for quartet
chromosomes, showing read alignments from high GL and low GL lines.
5.101378
4.908676
1.039258
import math

from jcvi.formats.bed import Bed
from jcvi.graphics.chromosome import HorizontalChromosome
from jcvi.graphics.base import kb_formatter

p = OptionParser(deletion.__doc__)
opts, args, iopts = p.set_image_options(args)

if len(args) != 3:
    sys.exit(not p.print_help())

deletion_genes, deletions, bed = args
dg = [int(x) for x in open(deletion_genes)]
dsg, lsg = "darkslategray", "lightslategray"

fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
ax = fig.add_axes([.1, .1, .8, .8])
minval = 2 if deletion_genes == "deleted-genes" else 2048
bins = np.logspace(math.log(minval, 10), math.log(max(dg), 10), 16)
n, bins, histpatches = ax.hist(dg, bins=bins, fc=lsg, alpha=.75)
ax.set_xscale('log', basex=2)
if deletion_genes == "deleted-genes":
    ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%d'))
    ax.set_xlabel('No. of deleted genes in each segment')
else:
    ax.xaxis.set_major_formatter(kb_formatter)
    ax.set_xlabel('No. of deleted bases in each segment')
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%d'))
ax.set_ylabel('No. of segments')
ax.patch.set_alpha(0.1)

# Draw chromosome C2
na, nb = .45, .85
root.text((na + nb) / 2, .54, "ChrC02", ha="center")
HorizontalChromosome(root, na, nb, .5, height=.025, fc=lsg, fill=True)

order = Bed(bed).order
fp = open(deletions)
scale = lambda x: na + x * (nb - na) / 52886895
for i, row in enumerate(fp):
    i += 1
    num, genes = row.split()
    genes = genes.split("|")
    ia, a = order[genes[0]]
    ib, b = order[genes[-1]]
    mi, mx = a.start, a.end
    mi, mx = scale(mi), scale(mx)
    root.add_patch(Rectangle((mi, .475), mx - mi, .05,
                             fc="red", ec="red"))
    if i == 1:  # offset between two adjacent regions for aesthetics
        mi -= .015
    elif i == 2:
        mi += .015
    TextCircle(root, mi, .44, str(i), fc="red")

for i, mi in zip(range(1, 4), (.83, .78, .73)):
    TextCircle(root, mi, .2, str(i), fc="red")

root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()

image_name = deletion_genes + ".pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def deletion(args)
%prog deletion [deleted-genes|deleted-bases] C2-deletions boleracea.bed

Plot histogram of napus deletions. Can plot either deleted-genes or
deleted-bases. The three largest segmental deletions will be
highlighted, along with a drawing of the C2 chromosome.
3.395955
3.262703
1.040841
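The histogram in deletion() above relies on bin edges that are evenly spaced in log coordinates, so the bars have equal width once the x-axis is log-scaled. A minimal standalone sketch of that binning, assuming only numpy (the helper name is hypothetical):

import math
import numpy as np

def log_bins(minval, maxval, n=16):
    # n edges spanning [minval, maxval], evenly spaced in log10 space
    return np.logspace(math.log(minval, 10), math.log(maxval, 10), n)

print(log_bins(2, 2048))  # 16 edges from 2 to 2048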
p = OptionParser(ploidy.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x7") if len(args) != 2: sys.exit(not p.print_help()) seqidsfile, klayout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) Karyotype(fig, root, seqidsfile, klayout) fc = "darkslategrey" radius = .012 ot = -.05 # use this to adjust vertical position of the left panel TextCircle(root, .1, .9 + ot, r'$\gamma$', radius=radius, fc=fc) root.text(.1, .88 + ot, r"$\times3$", ha="center", va="top", color=fc) TextCircle(root, .08, .79 + ot, r'$\alpha$', radius=radius, fc=fc) TextCircle(root, .12, .79 + ot, r'$\beta$', radius=radius, fc=fc) root.text(.1, .77 + ot, r"$\times3\times2\times2$", ha="center", va="top", color=fc) root.text(.1, .67 + ot, r"Brassica triplication", ha="center", va="top", color=fc, size=11) root.text(.1, .65 + ot, r"$\times3\times2\times2\times3$", ha="center", va="top", color=fc) root.text(.1, .42 + ot, r"Allo-tetraploidy", ha="center", va="top", color=fc, size=11) root.text(.1, .4 + ot, r"$\times3\times2\times2\times3\times2$", ha="center", va="top", color=fc) bb = dict(boxstyle="round,pad=.5", fc="w", ec="0.5", alpha=0.5) root.text(.5, .2 + ot, r"\noindent\textit{Brassica napus}\\" "(A$\mathsf{_n}$C$\mathsf{_n}$ genome)", ha="center", size=16, color="k", bbox=bb) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "napus" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def ploidy(args)
%prog ploidy seqids layout

Build a figure that calls graphics.karyotype to illustrate the high
ploidy of the B. napus genome.
3.013797
2.797278
1.077403
from jcvi.graphics.base import red_purple as default_cm p = OptionParser(expr.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x5") if len(args) != 4: sys.exit(not p.print_help()) block, exp, layout, napusbed = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) s = Synteny(fig, root, block, napusbed, layout) # Import the expression values # Columns are: leaf-A, leaf-C, root-A, root-C fp = open(exp) data = {} for row in fp: gid, lf, rt = row.split() lf, rt = float(lf), float(rt) data[gid] = (lf, rt) rA, rB = s.rr gA = [x.accn for x in rA.genes] gC = [x.accn for x in rB.genes] A = [data.get(x, (0, 0)) for x in gA] C = [data.get(x, (0, 0)) for x in gC] A = np.array(A) C = np.array(C) A = np.transpose(A) C = np.transpose(C) d, h = .01, .1 lsg = "lightslategrey" coords = s.gg # Coordinates of the genes axes = [] for j, (y, gg) in enumerate(((.79, gA), (.24, gC))): r = s.rr[j] x = r.xstart w = r.xend - r.xstart ax = fig.add_axes([x, y, w, h]) axes.append(ax) root.add_patch(Rectangle((x - h, y - d), w + h + d, h + 2 * d, fill=False, ec=lsg, lw=1)) root.text(x - d, y + 3 * h / 4, "root", ha="right", va="center") root.text(x - d, y + h / 4, "leaf", ha="right", va="center") ty = y - 2 * d if y > .5 else y + h + 2 * d nrows = len(gg) for i, g in enumerate(gg): start, end = coords[(j, g)] sx, sy = start ex, ey = end assert sy == ey sy = sy + 2 * d if sy > .5 else sy - 2 * d root.plot(((sx + ex) / 2, x + w * (i + .5) / nrows), (sy, ty), lw=1, ls=":", color="k", alpha=.2) axA, axC = axes p = axA.pcolormesh(A, cmap=default_cm) p = axC.pcolormesh(C, cmap=default_cm) axA.set_xlim(0, len(gA)) axC.set_xlim(0, len(gC)) x, y, w, h = .35, .1, .3, .05 ax_colorbar = fig.add_axes([x, y, w, h]) fig.colorbar(p, cax=ax_colorbar, orientation='horizontal') root.text(x - d, y + h / 2, "RPKM", ha="right", va="center") root.set_xlim(0, 1) root.set_ylim(0, 1) for x in (axA, axC, root): x.set_axis_off() image_name = "napusf4b." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def expr(args)
%prog expr block exp layout napus.bed

Plot a composite figure showing synteny and the expression levels
between homeologs in two tissues - four lists of values in total. The
block file contains the gene pairs between AN and CN.
3.059624
2.86284
1.068737
p = OptionParser(pasteprepare.__doc__) p.add_option("--flank", default=5000, type="int", help="Get the seq of size on two ends [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) goodfasta, = args flank = opts.flank pf = goodfasta.rsplit(".", 1)[0] extbed = pf + ".ext.bed" sizes = Sizes(goodfasta) fw = open(extbed, "w") for bac, size in sizes.iter_sizes(): print("\t".join(str(x) for x in \ (bac, 0, min(flank, size), bac + "L")), file=fw) print("\t".join(str(x) for x in \ (bac, max(size - flank, 0), size, bac + "R")), file=fw) fw.close() fastaFromBed(extbed, goodfasta, name=True)
def pasteprepare(args)
%prog pasteprepare bacs.fasta Prepare sequences for paste.
3.35168
3.230469
1.037521
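pasteprepare() emits two flank intervals per sequence, clipped to the sequence length. A self-contained sketch of the interval arithmetic with a hypothetical helper name; coordinates are 0-based, half-open as in BED:

def flank_intervals(name, size, flank=5000):
    # up to `flank` bp off each end, suffixed "L" and "R" as above
    left = (name, 0, min(flank, size), name + "L")
    right = (name, max(size - flank, 0), size, name + "R")
    return left, right

print(flank_intervals("bac1", 12000))
# (('bac1', 0, 5000, 'bac1L'), ('bac1', 7000, 12000, 'bac1R'))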
from jcvi.formats.bed import uniq p = OptionParser(paste.__doc__) p.add_option("--maxsize", default=300000, type="int", help="Maximum size of patchers to be replaced [default: %default]") p.add_option("--prefix", help="Prefix of the new object [default: %default]") p.set_rclip(rclip=1) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) pbed, blastfile, bbfasta = args maxsize = opts.maxsize # Max DNA size to replace gap order = Bed(pbed).order beforebed, afterbed = blast_to_twobeds(blastfile, order, log=True, rclip=opts.rclip, maxsize=maxsize, flipbeds=True) beforebed = uniq([beforebed]) afbed = Bed(beforebed) bfbed = Bed(afterbed) shuffle_twobeds(afbed, bfbed, bbfasta, prefix=opts.prefix)
def paste(args)
%prog paste flanks.bed flanks_vs_assembly.blast backbone.fasta

Paste good sequences into the final assembly.
4.693723
4.387397
1.06982
p = OptionParser(eject.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) candidates, chrfasta = args sizesfile = Sizes(chrfasta).filename cbedfile = complementBed(candidates, sizesfile) cbed = Bed(cbedfile) for b in cbed: b.accn = b.seqid b.score = 1000 b.strand = '+' cbed.print_to_file()
def eject(args)
%prog eject candidates.bed chr.fasta Eject scaffolds from assembly, using the range identified by closest().
4.983202
3.701315
1.346333
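eject() leans on complementBed from BEDTools to turn the candidate ranges into everything outside them. A hedged pure-Python sketch of what that complement computes on a single sequence (helper name is hypothetical):

def complement(intervals, size):
    # gaps between sorted (start, end) intervals, clipped to [0, size)
    out, prev = [], 0
    for start, end in sorted(intervals):
        if start > prev:
            out.append((prev, start))
        prev = max(prev, end)
    if prev < size:
        out.append((prev, size))
    return out

print(complement([(100, 200), (500, 800)], 1000))
# [(0, 100), (200, 500), (800, 1000)]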
p = OptionParser(closest.__doc__) p.add_option("--om", default=False, action="store_true", help="The bedfile is OM blocks [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) candidates, gapsbed, fastafile = args sizes = Sizes(fastafile).mapping bed = Bed(candidates) ranges = [] for b in bed: r = range_parse(b.accn) if opts.om else b ranges.append([r.seqid, r.start, r.end]) gapsbed = Bed(gapsbed) granges = [(x.seqid, x.start, x.end) for x in gapsbed] ranges = range_merge(ranges) for r in ranges: a = range_closest(granges, r) b = range_closest(granges, r, left=False) seqid = r[0] if a is not None and a[0] != seqid: a = None if b is not None and b[0] != seqid: b = None mmin = 1 if a is None else a[1] mmax = sizes[seqid] if b is None else b[2] print("\t".join(str(x) for x in (seqid, mmin - 1, mmax)))
def closest(args)
%prog closest candidates.bed gaps.bed fastafile Identify the nearest gaps flanking suggested regions.
3.188433
2.867258
1.112015
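closest() calls jcvi's range_closest on both sides of each merged region. A standalone sketch of the flanking-gap lookup, under the assumption that a "flanking" gap must lie entirely to one side of the region (the helper is hypothetical):

def flanking_gaps(gaps, start, end):
    # nearest gap entirely left of `start` and entirely right of `end`
    gaps = sorted(gaps)
    lefts = [g for g in gaps if g[1] <= start]
    rights = [g for g in gaps if g[0] >= end]
    return (lefts[-1] if lefts else None), (rights[0] if rights else None)

print(flanking_gaps([(10, 20), (50, 60), (90, 95)], 30, 80))
# ((10, 20), (90, 95))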
from jcvi.formats.agp import mask, bed from jcvi.formats.sizes import agp p = OptionParser(insert.__doc__) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) candidates, gapsbed, chrfasta, unplacedfasta = args refinedbed = refine([candidates, gapsbed]) sizes = Sizes(unplacedfasta).mapping cbed = Bed(candidates) corder = cbed.order gbed = Bed(gapsbed) gorder = gbed.order gpbed = Bed() gappositions = {} # (chr, start, end) => gapid fp = open(refinedbed) gap_to_scf = defaultdict(list) seen = set() for row in fp: atoms = row.split() if len(atoms) <= 6: continue unplaced = atoms[3] strand = atoms[5] gapid = atoms[9] if gapid not in seen: seen.add(gapid) gi, gb = gorder[gapid] gpbed.append(gb) gappositions[(gb.seqid, gb.start, gb.end)] = gapid gap_to_scf[gapid].append((unplaced, strand)) gpbedfile = "candidate.gaps.bed" gpbed.print_to_file(gpbedfile, sorted=True) agpfile = agp([chrfasta]) maskedagpfile = mask([agpfile, gpbedfile]) maskedbedfile = maskedagpfile.rsplit(".", 1)[0] + ".bed" bed([maskedagpfile, "--outfile={0}".format(maskedbedfile)]) mbed = Bed(maskedbedfile) finalbed = Bed() for b in mbed: sid = b.seqid key = (sid, b.start, b.end) if key not in gappositions: finalbed.add("{0}\n".format(b)) continue gapid = gappositions[key] scfs = gap_to_scf[gapid] # For scaffolds placed in the same gap, sort according to positions scfs.sort(key=lambda x: corder[x[0]][1].start + corder[x[0]][1].end) for scf, strand in scfs: size = sizes[scf] finalbed.add("\t".join(str(x) for x in \ (scf, 0, size, sid, 1000, strand))) finalbedfile = "final.bed" finalbed.print_to_file(finalbedfile) # Clean-up toclean = [gpbedfile, agpfile, maskedagpfile, maskedbedfile] FileShredder(toclean)
def insert(args)
%prog insert candidates.bed gaps.bed chrs.fasta unplaced.fasta Insert scaffolds into assembly.
3.53912
3.216763
1.100212
from jcvi.formats.bed import uniq from jcvi.utils.iter import pairwise p = OptionParser(gaps.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ombed, fastafile = args ombed = uniq([ombed]) bed = Bed(ombed) for a, b in pairwise(bed): om_a = (a.seqid, a.start, a.end, "+") om_b = (b.seqid, b.start, b.end, "+") ch_a = range_parse(a.accn) ch_b = range_parse(b.accn) ch_a = (ch_a.seqid, ch_a.start, ch_a.end, "+") ch_b = (ch_b.seqid, ch_b.start, ch_b.end, "+") om_dist, x = range_distance(om_a, om_b, distmode="ee") ch_dist, x = range_distance(ch_a, ch_b, distmode="ee") if om_dist <= 0 and ch_dist <= 0: continue print(a) print(b) print(om_dist, ch_dist)
def gaps(args)
%prog gaps OM.bed fastafile Create patches around OM gaps.
2.601868
2.39769
1.085156
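gaps() keeps only adjacent block pairs that are separated on both the optical map and the chromosome. A hedged sketch of the end-to-end ("ee") distance as it appears to be used here - positive for separated ranges, non-positive for touching or overlapping ones; range_distance's exact semantics may differ:

def ee_dist(a, b):
    # a, b are (seqid, start, end); None when on different sequences
    (aseq, astart, aend), (bseq, bstart, bend) = a, b
    if aseq != bseq:
        return None
    return max(astart, bstart) - min(aend, bend)

print(ee_dist(("c1", 100, 200), ("c1", 350, 400)))  # 150, a real gap
print(ee_dist(("c1", 100, 200), ("c1", 150, 400)))  # -50, overlap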
p = OptionParser(tips.__doc__) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) pbedfile, cbedfile, sizesfile, bbfasta = args pbed = Bed(pbedfile, sorted=False) cbed = Bed(cbedfile, sorted=False) complements = dict() for object, beds in groupby(cbed, key=lambda x: x.seqid): beds = list(beds) complements[object] = beds sizes = Sizes(sizesfile).mapping bbsizes = Sizes(bbfasta).mapping tbeds = [] for object, beds in groupby(pbed, key=lambda x: x.accn): beds = list(beds) startbed, endbed = beds[0], beds[-1] start_id, end_id = startbed.seqid, endbed.seqid if startbed.start == 1: start_id = None if endbed.end == sizes[end_id]: end_id = None print(object, start_id, end_id, file=sys.stderr) if start_id: b = complements[start_id][0] b.accn = object tbeds.append(b) tbeds.append(BedLine("\t".join(str(x) for x in \ (object, 0, bbsizes[object], object, 1000, "+")))) if end_id: b = complements[end_id][-1] b.accn = object tbeds.append(b) tbed = Bed() tbed.extend(tbeds) tbedfile = "tips.bed" tbed.print_to_file(tbedfile)
def tips(args)
%prog tips patchers.bed complements.bed original.fasta backbone.fasta Append telomeric sequences based on patchers and complements.
2.672838
2.517512
1.061698
p = OptionParser(fill.__doc__)
p.add_option("--extend", default=2000, type="int",
             help="Extend seq flanking the gaps [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

gapsbed, badfasta = args
Ext = opts.extend

gapdist = 2 * Ext + 1  # This prevents adjacent replacement ranges from intersecting
gapsbed = mergeBed(gapsbed, d=gapdist, nms=True)

bed = Bed(gapsbed)
sizes = Sizes(badfasta).mapping
pf = gapsbed.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
fw = open(extbed, "w")
for b in bed:
    gapname = b.accn
    start, end = max(0, b.start - Ext - 1), b.start - 1
    print("\t".join(str(x) for x in \
                    (b.seqid, start, end, gapname + "L")), file=fw)
    start, end = b.end, min(sizes[b.seqid], b.end + Ext)
    print("\t".join(str(x) for x in \
                    (b.seqid, start, end, gapname + "R")), file=fw)
fw.close()

fastaFromBed(extbed, badfasta, name=True)
def fill(args)
%prog fill gaps.bed bad.fasta Perform gap filling of one assembly (bad) using sequences from another.
3.836496
3.389397
1.131911
from jcvi.apps.align import blast from jcvi.formats.fasta import SeqIO p = OptionParser(install.__doc__) p.set_rclip(rclip=1) p.add_option("--maxsize", default=300000, type="int", help="Maximum size of patchers to be replaced [default: %default]") p.add_option("--prefix", help="Prefix of the new object [default: %default]") p.add_option("--strict", default=False, action="store_true", help="Only update if replacement has no gaps [default: %default]") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) pbed, pfasta, bbfasta, altfasta = args maxsize = opts.maxsize # Max DNA size to replace gap rclip = opts.rclip blastfile = blast([altfasta, pfasta,"--wordsize=100", "--pctid=99"]) order = Bed(pbed).order beforebed, afterbed = blast_to_twobeds(blastfile, order, rclip=rclip, maxsize=maxsize) beforefasta = fastaFromBed(beforebed, bbfasta, name=True, stranded=True) afterfasta = fastaFromBed(afterbed, altfasta, name=True, stranded=True) # Exclude the replacements that contain more Ns than before ah = SeqIO.parse(beforefasta, "fasta") bh = SeqIO.parse(afterfasta, "fasta") count_Ns = lambda x: x.seq.count('n') + x.seq.count('N') exclude = set() for arec, brec in zip(ah, bh): an = count_Ns(arec) bn = count_Ns(brec) if opts.strict: if bn == 0: continue elif bn < an: continue id = arec.id exclude.add(id) logging.debug("Ignore {0} updates because of decreasing quality."\ .format(len(exclude))) abed = Bed(beforebed, sorted=False) bbed = Bed(afterbed, sorted=False) abed = [x for x in abed if x.accn not in exclude] bbed = [x for x in bbed if x.accn not in exclude] abedfile = "before.filtered.bed" bbedfile = "after.filtered.bed" afbed = Bed() afbed.extend(abed) bfbed = Bed() bfbed.extend(bbed) afbed.print_to_file(abedfile) bfbed.print_to_file(bbedfile) shuffle_twobeds(afbed, bfbed, bbfasta, prefix=opts.prefix)
def install(args)
%prog install patchers.bed patchers.fasta backbone.fasta alt.fasta

Install patches into the backbone, using sequences from an alternative
assembly. The patch sequences are generated via
jcvi.assembly.patch.fill(). The output is a bedfile that can be
converted to AGP using jcvi.formats.agp.frombed().
3.602128
3.380269
1.065634
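The N-count gate in install() is easy to restate in isolation. A minimal sketch with hypothetical names that mirrors the exclude logic above; note that equal N counts also reject, matching the `bn < an` keep test:

def reject(before_seq, after_seq, strict=False):
    # reject a replacement that brings in as many or more Ns than the
    # span it replaces; with strict=True, any N at all rejects
    count_Ns = lambda s: s.upper().count("N")
    an, bn = count_Ns(before_seq), count_Ns(after_seq)
    return bn > 0 if strict else bn >= an

print(reject("ACGTNN", "ACGTAC"))               # False: fewer Ns, keep
print(reject("ACGTNN", "ACGTNA", strict=True))  # True: any N fails strict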
p = OptionParser(refine.__doc__) p.add_option("--closest", default=False, action="store_true", help="In case of no gaps, use closest [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) breakpointsbed, gapsbed = args ncols = len(open(breakpointsbed).next().split()) logging.debug("File {0} contains {1} columns.".format(breakpointsbed, ncols)) cmd = "intersectBed -wao -a {0} -b {1}".format(breakpointsbed, gapsbed) pf = "{0}.{1}".format(breakpointsbed.split(".")[0], gapsbed.split(".")[0]) ingapsbed = pf + ".bed" sh(cmd, outfile=ingapsbed) fp = open(ingapsbed) data = [x.split() for x in fp] nogapsbed = pf + ".nogaps.bed" largestgapsbed = pf + ".largestgaps.bed" nogapsfw = open(nogapsbed, "w") largestgapsfw = open(largestgapsbed, "w") for b, gaps in groupby(data, key=lambda x: x[:ncols]): gaps = list(gaps) gap = gaps[0] if len(gaps) == 1 and gap[-1] == "0": assert gap[-3] == "." print("\t".join(b), file=nogapsfw) continue gaps = [(int(x[-1]), x) for x in gaps] maxgap = max(gaps)[1] print("\t".join(maxgap), file=largestgapsfw) nogapsfw.close() largestgapsfw.close() beds = [largestgapsbed] toclean = [nogapsbed, largestgapsbed] if opts.closest: closestgapsbed = pf + ".closestgaps.bed" cmd = "closestBed -a {0} -b {1} -d".format(nogapsbed, gapsbed) sh(cmd, outfile=closestgapsbed) beds += [closestgapsbed] toclean += [closestgapsbed] else: pointbed = pf + ".point.bed" pbed = Bed() bed = Bed(nogapsbed) for b in bed: pos = (b.start + b.end) / 2 b.start, b.end = pos, pos pbed.append(b) pbed.print_to_file(pointbed) beds += [pointbed] toclean += [pointbed] refinedbed = pf + ".refined.bed" FileMerger(beds, outfile=refinedbed).merge() # Clean-up FileShredder(toclean) return refinedbed
def refine(args)
%prog refine breakpoints.bed gaps.bed Find gaps within or near breakpoint region. For breakpoint regions with no gaps, there are two options: - Break in the middle of the region - Break at the closest gap (--closest)
2.534267
2.411638
1.050849
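refine() reduces the intersectBed -wao output to at most one gap per breakpoint. A sketch of that reduction, assuming rows arrive grouped by breakpoint (which -wao output guarantees) and that the last field is the overlap length:

from itertools import groupby

def largest_gap(rows, ncols):
    # rows are split -wao lines; the first ncols fields identify the
    # breakpoint, the last field is the overlap with a gap
    for bp, grp in groupby(rows, key=lambda x: x[:ncols]):
        grp = list(grp)
        if len(grp) == 1 and grp[0][-1] == "0":
            yield bp, None  # this breakpoint hit no gap at all
        else:
            yield bp, max(grp, key=lambda x: int(x[-1]))

rows = [["c1", "10", "99", "c1", "40", "60", "20"],
        ["c1", "10", "99", "c1", "70", "95", "25"]]
print(list(largest_gap(rows, 3)))  # keeps the 25-bp overlap row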
from jcvi.formats.bed import uniq p = OptionParser(patcher.__doc__) p.add_option("--backbone", default="OM", help="Prefix of the backbone assembly [default: %default]") p.add_option("--object", default="object", help="New object name [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) backbonebed, otherbed = args backbonebed = uniq([backbonebed]) otherbed = uniq([otherbed]) pf = backbonebed.split(".")[0] key = lambda x: (x.seqid, x.start, x.end) # Make a uniq bed keeping backbone at redundant intervals cmd = "intersectBed -v -wa" cmd += " -a {0} -b {1}".format(otherbed, backbonebed) outfile = otherbed.rsplit(".", 1)[0] + ".not." + backbonebed sh(cmd, outfile=outfile) uniqbed = Bed() uniqbedfile = pf + ".merged.bed" uniqbed.extend(Bed(backbonebed)) uniqbed.extend(Bed(outfile)) uniqbed.print_to_file(uniqbedfile, sorted=True) # Condense adjacent intervals, allow some chaining bed = uniqbed key = lambda x: range_parse(x.accn).seqid bed_fn = pf + ".patchers.bed" bed_fw = open(bed_fn, "w") for k, sb in groupby(bed, key=key): sb = list(sb) chr, start, end, strand = merge_ranges(sb) print("\t".join(str(x) for x in \ (chr, start, end, opts.object, 1000, strand)), file=bed_fw) bed_fw.close()
def patcher(args)
%prog patcher backbone.bed other.bed

Given an optical map alignment, prepare the patchers. Use --backbone to
suggest which assembly is the major one; the patchers will be extracted
from the other assembly.
4.08193
3.826408
1.066779
p = OptionParser(treds.__doc__)
p.add_option("--csv", default=False, action="store_true",
             help="Also write `meta.csv`")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

tredresults, = args
df = pd.read_csv(tredresults, sep="\t")

tredsfile = datafile("TREDs.meta.csv")
tf = pd.read_csv(tredsfile)

tds = list(tf["abbreviation"])
ids = list(tf["id"])
tags = ["SampleKey"]
final_columns = ["SampleKey"]
afs = []
for td, id in zip(tds, ids):
    tag1 = "{}.1".format(td)
    tag2 = "{}.2".format(td)
    if tag2 not in df:
        afs.append("{}")
        continue
    tags.append(tag2)
    final_columns.append(id)
    a = np.array(list(df[tag1]) + list(df[tag2]))
    counts = alleles_to_counts(a)
    af = counts_to_af(counts)
    afs.append(af)

tf["allele_frequency"] = afs
metafile = "TREDs_{}_SEARCH.meta.tsv".format(timestamp())
tf.to_csv(metafile, sep="\t", index=False)
logging.debug("File `{}` written.".format(metafile))
if opts.csv:
    metacsvfile = metafile.rsplit(".", 1)[0] + ".csv"
    tf.to_csv(metacsvfile, index=False)
    logging.debug("File `{}` written.".format(metacsvfile))

pp = df[tags]
pp.columns = final_columns
# Use a distinct name for the output path: assigning to `datafile` would
# shadow the imported datafile() helper called above and raise
# UnboundLocalError
data_tsv = "TREDs_{}_SEARCH.data.tsv".format(timestamp())
pp.to_csv(data_tsv, sep="\t", index=False)
logging.debug("File `{}` written.".format(data_tsv))

mask([data_tsv, metafile])
def treds(args)
%prog treds hli.tred.tsv Compile allele_frequency for TREDs results. Write data.tsv, meta.tsv and mask.tsv in one go.
2.926023
2.679715
1.091916
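treds() pools the `.1` and `.2` allele columns of each TRED before computing a frequency string. A hedged sketch of that compilation; the exact output format of jcvi's counts_to_af may differ:

import numpy as np
from collections import Counter

def allele_frequency(allele1, allele2):
    # pool both alleles per sample, drop missing calls (-1), and render
    # counts as a compact "{allele:count}" string
    a = np.array(list(allele1) + list(allele2))
    counts = Counter(int(x) for x in a if x >= 0)
    return "{" + ",".join(
        "{}:{}".format(k, v) for k, v in sorted(counts.items())) + "}"

print(allele_frequency([17, 18, -1], [17, 20, 17]))  # {17:3,18:1,20:1}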
p = OptionParser(stutter.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) vcf, = args pf = op.basename(vcf).split(".")[0] execid, sampleid = pf.split("_") C = "vcftools --remove-filtered-all --min-meanDP 10" C += " --gzvcf {} --out {}".format(vcf, pf) C += " --indv {}".format(sampleid) info = pf + ".INFO" if need_update(vcf, info): cmd = C + " --get-INFO MOTIF --get-INFO RL" sh(cmd) allreads = pf + ".ALLREADS.FORMAT" if need_update(vcf, allreads): cmd = C + " --extract-FORMAT-info ALLREADS" sh(cmd) q = pf + ".Q.FORMAT" if need_update(vcf, q): cmd = C + " --extract-FORMAT-info Q" sh(cmd) outfile = pf + ".STUTTER" if need_update((info, allreads, q), outfile): cmd = "cut -f1,2,5,6 {}".format(info) cmd += r" | sed -e 's/\t/_/g'" cmd += " | paste - {} {}".format(allreads, q) cmd += " | cut -f1,4,7" sh(cmd, outfile=outfile)
def stutter(args)
%prog stutter a.vcf.gz Extract info from lobSTR vcf file. Generates a file that has the following fields: CHR, POS, MOTIF, RL, ALLREADS, Q
3.926034
3.676463
1.067884
p = OptionParser(filtervcf.__doc__) p.set_home("lobstr", default="/mnt/software/lobSTR") p.set_aws_opts(store="hli-mv-data-science/htang/str") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) samples, = args lhome = opts.lobstr_home store = opts.output_path if samples.endswith((".vcf", ".vcf.gz")): vcffiles = [samples] else: vcffiles = [x.strip() for x in must_open(samples)] vcffiles = [x for x in vcffiles if ".filtered." not in x] run_args = [(x, lhome, x.startswith("s3://") and store) for x in vcffiles] cpus = min(opts.cpus, len(run_args)) p = Pool(processes=cpus) for res in p.map_async(run_filter, run_args).get(): continue
def filtervcf(args)
%prog filtervcf NA12878.hg38.vcf.gz

Filter lobSTR VCF using the script shipped with lobSTR. The input can
also be a list of VCF files.
4.495221
4.091889
1.098568
p = OptionParser(meta.__doc__) p.add_option("--cutoff", default=.5, type="float", help="Percent observed required (chrY half cutoff)") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) binfile, sampleids, strids, wobed = args cutoff = opts.cutoff af_file = "allele_freq" if need_update(binfile, af_file): df, m, samples, loci = read_binfile(binfile, sampleids, strids) nalleles = len(samples) fw = must_open(af_file, "w") for i, locus in enumerate(loci): a = m[:, i] counts = alleles_to_counts(a) af = counts_to_af(counts) seqid = locus.split("_")[0] remove = counts_filter(counts, nalleles, seqid, cutoff=cutoff) print("\t".join((locus, af, remove)), file=fw) fw.close() logging.debug("Load gene intersections from `{}`".format(wobed)) fp = open(wobed) gene_map = defaultdict(set) for row in fp: chr1, start1, end1, chr2, start2, end2, name, ov = row.split() gene_map[(chr1, start1)] |= set(name.split(",")) for k, v in gene_map.items(): non_enst = sorted(x for x in v if not x.startswith("ENST")) #enst = sorted(x.rsplit(".", 1)[0] for x in v if x.startswith("ENST")) gene_map[k] = ",".join(non_enst) TREDS, df = read_treds() metafile = "STRs_{}_SEARCH.meta.tsv".format(timestamp()) write_meta(af_file, gene_map, TREDS, filename=metafile) logging.debug("File `{}` written.".format(metafile))
def meta(args)
%prog meta data.bin samples STR.ids STR-exons.wo.bed

Compute allele frequencies and prune sites based on missingness.

Filter the subset of loci that satisfy:
1. no redundancy (unique chr:pos)
2. variable (n_alleles > 1)
3. low level of missing data (>= 50% autosomal + X, > 25% for Y)

Write a meta file with the following info:
1. id
2. title
3. gene_name
4. variant_type
5. motif
6. allele_frequency

`STR-exons.wo.bed` can be generated like this:
$ tail -n 694105 /mnt/software/lobSTR/hg38/index.tab | cut -f1-3 > all-STR.bed
$ intersectBed -a all-STR.bed -b all-exons.bed -wo > STR-exons.wo.bed
4.435167
4.193947
1.057516
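The pruning rule in meta() can be sketched directly from the docstring: a locus survives only if it is variable and observed often enough, with the cutoff halved on chrY where only males carry an allele. The helper below is a hypothetical inverse of counts_filter, which may apply extra checks:

def keep_locus(counts, nalleles, seqid, cutoff=.5):
    # counts maps allele -> observation count, with -1 meaning missing
    alleles = [k for k in counts if k >= 0]
    if len(alleles) < 2:
        return False  # monomorphic: fails the "variable" test
    observed = sum(v for k, v in counts.items() if k >= 0)
    need = cutoff / 2 if seqid == "chrY" else cutoff
    return observed >= need * nalleles

print(keep_locus({17: 600, 18: 300, -1: 100}, 1000, "chr1"))  # True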
p = OptionParser(bin.__doc__) p.add_option("--dtype", choices=("float32", "int32"), help="dtype of the matrix") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) tsvfile, = args dtype = opts.dtype if dtype is None: # Guess dtype = np.int32 if "data" in tsvfile else np.float32 else: dtype = np.int32 if dtype == "int32" else np.float32 print("dtype: {}".format(dtype), file=sys.stderr) fp = open(tsvfile) next(fp) arrays = [] for i, row in enumerate(fp): a = np.fromstring(row, sep="\t", dtype=dtype) a = a[1:] arrays.append(a) print(i, a, file=sys.stderr) print("Merging", file=sys.stderr) b = np.concatenate(arrays) print("Binary shape: {}".format(b.shape), file=sys.stderr) binfile = tsvfile.rsplit(".", 1)[0] + ".bin" b.tofile(binfile)
def bin(args)
%prog bin data.tsv

Convert TSV to binary format.
2.408303
2.31014
1.042493
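bin() writes a flat row-major matrix with no header, so the read side must supply the shape and dtype itself. A minimal sketch of reading it back, assuming a samples-by-loci layout that matches the write order above (the helper name is hypothetical):

import numpy as np

def read_flat_matrix(binfile, nsamples, nloci, dtype=np.int32):
    # the binary carries no metadata: shape and dtype must agree with
    # whatever bin() used when writing
    m = np.fromfile(binfile, dtype=dtype)
    assert m.size == nsamples * nloci, "shape disagrees with the write side"
    return m.reshape(nsamples, nloci)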
p = OptionParser(data.__doc__) p.add_option("--notsv", default=False, action="store_true", help="Do not write data.tsv") opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) databin, sampleids, strids, metafile = args final_columns, percentiles = read_meta(metafile) df, m, samples, loci = read_binfile(databin, sampleids, strids) # Clean the data m %= 1000 # Get the larger of the two alleles m[m == 999] = -1 # Missing data final = set(final_columns) remove = [] for i, locus in enumerate(loci): if locus not in final: remove.append(locus) continue pf = "STRs_{}_SEARCH".format(timestamp()) filteredstrids = "{}.STR.ids".format(pf) fw = open(filteredstrids, "w") print("\n".join(final_columns), file=fw) fw.close() logging.debug("Dropped {} columns; Retained {} columns (`{}`)".\ format(len(remove), len(final_columns), filteredstrids)) # Remove low-quality columns! df.drop(remove, inplace=True, axis=1) df.columns = final_columns filtered_bin = "{}.data.bin".format(pf) if need_update(databin, filtered_bin): m = df.as_matrix() m.tofile(filtered_bin) logging.debug("Filtered binary matrix written to `{}`".format(filtered_bin)) # Write data output filtered_tsv = "{}.data.tsv".format(pf) if not opts.notsv and need_update(databin, filtered_tsv): df.to_csv(filtered_tsv, sep="\t", index_label="SampleKey")
def data(args)
%prog data data.bin samples.ids STR.ids meta.tsv Make data.tsv based on meta.tsv.
4.363021
3.894823
1.12021
p = OptionParser(mask.__doc__) opts, args = p.parse_args(args) if len(args) not in (2, 4): sys.exit(not p.print_help()) if len(args) == 4: databin, sampleids, strids, metafile = args df, m, samples, loci = read_binfile(databin, sampleids, strids) mode = "STRs" elif len(args) == 2: databin, metafile = args df = pd.read_csv(databin, sep="\t", index_col=0) m = df.as_matrix() samples = df.index loci = list(df.columns) mode = "TREDs" pf = "{}_{}_SEARCH".format(mode, timestamp()) final_columns, percentiles = read_meta(metafile) maskfile = pf + ".mask.tsv" run_args = [] for i, locus in enumerate(loci): a = m[:, i] percentile = percentiles[locus] run_args.append((i, a, percentile)) if mode == "TREDs" or need_update(databin, maskfile): cpus = min(8, len(run_args)) write_mask(cpus, samples, final_columns, run_args, filename=maskfile) logging.debug("File `{}` written.".format(maskfile))
def mask(args)
%prog mask data.bin samples.ids STR.ids meta.tsv OR %prog mask data.tsv meta.tsv Compute P-values based on meta and data. The `data.bin` should be the matrix containing filtered loci and the output mask.tsv will have the same dimension.
4.154766
3.779213
1.099373
p = OptionParser(mergecsv.__doc__) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) csvfiles = args arrays = [] samplekeys = [] for csvfile in csvfiles: samplekey = op.basename(csvfile).split(".")[0] a = np.fromfile(csvfile, sep=",", dtype=np.int32) x1 = a[::2] x2 = a[1::2] a = x1 * 1000 + x2 a[a < 0] = -1 arrays.append(a) samplekeys.append(samplekey) print(samplekey, a, file=sys.stderr) print("Merging", file=sys.stderr) b = np.concatenate(arrays) b.tofile("data.bin") fw = open("samples", "w") print("\n".join(samplekeys), file=fw) fw.close()
def mergecsv(args)
%prog mergecsv *.csv

Combine CSV files into a single binary array.
2.543311
2.357541
1.078798
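The a1 * 1000 + a2 packing in mergecsv() is worth spelling out, since data() above relies on its floor-division behavior (`m %= 1000; m[m == 999] = -1`). A standalone round-trip sketch with hypothetical helper names:

import numpy as np

def pack_alleles(a1, a2):
    # fold two allele calls per locus into one int32
    a1 = np.asarray(a1, dtype=np.int32)
    a2 = np.asarray(a2, dtype=np.int32)
    packed = a1 * 1000 + a2
    packed[packed < 0] = -1  # any negative (missing) call poisons the pair
    return packed

def unpack_alleles(packed):
    # floor semantics make -1 unpack to (-1, 999); data() exploits this
    return packed // 1000, packed % 1000

p = pack_alleles([12, 30, -1], [15, 30, 7])
print(p)                  # [12015 30030    -1]
print(unpack_alleles(p))  # (array([12, 30, -1]), array([ 15,  30, 999]))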
p = OptionParser(compilevcf.__doc__) p.add_option("--db", default="hg38", help="Use these lobSTR db") p.add_option("--nofilter", default=False, action="store_true", help="Do not filter the variants") p.set_home("lobstr") p.set_cpus() p.set_aws_opts(store="hli-mv-data-science/htang/str-data") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) samples, = args workdir = opts.workdir store = opts.output_path cleanup = not opts.nocleanup filtered = not opts.nofilter dbs = opts.db.split(",") cwd = os.getcwd() mkdir(workdir) os.chdir(workdir) samples = op.join(cwd, samples) stridsfile = "STR.ids" if samples.endswith((".vcf", ".vcf.gz")): vcffiles = [samples] else: vcffiles = [x.strip() for x in must_open(samples)] if not op.exists(stridsfile): ids = [] for db in dbs: ids.extend(STRFile(opts.lobstr_home, db=db).ids) uids = uniqify(ids) logging.debug("Combined: {} Unique: {}".format(len(ids), len(uids))) fw = open(stridsfile, "w") print("\n".join(uids), file=fw) fw.close() run_args = [(x, filtered, cleanup, store) for x in vcffiles] cpus = min(opts.cpus, len(run_args)) p = Pool(processes=cpus) for res in p.map_async(run_compile, run_args).get(): continue
def compilevcf(args)
%prog compilevcf samples.csv

Compile VCF results into a master spreadsheet.
3.991339
3.927225
1.016326
from jcvi.utils.table import write_csv p = OptionParser(ystr.__doc__) p.set_home("lobstr") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) vcffile, = args si = STRFile(opts.lobstr_home, db="hg38-named") register = si.register header = "Marker|Reads|Ref|Genotype|Motif".split("|") contents = [] fp = must_open(vcffile) reader = vcf.Reader(fp) simple_register = {} for record in reader: name = register[(record.CHROM, record.POS)] info = record.INFO ref = int(float(info["REF"])) rpa = info.get("RPA", ref) if isinstance(rpa, list): rpa = "|".join(str(int(float(x))) for x in rpa) ru = info["RU"] simple_register[name] = rpa for sample in record.samples: contents.append((name, sample["ALLREADS"], ref, rpa, ru)) # Multi-part markers a, b, c = "DYS389I", "DYS389B.1", "DYS389B" if a in simple_register and b in simple_register: simple_register[c] = int(simple_register[a]) + int(simple_register[b]) # Multi-copy markers mm = ["DYS385", "DYS413", "YCAII"] for m in mm: ma, mb = m + 'a', m + 'b' if ma not in simple_register or mb not in simple_register: simple_register[ma] = simple_register[mb] = None del simple_register[ma] del simple_register[mb] continue if simple_register[ma] > simple_register[mb]: simple_register[ma], simple_register[mb] = \ simple_register[mb], simple_register[ma] write_csv(header, contents, sep=" ") print("[YSEARCH]") build_ysearch_link(simple_register) print("[YFILER]") build_yhrd_link(simple_register, panel=YHRD_YFILER) print("[YFILERPLUS]") build_yhrd_link(simple_register, panel=YHRD_YFILERPLUS) print("[YSTR-ALL]") build_yhrd_link(simple_register, panel=USYSTR_ALL)
def ystr(args)
%prog ystr chrY.vcf

Print out Y-STR info given a VCF. Marker names are extracted from the
tab file.
4.134412
4.023084
1.027672
p = OptionParser(liftover.__doc__) p.add_option("--checkvalid", default=False, action="store_true", help="Check minscore, period and length") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) refbed, fastafile = args genome = pyfasta.Fasta(fastafile) edits = [] fp = open(refbed) for i, row in enumerate(fp): s = STRLine(row) seq = genome[s.seqid][s.start - 1: s.end].upper() s.motif = get_motif(seq, len(s.motif)) s.fix_counts(seq) if opts.checkvalid and not s.is_valid(): continue edits.append(s) if i % 10000 == 0: print(i, "lines read", file=sys.stderr) edits = natsorted(edits, key=lambda x: (x.seqid, x.start)) for e in edits: print(str(e))
def liftover(args)
%prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa LiftOver CODIS/Y-STR markers.
3.216331
3.057996
1.051777
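liftover() recomputes each motif from the new reference with get_motif. A hedged sketch of one plausible normalization - the lexicographically smallest rotation of the repeat unit - though jcvi's get_motif may apply additional rules (e.g. strand handling):

def canonical_motif(seq, motif_len):
    # equivalent motifs (AGC/GCA/CAG) compare equal after rotation
    unit = seq[:motif_len].upper()
    rotations = (unit[i:] + unit[:i] for i in range(len(unit)))
    return min(rotations)

print(canonical_motif("GCAGCAGCA", 3))  # AGC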
from jcvi.apps.base import iglob cparams = "1 1 2 80 5 200 2000" p = OptionParser(trf.__doc__) p.add_option("--mismatch", default=31, type="int", help="Mismatch and gap penalty") p.add_option("--minscore", default=MINSCORE, type="int", help="Minimum score to report") p.add_option("--period", default=6, type="int", help="Maximum period to report") p.add_option("--lobstr", default=False, action="store_true", help="Generate output for lobSTR") p.add_option("--telomeres", default=False, action="store_true", help="Run telomere search: minscore=140 period=7") p.add_option("--centromeres", default=False, action="store_true", help="Run centromere search: {}".format(cparams)) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) outdir, = args minlength = opts.minscore / 2 mm = MakeManager() if opts.telomeres: opts.minscore, opts.period = 140, 7 params = "2 {0} {0} 80 10 {1} {2}".\ format(opts.mismatch, opts.minscore, opts.period).split() if opts.centromeres: params = cparams.split() bedfiles = [] for fastafile in natsorted(iglob(outdir, "*.fa,*.fasta")): pf = op.basename(fastafile).split(".")[0] cmd1 = "trf {0} {1} -d -h".format(fastafile, " ".join(params)) datfile = op.basename(fastafile) + "." + ".".join(params) + ".dat" bedfile = "{0}.trf.bed".format(pf) cmd2 = "cat {} | grep -v ^Parameters".format(datfile) if opts.lobstr: cmd2 += " | awk '($8 >= {} && $8 <= {})'".\ format(minlength, READLEN - minlength) else: cmd2 += " | awk '($8 >= 0)'" cmd2 += " | sed 's/ /\\t/g'" cmd2 += " | awk '{{print \"{0}\\t\" $0}}' > {1}".format(pf, bedfile) mm.add(fastafile, datfile, cmd1) mm.add(datfile, bedfile, cmd2) bedfiles.append(bedfile) bedfile = "trf.bed" cmd = "cat {0} > {1}".format(" ".join(natsorted(bedfiles)), bedfile) mm.add(bedfiles, bedfile, cmd) mm.write()
def trf(args)
%prog trf outdir Run TRF on FASTA files.
3.095117
3.032254
1.020732
p = OptionParser(batchlobstr.__doc__) p.add_option("--sep", default=",", help="Separator for building commandline") p.set_home("lobstr", default="s3://hli-mv-data-science/htang/str-build/lobSTR/") p.set_aws_opts(store="hli-mv-data-science/htang/str-data") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) samplesfile, = args store = opts.output_path computed = ls_s3(store) fp = open(samplesfile) skipped = total = 0 for row in fp: total += 1 sample, s3file = row.strip().split(",")[:2] exec_id, sample_id = sample.split("_") bamfile = s3file.replace(".gz", "").replace(".vcf", ".bam") gzfile = sample + ".{0}.vcf.gz".format("hg38") if gzfile in computed: skipped += 1 continue print(opts.sep.join("python -m jcvi.variation.str lobstr".split() + \ ["hg38", "--input_bam_path", bamfile, "--output_path", store, "--sample_id", sample_id, "--workflow_execution_id", exec_id, "--lobstr_home", opts.lobstr_home, "--workdir", opts.workdir])) fp.close() logging.debug("Total skipped: {0}".format(percentage(skipped, total)))
def batchlobstr(args)
%prog batchlobstr samples.csv

Run lobSTR sequentially on a list of samples. Each line contains:
sample-name,s3-location
5.047091
4.937597
1.022175
from jcvi.formats.sam import get_minibam # See `Format-lobSTR-database.ipynb` for a list of TREDs for validation INCLUDE = ["HD", "SBMA", "SCA1", "SCA2", "SCA8", "SCA17", "DM1", "DM2", "FXTAS"] db_choices = ("hg38", "hg19") p = OptionParser(locus.__doc__) p.add_option("--tred", choices=INCLUDE, help="TRED name") p.add_option("--ref", choices=db_choices, default="hg38", help="Reference genome") p.set_home("lobstr") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bamfile, = args ref = opts.ref lhome = opts.lobstr_home tred = opts.tred tredsfile = datafile("TREDs.meta.csv") tf = pd.read_csv(tredsfile, index_col=0) row = tf.ix[tred] tag = "repeat_location" ldb = "TREDs" if ref == "hg19": tag += "." + ref ldb += "-" + ref seqid, start_end = row[tag].split(":") PAD = 1000 start, end = start_end.split('-') start, end = int(start) - PAD, int(end) + PAD region = "{}:{}-{}".format(seqid, start, end) minibamfile = get_minibam(bamfile, region) c = seqid.replace("chr", "") cmd, vcf = allelotype_on_chr(minibamfile, c, lhome, ldb) sh(cmd) parser = LobSTRvcf(columnidsfile=None) parser.parse(vcf, filtered=False) items = parser.items() if not items: print("No entry found!", file=sys.stderr) return k, v = parser.items()[0] print("{} => {}".format(tred, v.replace(',', '/')), file=sys.stderr)
def locus(args)
%prog locus bamfile Extract selected locus from a list of TREDs for validation, and run lobSTR.
4.847856
4.490833
1.0795
p = OptionParser(lobstrindex.__doc__) p.add_option("--notreds", default=False, action="store_true", help="Remove TREDs from the bed file") p.set_home("lobstr") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) trfbed, fastafile = args pf = fastafile.split(".")[0] lhome = opts.lobstr_home mkdir(pf) if opts.notreds: newbedfile = trfbed + ".new" newbed = open(newbedfile, "w") fp = open(trfbed) retained = total = 0 seen = set() for row in fp: r = STRLine(row) total += 1 name = r.longname if name in seen: continue seen.add(name) print(r, file=newbed) retained += 1 newbed.close() logging.debug("Retained: {0}".format(percentage(retained, total))) else: newbedfile = trfbed mm = MakeManager() cmd = "python {0}/scripts/lobstr_index.py".format(lhome) cmd += " --str {0} --ref {1} --out {2}".format(newbedfile, fastafile, pf) mm.add((newbedfile, fastafile), op.join(pf, "lobSTR_ref.fasta.rsa"), cmd) tabfile = "{0}/index.tab".format(pf) cmd = "python {0}/scripts/GetSTRInfo.py".format(lhome) cmd += " {0} {1} > {2}".format(newbedfile, fastafile, tabfile) mm.add((newbedfile, fastafile), tabfile, cmd) infofile = "{0}/index.info".format(pf) cmd = "cp {0} {1}".format(newbedfile, infofile) mm.add(trfbed, infofile, cmd) mm.write()
def lobstrindex(args)
%prog lobstrindex hg38.trf.bed hg38.upper.fa

Make lobSTR index. Make sure the FASTA contains only upper-case bases
(use fasta.format --upper to convert from UCSC fasta). The bed file is
generated by str().
3.122686
2.972037
1.050689
p = OptionParser(close.__doc__) p.set_home("gapfiller") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) scaffolds = args[0] libtxt = write_libraries(args[1:], aligner="bwa") cmd = "perl " + op.join(opts.gapfiller_home, "GapFiller.pl") cmd += " -l {0} -s {1} -T {2}".format(libtxt, scaffolds, opts.cpus) runsh = "run.sh" write_file(runsh, cmd)
def close(args)
%prog close scaffolds.fasta PE*.fastq Run GapFiller to fill gaps.
4.01501
3.195921
1.256292
p = OptionParser(scaffold.__doc__) p.set_aligner(aligner="bwa") p.set_home("sspace") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) contigs = args[0] libtxt = write_libraries(args[1:], aligner=opts.aligner) # Requires getopts.pl which may be missing download("http://web.vims.edu/bridge/bridge2/aw/lib/getopts.pl") cmd = "perl " + op.join(opts.sspace_home, "SSPACE_Standard_v3.0.pl") cmd += " -l {0} -s {1} -T {2}".format(libtxt, contigs, opts.cpus) runsh = "run.sh" write_file(runsh, cmd)
def scaffold(args)
%prog scaffold contigs.fasta MP*.fastq Run SSPACE scaffolding.
6.395324
5.470994
1.168951
p = OptionParser(agp.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) evidencefile, contigs = args ef = EvidenceFile(evidencefile, contigs) agpfile = evidencefile.replace(".evidence", ".agp") ef.write_agp(agpfile)
def agp(args)
%prog agp evidencefile contigs.fasta Convert SSPACE scaffold structure to AGP format.
2.814865
2.048578
1.374058
p = OptionParser(embed.__doc__) p.set_mingap(default=10) p.add_option("--min_length", default=200, type="int", help="Minimum length to consider [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) evidencefile, scaffolds, contigs = args min_length = opts.min_length splitfasta, oagp, cagp = gaps([scaffolds, "--split", "--mingap={0}".format(opts.mingap)]) agp = AGP(cagp) p = agp.graph ef = EvidenceFile(evidencefile, contigs) sizes = ef.sz q = ef.graph logging.debug("Reference graph: {0}".format(p)) logging.debug("Patch graph: {0}".format(q)) newagp = deepcopy(agp) seen = set() deleted = set() for a in agp: if a.is_gap: continue name = a.component_id object = a.object if name in deleted: print("* Skip {0}, already embedded".format(name), file=sys.stderr) continue seen.add(name) target_name, tag = get_target(p, name) path = q.get_path(name, target_name, tag=tag) path_size = sum([sizes[x.v] for x, t in path]) if path else None status = NO_UPDATE # Heuristic, the patch must not be too long if path and path_size > min_length and len(path) > 3: path = None if not path: print(name, target_name, path, path_size, status, file=sys.stderr) continue backward = False for x, t in path: if x.v in seen: print("* Does not allow backward" \ " patch on {0}".format(x.v), file=sys.stderr) backward = True break if backward: continue # Build the path plus the ends vv = q.get_node(name) path.appendleft((vv, tag)) if tag == ">": path.reverse() status = INSERT_BEFORE elif target_name is None: status = INSERT_AFTER else: target = q.get_node(target_name) path.append((target, tag)) status = INSERT_BETWEEN print(name, target_name, path, path_size, status, file=sys.stderr) # Trim the ends off from the constructed AGPLines lines = path_to_agp(q, path, object, sizes, status) if status == INSERT_BEFORE: lines = lines[:-1] td = newagp.insert_lines(name, lines, \ delete=True, verbose=True) elif status == INSERT_AFTER: lines = lines[1:] td = newagp.insert_lines(name, lines, after=True, \ delete=True, verbose=True) else: lines = lines[1:-1] td = newagp.update_between(name, target_name, lines, \ delete=True, verbose=True) deleted |= td seen |= td # Recruite big singleton contigs CUTOFF = opts.min_length for ctg, size in sizes.items(): if ctg in seen: continue if size < CUTOFF: continue newagp.append(AGPLine.cline(ctg, ctg, sizes, "?")) # Write a new AGP file newagpfile = "embedded.agp" newagp.print_to_file(newagpfile, index=True) tidy([newagpfile, contigs])
def embed(args)
%prog embed evidencefile scaffolds.fasta contigs.fasta

Use the SSPACE evidencefile to scaffold contigs into an existing
scaffold structure, as in `scaffolds.fasta`. contigs.fasta was used by
SSPACE directly to scaffold.

Rules:
1. Only update existing structure by embedding contigs small enough to fit.
2. Promote singleton contigs only if they are big (>= min_length).
4.541344
4.336781
1.047169
align_choices = ("left", "center", "right") p = OptionParser(gff.__doc__) p.add_option("--align", default="left", choices=align_choices, help="Horizontal alignment [default: %default]") p.add_option("--noUTR", default=False, action="store_true", help="Do not plot UTRs [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fig = plt.figure(1, (8, 5)) root = fig.add_axes([0, 0, 1, 1]) gffiles = args ngenes = len(gffiles) canvas = .6 setups, ratio = get_setups(gffiles, canvas=canvas, noUTR=opts.noUTR) align = opts.align xs = .2 if align == "left" else .8 yinterval = canvas / ngenes ys = .8 tip = .01 for genename, mrnabed, cdsbeds in setups: ExonGlyph(root, xs, ys, mrnabed, cdsbeds, ratio=ratio, align=align) if align == "left": root.text(xs - tip, ys, genename, ha="right", va="center") elif align == "right": root.text(xs + tip, ys, genename, ha="left", va="center") ys -= yinterval root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() figname = "exons.pdf" savefig(figname, dpi=300)
def gff(args)
%prog gff *.gff

Draw exons for genes based on gff files. Each gff file should contain
only one gene, and only the "mRNA" and "CDS" features will be drawn on
the canvas.
2.936266
2.829475
1.037742
from jcvi.formats.fastq import readlen

p = OptionParser(spades.__doc__)
opts, args = p.parse_args(args)

if len(args) == 0:
    sys.exit(not p.print_help())

folder, = args
for p, pf in iter_project(folder):
    rl = readlen([p[0], "--silent"])

    # <http://spades.bioinf.spbau.ru/release3.1.0/manual.html#sec3.4>
    kmers = None
    if rl >= 250:  # test the longer-read case first, or it is unreachable
        kmers = "21,33,55,77,99,127"
    elif rl >= 150:
        kmers = "21,33,55,77"

    cmd = "spades.py"
    if kmers:
        cmd += " -k {0}".format(kmers)
    cmd += " --careful"
    cmd += " --pe1-1 {0} --pe1-2 {1}".format(*p)
    cmd += " -o {0}_spades".format(pf)
    print(cmd)
def spades(args)
%prog spades folder

Run automated SPAdes.
3.733759
3.551172
1.051416
from jcvi.apps.bowtie import align p = OptionParser(contamination.__doc__) p.add_option("--mapped", default=False, action="store_true", help="Retain contaminated reads instead [default: %default]") p.set_cutoff(cutoff=800) p.set_mateorientation(mateorientation="+-") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, ecoli = args ecoli = get_abs_path(ecoli) tag = "--mapped" if opts.mapped else "--unmapped" for p, pf in iter_project(folder): align_opts = [ecoli] + p + [tag] align_opts += ["--cutoff={0}".format(opts.cutoff), "--null"] if opts.mateorientation: align_opts += ["--mateorientation={0}".format(opts.mateorientation)] samfile, logfile = align(align_opts)
def contamination(args)
%prog contamination folder Ecoli.fasta

Remove contaminated reads. The FASTQ files in the folder will be
automatically paired and filtered against Ecoli.fasta with BOWTIE2 to
remove contaminants.
4.053464
3.699788
1.095594
p = OptionParser(pairs.__doc__) p.set_firstN() p.set_mates() p.set_aligner() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) cwd = os.getcwd() aligner = opts.aligner work = "-".join(("pairs", aligner)) mkdir(work) from jcvi.formats.sam import pairs as ps if aligner == "bowtie": from jcvi.apps.bowtie import align elif aligner == "bwa": from jcvi.apps.bwa import align folder, ref = args ref = get_abs_path(ref) messages = [] for p, prefix in iter_project(folder): samplefq = [] for i in range(2): samplefq.append(op.join(work, prefix + "_{0}.first.fastq".format(i+1))) first([str(opts.firstN)] + [p[i]] + ["-o", samplefq[i]]) os.chdir(work) align_args = [ref] + [op.basename(fq) for fq in samplefq] outfile, logfile = align(align_args) bedfile, stats = ps([outfile, "--rclip={0}".format(opts.rclip)]) os.chdir(cwd) median = stats.median tag = "MP" if median > 1000 else "PE" median = str(median) pf, sf = median[:2], median[2:] if sf and int(sf) != 0: pf = str(int(pf) + 1) # Get the first two effective digits lib = "{0}-{1}".format(tag, pf + '0' * len(sf)) for i, xp in enumerate(p): suffix = "fastq.gz" if xp.endswith(".gz") else "fastq" link = "{0}-{1}.{2}.{3}".format(lib, prefix.replace("-", ""), i + 1, suffix) m = "\t".join(str(x) for x in (xp, link)) messages.append(m) messages = "\n".join(messages) write_file("f.meta", messages, tee=True)
def pairs(args)
%prog pairs folder reference.fasta Estimate insert size distribution. Compatible with a variety of aligners, including BOWTIE and BWA.
4.608087
4.383369
1.051266
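The library naming in pairs() rounds the median insert size up to two effective digits before building the MP/PE tag. A standalone sketch of that heuristic with a hypothetical helper name:

def libname(median):
    tag = "MP" if median > 1000 else "PE"
    s = str(median)
    pf, sf = s[:2], s[2:]
    if sf and int(sf) != 0:
        pf = str(int(pf) + 1)  # round the two leading digits up
    return "{}-{}".format(tag, pf + "0" * len(sf))

assert libname(4837) == "MP-4900"
assert libname(480) == "PE-480"
assert libname(5000) == "MP-5000"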
p = OptionParser(allpaths.__doc__) p.add_option("--ploidy", default="1", choices=("1", "2"), help="Ploidy [default: %default]") opts, args = p.parse_args(args) if len(args) == 0: sys.exit(not p.print_help()) folders = args for pf in folders: if not op.isdir(pf): continue assemble_dir(pf, target=["final.contigs.fasta", "final.assembly.fasta"], ploidy=opts.ploidy)
def allpaths(args)
%prog allpaths folder1 folder2 ... Run automated ALLPATHS on list of dirs.
3.380087
3.398392
0.994614
p = OptionParser(prepare.__doc__) p.add_option("--first", default=0, type="int", help="Use only first N reads [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) jfile, = args metafile = jfile + ".meta" if need_update(jfile, metafile): fp = open(jfile) fastqfiles = [x.strip() for x in fp if ".fastq" in x] metas = [Meta(x) for x in fastqfiles] fw = open(metafile, "w") print("\n".join(str(x) for x in metas), file=fw) print("Now modify `{0}`, and restart this script.".\ format(metafile), file=sys.stderr) print("Each line is : genome library fastqfile", file=sys.stderr) fw.close() return mf = MetaFile(metafile) for m in mf: m.make_link(firstN=opts.first)
def prepare(args)
%prog prepare jira.txt Parse JIRA report and prepare input. Look for all FASTQ files in the report and get the prefix. Assign fastq to a folder and a new file name indicating the library type (e.g. PE-500, MP-5000, etc.). Note that JIRA report can also be a list of FASTQ files.
3.649271
3.411234
1.06978
slink(p, pf, tag) assemble_dir(pf, target)
def assemble_pairs(p, pf, tag, target=["final.contigs.fasta"])
Take one pair of read files and assemble them into contigs.fasta.
15.76053
16.326332
0.965344
from jcvi.assembly.preprocess import correct as cr logging.debug("Work on {0} ({1})".format(pf, ','.join(p))) itag = tag[0] cm = ".".join((pf, itag)) targets = (cm + ".1.corr.fastq", cm + ".2.corr.fastq", \ pf + ".PE-0.corr.fastq") if not need_update(p, targets): logging.debug("Corrected reads found: {0}. Skipped.".format(targets)) return slink(p, pf, tag) cwd = os.getcwd() os.chdir(pf) cr(sorted(glob("*.fastq") + glob("*.fastq.gz")) + ["--nofragsdedup"]) sh("mv {0}.1.corr.fastq ../{1}".format(itag, targets[0])) sh("mv {0}.2.corr.fastq ../{1}".format(itag, targets[1])) sh("mv frag_reads_corr.corr.fastq ../{0}".format(targets[2])) logging.debug("Correction finished: {0}".format(targets)) os.chdir(cwd)
def correct_pairs(p, pf, tag)
Take one pair of read files and correct them to generate *.corr.fastq.
4.920497
4.621193
1.064768