| code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
|---|---|---|---|---|---|
| string | string | string | float64 | float64 | float64 |
from jcvi.formats.blast import chain_HSPs
p = OptionParser(overlap.__doc__)
p.add_option("--dir", default=os.getcwd(),
help="Download sequences to dir [default: %default]")
p.add_option("--suffix", default="fasta",
help="Suffix of the sequence file in dir [default: %default]")
p.add_option("--qreverse", default=False, action="store_true",
help="Reverse seq a [default: %default]")
p.add_option("--nochain", default=False, action="store_true",
help="Do not chain adjacent HSPs [default: chain HSPs]")
p.set_align(pctid=GoodPct, hitlen=GoodOverlap, evalue=.01)
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
afasta, bfasta = args
dir = opts.dir
chain = not opts.nochain
suffix = opts.suffix
evalue = opts.evalue
pctid = opts.pctid
hitlen = opts.hitlen
cutoff = Cutoff(pctid, hitlen)
# Check first whether it is file or accession name
if not op.exists(afasta):
af = op.join(dir, ".".join((afasta, suffix)))
if not op.exists(af): # Check to avoid redownload
entrez([afasta, "--skipcheck", "--outdir=" + dir])
afasta = af
if not op.exists(bfasta):
bf = op.join(dir, ".".join((bfasta, suffix)))
if not op.exists(bf):
entrez([bfasta, "--skipcheck", "--outdir=" + dir])
bfasta = bf
assert op.exists(afasta) and op.exists(bfasta)
cmd = "blastn -dust no"
cmd += " -query {0} -subject {1}".format(afasta, bfasta)
cmd += " -evalue {0} -outfmt 6 -perc_identity {1}".format(evalue, pctid)
fp = popen(cmd)
hsps = fp.readlines()
hsps = [BlastLine(x) for x in hsps]
hsps = [x for x in hsps if x.hitlen >= hitlen]
if chain:
logging.debug("Chain HSPs in the Blast output.")
dist = 2 * hitlen # Distance to chain the HSPs
hsps = chain_HSPs(hsps, xdist=dist, ydist=dist)
if len(hsps) == 0:
print("No match found.", file=sys.stderr)
return None
besthsp = hsps[0]
aid, asize = next(Fasta(afasta).itersizes())
bid, bsize = next(Fasta(bfasta).itersizes())
o = Overlap(besthsp, asize, bsize, cutoff, qreverse=opts.qreverse)
o.print_graphic()
if opts.outfile:
fw = must_open(opts.outfile, "w")
print(str(o), file=fw)
fw.close()
return o
|
def overlap(args)
|
%prog overlap <a|a.fasta> <b|b.fasta>
Check overlaps between two FASTA records. The arguments can be GenBank
accession IDs instead of FASTA files; given IDs, the sequences will be
downloaded first.
| 2.918242 | 2.857532 | 1.021246 |
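The `overlap` row above shells out to `blastn -outfmt 6` and wraps each output line in `BlastLine`. A minimal sketch of that parsing step, assuming the standard 12-column tabular layout (`TabularHSP` is a hypothetical stand-in for jcvi's `BlastLine`, shown only for illustration):
```
from collections import namedtuple

# Standard blastn -outfmt 6 columns, qseqid through bitscore
TabularHSP = namedtuple(
    "TabularHSP",
    "query subject pctid hitlen nmismatch ngaps "
    "qstart qstop sstart sstop evalue score",
)

def parse_outfmt6(line):
    """Parse one tab-separated BLAST outfmt 6 line into typed fields."""
    atoms = line.rstrip("\n").split("\t")
    return TabularHSP(
        atoms[0], atoms[1], float(atoms[2]), int(atoms[3]),
        int(atoms[4]), int(atoms[5]), int(atoms[6]), int(atoms[7]),
        int(atoms[8]), int(atoms[9]), float(atoms[10]), float(atoms[11]),
    )

hsp = parse_outfmt6("a\tb\t99.0\t1200\t5\t0\t1\t1200\t101\t1300\t0.0\t2100")
assert hsp.hitlen == 1200 and hsp.pctid == 99.0
```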
p = OptionParser(certificate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tpffile, certificatefile = args
fastadir = "fasta"
tpf = TPF(tpffile)
data = check_certificate(certificatefile)
fw = must_open(certificatefile, "w")
for i, a in enumerate(tpf):
if a.is_gap:
continue
aid = a.component_id
af = op.join(fastadir, aid + ".fasta")
if not op.exists(af): # Check to avoid redownload
entrez([aid, "--skipcheck", "--outdir=" + fastadir])
north, south = tpf.getNorthSouthClone(i)
aphase, asize = phase(aid)
for tag, p in (("North", north), ("South", south)):
if not p: # end of the chromosome
ov = "telomere\t{0}".format(asize)
elif p.isCloneGap:
bphase = "0"
ov = "{0}\t{1}".format(p.gap_type, asize)
else:
bid = p.component_id
bphase, bsize = phase(bid)
key = (tag, aid, bid)
if key in data:
print(data[key], file=fw)
continue
ar = [aid, bid, "--dir=" + fastadir]
o = overlap(ar)
ov = o.certificateline if o \
else "{0}\t{1}\tNone".format(bid, asize)
print("\t".join(str(x) for x in \
(tag, a.object, aphase, bphase, aid, ov)), file=fw)
fw.flush()
|
def certificate(args)
|
%prog certificate tpffile certificatefile
Generate certificate file for all overlaps in tpffile. tpffile can be
generated by jcvi.formats.agp.tpf().
North chr1 2 0 AC229737.8 telomere 58443
South chr1 2 1 AC229737.8 AC202463.29 58443 37835 58443 + Non-terminal
Each line describes a relationship between the current BAC and the
north/south BAC. First, "North/South" tag, then the chromosome, phases of
the two BACs, ids of the two BACs, the size and the overlap start-stop of
the CURRENT BAC, and orientation. Each BAC will have two lines in the
certificate file.
| 5.966173 | 5.004024 | 1.192275 |
p = OptionParser(neighbor.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
agpfile, componentID = args
fastadir = "fasta"
cmd = "grep"
cmd += " --color -C2 {0} {1}".format(componentID, agpfile)
sh(cmd)
agp = AGP(agpfile)
aorder = agp.order
if componentID not in aorder:
print("Record {0} not present in `{1}`."\
.format(componentID, agpfile), file=sys.stderr)
return
i, c = aorder[componentID]
north, south = agp.getNorthSouthClone(i)
if not north.isCloneGap:
ar = [north.component_id, componentID, "--dir=" + fastadir]
if north.orientation == '-':
ar += ["--qreverse"]
overlap(ar)
if not south.isCloneGap:
ar = [componentID, south.component_id, "--dir=" + fastadir]
if c.orientation == '-':
ar += ["--qreverse"]
overlap(ar)
|
def neighbor(args)
|
%prog neighbor agpfile componentID
Check overlaps of a particular component in agpfile.
| 4.249483 | 3.751676 | 1.132689 |
from jcvi.formats.base import DictFile
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
tpffile, certificatefile, agpfile = args
orientationguide = DictFile(tpffile, valuepos=2)
cert = Certificate(certificatefile)
cert.write_AGP(agpfile, orientationguide=orientationguide)
|
def agp(args)
|
%prog agp tpffile certificatefile agpfile
Build agpfile from overlap certificates.
Tiling Path File (tpf) is a file that lists the component and the gaps.
It is a three-column file similar to below, also see jcvi.formats.agp.tpf():
telomere chr1 na
AC229737.8 chr1 +
AC202463.29 chr1 +
Note: the orientation of the component is only used as a guide. If the
orientation is derivable from a terminal overlap, it will use it regardless
of what the tpf says.
See jcvi.assembly.goldenpath.certificate() which generates a list of
certificates based on agpfile. At first, it seems counter-productive to
convert first agp to certificates then certificates back to agp.
The certificates provide a way to edit the overlap information, so that the
agpfile can be corrected (without changing agpfile directly).
| 4.055129 | 3.119316 | 1.300006 |
aLhang, aRhang = self.qstart - 1, self.asize - self.qstop
bLhang, bRhang = self.sstart - 1, self.bsize - self.sstop
if self.orientation == '-':
bLhang, bRhang = bRhang, bLhang
if self.qreverse:
aLhang, aRhang = aRhang, aLhang
bLhang, bRhang = bRhang, bLhang
return aLhang, aRhang, bLhang, bRhang
|
def get_hangs(self)
|
Determine the type of overlap given query, ref alignment coordinates
Consider the following alignment between sequence a and b:
aLhang \ / aRhang
\------------/
/------------\
bLhang / \ bRhang
Terminal overlap: a before b, b before a
Contain overlap: a in b, b in a
| 2.782713 | 2.294577 | 1.212735 |
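The hang arithmetic in `get_hangs` is easy to check by hand; a self-contained sketch, assuming 1-based inclusive alignment coordinates as in BLAST tabular output:
```
def get_hangs(qstart, qstop, asize, sstart, sstop, bsize, orientation="+"):
    """Return unaligned overhangs (aLhang, aRhang, bLhang, bRhang)."""
    aLhang, aRhang = qstart - 1, asize - qstop
    bLhang, bRhang = sstart - 1, bsize - sstop
    if orientation == "-":  # b is reverse-complemented
        bLhang, bRhang = bRhang, bLhang
    return aLhang, aRhang, bLhang, bRhang

# a: 1..1200 of a 1500bp query; b: 101..1300 of a 1400bp subject
assert get_hangs(1, 1200, 1500, 101, 1300, 1400) == (0, 300, 100, 100)
```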
print(aclr, bclr, file=sys.stderr)
otype = self.otype
if otype == 1:
if aclr.orientation == '+':
aclr.end = self.qstop
else:
aclr.start = self.qstart
if bclr.orientation == '+':
bclr.start = self.sstop + 1
else:
bclr.end = self.sstart - 1
elif otype == 3:
aclr.start = aclr.end
elif otype == 4:
bclr.start = bclr.end
print(aclr, bclr, file=sys.stderr)
|
def update_clr(self, aclr, bclr)
|
Zip the two sequences together, using "left-greedy" rule
============= seqA
||||
====(===============) seqB
| 2.925367 | 2.857793 | 1.023646 |
aLhang, aRhang, bLhang, bRhang = self.get_hangs()
achar = ">"
bchar = "<" if self.orientation == '-' else ">"
if self.qreverse:
achar = "<"
bchar = {">" : "<", "<" : ">"}[bchar]
print(aLhang, aRhang, bLhang, bRhang, file=sys.stderr)
width = 50 # Canvas
hitlen = self.hitlen
lmax = max(aLhang, bLhang)
rmax = max(aRhang, bRhang)
bpwidth = lmax + hitlen + rmax
ratio = width * 1. / bpwidth
_ = lambda x: int(round(x * ratio, 0))
a1, a2 = _(aLhang), _(aRhang)
b1, b2 = _(bLhang), _(bRhang)
hit = max(_(hitlen), 1)
msg = " " * max(b1 - a1, 0)
msg += achar * (a1 + hit + a2)
msg += " " * (width - len(msg) + 2)
msg += "{0} ({1})".format(self.aid, self.asize)
print(msg, file=sys.stderr)
msg = " " * max(a1, b1)
msg += "|" * hit
print(msg, file=sys.stderr)
msg = " " * max(a1 - b1, 0)
msg += bchar * (b1 + hit + b2)
msg += " " * (width - len(msg) + 2)
msg += "{0} ({1})".format(self.bid, self.bsize)
print(msg, file=sys.stderr)
print(self, file=sys.stderr)
|
def print_graphic(self)
|
>>>>>>>>>>>>>>>>>>> seqA (alen)
||||||||
<<<<<<<<<<<<<<<<<<<<< seqB (blen)
| 3.03035 | 2.87799 | 1.05294 |
fw = must_open(filename, "w")
for aid, bb in groupby(self.lines, key=lambda x: x.aid):
bb = list(bb)
north, south = bb
aid = north.aid
assert aid == south.aid
aphase = north.aphase
chr = north.chr
size = north.asize
ar = [chr, 0, 0, 0]
northline = southline = None
northrange = southrange = None
# Warn if adjacent components do not have valid overlaps
if south.is_no_overlap:
print(south, file=sys.stderr)
# Most gaps, except telomeres, occur twice, so only do the "North"
if north.is_gap:
bar = ar + self.get_agp_gap(north.bid)
northline = "\t".join(str(x) for x in bar)
else:
if north.isTerminal:
northrange = north.astart, north.astop
if south.is_gap:
if south.bid == "telomere":
bar = ar + self.get_agp_gap(south.bid)
southline = "\t".join(str(x) for x in bar)
else:
if south.isTerminal:
southrange = south.astart, south.astop
else:
bar = ar + self.get_agp_gap("fragment")
southline = "\t".join(str(x) for x in bar)
# Determine the orientation and clear range for the current BAC
clr = [1, size]
orientation = sorientation = None
if northrange:
start, stop = northrange
Lhang = start - 1
Rhang = size - stop
orientation = '+' if Lhang < Rhang else '-'
if north.bphase == 1 and north.bphase < aphase:
if Lhang < Rhang: # North overlap at 5' end
clr[0] = start
else:
clr[1] = stop
# Override left-greedy (also see below)
else:
if Lhang < Rhang:
clr[0] = stop + 1
else:
clr[1] = start - 1
if southrange:
start, stop = southrange
Lhang = start - 1
Rhang = size - stop
sorientation = '+' if Lhang > Rhang else '-'
# Override left-greedy (also see above)
if aphase == 1 and aphase < south.bphase:
if Lhang < Rhang: # South overlap at 5' end
clr[0] = stop + 1
else:
clr[1] = start - 1
else:
if Lhang < Rhang:
clr[0] = start
else:
clr[1] = stop
if orientation:
if sorientation:
try:
assert orientation == sorientation, \
"Orientation conflicts:\n{0}\n{1}".format(north, south)
except AssertionError as e:
logging.debug(e)
else:
if sorientation:
orientation = sorientation
else: # Both overlaps fail to define orientation
orientation = orientationguide.get(aid, "+")
component_type = "D" if aphase in (1, 2) else "F"
bar = ar + [component_type, aid, clr[0], clr[1], orientation]
cline = "\t".join(str(x) for x in bar)
if northline:
print(northline, file=fw)
print(cline, file=fw)
if southline:
print(southline, file=fw)
fw.close()
reindex([filename, "--inplace"])
|
def write_AGP(self, filename, orientationguide={})
|
For each component, we have two overlaps: North and South.
=======
|||| South
====(=================) Current BAC
North ||||
===============
For the case that says "Non-terminal", the overlap will not be
considered. North-South would suggest a '+' orientation, South-North
would suggest a '-' orientation. In most cases, unless the overlap
involves phase1 BAC, the selected range will be shown as the brackets
above - exclude North overlap, and include South overlap (aka the
"left-greedy" rule).
| 3.652269 | 3.44891 | 1.058963 |
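The "left-greedy" rule above always trims the North overlap out of the clear range and keeps the South overlap. A minimal sketch of that trimming step, assuming a terminal overlap on a component with 1-based coordinates (`trim_north` is a hypothetical helper, not jcvi API):
```
def trim_north(clr, start, stop, size):
    """Left-greedy: exclude a terminal North overlap from the clear range.

    clr is a mutable [start, end] pair (1-based); the overlap spans
    start..stop on a component of length size.
    """
    Lhang, Rhang = start - 1, size - stop
    if Lhang < Rhang:  # overlap sits at the 5' end
        clr[0] = stop + 1
    else:              # overlap sits at the 3' end
        clr[1] = start - 1
    return clr

# A 58443bp BAC whose North neighbor overlaps its first 5kb:
assert trim_north([1, 58443], 1, 5000, 58443) == [5001, 58443]
```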
import hashlib
from jcvi.algorithms.formula import MAD_interval as confidence_interval
from jcvi.graphics.base import latex, plt, savefig, set2
p = OptionParser(gcdepth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
sample_name, tag = args
# The tag is used to add to title, also provide a random (hashed) color
coloridx = int(hashlib.sha1(tag.encode("utf-8")).hexdigest(), 16) % len(set2)
color = set2[coloridx]
# mosdepth outputs a table that we can use to plot relationship
gcbedgz = sample_name + ".regions.gc.bed.gz"
df = pd.read_csv(gcbedgz, delimiter="\t")
mf = df.loc[:, ("4_usercol", "6_pct_gc")]
mf.columns = ["depth", "gc"]
# We discard any bins that are gaps
mf = mf[(mf["depth"] > .001) | (mf["gc"] > .001)]
# Create GC bins
gcbins = defaultdict(list)
for i, row in mf.iterrows():
gcp = int(round(row["gc"] * 100))
gcbins[gcp].append(row["depth"])
gcd = sorted((k * .01, confidence_interval(v))
for (k, v) in gcbins.items())
gcd_x, gcd_y = zip(*gcd)
m, lo, hi = zip(*gcd_y)
# Plot
plt.plot(mf["gc"], mf["depth"], ".", color="lightslategray", ms=2,
mec="lightslategray", alpha=.1)
patch = plt.fill_between(gcd_x, lo, hi,
facecolor=color, alpha=.25, zorder=10,
linewidth=0.0, label="Median +/- MAD band")
plt.plot(gcd_x, m, "-", color=color, lw=2, zorder=20)
ax = plt.gca()
ax.legend(handles=[patch], loc="best")
ax.set_xlim(0, 1)
ax.set_ylim(0, 100)
ax.set_title("{} ({})".format(latex(sample_name), tag))
ax.set_xlabel("GC content")
ax.set_ylabel("Depth")
savefig(sample_name + ".gcdepth.png")
|
def gcdepth(args)
|
%prog gcdepth sample_name tag
Plot GC content vs depth across genomic bins. Inputs are mosdepth output:
- NA12878_S1.mosdepth.global.dist.txt
- NA12878_S1.mosdepth.region.dist.txt
- NA12878_S1.regions.bed.gz
- NA12878_S1.regions.bed.gz.csi
- NA12878_S1.regions.gc.bed.gz
A sample mosdepth.sh script might look like:
```
#!/bin/bash
LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\
bams/$1.bam -t 4 -c chr1 -n --by 1000
bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\
-bed $1.regions.bed.gz \\
| pigz -c > $1.regions.gc.bed.gz
```
| 3.979648 | 3.678486 | 1.081871 |
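The binning step in `gcdepth` groups depth values by rounded GC percent and summarizes each bin. A dependency-light sketch with plain numpy, using median +/- MAD in place of jcvi's `MAD_interval` (assumed to behave similarly):
```
import numpy as np
from collections import defaultdict

def gc_depth_band(gc, depth):
    """Bin depth by GC percent; return (gc, median, lo, hi) per bin."""
    bins = defaultdict(list)
    for g, d in zip(gc, depth):
        bins[int(round(g * 100))].append(d)
    rows = []
    for gcp in sorted(bins):
        a = np.asarray(bins[gcp])
        med = np.median(a)
        mad = np.median(np.abs(a - med))  # median absolute deviation
        rows.append((gcp / 100.0, med, med - mad, med + mad))
    return rows

band = gc_depth_band([0.39, 0.41, 0.40, 0.40], [30, 34, 31, 33])
assert band[1] == (0.4, 32.0, 31.0, 33.0)  # the GC=40% bin
```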
p = OptionParser(exonunion.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gencodebed, = args
beds = BedTool(gencodebed)
# fields[3] is gene_id; fields[6] is gene_name
for g, gb in groupby(beds, key=lambda x: x.fields[3]):
gb = BedTool(gb)
sys.stdout.write(str(gb.sort().merge(c="4,5,6,7",
o=','.join(['first'] * 4))))
|
def exonunion(args)
|
%prog exonunion gencode.v26.annotation.exon.bed
Collapse overlapping exons within the same gene. File
`gencode.v26.annotation.exon.bed` can be generated by:
$ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon")
{print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";'
| 4.141375 | 4.135713 | 1.001369 |
from cyvcf2 import VCF
counter = Counter()
for v in VCF(vcffile):
tag = v.ID.split(":")[1]
counter[tag] += 1
return counter
|
def get_gain_loss_summary(vcffile)
|
Extract Canvas:GAIN/LOSS/REF/LOH tags
| 4.46593 | 4.090415 | 1.091804 |
p = OptionParser(summarycanvas.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for vcffile in args:
counter = get_gain_loss_summary(vcffile)
pf = op.basename(vcffile).split(".")[0]
print(pf + " " +
" ".join("{}:{}".format(k, v)
for k, v in sorted(counter.items())))
|
def summarycanvas(args)
|
%prog summarycanvas output.vcf.gz
Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
| 3.438965 | 2.698456 | 1.274419 |
from io import StringIO
from cyvcf2 import VCF
output = StringIO()
for v in VCF(vcffile):
chrom = v.CHROM
start = v.start
end = v.INFO.get('END') - 1
cn, = v.format('CN')[0]
print("\t".join(str(x) for x in (chrom, start, end, cn)), file=output)
beds = BedTool(output.getvalue(), from_string=True)
return beds
|
def parse_segments(vcffile)
|
Extract all copy number segments from a CANVAS file
VCF line looks like:
chr1 788879 Canvas:GAIN:chr1:788880-821005 N <CNV> 2 q10
SVTYPE=CNV;END=821005;CNVLEN=32126 RC:BC:CN:MCC 157:4:3:2
| 3.326536 | 3.468098 | 0.959182 |
if not counter:
return np.nan, np.nan
total = sum(v for k, v in counter.items())
mid = total / 2
weighted_sum = 0
items_seen = 0
median_found = False
for k, v in sorted(counter.items()):
weighted_sum += k * v
items_seen += v
if not median_found and items_seen >= mid:
median = k
median_found = True
mean = weighted_sum * 1. / total
return mean, median
|
def counter_mean_and_median(counter)
|
Calculate the mean and median value of a counter
| 2.62955 | 2.703175 | 0.972764 |
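A worked example of the weighted mean/median arithmetic above, on a Counter shaped like the `gcn_store` values further down (copy number weighted by overlap length):
```
from collections import Counter

# 200bp at CN=2, 100bp at CN=3, 20bp at CN=4
c = Counter({2: 200, 3: 100, 4: 20})
total = sum(c.values())  # 320
mean = sum(k * v for k, v in c.items()) / float(total)
# (2*200 + 3*100 + 4*20) / 320 = 780/320 = 2.4375
half, seen, median = total / 2.0, 0, None
for k in sorted(c):
    seen += c[k]
    if seen >= half:  # first key covering half the total weight
        median = k
        break
assert (round(mean, 4), median) == (2.4375, 2)
```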
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items()))
|
def counter_format(counter)
|
Pretty print a counter so that it appears as: "2:200,3:100,4:20"
| 6.383723 | 4.84408 | 1.31784 |
p = OptionParser(gcn.__doc__)
p.set_cpus()
p.set_tmpdir(tmpdir="tmp")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
exonbed = args[0]
canvasvcfs = args[1:]
tsvfile = opts.outfile
tmpdir = opts.tmpdir
mkdir(tmpdir)
set_tempdir(tmpdir)
df = vcf_to_df(canvasvcfs, exonbed, opts.cpus)
for suffix in (".avgcn", ".medcn"):
df_to_tsv(df, tsvfile, suffix)
|
def gcn(args)
|
%prog gcn gencode.v26.exonunion.bed data/*.vcf.gz
Compile gene copy number based on CANVAS results.
| 4.104642 | 3.329911 | 1.232658 |
canvasvcf, exonbed, i = arg
logging.debug("Working on job {}: {}".format(i, canvasvcf))
samplekey = op.basename(canvasvcf).split(".")[0].rsplit('_', 1)[0]
d = {'SampleKey': samplekey}
exons = BedTool(exonbed)
cn = parse_segments(canvasvcf)
overlaps = exons.intersect(cn, wao=True)
gcn_store = {}
for ov in overlaps:
# Example of ov.fields:
# [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
# u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
# u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
if gene_name not in gcn_store:
gcn_store[gene_name] = defaultdict(int)
cn = ov.fields[-2]
if cn == ".":
continue
cn = int(cn)
if cn > 10:
cn = 10
amt = int(ov.fields[-1])
gcn_store[gene_name][cn] += amt
for k, v in sorted(gcn_store.items()):
v_mean, v_median = counter_mean_and_median(v)
d[k + ".avgcn"] = v_mean
d[k + ".medcn"] = v_median
cleanup()
return d
|
def vcf_to_df_worker(arg)
|
Convert CANVAS vcf to a dict, single thread
| 3.925836 | 3.826679 | 1.025912 |
df = pd.DataFrame()
p = Pool(processes=cpus)
results = []
args = [(x, exonbed, i) for (i, x) in enumerate(canvasvcfs)]
r = p.map_async(vcf_to_df_worker, args,
callback=results.append)
r.wait()
for res in results:
df = df.append(res, ignore_index=True)
return df
|
def vcf_to_df(canvasvcfs, exonbed, cpus)
|
Compile a number of vcf files into tsv file for easy manipulation
| 2.57602 | 2.50829 | 1.027002 |
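Note that `DataFrame.append`, used above, is deprecated and removed in pandas >= 2.0. A hedged sketch of an equivalent gather step with modern pandas (the worker must be a picklable top-level function, as `vcf_to_df_worker` is):
```
import pandas as pd
from multiprocessing import Pool

def gather(worker, task_args, cpus):
    """Fan out worker over task_args; collect the per-sample dict rows."""
    with Pool(processes=cpus) as pool:
        rows = pool.map(worker, task_args)
    return pd.DataFrame(rows)  # one construction instead of repeated append
```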
tsvfile += suffix
columns = ["SampleKey"] + sorted(x for x in df.columns
if x.endswith(suffix))
tf = df.reindex(columns=columns) # reindex_axis is removed in modern pandas
tf = tf.sort_values("SampleKey") # sort_values returns a new frame
tf.to_csv(tsvfile, sep='\t', index=False, float_format='%.4g', na_rep="na")
print("TSV output written to `{}` (# samples={})"\
.format(tsvfile, tf.shape[0]), file=sys.stderr)
|
def df_to_tsv(df, tsvfile, suffix)
|
Serialize the dataframe as a tsv
| 4.37793 | 4.508751 | 0.970985 |
from jcvi.graphics.base import savefig
p = OptionParser(coverage.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
covfile, = args
df = pd.read_csv(covfile, sep='\t', names=["Ref", "Position", "Depth"])
xlabel, ylabel = "Position", "Depth"
df.plot(xlabel, ylabel, color='g')
image_name = covfile + "." + iopts.format
savefig(image_name)
|
def coverage(args)
|
%prog coverage *.coverage
Plot coverage along chromosome. The coverage file can be generated with:
$ samtools depth a.bam > a.coverage
The plot is a simple line plot using matplotlib.
| 3.109315 | 2.936401 | 1.058886 |
from jcvi.graphics.base import savefig
p = OptionParser(plot.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")
if len(args) != 3:
sys.exit(not p.print_help())
workdir, sample_key, chrs = args
chrs = chrs.split(",")
hmm = CopyNumberHMM(workdir=workdir)
hmm.plot(sample_key, chrs=chrs)
image_name = sample_key + "_cn." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
def plot(args)
|
%prog plot workdir sample chr1,chr2
Plot some chromosomes for visual proof. Separate multiple chromosomes with
comma. Must contain folder workdir/sample-cn/.
| 3.026196 | 2.73249 | 1.107487 |
p = OptionParser(sweep.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
golden_ratio = (1 + 5 ** .5) / 2
cmd = "python -m jcvi.variation.cnv hmm {} {}".format(workdir, sample_key)
cmd += " --mu {:.5f} --sigma {:.3f} --threshold {:.3f}"
mus = [.00012 * golden_ratio ** x for x in range(10)]
sigmas = [.0012 * golden_ratio ** x for x in range(20)]
thresholds = [.1 * golden_ratio ** x for x in range(10)]
print(mus, file=sys.stderr)
print(sigmas, file=sys.stderr)
print(thresholds, file=sys.stderr)
for mu in mus:
for sigma in sigmas:
for threshold in thresholds:
tcmd = cmd.format(mu, sigma, threshold)
print(tcmd)
|
def sweep(args)
|
%prog sweep workdir 102340_NA12878
Write a number of commands to sweep parameter space.
| 2.913815 | 2.771933 | 1.051185 |
p = OptionParser(compare.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
truths = args[0]
cnvoutputs = args[1:]
cpus = min(len(cnvoutputs), opts.cpus)
p = Pool(processes=cpus)
results = []
files = [(x, truths) for x in cnvoutputs]
r = p.map_async(compare_worker, files, callback=results.append)
r.wait()
for res in results:
print("\n".join(res))
|
def compare(args)
|
%prog compare NA12878_array_hg38.bed *.seg
Compare cnv output to known ground truths.
| 2.985197 | 2.29318 | 1.301771 |
p = OptionParser(cib.__doc__)
p.add_option("--prefix", help="Report seqids with this prefix only")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, samplekey = args
mkdir(samplekey)
bam = pysam.AlignmentFile(bamfile, "rb")
refs = [x for x in bam.header["SQ"]]
prefix = opts.prefix
if prefix:
refs = [x for x in refs if x["SN"].startswith(prefix)]
task_args = []
for r in refs:
task_args.append((bamfile, r, samplekey))
cpus = min(opts.cpus, len(task_args))
logging.debug("Use {} cpus".format(cpus))
p = Pool(processes=cpus)
for res in p.imap(bam_to_cib, task_args):
continue
|
def cib(args)
|
%prog cib bamfile samplekey
Convert BAM to CIB (a binary storage of int8 per base).
| 2.92452 | 2.636563 | 1.109217 |
p = OptionParser(batchcn.__doc__)
p.add_option("--upload", default="s3://hli-mv-data-science/htang/ccn",
help="Upload cn and seg results to s3")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, samples = args
upload = opts.upload
store = upload + "/{}/*.seg".format(workdir)
computed = [op.basename(x).split(".")[0] for x in glob_s3(store)]
computed = set(computed)
# Generate a bunch of cn commands
fp = open(samples)
nskipped = ntotal = 0
cmd = "python -m jcvi.variation.cnv cn --hmm --cleanup {}".format(workdir)
for row in fp:
samplekey, path = row.strip().split(",")
ntotal += 1
if samplekey in computed:
nskipped += 1
continue
print(" ".join((cmd, samplekey, path)))
logging.debug("Skipped: {}".format(percentage(nskipped, ntotal)))
|
def batchcn(args)
|
%prog batchcn workdir samples.csv
Run CNV segmentation caller in batch mode. Scans a workdir.
| 5.836182 | 5.249154 | 1.111833 |
p = OptionParser(hmm.__doc__)
p.add_option("--mu", default=.003, type="float",
help="Transition probability")
p.add_option("--sigma", default=.1, type="float",
help="Standard deviation of Gaussian emission distribution")
p.add_option("--threshold", default=1, type="float",
help="Standard deviation must be < this "
"in the baseline population")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
model = CopyNumberHMM(workdir=workdir, mu=opts.mu, sigma=opts.sigma,
threshold=opts.threshold)
events = model.run(sample_key)
params = ".mu-{}.sigma-{}.threshold-{}"\
.format(opts.mu, opts.sigma, opts.threshold)
hmmfile = op.join(workdir, sample_key + params + ".seg")
fw = open(hmmfile, "w")
nevents = 0
for mean_cn, rr, event in events:
if event is None:
continue
print(" ".join((event.bedline, sample_key)), file=fw)
nevents += 1
fw.close()
logging.debug("A total of {} aberrant events written to `{}`"
.format(nevents, hmmfile))
return hmmfile
|
def hmm(args)
|
%prog hmm workdir sample_key
Run CNV segmentation caller. The workdir must contain a subfolder called
`sample_key-cn` that contains CN for each chromosome. A `beta` directory
that contains a scaler for each bin must also be present in the current
directory.
| 3.635883 | 3.482521 | 1.044038 |
p = OptionParser(batchccn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
mm = MakeManager()
pf = op.basename(csvfile).split(".")[0]
mkdir(pf)
header = next(open(csvfile))
header = None if header.strip().endswith(".bam") else "infer"
logging.debug("Header={}".format(header))
df = pd.read_csv(csvfile, header=header)
cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
cmd += " -n {} -b {}"
cmd += " -o {} -r hg38".format(pf)
for i, (sample_key, bam) in df.iterrows():
cmdi = cmd.format(sample_key, bam)
outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
mm.add(csvfile, outfile, cmdi)
mm.write()
|
def batchccn(args)
|
%prog batchccn test.csv
Run CCN script in batch. Write makefile.
| 4.21366 | 4.039148 | 1.043205 |
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [op.join(s + "-cn", "{}.{}.cn".
format(op.basename(s), seqid)) for s in samples]
arrays = [np.fromfile(name, dtype=float) for name in names] # np.float is removed in modern numpy
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug("K-means with {} c0:{} c1:{}"
.format(seqid, zero_med, one_med))
higher_idx = 1 if one_med > zero_med else 0
# Use the higher mean coverage component
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in range(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
ar.tofile("{}.bin".format(seqid))
|
def mergecn(args)
|
%prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosome after another.
| 3.38746 | 3.30226 | 1.025801 |
# We need a way to go from compressed indices to original indices
P = Z.copy()
P[~np.isfinite(P)] = -1
_, mapping = np.unique(np.cumsum(P >= 0), return_index=True)
dZ = Z.compressed()
uniq, idx = np.unique(dZ, return_inverse=True)
segments = []
for i, mean_cn in enumerate(uniq):
if not np.isfinite(mean_cn):
continue
for rr in contiguous_regions(idx == i):
segments.append((mean_cn, mapping[rr]))
return segments
|
def annotate_segments(self, Z)
|
Report the copy number and start-end segment
| 5.387792 | 5.424419 | 0.993248 |
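`annotate_segments` leans on a `contiguous_regions` helper not shown in this table; the usual numpy idiom for it, sketched here (returns half-open [start, stop) runs of True):
```
import numpy as np

def contiguous_regions(condition):
    """Return an (n, 2) array of [start, stop) indices for True runs."""
    # Diff of the padded 0/1 mask flags run boundaries; padding with zeros
    # ensures runs touching either edge are counted.
    d = np.diff(np.concatenate(([0], condition.astype(int), [0])))
    starts = np.where(d == 1)[0]
    stops = np.where(d == -1)[0]
    return np.column_stack((starts, stops))

mask = np.array([False, True, True, False, True])
assert contiguous_regions(mask).tolist() == [[1, 3], [4, 5]]
```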
p = OptionParser(ip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 0:
sys.exit(not p.print_help())
s = InstanceSkeleton()
print("IP address:", s.private_ip_address, file=sys.stderr)
print("Instance type:", s.instance_type, file=sys.stderr)
|
def ip(args)
|
%prog ip
Show current IP address from JSON settings.
| 4.626322 | 4.821877 | 0.959444 |
p = OptionParser(start.__doc__)
p.add_option("--ondemand", default=False, action="store_true",
help="Do we want a more expensive on-demand instance")
p.add_option("--profile", default="mvrad-datasci-role", help="Profile name")
p.add_option("--price", default=4.0, type=float, help="Spot price")
opts, args = p.parse_args(args)
if len(args) != 0:
sys.exit(not p.print_help())
role(["htang"])
session = boto3.Session(profile_name=opts.profile)
client = session.client('ec2')
s = InstanceSkeleton()
# Make sure the instance id is empty
instance_id = s.instance_id
if instance_id != "":
logging.error("Instance exists {}".format(instance_id))
sys.exit(1)
launch_spec = s.launch_spec
instance_id = ""
if opts.ondemand:
# Launch on-demand instance
response = client.run_instances(
BlockDeviceMappings=s.block_device_mappings,
MaxCount=1, MinCount=1,
ImageId=s.image_id,
InstanceType=s.instance_type,
KeyName=s.key_name,
Placement={"AvailabilityZone": s.availability_zone},
SecurityGroupIds=s.security_group_ids,
SubnetId=s.subnet_id,
EbsOptimized=s.ebs_optimized,
IamInstanceProfile=s.iam_instance_profile,
)
instance_id = response["Instances"][0]["InstanceId"]
else:
# Launch spot instance
response = client.request_spot_instances(
SpotPrice=str(opts.price),
InstanceCount=1,
Type="one-time",
AvailabilityZoneGroup=s.availability_zone,
LaunchSpecification=launch_spec
)
request_id = response["SpotInstanceRequests"][0]["SpotInstanceRequestId"]
print("Request id {}".format(request_id), file=sys.stderr)
while not instance_id:
response = client.describe_spot_instance_requests(
SpotInstanceRequestIds=[request_id]
)
if "InstanceId" in response["SpotInstanceRequests"][0]:
instance_id = response["SpotInstanceRequests"][0]["InstanceId"]
else:
logging.debug("Waiting to be fulfilled ...")
time.sleep(10)
# Check if the instance is running
print("Instance id {}".format(instance_id), file=sys.stderr)
status = ""
while status != "running":
logging.debug("Waiting instance to run ...")
time.sleep(3)
response = client.describe_instance_status(InstanceIds=[instance_id])
if len(response["InstanceStatuses"]) > 0:
status = response["InstanceStatuses"][0]["InstanceState"]["Name"]
# Tagging
name = "htang-lx-ondemand" if opts.ondemand else "htang-lx-spot"
response = client.create_tags(
Resources=[instance_id],
Tags=[{"Key": k, "Value": v} for k, v in { \
"Name": name,
"owner": "htang",
"project": "mv-bioinformatics"
}.items()]
)
# Attach working volumes
volumes = s.volumes
for volume in volumes:
response = client.attach_volume(
VolumeId=volume["VolumeId"],
InstanceId=instance_id,
Device=volume["Device"]
)
# Save instance id and ip
response = client.describe_instances(InstanceIds=[instance_id])
ip_address = response["Reservations"][0]["Instances"][0]["PrivateIpAddress"]
print("IP address {}".format(ip_address), file=sys.stderr)
s.save_instance_id(instance_id, ip_address)
|
def start(args)
|
%prog start
Launch ec2 instance through command line.
| 2.33479 | 2.290347 | 1.019405 |
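The two polling loops in `start` can be replaced with boto3's built-in waiters; a hedged sketch, where `request_id` and `instance_id` come from the `request_spot_instances` / `run_instances` responses in the surrounding code:
```
import boto3

def wait_until_running(client, request_id, instance_id):
    """Block until the spot request is fulfilled and the instance runs."""
    client.get_waiter("spot_instance_request_fulfilled").wait(
        SpotInstanceRequestIds=[request_id])
    client.get_waiter("instance_running").wait(InstanceIds=[instance_id])

# client = boto3.Session(profile_name="mvrad-datasci-role").client("ec2")
```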
p = OptionParser(stop.__doc__)
p.add_option("--profile", default="mvrad-datasci-role", help="Profile name")
opts, args = p.parse_args(args)
if len(args) != 0:
sys.exit(not p.print_help())
role(["htang"])
session = boto3.Session(profile_name=opts.profile)
client = session.client('ec2')
s = InstanceSkeleton()
# Make sure the instance id is NOT empty
instance_id = s.instance_id
if instance_id == "":
logging.error("Cannot find instance_id {}".format(instance_id))
sys.exit(1)
block_device_mappings = []
for volume in s.volumes:
block_device_mappings.append(
{
"DeviceName": volume["Device"],
"NoDevice": ""
}
)
new_image_name = "htang-dev-{}-{}".format(timestamp(), int(time.time()))
response = client.create_image(
InstanceId=instance_id,
Name=new_image_name,
BlockDeviceMappings=block_device_mappings
)
print(response, file=sys.stderr)
new_image_id = response["ImageId"]
image_status = ""
while image_status != "available":
logging.debug("Waiting for image to be ready")
time.sleep(10)
response = client.describe_images(ImageIds=[new_image_id])
image_status = response["Images"][0]["State"]
# Delete old image, snapshot and shut down instance
old_image_id = s.image_id
response = client.describe_images(ImageIds=[old_image_id])
old_snapshot_id = response["Images"][0]["BlockDeviceMappings"][0]["Ebs"]["SnapshotId"]
response = client.deregister_image(ImageId=old_image_id)
print(response, file=sys.stderr)
response = client.delete_snapshot(SnapshotId=old_snapshot_id)
print(response, file=sys.stderr)
response = client.terminate_instances(InstanceIds=[instance_id])
print(response, file=sys.stderr)
# Save new image id
s.save_image_id(new_image_id)
s.save_instance_id("", "")
|
def stop(args)
|
%prog stop
Stop EC2 instance.
| 2.482464 | 2.418664 | 1.026378 |
p = OptionParser(rm.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
store, = args
contents = glob_s3(store)
for c in contents:
rm_s3(c)
|
def rm(args)
|
%prog rm "s3://hli-mv-data-science/htang/str/*.csv"
Remove a bunch of files.
| 3.196016 | 2.991917 | 1.068217 |
p = OptionParser(cp.__doc__)
p.add_option("--force", default=False,
action="store_true", help="Force overwrite if exists")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
store, folder = args
force = opts.force
cpus = opts.cpus
if op.exists(store):
contents = [x.strip().split(",") for x in open(store)]
else:
contents = glob_s3(store)
tasks = []
for c in contents:
if isinstance(c, six.string_types):
oc = op.basename(c)
tc = op.join(folder, oc)
else:
if len(c) == 2:
c, tc = c
else:
c, = c
tc = op.basename(c)
tasks.append((c, tc, force))
worker_pool = Pool(cpus)
worker_pool.map(worker, tasks)
worker_pool.close()
worker_pool.join()
|
def cp(args)
|
%prog cp "s3://hli-mv-data-science/htang/str/*.csv" .
Copy files to folder. Accepts list of s3 addresses as input.
| 2.815552 | 2.672009 | 1.053721 |
p = OptionParser(ls.__doc__)
p.add_option("--keys", help="List of keys to include")
p.add_option("--recursive", default=False, action="store_true",
help="Recursive search")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
store, = args
keys = opts.keys
if keys:
keys = SetFile(keys)
print("\n".join(glob_s3(store, keys=keys, recursive=opts.recursive)))
|
def ls(args)
|
%prog ls "s3://hli-mv-data-science/htang/str/*.vcf.gz"
List files with support for wildcards.
| 3.102038 | 3.058249 | 1.014318 |
src_acct, src_username, dst_acct, dst_role = \
"205134639408 htang 114692162163 mvrad-datasci-role".split()
p = OptionParser(role.__doc__)
p.add_option("--profile", default="mvrad-datasci-role", help="Profile name")
p.add_option('--device',
default="arn:aws:iam::" + src_acct + ":mfa/" + src_username,
metavar='arn:aws:iam::123456788990:mfa/dudeman',
help="The MFA Device ARN. This value can also be "
"provided via the environment variable 'MFA_DEVICE' or"
" the ~/.aws/credentials variable 'aws_mfa_device'.")
p.add_option('--duration',
type=int, default=3600,
help="The duration, in seconds, that the temporary "
"credentials should remain valid. Minimum value: "
"900 (15 minutes). Maximum: 129600 (36 hours). "
"Defaults to 43200 (12 hours), or 3600 (one "
"hour) when using '--assume-role'. This value "
"can also be provided via the environment "
"variable 'MFA_STS_DURATION'. ")
p.add_option('--assume-role', '--assume',
default="arn:aws:iam::" + dst_acct + ":role/" + dst_role,
metavar='arn:aws:iam::123456788990:role/RoleName',
help="The ARN of the AWS IAM Role you would like to "
"assume, if specified. This value can also be provided"
" via the environment variable 'MFA_ASSUME_ROLE'")
p.add_option('--role-session-name',
help="Friendly session name required when using "
"--assume-role",
default=getpass.getuser())
p.add_option('--force',
help="Refresh credentials even if currently valid.",
action="store_true")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
# Use a config to check the expiration of session token
config = get_config(AWS_CREDS_PATH)
validate(opts, config)
|
def role(args)
|
%prog role htang
Change aws role.
| 4.065366 | 3.827301 | 1.062202 |
p = OptionParser(query.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
locifile, contig = args
idx = build_index(locifile)
pos = idx[contig]
logging.debug("Contig {0} found at pos {1}".format(contig, pos))
fp = open(locifile)
fp.seek(pos)
section = []
while True:
row = fp.readline()
if not row: # Guard against EOF, otherwise the loop never ends
break
if row.startswith("//") and row.split()[1] != contig:
break
section.append(row)
print("".join(section))
|
def query(args)
|
%prog query out.loci contig
Random access to loci file. This script helps speed up debugging.
| 3.10677 | 2.718747 | 1.142721 |
from jcvi.assembly.geneticmap import bed as geneticmap_bed
from jcvi.apps.align import blat
from jcvi.formats.blast import bed as blast_bed, best
p = OptionParser(synteny.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
mstmapout, novo, ref = args
pf = mstmapout.split(".")[0]
rf = ref.split(".")[0]
mstmapbed = geneticmap_bed([mstmapout])
cmd = "cut -d. -f1 {0}".format(mstmapbed)
tmpbed = mstmapbed + ".tmp"
sh(cmd, outfile=tmpbed)
os.rename(tmpbed, pf + ".bed")
cmd = "cut -f4 {0} | cut -d. -f1 | sort -u".format(mstmapbed)
idsfile = pf + ".ids"
sh(cmd, outfile=idsfile)
fastafile = pf + ".fasta"
cmd = "faSomeRecords {0} {1} {2}".format(novo, idsfile, fastafile)
sh(cmd)
blastfile = blat([ref, fastafile])
bestblastfile = best([blastfile])
blastbed = blast_bed([bestblastfile])
os.rename(blastbed, rf + ".bed")
anchorsfile = "{0}.{1}.anchors".format(pf, rf)
cmd = "paste {0} {0}".format(idsfile)
sh(cmd, outfile=anchorsfile)
|
def synteny(args)
|
%prog synteny mstmap.out novo.final.fasta reference.fasta
Plot MSTmap against reference genome.
| 3.214167 | 2.963747 | 1.084494 |
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--population_type", default="RIL6",
help="Type of population, possible values are DH and RILd")
p.add_option("--missing_threshold", default=.5,
help="Missing threshold, .25 excludes any marker with >25% missing")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
lmd, = args
fp = open(lmd)
next(fp) # Header
table = {"0": "-", "1": "A", "2": "B", "3": "X"}
mh = ["locus_name"] + fp.next().split()[4:]
genotypes = []
for row in fp:
atoms = row.split()
chr, pos, ref, alt = atoms[:4]
locus_name = ".".join((chr, pos))
codes = [table[x] for x in atoms[4:]]
genotypes.append([locus_name] + codes)
mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
mm.write(opts.outfile, header=True)
|
def mstmap(args)
|
%prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input.
| 4.208921 | 3.856867 | 1.09128 |
import numpy as np
from jcvi.utils.progressbar import ProgressBar, Percentage, Bar, ETA
p = OptionParser(weblogo.__doc__)
p.add_option("-N", default=10, type="int",
help="Count the first and last N bases")
p.add_option("--nreads", default=1000000, type="int",
help="Parse first N reads")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
N = opts.N
nreads = opts.nreads
pat = "ATCG"
L = np.zeros((4, N), dtype="int32")
R = np.zeros((4, N), dtype="int32")
p = dict((a, i) for (i, a) in enumerate(pat))
L4, R3 = Counter(), Counter()
widgets = ['Parse reads: ', Percentage(), ' ',
Bar(marker='>', left='[', right=']'), ' ', ETA()]
pr = ProgressBar(maxval=nreads, term_width=60, widgets=widgets).start()
k = 0
fw_L = open("L.fasta", "w")
fw_R = open("R.fasta", "w")
fastq = fastqfile.endswith(".fastq")
it = iter_fastq(fastqfile) if fastq else \
SeqIO.parse(must_open(fastqfile), "fasta")
for rec in it:
k += 1
if k % 1000 == 0:
pr.update(k)
if k > nreads:
break
if rec is None:
break
s = str(rec.seq)
for i, a in enumerate(s[:N]):
if a in p:
a = p[a]
L[a][i] += 1
for j, a in enumerate(s[-N:][::-1]):
if a in p:
a = p[a]
R[a][N - 1 - j] += 1
l4, r3 = s[:4], s[-3:]
L4[l4] += 1
R3[r3] += 1
print(">{0}\n{1}".format(k, s[:N]), file=fw_L)
print(">{0}\n{1}".format(k, s[-N:]), file=fw_R)
fw_L.close()
fw_R.close()
cmd = "weblogo -F png -s large -f {0}.fasta -o {0}.png"
cmd += " --color-scheme classic --composition none -U probability"
cmd += " --title {1}"
sh(cmd.format('L', "First_10_bases"))
sh(cmd.format('R', "Last_10_bases"))
np.savetxt("L.{0}.csv".format(pat), L, delimiter=',', fmt="%d")
np.savetxt("R.{0}.csv".format(pat), R, delimiter=',', fmt="%d")
fw = open("L4.common", "w")
for p, c in L4.most_common(N):
print("\t".join((p, str(c))), file=fw)
fw.close()
fw = open("R3.common", "w")
for p, c in R3.most_common(N):
print("\t".join((p, str(c))), file=fw)
fw.close()
|
def weblogo(args)
|
%prog weblogo [fastafile|fastqfile]
Extract base composition for reads
| 2.469165 | 2.417541 | 1.021354 |
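The per-position tallies in the weblogo row reduce to filling a 4 x N count matrix; a self-contained sketch of just that counting step:
```
import numpy as np

def base_composition(seqs, N=10):
    """Count A/T/C/G occurrences at the first N positions across reads."""
    pat = "ATCG"
    idx = {a: i for i, a in enumerate(pat)}
    L = np.zeros((4, N), dtype="int32")
    for s in seqs:
        for i, a in enumerate(s[:N]):
            if a in idx:  # skip Ns and other ambiguity codes
                L[idx[a], i] += 1
    return L

L = base_composition(["ACGT", "AAGT"], N=4)
assert L[0, 0] == 2 and L[0, 1] == 1  # two A's at pos 0, one A at pos 1
```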
from jcvi.graphics.histogram import stem_leaf_plot
from jcvi.utils.cbook import SummaryStats
p = OptionParser(count.__doc__)
p.add_option("--csv", help="Write depth per contig to file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
csv = open(opts.csv, "w") if opts.csv else None
f = Fasta(fastafile, lazy=True)
sizes = []
for desc, rec in f.iterdescriptions_ordered():
if desc.startswith("singleton"):
sizes.append(1)
continue
# consensus_for_cluster_0 with 63 sequences
if "with" in desc:
name, w, size, seqs = desc.split()
if csv:
print("\t".join(str(x)
for x in (name, size, len(rec))), file=csv)
assert w == "with"
sizes.append(int(size))
# MRD85:00603:02472;size=167;
else:
name, size, tail = desc.split(";")
sizes.append(int(size.replace("size=", "")))
if csv:
csv.close()
logging.debug("File written to `{0}`".format(opts.csv))
s = SummaryStats(sizes)
print(s, file=sys.stderr)
stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size")
|
def count(args)
|
%prog count cdhit.consensus.fasta
Scan the headers for the consensus clusters and count the number of reads.
| 4.316698 | 4.190223 | 1.030183 |
from jcvi.assembly.kmer import jellyfish, histogram
from jcvi.assembly.preprocess import diginorm
from jcvi.formats.fasta import filter as fasta_filter, format
from jcvi.apps.cdhit import filter as cdhit_filter
p = OptionParser(novo.__doc__)
p.add_option("--technology", choices=("illumina", "454", "iontorrent"),
default="iontorrent", help="Sequencing platform")
p.set_depth(depth=50)
p.set_align(pctid=96)
p.set_home("cdhit", default="/usr/local/bin/")
p.set_home("fiona", default="/usr/local/bin/")
p.set_home("jellyfish", default="/usr/local/bin/")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
cpus = opts.cpus
depth = opts.depth
pf, sf = fastqfile.rsplit(".", 1)
diginormfile = pf + ".diginorm." + sf
if need_update(fastqfile, diginormfile):
diginorm([fastqfile, "--single", "--depth={0}".format(depth)])
keepabund = fastqfile + ".keep.abundfilt"
sh("cp -s {0} {1}".format(keepabund, diginormfile))
jf = pf + "-K23.histogram"
if need_update(diginormfile, jf):
jellyfish([diginormfile, "--prefix={0}".format(pf),
"--cpus={0}".format(cpus),
"--jellyfish_home={0}".format(opts.jellyfish_home)])
genomesize = histogram([jf, pf, "23"])
fiona = pf + ".fiona.fa"
if need_update(diginormfile, fiona):
cmd = op.join(opts.fiona_home, "fiona")
cmd += " -g {0} -nt {1} --sequencing-technology {2}".\
format(genomesize, cpus, opts.technology)
cmd += " -vv {0} {1}".format(diginormfile, fiona)
logfile = pf + ".fiona.log"
sh(cmd, outfile=logfile, errfile=logfile)
dedup = "cdhit"
pctid = opts.pctid
cons = fiona + ".P{0}.{1}.consensus.fasta".format(pctid, dedup)
if need_update(fiona, cons):
deduplicate([fiona, "--consensus", "--reads",
"--pctid={0}".format(pctid),
"--cdhit_home={0}".format(opts.cdhit_home)])
filteredfile = pf + ".filtered.fasta"
if need_update(cons, filteredfile):
covfile = pf + ".cov.fasta"
cdhit_filter([cons, "--outfile={0}".format(covfile),
"--minsize={0}".format(depth / 5)])
fasta_filter([covfile, "50", "--outfile={0}".format(filteredfile)])
finalfile = pf + ".final.fasta"
if need_update(filteredfile, finalfile):
format([filteredfile, finalfile, "--sequential=replace",
"--prefix={0}_".format(pf)])
|
def novo(args)
|
%prog novo reads.fastq
Reference-free tGBS pipeline v1.
| 3.297657 | 3.249632 | 1.014779 |
p = OptionParser(novo2.__doc__)
p.set_fastq_names()
p.set_align(pctid=95)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
trimmed, pf = args
pctid = opts.pctid
reads, samples = scan_read_files(trimmed, opts.names)
# Set up directory structure
clustdir = "uclust"
acdir = "allele_counts"
for d in (clustdir, acdir):
mkdir(d)
mm = MakeManager()
clustfiles = []
# Step 0 - clustering within sample
for s in samples:
flist = [x for x in reads if op.basename(x).split(".")[0] == s]
outfile = s + ".P{0}.clustS".format(pctid)
outfile = op.join(clustdir, outfile)
cmd = "python -m jcvi.apps.uclust cluster --cpus=8"
cmd += " {0} {1}".format(s, " ".join(flist))
cmd += " --outdir={0}".format(clustdir)
cmd += " --pctid={0}".format(pctid)
mm.add(flist, outfile, cmd)
clustfiles.append(outfile)
# Step 1 - make consensus within sample
allcons = []
for s, clustfile in zip(samples, clustfiles):
outfile = s + ".P{0}.consensus".format(pctid)
outfile = op.join(clustdir, outfile)
cmd = "python -m jcvi.apps.uclust consensus"
cmd += " {0}".format(clustfile)
mm.add(clustfile, outfile, cmd)
allcons.append(outfile)
# Step 2 - clustering across samples
clustSfile = pf + ".P{0}.clustS".format(pctid)
cmd = "python -m jcvi.apps.uclust mcluster {0}".format(" ".join(allcons))
cmd += " --prefix={0}".format(pf)
mm.add(allcons, clustSfile, cmd)
# Step 3 - make consensus across samples
locifile = pf + ".P{0}.loci".format(pctid)
cmd = "python -m jcvi.apps.uclust mconsensus {0}".format(" ".join(allcons))
cmd += " --prefix={0}".format(pf)
mm.add(allcons + [clustSfile], locifile, cmd)
mm.write()
|
def novo2(args)
|
%prog novo2 trimmed projectname
Reference-free tGBS pipeline v2.
| 2.746471 | 2.677045 | 1.025934 |
p = OptionParser(snpplot.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
# Read in CDT file
fp = open(datafile)
next(fp)
next(fp)
data = []
for row in fp:
atoms = row.split()[4:]
nval = len(atoms)
values = [float(x) for x in atoms]
# normalize
values = [x * 1. / sum(values) for x in values]
data.append(values)
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
xmin, xmax = .1, .9
ymin, ymax = .1, .9
yinterval = (ymax - ymin) / len(data)
colors = "rbg" if nval == 3 else ["lightgray"] + list("rbg")
ystart = ymax
for d in data:
xstart = xmin
for dd, c in zip(d, colors):
xend = xstart + (xmax - xmin) * dd
root.plot((xstart, xend), (ystart, ystart), "-", color=c)
xstart = xend
ystart -= yinterval
root.text(.05, .5, "{0} LMD50 SNPs".format(len(data)),
ha="center", va="center", rotation=90, color="lightslategray")
for x, t, c in zip((.3, .5, .7), ("REF", "ALT", "HET"), "rbg"):
root.text(x, .95, t, color=c, ha="center", va="center")
normalize_axes(root)
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
def snpplot(args)
|
%prog snpplot counts.cdt
Illustrate the histogram per SNP site.
| 2.937291 | 2.811296 | 1.044817 |
p = OptionParser(filterm4.__doc__)
p.add_option("--best", default=1, type="int", help="Only retain best N hits")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
m4file, = args
best = opts.best
fp = open(m4file)
fw = must_open(opts.outfile, "w")
seen = defaultdict(int)
retained = total = 0
for row in fp:
r = M4Line(row)
total += 1
if total % 100000 == 0:
logging.debug("Retained {0} lines".\
format(percentage(retained, total)))
if seen.get(r.query, 0) < best:
fw.write(row)
seen[r.query] += 1
retained += 1
fw.close()
|
def filterm4(args)
|
%prog filterm4 sample.m4 > filtered.m4
Filter .m4 file after blasr is run. As blasr takes a long time to run,
changing -bestn is undesirable. This screens the m4 file to retain top hits.
| 2.586433 | 2.474248 | 1.045341 |
import json
p = OptionParser(spancount.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fof, = args
fp = open(fof)
flist = [row.strip() for row in fp]
spanCount = "spanCount"
avgSpanBases = "avgSpanBases"
fw = open(spanCount, "w")
for f in flist:
fp = open(f)
j = json.load(fp)
sc = j.get(spanCount, None)
asb = j.get(avgSpanBases, None)
print(f, asb, sc, file=fw)
fw.flush()
fw.close()
|
def spancount(args)
|
%prog spancount list_of_fillingMetrics
Count span support for each gap. A file with paths of all fillingMetrics can
be built with Linux `find`.
$ (find assembly -name "fillingMetrics.json" -print > list_of_fillingMetrics 2>
/dev/null &)
| 3.132198 | 2.988448 | 1.048102 |
from jcvi.formats.base import write_file
from jcvi.formats.fasta import format
p = OptionParser(patch.__doc__)
p.add_option("--cleanfasta", default=False, action="store_true",
help="Clean FASTA to remove description [default: %default]")
p.add_option("--highqual", default=False, action="store_true",
help="Reads are of high quality [default: %default]")
p.set_home("pbjelly")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, reads = args
cpus = opts.cpus
cmd = op.join(opts.pbjelly_home, "setup.sh")
setup = "source {0}".format(cmd)
if not which("fakeQuals.py"):
sh(setup)
pf = ref.rsplit(".", 1)[0]
pr, px = reads.rsplit(".", 1)
# Remove description line
if opts.cleanfasta:
oref = pf + ".f.fasta"
oreads = pr + ".f.fasta"
format([ref, oref])
format([reads, oreads])
ref, reads = oref, oreads
# Check if the FASTA has qual
ref, refq = fake_quals(ref)
convert_reads = px not in ("fq", "fastq", "txt")
if convert_reads:
reads, readsq = fake_quals(reads)
readsfiles = " ".join((reads, readsq))
else:
readsfiles = reads
# Make directory structure
dref, dreads = "data/reference", "data/reads"
cwd = os.getcwd()
reference = op.join(cwd, "{0}/{1}".format(dref, ref))
reads = op.join(cwd, "{0}/{1}".format(dreads, reads))
if not op.exists(reference):
sh("mkdir -p {0}".format(dref))
sh("cp {0} {1}/".format(" ".join((ref, refq)), dref))
if not op.exists(reads):
sh("mkdir -p {0}".format(dreads))
sh("cp {0} {1}/".format(readsfiles, dreads))
outputDir = cwd
p = Protocol(outputDir, reference, reads, highqual=opts.highqual)
p.write_xml()
# Build the pipeline
runsh = [setup]
for action in "setup|mapping|support|extraction".split("|"):
runsh.append("Jelly.py {0} Protocol.xml".format(action))
runsh.append('Jelly.py assembly Protocol.xml -x "--nproc={0}"'.format(cpus))
runsh.append("Jelly.py output Protocol.xml")
runfile = "run.sh"
contents = "\n".join(runsh)
write_file(runfile, contents)
|
def patch(args)
|
%prog patch reference.fasta reads.fasta
Run PBJelly with reference and reads.
| 3.633767 | 3.456843 | 1.051181 |
p = OptionParser(prepare.__doc__ + FastqNamings)
p.add_option("-K", default=51, type="int", help="K-mer size")
p.set_cpus(cpus=32)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
genomesize = float(args[0]) / 1000
fnames = args[1:]
for x in fnames:
assert op.exists(x), "File `{0}` not found.".format(x)
s = comment_banner("Meraculous params file") + "\n"
s += comment_banner("Basic parameters") + "\n"
s += "# Describe the libraries ( one line per library )\n"
s += "# " + " ".join(header.split()) + "\n"
libs = get_libs(fnames)
lib_seqs = []
rank = 0
for lib, fs in libs:
size = lib.size
if size == 0:
continue
rank += 1
library_name = lib.library_name
name = library_name.replace("-", "")
wildcard = "{0}*.1.*,{0}*.2.*".format(library_name)
rl = max(readlen([x]) for x in fs)
lib_seq = lib.get_lib_seq(wildcard, name, rl, rank)
lib_seqs.append(lib_seq)
s += "\n" + "\n".join(load_csv(None, lib_seqs, sep=" ")) + "\n"
params = [("genome_size", genomesize),
("is_diploid", 0),
("mer_size", opts.K),
("num_prefix_blocks", 1),
("no_read_validation", 0),
("local_num_procs", opts.cpus)]
s += "\n" + "\n".join(load_csv(None, params, sep=" ")) + "\n"
cfgfile = "meraculous.config"
write_file(cfgfile, s, tee=True)
s = "~/export/meraculous/bin/run_meraculous.sh -c {0}"\
.format(cfgfile)
runsh = "run.sh"
write_file(runsh, s)
|
def prepare(args)
|
%prog prepare genomesize *.fastq
Prepare MERACULOUS configuration file. Genome size should be entered in Mb.
| 4.636864 | 4.170582 | 1.111803 |
from jcvi.apps.fetch import batch_taxonomy
list_of_taxids = [str(x) for x in list_of_taxids]
return list(batch_taxonomy(list_of_taxids))
|
def get_names(list_of_taxids)
|
>>> mylist = [3702, 3649, 3694, 3880]
>>> get_names(mylist)
['Arabidopsis thaliana', 'Carica papaya', 'Populus trichocarpa', 'Medicago truncatula']
| 4.343782 | 4.754012 | 0.913709 |
from jcvi.apps.fetch import batch_taxids
return [int(x) for x in batch_taxids(list_of_names)]
|
def get_taxids(list_of_names)
|
>>> mylist = ['Arabidopsis thaliana', 'Carica papaya']
>>> get_taxids(mylist)
[3702, 3649]
| 6.228539 | 6.724813 | 0.926203 |
from ete2 import Tree
t = TaxIDTree(list_of_taxids)
t = Tree(str(t), format=8)
ancestor = t.get_common_ancestor(*t.get_leaves())
return ancestor.name
|
def MRCA(list_of_taxids)
|
This gets the most recent common ancestor (MRCA) for a list of taxids
>>> mylist = [3702, 3649, 3694, 3880]
>>> MRCA(mylist)
'rosids'
| 4.877177 | 6.150789 | 0.792935 |
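`MRCA` above delegates the tree walk to ete2. The underlying idea, sketched on a plain child-to-parent map (a toy taxonomy with illustrative names only, not real NCBI data):
```
def mrca(parent, a, b):
    """Most recent common ancestor of a and b in a child->parent dict."""
    seen = set()
    while a is not None:   # collect all ancestors of a, including itself
        seen.add(a)
        a = parent.get(a)
    while b not in seen:   # walk up from b until we hit a's lineage
        b = parent[b]
    return b

parent = {"A. thaliana": "rosids", "C. papaya": "rosids",
          "rosids": "eudicots", "eudicots": None}
assert mrca(parent, "A. thaliana", "C. papaya") == "rosids"
```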
assert isinstance(taxid, int)
t = TaxIDTree(taxid)
try:
return "Viridiplantae" in str(t)
except AttributeError:
raise ValueError("{0} is not a valid ID".format(taxid))
|
def isPlantOrigin(taxid)
|
Given a taxid, this gets the expanded tree which can then be checked to
see if the organism is a plant or not
>>> isPlantOrigin(29760)
True
| 5.456123 | 5.706039 | 0.956201 |
p = OptionParser(newick.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
mylist = [x.strip() for x in open(idsfile) if x.strip()]
print(get_taxids(mylist))
t = TaxIDTree(mylist)
print(t)
|
def newick(args)
|
%prog newick idslist
Query a list of IDs to retrieve phylogeny.
| 3.213061 | 2.892598 | 1.110787 |
pf = op.basename(bedfile).split(".")[0]
minibamfile = minibam or op.basename(bamfile).replace(".bam", ".{}.bam".format(pf))
minisamfile = minibamfile.replace(".bam", ".sam") # use the resolved name; minibam may be None
baifile = minibamfile + ".bai"
if op.exists(baifile):
sh("rm {}".format(baifile))
cmd = "samtools view -H {} > {}".format(bamfile, minisamfile)
sh(cmd)
cmd = "cat {}".format(bedfile)
cmd += " | perl -lane 'print \"$F[0]:$F[1]-$F[2]\"'"
cmd += " | xargs -n1 -t -I \{\}"
cmd += " samtools view {}".format(bamfile)
cmd += " \{\} >> " + minisamfile
sh(cmd)
cmd = "samtools view {} -b".format(minisamfile)
cmd += " | samtools sort -"
cmd += " -o {0}".format(minibamfile)
sh(cmd)
sh("samtools index {0}".format(minibamfile))
return minibamfile
|
def get_minibam_bed(bamfile, bedfile, minibam=None)
|
samtools view -L could do the work, but it is NOT random access. Here we
are processing multiple regions sequentially. See also:
https://www.biostars.org/p/49306/
| 3.010196 | 3.125143 | 0.963219 |
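As the note says, `samtools view -L` is not random access. With a coordinate-sorted, indexed BAM, pysam's `fetch` gives true per-region random access in-process; a hedged sketch:
```
import pysam

def minibam_pysam(bamfile, regions, outbam):
    """Write reads overlapping each (chrom, start, end) region to outbam.

    bamfile must be coordinate-sorted and indexed; keep regions
    coordinate-sorted and non-overlapping so the output stays sorted
    and free of duplicate reads.
    """
    with pysam.AlignmentFile(bamfile, "rb") as bam, \
            pysam.AlignmentFile(outbam, "wb", template=bam) as out:
        for chrom, start, end in regions:
            for read in bam.fetch(chrom, start, end):  # uses the .bai index
                out.write(read)
    pysam.index(outbam)
```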
p = OptionParser(fastq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, pf = args
singletons = pf + ".se.fastq"
a = pf + ".read1.fastq"
b = pf + ".read2.fastq"
cmd = "samtools collate -uOn 128 {} tmp-prefix".format(bamfile)
cmd += " | samtools fastq -s {} -1 {} -2 {} -"\
.format(singletons, a, b)
sh(cmd)
if os.stat(singletons).st_size == 0: # singleton file is empty
os.remove(singletons)
return a, b
|
def fastq(args)
|
%prog fastq bamfile prefix
Convert BAM files to paired FASTQ files.
| 4.154544 | 3.969392 | 1.046645 |
p = OptionParser(mini.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, region = args
get_minibam(bamfile, region)
|
def mini(args)
|
%prog mini bamfile region
Extract mini-bam for a single region.
| 2.866166 | 2.009571 | 1.426258 |
p = OptionParser(noclip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
noclipbam = bamfile.replace(".bam", ".noclip.bam")
cmd = "samtools view -h {} | awk -F '\t' '($6 !~ /H|S/)'".format(bamfile)
cmd += " | samtools view -@ 4 -b -o {}".format(noclipbam)
sh(cmd)
sh("samtools index {}".format(noclipbam))
|
def noclip(args)
|
%prog noclip bamfile
Remove clipped reads from BAM.
| 2.648163 | 2.466251 | 1.07376 |
p = OptionParser(append.__doc__)
p.add_option("--prepend", help="Prepend string to read names")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
prepend = opts.prepend
icmd = "samtools view -h {0}".format(bamfile)
bamfile = bamfile.rsplit(".", 1)[0] + ".append.bam"
ocmd = "samtools view -b -@ 64 - -o {0}".format(bamfile)
p = Popen(ocmd, stdin=PIPE)
for row in popen(icmd):
if row[0] == '@':
print(row.strip(), file=p.stdin)
else:
s = SamLine(row)
if prepend:
s.qname = prepend + "_" + s.qname
else:
s.update_readname()
print(s, file=p.stdin)
|
def append(args)
|
%prog append bamfile
Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
training AUGUSTUS gene models.
| 2.759181 | 2.551936 | 1.081211 |
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
bedfile = args[0]
bamfiles = args[1:]
for bamfile in bamfiles:
cmd = "bamToBed -i {0}".format(bamfile)
sh(cmd, outfile=bedfile, append=True)
|
def bed(args)
|
%prog bed bedfile bamfiles
Convert bam files to bed.
| 2.48405 | 2.17779 | 1.140629 |
from jcvi.apps.grid import MakeManager
p = OptionParser(merge.__doc__)
p.set_sep(sep="_", help="Separator to group per prefix")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
merged_bams = args[0]
bamdirs = args[1:]
mkdir(merged_bams)
bams = []
for x in bamdirs:
bams += glob(op.join(x, "*.bam"))
bams = [x for x in bams if "nsorted" not in x]
logging.debug("Found a total of {0} BAM files.".format(len(bams)))
sep = opts.sep
key = lambda x: op.basename(x).split(sep)[0]
bams.sort(key=key)
mm = MakeManager()
for prefix, files in groupby(bams, key=key):
files = sorted(list(files))
nfiles = len(files)
source = " ".join(files)
target = op.join(merged_bams, op.basename(files[0]))
if nfiles == 1:
source = get_abs_path(source)
cmd = "ln -s {0} {1}".format(source, target)
mm.add("", target, cmd)
else:
cmd = "samtools merge -@ 8 {0} {1}".format(target, source)
mm.add(files, target, cmd, remove=True)
mm.write()
|
def merge(args)
|
%prog merge merged_bams bams1_dir bams2_dir ...
Merge BAM files. Treat the bams with the same prefix as a set.
Output the commands first.
| 2.943633 | 2.713904 | 1.084649 |
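The grouping step in isolation, as a runnable sketch with made-up file names: BAMs sharing the same prefix before the separator form one merge set, and singletons are symlinked instead of merged.
import os.path as op
from itertools import groupby

bams = ["run1/s1_rep1.bam", "run2/s1_rep2.bam", "run1/s2_rep1.bam"]
key = lambda x: op.basename(x).split("_")[0]
for prefix, files in groupby(sorted(bams, key=key), key=key):
    files = list(files)
    if len(files) == 1:
        print("ln -s {0} merged_bams/".format(files[0]))
    else:
        print("samtools merge -@ 8 merged_bams/{0}.bam {1}".format(
            prefix, " ".join(files)))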
p = OptionParser(count.__doc__)
p.add_option("--type", default="exon",
help="Only count feature type")
p.set_cpus(cpus=8)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, gtf = args
cpus = opts.cpus
pf = bamfile.split(".")[0]
countfile = pf + ".count"
if not need_update(bamfile, countfile):
return
nsorted = pf + "_nsorted"
nsortedbam, nsortedsam = nsorted + ".bam", nsorted + ".sam"
if need_update(bamfile, nsortedsam):
cmd = "samtools sort -@ {0} -n {1} {2}".format(cpus, bamfile, nsorted)
sh(cmd)
cmd = "samtools view -@ {0} -h {1}".format(cpus, nsortedbam)
sh(cmd, outfile=nsortedsam)
if need_update(nsortedsam, countfile):
cmd = "htseq-count --stranded=no --minaqual=10"
cmd += " -t {0}".format(opts.type)
cmd += " {0} {1}".format(nsortedsam, gtf)
sh(cmd, outfile=countfile)
|
def count(args)
|
%prog count bamfile gtf
Count the number of reads mapped using `htseq-count`.
| 2.699282 | 2.489758 | 1.084154 |
p = OptionParser(coverage.__doc__)
p.add_option("--format", default="bigwig",
choices=("bedgraph", "bigwig", "coverage"),
help="Output format")
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort BAM")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
format = opts.format
if opts.nosort:
logging.debug("BAM sorting skipped")
else:
bamfile = index([bamfile, "--fasta={0}".format(fastafile)])
pf = bamfile.rsplit(".", 2)[0]
sizesfile = Sizes(fastafile).filename
cmd = "genomeCoverageBed -ibam {0} -g {1}".format(bamfile, sizesfile)
if format in ("bedgraph", "bigwig"):
cmd += " -bg"
bedgraphfile = pf + ".bedgraph"
sh(cmd, outfile=bedgraphfile)
if format == "bedgraph":
return bedgraphfile
bigwigfile = pf + ".bigwig"
cmd = "bedGraphToBigWig {0} {1} {2}".\
format(bedgraphfile, sizesfile, bigwigfile)
sh(cmd)
return bigwigfile
coveragefile = pf + ".coverage"
if need_update(fastafile, coveragefile):
sh(cmd, outfile=coveragefile)
gcf = GenomeCoverageFile(coveragefile)
fw = must_open(opts.outfile, "w")
for seqid, cov in gcf.iter_coverage_seqid():
print("\t".join((seqid, "{0:.1f}".format(cov))), file=fw)
fw.close()
|
def coverage(args)
|
%prog coverage fastafile bamfile
Calculate coverage for BAM file. BAM file will be sorted unless with
--nosort.
| 2.754091 | 2.551785 | 1.07928 |
p = OptionParser(fpkm.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile = args[0]
bamfiles = args[1:]
# Create a DUMMY gff file for cuffdiff
gffile = fastafile.rsplit(".", 1)[0] + ".gff"
if need_update(fastafile, gffile):
fw = open(gffile, "w")
f = Fasta(fastafile, lazy=True)
for key, size in f.itersizes_ordered():
print("\t".join(str(x) for x in (key, "dummy", "transcript",\
1, size, ".", ".", ".", "ID=" + key)), file=fw)
fw.close()
logging.debug("Dummy GFF created: {0}".format(gffile))
cmd = "cuffdiff {0} {1}".format(gffile, " ".join(bamfiles))
sh(cmd)
|
def fpkm(args)
|
%prog fpkm fastafile *.bam
Calculate FPKM values from BAM file.
| 3.197411 | 3.005013 | 1.064026 |
import jcvi.formats.bed
p = OptionParser(pairs.__doc__)
p.set_pairs()
opts, targs = p.parse_args(args)
if len(targs) != 1:
sys.exit(not p.print_help())
samfile, = targs
bedfile = samfile.rsplit(".", 1)[0] + ".bed"
if need_update(samfile, bedfile):
cmd = "bamToBed -i {0}".format(samfile)
sh(cmd, outfile=bedfile)
args[args.index(samfile)] = bedfile
return jcvi.formats.bed.pairs(args)
|
def pairs(args)
|
See __doc__ for OptionParser.set_pairs().
| 3.201763 | 2.990763 | 1.070551 |
p = OptionParser(consensus.__doc__)
p.add_option("--fasta", default=False, action="store_true",
help="Generate consensus FASTA sequences [default: %default]")
p.add_option("--mask", default=0, type="int",
help="Mask bases with quality lower than")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile, bamfile = args
fasta = opts.fasta
suffix = "fasta" if fasta else "fastq"
pf = bamfile.rsplit(".", 1)[0]
cnsfile = pf + ".cns.{0}".format(suffix)
vcfgzfile = pf + ".vcf.gz"
vcf([fastafile, bamfile, "-o", vcfgzfile])
cmd += "zcat {0} | vcfutils.pl vcf2fq".format(vcfgzfile)
if fasta:
cmd += " | seqtk seq -q {0} -A -".format(opts.mask)
sh(cmd, outfile=cnsfile)
|
def consensus(args)
|
%prog consensus fastafile bamfile
Convert bam alignments to consensus FASTQ/FASTA.
| 3.445184 | 3.160402 | 1.09011 |
from jcvi.apps.grid import Jobs
valid_callers = ("mpileup", "freebayes")
p = OptionParser(vcf.__doc__)
p.set_outfile(outfile="out.vcf.gz")
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort the BAM files")
p.add_option("--caller", default="mpileup", choices=valid_callers,
help="Use variant caller [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
fastafile = args[0]
bamfiles = args[1:]
caller = opts.caller
unsorted = [x for x in bamfiles if ".sorted." not in x]
if opts.nosort:
bamfiles = unsorted
else:
jargs = [[[x, "--unique"]] for x in unsorted]
jobs = Jobs(index, args=jargs)
jobs.run()
bamfiles = [x.replace(".sorted.bam", ".bam") for x in bamfiles]
bamfiles = [x.replace(".bam", ".sorted.bam") for x in bamfiles]
if caller == "mpileup":
cmd = "samtools mpileup -E -uf"
cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
cmd += " | bcftools call -vmO v"
elif caller == "freebayes":
cmd = "freebayes -f"
cmd += " {0} {1}".format(fastafile, " ".join(bamfiles))
sh(cmd, outfile=opts.outfile)
|
def vcf(args)
|
%prog vcf fastafile bamfiles > out.vcf.gz
Call SNPs on bam files.
| 2.538849 | 2.377097 | 1.068046 |
import pysam
from jcvi.utils.natsort import natsorted
p = OptionParser(chimera.__doc__)
p.set_verbose()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
samfile, = args
samfile = pysam.AlignmentFile(samfile)
rstore = defaultdict(list)
hstore = defaultdict(int)
for r in samfile.fetch():
rstore[r.query_name] += list(breakpoint(r))
hstore[r.query_name] += 1
if opts.verbose:
print(r.query_name, "+-"[r.is_reverse], \
sum(l for o, l in r.cigartuples), r.cigarstring, list(breakpoint(r)), file=sys.stderr)
for rn, bps in natsorted(rstore.items()):
bps = "|".join(str(x) for x in sorted(bps)) if bps else "na"
print("\t".join((rn, str(hstore[rn]), bps)))
|
def chimera(args)
|
%prog chimera bamfile
Parse BAM file from `bwasw` and list multi-hit reads and breakpoints.
| 2.972091 | 2.838223 | 1.047166 |
p = OptionParser(index.__doc__)
p.add_option("--fasta", dest="fasta", default=None,
help="add @SQ header to the BAM file [default: %default]")
p.add_option("--unique", default=False, action="store_true",
help="only retain uniquely mapped reads [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
samfile, = args
cpus = opts.cpus
fastafile = opts.fasta
if fastafile:
assert op.exists(fastafile)
bamfile = samfile.replace(".sam", ".bam")
if fastafile:
faifile = fastafile + ".fai"
if need_update(fastafile, faifile):
sh("samtools faidx {0}".format(fastafile))
cmd = "samtools view -bt {0} {1} -o {2}".\
format(faifile, samfile, bamfile)
else:
cmd = "samtools view -bS {0} -o {1}".\
format(samfile, bamfile)
cmd += " -@ {0}".format(cpus)
if opts.unique:
cmd += " -q 1"
if samfile.endswith(".sam") and need_update(samfile, bamfile):
sh(cmd)
# Already sorted?
if bamfile.endswith(".sorted.bam"):
sortedbamfile = bamfile
else:
prefix = bamfile.replace(".bam", "")
sortedbamfile = prefix + ".sorted.bam"
if need_update(bamfile, sortedbamfile):
cmd = "samtools sort {0} -o {1}".format(bamfile, sortedbamfile)
cmd += " -@ {0}".format(cpus)
sh(cmd)
baifile = sortedbamfile + ".bai"
if need_update(sortedbamfile, baifile):
sh("samtools index {0}".format(sortedbamfile))
return sortedbamfile
|
def index(args)
|
%prog index samfile/bamfile
If given a SAM file, convert it to BAM, then sort and index it using samtools.
| 2.178547 | 2.106593 | 1.034157 |
import pysam
from jcvi.apps.grid import Jobs
p = OptionParser(mapped.__doc__)
p.set_sam_options(extra=False)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
samfile, = args
view_opts = []
oext, mopts = (".sam", ["-S"]) \
if samfile.endswith(".sam") else (".bam", [])
flag, ext = ("-b", ".bam") if opts.bam else ("-h", ".sam")
mopts.append(flag)
if opts.uniq:
mopts.append("-q1")
ext = ".uniq{0}".format(ext)
if opts.unmapped:
uopts = [x for x in mopts]
uoutfile = samfile.replace(oext, ".unmapped{0}".format(ext))
uopts.extend(["-f4", samfile, "-o{0}".format(uoutfile)])
view_opts.append(uopts)
outfile = samfile.replace(oext, ".mapped{0}".format(ext))
mopts.extend(["-F4", samfile, "-o{0}".format(outfile)])
view_opts.append(mopts)
for vo in view_opts:
logging.debug('samtools view {0}'.format(" ".join(vo)))
jobs = Jobs(pysam.view, [(z for z in x) for x in view_opts])
jobs.run()
|
def mapped(args)
|
%prog mapped sam/bamfile
Given an input sam/bam file, output a sam/bam file containing only the mapped reads.
Optionally, extract the unmapped reads into a separate file
| 3.412084 | 3.344686 | 1.020151 |
p = OptionParser(pair.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
def callback(s):
print(s.pairline)
Sam(args[0], callback=callback)
|
def pair(args)
|
%prog pair samfile
Parse the SAM file and report each alignment in pairs format:
query:pos ref:pos
| 3.742981 | 3.943785 | 0.949083 |
seq, cigar = a.seq, a.cigar
start = 0
subseqs = []
npadded = 0
if cigar is None:
return None, npadded
for operation, length in cigar:
end = start if operation == 2 else start + length
if operation == 0: # match
subseq = seq[start:end]
elif operation == 1: # insertion
subseq = ""
elif operation == 2: # deletion
subseq = gap * length
npadded += length
elif operation == 3: # skipped
subseq = 'N' * length
elif operation in (4, 5): # clip
subseq = ""
else:
raise NotImplementedError
subseqs.append(subseq)
start = end
return "".join(subseqs), npadded
|
def cigar_to_seq(a, gap='*')
|
Accepts a pysam aligned read.
The CIGAR alignment is presented as a list of (operation, length) tuples. For
example, the list [ (0,3), (1,5), (0,2) ] refers to an alignment with 3
matches, 5 insertions and another 2 matches.
Op BAM Description
M 0 alignment match (can be a sequence match or mismatch)
I 1 insertion to the reference
D 2 deletion from the reference
N 3 skipped region from the reference
S 4 soft clipping (clipped sequences present in SEQ)
H 5 hard clipping (clipped sequences NOT present in SEQ)
P 6 padding (silent deletion from padded reference)
= 7 sequence match
X 8 sequence mismatch
Convert the sequence based on the CIGAR string. For example:
| 2.593589 | 2.645809 | 0.980263 |
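A self-contained restatement of the expansion logic above, runnable without pysam; the namedtuple stands in for an aligned read (only .seq and .cigar are used):
from collections import namedtuple

def expand_cigar(a, gap='*'):
    seq, out, start, npadded = a.seq, [], 0, 0
    for op_, length in a.cigar:
        end = start if op_ == 2 else start + length
        if op_ == 0:              # match: emit read bases
            out.append(seq[start:end])
        elif op_ == 2:            # deletion: pad with gap characters
            out.append(gap * length)
            npadded += length
        elif op_ == 3:            # skipped region
            out.append('N' * length)
        # insertion (1) and clips (4, 5) advance without emitting
        start = end
    return "".join(out), npadded

Read = namedtuple("Read", ["seq", "cigar"])
# 3 matches, a 2-base deletion, 2 more matches
print(expand_cigar(Read("ACGTT", [(0, 3), (2, 2), (0, 2)])))  # ('ACG**TT', 2)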
p = OptionParser(dump.__doc__)
p.add_option("--dir",
help="Working directory [default: %default]")
p.add_option("--nosim", default=False, action="store_true",
help="Do not simulate qual to 50 [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastbfile, = args
d = opts.dir
if d:
from jcvi.assembly.preprocess import export_fastq
rc = "jump" in fastbfile
export_fastq(d, fastbfile, rc=rc)
return
sim = not opts.nosim
pf = "j" if "jump" in fastbfile else "f"
statsfile = "{0}.lib_stats".format(pf)
if op.exists(statsfile):
os.remove(statsfile)
cmd = "SplitReadsByLibrary READS_IN={0}".format(fastbfile)
cmd += " READS_OUT={0} QUALS=True".format(pf)
sh(cmd)
libs = []
fp = open(statsfile)
next(fp); next(fp) # skip two rows
for row in fp:
if row.strip() == "":
continue
libname = row.split()[0]
if libname == "Unpaired":
continue
libs.append(libname)
logging.debug("Found libraries: {0}".format(",".join(libs)))
cmds = []
for libname in libs:
cmd = "FastbQualbToFastq"
cmd += " HEAD_IN={0}.{1}.AB HEAD_OUT={1}".format(pf, libname)
cmd += " PAIRED=True PHRED_OFFSET=33"
if sim:
cmd += " SIMULATE_QUALS=True"
if pf == 'j':
cmd += " FLIP=True"
cmds.append((cmd, ))
m = Jobs(target=sh, args=cmds)
m.run()
for libname in libs:
cmd = "mv {0}.A.fastq {0}.1.fastq".format(libname)
sh(cmd)
cmd = "mv {0}.B.fastq {0}.2.fastq".format(libname)
sh(cmd)
|
def dump(args)
|
%prog dump fastbfile
Export ALLPATHS fastb file to fastq file. Use --dir to indicate a previously
run allpaths folder.
| 3.979665 | 3.703243 | 1.074643 |
p = OptionParser(fixpairs.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
pairsfile, sep, sd = args
newpairsfile = pairsfile.rsplit(".", 1)[0] + ".new.pairs"
sep = int(sep)
sd = int(sd)
p = PairsFile(pairsfile)
p.fixLibraryStats(sep, sd)
p.write(newpairsfile)
|
def fixpairs(args)
|
%prog fixpairs pairsfile sep sd
Fix pairs library stats. This is sometimes useful for modifying library
stats, for example the separation between paired reads, after importing the
data.
| 2.830066 | 2.018937 | 1.40176 |
p = OptionParser(fill.__doc__)
p.add_option("--stretch", default=3, type="int",
help="MAX_STRETCH to pass to FillFragments [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastb, = args
assert fastb == "frag_reads_corr.fastb"
pcfile = "frag_reads_corr.k28.pc.info"
nthreads = " NUM_THREADS={0}".format(opts.cpus)
maxstretch = " MAX_STRETCH={0}".format(opts.stretch)
if need_update(fastb, pcfile):
cmd = "PathReads READS_IN=frag_reads_corr"
cmd += nthreads
sh(cmd)
filledfastb = "filled_reads.fastb"
if need_update(pcfile, filledfastb):
cmd = "FillFragments PAIRS_OUT=frag_reads_corr_cpd"
cmd += " PRECORRECT_LIBSTATS=True"
cmd += maxstretch
cmd += nthreads
sh(cmd)
filledfasta = "filled_reads.fasta"
if need_update(filledfastb, filledfasta):
cmd = "Fastb2Fasta IN=filled_reads.fastb OUT=filled_reads.fasta"
sh(cmd)
|
def fill(args)
|
%prog fill frag_reads_corr.fastb
Run FillFragments on `frag_reads_corr.fastb`.
| 4.327679 | 3.523796 | 1.22813 |
from itertools import islice
from six.moves import zip as izip

fp = open(fastqfile)
currentID = 0
npairs = nfrags = 0
for x, lib in izip(p.r1, p.libs):
while currentID != x:
fragsfw.writelines(islice(fp, 4)) # Exhaust the iterator
currentID += 1
nfrags += 1
a = list(islice(fp, 4))
b = list(islice(fp, 4))
if suffix:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
else:
b[0] = a[0] # Keep same read ID for pairs
p1fw[lib].writelines(a)
p2fw[lib].writelines(b)
currentID += 2
npairs += 2
# Write the remaining single reads
while True:
contents = list(islice(fp, 4))
if not contents:
break
fragsfw.writelines(contents)
nfrags += 1
logging.debug("A total of {0} paired reads written to `{1}`.".\
format(npairs, ",".join(x.name for x in p1fw + p2fw)))
logging.debug("A total of {0} single reads written to `{1}`.".\
format(nfrags, fragsfw.name))
# Validate the numbers
expected_pairs = 2 * p.npairs
expected_frags = p.nreads - 2 * p.npairs
assert npairs == expected_pairs, "Expect {0} paired reads, got {1} instead".\
format(expected_pairs, npairs)
assert nfrags == expected_frags, "Expect {0} single reads, got {1} instead".\
format(expected_frags, nfrags)
|
def extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=False)
|
Take fastqfile and an array of pair IDs, and extract adjacent pairs to the
output files. Perform checks on the numbers when done. p1fw, p2fw are lists
of file handles, one per library end. p is a PairsFile instance.
| 3.009779 | 2.886416 | 1.042739 |
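The chunked reading used above, shown in isolation: islice(fp, 4) pulls the next four-line FASTQ record off a shared iterator without seeking, so pairs and singletons can be peeled off the same file handle.
from itertools import islice
from io import StringIO

fp = StringIO("@r1\nACGT\n+\nIIII\n@r2\nTTTT\n+\nIIII\n")
while True:
    rec = list(islice(fp, 4))
    if not rec:
        break
    print(rec[0].strip(), rec[1].strip())  # read name, sequence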
from jcvi.assembly.preprocess import run_FastbAndQualb2Fastq
p = OptionParser(pairs.__doc__)
p.add_option("--header", default=False, action="store_true",
help="Print header only [default: %default]")
p.add_option("--suffix", default=False, action="store_true",
help="Add suffix /1, /2 to read names")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, fastqfile = args
pf = op.basename(fastqfile).split(".")[0]
p = PairsFile(pairsfile)
print(p.header, file=sys.stderr)
if opts.header:
return
if fastqfile.endswith(".fastb"):
fastbfile = fastqfile
fastqfile = fastbfile.replace(".fastb", ".fastq")
run_FastbAndQualb2Fastq(infile=fastbfile, outfile=fastqfile)
p1file = "{0}.1.corr.fastq"
p2file = "{0}.2.corr.fastq"
fragsfile = "{0}.corr.fastq"
p1fw = [open(p1file.format(x), "w") for x in p.libnames]
p2fw = [open(p2file.format(x), "w") for x in p.libnames]
fragsfw = open(fragsfile.format(pf), "w")
extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=opts.suffix)
|
def pairs(args)
|
%prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write paired reads and single reads to their
respective files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`, and a single
`frags.fastq` (with single reads from lib1/2).
| 3.058682 | 2.720573 | 1.124279 |
from jcvi.algorithms.graph import nx, topological_sort
p = OptionParser(log.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
g = nx.DiGraph()
logfile, = args
fp = open(logfile)
row = fp.readline()
incalling = False
basedb = {}
while row:
atoms = row.split()
if len(atoms) < 3:
row = fp.readline()
continue
tag, token, trailing = atoms[0], atoms[1], atoms[-1]
if trailing == 'file(s):':
numfiles = int(atoms[-2])
row = fp.readline()
assert row.strip() == tag
if token == "Calling" and not incalling:
createfiles = []
for i in xrange(numfiles):
row = fp.readline()
createfiles.append(row.split()[-1])
incalling = True
if token == "from" and incalling:
fromfiles = []
for i in xrange(numfiles):
row = fp.readline()
fromfiles.append(row.split()[-1])
for a in fromfiles:
for b in createfiles:
ba, bb = op.basename(a), op.basename(b)
basedb[ba] = a
basedb[bb] = b
g.add_edge(ba, bb)
incalling = False
if token == "ln":
fromfile, createfile = atoms[-2:]
ba, bb = op.basename(fromfile), op.basename(createfile)
#print ba, "-->", bb
if ba != bb:
g.add_edge(ba, bb)
row = fp.readline()
ts = [basedb[x] for x in topological_sort(g) if x in basedb]
print("\n".join(ts))
|
def log(args)
|
%prog log logfile
Prepare a log of created files, ordered by their creation date. The purpose
of this script is to touch these files sequentially to reflect their build
order. On the JCVI scratch area, the files are touched regularly to avoid
getting deleted, losing their respective timestamps. However, this creates a
problem for the make system adopted by ALLPATHS.
An example block to be extracted ==>
[PC] Calling PreCorrect to create 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_prec.fastb
[PC] $(RUN)/frag_reads_prec.qualb
[PC]
[PC] from 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_filt.fastb
[PC] $(RUN)/frag_reads_filt.qualb
| 3.025777 | 2.861281 | 1.05749 |
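The ordering step can be sketched with networkx directly (the code above uses jcvi's own topological_sort wrapper); edges and file names below are made up:
import networkx as nx

g = nx.DiGraph()
g.add_edge("frag_reads_filt.fastb", "frag_reads_prec.fastb")
g.add_edge("frag_reads_prec.fastb", "frag_reads_corr.fastb")
for f in nx.topological_sort(g):
    print("touch {0}".format(f))  # touch in build order to restore timestamps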
mapping = self._mapping
set_a = mapping.setdefault(a, [a])
for arg in args:
set_b = mapping.get(arg)
if set_b is None:
set_a.append(arg)
mapping[arg] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
|
def join(self, a, *args)
|
Join given arguments into the same set. Accepts one or more arguments.
| 2.136494 | 2.066672 | 1.033785 |
mapping = self._mapping
try:
return mapping[a] is mapping[b]
except KeyError:
return False
|
def joined(self, a, b)
|
Returns True if a and b are members of the same set.
| 5.1512 | 4.301717 | 1.197475 |
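Usage of the two methods above, wrapped in a minimal class so the example runs standalone (the actual jcvi class carries more methods than this sketch):
class Grouper(object):
    """Disjoint sets via shared lists, as in join()/joined() above."""
    def __init__(self):
        self._mapping = {}

    def join(self, a, *args):
        mapping = self._mapping
        set_a = mapping.setdefault(a, [a])
        for arg in args:
            set_b = mapping.get(arg)
            if set_b is None:
                set_a.append(arg)
                mapping[arg] = set_a
            elif set_b is not set_a:
                if len(set_b) > len(set_a):
                    set_a, set_b = set_b, set_a
                set_a.extend(set_b)
                for elem in set_b:
                    mapping[elem] = set_a

    def joined(self, a, b):
        try:
            return self._mapping[a] is self._mapping[b]
        except KeyError:
            return False

g = Grouper()
g.join("a", "b")
g.join("c", "d")
g.join("b", "c")           # merges the two sets
print(g.joined("a", "d"))  # True
print(g.joined("a", "e"))  # False: "e" was never joined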
from csv import reader
from xlwt import Workbook, easyxf
from jcvi.formats.base import flexible_cast
p = OptionParser(fromcsv.__doc__)
p.add_option("--noheader", default=False, action="store_true",
help="Do not treat the first row as header")
p.add_option("--rgb", default=-1, type="int",
help="Show RGB color box")
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
header = not opts.noheader
rgb = opts.rgb
excelfile = csvfile.rsplit(".", 1)[0] + ".xls"
data = []
for row in reader(open(csvfile), delimiter=opts.sep):
data.append(row)
w = Workbook()
s = w.add_sheet(op.basename(csvfile))
header_style = easyxf('font: bold on')
if header:
s.panes_frozen = True
s.horz_split_pos = 1
cm = ColorMatcher()
for i, row in enumerate(data):
for j, cell in enumerate(row):
cell = flexible_cast(cell)
if header and i == 0:
s.write(i, j, cell, header_style)
else:
if j == rgb:
cix = cm.match_color_index(cell)
color_style = easyxf('font: color_index {0}'.format(cix))
s.write(i, j, cell, color_style)
else:
s.write(i, j, cell)
w.save(excelfile)
logging.debug("File written to `{0}`.".format(excelfile))
return excelfile
|
def fromcsv(args)
|
%prog fromcsv csvfile
Convert csv file to EXCEL.
| 2.425553 | 2.346614 | 1.033639 |
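The xlwt calls above reduce to a few lines; a minimal sketch with made-up data, writing a bold header row:
from xlwt import Workbook, easyxf

w = Workbook()
s = w.add_sheet("data")
bold = easyxf("font: bold on")
rows = [["name", "score"], ["alpha", 1], ["beta", 2]]
for i, row in enumerate(rows):
    for j, cell in enumerate(row):
        if i == 0:
            s.write(i, j, cell, bold)  # header row in bold
        else:
            s.write(i, j, cell)
w.save("data.xls")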
from xlrd import open_workbook
p = OptionParser(csv.__doc__)
p.set_sep(sep=',')
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
excelfile, = args
sep = opts.sep
csvfile = excelfile.rsplit(".", 1)[0] + ".csv"
wb = open_workbook(excelfile)
fw = open(csvfile, "w")
for s in wb.sheets():
print('Sheet:', s.name, file=sys.stderr)
for row in range(s.nrows):
values = []
for col in range(s.ncols):
values.append(s.cell(row, col).value)
print(sep.join(str(x) for x in values), file=fw)
|
def csv(args)
|
%prog csv excelfile
Convert EXCEL to csv file.
| 2.162119 | 1.981444 | 1.091184 |
from jcvi.utils.webcolors import color_diff
if isinstance(color, int):
return color
if color:
if isinstance(color, six.string_types):
rgb = map(int, color.split(','))
else:
rgb = color.Get()
logging.disable(logging.DEBUG)
distances = [color_diff(rgb, x) for x in self.xlwt_colors]
logging.disable(logging.NOTSET)
result = distances.index(min(distances))
self.unused_colors.discard(self.xlwt_colors[result])
return result
|
def match_color_index(self, color)
|
Takes an "R,G,B" string or wx.Color and returns a matching xlwt
color.
| 3.932989 | 3.219529 | 1.221604 |
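The matching idea in isolation: pick the palette index whose RGB is closest to the input. Squared Euclidean distance stands in here for webcolors' color_diff, and the palette is a made-up subset:
palette = {0: (0, 0, 0), 1: (255, 255, 255), 2: (255, 0, 0), 3: (0, 128, 0)}

def match_color(rgb):
    dist = lambda a, b: sum((x - y) ** 2 for x, y in zip(a, b))
    return min(palette, key=lambda i: dist(palette[i], rgb))

print(match_color((250, 10, 5)))  # 2, the red entry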
if not self.unused_colors:
# If we somehow run out of colors, reset the color matcher.
self.reset()
used_colors = [c for c in self.xlwt_colors if c not in self.unused_colors]
result_color = max(self.unused_colors,
key=lambda c: min(self.color_distance(c, c2)
for c2 in used_colors))
result_index = self.xlwt_colors.index(result_color)
self.unused_colors.discard(result_color)
return result_index
|
def get_unused_color(self)
|
Returns an xlwt color index that has not been previously returned by
this instance. Attempts to maximize the distance between the color and
all previously used colors.
| 3.634894 | 3.011501 | 1.207004 |
import pyfasta
p = OptionParser(validate.__doc__)
p.add_option("--prefix", help="Add prefix to seqid")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, fastafile = args
pf = opts.prefix
genome = pyfasta.Fasta(fastafile, record_class=pyfasta.MemoryRecord)
fp = must_open(vcffile)
match_ref = match_alt = total = 0
for row in fp:
if row[0] == '#':
continue
seqid, pos, id, ref, alt = row.split()[:5]
total += 1
if pf:
seqid = pf + seqid
pos = int(pos)
if seqid not in genome:
continue
true_ref = genome[seqid][pos - 1]
if total % 100000 == 0:
print(total, "sites parsed", file=sys.stderr)
if ref == true_ref:
match_ref += 1
elif alt == true_ref:
match_alt += 1
logging.debug("Match REF: {}".format(percentage(match_ref, total)))
logging.debug("Match ALT: {}".format(percentage(match_alt, total)))
|
def validate(args)
|
%prog validate input.vcf genome.fasta
Validate the REF alleles in the VCF file against the genome FASTA, reporting
how often REF and ALT match the reference base.
| 3.212683 | 2.987828 | 1.075257 |
from six.moves.urllib.parse import parse_qs
p = OptionParser(uniq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = must_open(vcffile)
data = []
for row in fp:
if row[0] == '#':
print(row.strip())
continue
v = VcfLine(row)
data.append(v)
for pos, vv in groupby(data, lambda x: x.pos):
vv = list(vv)
if len(vv) == 1:
print(vv[0])
continue
bestv = max(vv, key=lambda x: float(parse_qs(x.info)["R2"][0]))
print(bestv)
|
def uniq(args)
|
%prog uniq vcffile
Retain only one entry per position in the vcf file, keeping the record with
the highest R2 score.
| 2.859519 | 2.571161 | 1.112151 |
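The de-duplication pattern in miniature: groupby assumes records are already sorted by position (as a VCF is), and max picks the best-scoring record per group. Data below is made up.
from itertools import groupby

records = [(100, 0.80), (100, 0.95), (200, 0.50)]  # (pos, R2), position-sorted
for pos, vv in groupby(records, key=lambda x: x[0]):
    print(max(vv, key=lambda x: x[1]))  # (100, 0.95) then (200, 0.5)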
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld))
|
def sample(args)
|
%prog sample vcffile 0.9
Sample subset of vcf file.
| 2.277628 | 2.11774 | 1.075499 |
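The split logic above, file-free: each record goes to the kept pile independently with probability ratio, so the split is approximate rather than exact.
from random import random, seed

seed(42)
ratio, kept, withheld = 0.9, [], []
for rec in range(1000):
    (kept if random() < ratio else withheld).append(rec)
print(len(kept), len(withheld))  # roughly 900 / 100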
p = OptionParser(fromimpute2.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
impute2file, fastafile, chr = args
fasta = Fasta(fastafile)
print(get_vcfstanza(fastafile, fasta))
fp = open(impute2file)
seen = set()
for row in fp:
snp_id, rsid, pos, ref, alt, aa, ab, bb = row.split()
pos = int(pos)
if pos in seen:
continue
seen.add(pos)
code = max((float(aa), "0/0"), (float(ab), "0/1"), (float(bb), "1/1"))[-1]
tag = "PR" if snp_id == chr else "IM"
print("\t".join(str(x) for x in \
(chr, pos, rsid, ref, alt, ".", ".", tag, \
"GT:GP", code + ":" + ",".join((aa, ab, bb)))))
|
def fromimpute2(args)
|
%prog fromimpute2 impute2file fastafile 1
Convert impute2 output to vcf file. Imputed file looks like:
--- 1:10177:A:AC 10177 A AC 0.451 0.547 0.002
| 3.936787 | 3.635537 | 1.082862 |
p = OptionParser(refallele.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = open(vcffile)
for row in fp:
if row[0] == '#':
continue
atoms = row.split()
marker = "{0}:{1}".format(*atoms[:2])
ref = atoms[3]
print("\t".join((marker, ref)))
|
def refallele(args)
|
%prog refallele vcffile > out.refAllele
Make refAllele file which can be used to convert PLINK file to VCF file.
| 2.737516 | 2.509386 | 1.090911 |
from jcvi.formats.bed import BedLine
from jcvi.graphics.histogram import stem_leaf_plot
p = OptionParser(location.__doc__)
p.add_option("--dist", default=100, type="int",
help="Distance cutoff to call 5` and 3` [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
dist = opts.dist
sizes = Sizes(fastafile).mapping
fp = open(bedfile)
fiveprime = threeprime = total = 0
percentages = []
for row in fp:
b = BedLine(row)
pos = b.start
size = sizes[b.seqid]
if pos < dist:
fiveprime += 1
if size - pos < dist:
threeprime += 1
total += 1
percentages.append(100 * pos / size)
m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
m += "Total: {0}".format(total)
print(m, file=sys.stderr)
bins = 10
title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
stem_leaf_plot(percentages, 0, 100, bins, title=title)
|
def location(args)
|
%prog location bedfile fastafile
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs.
| 2.937487 | 2.825239 | 1.039731 |
atoms = s.split(":")
if len(atoms) < 3:
return g2x[atoms[0]]
inferred = atoms[0]
depth = int(atoms[depth_index])
if depth < mindepth:
return '-'
if inferred == '0/0':
return 'A'
if inferred == '0/1':
return '-' if nohet else 'X'
if inferred == '1/1':
return 'B'
return '-'
|
def encode_genotype(s, mindepth=3, depth_index=2, nohet=False)
|
>>> encode_genotype("1/1:128,18,0:6:18") # homozygote B
'B'
>>> encode_genotype("0/1:0,0,0:0:3") # missing data
'-'
>>> encode_genotype("0/1:128,0,26:7:22") # heterozygous A/B
'X'
| 3.569856 | 3.442067 | 1.037126 |
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--dh", default=False, action="store_true",
help="Double haploid population, no het [default: %default]")
p.add_option("--freq", default=.2, type="float",
help="Allele must be above frequency [default: %default]")
p.add_option("--mindepth", default=3, type="int",
help="Only trust genotype calls with depth [default: %default]")
p.add_option("--missing_threshold", default=.25, type="float",
help="Fraction missing must be below")
p.add_option("--noheader", default=False, action="store_true",
help="Do not print MSTmap run parameters [default: %default]")
p.add_option("--pv4", default=False, action="store_true",
help="Enable filtering strand-bias, tail distance bias, etc. "
"[default: %default]")
p.add_option("--freebayes", default=False, action="store_true",
help="VCF output from freebayes")
p.set_sep(sep=".", help="Use separator to simplify individual names")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
if vcffile.endswith(".bcf"):
bcffile = vcffile
vcffile = bcffile.rsplit(".", 1)[0] + ".vcf"
cmd = "bcftools view {0}".format(bcffile)
cmd += " | vcfutils.pl varFilter"
if not opts.pv4:
cmd += " -1 0 -2 0 -3 0 -4 0 -e 0"
if need_update(bcffile, vcffile):
sh(cmd, outfile=vcffile)
freq = opts.freq
sep = opts.sep
depth_index = 1 if opts.freebayes else 2
ptype = "DH" if opts.dh else "RIL6"
nohet = ptype == "DH"
fp = open(vcffile)
genotypes = []
for row in fp:
if row[:2] == "##":
continue
atoms = row.split()
if row[0] == '#':
ind = [x.split(sep)[0] for x in atoms[9:]]
nind = len(ind)
mh = ["locus_name"] + ind
continue
marker = "{0}.{1}".format(*atoms[:2])
geno = atoms[9:]
geno = [encode_genotype(x, mindepth=opts.mindepth,
depth_index=depth_index,
nohet=nohet) for x in geno]
assert len(geno) == nind
f = 1. / nind
if geno.count("A") * f < freq:
continue
if geno.count("B") * f < freq:
continue
if geno.count("-") * f > opts.missing_threshold:
continue
genotype = [marker] + geno
genotypes.append(genotype)
mm = MSTMatrix(genotypes, mh, ptype, opts.missing_threshold)
mm.write(opts.outfile, header=(not opts.noheader))
|
def mstmap(args)
|
%prog mstmap bcffile/vcffile > matrixfile
Convert bcf/vcf format to mstmap input.
| 3.727462 | 3.636404 | 1.025041 |
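The marker filter in isolation, with toy genotype strings: markers are dropped when either allele class is too rare or too many calls are missing.
rows = [("m1", list("AABB-")), ("m2", list("AAAA-")), ("m3", list("AB-BB"))]
freq, missing_threshold = 0.2, 0.25
for marker, geno in rows:
    f = 1.0 / len(geno)
    if geno.count("A") * f < freq or geno.count("B") * f < freq:
        continue  # m2 fails here: no B calls
    if geno.count("-") * f > missing_threshold:
        continue
    print(marker)  # m1 and m3 pass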
p = OptionParser(liftover.__doc__)
p.add_option("--newid", default=False, action="store_true",
help="Make new identifiers")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
oldvcf, chainfile, newvcf = args
ul = UniqueLiftover(chainfile)
num_excluded = 0
fp = open(oldvcf)
fw = open(newvcf, "w")
for row in fp:
row = row.strip()
if row[0] == '#':
if row.startswith("##source="):
row = "##source={0}".format(__file__)
elif row.startswith("##reference="):
row = "##reference=hg38"
elif row.startswith("##contig="):
continue
print(row.strip(), file=fw)
continue
v = VcfLine(row)
# GRCh37.p2 has the same MT sequence as hg38 (but hg19 is different)
if v.seqid == "MT":
v.seqid = "chrM"
print(v, file=fw)
continue
try:
new_chrom, new_pos = ul.liftover_cpra(CM[v.seqid], v.pos)
except Exception:
num_excluded += 1
continue
if new_chrom is not None and new_pos is not None:
v.seqid, v.pos = new_chrom, new_pos
if opts.newid:
v.rsid = "{0}:{1}".format(new_chrom.replace("chr", ""), new_pos)
print(v, file=fw)
else:
num_excluded += 1
logging.debug("Excluded {0}".format(num_excluded))
|
def liftover(args)
|
%prog liftover old.vcf hg19ToHg38.over.chain.gz new.vcf
Lift over coordinates in vcf file.
| 2.872588 | 2.736664 | 1.049668 |
chromosome = str(chromosome)
position = int(position)
# Perform the liftover lookup, shift the position by 1 as pyliftover deals in 0-based co-ords
new = self.liftover.convert_coordinate(chromosome, position - 1)
# This has to be here as new will be NoneType when the chromosome doesn't exist in the chainfile
if new:
# If the liftover is unique
if len(new) == 1:
# If the liftover hasn't changed strand
if new[0][2] == "+":
# Set the co-ordinates to the lifted-over ones and write out
new_chromosome = str(new[0][0])
# Shift the position forward by one to convert back to a 1-based co-ords
new_position = int(new[0][1]) + 1
return new_chromosome, new_position
else:
exception_string = "{},{} has a flipped strand in liftover: {}".format(chromosome, position, new)
else:
exception_string = "{},{} lifts over to multiple positions: {}".format(chromosome, position, new)
elif new is None:
exception_string = "Chromosome '{}' provided not in chain file".format(chromosome)
else:
exception_string = "{},{} has no liftover hit in the chain file".format(chromosome, position)
if verbose:
logging.error(exception_string)
return None, None
|
def liftover_cpra(self, chromosome, position, verbose=False)
|
Given chromosome and position in 1-based co-ordinates, this uses pyliftover
to lift over the locus, returning a (chromosome, position) tuple, or
(None, None) if no unique, strand-preserving liftover is possible.
:param chromosome: string with the chromosome as it's represented in the from_genome
:param position: position on chromosome (will be cast to int)
:return: ((str) chromosome, (int) position), or (None, None) if no liftover
| 4.2365 | 4.229952 | 1.001548 |
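A minimal pyliftover call matching the logic above, assuming the library can fetch the hg19-to-hg38 chain on first use; note convert_coordinate works in 0-based co-ordinates, hence the +/-1 shifts.
from pyliftover import LiftOver

lo = LiftOver("hg19", "hg38")
hits = lo.convert_coordinate("chr1", 1000000 - 1)  # 1-based position shifted down
if hits and len(hits) == 1 and hits[0][2] == "+":
    print(hits[0][0], int(hits[0][1]) + 1)  # back to 1-based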
p = OptionParser(multilineplot.__doc__)
p.add_option("--lines",
help="Features to plot in lineplot [default: %default]")
p.add_option("--colors",
help="List of colors matching number of input bed files")
p.add_option("--mode", default="span", choices=("span", "count", "score"),
help="Accumulate feature based on [default: %default]")
p.add_option("--binned", default=False, action="store_true",
help="Specify whether the input is already binned; " +
"if True, input files are considered to be binfiles")
p.add_option("--ymax", type="int", help="Set Y-axis max")
add_window_options(p)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, chr = args
window, shift, subtract, merge = check_window_options(opts)
linebeds = []
colors = opts.colors
if opts.lines:
lines = opts.lines.split(",")
assert len(colors) == len(lines), "Number of chosen colors must match" + \
" number of input bed files"
linebeds = get_beds(lines, binned=opts.binned)
linebins = get_binfiles(linebeds, fastafile, shift, mode=opts.mode,
binned=opts.binned, merge=merge)
clen = Sizes(fastafile).mapping[chr]
nbins = get_nbins(clen, shift)
plt.rcParams["xtick.major.size"] = 0
plt.rcParams["ytick.major.size"] = 0
plt.rcParams["figure.figsize"] = iopts.w, iopts.h
fig, axarr = plt.subplots(nrows=len(lines))
if len(linebeds) == 1:
axarr = (axarr, )
fig.suptitle(latex(chr), color="darkslategray")
for i, ax in enumerate(axarr):
lineplot(ax, [linebins[i]], nbins, chr, window, shift, \
color="{0}{1}".format(colors[i], 'r'))
if opts.ymax:
ax.set_ylim(0, opts.ymax)
plt.subplots_adjust(hspace=0.5)
image_name = chr + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
def multilineplot(args)
|
%prog multilineplot fastafile chr1
Combine multiple line plots in one vertical stack. Inputs must be
BED-formatted.
--lines: traditional line plots, useful for plotting feature frequency
| 3.482716 | 3.383953 | 1.029186 |
p = OptionParser(stack.__doc__)
p.add_option("--top", default=10, type="int",
help="Draw the first N chromosomes [default: %default]")
p.add_option("--stacks",
default="Exons,Introns,DNA_transposons,Retrotransposons",
help="Features to plot in stackplot [default: %default]")
p.add_option("--switch",
help="Change chr names based on two-column file [default: %default]")
add_window_options(p)
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
top = opts.top
window, shift, subtract, merge = check_window_options(opts)
switch = opts.switch
if switch:
switch = DictFile(opts.switch)
stacks = opts.stacks.split(",")
bedfiles = get_beds(stacks)
binfiles = get_binfiles(bedfiles, fastafile, shift,
subtract=subtract, merge=merge)
sizes = Sizes(fastafile)
s = list(sizes.iter_sizes())[:top]
maxl = max(x[1] for x in s)
margin = .08
inner = .02 # y distance between tracks
pf = fastafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Gauge
ratio = draw_gauge(root, margin, maxl)
# Per chromosome
yinterval = (1 - 2 * margin) / (top + 1)
xx = margin
yy = 1 - margin
for chr, clen in s:
yy -= yinterval
xlen = clen / ratio
cc = chr
if "_" in chr:
ca, cb = chr.split("_")
cc = ca[0].upper() + cb
if switch and cc in switch:
cc = "\n".join((cc, "({0})".format(switch[cc])))
root.add_patch(Rectangle((xx, yy), xlen, yinterval - inner, color=gray))
ax = fig.add_axes([xx, yy, xlen, yinterval - inner])
nbins = clen / shift
if clen % shift:
nbins += 1
stackplot(ax, binfiles, nbins, palette, chr, window, shift)
root.text(xx - .04, yy + .5 * (yinterval - inner), cc, ha="center", va="center")
ax.set_xlim(0, nbins)
ax.set_ylim(0, 1)
ax.set_axis_off()
# Legends
yy -= yinterval
xx = margin
for b, p in zip(bedfiles, palette):
b = b.rsplit(".", 1)[0].replace("_", " ")
b = Registration.get(b, b)
root.add_patch(Rectangle((xx, yy), inner, inner, color=p, lw=0))
xx += 2 * inner
root.text(xx, yy, b, size=13)
xx += len(b) * .012 + inner
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
def stack(args)
|
%prog stack fastafile
Create landscape plots that show the amounts of genic and repetitive
sequences along the chromosomes.
| 3.598276 | 3.525384 | 1.020676 |
from Bio.Emboss.Applications import NeedleCommandline
needle_cline = NeedleCommandline(asequence=fa, bsequence=fb,
gapopen=10, gapextend=0.5, outfile=needlefile)
stdout, stderr = needle_cline()
nh = NeedleHeader(needlefile)
FileShredder([fa, fb, needlefile], verbose=False)
r = ["\t".join((a, b, nh.identity, nh.score))]
results.extend(r)
|
def _needle(fa, fb, needlefile, a, b, results)
|
Run single needle job
| 4.473867 | 4.529823 | 0.987647 |
from jcvi.formats.fasta import Fasta, SeqIO
p = OptionParser(needle.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
manager = mp.Manager()
results = manager.list()
needle_pool = mp.Pool(processes=mp.cpu_count())
pairsfile, apep, bpep = args
afasta, bfasta = Fasta(apep), Fasta(bpep)
fp = must_open(pairsfile)
for i, row in enumerate(fp):
a, b = row.split()
a, b = afasta[a], bfasta[b]
fa, fb = must_open("{0}_{1}_a.fasta".format(pairsfile, i), "w"), \
must_open("{0}_{1}_b.fasta".format(pairsfile, i), "w")
SeqIO.write([a], fa, "fasta")
SeqIO.write([b], fb, "fasta")
fa.close()
fb.close()
needlefile = "{0}_{1}_ab.needle".format(pairsfile, i)
needle_pool.apply_async(_needle, \
(fa.name, fb.name, needlefile, a.id, b.id, results))
needle_pool.close()
needle_pool.join()
fp.close()
scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
fw = must_open(scoresfile, "w")
for result in results:
print(result, file=fw)
fw.close()
|
def needle(args)
|
%prog needle nw.pairs a.pep.fasta b.pep.fasta
Take protein pairs and run `needle` pairwise alignments on them.
Automatically writes output file `nw.scores`.
| 2.380315 | 2.229596 | 1.067599 |
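The fan-out pattern above, stripped to its shape: a Manager list collects results appended by apply_async workers. The worker here fakes a score in place of a real needle run.
import multiprocessing as mp

def worker(a, b, results):
    results.append("{0}\t{1}\t{2}".format(a, b, len(a) + len(b)))  # stand-in score

if __name__ == "__main__":
    manager = mp.Manager()
    results = manager.list()
    pool = mp.Pool(processes=2)
    for a, b in [("geneA", "geneB"), ("geneC", "geneD")]:
        pool.apply_async(worker, (a, b, results))
    pool.close()
    pool.join()
    print(list(results))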
from jcvi.formats.base import SetFile, FileShredder
A, T, P = "ABINITIO_PREDICTION", "TRANSCRIPT", "PROTEIN"
# Stores default weights and types
Registry = {\
"maker": (A, 5),
"augustus_masked": (A, 1),
"snap_masked": (A, 1),
"genemark": (A, 1),
"est2genome": (T, 5),
"est_gff": (T, 5),
"protein2genome": (P, 5),
"blastx": (P, 1)
}
p = OptionParser(maker.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, fastafile = args
types = "type.ids"
if need_update(gffile, types):
cmd = "cut -f2 -s {0} | sort -u".format(gffile)
sh(cmd, outfile=types)
types = SetFile(types)
reg = defaultdict(list)
weightsfile = "weights.txt"
contents = []
for s in types:
rs = s.split(":")[0]
if rs not in Registry:
continue
type, weight = Registry[rs]
reg[type].append(s)
contents.append("\t".join(str(x) for x in (type, s, weight)))
contents = "\n".join(sorted(contents))
write_file(weightsfile, contents)
evs = [x + ".gff" for x in (A, T, P)]
FileShredder(evs)
for type, tracks in reg.items():
for t in tracks:
cmd = "grep '\t{0}' {1} | grep -v '_match\t' >> {2}.gff".format(t, gffile, type)
sh(cmd)
partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents)
|
def maker(args)
|
%prog maker maker.gff3 genome.fasta
Prepare EVM inputs by separating tracks from MAKER.
| 4.634776 | 4.281867 | 1.082419 |
p = OptionParser(tigrload.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
db, ev_type = args
runfile = "load.sh"
contents = EVMLOAD.format(db, ev_type)
write_file(runfile, contents)
|
def tigrload(args)
|
%prog tigrload db ev_type
Load EVM results into TIGR db. Actually, just write a load.sh script. The
ev_type should be set, e.g. "EVM1", "EVM2", etc.
| 3.745573 | 2.443922 | 1.532608 |
p = OptionParser(pasa.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pasa_db, fastafile = args
termexons = "pasa.terminal_exons.gff3"
if need_update(fastafile, termexons):
cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi"
cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"'.format(pasa_db)
cmd += ' -g {0}'.format(fastafile)
sh(cmd)
cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl"
cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff"
sh(cmd, outfile=termexons)
return termexons
|
def pasa(args)
|
%prog pasa pasa_db fastafile
Run EVM in TIGR-only mode.
| 7.071276 | 6.192741 | 1.141865 |