code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---|
p = OptionParser(tigrprepare.__doc__)
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
fastafile, asmbl_id, db, pasa_db = args
if asmbl_id == 'all':
idsfile = fastafile + ".ids"
if need_update(fastafile, idsfile):
ids([fastafile, "-o", idsfile])
else:
idsfile = asmbl_id
oneid = open(idsfile).next().strip()
weightsfile = "weights.txt"
if need_update(idsfile, weightsfile):
cmd = "$EVM/TIGR-only/create_sample_weights_file.dbi"
cmd += " {0} {1} | tee weights.txt".format(db, oneid)
sh(cmd)
evs = ["gene_predictions.gff3", "transcript_alignments.gff3",
"protein_alignments.gff3"]
if need_update(weightsfile, evs):
cmd = "$EVM/TIGR-only/write_GFF3_files.dbi"
cmd += " --db {0} --asmbl_id {1} --weights {2}".\
format(db, idsfile, weightsfile)
sh(cmd)
evs[1] = fix_transcript()
partition(evs)
runfile = "run.sh"
contents = EVMRUN.format(*evs)
write_file(runfile, contents)
|
def tigrprepare(args)
|
%prog tigrprepare asmbl.fasta asmbl.ids db pasa.terminal_exons.gff3
Run EVM in TIGR-only mode.
| 4.319391 | 3.621254 | 1.192789 |
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, cdsfasta = args
gff = Gff(gffile)
sizes = Sizes(cdsfasta).mapping
gene_register = {}
for g in gff:
if g.type != "mRNA":
continue
aed = float(g.attributes["_AED"][0])
gene_register[g.parent] = (1 - aed) * sizes[g.accn]
allgenes = import_feats(gffile)
g = get_piles(allgenes)
bestids = set()
for group in g:
ranges = [to_range(x, score=gene_register[x.accn], id=x.accn) \
for x in group]
selected_chain, score = range_chain(ranges)
bestids |= set(x.id for x in selected_chain)
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print("\n".join(sorted(removed)), file=fw)
fw.close()
populate_children(opts.outfile, bestids, gffile, "gene")
|
def uniq(args)
|
%prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed one by one.
Here, we use a different algorithm that retains the best non-overlapping
subset within each pile, rather than the single best model. The scoring
function is also different: rather than being based on score or span, we
optimize for the subset that shows the best combined score. Score is defined by:
score = (1 - AED) * length
| 4.784103 | 4.056575 | 1.179345 |
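A minimal worked example of the scoring above, with assumed AED and CDS length (illustrative values, not from the source):
aed, cds_length = 0.25, 1200      # hypothetical mRNA: AED 0.25, 1200-bp CDS
score = (1 - aed) * cds_length    # score = (1 - AED) * length
assert score == 900.0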
import __builtin__
from jcvi.utils.cbook import enumerate_reversed
p = OptionParser(nmd.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
gff = make_index(gffile)
fw = must_open(opts.outfile, "w")
for gene in gff.features_of_type('gene', order_by=('seqid', 'start')):
_enumerate = __builtin__.enumerate if gene.strand == "-" else enumerate_reversed
for mrna in gff.children(gene, featuretype='mRNA', order_by=('start')):
tracker = dict()
tracker['exon'] = list(gff.children(mrna, featuretype='exon', order_by=('start')))
tracker['cds'] = [None] * len(tracker['exon'])
tcds_pos = None
for i, exon in _enumerate(tracker['exon']):
for cds in gff.region(region=exon, featuretype='CDS', completely_within=True):
if mrna.id in cds['Parent']:
tracker['cds'][i] = cds
tcds_pos = i
break
if tcds_pos is not None: break
NMD, distance = False, 0
if (mrna.strand == "+" and tcds_pos + 1 < len(tracker['exon'])) \
or (mrna.strand == "-" and tcds_pos - 1 >= 0):
tcds = tracker['cds'][tcds_pos]
texon = tracker['exon'][tcds_pos]
PTC = tcds.end if mrna.strand == '+' else tcds.start
TDSS = texon.end if mrna.strand == '+' else texon.start
distance = abs(TDSS - PTC)
NMD = True if distance > 50 else False
print("\t".join(str(x) for x in (gene.id, mrna.id, \
gff.children_bp(mrna, child_featuretype='CDS'), distance, NMD)), file=fw)
fw.close()
|
def nmd(args)
|
%prog nmd gffile
Identify transcript variants which might be candidates for nonsense
mediated decay (NMD).
A transcript is considered to be a candidate for NMD when the CDS stop
codon is located more than 50nt upstream of the terminal splice site donor.
References:
http://www.nature.com/horizon/rna/highlights/figures/s2_spec1_f3.html
http://www.biomedcentral.com/1741-7007/7/23/figure/F1
| 2.975805 | 2.871976 | 1.036152 |
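A small sketch of the 50-nt rule above on the plus strand, with made-up coordinates (the terminal CDS end stands in for the stop codon, the containing exon's end for the downstream splice donor):
cds_end, exon_end = 10450, 10620            # hypothetical plus-strand positions
distance = abs(exon_end - cds_end)          # 170 nt to the downstream donor
is_nmd_candidate = distance > 50            # True under the 50-nt rule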
symbols = {'+': '>', '-': '<'}
for seqid, bs in bed.sub_beds():
prev_node, prev_strand = None, '+'
for b in bs:
accn = b.accn
strand = b.strand
node = "=".join(families[accn])
if prev_node:
print("{}{}--{}{}".format(prev_node, symbols[prev_strand],
symbols[strand], node))
prev_node, prev_strand = node, strand
|
def print_edges(G, bed, families)
|
Instead of going through the graph construction, just print the edges.
| 4.492333 | 4.500399 | 0.998208 |
from jcvi.algorithms.graph import BiGraph
p = OptionParser(fuse.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfiles = [x for x in args if x.endswith(".bed")]
anchorfiles = [x for x in args if x.endswith(".anchors")]
# TODO: Use Markov clustering to sparsify the edges
families = Grouper()
for anchorfile in anchorfiles:
af = AnchorFile(anchorfile)
for a, b, block_id in af.iter_pairs():
families.join(a, b)
allowed = set(families.keys())
logging.debug("Total families: {}, Gene members: {}"
.format(len(families), len(allowed)))
# TODO: Use C++ implementation of BiGraph() when available
# For now just serialize this to the disk
G = BiGraph()
for bedfile in bedfiles:
bed = Bed(bedfile, include=allowed)
#add_bed_to_graph(G, bed, families)
print_edges(G, bed, families)
|
def fuse(args)
|
%prog fuse *.bed *.anchors
Fuse gene orders based on anchors file.
| 4.515965 | 4.088396 | 1.104581 |
import pygraphviz as pgv
from jcvi.utils.iter import pairwise
from jcvi.formats.base import SetFile
p = OptionParser(adjgraph.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
infile, subgraph = args
subgraph = SetFile(subgraph)
subgraph = set(x.strip("-") for x in subgraph)
G = pgv.AGraph(strict=False) # allow multi-edge
SG = pgv.AGraph(strict=False)
palette = ("green", "magenta", "tomato", "peachpuff")
fp = open(infile)
genome_id = -1
key = 0
for row in fp:
if row.strip() == "":
continue
atoms = row.split()
tag = atoms[0]
if tag in ("ChrNumber", "chr"):
continue
if tag == "genome":
genome_id += 1
gcolor = palette[genome_id]
continue
nodeseq = []
for p in atoms:
np = p.strip("-")
nodeL, nodeR = np + "L", np + "R"
if p[0] == "-": # negative strand
nodeseq += [nodeR, nodeL]
else:
nodeseq += [nodeL, nodeR]
for a, b in pairwise(nodeseq):
G.add_edge(a, b, key, color=gcolor)
key += 1
na, nb = a[:-1], b[:-1]
if na not in subgraph and nb not in subgraph:
continue
SG.add_edge(a, b, key, color=gcolor)
G.graph_attr.update(dpi="300")
fw = open("graph.dot", "w")
G.write(fw)
fw.close()
fw = open("subgraph.dot", "w")
SG.write(fw)
fw.close()
|
def adjgraph(args)
|
%prog adjgraph adjacency.txt subgraph.txt
Construct adjacency graph for graphviz. The file may look like the sample below.
The lines with numbers are chromosomes with gene order information.
genome 0
chr 0
-1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360
chr 1
138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143
| 2.998851 | 2.954407 | 1.015043 |
p = OptionParser(pairs.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
anchorfile, prefix = args
outfile = prefix + ".pairs"
fw = open(outfile, "w")
af = AnchorFile(anchorfile)
blocks = af.blocks
pad = len(str(len(blocks)))
npairs = 0
for i, block in enumerate(blocks):
block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
lines = []
for q, s, score in block:
npairs += 1
score = score.replace('L', '')
lines.append("\t".join((q, s, score, block_id)))
print("\n".join(sorted(lines)), file=fw)
fw.close()
logging.debug("A total of {0} pairs written to `{1}`.".
format(npairs, outfile))
|
def pairs(args)
|
%prog pairs anchorsfile prefix
Convert anchorsfile to pairsfile.
| 2.861351 | 2.6448 | 1.081878 |
p = OptionParser(zipbed.__doc__)
p.add_option("--prefix", default="b",
help="Prefix for the new seqid [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, anchorfile = args
prefix = opts.prefix
bed = Bed(bedfile)
order = bed.order
newbedfile = prefix + ".bed"
fw = open(newbedfile, "w")
af = AnchorFile(anchorfile)
blocks = af.blocks
pad = len(str(len(blocks)))
for i, block in enumerate(blocks):
block_id = "{0}{1:0{2}d}".format(prefix, i + 1, pad)
pairs = []
for q, s, score in block:
qi, q = order[q]
si, s = order[s]
pairs.append((qi, si))
newbed = list(interleave_pairs(pairs))
for i, b in enumerate(newbed):
accn = bed[b].accn
print("\t".join(str(x)
for x in (block_id, i, i + 1, accn)), file=fw)
logging.debug("Reconstructed bedfile written to `{0}`.".format(newbedfile))
|
def zipbed(args)
|
%prog zipbed species.bed collinear.anchors
Build ancestral contig from collinear blocks. For example, to build pre-rho
order, use `zipbed rice.bed rice.rice.1x1.collinear.anchors`. The algorithm
proceeds by interleaving the genes together.
| 3.164543 | 3.083869 | 1.02616 |
p = OptionParser(collinear.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
af = AnchorFile(anchorfile)
newanchorfile = anchorfile.rsplit(".", 1)[0] + ".collinear.anchors"
fw = open(newanchorfile, "w")
blocks = af.blocks
for block in blocks:
print("#" * 3, file=fw)
iblock = []
for q, s, score in block:
qi, q = qorder[q]
si, s = sorder[s]
score = int(long(score))
iblock.append([qi, si, score])
block = get_collinear(iblock)
for q, s, score in block:
q = qbed[q].accn
s = sbed[s].accn
print("\t".join((q, s, str(score))), file=fw)
fw.close()
|
def collinear(args)
|
%prog collinear a.b.anchors
Reduce synteny blocks to strictly collinear ones, using dynamic programming in
a procedure similar to DAGchainer.
| 3.34426 | 3.233208 | 1.034347 |
p = OptionParser(counts.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
vcf_reader = vcf.Reader(open(vcffile))
for r in vcf_reader:
v = CPRA(r)
if not v.is_valid:
continue
for sample in r.samples:
ro = sample["RO"]
ao = sample["AO"]
print("\t".join(str(x) for x in (v, ro, ao)))
|
def counts(args)
|
%prog counts vcffile
Collect allele counts from RO and AO fields.
| 3.161752 | 2.68715 | 1.176619 |
p = OptionParser(prepare.__doc__)
p.add_option("--accuracy", default=.85,
help="Sequencing per-base accuracy")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, bamfile = args
right = "{:.2f}".format(opts.accuracy)
wrong = "{:.2f}".format(1 - opts.accuracy)
vcf_reader = vcf.Reader(open(vcffile))
variants = []
for r in vcf_reader:
v = CPRA(r)
if not v.is_valid:
continue
variants.append(v)
logging.debug("A total of {} bi-allelic SNVs imported from `{}`".\
format(len(variants), vcffile))
bamfile = pysam.AlignmentFile(bamfile, "rb")
for v in variants:
pos = v.pos - 1
for column in bamfile.pileup(v.chr, pos, pos + 1, truncate=True):
for read in column.pileups:
query_position = read.query_position
if query_position is None:
continue
read_name = read.alignment.query_name
query_base = read.alignment.query_sequence[query_position]
a, b = v.alleles
if query_base == a:
other_base = b
elif query_base == b:
other_base = a
else:
continue
print(" ".join(str(x) for x in \
(v, read_name, query_base, right, other_base, wrong)))
|
def prepare(args)
|
%prog prepare vcffile bamfile
Convert vcf and bam to variant list. Inputs are:
- vcffile: contains the positions of variants
- bamfile: contains the reads that hold the variants
Outputs:
- reads_to_phase: phasing for each read
- variants_to_phase: in format of phased vcf
| 2.963151 | 2.925197 | 1.012975 |
return len(self.ref) == 1 and \
len(self.alt) == 1 and \
len(self.alt[0]) == 1
|
def is_valid(self)
|
Only retain variants that are bi-allelic, with single-base REF and ALT.
| 3.85586 | 2.70543 | 1.42523 |
# Split. If there are no splits, return now
s = regex.split(s)
if len(s) == 1:
return tuple(s)
# Now convert the numbers to numbers, and leave strings as strings
s = remove_empty(s)
for i in range(len(s)):
try:
s[i] = numconv(s[i])
except ValueError:
pass
# If the list begins with a number, lead with an empty string.
# This is used to get around the "unorderable types" issue.
if not isinstance(s[0], six.string_types):
return [''] + s
else:
return s
|
def _number_finder(s, regex, numconv)
|
Helper to split numbers
| 4.568319 | 4.658287 | 0.980687 |
return sorted(seq, key=lambda x: natsort_key(key(x),
number_type=number_type,
signed=signed, exp=exp))
|
def natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True)
|
\
Sorts a sequence naturally (alphabetically and numerically),
not lexicographically.
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
['num2', 'num3', 'num5']
>>> b = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
>>> from operator import itemgetter
>>> natsorted(b, key=itemgetter(1))
[('c', 'num2'), ('a', 'num3'), ('b', 'num5')]
| 2.370076 | 3.37059 | 0.703164 |
from operator import itemgetter
item1 = itemgetter(1)
# Pair the index and sequence together, then sort by
index_seq_pair = [[x, key(y)] for x, y in zip(range(len(seq)), seq)]
index_seq_pair.sort(key=lambda x: natsort_key(item1(x),
number_type=number_type,
signed=signed, exp=exp))
return [x[0] for x in index_seq_pair]
|
def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True)
|
\
Sorts a sequence naturally, but returns a list of the sorted
indices rather than the sorted list.
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> [a[i] for i in index]
['num2', 'num3', 'num5']
>>> [b[i] for i in index]
['baz', 'foo', 'bar']
>>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
>>> from operator import itemgetter
>>> index_natsorted(c, key=itemgetter(1))
[2, 0, 1]
| 3.398635 | 4.118252 | 0.825262 |
xargs = args[2:]
p = OptionParser(calibrate.__doc__)
opts, args, iopts = add_seeds_options(p, args)
if len(args) != 2:
sys.exit(not p.print_help())
imagefile, boxsize = args
boxsize = float(boxsize)
# Read in color checker
colorcheckerfile = op.join(datadir, "colorchecker.txt")
colorchecker = []
expected = 0
for row in open(colorcheckerfile):
boxes = row.split()
colorchecker.append(boxes)
expected += len(boxes)
folder = op.split(imagefile)[0]
objects = seeds([imagefile, "--outdir={0}".format(folder)] + xargs)
nseeds = len(objects)
logging.debug("Found {0} boxes (expected={1})".format(nseeds, expected))
assert expected - 4 <= nseeds <= expected + 4, \
"Number of boxes drastically different from {0}".format(expected)
# Calculate pixel-cm ratio
boxes = [t.area for t in objects]
reject = reject_outliers(boxes)
retained_boxes = [b for r, b in zip(reject, boxes) if not r]
mbox = np.median(retained_boxes) # in pixels
pixel_cm_ratio = (mbox / boxsize) ** .5
logging.debug("Median box size: {0} pixels. Measured box size: {1} cm2".\
format(mbox, boxsize))
logging.debug("Pixel-cm ratio: {0}".format(pixel_cm_ratio))
xs = [t.x for t in objects]
ys = [t.y for t in objects]
idx_xs = get_kmeans(xs, 6)
idx_ys = get_kmeans(ys, 4)
for xi, yi, s in zip(idx_xs, idx_ys, objects):
s.rank = (yi, xi)
objects.sort(key=lambda x: x.rank)
colormap = []
for s in objects:
x, y = s.rank
observed, expected = s.rgb, rgb_to_triplet(colorchecker[x][y])
colormap.append((np.array(observed), np.array(expected)))
# Color transfer
tr0 = np.eye(3).flatten()
print("Initial distance:", total_error(tr0, colormap), file=sys.stderr)
tr = fmin(total_error, tr0, args=(colormap,))
tr.resize((3, 3))
print("RGB linear transform:\n", tr, file=sys.stderr)
calib = {"PixelCMratio": pixel_cm_ratio,
"RGBtransform": tr.tolist()}
jsonfile = op.join(folder, "calibrate.json")
fw = must_open(jsonfile, "w")
print(json.dumps(calib, indent=4), file=fw)
fw.close()
logging.debug("Calibration specs written to `{0}`.".format(jsonfile))
return jsonfile
|
def calibrate(args)
|
%prog calibrate calibrate.JPG boxsize
Calibrate pixel-cm ratio and color adjustment.
- `calibrate.JPG` is the photo containing a colorchecker
- `boxsize` is the measured size of the boxes on the printed colorchecker, in
square centimeter (cm2) units
| 4.026233 | 3.709214 | 1.085468 |
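A sketch of the pixel-cm conversion used above, assuming a median box area of 10000 pixels and 4 cm2 printed boxes (illustrative numbers):
mbox_pixels, boxsize_cm2 = 10000.0, 4.0            # assumed measurements
pixel_cm_ratio = (mbox_pixels / boxsize_cm2) ** .5
assert pixel_cm_ratio == 50.0                      # 50 pixels per cm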
from jcvi.formats.pdf import cat
xargs = args[1:]
p = OptionParser(batchseeds.__doc__)
opts, args, iopts = add_seeds_options(p, args)
if len(args) != 1:
sys.exit(not p.print_help())
folder, = args
folder = folder.rstrip('/')
outdir = folder + "-debug"
outfile = folder + "-output.tsv"
assert op.isdir(folder)
images = []
jsonfile = opts.calibrate or op.join(folder, "calibrate.json")
if not op.exists(jsonfile):
jsonfile = None
for im in iglob(folder, "*.jpg,*.JPG,*.png"):
if im.endswith((".resize.jpg", ".main.jpg", ".label.jpg")):
continue
if op.basename(im).startswith("calibrate"):
continue
images.append(im)
fw = must_open(outfile, 'w')
print(Seed.header(calibrate=jsonfile), file=fw)
nseeds = 0
for im in images:
imargs = [im, "--noheader", "--outdir={0}".format(outdir)] + xargs
if jsonfile:
imargs += ["--calibrate={0}".format(jsonfile)]
objects = seeds(imargs)
for o in objects:
print(o, file=fw)
nseeds += len(objects)
fw.close()
logging.debug("Processed {0} images.".format(len(images)))
logging.debug("A total of {0} objects written to `{1}`.".\
format(nseeds, outfile))
pdfs = iglob(outdir, "*.pdf")
outpdf = folder + "-output.pdf"
cat(pdfs + ["--outfile={0}".format(outpdf)])
logging.debug("Debugging information written to `{0}`.".format(outpdf))
return outfile
|
def batchseeds(args)
|
%prog batchseeds folder
Extract seed metrics for each image in a directory.
| 3.758127 | 3.65714 | 1.027614 |
p = OptionParser(filterbedgraph.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedgraphfile, cutoff = args
c = float(cutoff)
fp = open(bedgraphfile)
pf = bedgraphfile.rsplit(".", 1)[0]
filteredbed = pf + ".filtered-{}.bed".format(cutoff)
fw = open(filteredbed, "w")
nfiltered = ntotal = 0
for row in fp:
b = BedLine(row)
ntotal += 1
if float(b.accn) >= c:
print(b, file=fw)
nfiltered += 1
fw.close()
logging.debug("A total of {} intervals (score >= {}) written to `{}`".\
format(percentage(nfiltered, ntotal), cutoff, filteredbed))
mergeBed(filteredbed, sorted=True, delim=None)
|
def filterbedgraph(args)
|
%prog filterbedgraph a.bedgraph 1
Filter the bedGraph, typically from the gem-mappability pipeline. Uniquely
mappable regions score 1, two-copy regions score .5, etc.
| 3.536202 | 3.525829 | 1.002942 |
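A toy illustration of the score cutoff, assuming gem-mappability-style scores where unique regions are 1 and two-copy regions are .5:
rows = [("chr1", 0, 100, 1.0), ("chr1", 100, 200, 0.5)]   # (seqid, start, end, score)
kept = [r for r in rows if r[3] >= 1]                     # cutoff = 1 keeps unique regions only
assert kept == [("chr1", 0, 100, 1.0)]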
p = OptionParser(tiling.__doc__)
p.add_option("--overlap", default=3000, type="int",
help="Minimum amount of overlaps required")
p.set_verbose()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
ov = opts.overlap
bed = Bed(bedfile)
inf = len(bed)
selected = Bed()
for seqid, sbed in bed.sub_beds():
g = Grouper()
current = sbed[0]
# Partition connected features
for a in sbed:
g.join(a)
# requires a real overlap
if a.start < current.end - ov:
g.join(a, current)
if a.end > current.end:
current = a
# Process per partition
for gbed in g:
end = max(x.end for x in gbed)
gbed.sort(key=lambda x: (x.start, -x.end))
entries = len(gbed)
counts = [inf] * entries
counts[0] = 1
traceback = [-1] * entries
for i, a in enumerate(gbed):
for j in xrange(i + 1, entries):
b = gbed[j]
if b.start >= a.end - ov:
break
# Two ranges overlap!
if counts[i] + 1 < counts[j]:
counts[j] = counts[i] + 1
traceback[j] = i
endi = [i for i, a in enumerate(gbed) if a.end == end]
last = min((traceback[i], i) for i in endi)[1]
chain = []
while last != -1:
chain.append(last)
last = traceback[last]
chain = chain[::-1]
selected.extend([gbed[x] for x in chain])
if opts.verbose:
print(counts)
print(traceback)
print(chain)
print("\n".join(str(x) for x in gbed))
print("*" * 30)
print("\n".join(str(gbed[x]) for x in chain))
print()
tilingbedfile = bedfile.rsplit(".", 1)[0] + ".tiling.bed"
selected.print_to_file(filename=tilingbedfile, sorted=True)
logging.debug("A total of {} tiling features written to `{}`"\
.format(len(selected), tilingbedfile))
|
def tiling(args)
|
%prog tiling bedfile
Compute minimum tiling path using as few clones as possible. Implemented
with dynamic programming. A greedy algorithm may also work, according to a
stackoverflow source.
| 3.267915 | 3.178959 | 1.027983 |
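A simplified sketch of the dynamic program above on made-up clone intervals, ignoring the --overlap slack: counts[j] is the fewest clones needed to reach clone j, and a predecessor can extend to j only if the two clones overlap.
clones = [(0, 100), (80, 180), (150, 260)]   # hypothetical (start, end) pairs, sorted by start
INF = float("inf")
counts = [INF] * len(clones)
counts[0] = 1
for i, (si, ei) in enumerate(clones):
    for j in range(i + 1, len(clones)):
        sj, ej = clones[j]
        if sj >= ei:                         # no overlap with clone i, stop extending
            break
        counts[j] = min(counts[j], counts[i] + 1)
assert counts == [1, 2, 3]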
p = OptionParser(chain.__doc__)
p.add_option("--dist", default=100000, help="Chaining distance")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
cmd = "sort -k4,4 -k1,1 -k2,2n -k3,3n {0} -o {0}".format(bedfile)
sh(cmd)
bed = Bed(bedfile, sorted=False)
newbed = Bed()
for accn, bb in groupby(bed, key=lambda x: x.accn):
bb = list(bb)
g = Grouper()
for a in bb:
g.join(a)
for a, b in pairwise(bb):
if a.seqid == b.seqid and b.start - a.end < opts.dist:
g.join(a, b)
data = []
for p in g:
seqid = p[0].seqid
start = min(x.start for x in p)
end = max(x.end for x in p)
score = sum(x.span for x in p)
data.append((seqid, start - 1, end, accn, score))
d = max(data, key=lambda x: x[-1])
newbed.append(BedLine("\t".join(str(x) for x in d)))
newbed.print_to_file(opts.outfile, sorted=True)
|
def chain(args)
|
%prog chain bedfile
Chain BED segments together.
| 2.669815 | 2.516366 | 1.06098 |
p = OptionParser(density.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
bed = Bed(bedfile)
sizes = Sizes(fastafile).mapping
header = "seqid features size density_per_Mb".split()
print("\t".join(header))
for seqid, bb in bed.sub_beds():
nfeats = len(bb)
size = sizes[seqid]
ds = nfeats * 1e6 / size
print("\t".join(str(x) for x in \
(seqid, nfeats, size, "{0:.1f}".format(ds))))
|
def density(args)
|
%prog density bedfile ref.fasta
Calculates density of features per seqid.
| 3.738964 | 3.24048 | 1.15383 |
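The per-Mb density reduces to a one-liner; for instance (assumed numbers), 250 features on a 5-Mb seqid:
nfeats, size = 250, 5000000           # hypothetical feature count and seqid length
density_per_Mb = nfeats * 1e6 / size  # 50.0 features per Mb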
p = OptionParser(clr.__doc__)
p.set_bedpe()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedpe, ref = args
if bedpe.endswith(".bam"):
bedpefile = bedpe.replace(".bam", ".bedpe")
if need_update(bedpe, bedpefile):
cmd = "bamToBed -bedpe -i {0}".format(bedpe)
sh(cmd, outfile=bedpefile)
bedpe = bedpefile
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
minlen=opts.minlen, maxlen=opts.maxlen)
rmdup = filtered + ".sorted.rmdup"
if need_update(filtered, rmdup):
rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)
converted = rmdup + ".converted"
if need_update(rmdup, converted):
fp = open(rmdup)
fw = open(converted, "w")
for row in fp:
r = BedpeLine(row)
print(r.bedline, file=fw)
fw.close()
merged = converted + ".merge.bed"
if need_update(converted, merged):
mergeBed(converted)
|
def clr(args)
|
%prog clr [bamfile|bedpefile] ref.fasta
Use mates from BEDPE to extract ranges where the ref is covered by mates.
This is useful in detection of chimeric contigs.
| 2.715667 | 2.592768 | 1.0474 |
p = OptionParser(alignextend.__doc__)
p.add_option("--len", default=100, type="int",
help="Extend to this length")
p.add_option("--qv", default=31, type="int",
help="Dummy qv score for extended bases")
p.add_option("--bedonly", default=False, action="store_true",
help="Only generate bed files, no FASTA")
p.set_bedpe()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedpe, ref = args
qvchar = chr(opts.qv + 33)
pf = bedpe.split(".")[0]
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
minlen=opts.minlen, maxlen=opts.maxlen, rlen=opts.rlen)
rmdup = filtered + ".filtered.sorted.rmdup"
if need_update(filtered, rmdup):
rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)
if opts.bedonly:
return
bed1, bed2 = pf + ".1e.bed", pf + ".2e.bed"
if need_update(rmdup, (bed1, bed2)):
sh("cut -f1-3,7-9 {0}".format(rmdup), outfile=bed1)
sh("cut -f4-6,7-8,10 {0}".format(rmdup), outfile=bed2)
sfa1, sfa2 = pf + ".1e.sfa", pf + ".2e.sfa"
if need_update((bed1, bed2, ref), (sfa1, sfa2)):
for bed in (bed1, bed2):
fastaFromBed(bed, ref, name=True, tab=True, stranded=True)
fq1, fq2 = pf + ".1e.fq", pf + ".2e.fq"
if need_update((sfa1, sfa2), (fq1, fq2)):
for sfa in (sfa1, sfa2):
sfa_to_fq(sfa, qvchar)
|
def alignextend(args)
|
%prog alignextend bedpefile ref.fasta
Similar idea to the AMOS alignextend script, using mates from BEDPE and FASTA
ref. See the AMOS script here:
https://github.com/nathanhaigh/amos/blob/master/src/Experimental/alignextend.pl
| 2.756646 | 2.79757 | 0.985372 |
p = OptionParser(seqids.__doc__)
p.add_option("--maxn", default=100, type="int",
help="Maximum number of seqids")
p.add_option("--prefix", help="Seqids must start with")
p.add_option("--exclude", default="random", help="Seqids should not contain")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfile, = args
pf = opts.prefix
exclude = opts.exclude
bed = Bed(bedfile)
s = bed.seqids
if pf:
s = [x for x in s if x.startswith(pf)]
if exclude:
s = [x for x in s if exclude not in x]
s = s[:opts.maxn]
print(",".join(s))
|
def seqids(args)
|
%prog seqids bedfile
Print out all seqids on one line. Useful for graphics.karyotype.
| 2.593969 | 2.45105 | 1.058309 |
from tempfile import mkstemp
from pybedtools import BedTool
p = OptionParser(juncs.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fh, trimbed = mkstemp(suffix = ".bed")
fw = must_open(trimbed, "w")
for i, juncbed in enumerate(args):
bed = Bed(juncbed, juncs=True)
for b in bed:
ovh = [int(x) for x in b.extra[-2].split(",")]
b.start += ovh[0]
b.end -= ovh[1]
b.accn = "{0}-{1}".format(b.accn, i)
b.extra = None
print(b, file=fw)
fw.close()
if len(args) > 1:
sh("sort -k1,1 -k2,2n {0} -o {0}".format(trimbed))
tbed = BedTool(trimbed)
grouptbed = tbed.groupby(g=[1,2,3,6], c=5, ops=['sum'])
# NOTE: the original command was lost here; as an assumption, rebuild a
# 6-column BED (with a generated name and the summed score) from the groupby output
cmd = "awk -v OFS='\\t' '{ print $1, $2, $3, \"JUNC\" NR, $5, $4 }'"
infile = grouptbed.fn
sh(cmd, infile=infile, outfile=opts.outfile)
else:
sort([trimbed, "-o", opts.outfile])
os.unlink(trimbed)
|
def juncs(args)
|
%prog juncs junctions1.bed [junctions2.bed ...]
Given a TopHat junctions.bed file, trim the read overhang to get the intron span.
If more than one junction bed file is provided, uniq the junctions and
calculate the cumulative (sum) junction support.
| 3.578962 | 3.486984 | 1.026378 |
from random import sample
from jcvi.formats.base import flexible_cast
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, N = args
assert is_number(N)
b = Bed(bedfile)
NN = flexible_cast(N)
if NN < 1:
NN = int(round(NN * len(b)))
beds = sample(b, NN)
new_bed = Bed()
new_bed.extend(beds)
outfile = bedfile.rsplit(".", 1)[0] + ".{0}.bed".format(N)
new_bed.print_to_file(outfile)
logging.debug("Write {0} features to `{1}`".format(NN, outfile))
|
def random(args)
|
%prog random bedfile number_of_features
Extract a random subset of features. The number of features can be an integer,
or a fraction below 1, in which case a random fraction (for example
0.1 = 10% of all features) will be extracted.
| 3.220724 | 2.827135 | 1.139218 |
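A sketch of how the N argument is interpreted, assuming a 1000-feature bed file:
total_features = 1000
N = 0.1                                                # fractional N means "keep this share"
n_keep = int(round(N * total_features)) if N < 1 else int(N)
assert n_keep == 100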
p = OptionParser(filter.__doc__)
p.add_option("--minsize", default=0, type="int",
help="Minimum feature length")
p.add_option("--maxsize", default=1000000000, type="int",
help="Minimum feature length")
p.add_option("--minaccn", type="int",
help="Minimum value of accn, useful to filter based on coverage")
p.add_option("--minscore", type="int", help="Minimum score")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fp = must_open(bedfile)
fw = must_open(opts.outfile, "w")
minsize, maxsize = opts.minsize, opts.maxsize
minaccn = opts.minaccn
minscore = opts.minscore
total = []
keep = []
for row in fp:
try:
b = BedLine(row)
except IndexError:
print(row.strip(), file=fw)
continue
span = b.span
total.append(span)
if not minsize <= span <= maxsize:
continue
if minaccn and int(b.accn) < minaccn:
continue
if minscore and int(b.score) < minscore:
continue
print(b, file=fw)
keep.append(span)
logging.debug("Stats: {0} features kept.".\
format(percentage(len(keep), len(total))))
logging.debug("Stats: {0} bases kept.".\
format(percentage(sum(keep), sum(total))))
|
def filter(args)
|
%prog filter bedfile
Filter the bedfile to retain records within a certain size range.
| 2.458642 | 2.313536 | 1.062721 |
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth required")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + ".d{0}".format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, "--minaccn={0}".format(mindepth),
"--outfile={0}".format(bedgraphfiltered)])
merged = bedgraphfiltered + ".merge.fasta"
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True)
|
def mergebydepth(args)
|
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
| 3.144344 | 2.822086 | 1.114191 |
p = OptionParser(depth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
readsbed, featsbed = args
fp = open(featsbed)
nargs = len(fp.readline().split("\t"))
keepcols = ",".join(str(x) for x in range(1, nargs + 1))
cmd = "coverageBed -a {0} -b {1} -d".format(readsbed, featsbed)
cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2)
sh(cmd, outfile=opts.outfile)
|
def depth(args)
|
%prog depth reads.bed features.bed
Calculate depth per feature using coverageBed.
| 2.976961 | 2.518487 | 1.182043 |
key = lambda x: x.rsplit(".", 1)[0]
iso_number = lambda x: get_number(x.split(".")[-1])
ids = sorted(ids, key=key)
newids = []
for k, ii in groupby(ids, key=key):
min_i = min(list(ii), key=iso_number)
newids.append(min_i)
return newids
|
def remove_isoforms(ids)
|
This is more or less a hack to remove GMAP multiple mappings. Multiple
GMAP mappings can be recognized by the suffixes .mrna1, .mrna2, etc.
| 3.305227 | 3.418669 | 0.966817 |
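A self-contained sketch of the isoform collapsing, parsing the .mrnaN suffix directly instead of using get_number (illustrative ids):
from itertools import groupby
ids = ["g1.mrna2", "g1.mrna1", "g2.mrna1"]            # hypothetical GMAP ids
gene = lambda x: x.rsplit(".", 1)[0]
iso = lambda x: int(x.split("mrna")[-1])
newids = [min(ii, key=iso) for _, ii in groupby(sorted(ids, key=gene), key=gene)]
assert newids == ["g1.mrna1", "g2.mrna1"]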
from jcvi.formats.sizes import Sizes
p = OptionParser(longest.__doc__)
p.add_option("--maxsize", default=20000, type="int",
help="Limit max size")
p.add_option("--minsize", default=60, type="int",
help="Limit min size")
p.add_option("--precedence", default="Medtr",
help="Accessions with prefix take precedence")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
maxsize = opts.maxsize
minsize = opts.minsize
prec = opts.precedence
mergedbed = mergeBed(bedfile, nms=True)
sizes = Sizes(fastafile).mapping
bed = Bed(mergedbed)
pf = bedfile.rsplit(".", 1)[0]
ids = set()
for b in bed:
accns = b.accn.split(";")
prec_accns = [x for x in accns if x.startswith(prec)]
if prec_accns:
accns = prec_accns
accn_sizes = [(sizes.get(x, 0), x) for x in accns]
accn_sizes = [(size, x) for size, x in accn_sizes if size < maxsize]
if not accn_sizes:
continue
max_size, max_accn = max(accn_sizes)
if max_size < minsize:
continue
ids.add(max_accn)
newids = remove_isoforms(ids)
logging.debug("Remove isoforms: before={0} after={1}".\
format(len(ids), len(newids)))
longestidsfile = pf + ".longest.ids"
fw = open(longestidsfile, "w")
print("\n".join(newids), file=fw)
fw.close()
logging.debug("A total of {0} records written to `{1}`.".\
format(len(newids), longestidsfile))
longestbedfile = pf + ".longest.bed"
some([bedfile, longestidsfile, "--outfile={0}".format(longestbedfile),
"--no_strip_names"])
|
def longest(args)
|
%prog longest bedfile fastafile
Select longest feature within overlapping piles.
| 2.978404 | 2.890622 | 1.030368 |
p = OptionParser(merge.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
bedfiles = args
fw = must_open(opts.outfile, "w")
for bedfile in bedfiles:
bed = Bed(bedfile)
pf = op.basename(bedfile).split(".")[0]
for b in bed:
b.seqid = "_".join((pf, b.seqid))
print(b, file=fw)
|
def merge(args)
|
%prog merge bedfiles > newbedfile
Concatenate bed files together, prefixing seqids with the file name to avoid
conflicts in the new bed file.
| 2.704202 | 2.358712 | 1.146474 |
p = OptionParser(fix.__doc__)
p.add_option("--minspan", default=0, type="int",
help="Enforce minimum span [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
minspan = opts.minspan
fp = open(bedfile)
fw = must_open(opts.outfile, "w")
nfixed = nfiltered = ntotal = 0
for row in fp:
atoms = row.strip().split("\t")
assert len(atoms) >= 3, "Must be at least 3 columns"
seqid, start, end = atoms[:3]
start, end = int(start), int(end)
orientation = '+'
if start > end:
start, end = end, start
orientation = '-'
nfixed += 1
atoms[1:3] = [str(start), str(end)]
if len(atoms) > 6:
atoms[6] = orientation
line = "\t".join(atoms)
b = BedLine(line)
if b.span >= minspan:
print(b, file=fw)
nfiltered += 1
ntotal += 1
if nfixed:
logging.debug("Total fixed: {0}".format(percentage(nfixed, ntotal)))
if nfiltered:
logging.debug("Total filtered: {0}".format(percentage(nfiltered, ntotal)))
|
def fix(args)
|
%prog fix bedfile > newbedfile
Fix non-standard bed files. One typical problem is start > end.
| 2.470608 | 2.318221 | 1.065734 |
from jcvi.formats.base import SetFile
from jcvi.utils.cbook import gene_name
p = OptionParser(some.__doc__)
p.add_option("-v", dest="inverse", default=False, action="store_true",
help="Get the inverse, like grep -v [default: %default]")
p.set_outfile()
p.set_stripnames()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, idsfile = args
inverse = opts.inverse
ostrip = opts.strip_names
fw = must_open(opts.outfile, "w")
ids = SetFile(idsfile)
if ostrip:
ids = set(gene_name(x) for x in ids)
bed = Bed(bedfile)
ntotal = nkeep = 0
for b in bed:
ntotal += 1
keep = b.accn in ids
if inverse:
keep = not keep
if keep:
nkeep += 1
print(b, file=fw)
fw.close()
logging.debug("Stats: {0} features kept.".\
format(percentage(nkeep, ntotal)))
|
def some(args)
|
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
| 3.149628 | 2.844021 | 1.107456 |
from jcvi.formats.sizes import Sizes
p = OptionParser(uniq.__doc__)
p.add_option("--sizes", help="Use sequence length as score")
p.add_option("--mode", default="span", choices=("span", "score"),
help="Pile mode")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
uniqbedfile = bedfile.split(".")[0] + ".uniq.bed"
bed = Bed(bedfile)
if opts.sizes:
sizes = Sizes(opts.sizes).mapping
ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) \
for i, x in enumerate(bed)]
else:
if opts.mode == "span":
ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) \
for i, x in enumerate(bed)]
else:
ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) \
for i, x in enumerate(bed)]
selected, score = range_chain(ranges)
selected = [x.id for x in selected]
selected_ids = set(selected)
selected = [bed[x] for x in selected]
notselected = [x for i, x in enumerate(bed) if i not in selected_ids]
newbed = Bed()
newbed.extend(selected)
newbed.print_to_file(uniqbedfile, sorted=True)
if notselected:
leftoverfile = bedfile.split(".")[0] + ".leftover.bed"
leftoverbed = Bed()
leftoverbed.extend(notselected)
leftoverbed.print_to_file(leftoverfile, sorted=True)
logging.debug("Imported: {0}, Exported: {1}".format(len(bed), len(newbed)))
return uniqbedfile
|
def uniq(args)
|
%prog uniq bedfile
Remove overlapping features with higher scores.
| 2.401915 | 2.296301 | 1.045993 |
from jcvi.formats.sizes import Sizes
p = OptionParser(bins.__doc__)
p.add_option("--binsize", default=100000, type="int",
help="Size of the bins [default: %default]")
p.add_option("--subtract",
help="Subtract bases from window [default: %default]")
p.add_option("--mode", default="span", choices=("span", "count", "score"),
help="Accumulate feature based on [default: %default]")
p.add_option("--nomerge", default=False, action="store_true",
help="Do not merge features")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
subtract = opts.subtract
mode = opts.mode
assert op.exists(bedfile), "File `{0}` not found".format(bedfile)
binsize = opts.binsize
binfile = bedfile + ".{0}".format(binsize)
binfile += ".{0}.bins".format(mode)
if not need_update(bedfile, binfile):
return binfile
sz = Sizes(fastafile)
sizesfile = sz.filename
sizes = sz.mapping
fw = open(binfile, "w")
scores = "median" if mode == "score" else None
if not opts.nomerge:
bedfile = mergeBed(bedfile, nms=True, scores=scores)
if subtract:
subtractmerge = mergeBed(subtract)
subtract_complement = complementBed(subtractmerge, sizesfile)
bedfile = intersectBed(bedfile, subtract_complement)
bedfile = sort([bedfile, "-i"])
bed = Bed(bedfile)
sbdict = dict(bed.sub_beds())
for chr, chr_len in sorted(sizes.items()):
chr_len = sizes[chr]
subbeds = sbdict.get(chr, [])
nbins = chr_len / binsize
last_bin = chr_len % binsize
if last_bin:
nbins += 1
a = np.zeros(nbins) # values
b = np.zeros(nbins, dtype="int") # bases
c = np.zeros(nbins, dtype="int") # count
b[:-1] = binsize
b[-1] = last_bin
for bb in subbeds:
start, end = bb.start, bb.end
startbin = start / binsize
endbin = end / binsize
assert startbin <= endbin
c[startbin:endbin + 1] += 1
if mode == "score":
a[startbin:endbin + 1] += float(bb.score)
elif mode == "span":
if startbin == endbin:
a[startbin] += end - start + 1
if startbin < endbin:
firstsize = (startbin + 1) * binsize - start + 1
lastsize = end - endbin * binsize
a[startbin] += firstsize
if startbin + 1 < endbin:
a[startbin + 1:endbin] += binsize
a[endbin] += lastsize
if mode == "count":
a = c
for xa, xb in zip(a, b):
print("\t".join(str(x) for x in (chr, xa, xb)), file=fw)
fw.close()
if subtract:
subtractbinfile = bins([subtract, fastafile, "--binsize={0}".format(binsize)])
binfile = subtractbins(binfile, subtractbinfile)
return binfile
|
def bins(args)
|
%prog bins bedfile fastafile
Bin bed lengths into each consecutive window. Use --subtract to remove bases
from window, e.g. --subtract gaps.bed ignores the gap sequences.
| 2.824761 | 2.729122 | 1.035044 |
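A sketch of the span accumulation when a feature crosses a bin boundary, with an assumed binsize of 100 and 1-based inclusive coordinates:
binsize, start, end = 100, 95, 130
startbin, endbin = start // binsize, end // binsize   # bins 0 and 1
firstsize = (startbin + 1) * binsize - start + 1      # 6 bases land in bin 0
lastsize = end - endbin * binsize                     # 30 bases land in bin 1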
from jcvi.utils.grouper import Grouper
p = OptionParser(pile.__doc__)
p.add_option("--minOverlap", default=0, type="int",
help="Minimum overlap required [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
abedfile, bbedfile = args
iw = intersectBed_wao(abedfile, bbedfile, minOverlap=opts.minOverlap)
groups = Grouper()
for a, b in iw:
groups.join(a.accn, b.accn)
ngroups = 0
for group in groups:
if len(group) > 1:
ngroups += 1
print("|".join(group))
logging.debug("A total of {0} piles (>= 2 members)".format(ngroups))
|
def pile(args)
|
%prog pile abedfile bbedfile > piles
Call intersectBed on two bedfiles and print out 'piles' of connected features.
| 3.235194 | 2.702508 | 1.197108 |
p = OptionParser(index.__doc__)
p.add_option("--fasta", help="Generate bedgraph and index")
p.add_option("--query", help="Chromosome location")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
fastafile = opts.fasta
if fastafile:
bedfile = make_bedgraph(bedfile, fastafile)
bedfile = sort([bedfile])
gzfile = bedfile + ".gz"
if need_update(bedfile, gzfile):
cmd = "bgzip {0}".format(bedfile)
sh(cmd)
tbifile = gzfile + ".tbi"
if need_update(gzfile, tbifile):
cmd = "tabix -p bed {0}".format(gzfile)
sh(cmd)
query = opts.query
if not query:
return
cmd = "tabix {0} {1}".format(gzfile, query)
sh(cmd, outfile=opts.outfile)
|
def index(args)
|
%prog index bedfile
Compress and index bedfile using `tabix`. Use --fasta to give a FASTA file
so that a bedgraph file can be generated and indexed.
| 2.81429 | 2.476275 | 1.136502 |
from jcvi.formats.sizes import Sizes
p = OptionParser(evaluate.__doc__)
p.add_option("--query",
help="Chromosome location [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prediction, reality, fastafile = args
query = opts.query
prediction = mergeBed(prediction)
reality = mergeBed(reality)
sizes = Sizes(fastafile)
sizesfile = sizes.filename
prediction_complement = complementBed(prediction, sizesfile)
reality_complement = complementBed(reality, sizesfile)
TPbed = intersectBed(prediction, reality)
FPbed = intersectBed(prediction, reality_complement)
FNbed = intersectBed(prediction_complement, reality)
TNbed = intersectBed(prediction_complement, reality_complement)
beds = (TPbed, FPbed, FNbed, TNbed)
if query:
subbeds = []
rr = query_to_range(query, sizes)
ce = 'echo "{0}"'.format("\t".join(str(x) for x in rr))
for b in beds:
subbed = ".".join((b, query))
cmd = ce + " | intersectBed -a stdin -b {0}".format(b)
sh(cmd, outfile=subbed)
subbeds.append(subbed)
beds = subbeds
be = BedEvaluate(*beds)
print(be, file=sys.stderr)
if query:
for b in subbeds:
os.remove(b)
return be
|
def evaluate(args)
|
%prog evaluate prediction.bed reality.bed fastafile
Make a truth table like:
        True    False  --- Reality
True    TP      FP
False   FN      TN
 |----Prediction
Sn = TP / (all true in reality) = TP / (TP + FN)
Sp = TP / (all true in prediction) = TP / (TP + FP)
Ac = (TP + TN) / (TP + FP + FN + TN)
| 3.223388 | 2.965093 | 1.087112 |
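A small numeric example of the Sn/Sp/Ac definitions above, with assumed per-category base counts:
TP, FP, FN, TN = 900, 100, 50, 8950                 # hypothetical base counts
Sn = TP / float(TP + FN)                            # 0.947...
Sp = TP / float(TP + FP)                            # 0.9
Ac = (TP + TN) / float(TP + FP + FN + TN)           # 0.985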
p = OptionParser(refine.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
abedfile, bbedfile, refinedbed = args
fw = open(refinedbed, "w")
intersected = refined = 0
for a, b in intersectBed_wao(abedfile, bbedfile):
if b is None:
print(a, file=fw)
continue
intersected += 1
aspan_before = a.span
arange = (a.start, a.end)
brange = (b.start, b.end)
irange = range_intersect(arange, brange)
a.start, a.end = irange
aspan_after = a.span
if aspan_before > aspan_after:
refined += 1
print(a, file=fw)
fw.close()
print("Total intersected: {0}".format(intersected), file=sys.stderr)
print("Total refined: {0}".format(refined), file=sys.stderr)
summary([abedfile])
summary([refinedbed])
|
def refine(args)
|
%prog refine bedfile1 bedfile2 refinedbed
Refine bed file using a second bed file. The final bed keeps all the
intervals in bedfile1, but refines them with bedfile2 wherever the two
intersect.
| 2.950983 | 2.606014 | 1.132374 |
from jcvi.utils.iter import pairwise
p = OptionParser(distance.__doc__)
p.add_option("--distmode", default="ss", choices=("ss", "ee"),
help="Distance mode between paired reads. ss is outer distance, " \
"ee is inner distance [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
sortedbedfile = sort([bedfile])
valid = total = 0
fp = open(sortedbedfile)
for a, b in pairwise(fp):
a = BedLine(a)
b = BedLine(b)
ar = (a.seqid, a.start, a.end, "+")
br = (b.seqid, b.start, b.end, "+")
dist, oo = range_distance(ar, br, distmode=opts.distmode)
total += 1
if dist > 0:
print(dist)
valid += 1
logging.debug("Total valid (> 0) distances: {0}.".\
format(percentage(valid, total)))
|
def distance(args)
|
%prog distance bedfile
Calculate distance between bed features. The output file is a list of
distances, which can be used to plot histogram, etc.
| 3.555462 | 3.377935 | 1.052555 |
import random
from jcvi.assembly.coverage import Coverage
p = OptionParser(sample.__doc__)
p.add_option("--raindrop", default=0, type="int",
help="Raindrop selection, ignores all other options")
p.add_option("--max", default=10, type="int",
help="Max depth allowed")
p.add_option("--targetsize", type="int",
help="Sample bed file to get target base number")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, sizesfile = args
pf = bedfile.rsplit(".", 1)[0]
raindrop = opts.raindrop
# Raindrop method
if raindrop:
bed = Bed(bedfile)
forward = []
for b in bed:
if not forward or abs(b.start - forward[-1].start) >= raindrop:
forward.append(b)
reverse = []
bed.sort(key=lambda x: -x.end)
for b in bed:
if not reverse or abs(b.end - reverse[-1].end) >= raindrop:
reverse.append(b)
for tag, L in zip(("forward", "reverse"), (forward, reverse)):
logging.debug("Selected {0} features in {1} direction, span: {2}".\
format(len(L), tag, sum(x.span for x in L)))
selected = Bed()
selected.extend(set(forward + reverse))
selected.print_to_file(opts.outfile, sorted=True)
return
targetsize = opts.targetsize
if targetsize:
bed = Bed(bedfile)
samplebed = pf + ".sample.bed"
fw = open(samplebed, "w")
nfeats = len(bed)
nbases = bed.sum(unique=False)
targetfeats = int(round(nfeats * targetsize / nbases))
sub_bed = random.sample(bed, targetfeats)
for b in sub_bed:
print(b, file=fw)
logging.debug("File written to `{0}`.".format(samplebed))
return
c = Coverage(bedfile, sizesfile)
coveragefile = c.filename
samplecoveragefile = pf + ".sample.coverage"
fw = open(samplecoveragefile, "w")
fp = open(coveragefile)
for row in fp:
seqid, start, end, cov = row.split()
cov = int(cov)
if cov <= opts.max:
fw.write(row)
fw.close()
samplebedfile = pf + ".sample.bed"
cmd = "intersectBed -a {0} -b {1} -wa -u".format(bedfile, samplecoveragefile)
sh(cmd, outfile=samplebedfile)
logging.debug("Sampled bedfile written to `{0}`.".format(samplebedfile))
|
def sample(args)
|
%prog sample bedfile sizesfile
Sample bed file and remove high-coverage regions.
When option --targetsize is used, this program uses a different mode. It
first calculates the current total bases from all ranges, compares to
targetsize, and if larger, samples down as close to targetsize as possible.
Selection via --raindrop has the effect of making coverage even. Selected
reads have the property that their end points are not within a certain
window from one another. One sweep goes from left to right, the other in
the reverse direction.
| 3.02211 | 2.778305 | 1.087753 |
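A sketch of the forward raindrop sweep described above, with a 100-bp window and made-up start positions (the real code also sweeps in reverse on end positions):
starts, raindrop = [0, 50, 160, 240], 100
kept = []
for s in starts:
    if not kept or s - kept[-1] >= raindrop:   # keep only starts >= 100 bp apart
        kept.append(s)
assert kept == [0, 160]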
from jcvi.assembly.coverage import bed_to_bedpe
p = OptionParser(bedpe.__doc__)
p.add_option("--span", default=False, action="store_true",
help="Write span bed file [default: %default]")
p.add_option("--strand", default=False, action="store_true",
help="Write the strand columns [default: %default]")
p.add_option("--mates", help="Check the library stats from .mates file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
bedpefile = pf + ".bedpe"
bedspanfile = pf + ".spans.bed" if opts.span else None
bed_to_bedpe(bedfile, bedpefile, \
pairsbedfile=bedspanfile, matesfile=opts.mates, \
strand=opts.strand)
return bedpefile, bedspanfile
|
def bedpe(args)
|
%prog bedpe bedfile
Convert to bedpe format. Use --span to write another bed file that contains
the span of the read pairs.
| 2.859687 | 2.712323 | 1.054331 |
p = OptionParser(sizes.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
assert op.exists(bedfile)
sizesfile = bedfile.rsplit(".", 1)[0] + ".sizes"
fw = must_open(sizesfile, "w", checkexists=True, skipcheck=True)
if fw:
b = Bed(bedfile)
for s, sbeds in b.sub_beds():
print("{0}\t{1}".format(\
s, max(x.end for x in sbeds)), file=fw)
logging.debug("Sizes file written to `{0}`.".format(sizesfile))
return sizesfile
|
def sizes(args)
|
%prog sizes bedfile
Infer the sizes for each seqid. Useful before dot plots.
| 3.725223 | 3.52458 | 1.056927 |
peak0 = [d for d in dists if d < cutoff]
peak1 = [d for d in dists if d >= cutoff]
c0, c1 = len(peak0), len(peak1)
logging.debug("Component counts: {0} {1}".format(c0, c1))
if c0 == 0 or c1 == 0 or float(c1) / len(dists) < alpha:
logging.debug("Single peak identified ({0} / {1} < {2})".\
format(c1, len(dists), alpha))
return np.median(dists)
peak0_median = np.median(peak0)
peak1_median = np.median(peak1)
logging.debug("Dual peaks identified: {0}bp ({1}), {2}bp ({3}) (selected)".\
format(int(peak0_median), c0, int(peak1_median), c1))
return peak1_median
|
def analyze_dists(dists, cutoff=1000, alpha=.1)
|
The dists can show a bimodal distribution if they come from a mate-pair
library. Assume a bimodal distribution and separate the two peaks. Based
on the percentage in each peak, we can decide if it is indeed one peak or
two peaks, and report the corresponding median.
| 2.954425 | 2.87615 | 1.027215 |
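A toy illustration of the peak separation, with made-up insert sizes: distances below the cutoff form peak0, the rest form peak1, and peak1's median is reported when it holds at least alpha of the data:
dists = [300, 320, 310, 4800, 5200, 5000, 5100]     # hypothetical mate-pair distances
cutoff, alpha = 1000, .1
peak1 = [d for d in dists if d >= cutoff]
use_peak1 = len(peak1) / float(len(dists)) >= alpha # True: 4/7 >= 0.1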
p = OptionParser(pairs.__doc__)
p.set_pairs()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
basename = bedfile.split(".")[0]
insertsfile = ".".join((basename, "inserts"))
bedfile = sort([bedfile, "--accn"])
fp = open(bedfile)
data = [BedLine(row) for i, row in enumerate(fp) if i < opts.nrows]
ascii = not opts.pdf
return bedfile, report_pairs(data, opts.cutoff, opts.mateorientation,
pairsfile=opts.pairsfile, insertsfile=insertsfile,
rclip=opts.rclip, ascii=ascii, bins=opts.bins,
distmode=opts.distmode)
|
def pairs(args)
|
See __doc__ for OptionParser.set_pairs().
| 5.519956 | 5.423583 | 1.017769 |
p = OptionParser(summary.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Write .sizes file")
p.add_option("--all", default=False, action="store_true",
help="Write summary stats per seqid")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
bed = Bed(bedfile)
bs = BedSummary(bed)
if opts.sizes:
sizesfile = bedfile + ".sizes"
fw = open(sizesfile, "w")
for span, accn in bs.mspans:
print(span, file=fw)
fw.close()
logging.debug("Spans written to `{0}`.".format(sizesfile))
return bs
if not opts.all:
bs.report()
return bs
for seqid, subbeds in bed.sub_beds():
bs = BedSummary(subbeds)
print("\t".join((seqid, str(bs))))
|
def summary(args)
|
%prog summary bedfile
Sum the total lengths of the intervals.
| 3.114776 | 3.019944 | 1.031402 |
p = OptionParser(sort.__doc__)
p.add_option("-i", "--inplace", dest="inplace",
default=False, action="store_true",
help="Sort bed file in place [default: %default]")
p.add_option("-u", dest="unique",
default=False, action="store_true",
help="Uniqify the bed file")
p.add_option("--accn", default=False, action="store_true",
help="Sort based on the accessions [default: %default]")
p.set_outfile(outfile=None)
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
inplace = opts.inplace
if not inplace and ".sorted." in bedfile:
return bedfile
sortedbed = opts.outfile
if inplace:
sortedbed = bedfile
elif opts.outfile is None:
pf, sf = op.basename(bedfile).rsplit(".", 1)
sortedbed = pf + ".sorted." + sf
sortopt = "-k1,1 -k2,2n -k3,3n -k4,4" if not opts.accn else \
"-k4,4 -k1,1 -k2,2n -k3,3n"
cmd = "sort"
if opts.tmpdir:
cmd += " -T {0}".format(opts.tmpdir)
if opts.unique:
cmd += " -u"
cmd += " {0} {1} -o {2}".format(sortopt, bedfile, sortedbed)
if inplace or need_update(bedfile, sortedbed):
sh(cmd)
return sortedbed
|
def sort(args)
|
%prog sort bedfile
Sort bed file to have ascending order of seqid, then start. It uses the
`sort` command.
| 2.44911 | 2.378841 | 1.029539 |
p = OptionParser(mates.__doc__)
p.add_option("--lib", default=False, action="store_true",
help="Output library information along with pairs [default: %default]")
p.add_option("--nointra", default=False, action="store_true",
help="Remove mates that are intra-scaffold [default: %default]")
p.add_option("--prefix", default=False, action="store_true",
help="Only keep links between IDs with same prefix [default: %default]")
p.set_mates()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
rclip = opts.rclip
key = (lambda x: x.accn[:-rclip]) if rclip else (lambda x: x.accn)
bed = Bed(bedfile, key=key)
pf = bedfile.rsplit(".", 1)[0]
matesfile = pf + ".mates"
lib = pf if opts.lib else None
fw = open(matesfile, "w")
if lib:
bedfile, stats = pairs([bedfile, \
"--rclip={0}".format(rclip),
"--cutoff={0}".format(opts.cutoff)])
sv = int(2 * stats.sd)
mindist = max(stats.mean - sv, 1)
maxdist = stats.mean + sv
print("\t".join(str(x) for x in \
("library", pf, mindist, maxdist)), file=fw)
num_fragments = num_pairs = 0
matesbedfile = matesfile + ".bed"
fwm = open(matesbedfile, "w")
for pe, lines in groupby(bed, key=key):
lines = list(lines)
if len(lines) != 2:
num_fragments += len(lines)
continue
a, b = lines
if opts.nointra and a.seqid == b.seqid:
continue
# Use --prefix to limit the links between seqids with the same prefix
# For example, contigs of the same BAC, mth2-23j10_001, mth-23j10_002
if opts.prefix:
aprefix = a.seqid.split("_")[0]
bprefix = b.seqid.split("_")[0]
if aprefix != bprefix:
continue
num_pairs += 1
pair = [a.accn, b.accn]
if lib:
pair.append(lib)
print("\t".join(pair), file=fw)
print(a, file=fwm)
print(b, file=fwm)
logging.debug("Discard {0} frags and write {1} pairs to `{2}` and `{3}`.".\
format(num_fragments, num_pairs, matesfile, matesbedfile))
fw.close()
fwm.close()
return matesfile, matesbedfile
|
def mates(args)
|
%prog mates bedfile
Generate the mates file by inferring from the names.
| 3.55952 | 3.46606 | 1.026964 |
from numpy import array, argsort
p = OptionParser(flanking.__doc__)
p.add_option("--chrom", default=None, type="string",
help="chrom name of the position in query. Make sure it matches bedfile.")
p.add_option("--coord", default=None, type="int",
help="coordinate of the position in query.")
p.add_option("-n", default=10, type="int",
help="number of flanking features to get [default: %default]")
p.add_option("--side", default="both", choices=("upstream", "downstream", "both"),
help="which side to get flanking features [default: %default]")
p.add_option("--max_d", default=None, type="int",
help="features <= max_d away from position [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if any([len(args) != 1, opts.chrom is None, opts.coord is None]):
sys.exit(not p.print_help())
bedfile, = args
position = (opts.chrom, opts.coord)
n, side, maxd = opts.n, opts.side, opts.max_d
chrombed = Bed(bedfile).sub_bed(position[0])
if side == "upstream":
data = [(abs(f.start-position[1]), f) for f in chrombed \
if f.start <= position[1]]
elif side == "downstream":
data = [(abs(f.start-position[1]), f) for f in chrombed \
if f.start >= position[1]]
else:
data = [(abs(f.start-position[1]), f) for f in chrombed]
if maxd:
data = [f for f in data if f[0]<=maxd]
n += 1 # not counting self
n = min(n, len(data))
distances, subbed = zip(*data)
distances = array(distances)
idx = argsort(distances)[:n]
flankingbed = [f for (i, f) in enumerate(subbed) if i in idx]
fw = must_open(opts.outfile, "w")
for atom in flankingbed:
print(str(atom), file=fw)
return (position, flankingbed)
|
def flanking(args)
|
%prog flanking bedfile [options]
Get up to n features (upstream or downstream or both) flanking a given position.
| 2.603364 | 2.452642 | 1.061453 |
args = [getattr(self, attr) for attr in BlastLine.__slots__[:12]]
args[0:2] = [self.subject, self.query]
args[6:10] = [self.sstart, self.sstop, self.qstart, self.qstop]
if self.orientation == '-':
args[8], args[9] = args[9], args[8]
b = "\t".join(str(x) for x in args)
return BlastLine(b)
|
def swapped(self)
|
Swap query and subject.
| 3.775545 | 3.602351 | 1.048078 |
p = OptionParser(gff.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
MultiGenBank(gbkfile)
|
def gff(args)
|
%prog gff seq.gbk
Convert Genbank file to GFF and FASTA file.
The Genbank file can contain multiple records.
| 3.414932 | 2.796341 | 1.221214 |
p = OptionParser(tofasta.__doc__)
p.add_option("--prefix", default="gbfasta",
help="prefix of output files [default: %default]")
filenames, accessions, idfile, opts, args = preparegb(p, args)
prefix = opts.prefix
GenBank(filenames=filenames, accessions=accessions, idfile=idfile).\
write_fasta(output=prefix, individual=opts.individual)
if opts.individual:
logging.debug("Output written dir {0}".format(prefix))
else:
logging.debug("Output written to {0}.fasta".format(prefix))
|
def tofasta(args)
|
%prog tofasta [--options]
Read GenBank file, or retrieve from web.
Output FASTA file with one record per file,
or all records in one file.
| 5.806116 | 5.725057 | 1.014159 |
p = OptionParser(getgenes.__doc__)
p.add_option("--prefix", default="gbout",
help="prefix of output files [default: %default]")
p.add_option("--nopep", default=False, action="store_true",
help="Only get cds and bed, no pep [default: %default]")
filenames, accessions, idfile, opts, args = preparegb(p, args)
prefix = opts.prefix
GenBank(filenames=filenames, accessions=accessions, idfile=idfile).\
write_genes(output=prefix, individual=opts.individual, \
pep=(not opts.nopep))
if opts.individual:
logging.debug("Output written dir {0}".format(prefix))
elif opts.nopep:
logging.debug("Output written to {0}.bed, {0}.cds".format(prefix,))
else:
logging.debug("Output written to {0}.bed, {0}.cds, {0}.pep".format(prefix,))
|
def getgenes(args)
|
%prog getgenes [--options]
Read GenBank file, or retrieve from web.
Output bed, cds files, and pep file (can turn off with --nopep).
Either --gb_dir or --id/--simple should be provided.
| 4.657657 | 3.813332 | 1.221414 |
prot_id = None
for ftype in quals_ftypes:
for i, quals in enumerate(locus[locus_tag][ftype]):
for elem in quals:
elem_id = elem[0]
if len(locus[locus_tag]["protein_id"]) > 0 and ftype in ("mRNA", "CDS"):
elem_id = locus[locus_tag]["protein_id"][i]
if ftype == 'misc_RNA': ftype = 'ncRNA'
print("\t".join(str(x) for x in (elem_id, elem[1], elem[2], ftype)))
|
def print_locus_quals(locus_tag, locus, quals_ftypes)
|
Given a locus_tag and dict of features, print out 4-column output:
locus_tag, qualifier, value, feature type
Replace locus_tag with protein_id if processing an "mRNA" or "CDS"
| 3.418918 | 3.396039 | 1.006737 |
p = OptionParser(getquals.__doc__)
p.add_option("--types", default="gene,mRNA,CDS",
type="str", dest="quals_ftypes",
help="Feature types from which to extract qualifiers")
p.add_option("--ignore", default="locus_tag,product,codon_start,translation",
type="str", dest="quals_ignore",
help="Qualifiers to exclude from parsing")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
quals_ftypes = opts.quals_ftypes.split(",")
quals_ignore = opts.quals_ignore.split(",")
locus = dict()
locus_tag = None
for rec in SeqIO.parse(gbkfile, "gb"):
for f in rec.features:
if f.type in quals_ftypes:
locus_tag = f.qualifiers[LT][0]
if locus_tag not in locus:
locus[locus_tag] = dict()
for ftype in quals_ftypes:
if ftype not in locus[locus_tag]:
locus[locus_tag][ftype] = []
if ftype == "CDS": # store the CDS protein_id
locus[locus_tag]["protein_id"] = []
quals = []
for qual in f.qualifiers:
if qual in quals_ignore:
continue
for qval in f.qualifiers[qual]:
quals.append((locus_tag, qual, qval))
if qual == "protein_id":
locus[locus_tag]["protein_id"].append(qval)
if len(quals) > 0:
locus[locus_tag][f.type].append(quals)
for locus_tag in locus:
print_locus_quals(locus_tag, locus, quals_ftypes)
|
def getquals(args)
|
%prog getquals [--options] gbkfile > qualsfile
Read GenBank file and extract all qualifiers per feature type
into a tab-delimited file
| 2.145037 | 2.013005 | 1.06559 |
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
|
def consume(iterator, n)
|
Advance the iterator n steps ahead. If n is None, consume the iterator entirely.
| 2.41933 | 2.447042 | 0.988675 |
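A small usage sketch for the consume recipe above (assuming the signature and body are assembled into a working function), showing both the bounded mode and the exhaust-completely mode; values are illustrative:

it = iter(range(10))
consume(it, 3)            # skip the first three items at C speed
next(it)                  # -> 3

it2 = iter(range(10))
consume(it2, None)        # feed the whole iterator into a zero-length deque
next(it2, "exhausted")    # -> "exhausted"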
"Random selection from itertools.combinations(iterable, r)"
pool = tuple(iterable)
n = len(pool)
indices = sorted(random.sample(xrange(n), r))
return tuple(pool[i] for i in indices)
|
def random_combination(iterable, r)
|
Random selection from itertools.combinations(iterable, r)
| 2.506187 | 2.126661 | 1.178461 |
"Random selection from itertools.combinations_with_replacement(iterable, r)"
pool = tuple(iterable)
n = len(pool)
indices = sorted(random.randrange(n) for i in xrange(r))
return tuple(pool[i] for i in indices)
|
def random_combination_with_replacement(iterable, r)
|
Random selection from itertools.combinations_with_replacement(iterable, r)
| 2.613867 | 2.168791 | 1.205219 |
for value in islice(t.__copy__(), i, None):
return value
raise IndexError(i)
|
def tee_lookahead(t, i)
|
Inspect the i-th upcoming value from a tee object
while leaving the tee object at its current position.
Raise an IndexError if the underlying iterator doesn't
have enough values.
| 10.473719 | 10.694083 | 0.979394 |
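A usage sketch (values are illustrative) showing that tee_lookahead peeks ahead on one tee branch without consuming it, because it iterates over a copy of the tee object:

from itertools import tee

a, b = tee(iter([10, 20, 30]))
tee_lookahead(a, 2)       # -> 30, inspected via a.__copy__()
next(a)                   # -> 10, the branch itself has not advanced
# tee_lookahead(a, 5) would raise IndexError(5): not enough values left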
# Doesn't seem to run into any number-of-args limits.
for group in (list(g) for g in zip_longest(*[iter(iterable)] * n,
fillvalue=_marker)):
if group[-1] is _marker:
# If this is the last group, shuck off the padding:
del group[group.index(_marker):]
yield group
|
def chunked(iterable, n)
|
Break an iterable into lists of a given length::
>>> list(chunked([1, 2, 3, 4, 5, 6, 7], 3))
[[1, 2, 3], [4, 5, 6], [7]]
If the length of ``iterable`` is not evenly divisible by ``n``, the last
returned list will be shorter.
This is useful for splitting up a computation on a large number of keys
into batches, to be pickled and sent off to worker processes. One example
is operations on rows in MySQL, which does not implement server-side
cursors properly and would otherwise load the entire dataset into RAM on
the client.
| 8.139925 | 8.954328 | 0.909049 |
if not hasattr(self, '_peek'):
try:
self._peek = next(self._it)
except StopIteration:
if default is _marker:
raise
return default
return self._peek
|
def peek(self, default=_marker)
|
Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
| 2.686407 | 2.565084 | 1.047298 |
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads)))
|
def uniq(args)
|
%prog uniq fastqfile
Retain only the first instance of duplicate reads. Duplicates are defined as
reads having the same name.
| 2.768735 | 2.581782 | 1.072412 |
p = OptionParser(suffix.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastqfile, sf = args
fw = must_open(opts.outfile, "w")
nreads = nselected = 0
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
if rec.seq.endswith(sf):
print(rec, file=fw)
nselected += 1
logging.debug("Selected reads with suffix {0}: {1}".\
format(sf, percentage(nselected, nreads)))
|
def suffix(args)
|
%prog suffix fastqfile CAG
Filter reads based on suffix.
| 3.042276 | 2.784178 | 1.092702 |
p = OptionParser(readlen.__doc__)
p.set_firstN()
p.add_option("--silent", default=False, action="store_true",
help="Do not print read length stats")
p.add_option("--nocheck", default=False, action="store_true",
help="Do not check file type suffix")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
f, = args
if (not opts.nocheck) and (not is_fastq(f)):
logging.debug("File `{}` does not endswith .fastq or .fq".format(f))
return 0
s = calc_readlen(f, opts.firstN)
if not opts.silent:
print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))
return int(s.max)
|
def readlen(args)
|
%prog readlen fastqfile
Calculate read length using only the first N reads. Output min, max, mean,
and median for each file.
| 2.790467 | 2.795806 | 0.99809 |
p = OptionParser(fasta.__doc__)
p.add_option("--seqtk", default=False, action="store_true",
help="Use seqtk to convert")
p.set_outdir()
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
outdir = opts.outdir
if outdir and outdir != ".":
mkdir(outdir)
fastqfile = fastqfiles[0]
pf = op.basename(fastqfile)
gzinput = pf.endswith(".gz")
if gzinput:
pf = pf.rsplit(".", 1)[0]
pf, sf = pf.rsplit(".", 1)
if sf not in ("fq", "fastq"):
logging.debug("Assumed FASTA: suffix not `fq` or `fastq`")
return fastqfile, None
fastafile, qualfile = pf + ".fasta", pf + ".qual"
outfile = opts.outfile or fastafile
outfile = op.join(outdir, outfile)
if opts.seqtk:
if need_update(fastqfiles, outfile):
for i, fastqfile in enumerate(fastqfiles):
cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile)
# First one creates file, following ones append to it
sh(cmd, outfile=outfile, append=i)
else:
logging.debug("Outfile `{0}` already exists.".format(outfile))
return outfile, None
for fastqfile in fastqfiles:
SeqIO.convert(fastqfile, "fastq", fastafile, "fasta")
SeqIO.convert(fastqfile, "fastq", qualfile, "qual")
return fastafile, qualfile
|
def fasta(args)
|
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
| 3.187926 | 3.007523 | 1.059984 |
from jcvi.apps.base import need_update
p = OptionParser(first.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
N = int(args[0])
nlines = N * 4
fastqfiles = args[1:]
fastqfile = fastqfiles[0]
outfile = opts.outfile
if not need_update(fastqfiles, outfile):
logging.debug("File `{0}` exists. Will not overwrite.".format(outfile))
return
gz = fastqfile.endswith(".gz")
for fastqfile in fastqfiles:
if gz:
cmd = "zcat {0} | head -n {1}".format(fastqfile, nlines)
else:
cmd = "head -n {0} {1}".format(nlines, fastqfile)
sh(cmd, outfile=opts.outfile, append=True)
|
def first(args)
|
%prog first N fastqfile(s)
Get first N reads from file.
| 2.45853 | 2.19349 | 1.12083 |
p = OptionParser(filter.__doc__)
p.add_option("-q", dest="qv", default=20, type="int",
help="Minimum quality score to keep [default: %default]")
p.add_option("-p", dest="pct", default=95, type="int",
help="Minimum percent of bases that have [-q] quality "\
"[default: %default]")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 1:
r1 = r2 = args[0]
else:
r1, r2 = args
qv = opts.qv
pct = opts.pct
offset = guessoffset([r1])
qvchar = chr(offset + qv)
logging.debug("Call base qv >= {0} as good.".format(qvchar))
outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv)
fw = open(outfile, "w")
p1fp, p2fp = FastqPairedIterator(r1, r2)
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
q1 = a[-1].rstrip()
q2 = b[-1].rstrip()
if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct):
fw.writelines(a)
fw.writelines(b)
|
def filter(args)
|
%prog filter paired.fastq
Filter to get high qv reads. Use interleaved format (one file) or paired
format (two files) to filter on paired reads.
| 3.204611 | 2.892759 | 1.107804 |
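The isHighQv helper is not shown in this row; below is a minimal sketch of the kind of check it performs, assuming a read pair passes when at least pct percent of the quality characters are at or above the threshold character qvchar (ASCII order matches Phred order for a fixed offset). The name is_high_qv is hypothetical, not the library's own function:

def is_high_qv(qual, qvchar, pct=95):
    # hypothetical re-implementation: count bases at or above the threshold char
    good = sum(1 for c in qual if c >= qvchar)
    return good * 100 >= pct * len(qual)

is_high_qv("IIIIIIII#I", chr(33 + 20), pct=95)   # -> False, one base is below Q20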
p = OptionParser(shuffle.__doc__)
p.set_tag()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
p1, p2 = args
pairsfastq = pairspf((p1, p2)) + ".fastq"
tag = opts.tag
p1fp = must_open(p1)
p2fp = must_open(p2)
pairsfw = must_open(pairsfastq, "w")
nreads = 0
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
if tag:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
pairsfw.writelines(a)
pairsfw.writelines(b)
nreads += 2
pairsfw.close()
extra = nreads * 2 if tag else 0
checkShuffleSizes(p1, p2, pairsfastq, extra=extra)
logging.debug("File `{0}` verified after writing {1} reads.".\
format(pairsfastq, nreads))
return pairsfastq
|
def shuffle(args)
|
%prog shuffle p1.fastq p2.fastq
Shuffle pairs into interleaved format.
| 3.358299 | 3.120088 | 1.076348 |
from jcvi.apps.grid import Jobs
p = OptionParser(split.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
gz = pairsfastq.endswith(".gz")
pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0]
p1 = pf + ".1.fastq"
p2 = pf + ".2.fastq"
cmd = "zcat" if gz else "cat"
p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq)
p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq)
if gz:
p1cmd += " | gzip"
p2cmd += " | gzip"
p1 += ".gz"
p2 += ".gz"
p1cmd += " > " + p1
p2cmd += " > " + p2
args = [(p1cmd, ), (p2cmd, )]
m = Jobs(target=sh, args=args)
m.run()
checkShuffleSizes(p1, p2, pairsfastq)
|
def split(args)
|
%prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776>
| 2.749703 | 2.598952 | 1.058004 |
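The two sed one-liners rely on GNU sed step addresses: '1~8{N;N;N;p}' prints lines 1-4 of every 8-line block (read 1) and '5~8{N;N;N;p}' prints lines 5-8 (read 2). A pure-Python sketch of the same idea, with hypothetical file handles:

from itertools import islice

def split_interleaved(pairs_fp, fw1, fw2):
    # every 8 lines is one read pair: first 4 lines -> read 1, next 4 -> read 2
    while True:
        block = list(islice(pairs_fp, 8))
        if not block:
            break
        fw1.writelines(block[:4])
        fw2.writelines(block[4:])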
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
rec = next(ai)
if offset == 33:
print("Sanger encoding (offset=33)", file=sys.stderr)
elif offset == 64:
print("Illumina encoding (offset=64)", file=sys.stderr)
return offset
|
def guessoffset(args)
|
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
| 2.699579 | 2.708517 | 0.9967 |
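Once the offset is known, quality characters map to Phred scores by simple subtraction of the offset from the ASCII code; a small sketch:

def phred_scores(quality, offset=33):
    # convert a FASTQ quality string to integer Phred scores
    return [ord(c) - offset for c in quality]

phred_scores("IIHG", offset=33)   # -> [40, 40, 39, 38]
phred_scores("hhgf", offset=64)   # -> [40, 40, 39, 38]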
p = OptionParser(format.__doc__)
p.add_option("--convert", default=None, choices=[">=1.8", "<1.8", "sra"],
help="Convert fastq header to a different format" +
" [default: %default]")
p.set_tag(specify_tag=True)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
dialect = None
while rec:
h = FastqHeader(rec.header)
if not dialect:
dialect = h.dialect
logging.debug("Input fastq dialect: `{0}`".format(dialect))
if opts.convert:
logging.debug("Output fastq dialect: `{0}`".format(opts.convert))
rec.name = h.format_header(dialect=opts.convert, tag=opts.tag)
print(rec)
rec = next(ai)
|
def format(args)
|
%prog format fastqfile
Format FASTQ file. Currently provides option to convert FASTQ header from
one dialect to another.
| 3.851692 | 3.443885 | 1.118415 |
p = OptionParser(some.__doc__)
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
idsfile, afastq, = args[:2]
bfastq = args[2] if len(args) == 3 else None
ids = DictFile(idsfile, valuepos=None)
ai = iter_fastq(open(afastq))
arec = next(ai)
if bfastq:
bi = iter_fastq(open(bfastq))
brec = next(bi)
while arec:
if arec.name[1:] in ids:
print(arec)
if bfastq:
print(brec)
arec = next(ai)
if bfastq:
brec = next(bi)
|
def some(args)
|
%prog some idsfile afastq [bfastq]
Select a subset of the reads with ids present in the idsfile.
`bfastq` is optional (only if reads are paired)
| 2.972727 | 2.294189 | 1.295763 |
p = OptionParser(trim.__doc__)
p.add_option("-f", dest="first", default=0, type="int",
help="First base to keep. Default is 1.")
p.add_option("-l", dest="last", default=0, type="int",
help="Last base to keep. Default is entire read.")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
obfastqfile = op.basename(fastqfile)
fq = obfastqfile.rsplit(".", 1)[0] + ".ntrimmed.fastq"
if fastqfile.endswith(".gz"):
fq = obfastqfile.rsplit(".", 2)[0] + ".ntrimmed.fastq.gz"
cmd = "fastx_trimmer -Q33 "
if opts.first:
cmd += "-f {0.first} ".format(opts)
if opts.last:
cmd += "-l {0.last} ".format(opts)
sh(cmd, infile=fastqfile, outfile=fq)
|
def trim(args)
|
%prog trim fastqfile
Wraps `fastx_trimmer` to trim from begin or end of reads.
| 2.552694 | 2.433039 | 1.049179 |
p = OptionParser(catread.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
r1, r2 = args
p1fp, p2fp = FastqPairedIterator(r1, r2)
outfile = pairspf((r1, r2)) + ".cat.fastq"
fw = must_open(outfile, "w")
while True:
a = list(islice(p1fp, 4))
if not a:
break
atitle, aseq, _, aqual = a
btitle, bseq, _, bqual = list(islice(p2fp, 4))
print("\n".join((atitle.strip(), aseq.strip() + bseq.strip(), \
"+", aqual.strip() + bqual.strip())), file=fw)
|
def catread(args)
|
%prog catread fastqfile1 fastqfile2
Concatenate paired-end reads into one. Useful, for example, to do single-end
mapping and perform filtering at the whole read-pair level.
| 3.339979 | 3.095534 | 1.078967 |
p = OptionParser(splitread.__doc__)
p.add_option("-n", dest="n", default=76, type="int",
help="Split at N-th base position [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement second read [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
base = op.basename(pairsfastq).split(".")[0]
fq1 = base + ".1.fastq"
fq2 = base + ".2.fastq"
fw1 = must_open(fq1, "w")
fw2 = must_open(fq2, "w")
fp = must_open(pairsfastq)
n = opts.n
minsize = n * 8 / 5
for name, seq, qual in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
continue
name = "@" + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if opts.rc:
rec2.rc()
print(rec1, file=fw1)
print(rec2, file=fw2)
logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
fw1.close()
fw2.close()
|
def splitread(args)
|
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
| 2.612853 | 2.489864 | 1.049396 |
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in \
(op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in \
("Total", total_numrecords, total_size)))
|
def size(args)
|
%prog size fastqfile
Find the total base pairs in a list of fastq files
| 2.33525 | 2.27957 | 1.024426 |
p = OptionParser(convert.__doc__)
p.set_phred()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
infastq, = args
phred = opts.phred or str(guessoffset([infastq]))
ophred = {"64": "33", "33": "64"}[phred]
gz = infastq.endswith(".gz")
outfastq = infastq.rsplit(".", 1)[0] if gz else infastq
pf, sf = outfastq.rsplit(".", 1)
outfastq = "{0}.q{1}.{2}".format(pf, ophred, sf)
if gz:
outfastq += ".gz"
fin = "illumina" if phred == "64" else "sanger"
fout = "sanger" if phred == "64" else "illumina"
seqret = "seqret"
if infastq.endswith(".gz"):
cmd = "zcat {0} | ".format(infastq)
cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout".\
format(fin, fout)
else:
cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout".\
format(fin, infastq, fout)
sh(cmd, outfile=outfastq)
return outfastq
|
def convert(args)
|
%prog convert in.fastq
Illumina FASTQ quality encoding uses offset 64, and Sanger uses 33. This
script creates a new file with the correct encoding. Output gzipped file if
input is also gzipped.
| 3.172027 | 2.9161 | 1.087763 |
from jcvi.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.set_rclip()
p.set_tag()
p.add_option("--base",
help="Base name for the output files [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
base = opts.base or op.basename(fastqfile).split(".")[0]
frags = base + ".frags.fastq"
pairs = base + ".pairs.fastq"
if fastqfile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = opts.rclip
tag = opts.tag
strip_name = (lambda x: x[:-N]) if N else None
fh_iter = iter_fastq(fastqfile, key=strip_name)
skipflag = False # controls the iterator skip
for a, b in pairwise(fh_iter):
if b is None: # hit the eof
break
if skipflag:
skipflag = False
continue
if a.name == b.name:
if tag:
a.name += "/1"
b.name += "/2"
print(a, file=pairsfw)
print(b, file=pairsfw)
skipflag = True
else:
print(a, file=fragsfw)
# don't forget the last one, when b is None
if not skipflag:
print(a, file=fragsfw)
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
return pairs
|
def pairinplace(args)
|
%prog pairinplace bulk.fastq
Pair up the records in bulk.fastq by comparing the names of adjacent
records. If they match, print to bulk.pairs.fastq, else print to
bulk.frags.fastq.
| 3.316688 | 3.05005 | 1.087421 |
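With --rclip, the last N characters of each read name are ignored when adjacent records are compared, so names that differ only by a /1 or /2 suffix pair up; a sketch of the key function with hypothetical read names, assuming iter_fastq applies the key when comparing names:

N = 2
strip_name = (lambda x: x[:-N]) if N else None

strip_name("@HWI-ST1234:8:1101:1234:5678/1")   # -> "@HWI-ST1234:8:1101:1234:5678"
strip_name("@HWI-ST1234:8:1101:1234:5678/2")   # -> same key, so the two records pair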
p = OptionParser(fromsra.__doc__)
p.add_option("--paired", default=False, action="store_true",
help="Specify if library layout is paired-end " + \
"[default: %default]")
p.add_option("--compress", default=None, choices=["gzip", "bzip2"],
help="Compress output fastq files [default: %default]")
p.set_outdir()
p.set_grid()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
srafile, = args
paired = opts.paired
compress = opts.compress
outdir = opts.outdir
script_path = which("fastq-dump")
if not script_path:
logging.error("Cannot find `fastq-dump` in the PATH")
sys.exit()
cmd = [script_path]
if compress:
cmd.append("--{0}".format(compress))
if paired:
cmd.append("--split-files")
if outdir:
cmd.append("--outdir {0}".format(outdir))
cmd.append(srafile)
outcmd = " ".join(cmd)
sh(outcmd, grid=opts.grid)
|
def fromsra(args)
|
%prog fromsra srafile
Convert sra file to fastq using the sratoolkit `fastq-dump`
| 2.626724 | 2.474169 | 1.061659 |
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
print(b.blastline)
|
def blast(args)
|
%prog blast btabfile
Convert to BLAST -m8 format.
| 2.74627 | 2.381157 | 1.153334 |
from jcvi.formats.blast import BlastLine
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btab = Btab(btabfile)
for b in btab:
Bline = BlastLine(b.blastline)
print(Bline.bedline)
|
def bed(args)
|
%prog bed btabfile
Convert btab to bed format.
| 2.904101 | 2.555094 | 1.136592 |
from jcvi.utils.range import range_minmax
from jcvi.formats.gff import valid_gff_parent_child, valid_gff_type
p = OptionParser(gff.__doc__)
p.add_option("--source", default=None, help="Specify GFF source." +
" By default, it picks algorithm used to generate btab file." +
" [default: %default]")
p.add_option("--type", default="protein_match", choices=valid_gff_type,
help="GFF feature type [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
btabfile, = args
btabdict = {}
btab = Btab(btabfile, aat_dialect=True)
osource = opts.source or "aat"
otype = opts.type
octype = valid_gff_parent_child[otype]
for b in btab:
nargs = b.nargs
id = b.query + "-" + otype + "{0:05d}".format(b.chainNum)
key = b.key
if key not in btabdict:
btabdict[key] = { 'id': id,
'method': b.method,
'query': b.query,
'subject': b.subject,
'strand': b.qStrand,
'sDesc': b.sDesc,
'coords': [],
'children': []
}
btabdict[key]['coords'].append((b.qStart, b.qStop))
btabdict[key]['children'].append(b.gffline(source=osource, type=octype, id=id))
for v in btabdict.itervalues():
b = BtabLine("\t".join(str(x) for x in [0] * nargs), aat_dialect=True)
id = v['id']
b.query = v['query']
b.method = v['method']
b.subject = v['subject']
b.qStrand = v['strand']
b.sDesc = v['sDesc']
b.qStart, b.qStop = range_minmax(v['coords'])
print(b.gffline(source=osource, type=otype, primary_tag="ID", id=id))
print("\n".join(v['children']))
|
def gff(args)
|
%prog gff btabfile
Convert btab file generated by AAT to gff3 format.
| 3.337012 | 3.149939 | 1.059389 |
for taxid in list_of_taxids:
handle = Entrez.efetch(db='Taxonomy', id=taxid, retmode="xml")
records = Entrez.read(handle)
yield records[0]["ScientificName"]
|
def batch_taxonomy(list_of_taxids)
|
Convert list of taxids to Latin names
| 2.409266 | 2.227921 | 1.081397 |
for name in list_of_names:
handle = Entrez.esearch(db='Taxonomy', term=name, retmode="xml")
records = Entrez.read(handle)
yield records["IdList"][0]
|
def batch_taxids(list_of_names)
|
Opposite of batch_taxonomy():
Convert list of Latin names to taxids
| 2.629781 | 2.346381 | 1.120782 |
for term in list_of_terms:
logging.debug("Search term %s" % term)
success = False
ids = None
if not term:
continue
while not success:
try:
search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
rec = Entrez.read(search_handle)
success = True
ids = rec["IdList"]
except (HTTPError, URLError,
RuntimeError, KeyError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
if not ids:
logging.error("term {0} not found".format(term))
continue
assert ids
nids = len(ids)
if nids > 1:
logging.debug("A total of {0} results found.".format(nids))
if batchsize != 1:
logging.debug("Use a batch size of {0}.".format(batchsize))
ids = list(grouper(ids, batchsize))
for id in ids:
id = [x for x in id if x]
size = len(id)
id = ",".join(id)
success = False
while not success:
try:
fetch_handle = Entrez.efetch(db=db, id=id, rettype=rettype,
email=email)
success = True
except (HTTPError, URLError,
RuntimeError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
yield id, size, term, fetch_handle
|
def batch_entrez(list_of_terms, db="nuccore", retmax=1, rettype="fasta",
batchsize=1, email=myEmail)
|
Retrieve multiple records rather than a single record
| 2.501748 | 2.5652 | 0.975265 |
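A usage sketch (the accessions are illustrative) that consumes the generator and parses each fetch handle; each item is the (id, size, term, fetch_handle) tuple yielded above:

from Bio import SeqIO

terms = ["NM_000546", "NM_001301717"]   # hypothetical accessions
for id, size, term, handle in batch_entrez(terms, db="nuccore", rettype="fasta"):
    for rec in SeqIO.parse(handle, "fasta"):
        print(rec.id, len(rec.seq))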
p = OptionParser(ensembl.__doc__)
p.add_option("--version", default="75",
help="Ensembl version [default: %default]")
opts, args = p.parse_args(args)
version = opts.version
url = "ftp://ftp.ensembl.org/pub/release-{0}/".format(version)
fasta_url = url + "fasta/"
valid_species = [x for x in ls_ftp(fasta_url) if "." not in x]
doc = "\n".join((ensembl.__doc__, tile(valid_species)))
p.set_usage(doc)
if len(args) != 1:
sys.exit(not p.print_help())
species, = args
species = species.split(",")
for s in species:
download_species_ensembl(s, valid_species, url)
|
def ensembl(args)
|
%prog ensembl species
Retrieve genomes and annotations from ensembl FTP. Available species
listed below. Use comma to give a list of species to download. For example:
$ %prog ensembl danio_rerio,gasterosteus_aculeatus
| 3.119105 | 3.220025 | 0.968659 |
from jcvi.apps.biomart import GlobusXMLParser
p = OptionParser(phytozome10.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
species, = args
fp = open("get-directory.html")
g = GlobusXMLParser(fp)
g.parse_folder()
|
def phytozome10(args)
|
%prog phytozome species
Retrieve genomes and annotations from phytozome using Globus API. Available
species listed below. Use comma to give a list of species to download. For
example:
$ %prog phytozome Athaliana,Vvinifera,Osativa,Sbicolor,Slycopersicum
| 4.564952 | 4.236294 | 1.077581 |
from jcvi.formats.gff import bed as gff_bed
from jcvi.formats.fasta import format as fasta_format
p = OptionParser(phytozome.__doc__)
p.add_option("--version", default="9.0",
help="Phytozome version [default: %default]")
p.add_option("--assembly", default=False, action="store_true",
help="Download assembly [default: %default]")
p.add_option("--format", default=False, action="store_true",
help="Format to CDS and BED for synteny inference")
opts, args = p.parse_args(args)
url = "ftp://ftp.jgi-psf.org/pub/compgen/phytozome/v{0}/".\
format(opts.version)
valid_species = [x for x in ls_ftp(url) if "." not in x]
doc = "\n".join((phytozome.__doc__, tile(valid_species)))
p.set_usage(doc)
if len(args) != 1:
sys.exit(not p.print_help())
species, = args
if species == "all":
species = ",".join(valid_species)
species = species.split(",")
use_IDs = set()
# We have to watch out when the gene names and mRNA names mismatch, in which
# case we just extract the mRNA names
use_mRNAs = set(["Cclementina", "Creinhardtii", "Csinensis", "Fvesca",
"Lusitatissimum", "Mesculenta", "Mguttatus", "Ppersica",
"Pvirgatum", "Rcommunis", "Sitalica", "Tcacao",
"Thalophila", "Vcarteri", "Vvinifera", "Zmays"])
for s in species:
gff, fa = download_species_phytozome(s, valid_species, url,
assembly=opts.assembly)
key = "ID" if s in use_IDs else "Name"
ttype = "mRNA" if s in use_mRNAs else "gene"
if not opts.format:
continue
bedfile = s + ".bed"
cdsfile = s + ".cds"
gff_bed([gff, "--type={}".format(ttype), "--key={}".format(key),
"-o", bedfile])
fasta_format([fa, cdsfile, r"--sep=|"])
|
def phytozome(args)
|
%prog phytozome species
Retrieve genomes and annotations from phytozome FTP. Available species
listed below. Use comma to give a list of species to download. For example:
$ %prog phytozome Athaliana,Vvinifera,Osativa,Sbicolor,Slycopersicum
| 4.914038 | 4.789932 | 1.02591 |
f = list(SeqIO.parse(fastafile, "fasta"))
if len(f) > 1:
logging.debug("{0} records found in {1}, using the first one".
format(len(f), fastafile))
return f[0]
|
def get_first_rec(fastafile)
|
Returns the first record in the fastafile
| 2.999074 | 2.957284 | 1.014131 |
p = OptionParser(bisect.__doc__)
p.set_email()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
acc, fastafile = args
arec = get_first_rec(fastafile)
valid = None
for i in range(1, 100):
term = "%s.%d" % (acc, i)
try:
query = list(batch_entrez([term], email=opts.email))
except AssertionError as e:
logging.debug("no records found for %s. terminating." % term)
return
id, size, term, handle = query[0]  # batch_entrez yields 4-tuples
brec = next(SeqIO.parse(handle, "fasta"))
match = print_first_difference(arec, brec, ignore_case=True,
ignore_N=True, rc=True)
if match:
valid = term
break
if valid:
print()
print(green("%s matches the sequence in `%s`" % (valid, fastafile)))
|
def bisect(args)
|
%prog bisect acc accession.fasta
Determine the version of the accession by querying Entrez, based on a FASTA file.
This proceeds by a sequential search from xxxx.1 to the latest record.
| 5.015296 | 4.372916 | 1.1469 |
p = OptionParser(sra.__doc__)
p.add_option("--nogzip", dest="nogzip",
default=False, action="store_true",
help="Do not gzip the FASTQ generated by fastq-dump")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
term, = args
if op.isfile(term):
terms = [x.strip() for x in open(term)]
else:
terms = [term]
for term in terms:
srafile = download_srr_term(term)
pf = srafile.split(".")[0]
mkdir(pf)
_opts = [srafile, "--paired", "--outdir={0}".format(pf)]
if not opts.nogzip:
_opts.append("--compress=gzip")
fromsra(_opts)
|
def sra(args)
|
%prog sra [term|term.ids]
Given an SRA run ID, fetch the corresponding .sra file from the sra-instant FTP.
The term can also be a file containing list of SRR ids, one per line.
Once downloaded, the SRA file is processed through `fastq-dump` to produce
FASTQ formatted sequence files, which are gzipped by default.
| 3.293507 | 3.023265 | 1.089387 |
for i in dir(item):
try:
member = str(getattr(item, i))
if maxchar and len(member) > maxchar:
member = member[:maxchar] + "..."
except:
member = "[ERROR]"
print("{}: {}".format(i, member), file=sys.stderr)
|
def inspect(item, maxchar=80)
|
Inspect the attributes of an item.
| 2.64877 | 2.671629 | 0.991444 |
import time
def timed(*args, **kw):
ts = time.time()
result = func(*args, **kw)
te = time.time()
msg = "{0}{1} {2:.2f}s".format(func.__name__, args, te - ts)
logging.debug(msg)
return result
return timed
|
def timeit(func)
|
<http://www.zopyx.com/blog/a-python-decorator-for-measuring-the-execution-time-of-methods>
| 2.245885 | 2.038965 | 1.101483 |
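A usage sketch for the decorator; the elapsed time is emitted through logging.debug, so the logger must be configured at DEBUG level to see the message:

import logging
logging.basicConfig(level=logging.DEBUG)

@timeit
def slow_sum(n):
    return sum(range(n))

slow_sum(1000000)   # logs something like: slow_sum(1000000,) 0.05s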
from jcvi.apps.base import need_update, listify
infile = "infile"
outfile = "outfile"
def wrapper(*args, **kwargs):
assert outfile in kwargs, \
"You need to specify `outfile=` on function call"
if infile in kwargs:
infilename = listify(kwargs[infile])
for x in infilename:
assert op.exists(x), \
"The specified infile `{0}` does not exist".format(x)
outfilename = kwargs[outfile]
if need_update(infilename, outfilename):
return func(*args, **kwargs)
else:
msg = "File `{0}` exists. Computation skipped." \
.format(outfilename)
logging.debug(msg)
outfilename = listify(outfilename)
for x in outfilename:
assert op.exists(x), \
"Something went wrong, `{0}` not found".format(x)
return outfilename
return wrapper
|
def depends(func)
|
Decorator to perform check on infile and outfile. When infile is not present, issue
warning, and when outfile is present, skip function calls.
| 3.349925 | 3.198536 | 1.047331 |
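A usage sketch for the decorator, with hypothetical file names: the wrapped function must be called with infile= and outfile= keywords, runs only when need_update says the output is stale, and otherwise logs that the computation is skipped:

import shutil

@depends
def copy_reads(infile=None, outfile=None):
    # placeholder work; only executed when `outfile` is missing or older than `infile`
    shutil.copyfile(infile, outfile)

copy_reads(infile="reads.fastq", outfile="reads.backup.fastq")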
for index in reversed(xrange(len(sequence))):
yield index, sequence[index]
|
def enumerate_reversed(sequence)
|
Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
| 2.867743 | 3.735332 | 0.767735 |
_a, _b = a, b
pct = "{0:.{1}f}%".format(a * 100. / b, precision)
a, b = thousands(a), thousands(b)
if mode == 0:
return "{0} of {1} ({2})".format(a, b, pct)
elif mode == 1:
return "{0} ({1})".format(a, pct)
elif mode == 2:
return _a * 100. / _b
return pct
|
def percentage(a, b, precision=1, mode=0)
|
>>> percentage(100, 200)
'100 of 200 (50.0%)'
| 2.448844 | 2.282147 | 1.073044 |
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.utf8")
except Exception:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
finally:
s = '%d' % x
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
return locale.format('%d', x, True)
|
def thousands(x)
|
>>> thousands(12345)
'12,345'
| 2.312011 | 2.288479 | 1.010283 |
'''Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True, use multiples of 1024
if False (default), use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
'''
if size < 0:
raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
if target:
if suffix == target:
break
size /= float(multiple)
else:
if size >= multiple:
size /= float(multiple)
else:
break
return '{0:.{1}f}{2}'.format(size, precision, suffix)
|
def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None)
|
Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True, use multiples of 1024
if False (default), use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
| 2.560473 | 1.535997 | 1.666977 |
slen = str(bp)
tlen = slen[0:2] if len(slen) > 1 else slen[0]
precision = len(slen) - 2 # how many zeros we need to pad?
bp_len_scaled = int(tlen) # scale bp_len to range (0, 100)
tick_diffs = [(x, abs(bp_len_scaled / x - optimal)) for x in [1, 2, 5, 10]]
best_stride, best_tick_diff = min(tick_diffs, key=lambda x: x[1])
while precision > 0:
best_stride *= 10
precision -= 1
return best_stride
|
def autoscale(bp, optimal=6)
|
>>> autoscale(150000000)
20000000
>>> autoscale(97352632)
10000000
| 4.397855 | 4.55964 | 0.964518 |
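A worked trace of the second doctest, assuming Python 2 integer division as used throughout this code:

# autoscale(97352632, optimal=6)
#   slen = "97352632", tlen = "97", precision = 6, bp_len_scaled = 97
#   |97/x - 6| for x in (1, 2, 5, 10) -> 91, 42, 13, 3, so best_stride = 10
#   scale back up by the precision: 10 * 10**6 -> 10000000
autoscale(97352632)   # -> 10000000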