code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
p = OptionParser(clean.__doc__)
p.add_option("--fancy", default=False, action="store_true",
help="Pretty print the sequence [default: %default]")
p.add_option("--canonical", default=False, action="store_true",
help="Use only acgtnACGTN [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fw = must_open(opts.outfile, "w")
if opts.fancy:
for header, seq in iter_clean_fasta(fastafile):
print(">" + header, file=fw)
fancyprint(fw, seq)
return 0
iterator = iter_canonical_fasta if opts.canonical else iter_clean_fasta
for header, seq in iterator(fastafile):
seq = Seq(seq)
s = SeqRecord(seq, id=header, description="")
SeqIO.write([s], fw, "fasta")
|
def clean(args)
|
%prog clean fastafile
Remove irregular chars in FASTA seqs.
| 2.902768 | 2.699877 | 1.075148 |
transl_tables = [str(x) for x in xrange(1,25)]
p = OptionParser(translate.__doc__)
p.add_option("--ids", default=False, action="store_true",
help="Create .ids file with the complete/partial/gaps "
"label [default: %default]")
p.add_option("--longest", default=False, action="store_true",
help="Find the longest ORF from each input CDS [default: %default]")
p.add_option("--table", default=1, choices=transl_tables,
help="Specify translation table to use [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
cdsfasta, = args
if opts.longest:
cdsfasta = longestorf([cdsfasta])
f = Fasta(cdsfasta, lazy=True)
outfile = opts.outfile
fw = must_open(outfile, "w")
if opts.ids:
idsfile = cdsfasta.rsplit(".", 1)[0] + ".ids"
ids = open(idsfile, "w")
else:
ids = None
five_prime_missing = three_prime_missing = 0
contain_ns = complete = cannot_translate = total = 0
for name, rec in f.iteritems_ordered():
cds = rec.seq
cdslen = len(cds)
peplen = cdslen / 3
total += 1
# Try all three frames
pep = ""
for i in xrange(3):
newcds = cds[i: i + peplen * 3]
newpep = newcds.translate(table=opts.table)
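# Keep the frame whose translation runs longest before the first stop codon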
if len(newpep.split("*")[0]) > len(pep.split("*")[0]):
pep = newpep
labels = []
if "*" in pep.rstrip("*"):
logging.error("{0} cannot translate".format(name))
cannot_translate += 1
labels.append("cannot_translate")
contains_start = pep.startswith("M")
contains_stop = pep.endswith("*")
contains_ns = "X" in pep
start_ns = pep.startswith("X")
end_ns = pep.endswith("X")
if not contains_start:
five_prime_missing += 1
labels.append("five_prime_missing")
if not contains_stop:
three_prime_missing += 1
labels.append("three_prime_missing")
if contains_ns:
contain_ns += 1
labels.append("contain_ns")
if contains_start and contains_stop:
complete += 1
labels.append("complete")
if start_ns:
labels.append("start_ns")
if end_ns:
labels.append("end_ns")
if ids:
print("\t".join((name, ",".join(labels))), file=ids)
peprec = SeqRecord(pep, id=name, description=rec.description)
SeqIO.write([peprec], fw, "fasta")
fw.flush()
print("Complete gene models: {0}".\
format(percentage(complete, total)), file=sys.stderr)
print("Missing 5`-end: {0}".\
format(percentage(five_prime_missing, total)), file=sys.stderr)
print("Missing 3`-end: {0}".\
format(percentage(three_prime_missing, total)), file=sys.stderr)
print("Contain Ns: {0}".\
format(percentage(contain_ns, total)), file=sys.stderr)
if cannot_translate:
print("Cannot translate: {0}".\
format(percentage(cannot_translate, total)), file=sys.stderr)
fw.close()
return cdsfasta, outfile
|
def translate(args)
|
%prog translate cdsfasta
Translate CDS to proteins. The tricky thing is that sometimes the CDS
represents a partial gene, therefore disrupting the frame of the protein.
Check all three frames to get a valid translation.
| 2.670033 | 2.524506 | 1.057646 |
p = OptionParser(filter.__doc__)
p.add_option("--less", default=False, action="store_true",
help="filter the sizes < certain cutoff [default: >=]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, cutoff = args
try:
cutoff = int(cutoff)
except ValueError:
sys.exit(not p.print_help())
f = Fasta(fastafile, lazy=True)
fw = must_open(opts.outfile, "w")
for name, rec in f.iteritems_ordered():
if opts.less and len(rec) >= cutoff:
continue
if (not opts.less) and len(rec) < cutoff:
continue
SeqIO.write([rec], fw, "fasta")
fw.flush()
return fw.name
|
def filter(args)
|
%prog filter fastafile 100
Filter the FASTA file to contain records with size >= (default) or < a certain cutoff.
| 2.939259 | 2.433521 | 1.207822 |
from jcvi.formats.base import longest_unique_prefix
p = OptionParser(pool.__doc__)
p.add_option("--sep", default=".", help="Separator between prefix and name")
p.add_option("--sequential", default=False, action="store_true",
help="Add sequential IDs")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for fastafile in args:
pf = longest_unique_prefix(fastafile, args)
print(fastafile, "=>", pf, file=sys.stderr)
prefixopt = "--prefix={0}{1}".format(pf, opts.sep)
format_args = [fastafile, "stdout", prefixopt]
if opts.sequential:
format_args += ["--sequential=replace"]
format(format_args)
|
def pool(args)
|
%prog pool fastafiles > pool.fasta
Pool a bunch of FASTA files, and add a prefix to each record based on the
filename. File names are simplified to their longest unique prefix to avoid
collisions after shortening.
| 3.223734 | 2.919873 | 1.104067 |
p = OptionParser(ids.__doc__)
p.add_option("--until", default=None,
help="Truncate the name and description at words [default: %default]")
p.add_option("--description", default=False, action="store_true",
help="Generate a second column with description [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
until = opts.until
fw = must_open(opts.outfile, "w")
for row in must_open(args):
if row[0] == ">":
row = row[1:].rstrip()
if until:
row = row.split(until)[0]
atoms = row.split(None, 1)
if opts.description:
outrow = "\t".join(atoms)
else:
outrow = atoms[0]
print(outrow, file=fw)
fw.close()
|
def ids(args)
|
%prog ids fastafiles
Generate the FASTA headers without the '>'.
| 3.043873 | 2.724559 | 1.117199 |
p = OptionParser(sort.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Sort by decreasing size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"
f = Fasta(fastafile, index=False)
fw = must_open(sortedfastafile, "w")
if opts.sizes:
# Sort by decreasing size
sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))
logging.debug("Sort by size: max: {0}, min: {1}".\
format(sortlist[0], sortlist[-1]))
sortlist = [x for x, s in sortlist]
else:
sortlist = sorted(f.iterkeys())
for key in sortlist:
rec = f[key]
SeqIO.write([rec], fw, "fasta")
logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
fw.close()
return sortedfastafile
|
def sort(args)
|
%prog sort fastafile
Sort a list of sequences and output with sorted IDs, etc.
| 2.692628 | 2.663733 | 1.010847 |
from jcvi.formats.agp import OO, Phases, build
from jcvi.formats.sizes import Sizes
p = OptionParser(join.__doc__)
p.add_option("--newid", default=None,
help="New sequence ID [default: `%default`]")
p.add_option("--gapsize", default=100, type="int",
help="Number of N's in between the sequences [default: %default]")
p.add_option("--gaptype", default="contig",
help="Gap type to use in the AGP file [default: %default]")
p.add_option("--evidence", default="",
help="Linkage evidence to report in the AGP file [default: %default]")
p.add_option("--oo", help="Use .oo file generated by bambus [default: %default]")
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
if nargs == 2:
fastafile, phasefile = args
phases = DictFile(phasefile)
phases = dict((a, Phases[int(b)]) for a, b in phases.items())
else:
fastafile, = args
phases = {}
sizes = Sizes(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
agpfile = prefix + ".agp"
newid = opts.newid
oo = opts.oo
o = OO(oo, sizes.mapping)
if oo:
seen = o.contigs
# The leftover contigs not in the oo file
logging.debug("A total of {0} contigs ({1} in `{2}`)".\
format(len(sizes), len(seen), oo))
for ctg, size in sizes.iter_sizes():
if ctg in seen:
continue
o.add(ctg, ctg, size)
else:
if newid:
for ctg, size in sizes.iter_sizes():
o.add(newid, ctg, size)
else:
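# Without --newid, give each contig its own scaffoldNNN object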
for scaffold_number, (ctg, size) in enumerate(sizes.iter_sizes()):
object_id = "scaffold{0:03d}".format(scaffold_number + 1)
o.add(object_id, ctg, size)
fw = open(agpfile, "w")
o.write_AGP(fw, gapsize=opts.gapsize, gaptype=opts.gaptype,
evidence=opts.evidence, phases=phases)
fw.close()
joinedfastafile = prefix + ".joined.fasta"
build([agpfile, fastafile, joinedfastafile])
return joinedfastafile
|
def join(args)
|
%prog join fastafile [phasefile]
Make an AGP file for a bunch of sequences, adding gaps in between, then build
the joined fastafile. This is useful by itself, but with the --oo option it
can convert the .oo (BAMBUS output) into AGP and a joined fasta.
Phasefile is optional, but must contain two columns - BAC and phase (0, 1, 2, 3).
| 3.154919 | 2.900826 | 1.087593 |
from jcvi.utils.natsort import natsort_key
p = OptionParser(summary.__doc__)
p.add_option("--suffix", default="Mb",
help="make the base pair counts human readable [default: %default]")
p.add_option("--ids",
help="write the ids that have >= 50% N's [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
idsfile = opts.ids
header = "Seqid Real N's Total %_real".split()
if idsfile:
idsfile = open(idsfile, "w")
nids = 0
data = []
for fastafile in args:
for rec in SeqIO.parse(fastafile, "fasta"):
seqlen = len(rec)
nns = rec.seq.count('n') + rec.seq.count('N')
reals = seqlen - nns
pct = reals * 100. / seqlen
pctreal = "{0:.1f}%".format(pct)
if idsfile and pct < 50:
nids += 1
print(rec.id, file=idsfile)
data.append((rec.id, reals, nns, seqlen, pctreal))
data.sort(key=natsort_key)
ids, reals, nns, seqlen, pctreal = zip(*data)
reals = sum(reals)
nns = sum(nns)
seqlen = sum(seqlen)
pctreal = "{0:.1f}%".format(reals * 100. / seqlen)
data.append(("Total", reals, nns, seqlen, pctreal))
write_csv(header, data, sep=" ", filename=opts.outfile, thousands=True)
if idsfile:
logging.debug("A total of {0} ids >= 50% N's written to {1}.".\
format(nids, idsfile.name))
idsfile.close()
return reals, nns, seqlen
|
def summary(args)
|
%prog summary *.fasta
Report real bases and N's in fastafiles in a tabular report
| 2.964792 | 2.76337 | 1.07289 |
plus_match = _print_first_difference(arec, brec, ignore_case=ignore_case,
ignore_N=ignore_N, report_match=report_match)
if rc and not plus_match:
logging.debug("trying reverse complement of %s" % brec.id)
brec.seq = brec.seq.reverse_complement()
minus_match = _print_first_difference(arec, brec,
ignore_case=ignore_case, ignore_N=ignore_N,
report_match=report_match)
return minus_match
else:
return plus_match
|
def print_first_difference(arec, brec, ignore_case=False, ignore_N=False,
rc=False, report_match=True)
|
Prints the first differing nucleotide in a two-sequence comparison;
checks both the plus and minus strands when rc=True
| 2.169094 | 2.192735 | 0.989219 |
aseq, bseq = arec.seq, brec.seq
asize, bsize = len(aseq), len(bseq)
matched = True
for i, (a, b) in enumerate(zip_longest(aseq, bseq)):
if ignore_case and None not in (a, b):
a, b = a.upper(), b.upper()
if ignore_N and ('N' in (a, b) or 'X' in (a, b)):
continue
if a != b:
matched = False
break
if i + 1 == asize and matched:
if report_match:
print(green("Two sequences match"))
match = True
else:
print(red("Two sequences do not match"))
snippet_size = 20 # show the context of the difference
print(red("Sequence start to differ at position %d:" % (i + 1)))
begin = max(i - snippet_size, 0)
aend = min(i + snippet_size, asize)
bend = min(i + snippet_size, bsize)
print(red(aseq[begin:i] + "|" + aseq[i:aend]))
print(red(bseq[begin:i] + "|" + bseq[i:bend]))
match = False
return match
|
def _print_first_difference(arec, brec, ignore_case=False, ignore_N=False,
report_match=True)
|
Returns the first different nucleotide in two sequence comparisons
| 2.692068 | 2.634092 | 1.02201 |
from jcvi.utils.table import banner
p = OptionParser(diff.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="also consider reverse complement [default: %default]")
p.add_option("--quiet", default=False, action="store_true",
help="don't output comparison details [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
afasta, bfasta = args
afastan = len(Fasta(afasta))
bfastan = len(Fasta(bfasta))
if afastan == bfastan:
print(green("Two sets contain the same number of sequences ({0}, {1})".\
format(afastan, bfastan)))
else:
print(red("Two sets contain different number of sequences ({0}, {1})".\
format(afastan, bfastan)))
ah = SeqIO.parse(afasta, "fasta")
bh = SeqIO.parse(bfasta, "fasta")
problem_ids = []
for arec, brec in zip(ah, bh):
if opts.ignore_stop:
arec.seq = arec.seq.rstrip("*")
brec.seq = brec.seq.rstrip("*")
asize, bsize = len(arec), len(brec)
if not opts.quiet:
print(banner(str(arec), [str(brec)]))
if asize == bsize:
print(green("Two sequence size match (%d)" % asize))
else:
print(red("Two sequence size do not match (%d, %d)" % (asize, bsize)))
# print out the first place the two sequences diff
fd = print_first_difference(arec, brec, ignore_case=opts.ignore_case,
ignore_N=opts.ignore_N, rc=opts.rc, report_match=not opts.quiet)
if not fd:
logging.error("Two sets of sequences differ at `{0}`".format(arec.id))
problem_ids.append("\t".join(str(x) for x in (arec.id, asize, bsize,
abs(asize - bsize))))
if problem_ids:
print(red("A total of {0} records mismatch.".format(len(problem_ids))))
fw = must_open("Problems.ids", "w")
print("\n".join(problem_ids), file=fw)
|
def diff(args)
|
%prog diff afasta bfasta
print out whether the records in two fasta files are the same
| 2.509261 | 2.453799 | 1.022602 |
if ignore_stop:
seq = seq.rstrip("*")
if ignore_case:
seq = seq.upper()
if ignore_N:
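# Non-ACGTN letters imply a protein sequence, where unknowns are X; otherwise strip N's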
if not all(c.upper() in 'ATGCN' for c in seq):
seq = re.sub('X', '', seq)
else:
seq = re.sub('N', '', seq)
if checksum == "MD5":
hashed = md5(seq).hexdigest()
elif checksum == "GCG":
hashed = seguid(seq)
return hashed
|
def hash_fasta(seq, ignore_case=False, ignore_N=False, ignore_stop=False, checksum="MD5")
|
Generates checksum of input sequence element
| 3.083457 | 3.037879 | 1.015003 |
from jcvi.utils.cbook import AutoVivification
allowed_checksum = ["MD5", "GCG"]
p = OptionParser(identical.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
p.add_option("--output_uniq", default=False, action="store_true",
help="output uniq sequences in FASTA format" + \
" [default: %default]")
p.add_option("--checksum", default="MD5", choices=allowed_checksum,
help="specify checksum method [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
d = AutoVivification()
files = []
for fastafile in args:
f = Fasta(fastafile)
pf = fastafile.rsplit(".", 1)[0]
files.append(pf)
logging.debug("Hashing individual elements of {0}".format(fastafile))
for name, rec in f.iteritems_ordered():
seq = re.sub(' ', '', str(rec.seq))
hashed = hash_fasta(seq, ignore_case=opts.ignore_case, ignore_N=opts.ignore_N, \
ignore_stop=opts.ignore_stop, checksum=opts.checksum)
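# First occurrence of this checksum: remember the sequence and start counting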
if not d[hashed]:
d[hashed]['seq'] = seq
d[hashed]['count'] = 0
if not d[hashed]['names'][pf]:
d[hashed]['names'][pf] = set()
d[hashed]['names'][pf].add(name)
fw = must_open(opts.outfile, "w")
if opts.output_uniq:
uniqfile = "_".join(files) + ".uniq.fasta"
uniqfw = must_open(uniqfile, "w")
header = "\t".join(str(x) for x in (args))
print("\t".join(str(x) for x in ("", header)), file=fw)
for idx, hashed in enumerate(d.keys()):
line = []
line.append("t{0}".format(idx))
for fastafile in files:
if fastafile in d[hashed]['names'].keys():
line.append(",".join(d[hashed]['names'][fastafile]))
if opts.output_uniq:
d[hashed]['count'] += len(d[hashed]['names'][fastafile])
else:
line.append("na")
print("\t".join(line), file=fw)
if opts.output_uniq:
seqid = "\t".join(str(x) for x in ("t{0}".format(idx), d[hashed]['count']))
rec = SeqRecord(Seq(d[hashed]['seq']), id=seqid, description="")
SeqIO.write([rec], uniqfw, "fasta")
fw.close()
if opts.output_uniq:
logging.debug("Uniq sequences written to `{0}`".format(uniqfile))
uniqfw.close()
|
def identical(args)
|
%prog identical *.fasta
Given multiple fasta files, find all the exactly identical records
based on the computed md5 hexdigest or GCG checksum of each sequence.
Output is an N + 1 column file (where N = number of input fasta files).
If there are duplicates within a given fasta file, they will all be
listed out in the same row separated by a comma.
Example output:
---------------------------
tta1.fsa tta2.fsa
t0 2131 na
t1 3420 na
t2 3836,3847 852
t3 148 890
t4 584 614
t5 623 684
t6 1281 470
t7 3367 na
| 2.301177 | 2.226009 | 1.033768 |
qualfile1 = fastafile.rsplit(".", 1)[0] + suffix
qualfile2 = fastafile + suffix
if check:
if op.exists(qualfile1):
logging.debug("qual file `{0}` found".format(qualfile1))
return qualfile1
elif op.exists(qualfile2):
logging.debug("qual file `{0}` found".format(qualfile2))
return qualfile2
else:
logging.warning("qual file not found")
return None
return qualfile1
|
def get_qual(fastafile, suffix=QUALSUFFIX, check=True)
|
Check if current folder contains a qual file associated with the fastafile
| 2.129154 | 2.068414 | 1.029365 |
p = OptionParser(some.__doc__)
p.add_option("--exclude", default=False, action="store_true",
help="Output sequences not in the list file [default: %default]")
p.add_option("--uniprot", default=False, action="store_true",
help="Header is from uniprot [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(p.print_help())
fastafile, listfile, outfastafile = args
outfastahandle = must_open(outfastafile, "w")
qualfile = get_qual(fastafile)
names = set(x.strip() for x in open(listfile))
if qualfile:
outqualfile = outfastafile + ".qual"
outqualhandle = open(outqualfile, "w")
parser = iter_fasta_qual(fastafile, qualfile)
else:
parser = SeqIO.parse(fastafile, "fasta")
num_records = 0
for rec in parser:
name = rec.id
if opts.uniprot:
name = name.split("|")[-1]
if opts.exclude:
if name in names:
continue
else:
if name not in names:
continue
SeqIO.write([rec], outfastahandle, "fasta")
if qualfile:
SeqIO.write([rec], outqualhandle, "qual")
num_records += 1
logging.debug("A total of %d records written to `%s`" % \
(num_records, outfastafile))
|
def some(args)
|
%prog some fastafile listfile outfastafile
generate a subset of fastafile, based on a list
| 2.547184 | 2.406552 | 1.058437 |
from jcvi.formats.fastq import FastqLite
p = OptionParser(fastq.__doc__)
p.add_option("--qv", type="int",
help="Use generic qv value [dafault: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fastqfile = fastafile.rsplit(".", 1)[0] + ".fastq"
fastqhandle = open(fastqfile, "w")
num_records = 0
if opts.qv is not None:
qv = chr(ord('!') + opts.qv)
logging.debug("QV char '{0}' ({1})".format(qv, opts.qv))
else:
qv = None
if qv:
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
r = FastqLite("@" + name, str(rec.seq).upper(), qv * len(rec.seq))
print(r, file=fastqhandle)
num_records += 1
else:
qualfile = get_qual(fastafile)
for rec in iter_fasta_qual(fastafile, qualfile):
SeqIO.write([rec], fastqhandle, "fastq")
num_records += 1
fastqhandle.close()
logging.debug("A total of %d records written to `%s`" % \
(num_records, fastqfile))
|
def fastq(args)
|
%prog fastq fastafile
Generate fastqfile by combining fastafile and fastafile.qual.
Also check --qv option to use a default qv score.
| 3.069477 | 2.871927 | 1.068786 |
p = OptionParser(pair.__doc__)
p.set_sep(sep=None, help="Separator in name to reduce to clone id, " +\
"e.g. GFNQ33242/1 use /, BOT01-2453H.b1 use .")
p.add_option("-m", dest="matepairs", default=False, action="store_true",
help="generate .matepairs file [often used for Celera Assembler]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
qualfile = get_qual(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
pairsfile = prefix + ".pairs.fasta"
fragsfile = prefix + ".frags.fasta"
pairsfw = open(pairsfile, "w")
fragsfw = open(fragsfile, "w")
#TODO: need a class to handle coupled fasta and qual iterating and indexing
if opts.matepairs:
matepairsfile = prefix + ".matepairs"
matepairsfw = open(matepairsfile, "w")
if qualfile:
pairsqualfile = pairsfile + ".qual"
pairsqualhandle = open(pairsqualfile, "w")
fragsqualfile = fragsfile + ".qual"
fragsqualhandle = open(fragsqualfile, "w")
f = Fasta(fastafile)
if qualfile:
q = SeqIO.index(qualfile, "qual")
all_keys = list(f.keys())
all_keys.sort()
sep = opts.sep
if sep:
key_fun = lambda x: x.split(sep, 1)[0]
else:
key_fun = lambda x: x[:-1]
for key, variants in groupby(all_keys, key=key_fun):
variants = list(variants)
paired = (len(variants) == 2)
if paired and opts.matepairs:
print("\t".join(("%s/1" % key, "%s/2" % key)), file=matepairsfw)
fw = pairsfw if paired else fragsfw
if qualfile:
qualfw = pairsqualhandle if paired else fragsqualhandle
for i, var in enumerate(variants):
rec = f[var]
if qualfile:
recqual = q[var]
newid = "%s/%d" % (key, i + 1)
rec.id = newid
rec.description = ""
SeqIO.write([rec], fw, "fasta")
if qualfile:
recqual.id = newid
recqual.description = ""
SeqIO.write([recqual], qualfw, "qual")
logging.debug("sequences written to `%s` and `%s`" % \
(pairsfile, fragsfile))
if opts.matepairs:
logging.debug("mates written to `%s`" % matepairsfile)
|
def pair(args)
|
%prog pair fastafile
Generate .pairs.fasta and .frags.fasta by matching records
into pairs; the rest go to fragments.
| 3.192271 | 3.104645 | 1.028224 |
from jcvi.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.add_option("-r", dest="rclip", default=1, type="int",
help="pair ID is derived from rstrip N chars [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
base = op.basename(fastafile).split(".")[0]
frags = base + ".frags.fasta"
pairs = base + ".pairs.fasta"
if fastafile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = opts.rclip
strip_name = lambda x: x[:-N] if N else x  # N == 0 keeps the full name
skipflag = False # controls the iterator skip
fastaiter = SeqIO.parse(fastafile, "fasta")
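# Compare adjacent records; a name match writes both as a pair and skips the next window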
for a, b in pairwise(fastaiter):
aid, bid = [strip_name(x) for x in (a.id, b.id)]
if skipflag:
skipflag = False
continue
if aid == bid:
SeqIO.write([a, b], pairsfw, "fasta")
skipflag = True
else:
SeqIO.write([a], fragsfw, "fasta")
# don't forget the last one, when b is None
if not skipflag:
SeqIO.write([a], fragsfw, "fasta")
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
|
def pairinplace(args)
|
%prog pairinplace bulk.fasta
Pair up the records in bulk.fasta by comparing the names for adjacent
records. If they match, print to bulk.pairs.fasta, else print to
bulk.frags.fasta.
| 3.417743 | 3.176881 | 1.075817 |
p = OptionParser(extract.__doc__)
p.add_option('--newname', help="Use this new name instead")
p.add_option('--include', default=False, action="store_true",
help="search description line for match [default: %default]")
p.add_option('--exclude', default=False, action="store_true",
help="exclude description that matches [default: %default]")
p.add_option('--idonly', default=False, action="store_true",
help="Only search identifier")
p.add_option('--bed', default=None,
help="path to bed file to guide extraction by matching seqname")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) == 2:
fastafile, query = args
elif len(args) == 1 and opts.bed:
fastafile, = args
bedaccns = Bed(opts.bed).accns
else:
sys.exit(p.print_help())
if opts.bed:
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile)
for accn in bedaccns:
try:
rec = f[accn]
except KeyError:
logging.error("{0} not found in {1}".format(accn, fastafile))
continue
SeqIO.write([rec], fw, "fasta")
return fw.name
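# Parse a query of the form "seqname[:start-stop[:strand]]"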
atoms = query.split(":")
key = atoms[0]
assert len(atoms) <= 3, "cannot have more than two ':' in your query"
pos = ""
if len(atoms) in (2, 3):
pos = atoms[1]
strand = "+"
if len(atoms) == 3:
strand = atoms[2]
assert strand in ('+', '-'), "strand must be either '+' or '-'"
feature = dict(chr=key)
if "-" in pos:
start, stop = pos.split("-")
try:
start, stop = int(start), int(stop)
except ValueError as e:
logging.error(e)
sys.exit(p.print_help())
feature["start"] = start
feature["stop"] = stop
else:
start, stop = None, None
assert start < stop or None in (start, stop), \
"start must be < stop, you have ({0}, {1})".format(start, stop)
feature["strand"] = strand
include, exclude = opts.include, opts.exclude
# conflicting options, cannot be true at the same time
assert not (include and exclude), "--include and --exclude cannot be "\
"on at the same time"
fw = must_open(opts.outfile, "w")
if include or exclude:
f = Fasta(fastafile, lazy=True)
fi = f.iteritems_ordered if opts.idonly else f.iterdescriptions_ordered
for k, rec in fi():
if include and key not in k:
continue
if exclude and key in k:
continue
seq = Fasta.subseq(rec, start, stop, strand)
newid = rec.id
if start is not None:
newid += ":{0}-{1}:{2}".format(start, stop, strand)
rec = SeqRecord(seq, id=newid, description=k)
SeqIO.write([rec], fw, "fasta")
else:
f = Fasta(fastafile)
try:
seq = f.sequence(feature, asstring=False)
except AssertionError as e:
logging.error(e)
return
newid = opts.newname or query
rec = SeqRecord(seq, id=newid, description="")
SeqIO.write([rec], fw, "fasta")
return fw.name
|
def extract(args)
|
%prog extract fasta query
extract query out of fasta file; query needs to be in the form of
"seqname", or "seqname:start-stop", or "seqname:start-stop:-"
| 2.849163 | 2.717394 | 1.048491 |
seen = set()
for rec in SeqIO.parse(fastafile, "fasta"):
name = str(rec.seq) if seq else rec.id
if name in seen:
logging.debug("ignore {0}".format(rec.id))
continue
seen.add(name)
yield rec
|
def _uniq_rec(fastafile, seq=False)
|
Returns unique records
| 2.488253 | 2.474604 | 1.005516 |
p = OptionParser(uniq.__doc__)
p.add_option("--seq", default=False, action="store_true",
help="Uniqify the sequences [default: %default]")
p.add_option("-t", "--trimname", dest="trimname",
action="store_true", default=False,
help="turn on the defline trim to first space [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, uniqfastafile = args
fw = must_open(uniqfastafile, "w")
seq = opts.seq
for rec in _uniq_rec(fastafile, seq=seq):
if opts.trimname:
rec.description = ""
SeqIO.write([rec], fw, "fasta")
|
def uniq(args)
|
%prog uniq fasta uniq.fasta
remove fasta records that are the same
| 3.417008 | 3.224139 | 1.059821 |
from random import sample
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, N = args
N = int(N)
assert N > 0
f = Fasta(fastafile)
fw = must_open("stdout", "w")
for key in sample(f.keys(), N):
rec = f[key]
SeqIO.write([rec], fw, "fasta")
fw.close()
|
def random(args)
|
%prog random fasta 100 > random100.fasta
Take a given number of records randomly from the fasta file
| 2.803113 | 2.586 | 1.083957 |
from Bio.SeqIO.QualityIO import PairedFastaQualIterator
if not qualfile:
qualfile = make_qual(fastafile, score=defaultqual)
rec_iter = PairedFastaQualIterator(open(fastafile), open(qualfile))
for rec in rec_iter:
yield rec if not modify else modify_qual(rec)
|
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False)
|
used by trim, emits one SeqRecord with quality values in it
| 3.839351 | 3.598809 | 1.06684 |
from jcvi.algorithms.maxsum import max_sum
p = OptionParser(trim.__doc__)
p.add_option("-c", dest="min_length", type="int", default=64,
help="minimum sequence length after trimming")
p.add_option("-s", dest="score", default=QUAL,
help="quality trimming cutoff [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, newfastafile = args
qualfile = get_qual(fastafile)
newqualfile = get_qual(newfastafile, check=False)
logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \
(fastafile, newfastafile))
fw = must_open(newfastafile, "w")
fw_qual = open(newqualfile, "w")
dropped = trimmed = 0
for rec in iter_fasta_qual(fastafile, qualfile, modify=True):
qv = [x - opts.score for x in \
rec.letter_annotations["phred_quality"]]
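# max_sum picks the contiguous stretch with the highest net quality above the cutoff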
msum, trim_start, trim_end = max_sum(qv)
score = trim_end - trim_start + 1
if score < opts.min_length:
dropped += 1
continue
if score < len(rec):
trimmed += 1
rec = rec[trim_start:trim_end + 1]
write_fasta_qual(rec, fw, fw_qual)
print("A total of %d sequences modified." % trimmed, file=sys.stderr)
print("A total of %d sequences dropped (length < %d)." % \
(dropped, opts.min_length), file=sys.stderr)
fw.close()
fw_qual.close()
|
def trim(args)
|
%prog trim fasta.screen newfasta
take the screen output from `cross_match` (against a vector db, for
example), then trim the sequences to remove X's. Will also perform quality
trim if fasta.screen.qual is found. The trimming algorithm is based on
finding the subarray that maximizes the sum
| 3.06593 | 2.95052 | 1.039115 |
p = OptionParser(sequin.__doc__)
p.add_option("--unk", default=100, type="int",
help="The size for unknown gaps [default: %default]")
p.add_option("--newid", default=None,
help="Use this identifier instead [default: %default]")
p.add_option("--chromosome", default=None,
help="Add [chromosome= ] to FASTA header [default: %default]")
p.add_option("--clone", default=None,
help="Add [clone= ] to FASTA header [default: %default]")
p.set_mingap(default=100)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
unk = opts.unk
outputfasta = inputfasta.rsplit(".", 1)[0] + ".split"
rec = next(SeqIO.parse(must_open(inputfasta), "fasta"))
seq = ""
unknowns, knowns = 0, 0
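# Split the sequence into alternating runs of N's (gaps) and real bases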
for gap, gap_group in groupby(rec.seq, lambda x: x.upper() == 'N'):
subseq = "".join(gap_group)
if gap:
gap_length = len(subseq)
if gap_length == unk:
subseq = "\n>?unk{0}\n".format(unk)
unknowns += 1
elif gap_length >= opts.mingap:
subseq = "\n>?{0}\n".format(gap_length)
knowns += 1
seq += subseq
fw = must_open(outputfasta, "w")
id = opts.newid or rec.id
fastaheader = ">{0}".format(id)
if opts.chromosome:
fastaheader += " [chromosome={0}]".format(opts.chromosome)
if opts.clone:
fastaheader += " [clone={0}]".format(opts.clone)
print(fastaheader, file=fw)
print(seq, file=fw)
fw.close()
logging.debug("Sequin FASTA written to `{0}` (gaps: {1} unknowns, {2} knowns).".\
format(outputfasta, unknowns, knowns))
return outputfasta, unknowns + knowns
|
def sequin(args)
|
%prog sequin inputfasta
Generate a gapped fasta format with known gap sizes embedded, suitable for
Sequin submission.
A gapped sequence represents a newer method for describing non-contiguous
sequences, but only requires a single sequence identifier. A gap is
represented by a line that starts with >? and is immediately followed by
either a length (for gaps of known length) or "unk100" for gaps of unknown
length. For example, ">?200". The next sequence segment continues on the
next line, with no separate definition line or identifier. The difference
between a gapped sequence and a segmented sequence is that the gapped
sequence uses a single identifier and can specify known length gaps.
Gapped sequences are preferred over segmented sequences. A sample gapped
sequence file is shown here:
>m_gagei [organism=Mansonia gagei] Mansonia gagei NADH dehydrogenase ...
ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA
TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT
>?200
GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC
TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA
>?unk100
TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA
TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC
| 2.698813 | 2.506472 | 1.076738 |
p = OptionParser(tidy.__doc__)
p.add_option("--gapsize", dest="gapsize", default=0, type="int",
help="Set all gaps to the same size [default: %default]")
p.add_option("--minlen", dest="minlen", default=100, type="int",
help="Minimum component size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
gapsize = opts.gapsize
minlen = opts.minlen
tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
fw = must_open(tidyfastafile, "w")
removed = normalized = 0
fasta = Fasta(fastafile, lazy=True)
for name, rec in fasta.iteritems_ordered():
rec.seq = rec.seq.upper()
if minlen:
removed += remove_small_components(rec, minlen)
trim_terminal_Ns(rec)
if gapsize:
normalized += normalize_gaps(rec, gapsize)
if len(rec) == 0:
logging.debug("Drop seq {0}".format(rec.id))
continue
SeqIO.write([rec], fw, "fasta")
# Print statistics
if removed:
logging.debug("Total discarded bases: {0}".format(removed))
if normalized:
logging.debug("Gaps normalized: {0}".format(normalized))
logging.debug("Tidy FASTA written to `{0}`.".format(tidyfastafile))
fw.close()
return tidyfastafile
|
def tidy(args)
|
%prog tidy fastafile
Trim terminal Ns, normalize gap sizes and remove small components.
| 2.726196 | 2.451304 | 1.112141 |
from jcvi.formats.sizes import agp
from jcvi.formats.agp import mask, build
p = OptionParser(gaps.__doc__)
p.add_option("--split", default=False, action="store_true",
help="Generate .split.fasta [default: %default]")
p.set_mingap(default=100)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
mingap = opts.mingap
split = opts.split
prefix = inputfasta.rsplit(".", 1)[0]
bedfile = prefix + ".gaps.bed"
if need_update(inputfasta, bedfile):
write_gaps_bed(inputfasta, prefix, mingap, opts.cpus)
if split:
splitfile = prefix + ".split.fasta"
oagpfile = prefix + ".splitobject.agp"
cagpfile = prefix + ".splitcomponent.agp"
if need_update((inputfasta, bedfile), splitfile):
sizesagpfile = agp([inputfasta])
maskedagpfile = mask([sizesagpfile, bedfile, "--splitobject"])
shutil.move(maskedagpfile, oagpfile)
logging.debug("AGP file written to `{0}`.".format(oagpfile))
maskedagpfile = mask([sizesagpfile, bedfile, "--splitcomponent"])
shutil.move(maskedagpfile, cagpfile)
logging.debug("AGP file written to `{0}`.".format(cagpfile))
build([oagpfile, inputfasta, splitfile])
os.remove(sizesagpfile)
return splitfile, oagpfile, cagpfile
|
def gaps(args)
|
%prog gaps fastafile
Print out a list of gaps in BED format (.gaps.bed).
| 2.799631 | 2.740386 | 1.021619 |
start = start - 1 if start is not None else 0
stop = stop if stop is not None else len(fasta)
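# Clamp out-of-range coordinates (with a logged error) instead of raising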
if start < 0:
msg = "start ({0}) must > 0 of `{1}`. Reset to 1".\
format(start + 1, fasta.id)
logging.error(msg)
start = 0
if stop > len(fasta):
msg = "stop ({0}) must be <= length of `{1}` ({2}). Reset to {2}.".\
format(stop, fasta.id, len(fasta))
logging.error(msg)
stop = len(fasta)
seq = fasta.seq[start:stop]
if strand in (-1, '-1', '-'):
seq = seq.reverse_complement()
return seq
|
def subseq(cls, fasta, start=None, stop=None, strand=None)
|
Take a Bio.SeqRecord and slice "start:stop" from it, with proper
index and error handling
| 2.5444 | 2.525471 | 1.007495 |
assert 'chr' in f, "`chr` field required"
name = f['chr']
assert name in self, "feature: %s not in `%s`" % \
(f, self.filename)
fasta = self[f['chr']]
seq = Fasta.subseq(fasta,
f.get('start'), f.get('stop'), f.get('strand'))
if asstring:
return str(seq)
return seq
|
def sequence(self, f, asstring=True)
|
Emulate brentp's pyfasta/fasta.py sequence() methods
take a feature and use the start/stop or exon_keys to return
the sequence from the associated fasta file:
f: a feature
asstring: if true, return the sequence as a string
: if false, return as a biopython Seq
>>> f = Fasta('tests/data/three_chrs.fasta')
>>> f.sequence({'start':1, 'stop':2, 'strand':1, 'chr': 'chr1'})
'AC'
>>> f.sequence({'start':1, 'stop':2, 'strand': -1, 'chr': 'chr1'})
'GT'
| 5.273304 | 4.930429 | 1.069543 |
start = frame
while start + 3 <= self.size:
yield self.sequence[start : start + 3], start
start += 3
|
def codons(self, frame)
|
A generator that yields DNA in one codon blocks
"frame" counts for 0. This function yields a tuple (triplet, index) with
index relative to the original DNA sequence
| 4.596102 | 4.250275 | 1.081366 |
orf_start = None
for c, index in self.codons(frame):
if (c not in self.stop and (c in self.start or not self.start)
and orf_start is None):
orf_start = index
elif c in self.stop and orf_start is not None:
self._update_longest(orf_start, index + 3, direction, frame)
orf_start = None
if orf_start is not None:
self._update_longest(orf_start, index + 3, direction, frame)
|
def scan_sequence(self, frame, direction)
|
Search in one reading frame
| 2.930113 | 2.97646 | 0.984429 |
if not ca:
return s[:-1]
if s[0] == '1':
return s[2:]
return s.rstrip('ab')
|
def clone_name(s, ca=False)
|
>>> clone_name("120038881639")
"0038881639"
>>> clone_name("GW11W6RK01DAJDWa")
"GW11W6RK01DAJDW"
| 7.140831 | 8.543545 | 0.835816 |
fp = must_open(bedfile)
fw = must_open(bedpefile, "w")
if pairsbedfile:
fwpairs = must_open(pairsbedfile, "w")
clones = defaultdict(list)
for row in fp:
b = BedLine(row)
name = b.accn
clonename = clone_name(name, ca=ca)
clones[clonename].append(b)
if matesfile:
fp = open(matesfile)
libraryline = next(fp)
# 'library bes 37896 126916'
lib, name, smin, smax = libraryline.split()
assert lib == "library"
smin, smax = int(smin), int(smax)
logging.debug("Happy mates for lib {0} fall between {1} - {2}".\
format(name, smin, smax))
nbedpe = 0
nspan = 0
for clonename, blines in clones.items():
nlines = len(blines)
if nlines == 2:
a, b = blines
aseqid, astart, aend = a.seqid, a.start, a.end
bseqid, bstart, bend = b.seqid, b.start, b.end
outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename]
if strand:
outcols.extend([0, a.strand, b.strand])
print("\t".join(str(x) for x in outcols), file=fw)
nbedpe += 1
elif nlines == 1:
a, = blines
aseqid, astart, aend = a.seqid, a.start, a.end
bseqid, bstart, bend = 0, 0, 0
else: # More than two lines per pair
pass
if pairsbedfile:
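# Collapse the mate coordinates into a single clone span (same seqid only)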
start = min(astart, bstart) if bstart > 0 else astart
end = max(aend, bend) if bend > 0 else aend
if aseqid != bseqid:
continue
span = end - start + 1
if (not matesfile) or (smin <= span <= smax):
print("\t".join(str(x) for x in \
(aseqid, start - 1, end, clonename)), file=fwpairs)
nspan += 1
fw.close()
logging.debug("A total of {0} bedpe written to `{1}`.".\
format(nbedpe, bedpefile))
if pairsbedfile:
fwpairs.close()
logging.debug("A total of {0} spans written to `{1}`.".\
format(nspan, pairsbedfile))
|
def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False)
|
This converts the bedfile to bedpefile, assuming the reads are from CA.
| 2.620827 | 2.63766 | 0.993618 |
p = OptionParser(posmap.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(p.print_help())
frgscffile, fastafile, scf = args
# fasta
cmd = "faOneRecord {0} {1}".format(fastafile, scf)
scffastafile = scf + ".fasta"
if not op.exists(scffastafile):
sh(cmd, outfile=scffastafile)
# sizes
sizesfile = scffastafile + ".sizes"
sizes = Sizes(scffastafile).mapping
scfsize = sizes[scf]
logging.debug("`{0}` has length of {1}.".format(scf, scfsize))
# gaps.bed
gapsbedfile = scf + ".gaps.bed"
if not op.exists(gapsbedfile):
args = [scffastafile, "--bed", "--mingap=100"]
gaps(args)
# reads frgscf posmap
posmapfile = scf + ".posmap"
if not op.exists(posmapfile):
args = [frgscffile, scf]
query(args)
# reads bed
bedfile = scf + ".bed"
if not op.exists(bedfile):
args = [posmapfile]
bed(args)
# reads bedpe
bedpefile = scf + ".bedpe"
pairsbedfile = scf + ".pairs.bed"
if not (op.exists(bedpefile) and op.exists(pairsbedfile)):
bed_to_bedpe(bedfile, bedpefile, pairsbedfile=pairsbedfile, ca=True)
# base coverage
Coverage(bedfile, sizesfile)
Coverage(pairsbedfile, sizesfile)
|
def posmap(args)
|
%prog posmap frgscf.sorted scf.fasta scfID
Perform QC on the selected scfID, generate multiple BED files for plotting.
| 3.261611 | 3.032129 | 1.075684 |
'Updates the widget to show the ETA or total time when finished.'
if pbar.currval == 0:
return 'ETA: --:--:--'
elif pbar.finished:
return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
else:
elapsed = pbar.seconds_elapsed
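# Linear extrapolation: total time ~ elapsed * maxval / currval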
eta = elapsed * pbar.maxval / pbar.currval - elapsed
return 'ETA: %s' % self.format_time(eta)
|
def update(self, pbar)
|
Updates the widget to show the ETA or total time when finished.
| 4.027123 | 2.95699 | 1.361899 |
'Updates the widget with the current SI prefixed speed.'
if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0
scaled = power = 0
else:
speed = pbar.currval / pbar.seconds_elapsed
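# Each factor of 1000 in speed bumps the SI prefix index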
power = int(math.log(speed, 1000))
scaled = speed / 1000.**power
return self._format % (scaled, self.prefixes[power], self.unit)
|
def update(self, pbar)
|
Updates the widget with the current SI prefixed speed.
| 7.558087 | 5.454376 | 1.385692 |
'''Updates the widget to show the next marker or the first marker when
finished'''
if pbar.finished: return self.markers[0]
self.curmark = (self.curmark + 1) % len(self.markers)
return self.markers[self.curmark]
|
def update(self, pbar)
|
Updates the widget to show the next marker or the first marker when
finished
| 6.7312 | 3.399904 | 1.979821 |
'Updates the progress bar and its subcomponents'
left, marker, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= len(left) + len(right)
# Marker must *always* have length of 1
marker *= int(pbar.currval / pbar.maxval * width)
if self.fill_left:
return '%s%s%s' % (left, marker.ljust(width, self.fill), right)
else:
return '%s%s%s' % (left, marker.rjust(width, self.fill), right)
|
def update(self, pbar, width)
|
Updates the progress bar and its subcomponents
| 4.440053 | 4.251541 | 1.04434 |
'Updates the progress bar and its subcomponents'
left, marker, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= len(left) + len(right)
if pbar.finished: return '%s%s%s' % (left, width * marker, right)
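# Bounce the marker: advance to the right edge, then reflect back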
position = int(pbar.currval % (width * 2 - 1))
if position > width: position = width * 2 - position
lpad = self.fill * (position - 1)
rpad = self.fill * (width - len(marker) - len(lpad))
# Swap if we want to bounce the other way
if not self.fill_left: rpad, lpad = lpad, rpad
return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)
|
def update(self, pbar, width)
|
Updates the progress bar and its subcomponents
| 4.107019 | 4.010668 | 1.024024 |
'Tries to catch resize signals sent from the terminal.'
h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
self.term_width = w
|
def _handle_resize(self, signum=None, frame=None)
|
Tries to catch resize signals sent from the terminal.
| 7.59286 | 4.242216 | 1.789834 |
'Joins the widgets and justifies the line'
widgets = ''.join(self._format_widgets())
if self.left_justify: return widgets.ljust(self.term_width)
else: return widgets.rjust(self.term_width)
|
def _format_line(self)
|
Joins the widgets and justifies the line
| 7.440477 | 4.771089 | 1.559492 |
'Returns whether the ProgressBar should redraw the line.'
if self.currval >= self.next_update or self.finished: return True
delta = time.time() - self.last_update_time
return self._time_sensitive and delta > self.poll
|
def _need_update(self)
|
Returns whether the ProgressBar should redraw the line.
| 13.859288 | 9.534692 | 1.453564 |
'Checks all widgets for the time sensitive bit'
self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
for w in self.widgets)
|
def _update_widgets(self)
|
Checks all widgets for the time sensitive bit
| 11.950857 | 5.654054 | 2.113679 |
'Updates the ProgressBar to a new value.'
if value is not None and value is not UnknownLength:
if (self.maxval is not UnknownLength
and not 0 <= value <= self.maxval):
raise ValueError('Value out of range')
self.currval = value
if not self._need_update(): return
if self.start_time is None:
raise RuntimeError('You must call "start" before calling "update"')
now = time.time()
self.seconds_elapsed = now - self.start_time
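# Defer the next redraw by update_interval steps; _need_update() throttles in between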
self.next_update = self.currval + self.update_interval
self.fd.write(self._format_line() + '\r')
self.last_update_time = now
|
def update(self, value=None)
|
Updates the ProgressBar to a new value.
| 4.465471 | 4.306525 | 1.036908 |
'''Starts measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
'''
if self.maxval is None:
self.maxval = self._DEFAULT_MAXVAL
self.num_intervals = max(100, self.term_width)
self.next_update = 0
if self.maxval is not UnknownLength:
if self.maxval < 0: raise ValueError('Value out of range')
self.update_interval = self.maxval / self.num_intervals
self.start_time = self.last_update_time = time.time()
self.update(0)
return self
|
def start(self)
|
Starts measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
| 4.076032 | 2.637952 | 1.54515 |
'Puts the ProgressBar bar in the finished state.'
self.finished = True
self.update(self.maxval)
self.fd.write('\n')
if self.signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
|
def finish(self)
|
Puts the ProgressBar bar in the finished state.
| 7.168081 | 5.134816 | 1.395976 |
from jcvi.graphics.base import plt, savefig
from jcvi.graphics.chromosome import ChromosomeMap
p = OptionParser(plot.__doc__)
p.add_option("--firstn", type="int", help="Only plot the first N genes")
p.add_option("--ymax", type="int", help="Y-axis max value")
p.add_option("--log", action="store_true",
help="Write plotting data [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="6x4")
if len(args) != 2:
sys.exit(not p.print_help())
taggedbed, chr = args
bed = Bed(taggedbed)
beds = list(bed.sub_bed(chr))
old, new = [], []
i = 0
for b in beds:
accn = b.extra[0]
if "te" in accn:
continue
accn, tag = accn.split("|")
if tag == "OVERLAP":
continue
c, r = atg_name(accn)
if tag == "NEW":
new.append((i, r))
else:
old.append((i, r))
i += 1
ngenes = i
assert ngenes == len(new) + len(old)
logging.debug("Imported {0} ranks on {1}.".format(ngenes, chr))
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
xstart, xend = .2, .8
ystart, yend = .2, .8
pad = .02
ngenes = opts.firstn or ngenes
ymax = opts.ymax or 500000
title = "Assignment of Medtr identifiers"
if opts.ymax:
subtitle = "{0}, first {1} genes".format(chr, ngenes)
else:
subtitle = "{0}, {1} genes ({2} new)".format(chr, ngenes, len(new))
chr_map = ChromosomeMap(fig, root, xstart, xend, ystart, yend, pad, 0,
ymax, 5, title, subtitle)
ax = chr_map.axes
if opts.log:
from jcvi.utils.table import write_csv
header = ["x", "y"]
write_csv(header, new, filename=chr + ".new")
write_csv(header, old, filename=chr + ".old")
x, y = zip(*new)
ax.plot(x, y, "b,")
x, y = zip(*old)
ax.plot(x, y, "r,")
# Legends
ymid = (ystart + yend) / 2
y = ymid + pad
root.plot([.2], [y], "r.", lw=2)
root.text(.2 + pad, y, "Existing Medtr ids", va="center", size=10)
y = ymid - pad
root.plot([.2], [y], "b.", lw=2)
root.text(.2 + pad, y, "Newly instantiated ids", va="center", size=10)
ax.set_xlim(0, ngenes)
ax.set_ylim(0, ymax)
ax.set_axis_off()
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = chr + ".identifiers." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
def plot(args)
|
%prog plot tagged.new.bed chr1
Plot gene identifiers along a particular chromosome, often to illustrate the
gene id assignment procedure.
| 2.960505 | 2.878901 | 1.028346 |
p = OptionParser(instantiate.__doc__)
p.set_annot_reformat_opts()
p.add_option("--extended_stride", default=False, action="store_true",
help="Toggle extended strides for gene numbering")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
taggedbed, blacklist, gapsbed = args
r = NameRegister(prefix=opts.prefix, pad0=opts.pad0, uc=opts.uc)
r.get_blacklist(blacklist)
r.get_gaps(gapsbed)
# Run through the bed, identify stretch of NEW ids to instantiate,
# identify the flanking FRAMEs, interpolate!
bed = Bed(taggedbed)
outputbed = taggedbed.rsplit(".", 1)[0] + ".new.bed"
fw = open(outputbed, "w")
tagkey = lambda x: x.rsplit("|", 1)[-1]
for chr, sbed in bed.sub_beds():
current_chr = chr_number(chr)
if not current_chr:
continue
sbed = list(sbed)
ranks = []
for i, s in enumerate(sbed):
nametag = s.extra[0]
tag = tagkey(nametag)
if tag in (NEW, FRAME):
ranks.append((i, nametag))
blocks = []
for tag, names in groupby(ranks, key=lambda x: tagkey(x[-1])):
names = list(names)
if tag == NEW:
blocks.append((tag, [sbed[x[0]] for x in names]))
else:
start, end = names[0][-1], names[-1][-1]
start, end = atg_name(start, retval="rank"), atg_name(end, retval="rank")
blocks.append((tag, [start, end]))
id_table = {} # old to new name conversion
for i, (tag, info) in enumerate(blocks):
if tag != NEW:
continue
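# NEW blocks take rank bounds from the flanking FRAME blocks; chromosome ends fall back to 0 / +10000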
start_id = 0 if i == 0 else blocks[i - 1][1][-1]
end_id = start_id + 10000 if i == len(blocks) - 1 \
else blocks[i + 1][1][0]
r.allocate(info, chr, start_id, end_id, id_table, extended_stride=opts.extended_stride)
# Output new names
for i, s in enumerate(sbed):
nametag = s.extra[0]
name, tag = nametag.split("|")
if tag == NEW:
assert name == '.'
name = id_table[s.accn]
elif tag == OVERLAP:
if name in id_table:
name = id_table[name]
s.extra[0] = "|".join((name, tag))
print(s, file=fw)
fw.close()
|
def instantiate(args)
|
%prog instantiate tagged.bed blacklist.ids big_gaps.bed
instantiate NEW genes tagged by renumber.
| 4.102789 | 3.818015 | 1.074587 |
pf = bedfile.rsplit(".", 1)[0]
abedfile = pf + ".a.bed"
bbedfile = pf + ".b.bed"
fwa = open(abedfile, "w")
fwb = open(bbedfile, "w")
bed = Bed(bedfile)
seen = set()
for b in bed:
accns = b.accn.split(";")
new_accns = []
for accn in accns:
if ":" in accn:
method, a = accn.split(":", 1)
if method in ("liftOver", "GMAP", ""):
accn = a
if accn in seen:
logging.error("Duplicate id {0} found. Ignored.".format(accn))
continue
new_accns.append(accn)
b.accn = accn
print(b, file=fwa)
seen.add(accn)
b.accn = ";".join(new_accns)
print(b, file=fwb)
fwa.close()
fwb.close()
|
def prepare(bedfile)
|
Remove prepended tags in gene names.
| 2.753028 | 2.748165 | 1.00177 |
from jcvi.algorithms.lis import longest_increasing_subsequence
from jcvi.utils.grouper import Grouper
p = OptionParser(renumber.__doc__)
p.set_annot_reformat_opts()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
abedfile = pf + ".a.bed"
bbedfile = pf + ".b.bed"
if need_update(bedfile, (abedfile, bbedfile)):
prepare(bedfile)
mbed = Bed(bbedfile)
g = Grouper()
for s in mbed:
accn = s.accn
g.join(*accn.split(";"))
bed = Bed(abedfile)
for chr, sbed in bed.sub_beds():
current_chr = chr_number(chr)
if not current_chr:
continue
ranks = []
gg = set()
for s in sbed:
accn = s.accn
achr, arank = atg_name(accn)
if achr != current_chr:
continue
ranks.append(arank)
gg.add(accn)
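# Genes whose ranks already increase along the chromosome keep their names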
lranks = longest_increasing_subsequence(ranks)
print(current_chr, len(sbed), "==>", len(ranks), \
"==>", len(lranks), file=sys.stderr)
granks = set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, uc=opts.uc) for x in lranks) | \
set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks)
tagstore = {}
for s in sbed:
achr, arank = atg_name(s.accn)
accn = s.accn
if accn in granks:
tag = (accn, FRAME)
elif accn in gg:
tag = (accn, RETAIN)
else:
tag = (".", NEW)
tagstore[accn] = tag
# Find cases where genes overlap
for s in sbed:
accn = s.accn
gaccn = g[accn]
tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn]
group = [(PRIORITY.index(tag), x) for tag, x in tags]
best = min(group)[-1]
if accn != best:
tag = (best, OVERLAP)
else:
tag = tagstore[accn]
print("\t".join((str(s), "|".join(tag))))
|
def renumber(args)
|
%prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates.
| 3.613333 | 3.554373 | 1.016588 |
import string
p = OptionParser(rename.__doc__)
p.add_option("-a", dest="gene_increment", default=10, type="int",
help="Increment for continuous genes [default: %default]")
p.add_option("-b", dest="gap_increment", default=1000, type="int",
help="Increment for gaps [default: %default]")
p.add_option("--pad0", default=6, type="int",
help="Pad gene identifiers with 0 [default: %default]")
p.add_option("--spad0", default=4, type="int",
help="Pad gene identifiers on small scaffolds [default: %default]")
p.add_option("--prefix", default="Bo",
help="Genome prefix [default: %default]")
p.add_option("--jgi", default=False, action="store_true",
help="Create JGI style identifier PREFIX.NN[G|TE]NNNNN.1" + \
" [default: %default]")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
genebed = args[0]
gapbed = args[1] if len(args) == 2 else None
prefix = opts.prefix
gene_increment = opts.gene_increment
gap_increment = opts.gap_increment
genes = Bed(genebed)
if gapbed:
fp = open(gapbed)
for row in fp:
genes.append(BedLine(row))
genes.sort(key=genes.key)
idsfile = prefix + ".ids"
newbedfile = prefix + ".bed"
gap_increment -= gene_increment
assert gap_increment >= 0
if opts.jgi:
prefix += "."
fw = open(idsfile, "w")
for chr, lines in groupby(genes, key=lambda x: x.seqid):
lines = list(lines)
pad0 = opts.pad0 if len(lines) > 1000 else opts.spad0
isChr = chr[0].upper() == 'C'
digits = "".join(x for x in chr if x in string.digits)
gs = "g" if isChr else "s"
pp = prefix + digits + gs
idx = 0
if isChr:
idx += gap_increment
for r in lines:
isGap = r.strand not in ("+", "-")
if isGap:
idx += gap_increment
continue
else:
idx += gene_increment
accn = pp + "{0:0{1}d}".format(idx, pad0)
oldaccn = r.accn
print("\t".join((oldaccn, accn)), file=fw)
r.accn = accn
genes.print_to_file(newbedfile)
logging.debug("Converted IDs written to `{0}`.".format(idsfile))
logging.debug("Converted bed written to `{0}`.".format(newbedfile))
|
def rename(args)
|
%prog rename genes.bed [gaps.bed]
Rename genes for annotation release.
For genes on chromosomes (e.g. the 12th gene on C1):
Bo1g00120
For genes on scaffolds (e.g. the 12th gene on unplaced Scaffold00285):
Bo00285s120
The genes identifiers will increment by 10. So assuming no gap, these are
the consecutive genes:
Bo1g00120, Bo1g00130, Bo1g00140...
Bo00285s120, Bo00285s130, Bo00285s140...
When we encounter gaps, we would like the increment to be larger. For example,
Bo1g00120, <gap>, Bo1g01120...
Gaps bed file is optional.
| 3.200842 | 2.917367 | 1.097168 |
pf, id = (), identifier
if "|" in identifier:
pf, id = tuple(identifier.split('|')[:-1]), identifier.split('|')[-1]
return pf, id
|
def parse_prefix(identifier)
|
Parse identifier such as a|c|le|d|li|re|or|AT4G00480.1 and return
tuple of prefix string (separated at '|') and suffix (AGI identifier)
| 6.167175 | 4.885639 | 1.262307 |
p = OptionParser(publocus.__doc__)
p.add_option("--locus_tag", default="MTR_",
help="GenBank locus tag [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
locus_tag = opts.locus_tag
index = AutoVivification()
idsfile, = args
fp = must_open(idsfile)
for row in fp:
locus, chrom, sep, rank, iso = atg_name(row, retval="locus,chr,sep,rank,iso")
if None in (locus, chrom, sep, rank, iso):
logging.warning("{0} is not a valid gene model identifier".format(row))
continue
if locus not in index.keys():
pub_locus = gene_name(chrom, rank, prefix=locus_tag, sep=sep)
index[locus]['pub_locus'] = pub_locus
index[locus]['isos'] = set()
index[locus]['isos'].add(int(iso))
for locus in index:
pub_locus = index[locus]['pub_locus']
index[locus]['isos'] = sorted(index[locus]['isos'])
if len(index[locus]['isos']) > 1:
new = [chr(n+64) for n in index[locus]['isos'] if n < 27]
for i, ni in zip(index[locus]['isos'], new):
print("\t".join(x for x in ("{0}.{1}".format(locus, i), \
"{0}{1}".format(pub_locus, ni))))
else:
print("\t".join(x for x in ("{0}.{1}".format(locus, index[locus]['isos'][0]), \
pub_locus)))
|
def publocus(args)
|
%prog publocus idsfile > idsfiles.publocus
Given a list of model identifiers, convert each into a GenBank-approved
pub_locus.
Example output:
Medtr1g007020.1 MTR_1g007020
Medtr1g007030.1 MTR_1g007030
Medtr1g007060.1 MTR_1g007060A
Medtr1g007060.2 MTR_1g007060B
| 2.915756 | 2.765725 | 1.054246 |
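The isoform suffixing in publocus() maps isoform numbers to letters via chr(n + 64), and is applied only when a locus has more than one isoform; a worked example:

isos = [1, 2]
letters = [chr(n + 64) for n in isos if n < 27]  # 1 -> 'A', 2 -> 'B'
print(letters)  # ['A', 'B'], hence Medtr1g007060.1/2 -> MTR_1g007060A/B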
from jcvi.formats.gff import Gff
p = OptionParser(augustus.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
ingff3, = args
gff = Gff(ingff3)
fw = must_open(opts.outfile, "w")
seen = defaultdict(int)
for g in gff:
if g.type not in ("gene", "transcript", "CDS"):
continue
if g.type == "transcript":
g.type = "mRNA"
prefix = g.seqid + "_"
pid = prefix + g.id
newid = "{0}-{1}".format(pid, seen[pid]) if pid in seen else pid
seen[pid] += 1
g.attributes["ID"] = [newid]
g.attributes["Parent"] = [(prefix + x) for x in g.attributes["Parent"]]
g.update_attributes()
print(g, file=fw)
fw.close()
|
def augustus(args)
|
%prog augustus augustus.gff3 > reformatted.gff3
AUGUSTUS does generate a gff3 (--gff3=on) but it needs some refinement.
| 2.521109 | 2.380777 | 1.058944 |
from jcvi.formats.gff import sort
p = OptionParser(tRNAscan.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
trnaout, = args
gffout = trnaout + ".gff3"
fp = open(trnaout)
fw = open(gffout, "w")
next(fp)
next(fp)
row = next(fp)
assert row.startswith("--------")
for row in fp:
atoms = [x.strip() for x in row.split("\t")]
contig, trnanum, start, end, aa, codon, \
intron_start, intron_end, score = atoms
start, end = int(start), int(end)
orientation = '+'
if start > end:
start, end = end, start
orientation = '-'
source = "tRNAscan"
type = "tRNA"
if codon == "???":
codon = "XXX"
comment = "ID={0}.tRNA.{1};Name=tRNA-{2} (anticodon: {3})".\
format(contig, trnanum, aa, codon)
print("\t".join(str(x) for x in (contig, source, type, start,\
end, score, orientation, ".", comment)), file=fw)
fw.close()
sort([gffout, "-i"])
|
def tRNAscan(args)
|
%prog tRNAscan all.trna > all.trna.gff3
Convert tRNAscan-SE output into gff3 format.
Sequence tRNA Bounds tRNA Anti Intron Bounds Cove
Name tRNA # Begin End Type Codon Begin End Score
-------- ------ ---- ------ ---- ----- ----- ---- ------
23231 1 335355 335440 Tyr GTA 335392 335404 69.21
23231 2 1076190 1076270 Leu AAG 0 0 66.33
Conversion based on PERL one-liner in:
<https://github.com/sujaikumar/assemblage/blob/master/README-annotation.md>
| 3.146721 | 2.900554 | 1.084869 |
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
f = Fasta(fastafile, index=False)
halfmaskedseqs = set()
allmasked = 0
allbases = 0
cutoff = 50
for key, seq in f.iteritems():
masked = 0
for base in seq:
if base not in "AGCT":
masked += 1
seqlen = len(seq)
if masked * 100. / seqlen > cutoff:
halfmaskedseqs.add(key)
allmasked += masked
allbases += seqlen
seqnum = len(f)
maskedseqnum = len(halfmaskedseqs)
print("Total masked bases: {0}".\
format(percentage(allmasked, allbases)), file=sys.stderr)
print("Total masked sequences (contain > {0}% masked): {1}".\
format(cutoff, percentage(maskedseqnum, seqnum)), file=sys.stderr)
|
def summary(args)
|
%prog summary fastafile
Report the number of bases and sequences masked.
| 3.017741 | 2.832952 | 1.065229 |
p = OptionParser(mask.__doc__)
p.add_option("--hard", dest="hard", default=False, action="store_true",
help="Hard mask the low-complexity bases [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
genomefile, = args
# entire pipeline
countsfile = genomefile + ".counts"
wm_mk_counts(infile=genomefile, outfile=countsfile)
maskedfastafile = "%s.masked%s" % op.splitext(genomefile)
wm_mk_masks(infile=countsfile, outfile=maskedfastafile, genomefile=genomefile)
if opts.hard:
hardmask(maskedfastafile)
|
def mask(args)
|
%prog mask fastafile
This script pipelines windowmasker from NCBI BLAST+. The masked fasta file
will have an appended suffix of .masked, with all the low-complexity bases
masked (default to lower case; set --hard for hardmasking).
| 3.544256 | 3.143801 | 1.127379 |
group.sort()
pos = bisect_left(group, (query, 0))
left_flanker = group[0] if pos == 0 else group[pos-1]
right_flanker = group[-1] if pos == len(group) else group[pos]
# pick the closest flanker
if abs(query - left_flanker[0]) < abs(query - right_flanker[0]):
flanker, other = left_flanker, right_flanker
else:
flanker, other = right_flanker, left_flanker
flanked = not (pos == 0 or pos == len(group) or flanker == query)
return flanker, other, flanked
|
def get_flanker(group, query)
|
>>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385)
((373, 15176), (400, 15193), True)
>>> get_flanker([(124, 13639), (137, 13625)], 138)
((137, 13625), (137, 13625), False)
| 2.173206 | 2.331902 | 0.931946 |
regions = []
ysorted = sorted(data, key=lambda x: x[1])
g = Grouper()
a, b = tee(ysorted)
next(b, None)
for ia, ib in izip(a, b):
pos1, pos2 = ia[1], ib[1]
if pos2 - pos1 < window and sbed[pos1].seqid == sbed[pos2].seqid:
g.join(ia, ib)
for group in sorted(g):
(qflanker, syntelog), (far_flanker, far_syntelog), flanked = \
get_flanker(group, query)
# run a mini-dagchainer here, take the direction that gives us most anchors
if colinear:
y_indexed_group = [(y, i) for i, (x, y) in enumerate(group)]
lis = longest_increasing_subsequence(y_indexed_group)
lds = longest_decreasing_subsequence(y_indexed_group)
if len(lis) >= len(lds):
track = lis
orientation = "+"
else:
track = lds
orientation = "-"
group = [group[i] for (y, i) in track]
xpos, ypos = zip(*group)
score = min(len(set(xpos)), len(set(ypos)))
if qflanker == query:
gray = "S"
else:
gray = "G" if not flanked else "F"
score -= 1 # slight penalty for not finding syntelog
if score < cutoff:
continue
# y-boundary of the block
left, right = group[0][1], group[-1][1]
# this characterizes a syntenic region (left, right).
# syntelog is -1 if it's a gray gene
syn_region = (syntelog, far_syntelog, left,
right, gray, orientation, score)
regions.append(syn_region)
return sorted(regions, key=lambda x: -x[-1])
|
def find_synteny_region(query, sbed, data, window, cutoff, colinear=False)
|
Get all synteny blocks for a query. The algorithm is single linkage;
anchors are within a window centered on the query.
Two categories of syntenic regions, depending on what the query is:
(Syntelog): syntenic region is denoted by the syntelog
(Gray gene): syntenic region is marked by the closest flanker
| 5.017644 | 4.566099 | 1.098891 |
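find_synteny_region() decides block orientation by comparing the longest increasing vs. decreasing subsequences of the anchors' y-positions. The real helpers come from jcvi; the quadratic LIS below is an illustrative standalone substitute:

def lis(seq):
    # Quadratic longest-increasing-subsequence, for illustration only.
    best = [[x] for x in seq]
    for i, x in enumerate(seq):
        for j in range(i):
            if seq[j] < x and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [x]
    return max(best, key=len) if seq else []

ys = [15184, 15178, 15176, 15193]            # y-positions of one anchor group
up, down = lis(ys), lis([-y for y in ys])    # decreasing run via negation
print("+" if len(up) >= len(down) else "-")  # '-' : block runs antiparallel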
m, n = len(qpadnames), len(spadnames)
qpadorder, spadorder = qpadbed.order, spadbed.order
qpadid = dict((a, i) for i, a in enumerate(qpadnames))
spadid = dict((a, i) for i, a in enumerate(spadnames))
qpadlen = dict((a, len(b)) for a, b in qpadbed.sub_beds())
spadlen = dict((a, len(b)) for a, b in spadbed.sub_beds())
qsize, ssize = len(qpadbed), len(spadbed)
assert sum(qpadlen.values()) == qsize
assert sum(spadlen.values()) == ssize
# Populate arrays of observed counts and expected counts
logging.debug("Initialize array of size ({0} x {1})".format(m, n))
observed = np.zeros((m, n))
fp = open(blastfile)
all_dots = 0
for row in fp:
b = BlastLine(row)
qi, q = qpadorder[b.query]
si, s = spadorder[b.subject]
qseqid, sseqid = q.seqid, s.seqid
qsi, ssi = qpadid[qseqid], spadid[sseqid]
observed[qsi, ssi] += 1
all_dots += 1
assert int(round(observed.sum())) == all_dots
logging.debug("Total area: {0} x {1}".format(qsize, ssize))
S = qsize * ssize
expected = np.zeros((m, n))
qsum = 0
for i, a in enumerate(qpadnames):
alen = qpadlen[a]
qsum += alen
for j, b in enumerate(spadnames):
blen = spadlen[b]
expected[i, j] = all_dots * alen * blen * 1. / S
assert int(round(expected.sum())) == all_dots
# Calculate the statistical significance for each cell
from scipy.stats.distributions import poisson
M = m * n # multiple testing
logmp = np.zeros((m, n))
for i in xrange(m):
for j in xrange(n):
obs, exp = observed[i, j], expected[i, j]
pois = max(poisson.pmf(obs, exp), 1e-250) # Underflow
logmp[i, j] = max(- log(pois), 0)
return logmp
|
def make_arrays(blastfile, qpadbed, spadbed, qpadnames, spadnames)
|
This function makes three matrices: observed, expected and logmp. The logmp
contains the statistical significance for each comparison.
| 2.712725 | 2.601244 | 1.042857 |
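A worked example of the per-cell statistic: with 5 expected dots but 60 observed, the -log p under the Poisson model comfortably exceeds the -log(1e-30) ≈ 69.1 cutoff applied downstream in pad(). The numbers are made up for illustration:

from math import log
from scipy.stats.distributions import poisson

obs, exp = 60, 5.0
pois = max(poisson.pmf(obs, exp), 1e-250)  # guard against underflow
logmp = max(-log(pois), 0)
print("{0:.1f}".format(logmp))  # ~97.1 -> this cell would be called significant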
from jcvi.formats.cdt import CDT
p = OptionParser(pad.__doc__)
p.set_beds()
p.add_option("--cutoff", default=.3, type="float",
help="The clustering cutoff to call similar [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cutoff = opts.cutoff
blastfile, cdtfile = args
qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts)
cdt = CDT(cdtfile)
qparts = list(cdt.iter_partitions(cutoff=cutoff))
sparts = list(cdt.iter_partitions(cutoff=cutoff, gtr=False))
qid, sid = {}, {}
for i, part in enumerate(qparts):
qid.update(dict((x, i) for x in part))
for i, part in enumerate(sparts):
sid.update(dict((x, i) for x in part))
# Without writing files, conversion from PAD to merged PAD is done in memory
for q in qbed:
q.seqid = qid[q.seqid]
for s in sbed:
s.seqid = sid[s.seqid]
qnames = range(len(qparts))
snames = range(len(sparts))
logmp = make_arrays(blastfile, qbed, sbed, qnames, snames)
m, n = logmp.shape
pvalue_cutoff = 1e-30
cutoff = - log(pvalue_cutoff)
significant = []
for i in xrange(m):
for j in xrange(n):
score = logmp[i, j]
if score < cutoff:
continue
significant.append((qparts[i], sparts[j], score))
for a, b, score in significant:
print("|".join(a), "|".join(b), score)
logging.debug("Collected {0} PAR comparisons significant at (P < {1}).".\
format(len(significant), pvalue_cutoff))
return significant
|
def pad(args)
|
%prog pad blastfile cdtfile --qbed q.pad.bed --sbed s.pad.bed
Test and reconstruct candidate PADs.
| 3.659804 | 3.450032 | 1.060803 |
from jcvi.utils.range import range_chain, LEFT, RIGHT
NUL = 2
selected, score = range_chain(ranges)
endpoints = [(x.start, NUL) for x in selected]
endpoints += [(x[0], LEFT) for x in extra]
endpoints += [(x[1], RIGHT) for x in extra]
endpoints.sort()
current_left = 0
for a, ai in endpoints:
if ai == LEFT:
current_left = a
if ai == RIGHT:
yield current_left, a
elif ai == NUL:
if a - current_left < minsegment:
continue
yield current_left, a - 1
current_left = a
|
def get_segments(ranges, extra, minsegment=40)
|
Given a list of Range, perform chaining on the ranges and select the
highest-scoring subset, then cut based on their boundaries. Let's say the
projection of the synteny blocks onto one axis looks like the following.
1=====10......20====30....35====~~
Then the segmentation will yield two blocks [1, 20) and [20, 35), using an
arbitrary right extension rule. Extra are additional end breaks for
chromosomes.
| 4.174054 | 4.235098 | 0.985586 |
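A worked trace of the endpoint sweep above, restated standalone on the docstring example (selected block starts 1, 20, 35; one chromosome end break). The LEFT/RIGHT/NUL tag values here are local stand-ins for the jcvi.utils.range constants, and a smaller minsegment is used so no segment is skipped:

def get_segments(starts, extra, minsegment=10):
    # Standalone restatement of the endpoint sweep in get_segments().
    LEFT, RIGHT, NUL = 0, 1, 2
    endpoints = sorted([(x, NUL) for x in starts] +
                       [(x0, LEFT) for x0, x1 in extra] +
                       [(x1, RIGHT) for x0, x1 in extra])
    current_left = 0
    for a, ai in endpoints:
        if ai == LEFT:
            current_left = a
        if ai == RIGHT:
            yield current_left, a
        elif ai == NUL:
            if a - current_left < minsegment:
                continue
            yield current_left, a - 1
            current_left = a

print(list(get_segments([1, 20, 35], [(0, 60)])))
# [(0, 19), (20, 34), (35, 60)] : cut at block starts, extended to the right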
from jcvi.utils.range import Range
p = OptionParser(cluster.__doc__)
p.set_beds()
p.add_option("--minsize", default=10, type="int",
help="Only segment using blocks >= size [default: %default]")
p.add_option("--path", default="~/scratch/bin",
help="Path to the CLUSTER 3.0 binary [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
blastfile, anchorfile = args
qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts)
minsize = opts.minsize
ac = AnchorFile(anchorfile)
qranges, sranges = [], []
qextra = [x[1:] for x in qbed.get_breaks()]
sextra = [x[1:] for x in sbed.get_breaks()]
id = 0
for block in ac.iter_blocks(minsize=minsize):
q, s = zip(*block)[:2]
q = [qorder[x][0] for x in q]
s = [sorder[x][0] for x in s]
minq, maxq = min(q), max(q)
mins, maxs = min(s), max(s)
id += 1
qr = Range("0", minq, maxq, maxq - minq, id)
sr = Range("0", mins, maxs, maxs - mins, id)
qranges.append(qr)
sranges.append(sr)
qpads = list(get_segments(qranges, qextra))
spads = list(get_segments(sranges, sextra))
suffix = ".pad.bed"
qpf = opts.qbed.split(".")[0]
spf = opts.sbed.split(".")[0]
qpadfile = qpf + suffix
spadfile = spf + suffix
qnpads, qpadnames = write_PAD_bed(qpadfile, qpf, qpads, qbed)
snpads, spadnames = write_PAD_bed(spadfile, spf, spads, sbed)
qpadbed, spadbed = Bed(qpadfile), Bed(spadfile)
logmp = make_arrays(blastfile, qpadbed, spadbed, qpadnames, spadnames)
m, n = logmp.shape
matrixfile = ".".join((qpf, spf, "logmp.txt"))
fw = open(matrixfile, "w")
header = ["o"] + spadnames
print("\t".join(header), file=fw)
for i in xrange(m):
row = [qpadnames[i]] + ["{0:.1f}".format(x) for x in logmp[i, :]]
print("\t".join(row), file=fw)
fw.close()
# Run CLUSTER 3.0 (Pearson correlation, average linkage)
cmd = op.join(opts.path, "cluster")
cmd += " -g 2 -e 2 -m a -f {0}".format(matrixfile)
pf = matrixfile.rsplit(".", 1)[0]
cdtfile = pf + ".cdt"
if need_update(matrixfile, cdtfile):
sh(cmd)
|
def cluster(args)
|
%prog cluster blastfile anchorfile --qbed qbedfile --sbed sbedfile
Cluster the segments and form PADs. This is the method described in the Tang
et al. (2010) PNAS paper. The anchorfile defines a list of synteny blocks,
based on which the genome on one or both axis can be chopped up into pieces
and clustered.
| 3.189967 | 3.07571 | 1.037148 |
p = OptionParser(cat.__doc__.format(page_range_help=PAGE_RANGE_HELP))
p.add_option("--nosort", default=False, action="store_true",
help="Do not sort file names")
p.add_option("--cleanup", default=False, action="store_true",
help="Remove individual pdfs after merging")
p.set_outfile()
p.set_verbose(help="Show page ranges as they are being read")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
outfile = opts.outfile
if outfile in args:
args.remove(outfile)
if not opts.nosort:
args = natsorted(args)
filename_page_ranges = parse_filename_page_ranges(args)
verbose = opts.verbose
fw = must_open(outfile, "wb")
merger = PdfFileMerger()
in_fs = {}
try:
for (filename, page_range) in filename_page_ranges:
if verbose:
print(filename, page_range, file=sys.stderr)
if filename not in in_fs:
in_fs[filename] = open(filename, "rb")
merger.append(in_fs[filename], pages=page_range)
except:
print(traceback.format_exc(), file=sys.stderr)
print("Error while reading " + filename, file=sys.stderr)
sys.exit(1)
merger.write(fw)
fw.close()
if opts.cleanup:
logging.debug("Cleaning up {} files".format(len(args)))
for arg in args:
os.remove(arg)
|
def cat(args)
|
%prog cat *.pdf -o output.pdf
Concatenate pages from pdf files into a single pdf file.
Page ranges refer to the previously-named file.
A file not followed by a page range means all the pages of the file.
PAGE RANGES are like Python slices.
{page_range_help}
EXAMPLES
pdfcat -o output.pdf head.pdf content.pdf :6 7: tail.pdf -1
Concatenate all of head.pdf, all but page seven of content.pdf,
and the last page of tail.pdf, producing output.pdf.
pdfcat chapter*.pdf >book.pdf
You can specify the output file by redirection.
pdfcat chapter?.pdf chapter10.pdf >book.pdf
In case you don't want chapter 10 before chapter 2.
| 2.654883 | 2.533564 | 1.047885 |
l = len(kmer) - 2
k = l if l < 64 else 64
counts = defaultdict(int)
for i in range(l):
trinuc = kmer[i: i + 3]
counts[trinuc] += 1
logk = math.log(k)
res = 0
for v in counts.values():
f = v * 1. / l
res += f * math.log(f) / logk
return res * -100
|
def entropy_score(kmer)
|
Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics
https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic
| 3.327209 | 3.379021 | 0.984666 |
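Two sanity checks of the entropy score, with the function restated compactly (same math as above): a homopolymer has a single trinucleotide and scores 0, while a 12-mer whose 10 overlapping trinucleotides are all distinct scores 100.

import math
from collections import defaultdict

def entropy_score(kmer):
    l = len(kmer) - 2  # number of overlapping trinucleotides
    k = min(l, 64)     # at most 4^3 = 64 distinct trinucleotides
    counts = defaultdict(int)
    for i in range(l):
        counts[kmer[i: i + 3]] += 1
    return 100 * sum(-(v * 1. / l) * math.log(v * 1. / l) / math.log(k)
                     for v in counts.values())

for kmer in ("AAAAAAAAAAAA", "ACGGTACCGTTA"):
    print("{0}\t{1:.1f}".format(kmer, entropy_score(kmer)))
# AAAAAAAAAAAA    0.0
# ACGGTACCGTTA    100.0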
p = OptionParser(entropy.__doc__)
p.add_option("--threshold", default=0, type="int",
help="Complexity needs to be above")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
kmc_out, = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score))))
|
def entropy(args)
|
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
| 3.336012 | 3.180103 | 1.049026 |
from jcvi.formats.fasta import rc, parse_fasta
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, dumpfile = args
fp = open(dumpfile)
KMERS = set()
for row in fp:
kmer = row.split()[0]
kmer_rc = rc(kmer)
KMERS.add(kmer)
KMERS.add(kmer_rc)
K = len(kmer)
logging.debug("Imported {} {}-mers".format(len(KMERS), K))
for name, seq in parse_fasta(fastafile):
name = name.split()[0]
for i in range(len(seq) - K + 1):  # include the final K-mer
if i % 5000000 == 0:
print("{}:{}".format(name, i), file=sys.stderr)
kmer = seq[i: i + K]
if kmer in KMERS:
print("\t".join(str(x) for x in (name, i, i + K, kmer)))
|
def bed(args)
|
%prog bed fastafile kmer.dump.txt
Map kmers on FASTA.
| 2.433812 | 2.177671 | 1.117622 |
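bed() finds hits on both strands by storing every dump k-mer alongside its reverse complement; a standalone sketch of that canonicalization (rc is re-derived here instead of importing jcvi.formats.fasta):

comp = {"A": "T", "C": "G", "G": "C", "T": "A", "N": "N"}
rc = lambda s: "".join(comp[c] for c in reversed(s))

KMERS = set()
kmer = "ACGTT"           # one k-mer from the dump file
KMERS.add(kmer)
KMERS.add(rc(kmer))      # 'AACGT'
print("AACGT" in KMERS)  # True: reverse-strand occurrences also match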
p = OptionParser(kmcop.__doc__)
p.add_option("--action", choices=("union", "intersect"),
default="union", help="Action")
p.add_option("-o", default="results", help="Output name")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
indices = args
ku = KMCComplex(indices)
ku.write(opts.o, action=opts.action)
|
def kmcop(args)
|
%prog kmcop *.kmc_suf
Intersect or union kmc indices.
| 3.752324 | 3.232098 | 1.160956 |
p = OptionParser(kmc.__doc__)
p.add_option("-k", default=21, type="int", help="Kmer size")
p.add_option("--ci", default=2, type="int",
help="Exclude kmers with less than ci counts")
p.add_option("--cs", default=2, type="int",
help="Maximal value of a counter")
p.add_option("--cx", default=None, type="int",
help="Exclude kmers with more than cx counts")
p.add_option("--single", default=False, action="store_true",
help="Input is single-end data, only one FASTQ/FASTA")
p.add_option("--fasta", default=False, action="store_true",
help="Input is FASTA instead of FASTQ")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
folder, = args
K = opts.k
n = 1 if opts.single else 2
pattern = "*.fa,*.fa.gz,*.fasta,*.fasta.gz" if opts.fasta else \
"*.fq,*.fq.gz,*.fastq,*.fastq.gz"
mm = MakeManager()
for p, pf in iter_project(folder, pattern=pattern,
n=n, commonprefix=False):
pf = pf.split("_")[0] + ".ms{}".format(K)
infiles = pf + ".infiles"
fw = open(infiles, "w")
print("\n".join(p), file=fw)
fw.close()
cmd = "kmc -k{} -m64 -t{}".format(K, opts.cpus)
cmd += " -ci{} -cs{}".format(opts.ci, opts.cs)
if opts.cx:
cmd += " -cx{}".format(opts.cx)
if opts.fasta:
cmd += " -fm"
cmd += " @{} {} .".format(infiles, pf)
outfile = pf + ".kmc_suf"
mm.add(p, outfile, cmd)
mm.write()
|
def kmc(args)
|
%prog kmc folder
Run kmc3 on Illumina reads.
| 3.091515 | 3.034688 | 1.018726 |
p = OptionParser(meryl.__doc__)
p.add_option("-k", default=19, type="int", help="Kmer size")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
folder, = args
K = opts.k
cpus = opts.cpus
mm = MakeManager()
for p, pf in iter_project(folder):
cmds = []
mss = []
for i, ip in enumerate(p):
ms = "{}{}.ms{}".format(pf, i + 1, K)
mss.append(ms)
cmd = "meryl -B -C -m {} -threads {}".format(K, cpus)
cmd += " -s {} -o {}".format(ip, ms)
cmds.append(cmd)
ams, bms = mss
pms = "{}.ms{}".format(pf, K)
cmd = "meryl -M add -s {} -s {} -o {}".format(ams, bms, pms)
cmds.append(cmd)
cmd = "rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx".\
format(ams, ams, bms, bms)
cmds.append(cmd)
mm.add(p, pms + ".mcdat", cmds)
mm.write()
|
def meryl(args)
|
%prog meryl folder
Run meryl on Illumina reads.
| 3.420545 | 3.272519 | 1.045233 |
from scipy.stats import binom, poisson
p = OptionParser(model.__doc__)
p.add_option("-k", default=23, type="int", help="Kmer size")
p.add_option("--cov", default=50, type="int", help="Expected coverage")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
erate, = args
erate = float(erate)
cov = opts.cov
k = opts.k
xy = []
# Range includes c=0, although it is unclear what c=0 coverage means
for c in xrange(0, cov * 2 + 1):
Prob_Yk = 0
for i in xrange(k + 1):
# Probability of having exactly i errors
pi_i = binom.pmf(i, k, erate)
# Expected coverage of kmer with exactly i errors
mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i)
# Probability of seeing coverage of c
Prob_Yk_i = poisson.pmf(c, mu_i)
# Sum i over 0, 1, ... up to k errors
Prob_Yk += pi_i * Prob_Yk_i
xy.append((c, Prob_Yk))
x, y = zip(*xy)
asciiplot(x, y, title="Model")
|
def model(args)
|
%prog model erate
Model kmer distribution given error rate. See derivation in FIONA paper:
<http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>
| 4.303513 | 3.815313 | 1.127958 |
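A sanity-check sketch of the mixture model above (same formula; k = 23, coverage 50, 1% error rate): summing Prob(Yk = c) over the truncated grid should come out very close to 1, since k-mers with errors mostly land at small c.

from scipy.stats import binom, poisson

k, cov, erate = 23, 50, 0.01
total = 0.0
for c in range(0, cov * 2 + 1):
    # Prob(Yk = c) = sum over i errors of Binom(i; k, e) * Poisson(c; mu_i)
    total += sum(binom.pmf(i, k, erate) *
                 poisson.pmf(c, cov * (erate / 3) ** i * (1 - erate) ** (k - i))
                 for i in range(k + 1))
print("{0:.4f}".format(total))  # ~1.0000: little mass lies beyond c = 2 * cov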
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cnt1, cnt2 = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
scf, c1 = row.split()
c2 = d[scf]
c1, c2 = float(c1), float(c2)
c1 += 1
c2 += 1
score = int(100 * (log(c1) - log(c2)))
print("{0}\t{1}".format(scf, score))
|
def logodds(args)
|
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
| 2.519497 | 2.364323 | 1.065631 |
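The score in logodds() is a pseudocount-smoothed log-ratio scaled by 100; for a scaffold with 7 counts in cnt1 and 1 count in cnt2:

from math import log

c1, c2 = 7.0, 1.0
c1 += 1  # pseudocounts, as in logodds()
c2 += 1
print(int(100 * (log(c1) - log(c2))))  # 138 == int(100 * log(8 / 2))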
from bitarray import bitarray
p = OptionParser(count.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, jfdb = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open("tmp", "w")
proc = Popen(cmd, stdin=PIPE, stdout=t)
t.flush()
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=proc.stdin)
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = ".".join((fastafile, jfdb, "bin"))
fw = open(binfile, "wb")  # bitarray serialization requires binary mode
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c))
a.tofile(fw)
logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile))
fw.close()
sh("rm {0}".format(t.name))
logging.debug("Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".\
format(K, fastafile, jfdb, binfile))
cntfile = ".".join((fastafile, jfdb, "cnt"))
bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)])
logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
|
def count(args)
|
%prog count fastafile jf.db
Run dump - jellyfish - bin - bincount in serial.
| 3.419256 | 3.082718 | 1.109169 |
from bitarray import bitarray
from jcvi.formats.sizes import Sizes
p = OptionParser(bincount.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, binfile = args
K = opts.K
fp = open(binfile, "rb")  # bitarray deserialization requires binary mode
a = bitarray()
a.fromfile(fp)
f = Sizes(fastafile)
tsize = 0
fw = must_open(opts.outfile, "w")
for name, seqlen in f.iter_sizes():
ksize = seqlen - K + 1
b = a[tsize: tsize + ksize]
bcount = b.count()
print("\t".join(str(x) for x in (name, bcount)), file=fw)
tsize += ksize
|
def bincount(args)
|
%prog bincount fastafile binfile
Count K-mers in the bin.
| 2.581566 | 2.278369 | 1.133076 |
from bitarray import bitarray
p = OptionParser(bin.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
inp, outp = args
fp = must_open(inp)
fw = must_open(outp, "wb")  # bitarray serialization requires binary mode
a = bitarray()
for row in fp:
c = row.split()[-1]
a.append(int(c))
a.tofile(fw)
fw.close()
|
def bin(args)
|
%prog bin filename filename.bin
Serialize counts to bitarrays.
| 2.768208 | 2.62714 | 1.053697 |
p = OptionParser(dump.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
K = opts.K
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=fw)
fw.close()
|
def dump(args)
|
%prog dump fastafile
Convert FASTA sequences to list of K-mers.
| 2.375031 | 2.042125 | 1.163019 |
from jcvi.apps.base import getfilesize
from jcvi.utils.cbook import human_size
p = OptionParser(jellyfish.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.add_option("--coverage", default=40, type="int",
help="Expected sequence coverage [default: %default]")
p.add_option("--prefix", default="jf",
help="Database prefix [default: %default]")
p.add_option("--nohist", default=False, action="store_true",
help="Do not print histogram [default: %default]")
p.set_home("jellyfish")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
K = opts.K
coverage = opts.coverage
totalfilesize = sum(getfilesize(x) for x in fastqfiles)
fq = fastqfiles[0]
pf = opts.prefix
gzip = fq.endswith(".gz")
hashsize = totalfilesize / coverage
logging.debug("Total file size: {0}, hashsize (-s): {1}".\
format(human_size(totalfilesize,
a_kilobyte_is_1024_bytes=True), hashsize))
jfpf = "{0}-K{1}".format(pf, K)
jfdb = jfpf
fastqfiles = " ".join(fastqfiles)
jfcmd = op.join(opts.jellyfish_home, "jellyfish")
cmd = jfcmd
cmd += " count -t {0} -C -o {1}".format(opts.cpus, jfpf)
cmd += " -s {0} -m {1}".format(hashsize, K)
if gzip:
cmd = "gzip -dc {0} | ".format(fastqfiles) + cmd + " /dev/fd/0"
else:
cmd += " " + fastqfiles
if need_update(fastqfiles, jfdb):
sh(cmd)
if opts.nohist:
return
jfhisto = jfpf + ".histogram"
cmd = jfcmd + " histo -t 64 {0} -o {1}".format(jfdb, jfhisto)
if need_update(jfdb, jfhisto):
sh(cmd)
|
def jellyfish(args)
|
%prog jellyfish [*.fastq|*.fasta]
Run jellyfish to dump histogram to be used in kmer.histogram().
| 2.838264 | 2.760062 | 1.028333 |
pf, sf = op.splitext(merylfile)
outfile = pf + ".histogram"
if need_update(merylfile, outfile):
cmd = "meryl -Dh -s {0}".format(pf)
sh(cmd, outfile=outfile)
return outfile
|
def merylhistogram(merylfile)
|
Run meryl to dump the histogram to be used in kmer.histogram(). The merylfile
is a file ending in .mcidx or .mcdat.
| 7.375899 | 7.091737 | 1.040069 |
p = OptionParser(multihistogram.__doc__)
p.add_option("--kmin", default=15, type="int",
help="Minimum K-mer size, inclusive")
p.add_option("--kmax", default=30, type="int",
help="Maximum K-mer size, inclusive")
p.add_option("--vmin", default=2, type="int",
help="Minimum value, inclusive")
p.add_option("--vmax", default=100, type="int",
help="Maximum value, inclusive")
opts, args, iopts = p.set_image_options(args, figsize="10x5", dpi=300)
if len(args) < 1:
sys.exit(not p.print_help())
histfiles = args[:-1]
species = args[-1]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
A = fig.add_axes([.08, .12, .38, .76])
B = fig.add_axes([.58, .12, .38, .76])
lines = []
legends = []
genomesizes = []
for histfile in histfiles:
ks = KmerSpectrum(histfile)
x, y = ks.get_xy(opts.vmin, opts.vmax)
K = get_number(op.basename(histfile).split(".")[0].split("-")[-1])
if not opts.kmin <= K <= opts.kmax:
continue
line, = A.plot(x, y, '-', lw=1)
lines.append(line)
legends.append("K = {0}".format(K))
ks.analyze(K=K)
genomesizes.append((K, ks.genomesize / 1e6))
leg = A.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(.5)
title = "{0} genome K-mer histogram".format(species)
A.set_title(markup(title))
xlabel, ylabel = "Coverage (X)", "Counts"
A.set_xlabel(xlabel)
A.set_ylabel(ylabel)
set_human_axis(A)
title = "{0} genome size estimate".format(species)
B.set_title(markup(title))
x, y = zip(*genomesizes)
B.plot(x, y, "ko", mfc='w')
t = np.linspace(opts.kmin - .5, opts.kmax + .5, 100)
p = np.poly1d(np.polyfit(x, y, 2))
B.plot(t, p(t), "r:")
xlabel, ylabel = "K-mer size", "Estimated genome size (Mb)"
B.set_xlabel(xlabel)
B.set_ylabel(ylabel)
set_ticklabels_helvetica(B)
labels = ((.04, .96, 'A'), (.54, .96, 'B'))
panel_labels(root, labels)
normalize_axes(root)
imagename = species + ".multiK.pdf"
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
|
def multihistogram(args)
|
%prog multihistogram *.histogram species
Plot the histogram based on a set of K-mer histograms. The method is based
on Star et al.'s method (Atlantic cod genome paper).
| 2.475554 | 2.42689 | 1.020052 |
p = OptionParser(histogram.__doc__)
p.add_option("--vmin", dest="vmin", default=1, type="int",
help="minimum value, inclusive [default: %default]")
p.add_option("--vmax", dest="vmax", default=100, type="int",
help="maximum value, inclusive [default: %default]")
p.add_option("--pdf", default=False, action="store_true",
help="Print PDF instead of ASCII plot [default: %default]")
p.add_option("--coverage", default=0, type="int",
help="Kmer coverage [default: auto]")
p.add_option("--nopeaks", default=False, action="store_true",
help="Do not annotate K-mer peaks")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
histfile, species, N = args
ascii = not opts.pdf
peaks = not opts.nopeaks
N = int(N)
if histfile.rsplit(".", 1)[-1] in ("mcdat", "mcidx"):
logging.debug("CA kmer index found")
histfile = merylhistogram(histfile)
ks = KmerSpectrum(histfile)
ks.analyze(K=N)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = ks.max2 if not coverage else coverage
Genome_size = int(round(Total_Kmers * 1. / Kmer_coverage))
Total_Kmers_msg = "Total {0}-mers: {1}".format(N, thousands(Total_Kmers))
Kmer_coverage_msg = "{0}-mer coverage: {1}".format(N, Kmer_coverage)
Genome_size_msg = "Estimated genome size: {0:.1f}Mb".\
format(Genome_size / 1e6)
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print(msg, file=sys.stderr)
x, y = ks.get_xy(opts.vmin, opts.vmax)
title = "{0} {1}-mer histogram".format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (6, 6))
plt.plot(x, y, 'g-', lw=2, alpha=.5)
ax = plt.gca()
if peaks:
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for x, y in ks.counts if x in t]
if tcounts:
x, y = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, 'ko', lw=2, mec='k', mfc='w')
ax.text(ks.max1, tcounts[ks.max1], "SNP peak", va="top")
ax.text(ks.max2, tcounts[ks.max2], "Main peak")
messages = [Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg,
Repetitive_msg, SNPrate_msg]
write_messages(ax, messages)
ymin, ymax = ax.get_ylim()
ymax = ymax * 7 / 6
ax.set_title(markup(title))
ax.set_ylim((ymin, ymax))
xlabel, ylabel = "Coverage (X)", "Counts"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
set_human_axis(ax)
imagename = histfile.split(".")[0] + ".pdf"
savefig(imagename, dpi=100)
return Genome_size
|
def histogram(args)
|
%prog histogram meryl.histogram species N
Plot the histogram based on the meryl K-mer distribution; species and N are
only used to annotate the graphic.
| 3.191788 | 3.034657 | 1.051779 |
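The genome size reported by histogram() is simply the total K-mer count divided by the K-mer coverage at the main peak; a worked example with hypothetical numbers:

total_kmers = 12500000000  # 12.5 billion 23-mers counted (hypothetical)
kmer_coverage = 25         # coverage at the main histogram peak
genome_size = int(round(total_kmers * 1. / kmer_coverage))
print("Estimated genome size: {0:.1f}Mb".format(genome_size / 1e6))  # 500.0Mb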
db = get_abs_path(db)
nin = db + ".nin"
run_formatdb(infile=db, outfile=nin)
cmd = "blastn"
cmd += " -task blastn"
cmd += " -query {0} -db {1} -out {2}".format(infile, db, outfile)
cmd += " -penalty -5 -gapopen 4 -gapextend 4 -dust yes -soft_masking true"
cmd += " -searchsp 1750000000000 -evalue 0.01 -outfmt 6 -num_threads 8"
sh(cmd)
|
def run_vecscreen(infile=None, outfile=None, db="UniVec_Core",
pctid=None, hitlen=None)
|
BLASTN parameters reference:
http://www.ncbi.nlm.nih.gov/VecScreen/VecScreen_docs.html
| 4.233618 | 4.274263 | 0.990491 |
from itertools import product
from jcvi.apps.grid import MakeManager
from jcvi.formats.base import split
p = OptionParser(nucmer.__doc__)
p.add_option("--chunks", type="int",
help="Split both query and subject into chunks")
p.set_params(prog="nucmer", params="-l 100 -c 500")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, query = args
cpus = opts.cpus
nrefs = nqueries = opts.chunks or int(cpus ** .5)
refdir = ref.split(".")[0] + "-outdir"
querydir = query.split(".")[0] + "-outdir"
reflist = split([ref, refdir, str(nrefs)]).names
querylist = split([query, querydir, str(nqueries)]).names
mm = MakeManager()
for i, (r, q) in enumerate(product(reflist, querylist)):
pf = "{0:04d}".format(i)
cmd = "nucmer -maxmatch"
cmd += " {0}".format(opts.extra)
cmd += " {0} {1} -p {2}".format(r, q, pf)
deltafile = pf + ".delta"
mm.add((r, q), deltafile, cmd)
print(cmd)
mm.write()
|
def nucmer(args)
|
%prog nucmer ref.fasta query.fasta
Run NUCMER using query against reference. Parallel implementation derived
from: <https://github.com/fritzsedlazeck/sge_mummer>
| 3.415491 | 3.422866 | 0.997846 |
from jcvi.apps.grid import MakeManager
from jcvi.utils.iter import grouper
p = OptionParser(blasr.__doc__)
p.set_cpus(cpus=8)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
reffasta, fofn = args
flist = sorted([x.strip() for x in open(fofn)])
h5list = []
mm = MakeManager()
for i, fl in enumerate(grouper(flist, 3)):
chunkname = "chunk{0:03d}".format(i)
fn = chunkname + ".fofn"
h5 = chunkname + ".cmp.h5"
fw = open(fn, "w")
print("\n".join(fl), file=fw)
fw.close()
cmd = "pbalign {0} {1} {2}".format(fn, reffasta, h5)
cmd += " --nproc {0} --forQuiver --tmpDir .".format(opts.cpus)
mm.add((fn, reffasta), h5, cmd)
h5list.append(h5)
# Merge h5, sort and repack
allh5 = "all.cmp.h5"
tmph5 = "tmp.cmp.h5"
cmd_merge = "cmph5tools.py merge --outFile {0}".format(allh5)
cmd_merge += " " + " ".join(h5list)
cmd_sort = "cmph5tools.py sort --deep {0} --tmpDir .".format(allh5)
cmd_repack = "h5repack -f GZIP=1 {0} {1}".format(allh5, tmph5)
cmd_repack += " && mv {0} {1}".format(tmph5, allh5)
mm.add(h5list, allh5, [cmd_merge, cmd_sort, cmd_repack])
# Quiver
pf = reffasta.rsplit(".", 1)[0]
variantsgff = pf + ".variants.gff"
consensusfasta = pf + ".consensus.fasta"
cmd_faidx = "samtools faidx {0}".format(reffasta)
cmd = "quiver -j 32 {0}".format(allh5)
cmd += " -r {0} -o {1} -o {2}".format(reffasta, variantsgff, consensusfasta)
mm.add(allh5, consensusfasta, [cmd_faidx, cmd])
mm.write()
|
def blasr(args)
|
%prog blasr ref.fasta fofn
Run blasr on a set of PacBio reads. This is based on a divide-and-conquer
strategy: align reads in chunks with pbalign, merge and sort the cmp.h5
files, then polish the reference with Quiver.
| 2.90783 | 2.873731 | 1.011866 |
p = OptionParser(blat.__doc__)
p.set_align(pctid=95, hitlen=30)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
reffasta, queryfasta = args
blastfile = get_outfile(reffasta, queryfasta, suffix="blat")
run_blat(infile=queryfasta, outfile=blastfile, db=reffasta,
pctid=opts.pctid, hitlen=opts.hitlen, cpus=opts.cpus,
overwrite=False)
return blastfile
|
def blat(args)
|
%prog blat ref.fasta query.fasta
Calls blat and filters BLAST hits.
| 2.884622 | 2.706221 | 1.065923 |
task_choices = ("blastn", "blastn-short", "dc-megablast", \
"megablast", "vecscreen")
p = OptionParser(blast.__doc__)
p.set_align(pctid=0, evalue=.01)
p.add_option("--wordsize", type="int", help="Word size [default: %default]")
p.add_option("--best", default=1, type="int",
help="Only look for best N hits [default: %default]")
p.add_option("--task", default="megablast", choices=task_choices,
help="Task of the blastn [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
reffasta, queryfasta = args
blastfile = get_outfile(reffasta, queryfasta)
run_megablast(infile=queryfasta, outfile=blastfile, db=reffasta,
wordsize=opts.wordsize, pctid=opts.pctid, evalue=opts.evalue,
hitlen=None, best=opts.best, task=opts.task, cpus=opts.cpus)
return blastfile
|
def blast(args)
|
%prog blast ref.fasta query.fasta
Calls blast and then filters the BLAST hits. Default is megablast.
| 3.146474 | 2.989973 | 1.052342 |
from jcvi.apps.grid import MakeManager
p = OptionParser(lastgenome.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gA, gB = args
mm = MakeManager()
bb = lambda x : op.basename(x).rsplit(".", 1)[0]
gA_pf, gB_pf = bb(gA), bb(gB)
# Build LASTDB
dbname = "-".join((gA_pf, "NEAR"))
dbfile = dbname + ".suf"
build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbname, gA)  # lastdb takes the db prefix, not the .suf file
mm.add(gA, dbfile, build_db_cmd)
# Run LASTAL
maffile = "{}.{}.1-1.maf".format(gA_pf, gB_pf)
lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB)
lastal_cmd += " | last-split -m1"
lastal_cmd += " | maf-swap"
lastal_cmd += " | last-split -m1 -fMAF > {}".format(maffile)
mm.add([dbfile, gB], maffile, lastal_cmd)
# Convert to BLAST format
blastfile = maffile.replace(".maf", ".blast")
convert_cmd = "maf-convert -n blasttab {} > {}".format(maffile, blastfile)
mm.add(maffile, blastfile, convert_cmd)
mm.write()
|
def lastgenome(args)
|
%prog lastgenome genome_A.fasta genome_B.fasta
Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on
tutorial here:
<https://github.com/mcfrith/last-genome-alignments>
The script runs the following steps:
$ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa
$ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap | last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf
$ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast
Works with LAST v959.
| 3.931587 | 2.280395 | 1.724082 |
p = OptionParser(last.__doc__)
p.add_option("--dbtype", default="nucl",
choices=("nucl", "prot"),
help="Molecule type of subject database")
p.add_option("--path", help="Specify LAST path")
p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb")
p.add_option("--format", default="BlastTab",
choices=("TAB", "MAF", "BlastTab", "BlastTab+"),
help="Output format")
p.add_option("--minlen", default=0, type="int",
help="Filter alignments by how many bases match")
p.add_option("--minid", default=0, type="int", help="Minimum sequence identity")
p.set_cpus()
p.set_params()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
subject, query = args
path = opts.path
cpus = opts.cpus
if not dbtype:
dbtype = opts.dbtype
getpath = lambda x: op.join(path, x) if path else x
lastdb_bin = getpath("lastdb")
lastal_bin = getpath("lastal")
subjectdb = subject.rsplit(".", 1)[0]
run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \
lastdb_bin=lastdb_bin, dbtype=dbtype)
u = 2 if opts.mask else 0
cmd = "{0} -u {1}".format(lastal_bin, u)
cmd += " -P {0} -i3G".format(cpus)
cmd += " -f {0}".format(opts.format)
cmd += " {0} {1}".format(subjectdb, query)
minlen = opts.minlen
minid = opts.minid
extra = opts.extra
assert minid != 100, "Perfect match not yet supported"
mm = minid / (100 - minid)
if minlen:
extra += " -e{0}".format(minlen)
if minid:
extra += " -r1 -q{0} -a{0} -b{0}".format(mm)
if extra:
cmd += " " + extra.strip()
lastfile = get_outfile(subject, query, suffix="last")
sh(cmd, outfile=lastfile)
|
def last(args, dbtype=None)
|
%prog last database.fasta query.fasta
Run LAST by calling LASTDB and LASTAL. LAST program available:
<http://last.cbrc.jp>
Works with LAST-719.
| 3.593852 | 3.313951 | 1.084461 |
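The --minid handling in last() converts a percent-identity floor into LAST scoring parameters: with match score r = 1, setting the mismatch and gap penalties to m = minid / (100 - minid) makes a substitution-only alignment at exactly minid% identity score about zero. A worked example (integer division, matching the Python 2 code above):

minid = 96
mm = minid // (100 - minid)  # 96 // 4 = 24
print("lastal -r1 -q{0} -a{0} -b{0}".format(mm))  # lastal -r1 -q24 -a24 -b24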
p = OptionParser(extract.__doc__)
p.add_option("--format", default=False, action="store_true",
help="enable flag to reformat header into a symbol separated list of constituent reads "+ \
"[default: %default]")
p.add_option("--singlets", default=False, action="store_true",
help="ask the program to look in the singlets file (should be in the same folder) for " +\
"unused reads and put them in the resultant fasta file [default: %default]")
p.set_sep(sep="|", help="Separator used to list the reads in the FASTA header")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
acefile, = args
ace = Ace.read(must_open(acefile))
logging.debug('Loaded ace file {0}'.format(acefile))
fastafile = acefile.rsplit(".", 1)[0] + ".fasta"
fw = open(fastafile, "w")
for c in ace.contigs:
id = c.name
if opts.format:
id = opts.sep.join([read.name for read in c.af])
seqrec = SeqRecord(Seq(c.sequence), id=id, description="")
SeqIO.write([seqrec], fw, "fasta")
if opts.singlets:
singletsfile = acefile.rsplit(".", 1)[0] + ".singlets"
if os.path.getsize(singletsfile) > 0:
fp = SeqIO.parse(must_open(singletsfile), "fasta")
for rec in fp:
SeqIO.write(rec, fw, "fasta")
fw.close()
logging.debug('Wrote contigs to fasta file {0}'.format(fastafile))
|
def extract(args)
|
%prog extract [--options] ace_file
Extract contigs from an ace file and, if necessary, reformat the header with
a pipe (|)-separated list of constituent reads.
| 3.594482 | 3.140971 | 1.144386 |
from jcvi.utils.table import tabulate
p = OptionParser(report.__doc__)
types = {"read": ["padded_start", "padded_end", "orient"],
"consensus": ["padded_consensus_start", "padded_consensus_end"],
"quality" : ["qual_clipping_start", "qual_clipping_end", "align_clipping_start", "align_clipping_end"]
}
valid_types = tuple(types.keys())
p.add_option("--type", default="read", choices=valid_types,
help="choose report type [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
acefile, = args
ace = Ace.read(must_open(acefile))
logging.debug('Loaded ace file {0}'.format(acefile))
for c in ace.contigs:
print(c.name)
table = dict()
if opts.type == "read":
ps = [read.padded_start for read in c.af]
# span to the next read's padded_start; the last entry falls back to contig length
pe = [ps[i] - ps[i - 1] for i in xrange(1, len(ps))] + [c.nbases]
spans = dict(zip(ps, pe))
for read in c.af:
values = [str(x) for x in (read.padded_start, spans[read.padded_start], read.coru)]
for i, label in enumerate(types[opts.type]):
table[(str(read.name), label)] = values[i]
elif opts.type == "consensus":
for read in c.bs:
values = [str(x) for x in (read.padded_start, read.padded_end)]
for i, label in enumerate(types[opts.type]):
table[(str(read.name), label)] = values[i]
elif opts.type == "quality":
for read in c.reads:
(r1, r2) = (read.rd, read.qa)
values = [str(x) for x in (r2.qual_clipping_start, r2.qual_clipping_end, r2.align_clipping_start, r2.align_clipping_end)]
for i, label in enumerate(types[opts.type]):
table[(str(r1.name), label)] = values[i]
print(tabulate(table), "\n")
|
def report(args)
|
%prog report [--options] ace_file > report
Prepare a report of read locations, consensus locations or quality segments per contig.
| 2.728382 | 2.595306 | 1.051276 |
p = OptionParser(correct.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
lstfile = "reads2cor.lst"
fw = open(lstfile, "w")
print("\n".join(x for x in args if x[:2] == "PE"), file=fw)
fw.close()
p1 = args[0]
offset = guessoffset([p1])
cpus = opts.cpus
freq = "output.freq.cz"
freqlen = freq + ".len"
if need_update(args, (freq, freqlen)):
cmd = "KmerFreq_AR_v2.0 -k 17 -c -1 -q {0}".format(offset)
cmd += " -m 1 -t {0}".format(cpus)
cmd += " -p output {0}".format(lstfile)
sh(cmd)
fw = open(lstfile, "w")
print("\n".join(args), file=fw)
fw.close()
cmd = "Corrector_AR_v2.0 -k 17 -l 3 -m 5 -c 5 -a 0 -e 1 -w 0 -r 45"
cmd += " -Q {0} -q 30 -x 8 -t {1} -o 1 ".format(offset, cpus)
cmd += " {0} {1} {2}".format(freq, freqlen, lstfile)
sh(cmd)
|
def correct(args)
|
%prog correct *.fastq
Correct reads using ErrorCorrection. Only PE will be used to build the K-mer
table.
| 4.266564 | 4.066124 | 1.049295 |
p = OptionParser(clean.__doc__)
p.add_option("-a", default=0, type="int",
help="Trim length at 5' end [default: %default]")
p.add_option("-b", default=50, type="int",
help="Trim length at 3' end [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) == 2:
p1, p2 = args
size = get_size(p1)
elif len(args) == 3:
p1, p2, size = args
size = int(size)
else:
sys.exit(not p.print_help())
pf = p1.split(".")[0]
cpus = opts.cpus
offset = guessoffset([p1])
a, b = opts.a, opts.b
p1_clean = p1 + ".clean"
p1_cleangz = p1_clean + ".gz"
p2_clean = p2 + ".clean"
p2_cleangz = p2_clean + ".gz"
if need_update([p1, p2], [p1_cleangz, p2_cleangz]):
cmd = "SOAPfilter_v2.0 -t {0} -m 2000000 -p -y -z -g".format(cpus)
cmd += " -q {0} -w 10 -B 50 -f 0".format(offset)
cmd += " -l {0} -a {1} -b {2} -c {1} -d {2}".format(size, a, b, a, b)
cmd += " {0} {1} {2}.clean.stat {3} {4}".\
format(p1, p2, pf, p1_clean, p2_clean)
sh(cmd)
|
def clean(args)
|
%prog clean 1.fastq 2.fastq [insertsize]
Clean and dedup paired FASTQ files.
| 2.889029 | 2.766288 | 1.04437 |
from jcvi.utils.cbook import SummaryStats, percentage, thousands
p = OptionParser(fillstats.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fillfile, = args
fp = open(fillfile)
scaffolds = 0
gaps = []
for row in fp:
if row[0] == ">":
scaffolds += 1
continue
fl = FillLine(row)
gaps.append(fl)
print("{0} scaffolds in total".format(scaffolds), file=sys.stderr)
closed = [x for x in gaps if x.closed]
closedbp = sum(x.before for x in closed)
notClosed = [x for x in gaps if not x.closed]
notClosedbp = sum(x.before for x in notClosed)
totalgaps = len(closed) + len(notClosed)
print("Closed gaps: {0} size: {1} bp".\
format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr)
ss = SummaryStats([x.after for x in closed])
print(ss, file=sys.stderr)
ss = SummaryStats([x.delta for x in closed])
print("Delta:", ss, file=sys.stderr)
print("Remaining gaps: {0} size: {1} bp".\
format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr)
ss = SummaryStats([x.after for x in notClosed])
print(ss, file=sys.stderr)
|
def fillstats(args)
|
%prog fillstats genome.fill
Build stats on .fill file from GapCloser.
| 2.572788 | 2.539694 | 1.013031 |
from jcvi.formats.base import write_file
p = OptionParser(prepare.__doc__ + FastqNamings)
p.add_option("-K", default=45, type="int",
help="K-mer size [default: %default]")
p.add_option("--assemble_1st_rank_only", default=False, action="store_true",
help="Assemble the first rank only, other libs asm_flags=2 [default: %default]")
p.add_option("--scaffold",
help="Only perform scaffolding [default: %default]")
p.add_option("--gapclose",
help="Only perform gap closure [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fnames = args
K = opts.K
for x in fnames:
assert op.exists(x), "File `{0}` not found.".format(x)
a1st = opts.assemble_1st_rank_only
cfgfile = "soap.config"
gc_cfgfile = "soap.gc.config"
fw = open(cfgfile, "w")
fw_gc = open(gc_cfgfile, "w")
libs = get_libs(fnames)
rank = 0
max_rd_len = max(readlen([f]) for f in fnames)
block = "max_rd_len={0}\n".format(max_rd_len)
for stream in (sys.stderr, fw, fw_gc):
print(block, file=stream)
# Collect singletons first
singletons = []
for lib, fs in libs:
if lib.size == 0:
singletons += fs
continue
for lib, fs in libs:
size = lib.size
if size == 0:
continue
rank += 1
block = "[LIB]\n"
block += "avg_ins={0}\n".format(size)
f = fs[0]
block += "reverse_seq={0}\n".format(lib.reverse_seq)
asm_flags = 2 if (rank > 1 and a1st) else lib.asm_flags
block += "asm_flags={0}\n".format(asm_flags)
block += "rank={0}\n".format(rank)
if lib.reverse_seq:
pair_num_cutoff = 3
block += "pair_num_cutoff={0}\n".format(pair_num_cutoff)
block += "map_len=35\n"
for f in fs:
if ".1." in f:
tag = "q1"
elif ".2." in f:
tag = "q2"
block += "{0}={1}\n".format(tag, f)
if rank == 1:
for s in singletons:
tag = "q" if is_fastq(s) else "f"
block += tag + "={0}\n".format(s)
print(block, file=sys.stderr)
print(block, file=fw)
if asm_flags > 2:
print(block, file=fw_gc)
runfile = "run.sh"
scaffold = opts.scaffold
bb = 63 if K <= 63 else 127
binary = "SOAPdenovo-{0}mer".format(bb)
header = SOAPHEADER.format(opts.cpus, K, binary)
if opts.gapclose:
gapclose = opts.gapclose
outfile = gapclose.rsplit(".", 1)[0] + ".closed.fasta"
template = header + GCRUNG.format(gapclose, outfile)
else:
template = header + (SCFRUN % scaffold if scaffold else SOAPRUN)
write_file(runfile, template)
fw.close()
fw_gc.close()
|
def prepare(args)
|
%prog prepare *.fastq
Scan input fastq files (see below) and write SOAP config files based
on the input files. Use "--scaffold contigs.fasta" to perform scaffolding.
| 3.758321 | 3.601738 | 1.043474 |
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
|
def elements(self)
|
Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
| 4.717317 | 1.980368 | 2.38204 |
p = OptionParser(bed.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pslfile, = args
fw = must_open(opts.outfile, "w")
psl = Psl(pslfile)
for p in psl:
print(p.bed12line, file=fw)
|
def bed(args)
|
%prog bed pslfile
Convert to bed format.
| 2.811676 | 2.500688 | 1.124361 |
p = OptionParser(gff.__doc__)
p.add_option("--source", default="GMAP",
help="specify GFF source [default: %default]")
p.add_option("--type", default="EST_match",
help="specify GFF feature type [default: %default]")
p.add_option("--suffix", default=".match",
help="match ID suffix [default: \"%default\"]")
p.add_option("--swap", default=False, action="store_true",
help="swap query and target features [default: %default]")
p.add_option("--simple_score", default=False, action="store_true",
help="calculate a simple percent score [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pslfile, = args
fw = must_open(opts.outfile, "w")
print("##gff-version 3", file=fw)
psl = Psl(pslfile)
for p in psl:
if opts.swap:
p.swap
psl.trackMatches(p.qName)
# switch from 0-origin to 1-origin
p.qStart += 1
p.tStart += 1
print(p.gffline(source=opts.source, type=opts.type, suffix=opts.suffix, \
primary_tag="ID", alt_score=opts.simple_score, \
count=psl.getMatchCount(p.qName)), file=fw)
# create an empty PslLine() object and load only
# the targetName, queryName and strand info
part = PslLine("\t".join(str(x) for x in [0] * p.nargs))
part.tName, part.qName, part.strand = p.tName, p.qName, p.strand
nparts = len(p.qStarts)
for n in xrange(nparts):
part.qStart, part.tStart, aLen = p.qStarts[n] + 1, p.tStarts[n] + 1, p.blockSizes[n]
part.qEnd = part.qStart + aLen - 1
part.tEnd = part.tStart + aLen - 1
if part.strand == "-":
part.qStart = p.qSize - (p.qStarts[n] + p.blockSizes[n]) + 1
part.qEnd = p.qSize - p.qStarts[n]
print(part.gffline(source=opts.source, suffix=opts.suffix, \
count=psl.getMatchCount(part.qName)), file=fw)
|
def gff(args)
|
%prog gff pslfile
Convert to gff format.
| 3.00756 | 2.978796 | 1.009656 |
last = self.blockCount - 1
return (self.strand == "+" and
self.tEnd == self.tStarts[last] + 3 * self.blockSizes[last]) or \
(self.strand == "-" and
self.tStart == self.tSize - (self.tStarts[last] + 3 * self.blockSizes[last]))
|
def _isProtein(self)
|
check if blockSizes and scores are in the protein space or not
| 4.637963 | 4.024746 | 1.152362 |
sizeMult = self._sizeMult
qAlnSize, tAlnSize = self.qspan * sizeMult, self.tspan
alnSize = min(qAlnSize, tAlnSize)
if alnSize <= 0:
return 0
sizeDiff = qAlnSize - tAlnSize
if sizeDiff < 0:
sizeDiff = 0 if ismRNA else -sizeDiff
insertFactor = self.qNumInsert
if not ismRNA:
insertFactor += self.tNumInsert
total = (self.matches + self.repMatches + self.misMatches) * sizeMult
return (1000 * (self.misMatches * sizeMult + insertFactor + \
round(3 * math.log(1 + sizeDiff)))) / total if total != 0 else 0
|
def _milliBad(self, ismRNA=False)
|
calculate badness in parts per thousand
i.e. number of non-identical matches
| 4.853752 | 4.695283 | 1.033751 |
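_milliBad follows BLAT's pslCalcMilliBad, and percent identity is conventionally reported as 100 - milliBad / 10. A toy nucleotide case (sizeMult = 1, no inserts, no size difference; a simplified restatement, not the method itself):

import math

matches, repMatches, misMatches = 950, 0, 50
qNumInsert, sizeDiff = 0, 0
total = matches + repMatches + misMatches
milliBad = 1000 * (misMatches + qNumInsert +
                   round(3 * math.log(1 + sizeDiff))) / total
print(100 - milliBad / 10.0)  # 95.0 percent identity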
prefix = glob(dir + "*.gkpStore")[0]
prefix = op.basename(prefix).rsplit(".", 1)[0]
return prefix
|
def get_prefix(dir="../")
|
Look for prefix.gkpStore in the upper directory.
| 10.761915 | 4.4382 | 2.424838 |
from jcvi.formats.base import read_block
p = OptionParser(cnsfix.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
cnsfixout, = args
fp = open(cnsfixout)
utgs = []
saves = []
for header, contents in read_block(fp, "Evaluating"):
contents = list(contents)
utg = header.split()[2]
utgs.append(utg)
# Look for this line:
# save fragment idx=388 ident=206054426 for next pass
for c in contents:
if not c.startswith("save"):
continue
ident = c.split()[3].split("=")[-1]
saves.append(ident)
print("\n".join(saves))
|
def cnsfix(args)
|
%prog cnsfix consensus-fix.out.FAILED > blacklist.ids
Parse consensus-fix.out to extract layouts for fixed unitigs. This will
mark all the failed fragments detected by utgcnsfix and pop them out of the
existing unitigs.
| 4.900616 | 4.295329 | 1.140918 |