code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if any(st.startswith(x) for x in exclude): sep = None st = st.split('|')[0] if sep and sep in st: name, suffix = st.rsplit(sep, 1) else: name, suffix = st, "" # We only want to remove suffix that are isoforms, longer suffix would # suggest that it is part of the right gene name if len(suffix) != 1: name = st return name
def gene_name(st, exclude=("ev",), sep=".")
Helper function in the BLAST filtering to get rid of alternative splicings. This is ugly, but different annotation groups are inconsistent with respect to how the alternative splicings are named. Mostly it can be done by removing the suffix, except for the ones in the exclude list.
6.28528
5.770352
1.089237
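A brief usage sketch of gene_name() above; the gene IDs are made-up examples and the expected results are read off the code rather than produced by running it.

gene_name("AT5G54690.2")    # -> "AT5G54690": a single-character suffix is treated as an isoform and stripped
gene_name("AT5G54690.12")   # -> "AT5G54690.12": a longer suffix is assumed to be part of the gene name itself
gene_name("evm.model.1")    # -> "evm.model.1": names starting with an entry in `exclude` are left untouched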
seqid = seqid.split(';')[0] if "mito" in seqid or "chloro" in seqid: return (seqid, "", "") numbers = re.findall(r'\d+\.*\d*', seqid) if not numbers: return (seqid, "", "") id = numbers[-1] lastnumi = seqid.rfind(id) suffixi = lastnumi + len(id) suffix = seqid[suffixi:] if sep is None: sep = [""] elif type(sep) == str: sep = [sep] prefix = seqid[: lastnumi] if not stdpf: sep = "|".join(sep) atoms = re.split(sep, prefix) if len(atoms) == 1: prefix = atoms[0] else: prefix = atoms[-2] prefix = prefix.replace("Chromosome", "Chr") else: # use standard prefix if re.findall("chr", prefix, re.I): prefix = "Chr" if re.findall("lg", prefix, re.I): prefix = "LG" elif re.findall("sca", prefix, re.I): prefix = "Sca" elif re.findall("supercontig", prefix, re.I): prefix = "SCg" elif re.findall("ctg|contig", prefix, re.I): prefix = "Ctg" elif re.findall("BAC", prefix, re.I): prefix = "BAC" else: prefix = "C" return prefix, id, suffix
def seqid_parse(seqid, sep=["-"], stdpf=True)
This function tries to parse the seqid (1st column in BED files) and return the prefix, numeric id, and suffix, for example: >>> seqid_parse('chr1_random') ('Chr', '1', '_random') >>> seqid_parse('AmTr_v1.0_scaffold00001', '', stdpf=False) ('AmTr_v1.0_scaffold', '00001', '') >>> seqid_parse('AmTr_v1.0_scaffold00001') ('Sca', '00001', '') >>> seqid_parse('PDK_30s1055861') ('C', '1055861', '') >>> seqid_parse('PDK_30s1055861', stdpf=False) ('PDK', '1055861', '') >>> seqid_parse("AC235758.1", stdpf=False) ('AC', '235758.1', '')
2.869544
2.823437
1.01633
import re mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)") mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+") zmays_pat = re.compile( r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+") zmays_sub = {'mitochondrion': 'Mt', 'chloroplast': 'Pt'} if orgn == "medicago": for mtr_pat in (mtr_pat1, mtr_pat2): match = re.search(mtr_pat, name) if match: n = match.group(1) n = n.replace("0", "") name = re.sub(mtr_pat, n, name) elif orgn == "maize": match = re.search(zmays_pat, name) if match: n = match.group(1) name = re.sub(zmays_pat, n, name) if name in zmays_sub: name = zmays_sub[name] return name
def fixChromName(name, orgn="medicago")
Convert quirky chromosome names encountered in different release files, which are very project specific, into a more general format. For example, in Medicago, convert a seqid like `Mt3.5.1_Chr1` to `chr1`, `Mt3.5_Chr3` to `chr3`, and `chr01_pseudomolecule_IMGAG` to `chr1`. Some examples from Maize: convert a seqid like `chromosome:AGPv2:2:1:237068873:1` to `2`. Special cases: `chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`, `chromosome:AGPv2:chloroplast:1:140384:1` to `Pt`.
2.75806
2.548409
1.082267
texts = [] for i in xrange(0, len(text), width): t = delimiter.join(text[i:i + width]) texts.append(t) return "\n".join(texts)
def fill(text, delimiter="", width=70)
Wrap the text so that each line contains at most `width` elements, joined by `delimiter`.
2.581239
2.557261
1.009376
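A minimal sketch of fill() above on a short string (the default width is 70; a small width is used here only to make the wrapping visible).

fill("ACGTACGTAC", width=4)                 # -> "ACGT\nACGT\nAC"
fill("ACGTACGTAC", delimiter="-", width=4)  # -> "A-C-G-T\nA-C-G-T\nA-C"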
from jcvi.utils.iter import grouper max_len = max(len(x) for x in lt) + gap items_per_line = max(width // max_len, 1) lt = [x.rjust(max_len) for x in lt] g = list(grouper(lt, items_per_line, fillvalue="")) return "\n".join("".join(x) for x in g)
def tile(lt, width=70, gap=1)
Pretty print list of items.
2.761941
2.605944
1.059862
seen = set() nL = [] for a in L: if a in seen: continue nL.append(a) seen.add(a) return nL
def uniqify(L)
Uniqify a list while maintaining order (the first occurrence is kept).
2.412162
2.566266
0.93995
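uniqify() above keeps the first occurrence of each element, unlike a plain set(), which loses the order.

uniqify(["b", "a", "b", "c", "a"])   # -> ["b", "a", "c"]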
if xlim: ax.set_xlim(0, xlim) if ylim: ax.set_ylim(0, ylim) if xlabel: xticklabels = [int(round(x * xfactor)) for x in ax.get_xticks()] ax.set_xticklabels(xticklabels, family='Helvetica') ax.set_xlabel(xlabel) else: ax.set_xticks([]) if ylabel: yticklabels = [int(round(x * yfactor)) for x in ax.get_yticks()] ax.set_yticklabels(yticklabels, family='Helvetica') ax.set_ylabel(ylabel) else: ax.set_yticks([])
def normalize_lms_axis(ax, xlim=None, ylim=None, xfactor=1e-6, yfactor=1, xlabel=None, ylabel="Map (cM)")
Normalize the axis limits and labels to beautify the axis.
1.56769
1.55866
1.005794
from jcvi.graphics.base import plt, savefig, normalize_axes, panel_labels, set2 p = OptionParser(estimategaps.__doc__) opts, args, iopts = p.set_image_options(args, figsize="6x6", dpi=300) if len(args) != 3: sys.exit(not p.print_help()) pf, seqid, mlg = args bedfile = pf + ".lifted.bed" agpfile = pf + ".agp" function = lambda x: x.cm cc = Map(bedfile, scaffold_info=True, function=function) agp = AGP(agpfile) g = GapEstimator(cc, agp, seqid, mlg, function=function) pp, chrsize, mlgsize = g.pp, g.chrsize, g.mlgsize spl, spld = g.spl, g.spld g.compute_all_gaps(verbose=False) fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) # Panel A xstart, ystart = .15, .55 w, h = .7, .4 t = np.linspace(0, chrsize, 1000) ax = fig.add_axes([xstart, ystart, w, h]) mx, my = zip(*g.scatter_data) rho = spearmanr(mx, my) dsg = "g" ax.vlines(pp, 0, mlgsize, colors="beige") ax.plot(mx, my, ".", color=set2[3]) ax.plot(t, spl(t), "-", color=dsg) ax.text(.05, .95, mlg, va="top", transform=ax.transAxes) normalize_lms_axis(ax, xlim=chrsize, ylim=mlgsize, ylabel="Genetic distance (cM)") if rho < 0: ax.invert_yaxis() # Panel B ystart = .1 ax = fig.add_axes([xstart, ystart, w, h]) ax.vlines(pp, 0, mlgsize, colors="beige") ax.plot(t, spld(t), "-", lw=2, color=dsg) ax.plot(pp, spld(pp), "o", mfc="w", mec=dsg, ms=5) normalize_lms_axis(ax, xlim=chrsize, ylim=25 * 1e-6, xfactor=1e-6, xlabel="Physical position (Mb) on {}".format(seqid), yfactor=1000000, ylabel="Recomb. rate\n(cM / Mb)") ax.xaxis.grid(False) labels = ((.05, .95, 'A'), (.05, .5, 'B')) panel_labels(root, labels) normalize_axes(root) pf = "plotratio" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def plotratio(args)
%prog plotratio JM-2 chr23 JMMale-23 Illustrate the physical vs. map distance ratio that was used in the gap estimation algorithm.
3.852827
3.777419
1.019963
from math import ceil from random import choice from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord p = OptionParser(fake.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) inputbed, = args bed = Bed(inputbed) recs = [] for seqid, sb in bed.sub_beds(): maxend = max(x.end for x in sb) size = int(ceil(maxend / 1000.) * 1000) seq = "".join([choice("ACGT") for x in xrange(size)]) rec = SeqRecord(Seq(seq), id=seqid, description="") recs.append(rec) fw = must_open(opts.outfile, "w") SeqIO.write(recs, fw, "fasta")
def fake(args)
%prog fake input.bed Make a fake `scaffolds.fasta`. The use case for this is that sometimes I receive just the csv/bed file and I'd like to use path() out of the box.
2.517413
2.37971
1.057866
nmarkers = len(markers) s = [bonus] * nmarkers # score f = [-1] * nmarkers # from for i in xrange(1, nmarkers): for j in xrange(i): mi, mj = markers[i], markers[j] t = bonus if mi.mlg == mj.mlg else penalty + bonus if s[i] < s[j] + t: s[i] = s[j] + t f[i] = j # Recover the highest scoring chain highest_score = max(s) si = s.index(highest_score) onchain = set() while True: if si < 0: break si = f[si] onchain.add(si) return [x for i, x in enumerate(markers) if i in onchain]
def compute_score(markers, bonus, penalty)
Compute the chain score using dynamic programming. If a marker is in the same linkage group as the previous one, we add the bonus; otherwise, we penalize the chain for switching.
3.455638
3.198802
1.080291
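A hedged illustration of compute_score() above, using a throwaway stand-in for the Marker class (the real Marker parses BED rows; only the .mlg attribute is consulted here). The bonus/penalty values mirror what split() computes for --chunk=4.

from collections import namedtuple

FakeMarker = namedtuple("FakeMarker", "mlg")  # hypothetical stand-in, not the real class
markers = [FakeMarker(m) for m in ("lg1", "lg1", "lg2", "lg1", "lg1")]
chain = compute_score(markers, bonus=2, penalty=-7)
# Switching to the lone "lg2" marker costs more than it gains, so the
# retained chain is expected to contain only "lg1" markers.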
p = OptionParser(split.__doc__) p.add_option("--chunk", default=4, type="int", help="Split chunks of at least N markers") p.add_option("--splitsingle", default=False, action="store_true", help="Split breakpoint range right in the middle") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) inputbed, = args bonus = 2 nchunk = opts.chunk nbreaks = 0 penalty = -(nchunk * bonus - 1) bed = Bed(inputbed) for seqid, bb in bed.sub_beds(): markers = [Marker(x) for x in bb] markers = compute_score(markers, bonus, penalty) for mi, mj in pairwise(markers): if mi.mlg == mj.mlg: continue assert mi.seqid == mj.seqid start, end = mi.pos, mj.pos if start > end: start, end = end, start if opts.splitsingle: start = end = (start + end) / 2 print("\t".join(str(x) for x in (mi.seqid, start - 1, end))) nbreaks += 1 logging.debug("A total of {} breakpoints inferred (--chunk={})".\ format(nbreaks, nchunk))
def split(args)
%prog split input.bed Split suspicious scaffolds. Suspicious scaffolds are those that contain chunks that map to more than one linkage group. The chunk size can be modified through the --chunk option.
4.012805
3.860473
1.03946
p = OptionParser(movie.__doc__) p.add_option("--gapsize", default=100, type="int", help="Insert gaps of size between scaffolds") add_allmaps_plot_options(p) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) inputbed, scaffoldsfasta, seqid = args gapsize = opts.gapsize pf = inputbed.rsplit(".", 1)[0] agpfile = pf + ".chr.agp" tourfile = pf + ".tour" fp = open(tourfile) sizes = Sizes(scaffoldsfasta).mapping ffmpeg = "ffmpeg" mkdir(ffmpeg) score = cur_score = None i = 1 for header, block in read_block(fp, ">"): s, tag, label = header[1:].split() if s != seqid: continue tour = block[0].split() tour = [(x[:-1], x[-1]) for x in tour] if label.startswith("GA"): cur_score = label.split("-")[-1] if cur_score == score: i += 1 continue score = cur_score image_name = ".".join((seqid, "{0:04d}".format(i), label, "pdf")) if need_update(tourfile, image_name): fwagp = must_open(agpfile, "w") order_to_agp(seqid, tour, sizes, fwagp, gapsize=gapsize, gaptype="map") fwagp.close() logging.debug("{0} written to `{1}`".format(header, agpfile)) build([inputbed, scaffoldsfasta, "--cleanup"]) pdf_name = plot([inputbed, seqid, "--title={0}".format(label)]) sh("mv {0} {1}".format(pdf_name, image_name)) if label in ("INIT", "FLIP", "TSP", "FINAL"): for j in xrange(5): # Delay for 5 frames image_delay = image_name.rsplit(".", 1)[0] + \ ".d{0}.pdf".format(j) sh("cp {0} {1}/{2}".format(image_name, ffmpeg, image_delay)) else: sh("cp {0} {1}/".format(image_name, ffmpeg)) i += 1 make_movie(ffmpeg, pf)
def movie(args)
%prog movie input.bed scaffolds.fasta chr1 Visualize history of scaffold OO. The history is contained within the tourfile, generated by path(). For each historical scaffold OO, the program plots a separate PDF file. The plots can be combined to show the progression as a little animation. The third argument limits the plotting to a specific pseudomolecule, for example `chr1`.
4.765559
4.481025
1.063498
os.chdir(workdir) if format != "png": cmd = "parallel convert -density {}".format(dpi) cmd += " {} {.}.png ::: " + "*.{}".format(format) sh(cmd) assert engine in ("ffmpeg", "gifsicle"), \ "Only ffmpeg or gifsicle is currently supported" if engine == "ffmpeg": cmd = "ffmpeg -framerate {} -pattern_type glob -i '*.png' {}.mp4"\ .format(fps, pf) elif engine == "gifsicle": cmd = "convert *.png gif:- |" cmd += " gifsicle --delay {} --loop --optimize=3".format(100 / fps) cmd += " --colors=256 --multifile - > {}.gif".format(pf) sh(cmd)
def make_movie(workdir, pf, dpi=120, fps=1, format="pdf", engine="ffmpeg")
Make the movie using either ffmpeg or gifsicle.
5.051775
4.849501
1.04171
p = OptionParser(estimategaps.__doc__) p.add_option("--minsize", default=100, type="int", help="Minimum gap size") p.add_option("--maxsize", default=500000, type="int", help="Maximum gap size") p.add_option("--links", default=10, type="int", help="Only use linkage grounds with matchings more than") p.set_verbose(help="Print details for each gap calculation") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) inputbed, = args pf = inputbed.rsplit(".", 1)[0] agpfile = pf + ".chr.agp" bedfile = pf + ".lifted.bed" cc = Map(bedfile, scaffold_info=True) agp = AGP(agpfile) minsize, maxsize = opts.minsize, opts.maxsize links = opts.links verbose = opts.verbose outagpfile = pf + ".estimategaps.agp" fw = must_open(outagpfile, "w") for ob, components in agp.iter_object(): components = list(components) s = Scaffold(ob, cc) mlg_counts = s.mlg_counts gaps = [x for x in components if x.is_gap] gapsizes = [None] * len(gaps) # master for mlg, count in mlg_counts.items(): if count < links: continue g = GapEstimator(cc, agp, ob, mlg) g.compute_all_gaps(minsize=minsize, maxsize=maxsize, \ verbose=verbose) # Merge evidence from this mlg into master assert len(g.gapsizes) == len(gaps) for i, gs in enumerate(gapsizes): gg = g.gapsizes[i] if gs is None: gapsizes[i] = gg elif gg: gapsizes[i] = min(gs, gg) print(gapsizes) # Modify AGP i = 0 for x in components: if x.is_gap: x.gap_length = gapsizes[i] or minsize x.component_type = 'U' if x.gap_length == 100 else 'N' i += 1 print(x, file=fw) fw.close() reindex([outagpfile, "--inplace"])
def estimategaps(args)
%prog estimategaps input.bed Estimate sizes of inter-scaffold gaps. The AGP file generated by the path() command has unknown gap sizes with a generic number of Ns (often 100 Ns). The AGP file `input.chr.agp` will be modified in-place.
3.791502
3.487802
1.087075
p = OptionParser(merge.__doc__) p.add_option("-w", "--weightsfile", default="weights.txt", help="Write weights to file") p.set_outfile("out.bed") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) maps = args outfile = opts.outfile fp = must_open(maps) b = Bed() mapnames = set() for row in fp: mapname = filename_to_mapname(fp.filename()) mapnames.add(mapname) try: m = CSVMapLine(row, mapname=mapname) if m.cm < 0: logging.error("Ignore marker with negative genetic distance") print(row.strip(), file=sys.stderr) else: b.append(BedLine(m.bedline)) except (IndexError, ValueError): # header or mal-formed line continue b.print_to_file(filename=outfile, sorted=True) logging.debug("A total of {0} markers written to `{1}`.".\ format(len(b), outfile)) assert len(maps) == len(mapnames), "You have a collision in map names" write_weightsfile(mapnames, weightsfile=opts.weightsfile)
def merge(args)
%prog merge map1 map2 map3 ... Convert csv maps to bed format. Each input map is csv formatted, for example: ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition scaffold_2707,11508,1,0 scaffold_2707,11525,1,1.2 scaffold_759,81336,1,9.7
4.355606
4.189596
1.039624
p = OptionParser(mergebed.__doc__) p.add_option("-w", "--weightsfile", default="weights.txt", help="Write weights to file") p.set_outfile("out.bed") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) maps = args outfile = opts.outfile fp = must_open(maps) b = Bed() mapnames = set() for row in fp: mapname = filename_to_mapname(fp.filename()) mapnames.add(mapname) try: m = BedLine(row) m.accn = "{0}-{1}".format(mapname, m.accn) m.extra = ["{0}:{1}".format(m.seqid, m.start)] b.append(m) except (IndexError, ValueError): # header or mal-formed line continue b.print_to_file(filename=outfile, sorted=True) logging.debug("A total of {0} markers written to `{1}`.".\ format(len(b), outfile)) assert len(maps) == len(mapnames), "You have a collision in map names" write_weightsfile(mapnames, weightsfile=opts.weightsfile)
def mergebed(args)
%prog mergebed map1.bed map2.bed map3.bed ... Combine BED maps into a single BED file, adding the map name to each marker.
3.813087
3.535367
1.078555
p = OptionParser(summary.__doc__) p.set_table(sep="|", align=True) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) inputbed, scaffolds = args pf = inputbed.rsplit(".", 1)[0] mapbed = pf + ".bed" chr_agp = pf + ".chr.agp" sep = opts.sep align = opts.align cc = Map(mapbed) mapnames = cc.mapnames s = Sizes(scaffolds) total, l50, n50 = s.summary r = {} maps = [] fw = must_open(opts.outfile, "w") print("*** Summary for each individual map ***", file=fw) for mapname in mapnames: markers = [x for x in cc if x.mapname == mapname] ms = MapSummary(markers, l50, s) r["Linkage Groups", mapname] = ms.num_lgs ms.export_table(r, mapname, total) maps.append(ms) print(tabulate(r, sep=sep, align=align), file=fw) r = {} agp = AGP(chr_agp) print("*** Summary for consensus map ***", file=fw) consensus_scaffolds = set(x.component_id for x in agp if not x.is_gap) oriented_scaffolds = set(x.component_id for x in agp \ if (not x.is_gap) and x.orientation != '?') unplaced_scaffolds = set(s.mapping.keys()) - consensus_scaffolds for mapname, sc in (("Anchored", consensus_scaffolds), ("Oriented", oriented_scaffolds), ("Unplaced", unplaced_scaffolds)): markers = [x for x in cc if x.seqid in sc] ms = MapSummary(markers, l50, s, scaffolds=sc) ms.export_table(r, mapname, total) print(tabulate(r, sep=sep, align=align), file=fw)
def summary(args)
%prog summary input.bed scaffolds.fasta Print out summary statistics per map, followed by consensus summary of scaffold anchoring based on multiple maps.
4.04833
3.681411
1.099668
p = OptionParser(build.__doc__) p.add_option("--cleanup", default=False, action="store_true", help="Clean up bulky FASTA files, useful for plotting") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) inputbed, scaffolds = args pf = inputbed.rsplit(".", 1)[0] mapbed = pf + ".bed" chr_agp = pf + ".chr.agp" chr_fasta = pf + ".chr.fasta" if need_update((chr_agp, scaffolds), chr_fasta): agp_build([chr_agp, scaffolds, chr_fasta]) unplaced_agp = pf + ".unplaced.agp" if need_update((chr_agp, scaffolds), unplaced_agp): write_unplaced_agp(chr_agp, scaffolds, unplaced_agp) unplaced_fasta = pf + ".unplaced.fasta" if need_update((unplaced_agp, scaffolds), unplaced_fasta): agp_build([unplaced_agp, scaffolds, unplaced_fasta]) combined_agp = pf + ".agp" if need_update((chr_agp, unplaced_agp), combined_agp): FileMerger((chr_agp, unplaced_agp), combined_agp).merge() combined_fasta = pf + ".fasta" if need_update((chr_fasta, unplaced_fasta), combined_fasta): FileMerger((chr_fasta, unplaced_fasta), combined_fasta).merge() chainfile = pf + ".chain" if need_update((combined_agp, scaffolds, combined_fasta), chainfile): fromagp([combined_agp, scaffolds, combined_fasta]) liftedbed = mapbed.rsplit(".", 1)[0] + ".lifted.bed" if need_update((mapbed, chainfile), liftedbed): cmd = "liftOver -minMatch=1 {0} {1} {2} unmapped".\ format(mapbed, chainfile, liftedbed) sh(cmd, check=True) if opts.cleanup: FileShredder([chr_fasta, unplaced_fasta, combined_fasta, chainfile, unplaced_agp, combined_fasta + ".sizes", "unmapped"]) sort([liftedbed, "-i"])
def build(args)
%prog build input.bed scaffolds.fasta Build associated genome FASTA file and CHAIN file that can be used to lift old coordinates to new coordinates. The CHAIN file will be used to lift the original marker positions to new positions in the reconstructed genome. The new positions of the markers will be reported in *.lifted.bed.
2.672446
2.481131
1.077108
p = OptionParser(plotall.__doc__) add_allmaps_plot_options(p) opts, args, iopts = p.set_image_options(xargs, figsize="10x6") if len(args) != 1: sys.exit(not p.print_help()) inputbed, = args pf = inputbed.rsplit(".", 1)[0] agpfile = pf + ".chr.agp" agp = AGP(agpfile) objects = [ob for ob, lines in agp.iter_object()] for seqid in natsorted(objects): plot(xargs + [seqid])
def plotall(xargs)
%prog plotall input.bed Plot the matchings between the reconstructed pseudomolecules and the maps. This command will plot each reconstructed object (non-singleton).
5.071934
4.480288
1.132055
scaffolds_ii = dict((s, i) for i, s in enumerate(scaffolds)) scfs = [] ww = [] for mlg in self.linkage_groups: w = float(weights[mlg.mapname]) scf = {} for s, o in tour: si = scaffolds_ii[s] scf[si] = self.get_series(mlg.lg, s, orientation=o) scfs.append(scf) ww.append(w) tour = [scaffolds_ii[x] for x, o in tour] return scfs, tour, ww
def prepare_ec(self, scaffolds, tour, weights)
Prepare Evolutionary Computation. This converts scaffold names into integer indices in the scaffolds array.
4.978389
4.769024
1.043901
linkage_groups = self.linkage_groups for mlg in linkage_groups: mapname = mlg.mapname if mapname == self.pivot: pivot_position = mlg.position for mlg in linkage_groups: position = mlg.position # Flip order if path goes in the opposite direction to the pivot common = [] for a, ap in position.items(): if a not in pivot_position: continue pp = pivot_position[a] common.append((ap, pp)) mlg.rho = get_rho(common) if mlg.rho < 0: mlg.path = mlg.path[::-1] mlg.populate_pairwise_distance() # Preparation of TSP distances = defaultdict(list) for mlg in linkage_groups: mapname = mlg.mapname position = mlg.position length = mlg.length path = mlg.path rho = mlg.rho dd = mlg.distances for a, b in combinations(path, 2): d = dd[a, b] distances[a, b].append((d, mapname)) for p in path: adist, bdist = position[p], length - position[p] if rho < 0: adist, bdist = bdist, adist distances[START, p].append((adist, mapname)) distances[p, END].append((bdist, mapname)) self.distances = distances tour = self.distances_to_tour() return tour
def assign_order(self)
The goal is to assign scaffold orders. To help order the scaffolds, two dummy nodes, START and END, mark the ends of the chromosome. We connect START to each scaffold (directed), and each scaffold to END.
3.912723
3.894997
1.004551
if not si or not sj: return 0 # Same orientation configuration a = lms(si + sj) b = lms(sj + si) # Opposite orientation configuration c = lms(si + sj[::-1]) d = lms(sj[::-1] + si) return max(a, b)[0] - max(c, d)[0]
def get_orientation(self, si, sj)
si, sj are two number series. Compute whether these two series have the same orientation or not: we combine them in the two orientation configurations and compute the length of the longest monotonic series.
3.737711
3.12305
1.196814
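get_orientation() relies on an lms() helper that is not shown in this record. Below is a hypothetical stand-in, assuming lms() returns a tuple whose first element is the length of the longest monotonic (non-decreasing or non-increasing) subsequence, which matches how max(a, b)[0] is used above; the real helper may differ.

def lms_sketch(series):
    # O(n^2) DP for the longest non-decreasing subsequence; the non-increasing
    # case is covered by scanning the reversed series.
    def longest(seq):
        best = [1] * len(seq)
        for i in range(1, len(seq)):
            for j in range(i):
                if seq[j] <= seq[i]:
                    best[i] = max(best[i], best[j] + 1)
        return max(best) if best else 0
    series = list(series)
    return (max(longest(series), longest(series[::-1])),)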
scaffolds, oos = zip(*tour) keep = set() for mlg in self.linkage_groups: lg = mlg.lg for s, o in tour: i = scaffolds.index(s) L = [self.get_series(lg, x, xo) for x, xo in tour[:i]] U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]] L, U = list(flatten(L)), list(flatten(U)) M = self.get_series(lg, s, o) score_with = lms(L + M + U)[0] score_without = lms(L + U)[0] assert score_with >= score_without if score_with > score_without: keep.add(s) dropped = len(tour) - len(keep) logging.debug("Dropped {0} minor scaffolds".format(dropped)) return [(s, o) for (s, o) in tour if s in keep]
def fix_tour(self, tour)
Test each scaffold and drop it if doing so does not decrease the LMS.
3.899885
3.44769
1.131159
orientations = dict(tour) # old configuration here scaffold_oo = defaultdict(list) scaffolds, oos = zip(*tour) for mlg in self.linkage_groups: lg = mlg.lg mapname = mlg.mapname for s, o in tour: i = scaffolds.index(s) L = [self.get_series(lg, x, xo) for x, xo in tour[:i]] U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]] L, U = list(flatten(L)), list(flatten(U)) M = self.get_series(lg, s) plus = lms(L + M + U) minus = lms(L + M[::-1] + U) d = plus[0] - minus[0] if not d: continue scaffold_oo[s].append((d, mapname)) # reset orientation fixed = 0 for s, v in scaffold_oo.items(): d = self.weighted_mean(v) old_d = orientations[s] new_d = np.sign(d) if new_d != old_d: orientations[s] = new_d fixed += 1 tour = [(x, orientations[x]) for x in scaffolds] logging.debug("Fixed orientations for {0} scaffolds.".format(fixed)) return tour
def fix_orientation(self, tour)
Test each scaffold to see if flipping it will increase the longest monotonic chain length.
4.317675
4.161523
1.037523
for x in self.spinchars: self.string = self.msg + "...\t" + x + "\r" self.out.write(self.string.encode('utf-8')) self.out.flush() time.sleep(self.waittime)
def spin(self)
Perform a single spin
5.608404
5.498442
1.019999
return ["{}_{}_{}".format(name, i, x) for i, x in enumerate(seq)]
def make_sequence(seq, name="S")
Make unique nodes for sequence graph.
3.96896
3.467851
1.144501
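make_sequence() above simply tags every character with the sequence name and its index, so the same base at different positions yields distinct graph nodes.

make_sequence("GATC")   # -> ['S_0_G', 'S_1_A', 'S_2_T', 'S_3_C']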
for x in seq: if x.endswith("_1"): # Mutation G.node(x, color=color, width="0.1", shape="circle", label="") else: G.node(x, color=color) for a, b in pairwise(seq): G.edge(a, b, color=color)
def sequence_to_graph(G, seq, color='black')
Automatically construct graph given a sequence of characters.
3.55874
3.66732
0.970393
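A hedged sketch tying make_sequence() and sequence_to_graph() together. It assumes G is a graphviz Digraph, which is what the node()/edge()/subgraph() calls in these records appear to target, and it uses the character "1" as a made-up mutation marker since nodes whose names end in "_1" are drawn as small unlabeled circles.

from graphviz import Digraph

G = Digraph()
seq = make_sequence("AC1T", name="hap1")   # "1" at index 2 stands in for a mutated base
sequence_to_graph(G, seq, color="blue")
# G.source now holds the DOT text; G.render("hap1") would write and lay it out.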
for s in zip(*allseqs): groups = defaultdict(list) for x in s: part = x.split('_', 1)[1] groups[part].append(x) for part, g in groups.items(): with G.subgraph(name="cluster_" + part) as c: for x in g: c.node(x) c.attr(style="invis")
def zip_sequences(G, allseqs, color="white")
Fuse certain nodes together, if they contain the same data except for the sequence name.
3.037631
3.20296
0.948383
from jcvi.apps.base import iglob from jcvi.utils.iter import grouper p = OptionParser(gallery.__doc__) p.add_option("--columns", default=3, type="int", help="How many cells per row") p.add_option("--width", default=200, type="int", help="Image width") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, link_prefix = args width = opts.width images = iglob(folder, "*.jpg,*.JPG,*.png") td = '<td>{0}<br><a href="{1}"><img src="{1}" width="{2}"></a></td>' print("<table>") for ims in grouper(images, opts.columns): print('<tr height="{0}" valign="top">'.format(width + 5)) for im in ims: if not im: continue im = op.basename(im) pf = im.split('.')[0].replace('_', '-') link = link_prefix.rstrip("/") + "/" + im print(td.format(pf, link, width)) print("</tr>") print("</table>")
def gallery(args)
%prog gallery folder link_prefix Convert a folder of figures to a HTML table. For example: $ python -m jcvi.formats.html gallery Paper-figures/ https://dl.dropboxusercontent.com/u/15937715/Data/Paper-figures/ Maps the images from local to remote.
2.702742
2.559673
1.055894
p = OptionParser(links.__doc__) p.add_option("--img", default=False, action="store_true", help="Extract <img> tags [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) url, = args img = opts.img htmlfile = download(url) page = open(htmlfile).read() soup = BeautifulSoup(page) tag = 'img' if img else 'a' src = 'src' if img else 'href' aa = soup.findAll(tag) for a in aa: link = a.get(src) link = urljoin(url, link) print(link)
def links(args)
%prog links url Extract all the links "<a href=''>" from a web page.
2.453413
2.381401
1.030239
import HTMLParser hp = HTMLParser.HTMLParser() s = hp.unescape(s) s = s.encode('ascii', unicode_action) s = s.replace("\n", "").strip() return s
def unescape(s, unicode_action="replace")
Unescape HTML strings, converting entities such as &amp; back to plain characters.
3.080714
2.965429
1.038876
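This is Python 2 code (the HTMLParser module became html.parser in Python 3). A small usage sketch of unescape() above:

unescape("Tom &amp; Jerry &lt;br&gt;")   # -> "Tom & Jerry <br>"
unescape(u"caf\xe9")                     # -> "caf?" with the default unicode_action="replace"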
import csv p = OptionParser(table.__doc__) p.set_sep(sep=",") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) htmlfile, = args page = open(htmlfile).read() soup = BeautifulSoup(page) for i, tabl in enumerate(soup.findAll('table')): nrows = 0 csvfile = htmlfile.rsplit(".", 1)[0] + ".{0}.csv".format(i) writer = csv.writer(open(csvfile, "w"), delimiter=opts.sep) rows = tabl.findAll('tr') for tr in rows: cols = tr.findAll('td') if not cols: cols = tr.findAll('th') row = [] for td in cols: try: cell = "".join(td.find(text=True)) cell = unescape(cell) except TypeError: cell = "" row.append(cell) writer.writerow(row) nrows += 1 logging.debug("Table with {0} rows written to `{1}`.".format(nrows, csvfile))
def table(args)
%prog table page.html Convert HTML tables to csv.
2.446561
2.262822
1.081199
p = OptionParser(mask.__doc__) p.add_option("--db", help="Contaminant db other than Ecoli K12 [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args assert op.exists(fastafile) outfastafile = fastafile.rsplit(".", 1)[0] + ".masked.fasta" vecbedfile = blast([fastafile]) ecoliurl = \ "ftp://ftp.ncbi.nih.gov/genomes/Bacteria/Escherichia_coli_K_12_substr__DH10B_uid58979/NC_010473.fna" ecolifile = opts.db or download(ecoliurl, filename="Ecoli.fasta") assert op.exists(ecolifile) ecolibedfile = blast([fastafile, "--db={0}".format(ecolifile)]) cmd = "cat {0} {1}".format(vecbedfile, ecolibedfile) cmd += " | mergeBed -nms -d 100 -i stdin" cmd += " | maskFastaFromBed -fi {0} -bed stdin -fo {1}".\ format(fastafile, outfastafile) sh(cmd) return tidy([outfastafile])
def mask(args)
%prog mask fastafile Mask the contaminants. By default, this will compare against UniVec_Core and Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`. Can perform FASTA tidy if requested.
4.26233
3.80051
1.121515
p = OptionParser(blast.__doc__) p.add_option("--dist", default=100, type="int", help="Merge adjacent HSPs separated by [default: %default]") p.add_option("--db", help="Use a different database rather than UniVec_Core") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args fastaprefix = fastafile.split(".", 1)[0] univec = opts.db or download("ftp://ftp.ncbi.nih.gov/pub/UniVec/UniVec_Core") uniprefix = univec.split(".", 1)[0] fastablast = fastaprefix + ".{0}.blast".format(uniprefix) prog = run_megablast if opts.db else run_vecscreen prog(infile=fastafile, outfile=fastablast, db=univec, pctid=95, hitlen=50) fp = open(fastablast) ranges = [] for row in fp: b = BlastLine(row) ranges.append((b.query, b.qstart, b.qstop)) merged_ranges = range_merge(ranges, dist=opts.dist) bedfile = fastaprefix + ".{0}.bed".format(uniprefix) fw = must_open(bedfile, "w") for seqid, start, end in merged_ranges: print("\t".join(str(x) for x in (seqid, start - 1, end, uniprefix)), file=fw) return bedfile
def blast(args)
%prog blast fastafile Run BLASTN against a database (default is UniVec_Core) and output the vector/contaminant ranges in .bed format.
3.610006
3.252125
1.110045
query = op.basename(query) targets = [op.basename(x) for x in targets] prefix_lengths = [len(op.commonprefix([query, name])) for name in targets] if remove_self and len(query) in prefix_lengths: prefix_lengths.remove(len(query)) longest_length = max(prefix_lengths) return query[:longest_length + 1]
def longest_unique_prefix(query, targets, remove_self=True)
Find the longest unique prefix for filename, when compared against a list of filenames. Useful to simplify file names in a pool of files. See usage in formats.fasta.pool().
2.575189
2.507737
1.026898
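A worked example for longest_unique_prefix() above: the result is one character longer than the longest prefix the query shares with any other file in the pool (the filenames are made up).

longest_unique_prefix("apple.fasta", ["apple.fasta", "apricot.fasta", "peach.fasta"])
# -> "app": "ap" is shared with "apricot.fasta", so one extra character disambiguates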
if op.exists(filename): if oappend: return oappend logging.error("`{0}` found, overwrite (Y/N)?".format(filename)) overwrite = (raw_input() == 'Y') else: overwrite = True return overwrite
def check_exists(filename, oappend=False)
Avoid overwriting some files accidentally.
5.061045
4.955388
1.021322
if isinstance(filename, list): assert "r" in mode if filename[0].endswith((".gz", ".bz2")): filename = " ".join(filename) # allow opening multiple gz/bz2 files else: import fileinput return fileinput.input(filename) if filename.startswith("s3://"): from jcvi.utils.aws import pull_from_s3 filename = pull_from_s3(filename) if filename in ("-", "stdin"): assert "r" in mode fp = sys.stdin elif filename == "stdout": assert "w" in mode fp = sys.stdout elif filename == "stderr": assert "w" in mode fp = sys.stderr elif filename == "tmp" and mode == "w": from tempfile import NamedTemporaryFile fp = NamedTemporaryFile(delete=False) elif filename.endswith(".gz"): if 'r' in mode: cmd = "gunzip -c {0}".format(filename) fp = popen(cmd, debug=False) elif 'w' in mode: import gzip fp = gzip.open(filename, mode) elif filename.endswith(".bz2"): if 'r' in mode: cmd = "bzcat {0}".format(filename) fp = popen(cmd, debug=False) elif 'w' in mode: import bz2 fp = bz2.BZ2File(filename, mode) else: if checkexists: assert mode == "w" overwrite = (not op.exists(filename)) if skipcheck \ else check_exists(filename, oappend) if overwrite: if oappend: fp = open(filename, "a") else: fp = open(filename, "w") else: logging.debug("File `{0}` already exists. Skipped."\ .format(filename)) return None else: fp = open(filename, mode) return fp
def must_open(filename, mode="r", checkexists=False, skipcheck=False, \ oappend=False)
Accepts a filename and returns a file handle. Handles lists of files, stdin/stdout/stderr, and .gz or .bz2 files.
2.264172
2.224583
1.017796
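Typical ways must_open() above gets called, as read from the branches in the code; the filenames are placeholders.

fp = must_open("markers.bed")                      # plain file, read mode
fp = must_open("reads.fastq.gz")                   # streamed through `gunzip -c`
fp = must_open("-")                                # "-" or "stdin" maps to sys.stdin
fw = must_open("out.txt", "w", checkexists=True)   # prompts before overwriting an existing file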
signal_len = len(signal) it = (x[1] for x in groupby(handle, key=lambda row: row.strip()[:signal_len] == signal)) found_signal = False for header in it: header = list(header) for h in header[:-1]: h = h.strip() if h[:signal_len] != signal: continue yield h, [] # Header only, no contents header = header[-1].strip() if header[:signal_len] != signal: continue found_signal = True seq = list(s.strip() for s in next(it)) yield header, seq if not found_signal: handle.seek(0) seq = list(s.strip() for s in handle) yield None, seq
def read_block(handle, signal)
Useful for reading block-like file formats, for example FASTA or OBO files. Such files usually start with some signal, and the lines in between the signals form a record.
3.282244
3.3181
0.989194
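A hedged sketch of iterating a FASTA file with read_block() above; "reads.fasta" is a placeholder filename.

fp = must_open("reads.fasta")
for header, lines in read_block(fp, ">"):
    # header is the ">..." line (or None if the signal never appears);
    # lines holds the stripped body lines of the record
    print(header, sum(len(x) for x in lines))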
import string d = "".join(x for x in str(s) if x in string.digits) return cast(d)
def get_number(s, cast=int)
Try to get a number out of a string, and cast it.
5.043029
4.468465
1.128582
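get_number() above keeps only the digit characters before casting, which is worth illustrating because the decimal point is dropped too.

get_number("chr10")         # -> 10
get_number("scaffold_77")   # -> 77
get_number("12.5")          # -> 125, not 12.5: every non-digit, "." included, is removed first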
p = OptionParser(seqids.__doc__) p.add_option("--pad0", default=0, help="How many zeros to pad") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) prefix, start, end = args pad0 = opts.pad0 start, end = int(start), int(end) step = 1 if start <= end else -1 print(",".join(["{}{:0{}d}".format(prefix, x, pad0) \ for x in xrange(start, end + step, step)]))
def seqids(args)
%prog seqids prefix start end Make a list of seqids for graphics.karyotype. For example: $ python -m jcvi.formats.base seqids chromosome_ 1 3 chromosome_1,chromosome_2,chromosome_3 $ python -m jcvi.formats.base seqids A 3 1 --pad0=2 A03,A02,A01
2.51209
2.215166
1.134041
from itertools import combinations p = OptionParser(pairwise.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) idsfile, = args ids = SetFile(idsfile) ids = sorted(ids) fw = open(idsfile + ".pairs", "w") for a, b in combinations(ids, 2): print("\t".join((a, b)), file=fw) fw.close()
def pairwise(args)
%prog pairwise ids Convert a list of IDs into all pairs.
2.442253
2.372696
1.029316
p = OptionParser(append.__doc__) p.set_sep() p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) if nargs not in (1, 2): sys.exit(not p.print_help()) csvfile = args[0] tag = args[1] if nargs == 2 else csvfile fp = must_open(csvfile) fw = must_open(opts.outfile, "w") for row in fp: row = row.rstrip("\r\n") row = opts.sep.join((row, tag)) print(row, file=fw)
def append(args)
%prog append csvfile [tag] Append a column with a fixed value. If tag is missing, the filename is appended instead.
2.746809
2.411354
1.139115
p = OptionParser(truncate.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) number, filename = args number = int(number) count = 0 f = open(filename, "r+b") f.seek(0, os.SEEK_END) while f.tell() > 0: f.seek(-1, os.SEEK_CUR) char = f.read(1) if char == '\n': count += 1 if count == number + 1: f.truncate() print("Removed {0} lines from end of file".format(number), file=sys.stderr) return number f.seek(-1, os.SEEK_CUR) if count < number + 1: print("No change: requested removal would leave empty file", file=sys.stderr) return -1
def truncate(args)
%prog truncate linecount filename Remove linecount lines from the end of the file in-place. Borrowed from: <http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
2.323172
2.22612
1.043597
from six.moves import zip_longest p = OptionParser(flatten.__doc__) p.set_sep(sep=",") p.add_option("--zipflatten", default=None, dest="zipsep", help="Specify if columns of the file should be zipped before" + " flattening. If so, specify delimiter separating column elements" + " [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) tabfile, = args zipsep = opts.zipsep fp = must_open(tabfile) for row in fp: if zipsep: row = row.rstrip() atoms = row.split(opts.sep) frows = [] for atom in atoms: frows.append(atom.split(zipsep)) print("\n".join([zipsep.join(x) for x in list(zip_longest(*frows, fillvalue="na"))])) else: print(row.strip().replace(opts.sep, "\n"))
def flatten(args)
%prog flatten filename > ids Convert a list of IDs (say, multiple IDs per line) and move them into one per line. For example, convert this, to this: A,B,C | A 1 | B a,4 | C | 1 | a | 4 If multi-column file with multiple elements per column, zip then flatten like so: A,B,C 2,10,gg | A,2 1,3 4 | B,10 | C,gg | 1,4 | 3,na
3.859816
3.618029
1.066828
from jcvi.utils.cbook import AutoVivification from jcvi.utils.grouper import Grouper p = OptionParser(group.__doc__) p.set_sep() p.add_option("--groupby", default=None, type='int', help="Default column to groupby [default: %default]") p.add_option("--groupsep", default=',', help="Separator to join the grouped elements [default: `%default`]") p.add_option("--nouniq", default=False, action="store_true", help="Do not uniqify the grouped elements [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) tabfile, = args sep = opts.sep groupby = opts.groupby groupsep = opts.groupsep cols = [] grouper = AutoVivification() if groupby is not None else Grouper() fp = must_open(tabfile) for row in fp: row = row.rstrip() atoms = row.split(sep) if groupby is not None: if len(cols) < len(atoms): cols = [x for x in xrange(len(atoms))] if groupby not in cols: logging.error("groupby col index `{0}` is out of range".format(groupby)) sys.exit() key = atoms[groupby] for col in cols: if col == groupby: continue if not grouper[key][col]: grouper[key][col] = [] if opts.nouniq else set() if col < len(atoms): if groupsep in atoms[col]: for atom in atoms[col].split(groupsep): if opts.nouniq: grouper[key][col].append(atom) else: grouper[key][col].add(atom) else: if opts.nouniq: grouper[key][col].append(atoms[col]) else: grouper[key][col].add(atoms[col]) else: grouper.join(*atoms) for key in grouper: if groupby is not None: line = [] for col in cols: if col == groupby: line.append(key) elif col in grouper[key].keys(): line.append(groupsep.join(grouper[key][col])) else: line.append("na") print(sep.join(line)) else: print(groupsep.join(key))
def group(args)
%prog group tabfile > tabfile.grouped Given a tab-delimited file, either group all elements within the file or group the elements in the value column(s) based on the key (groupby) column For example, convert this | into this --------------------------------------- a 2 3 4 | a,2,3,4,5,6 a 5 6 | b,7,8 b 7 8 | c,9,10,11 c 9 | c 10 11 | If grouping by a particular column, convert this | into this: --------------------------------------------- a 2 3 4 | a 2,5 3,6 4 a 5 6 | b 7 8 b 7 8 | c 9,10 11 c 9 | c 10 11 | By default, it uniqifies all the grouped elements
2.313496
2.271797
1.018355
import csv p = OptionParser(reorder.__doc__) p.set_sep() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) tabfile, order = args sep = opts.sep order = [int(x) - 1 for x in order.split(",")] reader = csv.reader(must_open(tabfile), delimiter=sep) writer = csv.writer(sys.stdout, delimiter=sep) for row in reader: newrow = [row[x] for x in order] writer.writerow(newrow)
def reorder(args)
%prog reorder tabfile 1,2,4,3 > newtabfile Reorder columns in tab-delimited files. The above syntax will print out a new file with col-1,2,4,3 from the old file.
2.387422
2.110832
1.131034
p = OptionParser(split.__doc__) mode_choices = ("batch", "cycle", "optimal") p.add_option("--all", default=False, action="store_true", help="split all records [default: %default]") p.add_option("--mode", default="optimal", choices=mode_choices, help="Mode when splitting records [default: %default]") p.add_option("--format", choices=("fasta", "fastq", "txt", "clust"), help="input file format [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) filename, outdir, N = args fs = FileSplitter(filename, outputdir=outdir, format=opts.format, mode=opts.mode) if opts.all: logging.debug("option -all override N") N = fs.num_records else: N = min(fs.num_records, int(N)) assert N > 0, "N must be > 0" logging.debug("split file into %d chunks" % N) fs.split(N) return fs
def split(args)
%prog split file outdir N Split the file into N chunks. This allows splitting FASTA/FASTQ/TXT files properly at record boundaries. Split is useful for parallelization on input chunks. Option --mode controls how records are grouped into chunks. 1. batch - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc. 2. cycle - distribute records in Round Robin fashion 3. optimal - try to make split files of roughly similar sizes, using the LPT algorithm. This is the default.
3.170454
2.700151
1.174176
p = OptionParser(join.__doc__) p.add_option("--column", default="0", help="0-based column id, multiple values allowed [default: %default]") p.set_sep(multiple=True) p.add_option("--noheader", default=False, action="store_true", help="Do not print header [default: %default]") p.add_option("--na", default="na", help="Value for unjoined data [default: %default]") p.add_option("--compact", default=False, action="store_true", help="Do not repeat pivotal columns in output") p.add_option("--keysep", default=",", help="specify separator joining multiple elements in the key column" + " of the pivot file [default: %default]") p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) keysep = opts.keysep compact = opts.compact if len(args) < 2: sys.exit(not p.print_help()) na = opts.na c = opts.column if "," in c: cc = [int(x) for x in c.split(",")] else: cc = [int(c)] * nargs assert len(cc) == nargs, "Column index number != File number" s = opts.sep if "," in s: ss = [x for x in s.split(",")] else: ss = [s] * nargs assert len(ss) == nargs, "column separator number != File number" # Maintain the first file line order, and combine other files into it pivotfile = args[0] files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \ for f, c, s in zip(args, cc, ss)] otherfiles = files[1:] # The header contains filenames headers = [] for i, x in enumerate(files): ncols = x.ncols if i and compact: ncols -= 1 headers += [op.basename(x.filename)] * ncols header = "\t".join(headers) fp = must_open(pivotfile) fw = must_open(opts.outfile, "w") if not opts.noheader: print(header, file=fw) for row in fp: row = row.rstrip() atoms = row.split(ss[0]) newrow = atoms key = atoms[cc[0]] keys = key.split(keysep) if keysep in key else [key] for d in otherfiles: drows = list() for key in keys: krow = d.get(key, [na] * d.ncols) if compact: krow.pop(d.keypos) drows.append(krow) drow = [keysep.join(x) for x in list(zip(*drows))] newrow += drow print("\t".join(newrow), file=fw)
def join(args)
%prog join file1.txt(pivotfile) file2.txt .. Join tabular-like files based on common column. --column specifies the column index to pivot on. Use comma to separate multiple values if the pivot column is different in each file. Maintain the order in the first file. --sep specifies the column separators, default to tab. Use comma to separate multiple values if the column separator is different in each file.
3.244755
3.048787
1.064277
p = OptionParser(subset.__doc__) p.add_option("--column", default="0", help="0-based column id, multiple values allowed [default: %default]") p.set_sep(multiple=True) p.add_option("--pivot", default=1, type="int", help="1 for using order in file1, 2 for using order in \ file2 [default: %default]") p.set_outfile() opts, args = p.parse_args(args) nargs = len(args) if len(args) < 2: sys.exit(not p.print_help()) c = opts.column if "," in c: cc = [int(x) for x in c.split(",")] assert len(set(cc[1:])) == 1, \ "Multiple file2's must have same column index." cc = cc[0:2] else: cc = [int(c)] * 2 s = opts.sep if "," in s: ss = [x for x in s.split(",")] assert len(set(cc[1:])) == 1, \ "Multiple file2's must have same column separator." ss = ss[0:2] else: ss = [s] * 2 if nargs > 2: file2 = FileMerger(args[1:], outfile="concatenatedFile2").merge() else: file2 = args[1] newargs = [args[0], file2] files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \ for f, c, s in zip(newargs, cc, ss)] pivot = 0 if opts.pivot==1 else 1 fp = open(newargs[pivot]) fw = must_open(opts.outfile, "w") for row in fp: row = row.rstrip() atoms = row.split(ss[pivot]) key = atoms[cc[pivot]] d = files[1-pivot] if key in d: print(ss[0].join(files[0][key]), file=fw) if nargs > 2: FileShredder([file2])
def subset(args)
%prog subset file1.txt(pivotfile) file2.txt .. Subset a tabular-like file1 based on a common column with file2. Normally file1 should have unique row entries. If more than one file2 is provided, they must have the same column separators. Multiple file2's will be concatenated in the output. --column specifies the column index (0-based) to pivot on. Use comma to separate multiple values if the pivot column is different in each file. The order in the first file is maintained. --sep specifies the column separators, defaulting to tab. Use comma to separate multiple values if the column separator is different in each file.
3.404954
3.073195
1.107952
from jcvi.utils.natsort import natsorted p = OptionParser(setop.__doc__) p.add_option("--column", default=0, type="int", help="The column to extract, 0-based, -1 to disable [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) statement, = args fa, op, fb = statement.split() assert op in ('|', '&', '-', '^') column = opts.column fa = SetFile(fa, column=column) fb = SetFile(fb, column=column) if op == '|': t = fa | fb elif op == '&': t = fa & fb elif op == '-': t = fa - fb elif op == '^': t = fa ^ fb for x in natsorted(t): print(x)
def setop(args)
%prog setop "fileA & fileB" > newfile Perform set operations on files. The files (fileA and fileB) contain lists of ids. The operator is one of the four: |: union (elements found in either file) &: intersection (elements found in both) -: difference (elements in fileA but not in fileB) ^: symmetric difference (elements found in either set but not both) Please quote the argument to avoid the shell interpreting | and &.
2.629935
2.45147
1.072799
p = OptionParser(mergecsv.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) tsvfiles = args outfile = opts.outfile if op.exists(outfile): os.remove(outfile) tsvfile = tsvfiles[0] fw = must_open(opts.outfile, "w") for i, tsvfile in enumerate(tsvfiles): fp = open(tsvfile) if i > 0: next(fp) for row in fp: fw.write(row) fw.close()
def mergecsv(args)
%prog mergecsv *.tsv Merge a set of tsv files.
2.471963
2.294117
1.077523
batch_size = int(math.ceil(self.num_records / float(N))) handle = self._open(self.filename) while True: batch = list(islice(handle, batch_size)) if not batch: break yield batch
def _batch_iterator(self, N=1)
Returns N lists of records. This can be used on any iterator, for example to batch up SeqRecord objects from Bio.SeqIO.parse(...), or to batch Alignment objects from Bio.AlignIO.parse(...), or simply lines from a file handle. This is a generator function, and it returns lists of the entries from the supplied iterator. Each list will have batch_size entries, although the final list may be shorter.
3.67249
3.680108
0.99793
mode = self.mode assert mode in ("batch", "cycle", "optimal") logging.debug("set split mode=%s" % mode) self.names = self.__class__.get_names(self.filename, N) if self.outputdir: self.names = [op.join(self.outputdir, x) for x in self.names] if not need_update(self.filename, self.names) and not force: logging.error("file %s already existed, skip file splitting" % \ self.names[0]) return filehandles = [open(x, "w") for x in self.names] if mode == "batch": for batch, fw in zip(self._batch_iterator(N), filehandles): count = self.write(fw, batch) logging.debug("write %d records to %s" % (count, fw.name)) elif mode == "cycle": handle = self._open(self.filename) for record, fw in zip(handle, cycle(filehandles)): count = self.write(fw, [record]) elif mode == "optimal": endtime = [0] * N handle = self._open(self.filename) for record in handle: mt, mi = min((x, i) for (i, x) in enumerate(endtime)) fw = filehandles[mi] count = self.write(fw, [record]) endtime[mi] += len(record) for fw in filehandles: fw.close()
def split(self, N, force=False)
There are three modes of splitting the records - batch: split the records sequentially into chunks of roughly records/N - cycle: place each record into the split files in round-robin fashion - optimal: place each record into the currently smallest split file. Use `cycle` or `optimal` if the record lengths are not evenly distributed.
3.381557
3.21524
1.051727
from operator import itemgetter from jcvi.formats.fasta import Fasta, SeqIO p = OptionParser(prepare.__doc__) p.add_option("--rearray_lib", default=None, help="name of the rearrayed library [default: %default]") p.add_option("--orig_lib_file", help="fasta file containing reads from the original libraries [default: %default]") g = OptionGroup(p, "Optional parameters") g.add_option("--output_folder", default="to_assemble", help="output folder to write the FASTA files to [default: %default]") p.add_option_group(g) opts, args = p.parse_args(args) if not opts.rearray_lib or not opts.orig_lib_file: logging.error("Please specify the required parameters") sys.exit(not p.print_help()) rearraylib, origlibfile = opts.rearray_lib, opts.orig_lib_file if not op.isfile(origlibfile): logging.error("Original library reads file `{0}` does not exist!".format(origlibfile)) sys.exit() lookuptblfile = rearraylib + '.lookup' logging.debug(lookuptblfile) if not op.isfile(lookuptblfile): logging.error("Lookup table file `{0}` does not exist!".format(lookuptblfile)) sys.exit() rearraylibfile = rearraylib + '.fasta' logging.debug(rearraylibfile) if not op.isfile(rearraylibfile): logging.error("Rearrayed library reads file `{0}` does not exist!".format(rearraylibfile)) sys.exit() origlibFasta = Fasta(origlibfile) rearraylibFasta = Fasta(rearraylibfile) origlibids = [o for o in origlibFasta.iterkeys_ordered()] rearraylibids = [r for r in rearraylibFasta.iterkeys_ordered()] if not op.isdir(opts.output_folder): logging.warning("Output directory `{0}` missing. Creating it now...".format(opts.output_folder)) os.makedirs(opts.output_folder) logfile = rearraylib + '.log' log = open(logfile, 'w') fp = open(lookuptblfile, 'r') for row in fp: origprefix, rearrayprefix = itemgetter(0,3)(row.split('\t')) libpair = origprefix + '_' + rearrayprefix outfile = opts.output_folder + '/' + libpair + '.fasta' ofp = open(outfile, 'w') for o in origlibids: if re.match(origprefix, o): SeqIO.write(origlibFasta[o], ofp, 'fasta') for r in rearraylibids: if re.match(rearrayprefix, r): SeqIO.write(rearraylibFasta[r], ofp, 'fasta') ofp.close() print(outfile, file=log) log.close() logging.debug('Wrote log file `{0}`'.format(logfile))
def prepare(args)
%prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly.
2.356519
2.106143
1.118879
p = OptionParser(extract.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) idsfile, sizesfile = args sizes = Sizes(sizesfile).mapping fp = open(idsfile) for row in fp: name = row.strip() size = sizes[name] print("\t".join(str(x) for x in (name, size)))
def extract(args)
%prog extract idsfile sizesfile Extract the lines containing only the given IDs.
3.047088
2.514456
1.211828
from jcvi.formats.agp import OO p = OptionParser(agp.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) sizesfile, = args sizes = Sizes(sizesfile) agpfile = sizes.filename.rsplit(".", 1)[0] + ".agp" fw = open(agpfile, "w") o = OO() # Without a filename for ctg, size in sizes.iter_sizes(): o.add(ctg, ctg, size) o.write_AGP(fw) fw.close() logging.debug("AGP file written to `{0}`.".format(agpfile)) return agpfile
def agp(args)
%prog agp <fastafile|sizesfile> Convert the sizes file to a trivial AGP file.
3.277369
2.887913
1.134857
return dict(list(zip(list(d.values()), list(d.keys()))))
def _reversedict(d)
Internal helper for generating reverse mappings; given a dictionary, returns a new dictionary with keys and values swapped.
3.803946
3.811782
0.997944
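A small sketch of _reversedict() above; note that duplicate values would collapse, since later keys overwrite earlier ones in the reversed mapping.

_reversedict({"white": "#ffffff", "navy": "#000080"})
# -> {"#ffffff": "white", "#000080": "navy"}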
try: hex_digits = HEX_COLOR_RE.match(hex_value).groups()[0] except AttributeError: raise ValueError("'%s' is not a valid hexadecimal color value." % hex_value) if len(hex_digits) == 3: hex_digits = ''.join([2 * s for s in hex_digits]) return '#%s' % hex_digits.lower()
def normalize_hex(hex_value)
Normalize a hexadecimal color value to the following form and return the result:: #[a-f0-9]{6} In other words, the following transformations are applied as needed: * If the value contains only three hexadecimal digits, it is expanded to six. * The value is normalized to lower-case. If the supplied value cannot be interpreted as a hexadecimal color value, ``ValueError`` is raised. Examples: >>> normalize_hex('#0099cc') '#0099cc' >>> normalize_hex('#0099CC') '#0099cc' >>> normalize_hex('#09c') '#0099cc' >>> normalize_hex('#09C') '#0099cc' >>> normalize_hex('0099cc') Traceback (most recent call last): ... ValueError: '0099cc' is not a valid hexadecimal color value.
2.558492
2.626769
0.974007
percent = value.split('%')[0] percent = float(percent) if '.' in percent else int(percent) if 0 <= percent <= 100: return '%s%%' % percent if percent < 0: return '0%' if percent > 100: return '100%'
def _normalize_percent_rgb(value)
Normalize ``value`` for use in a percentage ``rgb()`` triplet, as follows: * If ``value`` is less than 0%, convert to 0%. * If ``value`` is greater than 100%, convert to 100%. Examples: >>> _normalize_percent_rgb('0%') '0%' >>> _normalize_percent_rgb('100%') '100%' >>> _normalize_percent_rgb('62%') '62%' >>> _normalize_percent_rgb('-5%') '0%' >>> _normalize_percent_rgb('250%') '100%' >>> _normalize_percent_rgb('85.49%') '85.49%'
2.550487
2.95364
0.863506
if spec not in SUPPORTED_SPECIFICATIONS: raise TypeError("'%s' is not a supported specification for color name lookups; supported specifications are: %s." % (spec, ', '.join(SUPPORTED_SPECIFICATIONS))) normalized = name.lower() try: hex_value = globals()['%s_names_to_hex' % spec][normalized] except KeyError: raise ValueError("'%s' is not defined as a named color in %s." % (name, spec)) return hex_value
def name_to_hex(name, spec='css3')
Convert a color name to a normalized hexadecimal color value. The optional keyword argument ``spec`` determines which specification's list of color names will be used; valid values are ``html4``, ``css2``, ``css21`` and ``css3``, and the default is ``css3``. The color name will be normalized to lower-case before being looked up, and when no color of that name exists in the given specification, ``ValueError`` is raised. Examples: >>> name_to_hex('white') '#ffffff' >>> name_to_hex('navy') '#000080' >>> name_to_hex('goldenrod') '#daa520' >>> name_to_hex('goldenrod', spec='html4') Traceback (most recent call last): ... ValueError: 'goldenrod' is not defined as a named color in html4. >>> name_to_hex('goldenrod', spec='css5') Traceback (most recent call last): ... TypeError: 'css5' is not a supported specification for color name lookups; supported specifications are: html4, css2, css21, css3.
3.359835
2.567445
1.30863
if spec not in SUPPORTED_SPECIFICATIONS: raise TypeError("'%s' is not a supported specification for color name lookups; supported specifications are: %s." % (spec, ', '.join(SUPPORTED_SPECIFICATIONS))) normalized = normalize_hex(hex_value) try: name = globals()['%s_hex_to_names' % spec][normalized] except KeyError: raise ValueError("'%s' has no defined color name in %s." % (hex_value, spec)) return name
def hex_to_name(hex_value, spec='css3')
Convert a hexadecimal color value to its corresponding normalized color name, if any such name exists. The optional keyword argument ``spec`` determines which specification's list of color names will be used; valid values are ``html4``, ``css2``, ``css21`` and ``css3``, and the default is ``css3``. The hexadecimal value will be normalized before being looked up, and when no color name for the value is found in the given specification, ``ValueError`` is raised. Examples: >>> hex_to_name('#ffffff') 'white' >>> hex_to_name('#fff') 'white' >>> hex_to_name('#000080') 'navy' >>> hex_to_name('#daa520') 'goldenrod' >>> hex_to_name('#daa520', spec='html4') Traceback (most recent call last): ... ValueError: '#daa520' has no defined color name in html4. >>> hex_to_name('#daa520', spec='css5') Traceback (most recent call last): ... TypeError: 'css5' is not a supported specification for color name lookups; supported specifications are: html4, css2, css21, css3.
3.514951
2.541318
1.383121
hex_digits = normalize_hex(hex_value) return tuple([int(s, 16) for s in (hex_digits[1:3], hex_digits[3:5], hex_digits[5:7])])
def hex_to_rgb(hex_value)
Convert a hexadecimal color value to a 3-tuple of integers suitable for use in an ``rgb()`` triplet specifying that color. The hexadecimal value will be normalized before being converted. Examples: >>> hex_to_rgb('#fff') (255, 255, 255) >>> hex_to_rgb('#000080') (0, 0, 128)
2.382153
3.653007
0.652107
# In order to maintain precision for common values, # 256 / 2**n is special-cased for values of n # from 0 through 4, as well as 0 itself. specials = {255: '100%', 128: '50%', 64: '25%', 32: '12.5%', 16: '6.25%', 0: '0%'} return tuple([specials.get(d, '%.02f%%' % ((d / 255.0) * 100)) \ for d in normalize_integer_triplet(rgb_triplet)])
def rgb_to_rgb_percent(rgb_triplet)
Convert a 3-tuple of integers, suitable for use in an ``rgb()`` color triplet, to a 3-tuple of percentages suitable for use in representing that color. This function makes some trade-offs in terms of the accuracy of the final representation; for some common integer values, special-case logic is used to ensure a precise result (e.g., integer 128 will always convert to '50%', integer 32 will always convert to '12.5%'), but for all other values a standard Python ``float`` is used and rounded to two decimal places, which may result in a loss of precision for some values. Examples: >>> rgb_to_rgb_percent((255, 255, 255)) ('100%', '100%', '100%') >>> rgb_to_rgb_percent((0, 0, 128)) ('0%', '0%', '50%') >>> rgb_to_rgb_percent((218, 165, 32)) ('85.49%', '64.71%', '12.5%')
4.344916
4.572929
0.950139
num = float(percent.split('%')[0]) / 100.0 * 255 e = num - math.floor(num) return e < 0.5 and int(math.floor(num)) or int(math.ceil(num))
def _percent_to_integer(percent)
Internal helper for converting a percentage value to an integer between 0 and 255 inclusive.
3.29349
3.047977
1.080549
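The floor/ceil branch above implements round-half-up; a small illustrative check (assuming the helper is importable as written) shows it round-trips the special-cased values from rgb_to_rgb_percent:

    >>> _percent_to_integer('50%')     # 127.5 rounds up, matching the 128 -> '50%' special case
    128
    >>> _percent_to_integer('85.49%')  # 217.9995 rounds up to 218
    218
    >>> _percent_to_integer('0%')
    0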
import numpy as np from skimage.color import rgb2lab, deltaE_cmc rgb1 = np.array(rgb1, dtype="float64").reshape(1, 1, 3) / 255. rgb2 = np.array(rgb2, dtype="float64").reshape(1, 1, 3) / 255. lab1 = rgb2lab(rgb1) lab2 = rgb2lab(rgb2) return deltaE_cmc(lab1, lab2, kL=2, kC=1)[0, 0]
def color_diff(rgb1, rgb2)
Calculate distance between two RGB colors. See discussion: http://stackoverflow.com/questions/8863810/python-find-similar-colors-best-way - for basic / fast calculations, you can use dE76 but beware of its problems - for graphics arts use we recommend dE94 and perhaps dE-CMC 2:1 - for textiles use dE-CMC
2.080662
2.028201
1.025866
logging.disable(logging.DEBUG) colors = [] for key, name in css3_hex_to_names.items(): diff = color_diff(hex_to_rgb(key), requested_color) colors.append((diff, name)) logging.disable(logging.NOTSET) min_diff, min_color = min(colors) return min_color
def closest_color(requested_color)
Find the closest color name for the requested RGB tuple.
3.846549
3.574128
1.07622
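closest_color above scores candidates with color_diff (delta E CMC via scikit-image); the same nearest-name idea can be sketched with a plain squared Euclidean distance and a toy name table. This is for illustration only: the toy dict and helper below are not part of the library, and Euclidean distance is a stand-in for the author's metric.

    # illustrative only: squared Euclidean distance instead of delta E CMC
    toy_names = {(255, 255, 255): 'white', (0, 0, 128): 'navy', (218, 165, 32): 'goldenrod'}

    def toy_closest(requested):
        dist = lambda a, b: sum((x - y) ** 2 for x, y in zip(a, b))
        return min(toy_names, key=lambda rgb: dist(rgb, requested))

    toy_names[toy_closest((250, 250, 250))]   # -> 'white'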
p = OptionParser(offdiag.__doc__) p.set_beds() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorsfile, = args qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) fp = open(anchorsfile) pf = "-".join(anchorsfile.split(".")[:2]) header = "Block-id|Napus|Diploid|Napus-chr|Diploid-chr|RBH?".split("|") print("\t".join(header)) i = -1 for row in fp: if row[0] == '#': i += 1 continue q, s, score = row.split() rbh = 'no' if score[-1] == 'L' else 'yes' qi, qq = qorder[q] si, ss = sorder[s] oqseqid = qseqid = qq.seqid osseqid = sseqid = ss.seqid sseqid = sseqid.split("_")[0][-3:] if qseqid[0] == 'A': qseqid = qseqid[-3:] # A09 => A09 elif qseqid[0] == 'C': qseqid = 'C0' + qseqid[-1] # C9 => C09 else: continue if qseqid == sseqid or sseqid[-2:] == 'nn': continue block_id = pf + "-block-{0}".format(i) print("\t".join((block_id, q, s, oqseqid, osseqid, rbh)))
def offdiag(args)
%prog offdiag diploid.napus.1x1.lifted.anchors Find gene pairs that are off diagonal. "Off diagonal" pairs are those not on the orthologous chromosomes. For example, napus chrA01 and brapa A01.
4.358224
4.003894
1.088496
from jcvi.utils.cbook import SummaryStats p = OptionParser(diff.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) simplefile, = args fp = open(simplefile) data = [x.split() for x in fp] spans = [] for block_id, ab in groupby(data[1:], key=lambda x: x[0]): a, b = list(ab) aspan, bspan = a[4], b[4] aspan, bspan = int(aspan), int(bspan) spans.append((aspan, bspan)) aspans, bspans = zip(*spans) dspans = [b - a for a, b, in spans] s = SummaryStats(dspans) print("For a total of {0} blocks:".format(len(dspans)), file=sys.stderr) print("Sum of A: {0}".format(sum(aspans)), file=sys.stderr) print("Sum of B: {0}".format(sum(bspans)), file=sys.stderr) print("Sum of Delta: {0} ({1})".format(sum(dspans), s), file=sys.stderr)
def diff(args)
%prog diff simplefile Calculate difference of pairwise syntenic regions.
2.564559
2.479038
1.034498
accns = [order[x] for x in accns] ii, bb = zip(*accns) mini, maxi = min(ii), max(ii) if not conservative: # extend one gene mini -= 1 maxi += 1 minb = bed[mini] maxb = bed[maxi] assert minb.seqid == maxb.seqid distmode = "ss" if conservative else "ee" ra = (minb.seqid, minb.start, minb.end, "+") rb = (maxb.seqid, maxb.start, maxb.end, "+") dist, orientation = range_distance(ra, rb, distmode=distmode) assert dist != -1 return dist
def estimate_size(accns, bed, order, conservative=True)
Estimate the bp length for the deletion tracks indicated by the gene accns. Two different levels of estimates vary in conservativeness.
4.071694
4.291452
0.948792
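The conservative/aggressive distinction above can be illustrated with plain coordinate tuples. The arithmetic below is a simplification (jcvi's range_distance defines the exact boundary convention via its "ss"/"ee" modes) and the coordinates are invented:

    # one chromosome, genes sorted by position, stored as (start, end)
    genes = [(100, 200), (300, 400), (500, 600), (700, 800)]
    lost = [1, 2]                                      # indexes of the deleted genes

    lo, hi = min(lost), max(lost)
    conservative = genes[hi][1] - genes[lo][0]         # stay within the lost genes: 600 - 300
    aggressive = genes[hi + 1][1] - genes[lo - 1][0]   # extend one flanking gene each way: 800 - 100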
from jcvi.formats.base import SetFile p = OptionParser(segment.__doc__) p.add_option("--chain", default=1, type="int", help="Allow next N genes to be chained [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) idsfile, bedfile = args bed = Bed(bedfile) order = bed.order ids = SetFile(idsfile) losses = Grouper() skip = opts.chain for i, a in enumerate(bed): a = a.accn for j in xrange(i + 1, i + 1 + skip): if j >= len(bed): break b = bed[j].accn if a in ids: losses.join(a, a) if a in ids and b in ids: losses.join(a, b) losses = list(losses) singletons = [x for x in losses if len(x) == 1] segments = [x for x in losses if len(x) > 1] ns, nm, nt = len(singletons), len(segments), len(losses) assert ns + nm == nt # Summary for all segments for x in sorted(singletons) + sorted(segments): print("\t".join(str(x) for x in ("|".join(sorted(x)), len(x), estimate_size(x, bed, order)))) # Find longest segment stretch if segments: mx, maxsegment = max([(len(x), x) for x in segments]) print("Longest stretch: run of {0} genes".format(mx), file=sys.stderr) print(" {0}".format("|".join(sorted(maxsegment))), file=sys.stderr) seg_asize = sum(estimate_size(x, bed, order) for x in segments) seg_bsize = sum(estimate_size(x, bed, order, conservative=False) \ for x in segments) else: seg_asize = seg_bsize = 0 sing_asize = sum(estimate_size(x, bed, order) for x in singletons) sing_bsize = sum(estimate_size(x, bed, order, conservative=False) \ for x in singletons) total_asize = sing_asize + seg_asize total_bsize = sing_bsize + seg_bsize print("Singleton ({0}): {1} - {2} bp".\ format(ns, sing_asize, sing_bsize), file=sys.stderr) print("Segment ({0}): {1} - {2} bp".\ format(nm, seg_asize, seg_bsize), file=sys.stderr) print("Total ({0}): {1} - {2} bp".\ format(nt, total_asize, total_bsize), file=sys.stderr) print("Average ({0}): {1} bp".\ format(nt, (total_asize + total_bsize) / 2), file=sys.stderr)
def segment(args)
%prog segment loss.ids bedfile Merge adjacent gene loss into segmental loss. Then based on the segmental loss, estimate amount of DNA loss in base pairs. Two estimates can be given: - conservative: just within the start and end of a single gene - aggressive: extend the deletion track to the next gene The real deletion size is within these estimates.
2.49404
2.426339
1.027903
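The --chain option above joins lost genes that sit within N positions of each other; a standalone sketch of that chaining rule (not the jcvi Grouper implementation, just the rule it encodes) might look like:

    def chain_losses(ordered_genes, lost_ids, chain=1):
        """Group lost genes, tolerating up to chain - 1 retained genes in between."""
        segments, current, gap = [], [], 0
        for gene in ordered_genes:
            if gene in lost_ids:
                current.append(gene)
                gap = 0
            elif current:
                gap += 1
                if gap >= chain:              # too far from the last lost gene: close the segment
                    segments.append(current)
                    current, gap = [], 0
        if current:
            segments.append(current)
        return segments

    chain_losses("abcdefg", set("abdg"), chain=1)   # -> [['a', 'b'], ['d'], ['g']]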
from jcvi.formats.base import DictFile p = OptionParser(merge.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) quartets, registry, lost = args qq = DictFile(registry, keypos=1, valuepos=3) lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|') qq.update(lost) fp = open(quartets) cases = { "AN,CN": 4, "BO,AN,CN": 8, "BO,CN": 2, "BR,AN": 1, "BR,AN,CN": 6, "BR,BO": 3, "BR,BO,AN": 5, "BR,BO,AN,CN": 9, "BR,BO,CN": 7, } ip = { "syntenic_model": "Syntenic_model_excluded_by_OMG", "complete": "Predictable", "partial": "Truncated", "pseudogene": "Pseudogene", "random": "Match_random", "real_ns": "Transposed", "gmap_fail": "GMAP_fail", "AN LOST": "AN_LOST", "CN LOST": "CN_LOST", "BR LOST": "BR_LOST", "BO LOST": "BO_LOST", "outside": "Outside_synteny_blocks", "[NF]": "Not_found", } for row in fp: atoms = row.strip().split("\t") genes = atoms[:4] tag = atoms[4] a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes] qqs = [c, d, a, b] for i, q in enumerate(qqs): if atoms[i] != '.': qqs[i] = "syntenic_model" # Make comment comment = "Case{0}".format(cases[tag]) dots = sum([1 for x in genes if x == '.']) if dots == 1: idx = genes.index(".") status = qqs[idx] status = ip[status] comment += "-" + status print(row.strip() + "\t" + "\t".join(qqs + [comment]))
def merge(args)
%prog merge protein-quartets registry LOST Merge protein quartets table with dna quartets registry. This is specific to the napus project.
4.23358
4.028737
1.050845
from jcvi.formats.bed import intersectBed_wao p = OptionParser(gffselect.__doc__) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) gmapped, expected, idsfile, tag = args data = get_tags(idsfile) completeness = dict((a.replace("mrna", "path"), c) \ for (a, b, c) in data) seen = set() idsfile = expected.rsplit(".", 1)[0] + ".ids" fw = open(idsfile, "w") cnt = 0 for a, b in intersectBed_wao(expected, gmapped): if b is None: continue aname, bbname = a.accn, b.accn bname = bbname.split(".")[0] if completeness[bbname] != tag: continue if aname == bname: if bname in seen: continue seen.add(bname) print(bbname, file=fw) cnt += 1 fw.close() logging.debug("Total {0} records written to `{1}`.".format(cnt, idsfile))
def gffselect(args)
%prog gffselect gmaplocation.bed expectedlocation.bed translated.ids tag Try to match up the expected location and gmap locations for particular genes. translated.ids was generated by fasta.translate --ids. tag must be one of "complete|pseudogene|partial".
3.852392
3.586404
1.074166
from jcvi.formats.base import DictFile from jcvi.apps.base import popen from jcvi.utils.cbook import percentage p = OptionParser(gaps.__doc__) p.add_option("--bdist", default=0, type="int", help="Base pair distance [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) idsfile, frfile, gapsbed = args bdist = opts.bdist d = DictFile(frfile, keypos=1, valuepos=2) bedfile = idsfile + ".bed" fw = open(bedfile, "w") fp = open(idsfile) total = 0 for row in fp: id = row.strip() hit = d[id] tag, pos = get_tag(hit, None) seqid, start, end = pos start, end = max(start - bdist, 1), end + bdist print("\t".join(str(x) for x in (seqid, start - 1, end, id)), file=fw) total += 1 fw.close() cmd = "intersectBed -a {0} -b {1} -v | wc -l".format(bedfile, gapsbed) not_in_gaps = popen(cmd).read() not_in_gaps = int(not_in_gaps) in_gaps = total - not_in_gaps print("Ids in gaps: {1}".\ format(total, percentage(in_gaps, total)), file=sys.stderr)
def gaps(args)
%prog gaps idsfile fractionationfile gapsbed Check gene locations against gaps. `idsfile` contains a list of IDs to query into `fractionationfile` in order to get expected locations.
2.855478
2.702425
1.056635
p = OptionParser(genestatus.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) idsfile, = args data = get_tags(idsfile) key = lambda x: x[0].split(".")[0] for gene, cc in groupby(data, key=key): cc = list(cc) tags = [x[-1] for x in cc] if "complete" in tags: tag = "complete" elif "partial" in tags: tag = "partial" else: tag = "pseudogene" print("\t".join((gene, tag)))
def genestatus(args)
%prog genestatus diploid.gff3.exon.ids Tag genes based on translation from GMAP models, using fasta.translate() --ids.
2.864452
2.581768
1.109492
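The tag priority above (complete > partial > pseudogene) can be checked with a tiny standalone example; the tuples below mimic the (mrna_id, ..., tag) rows returned by get_tags and are invented for illustration:

    from itertools import groupby

    rows = [("g1.mrna1", None, "partial"), ("g1.mrna2", None, "complete"),
            ("g2.mrna1", None, "pseudogene")]
    for gene, cc in groupby(rows, key=lambda x: x[0].split(".")[0]):
        tags = [x[-1] for x in cc]
        tag = "complete" if "complete" in tags else \
              "partial" if "partial" in tags else "pseudogene"
        print(gene, tag)        # g1 complete / g2 pseudogene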
from jcvi.formats.bed import intersectBed_wao p = OptionParser(validate.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fractionation, cdsbed = args fp = open(fractionation) sbed = "S.bed" fw = open(sbed, "w") for row in fp: a, b, c = row.split() if not c.startswith("[S]"): continue tag, (seqid, start, end) = get_tag(c, None) print("\t".join(str(x) for x in (seqid, start - 1, end, b)), file=fw) fw.close() pairs = {} for a, b in intersectBed_wao(sbed, cdsbed): if b is None: continue pairs[a.accn] = b.accn validated = fractionation + ".validated" fw = open(validated, "w") fp.seek(0) fixed = 0 for row in fp: a, b, c = row.split() if b in pairs: assert c.startswith("[S]") c = pairs[b] fixed += 1 print("\t".join((a, b, c)), file=fw) logging.debug("Fixed {0} [S] cases in `{1}`.".format(fixed, validated)) fw.close()
def validate(args)
%prog validate diploid.napus.fractionation cds.bed Check whether [S] intervals overlap with CDS.
3.367083
2.976654
1.131164
p = OptionParser(compare.__doc__) p.set_pasa_opts(action="compare") p.add_option("--prepare", default=False, action="store_true", help="Prepare PASA run script with commands [default: %default]") p.set_grid() p.set_grid_opts() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) pasa_db, = args PASA_HOME = opts.pasa_home if not op.isdir(PASA_HOME): logging.error("PASA_HOME={0} directory does not exist".format(PASA_HOME)) sys.exit() launch_pasa = which(op.join(PASA_HOME, "scripts", \ "Launch_PASA_pipeline.pl")) annots_gff3 = opts.annots_gff3 grid = opts.grid prepare, runfile = opts.prepare, "run.sh" os.chdir(pasa_db) if prepare: write_file(runfile, "", append=True, skipcheck=True) # initialize run script acfw = must_open(acconf, "w") print(annotCompare_conf.format("{0}_pasa".format(pasa_db), \ opts.pctovl, opts.pct_coding, opts.pctid_prot, opts.pctlen_FL, \ opts.pctlen_nonFL, opts.orf_size, opts.pct_aln, opts.pctovl_gene, \ opts.stompovl, opts.trust_FL, opts.utr_exons), file=acfw) acfw.close() if not op.exists(gfasta): sys.exit("Genome fasta file `{0}` does not exist".format(gfasta)) transcripts = tfasta if not op.exists(transcripts): sys.exit("Transcript fasta file `{0}` does not exist".format(transcripts)) if op.exists("{0}.clean".format(transcripts)): transcripts = "{0}.clean".format(transcripts) accmd = "{0} -c {1} -A -g {2} -t {3} --GENETIC_CODE {4}".format(launch_pasa, \ acconf, gfasta, transcripts, opts.genetic_code) if annots_gff3: if not op.exists(annots_gff3): sys.exit("Annotation gff3 file `{0}` does not exist".format(annots_gff3)) symlink(annots_gff3, annotation) accmd += " -L --annots_gff3 {0}".format(annotation) if prepare: write_file(runfile, accmd, append=True) else: sh(accmd, grid=grid, grid_opts=opts)
def compare(args)
%prog compare pasa_db_name [--annots_gff3=annotation.gff3] Run the PASA annotation comparison pipeline. This assumes that PASA alignment assembly has already been completed and the run directory contains `genome.fasta` and `transcript.fasta` files. If `--annots_gff3` is specified, the PASA database is loaded with the annotations first before starting annotation comparison. Otherwise, it uses previously loaded annotation data. Using the `--prepare` option creates a shell script with the run commands without executing the pipeline.
4.200796
3.697832
1.136016
from jcvi.formats.fasta import Fasta, SeqIO from jcvi.formats.sizes import Sizes p = OptionParser(longest.__doc__) p.add_option("--prefix", default="pasa", help="Replace asmbl_ with prefix [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, subclusters = args prefix = fastafile.rsplit(".", 1)[0] idsfile = prefix + ".fl.ids" fw = open(idsfile, "w") sizes = Sizes(fastafile).mapping name_convert = lambda x: x.replace("asmbl", opts.prefix) keep = set() # List of IDs to write fp = open(subclusters) nrecs = 0 for row in fp: if not row.startswith("sub-cluster:"): continue asmbls = row.split()[1:] longest_asmbl = max(asmbls, key=lambda x: sizes[x]) longest_size = sizes[longest_asmbl] print(name_convert(longest_asmbl), file=fw) nrecs += 1 cutoff = max(longest_size / 2, 200) keep.update(set(x for x in asmbls if sizes[x] >= cutoff)) fw.close() logging.debug("{0} fl-cDNA records written to `{1}`.".format(nrecs, idsfile)) f = Fasta(fastafile, lazy=True) newfastafile = prefix + ".clean.fasta" fw = open(newfastafile, "w") nrecs = 0 for name, rec in f.iteritems_ordered(): if name not in keep: continue rec.id = name_convert(name) rec.description = "" SeqIO.write([rec], fw, "fasta") nrecs += 1 fw.close() logging.debug("{0} valid records written to `{1}`.".format(nrecs, newfastafile))
def longest(args)
%prog longest pasa.fasta output.subclusters.out Find the longest PASA assembly and label it as full-length. Also removes transcripts shorter than half the length of the longest, or shorter than 200bp. The assemblies for the same locus are found in `output.subclusters.out`. In particular the lines that look like: sub-cluster: asmbl_25 asmbl_26 asmbl_27
2.903126
2.569648
1.129776
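A stripped-down sketch of the sub-cluster parsing described above, using an in-memory sizes dict instead of jcvi's Sizes/Fasta objects (the assembly names and lengths are invented):

    sizes = {"asmbl_25": 1200, "asmbl_26": 900, "asmbl_27": 150}
    line = "sub-cluster: asmbl_25 asmbl_26 asmbl_27"

    asmbls = line.split()[1:]
    longest = max(asmbls, key=lambda x: sizes[x])        # asmbl_25, the fl-cDNA candidate
    cutoff = max(sizes[longest] // 2, 200)               # keep >= half the longest, and >= 200 bp
    keep = [x for x in asmbls if sizes[x] >= cutoff]     # ['asmbl_25', 'asmbl_26']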
from jcvi.formats.fasta import Fasta, SeqIO p = OptionParser(filter.__doc__) p.add_option("--minsize", default=2, type="int", help="Minimum cluster size") p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fastafiles = args minsize = opts.minsize totalreads = totalassembled = 0 fw = must_open(opts.outfile, "w") for i, fastafile in enumerate(fastafiles): f = Fasta(fastafile, lazy=True) pf = "s{0:03d}".format(i) nreads = nsingletons = nclusters = 0 for desc, rec in f.iterdescriptions_ordered(): nclusters += 1 if desc.startswith("singleton"): nsingletons += 1 nreads += 1 continue # consensus_for_cluster_0 with 63 sequences name, w, size, seqs = desc.split() assert w == "with" size = int(size) nreads += size if size < minsize: continue rec.description = rec.description.split(None, 1)[-1] rec.id = pf + "_" + rec.id SeqIO.write(rec, fw, "fasta") logging.debug("Scanned {0} clusters with {1} reads ..".\ format(nclusters, nreads)) cclusters, creads = nclusters - nsingletons, nreads - nsingletons logging.debug("Saved {0} clusters (min={1}) with {2} reads (avg:{3}) [{4}]".\ format(cclusters, minsize, creads, creads / cclusters, pf)) totalreads += nreads totalassembled += nreads - nsingletons logging.debug("Total assembled: {0}".\ format(percentage(totalassembled, totalreads)))
def filter(args)
%prog filter *.consensus.fasta Filter consensus sequence with min cluster size.
3.132093
2.972972
1.053523
p = OptionParser(ids.__doc__) p.add_option("--prefix", type="int", help="Find rep id for prefix of len [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) clstrfile, = args cf = ClstrFile(clstrfile) prefix = opts.prefix if prefix: reads = list(cf.iter_reps_prefix(prefix=prefix)) else: reads = list(cf.iter_reps()) nreads = len(reads) idsfile = clstrfile.replace(".clstr", ".ids") fw = open(idsfile, "w") for i, name in reads: print("\t".join(str(x) for x in (i, name)), file=fw) logging.debug("A total of {0} unique reads written to `{1}`.".\ format(nreads, idsfile)) fw.close() return idsfile
def ids(args)
%prog ids cdhit.clstr Get the representative ids from clstr file.
3.106381
2.808199
1.106183
from jcvi.graphics.histogram import loghistogram p = OptionParser(summary.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) clstrfile, = args cf = ClstrFile(clstrfile) data = list(cf.iter_sizes()) loghistogram(data, summary=True)
def summary(args)
%prog summary cdhit.clstr Parse cdhit.clstr file to get distribution of cluster sizes.
3.891895
2.976436
1.307569
p = OptionParser(deduplicate.__doc__) p.set_align(pctid=96, pctcov=0) p.add_option("--fast", default=False, action="store_true", help="Place sequence in the first cluster") p.add_option("--consensus", default=False, action="store_true", help="Compute consensus sequences") p.add_option("--reads", default=False, action="store_true", help="Use `cd-hit-454` to deduplicate [default: %default]") p.add_option("--samestrand", default=False, action="store_true", help="Enforce same strand alignment") p.set_home("cdhit") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args identity = opts.pctid / 100. fastafile, qualfile = fasta([fastafile, "--seqtk"]) ocmd = "cd-hit-454" if opts.reads else "cd-hit-est" cmd = op.join(opts.cdhit_home, ocmd) cmd += " -c {0}".format(identity) if ocmd == "cd-hit-est": cmd += " -d 0" # include complete defline if opts.samestrand: cmd += " -r 0" if not opts.fast: cmd += " -g 1" if opts.pctcov != 0: cmd += " -aL {0} -aS {0}".format(opts.pctcov / 100.) dd = fastafile + ".P{0}.cdhit".format(opts.pctid) clstr = dd + ".clstr" cmd += " -M 0 -T {0} -i {1} -o {2}".format(opts.cpus, fastafile, dd) if need_update(fastafile, (dd, clstr)): sh(cmd) if opts.consensus: cons = dd + ".consensus" cmd = op.join(opts.cdhit_home, "cdhit-cluster-consensus") cmd += " clustfile={0} fastafile={1} output={2} maxlen=1".\ format(clstr, fastafile, cons) if need_update((clstr, fastafile), cons): sh(cmd) return dd
def deduplicate(args)
%prog deduplicate fastafile Wraps `cd-hit-est` to remove duplicate sequences.
3.503393
3.327718
1.052792
self.seek(block.file_offset) return self._fhandle.read(block.size)
def read_block(self, block)
Read complete PEB data from file. Argument: Obj:block -- Block that data is desired for.
7.738578
9.590605
0.806892
self.seek(block.file_offset + block.ec_hdr.data_offset) buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad) return buf
def read_block_data(self, block)
Read LEB data from file. Argument: Obj:block -- Block that data is desired for.
7.59977
7.606129
0.999164
blocks = {} ubi.file.seek(ubi.file.start_offset) peb_count = 0 cur_offset = 0 bad_blocks = [] # range instead of xrange, as xrange breaks > 4GB end_offset. for i in range(ubi.file.start_offset, ubi.file.end_offset, ubi.file.block_size): try: buf = ubi.file.read(ubi.file.block_size) except Exception as e: if settings.warn_only_block_read_errors: error(extract_blocks, 'Error', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e))) continue else: error(extract_blocks, 'Fatal', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e))) if buf.startswith(UBI_EC_HDR_MAGIC): blk = description(buf) blk.file_offset = i blk.peb_num = ubi.first_peb_num + peb_count blk.size = ubi.file.block_size blocks[blk.peb_num] = blk peb_count += 1 log(extract_blocks, blk) verbose_log(extract_blocks, 'file addr: %s' % (ubi.file.last_read_addr())) ec_hdr_errors = '' vid_hdr_errors = '' if blk.ec_hdr.errors: ec_hdr_errors = ','.join(blk.ec_hdr.errors) if blk.vid_hdr and blk.vid_hdr.errors: vid_hdr_errors = ','.join(blk.vid_hdr.errors) if ec_hdr_errors or vid_hdr_errors: if blk.peb_num not in bad_blocks: bad_blocks.append(blk.peb_num) log(extract_blocks, 'PEB: %s has possible issue EC_HDR [%s], VID_HDR [%s]' % (blk.peb_num, ec_hdr_errors, vid_hdr_errors)) verbose_display(blk) else: cur_offset += ubi.file.block_size ubi.first_peb_num = cur_offset/ubi.file.block_size ubi.file.start_offset = cur_offset return blocks
def extract_blocks(ubi)
Get a list of UBI block objects from file Arguments: Obj:ubi -- UBI object. Returns: Dict -- Of block objects keyed by PEB number.
3.247831
3.124885
1.039344
return list(filter(lambda block: blocks[block].ec_hdr.image_seq == image_seq, blocks))
def by_image_seq(blocks, image_seq)
Filter blocks to return only those associated with the provided image_seq number. Argument: List:blocks -- List of block objects to sort. Int:image_seq -- image_seq number found in ec_hdr. Returns: List -- List of block indexes matching image_seq number.
8.620091
5.22487
1.649819
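A quick standalone check of the filter above, using a namedtuple stand-in for the block objects (the field layout is simplified; real blocks carry full EC headers) and assuming by_image_seq is importable as written:

    from collections import namedtuple

    EC = namedtuple("EC", "image_seq")
    Block = namedtuple("Block", "ec_hdr")
    blocks = {0: Block(EC(7)), 1: Block(EC(9)), 2: Block(EC(7))}

    by_image_seq(blocks, 7)    # -> [0, 2], the PEB keys belonging to image_seq 7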
slist_len = len(blocks) slist = ['x'] * slist_len for block in blocks: if blocks[block].leb_num >= slist_len: add_elements = blocks[block].leb_num - slist_len + 1 slist += (['x'] * add_elements) slist_len = len(slist) slist[blocks[block].leb_num] = block return slist
def by_leb(blocks)
Sort blocks by Logical Erase Block number. Arguments: List:blocks -- List of block objects to sort. Returns: List -- Indexes of blocks sorted by LEB.
3.228516
3.469674
0.930496
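The 'x' placeholders above mark LEB positions with no block; a tiny example with mock blocks (again a simplified namedtuple stand-in) makes the sparse layout visible:

    from collections import namedtuple

    Block = namedtuple("Block", "leb_num")
    blocks = {10: Block(leb_num=0), 11: Block(leb_num=3)}

    by_leb(blocks)    # -> [10, 'x', 'x', 11]; index = LEB number, value = PEB key or 'x'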
vol_blocks = {} # sort block by volume # not reliable with multiple partitions (fifo) for i in blocks: if slist and i not in slist: continue elif not blocks[i].is_valid: continue if blocks[i].vid_hdr.vol_id not in vol_blocks: vol_blocks[blocks[i].vid_hdr.vol_id] = [] vol_blocks[blocks[i].vid_hdr.vol_id].append(blocks[i].peb_num) return vol_blocks
def by_vol_id(blocks, slist=None)
Sort blocks by volume id Arguments: Obj:blocks -- List of block objects. List:slist -- (optional) List of block indexes. Return: Dict -- blocks grouped in lists with dict key as volume id.
4.102421
4.224025
0.971211
layout = [] data = [] int_vol = [] unknown = [] for i in blocks: if slist and i not in slist: continue if blocks[i].is_vtbl and blocks[i].is_valid: layout.append(i) elif blocks[i].is_internal_vol and blocks[i].is_valid: int_vol.append(i) elif blocks[i].is_valid: data.append(i) else: unknown.append(i) return layout, data, int_vol, unknown
def by_type(blocks, slist=None)
Sort blocks into layout, internal volume, data or unknown Arguments: Obj:blocks -- List of block objects. List:slist -- (optional) List of block indexes. Returns: List:layout -- List of block indexes of blocks containing the volume table records. List:data -- List of block indexes containing filesystem data. List:int_vol -- List of block indexes containing volume ids greater than UBI_INTERNAL_VOL_START that are not layout volumes. List:unknown -- List of block indexes of blocks that failed validation of crc in ec_hdr or vid_hdr.
3.341389
2.074286
1.610862
try: inodes = {} bad_blocks = [] walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes, bad_blocks) if len(inodes) < 2: raise Exception('No inodes found') for dent in inodes[1]['dent']: extract_dents(ubifs, inodes, dent, out_path, perms) if len(bad_blocks): error(extract_files, 'Warning', 'Data may be missing or corrupted, bad blocks, LEB [%s]' % ','.join(map(str, bad_blocks))) except Exception as e: error(extract_files, 'Error', '%s' % e)
def extract_files(ubifs, out_path, perms=False)
Extract UBIFS contents to out_path/ Arguments: Obj:ubifs -- UBIFS object. Str:out_path -- Path to extract contents to.
5.505939
5.652353
0.974097
layout_temp = list(layout_blocks) for i in range(0, len(layout_temp)): for k in range(0, len(layout_blocks)): if blocks[layout_temp[i]].ec_hdr.image_seq != blocks[layout_blocks[k]].ec_hdr.image_seq: continue if blocks[layout_temp[i]].leb_num != blocks[layout_blocks[k]].leb_num: continue if blocks[layout_temp[i]].vid_hdr.sqnum > blocks[layout_blocks[k]].vid_hdr.sqnum: del layout_blocks[k] break return layout_blocks
def get_newest(blocks, layout_blocks)
Filter out old layout blocks from list Arguments: List:blocks -- List of block objects List:layout_blocks -- List of layout block indexes Returns: List -- Newest layout blocks in list
3.184504
3.452461
0.922387
image_dict={} for block_id in layout_blocks_list: image_seq=blocks[block_id].ec_hdr.image_seq if image_seq not in image_dict: image_dict[image_seq]=[block_id] else: image_dict[image_seq].append(block_id) log(group_pairs, 'Layout blocks found at PEBs: %s' % list(image_dict.values())) return list(image_dict.values())
def group_pairs(blocks, layout_blocks_list)
Sort a list of layout blocks into pairs Arguments: List:blocks -- List of block objects List:layout_blocks -- List of layout block indexes Returns: List -- Layout block pair indexes grouped in a list
4.162212
4.257876
0.977532
seq_blocks = [] for layout_pair in layout_pairs: seq_blocks = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq) layout_pair.append(seq_blocks) return layout_pairs
def associate_blocks(blocks, layout_pairs, start_peb_num)
Group block indexes with appropriate layout pairs Arguments: List:blocks -- List of block objects List:layout_pairs -- List of grouped layout blocks Int:start_peb_num -- Number of the PEB to start from. Returns: List -- Layout block pairs grouped with associated block ranges.
7.211203
6.720164
1.073069
volumes = {} vol_blocks_lists = sort.by_vol_id(blocks, layout_info[2]) for vol_rec in blocks[layout_info[0]].vtbl_recs: vol_name = vol_rec.name.strip(b'\x00').decode('utf-8') if vol_rec.rec_index not in vol_blocks_lists: vol_blocks_lists[vol_rec.rec_index] = [] volumes[vol_name] = description(vol_rec.rec_index, vol_rec, vol_blocks_lists[vol_rec.rec_index]) return volumes
def get_volumes(blocks, layout_info)
Get a list of UBI volume objects from list of blocks Arguments: List:blocks -- List of layout block objects List:layout_info -- Layout info (indexes of layout blocks and associated data blocks.) Returns: Dict -- Of Volume objects by volume name, including any relevant blocks.
4.283293
4.598394
0.931476
hkey, lkey = struct.unpack('<II',key[0:UBIFS_SK_LEN]) ino_num = hkey & UBIFS_S_KEY_HASH_MASK key_type = lkey >> UBIFS_S_KEY_BLOCK_BITS khash = lkey #if key_type < UBIFS_KEY_TYPES_CNT: return {'type':key_type, 'ino_num':ino_num, 'khash': khash}
def parse_key(key)
Parse node key Arguments: Str:key -- Hex string literal of node key. Returns: Int:key_type -- Type of key, data, ino, dent, etc. Int:ino_num -- Inode number. Int:khash -- Key hash.
6.315537
5.606973
1.126372
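The key above is two little-endian 32-bit words: the inode number in the first, the key type in the top bits of the second. A round-trip sketch, with the UBIFS constants assumed to be 29 block bits and a 29-bit hash mask (check the project's defines for the authoritative values):

    import struct

    UBIFS_S_KEY_BLOCK_BITS = 29                 # assumed, per the UBIFS on-disk format
    UBIFS_S_KEY_HASH_MASK = (1 << 29) - 1       # assumed

    ino_num, key_type, khash = 65, 2, 12345
    key = struct.pack('<II', ino_num, (key_type << UBIFS_S_KEY_BLOCK_BITS) | khash)

    hkey, lkey = struct.unpack('<II', key)
    (hkey & UBIFS_S_KEY_HASH_MASK, lkey >> UBIFS_S_KEY_BLOCK_BITS)   # -> (65, 2)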
if ctype == UBIFS_COMPR_LZO: try: return lzo.decompress(b''.join((b'\xf0', struct.pack('>I', unc_len), data))) except Exception as e: error(decompress, 'Warn', 'LZO Error: %s' % e) elif ctype == UBIFS_COMPR_ZLIB: try: return zlib.decompress(data, -11) except Exception as e: error(decompress, 'Warn', 'ZLib Error: %s' % e) else: return data
def decompress(ctype, unc_len, data)
Decompress data. Arguments: Int:ctype -- Compression type LZO, ZLIB (*currently unused*). Int:unc_len -- Uncompressed data length. Str:data -- Data to be uncompressed. Returns: Uncompressed data.
3.08796
3.261812
0.946701
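The -11 wbits argument tells zlib to expect a raw deflate stream with no zlib header or trailer, which is how the compressed data nodes are stored; a self-contained round trip:

    import zlib

    co = zlib.compressobj(6, zlib.DEFLATED, -11)   # raw deflate, no header/trailer
    raw = co.compress(b"hello ubifs") + co.flush()

    zlib.decompress(raw, -11)                      # -> b'hello ubifs'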
f = open(path, 'rb') f.seek(0,2) file_size = f.tell()+1 f.seek(0) block_size = None for _ in range(0, file_size, FILE_CHUNK_SZ): buf = f.read(FILE_CHUNK_SZ) for m in re.finditer(UBIFS_NODE_MAGIC, buf): start = m.start() chdr = nodes.common_hdr(buf[start:start+UBIFS_COMMON_HDR_SZ]) if chdr and chdr.node_type == UBIFS_SB_NODE: sb_start = start + UBIFS_COMMON_HDR_SZ sb_end = sb_start + UBIFS_SB_NODE_SZ if chdr.len != len(buf[sb_start:sb_end]): f.seek(sb_start) buf = f.read(UBIFS_SB_NODE_SZ) else: buf = buf[sb_start:sb_end] sbn = nodes.sb_node(buf) block_size = sbn.leb_size f.close() return block_size f.close() return block_size
def guess_leb_size(path)
Get LEB size from superblock Arguments: Str:path -- Path to file. Returns: Int -- LEB size. Searches file for superblock and retrieves leb size.
2.771082
2.739508
1.011526
file_offset = 0 offsets = [] f = open(path, 'rb') f.seek(0,2) file_size = f.tell()+1 f.seek(0) for _ in range(0, file_size, FILE_CHUNK_SZ): buf = f.read(FILE_CHUNK_SZ) for m in re.finditer(UBI_EC_HDR_MAGIC, buf): start = m.start() if not file_offset: file_offset = start idx = start else: idx = start+file_offset offsets.append(idx) file_offset += FILE_CHUNK_SZ f.close() occurances = {} for i in range(0, len(offsets)): try: diff = offsets[i] - offsets[i-1] except: diff = offsets[i] if diff not in occurances: occurances[diff] = 0 occurances[diff] += 1 most_frequent = 0 block_size = None for offset in occurances: if occurances[offset] > most_frequent: most_frequent = occurances[offset] block_size = offset return block_size
def guess_peb_size(path)
Determine the most likely block size Arguments: Str:path -- Path to file. Returns: Int -- PEB size. Searches file for Magic Number, picks most common length between them.
2.586903
2.616678
0.988621
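The heuristic above amounts to counting the gaps between successive magic-number offsets and taking the most common one; the same idea in a few lines with collections.Counter over made-up offsets:

    from collections import Counter

    offsets = [0, 131072, 262144, 393216, 655360]   # invented UBI_EC_HDR_MAGIC positions
    gaps = [b - a for a, b in zip(offsets, offsets[1:])]
    Counter(gaps).most_common(1)[0][0]              # -> 131072, the likely PEB size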
if not value: return None # Apart from numbers also accept values that end with px if isinstance(value, str): value = value.strip(' px') try: return int(value) except (TypeError, ValueError): return None
def convert_to_int(value)
Attempts to convert a specified value to an integer :param value: Content to be converted into an integer :type value: string or int
6.191741
7.012114
0.883006
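Expected behaviour of the helper above, based on its strip-then-int logic (note that .strip(' px') strips a character set, so trailing 'px' is removed from pixel values):

    >>> convert_to_int('42px')
    42
    >>> convert_to_int(' 300 ')
    300
    >>> convert_to_int('wide')
    >>> convert_to_int(None)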
data.update({ 'oembed': oembed_data, }) _type = oembed_data.get('type') provider_name = oembed_data.get('provider_name') if not _type: return data if oembed_data.get('title'): data.update({ 'title': oembed_data.get('title'), }) if _type == 'video': try: item = { 'width': convert_to_int(oembed_data.get('width')), 'height': convert_to_int(oembed_data.get('height')) } if provider_name in ['YouTube', ]: item['src'] = HYPERLINK_PATTERN.search(oembed_data.get('html')).group(0) data['videos'].append(item) except Exception: pass if oembed_data.get('thumbnail_url'): item = { 'width': convert_to_int(oembed_data.get('thumbnail_width')), 'height': convert_to_int(oembed_data.get('thumbnail_height')), 'src': oembed_data.get('thumbnail_url') } data['images'].append(item) return data
def parse_oembed_data(oembed_data, data)
Parse OEmbed response data to inject into lassie's response dict. :param oembed_data: OEmbed response data. :type oembed_data: dict :param data: Reference to data variable being updated. :type data: dict
2.04543
2.095428
0.97614
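A small usage sketch of the OEmbed merge above; the response dict shape (an 'images'/'videos' pair of lists) matches what the function expects, and the sample payload is invented:

    data = {'images': [], 'videos': []}
    oembed = {'type': 'photo', 'provider_name': 'Flickr', 'title': 'Sunset',
              'thumbnail_url': 'http://example.com/t.jpg',
              'thumbnail_width': '150', 'thumbnail_height': '100'}

    parse_oembed_data(oembed, data)
    # data now carries the raw payload under 'oembed', the title, and one image
    # entry whose width/height have been coerced to ints (150, 100)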
meta = FILTER_MAPS['meta'][source] meta_map = meta['map'] html = soup.find_all('meta', {meta['key']: meta['pattern']}) image = {} video = {} for line in html: prop = line.get(meta['key']) value = line.get('content') _prop = meta_map.get(prop) if prop in meta_map and _prop and not data.get(_prop): # this could be bad in cases where any values that the property # is mapped up to (i.e. "src", "type", etc) are found in ``data`` # TODO: Figure out a smoother way to prevent conflicts ^^^^^^^^ image_prop = meta['image_key'] video_prop = meta['video_key'] if prop.startswith((image_prop, video_prop)) and \ prop.endswith(('width', 'height')): if prop.endswith(('width', 'height')): value = convert_to_int(value) if meta_map[prop] == 'locale': locale = normalize_locale(value) if locale: data['locale'] = locale if prop == 'keywords': if isinstance(value, str): value = [v.strip() for v in value.split(',')] else: value = [] if image_prop and prop.startswith(image_prop) and value: # og:image URLs can be relative if prop == 'og:image' and url: value = urljoin(url, value) image[meta_map[prop]] = value elif video_prop and prop.startswith(video_prop) and value: video[meta_map[prop]] = value else: data[meta_map[prop]] = value if image: image['type'] = image_prop data['images'].append(image) if video: data['videos'].append(video)
def _filter_meta_data(self, source, soup, data, url=None)
This method filters the web page content for meta tags that match patterns given in the ``FILTER_MAPS`` :param source: The key of the meta dictionary in ``FILTER_MAPS['meta']`` :type source: string :param soup: BeautifulSoup instance to find meta tags :type soup: instance :param data: The response dictionary to manipulate :type data: (dict)
3.661587
3.510315
1.043094