code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
stats = Stats(self.config)
return stats.usage(zone=self.zone, callback=callback, errback=errback,
**kwargs) | def usage(self, callback=None, errback=None, **kwargs) | Return the current usage information for this zone
:rtype: dict
:return: usage information | 5.50435 | 7.030878 | 0.782882 |
if not reload and self.data:
raise MonitorException('monitor already loaded')
def success(result, *args):
self.data = result
if callback:
return callback(self)
else:
return self
return self._rest.retrieve(self.data['id'], callback=success,
errback=errback) | def load(self, callback=None, errback=None, reload=False) | Load monitor data from the API. | 5.0267 | 4.442871 | 1.131408 |
return self._rest.delete(self.data['id'], callback=callback, errback=errback) | def delete(self, callback=None, errback=None) | Delete the monitor | 4.724174 | 5.052372 | 0.935041 |
if not self.data:
raise MonitorException('monitor not loaded')
def success(result, *args):
self.data = result
if callback:
return callback(self)
else:
return self
return self._rest.update(self.data['id'], {}, callback=success, errback=errback, **kwargs) | def update(self, callback=None, errback=None, **kwargs) | Update monitor configuration. Pass a list of keywords and their values to
update. | 4.463933 | 4.14776 | 1.076227 |
import ns1.rest.zones
return ns1.rest.zones.Zones(self.config) | def zones(self) | Return a new raw REST interface to zone resources
:rtype: :py:class:`ns1.rest.zones.Zones` | 7.587127 | 3.700002 | 2.050574 |
import ns1.rest.records
return ns1.rest.records.Records(self.config) | def records(self) | Return a new raw REST interface to record resources
:rtype: :py:class:`ns1.rest.records.Records` | 8.678324 | 3.742047 | 2.319138 |
import ns1.rest.ipam
return ns1.rest.ipam.Addresses(self.config) | def addresses(self) | Return a new raw REST interface to address resources
:rtype: :py:class:`ns1.rest.ipam.Addresses` | 8.336314 | 4.005634 | 2.081147 |
import ns1.rest.ipam
return ns1.rest.ipam.Networks(self.config) | def networks(self) | Return a new raw REST interface to network resources
:rtype: :py:class:`ns1.rest.ipam.Networks` | 8.845332 | 3.458195 | 2.557789 |
import ns1.rest.ipam
return ns1.rest.ipam.Scopegroups(self.config) | def scope_groups(self) | Return a new raw REST interface to scope_group resources
:rtype: :py:class:`ns1.rest.ipam.Scopegroups` | 11.77113 | 3.243307 | 3.62936 |
import ns1.rest.stats
return ns1.rest.stats.Stats(self.config) | def stats(self) | Return a new raw REST interface to stats resources
:rtype: :py:class:`ns1.rest.stats.Stats` | 9.966437 | 3.614544 | 2.757315 |
import ns1.rest.data
return ns1.rest.data.Source(self.config) | def datasource(self) | Return a new raw REST interface to datasource resources
:rtype: :py:class:`ns1.rest.data.Source` | 13.499666 | 3.956154 | 3.412321 |
import ns1.rest.data
return ns1.rest.data.Feed(self.config) | def datafeed(self) | Return a new raw REST interface to feed resources
:rtype: :py:class:`ns1.rest.data.Feed` | 13.193658 | 4.137201 | 3.18903 |
import ns1.rest.monitoring
return ns1.rest.monitoring.Monitors(self.config) | def monitors(self) | Return a new raw REST interface to monitors resources
:rtype: :py:class:`ns1.rest.monitoring.Monitors` | 9.265704 | 3.422573 | 2.707234 |
import ns1.rest.monitoring
return ns1.rest.monitoring.NotifyLists(self.config) | def notifylists(self) | Return a new raw REST interface to notify list resources
:rtype: :py:class:`ns1.rest.monitoring.NotifyLists` | 12.463534 | 3.648507 | 3.416064 |
import ns1.rest.account
return ns1.rest.account.Plan(self.config) | def plan(self) | Return a new raw REST interface to account plan
:rtype: :py:class:`ns1.rest.account.Plan` | 14.076788 | 3.980409 | 3.536518 |
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.load(callback=callback, errback=errback) | def loadZone(self, zone, callback=None, errback=None) | Load an existing zone into a high level Zone object.
:param str zone: zone name, like 'example.com'
:rtype: :py:class:`ns1.zones.Zone` | 4.130632 | 3.321185 | 1.243723 |
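As an illustration of how loadZone is reached in practice, here is a minimal usage sketch. The top-level client class `NS1`, the `Config` object and its `createFromAPIKey` helper are assumptions about the surrounding SDK, and the API key is a placeholder.

```python
# Hedged sketch: NS1, Config and createFromAPIKey are assumed from the surrounding SDK.
from ns1 import NS1, Config

config = Config()
config.createFromAPIKey('<YOUR_API_KEY>')  # placeholder key
api = NS1(config=config)

zone = api.loadZone('example.com')  # returns a high level ns1.zones.Zone
print(zone.data)
```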
import ns1.zones
return zone.search(q, has_geo, callback=callback, errback=errback) | def searchZone(self, zone, q=None, has_geo=False, callback=None, errback=None) | Search a zone for a given search query (e.g., for geo data, etc.)
:param zone: NOT a string like loadZone - an already loaded ns1.zones.Zone, like one returned from loadZone
:return: | 5.836435 | 4.168473 | 1.400138 |
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.create(zoneFile=zoneFile, callback=callback,
errback=errback, **kwargs) | def createZone(self, zone, zoneFile=None, callback=None, errback=None,
**kwargs) | Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone` | 3.026054 | 2.991481 | 1.011557 |
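A short sketch of creating a zone with the SOA keywords listed in the docstring above, reusing the hypothetical `api` client from the loadZone sketch:

```python
# Hedged sketch: 'api' is the NS1 client from the earlier loadZone example.
zone = api.createZone('example.com',
                      refresh=3600,    # refresh ttl
                      retry=300,       # retry time
                      expiry=604800,   # expiry ttl
                      nx_ttl=3600)     # nxdomain TTL
```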
import ns1.zones
if zone is None:
# extract from record string
parts = domain.split('.')
if len(parts) <= 2:
zone = '.'.join(parts)
else:
zone = '.'.join(parts[1:])
z = ns1.zones.Zone(self.config, zone)
return z.loadRecord(domain, type, callback=callback, errback=errback,
**kwargs) | def loadRecord(self, domain, type, zone=None, callback=None,
errback=None, **kwargs) | Load an existing record into a high level Record object.
:param str domain: domain name of the record in the zone, for example \
'myrecord'. You may leave off the zone, since it must be \
specified in the zone parameter
:param str type: record type, such as 'A', 'MX', 'AAAA', etc.
:param str zone: zone name, like 'example.com'
:rtype: :py:class:`ns1.records` | 3.029067 | 2.914892 | 1.03917 |
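The zone-inference branch above (splitting the domain on '.') can be exercised as follows, again with the hypothetical `api` client:

```python
# Hedged sketch: 'api' is the NS1 client from the earlier examples.
rec = api.loadRecord('myrecord', 'A', zone='example.com')  # zone given explicitly

# With no zone argument, 'www.example.com' is split on '.' and the zone
# is inferred as 'example.com'.
rec2 = api.loadRecord('www.example.com', 'A')
```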
import ns1.monitoring
monitors_list = self.monitors().list(callback, errback)
return [ns1.monitoring.Monitor(self.config, m) for m in monitors_list] | def loadMonitors(self, callback=None, errback=None, **kwargs) | Load all monitors | 5.731112 | 5.92936 | 0.966565 |
import ns1.monitoring
monitor = ns1.monitoring.Monitor(self.config)
return monitor.create(callback=callback, errback=errback, **kwargs) | def createMonitor(self, callback=None, errback=None, **kwargs) | Create a monitor | 4.310441 | 4.702849 | 0.91656 |
import ns1.ipam
network = ns1.ipam.Network(self.config, id=id)
return network.load(callback=callback, errback=errback) | def loadNetworkbyID(self, id, callback=None, errback=None) | Load an existing Network by ID into a high level Network object
:param int id: id of an existing Network | 4.925172 | 5.771763 | 0.853322 |
import ns1.ipam
network = ns1.ipam.Network(self.config, name=name)
return network.load(callback=callback, errback=errback) | def loadNetworkbyName(self, name, callback=None, errback=None) | Load an existing Network by name into a high level Network object
:param str name: Name of an existing Network | 4.842845 | 5.979744 | 0.809875 |
import ns1.ipam
if scope_group_id is not None:
scope_group = ns1.ipam.Scopegroup(self.config, id=scope_group_id).update()
kwargs['scope_group'] = scope_group
network = ns1.ipam.Network(self.config, name=name)
return network.create(callback=callback, errback=errback, **kwargs) | def createNetwork(self, name, scope_group_id=None, callback=None, errback=None, **kwargs) | Create a new Network
For the list of keywords available, see :attr:`ns1.rest.ipam.Networks.INT_FIELDS` and :attr:`ns1.rest.ipam.Networks.PASSTHRU_FIELDS`
:param str name: Name of the Network to be created
:param int scope_group: (Optional) id of an existing scope group to associate with | 2.582061 | 2.912635 | 0.886503 |
import ns1.ipam
address = ns1.ipam.Address(self.config, id=id)
return address.load(callback=callback, errback=errback) | def loadAddressbyID(self, id, callback=None, errback=None) | Load an existing address by ID into a high level Address object
:param int id: id of an existing Address | 4.789474 | 5.748926 | 0.833108 |
import ns1.ipam
network = ns1.ipam.Network(self.config, id=network_id).load()
address = ns1.ipam.Address(self.config, prefix=prefix, type=type, network=network)
return address.load(callback=callback, errback=errback) | def loadAddressbyPrefix(self, prefix, type, network_id, callback=None, errback=None) | Load an existing address by prefix, type and network into a high level Address object
:param str prefix: CIDR prefix of an existing Address
:param str type: Type of address assignment (planned, assignment or host)
:param int network_id: network_id associated with the address | 3.019285 | 3.229467 | 0.934917 |
import ns1.ipam
network = ns1.ipam.Network(self.config, id=network_id).load()
address = ns1.ipam.Address(self.config, prefix=prefix, type=type, network=network)
return address.create(callback=callback, errback=errback, **kwargs) | def createAddress(self, prefix, type, network_id, callback=None, errback=None, **kwargs) | Create a new Address
For the list of keywords available, see :attr:`ns1.rest.ipam.Addresses.INT_FIELDS` and :attr:`ns1.rest.ipam.Addresses.PASSTHRU_FIELDS`
:param str prefix: CIDR prefix of the address to be created
:param str type: Type of address assignment (planned, assignment or host)
:param int network_id: network_id associated with the address | 2.697648 | 2.558895 | 1.054224 |
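A hedged sketch tying the IPAM helpers above together; reading the new network's id via `network.data['id']` is an assumption about the high-level Network object's shape:

```python
# Hedged sketch: 'api' as before; the .data['id'] access is an assumption.
network = api.createNetwork('corp-lan')
address = api.createAddress('10.0.0.0/24', 'planned', network.data['id'])
```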
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(self.config, id=id)
return scope_group.load(callback=callback, errback=errback) | def loadScopeGroupbyID(self, id, callback=None, errback=None) | Load an existing Scope Group by ID into a high level Scope Group object
:param int id: id of an existing ScopeGroup | 4.787018 | 5.647657 | 0.847611 |
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(self.config, name=name, service_group_id=service_group_id)
return scope_group.load(callback=callback, errback=errback) | def loadScopeGroupbyName(self, name, service_group_id, callback=None, errback=None) | Load an existing Scope Group by name and service group id into a high level Scope Group object
:param str name: Name of an existing Scope Group
:param int service_group_id: id of the service group the Scope group is associated with | 3.263908 | 4.354698 | 0.749514 |
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(self.config, name=name, service_group_id=service_group_id)
return scope_group.create(dhcp4=dhcp4, dhcp6=dhcp6, callback=callback, errback=errback) | def createScopeGroup(self, name, service_group_id, dhcp4, dhcp6, callback=None, errback=None) | Create a new Scope Group
For the list of keywords available, see :attr:`ns1.rest.ipam.Scopegroups.INT_FIELDS` and :attr:`ns1.rest.ipam.Scopegroups.PASSTHRU_FIELDS`
:param str name: Name of the Scope Group to be created
:param int service_group_id: id of the service group the Scope group is associated with
:param ns1.ipam.DHCPOptions dhcp4: DHCPOptions object that contains the options for dhcpv4
:param ns1.ipam.DHCPOptions dhcp6: DHCPOptions object that contains the options for dhcpv6
from ns1.ipam import DHCPOptions
options = {}
for option in DHCPOptions.OPTIONS[address_family]:
options[option] = ""
return options | def generateDHCPOptionsTemplate(self, address_family) | Generate boilerplate dictionary to hold dhcp options
:param str address_family: dhcpv4 or dhcpv6
:return: dict containing valid option set for address family | 6.140594 | 5.401672 | 1.136795 |
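A small sketch of how this template generator combines with the loadDHCPOptions helper shown in the next row; the 'routers' key is illustrative only, since the allowed keys come from `ns1.ipam.DHCPOptions.OPTIONS`:

```python
# Hedged sketch: 'api' as before; 'routers' is an illustrative option name.
template = api.generateDHCPOptionsTemplate('dhcpv4')  # every allowed option -> ''
template['routers'] = '192.0.2.1'                     # fill in only what you need
dhcp4 = api.loadDHCPOptions('dhcpv4', template)
```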
import ns1.ipam
return ns1.ipam.DHCPOptions(address_family, options) | def loadDHCPOptions(self, address_family, options) | Create a high level DHCPOptions object
:param str address_family: Address family of the options. Can be either dhcpv4 or dhcpv6
:param dict options: Dictionary containing the option set to apply for this address family. Note: only those specified will be applied. Allowed options can be found in :attr:`ns1.ipam.DHCPOptions.OPTIONS` | 8.155275 | 3.304753 | 2.467741 |
if not comments:
return
it = iter(comments)
first = next(it)
extra_path_items = imap(_mark_as_root_path, first.root_path)
return chain(extra_path_items, [first], it) | def fill_tree(comments) | Insert extra comments in the comments list, so that the root path of the first comment is always visible.
Use this in comments' pagination to fill in the tree information.
The inserted comments have an ``added_path`` attribute. | 7.9945 | 7.641448 | 1.046202 |
if not comments:
return
it = iter(comments)
# get the first item, this will fail if no items !
old = next(it)
# first item starts a new thread
old.open = True
last = set()
for c in it:
# if this comment has a parent, store its last child for future reference
if old.last_child_id:
last.add(old.last_child_id)
# this is the last child, mark it
if c.pk in last:
c.last = True
# increase the depth
if c.depth > old.depth:
c.open = True
else: # c.depth <= old.depth
# close some depths
old.close = list(range(old.depth - c.depth))
# new thread
if old.root_id != c.root_id:
# close even the top depth
old.close.append(len(old.close))
# and start a new thread
c.open = True
# empty the last set
last = set()
# iterate
yield old
old = c
old.close = range(old.depth)
yield old | def annotate_tree_properties(comments) | iterate through nodes and adds some magic properties to each of them
representing opening list of children and closing it | 5.342868 | 5.192826 | 1.028894 |
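A hedged sketch of how the two comment-tree helpers above are typically chained when rendering one page of a threaded comment list; `comments_on_page` is a hypothetical, tree-ordered list of comment objects:

```python
# Hedged sketch: comments_on_page is a hypothetical tree-ordered page of comments.
page = list(annotate_tree_properties(fill_tree(comments_on_page)))
# Each item now carries .open / .close / .last, which drive the opening and
# closing of nested lists in the template; items inserted by fill_tree also
# carry .added_path.
```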
'''Runs spades on all kmers. Each kmer is a separate run because SPAdes dies if any kmer does
not work. Chooses the 'best' assembly to be the one with the biggest N50'''
n50 = {}
kmer_to_dir = {}
for k in self.spades_kmers:
tmpdir = tempfile.mkdtemp(prefix=self.outdir + '.tmp.spades.' + str(k) + '.', dir=os.getcwd())
kmer_to_dir[k] = tmpdir
ok, errs = self.run_spades_once(k, tmpdir)
if ok:
contigs_fasta = os.path.join(tmpdir, 'contigs.fasta')
contigs_fai = contigs_fasta + '.fai'
common.syscall(self.samtools.exe() + ' faidx ' + contigs_fasta, verbose=self.verbose)
stats = pyfastaq.tasks.stats_from_fai(contigs_fai)
if stats['N50'] != 0:
n50[k] = stats['N50']
if stop_at_first_success:
break
if len(n50) > 0:
if self.verbose:
print('[assemble]\tkmer\tN50')
for k in sorted(n50):
print('[assemble]', k, n50[k], sep='\t')
best_k = None
for k in sorted(n50):
if best_k is None or n50[k] >= n50[best_k]:
best_k = k
assert best_k is not None
for k, directory in kmer_to_dir.items():
if k == best_k:
if self.verbose:
print('[assemble] using assembly with kmer', k)
os.rename(directory, self.outdir)
else:
shutil.rmtree(directory)
else:
raise Error('Error running SPAdes. Output directories are:\n ' + '\n '.join(kmer_to_dir.values()) + '\nThe reason why should be in the spades.log file in each directory.') | def run_spades(self, stop_at_first_success=False) | Runs spades on all kmers. Each kmer is a separate run because SPAdes dies if any kmer does
not work. Chooses the 'best' assembly to be the one with the biggest N50 | 3.587765 | 2.798556 | 1.282006 |
'''Runs canu instead of spades'''
cmd = self._make_canu_command(self.outdir,'canu')
ok, errs = common.syscall(cmd, verbose=self.verbose, allow_fail=False)
if not ok:
raise Error('Error running Canu.')
original_contigs = os.path.join(self.outdir, 'canu.contigs.fasta')
renamed_contigs = os.path.join(self.outdir, 'contigs.fasta')
Assembler._rename_canu_contigs(original_contigs, renamed_contigs)
original_gfa = os.path.join(self.outdir, 'canu.contigs.gfa')
renamed_gfa = os.path.join(self.outdir, 'contigs.gfa')
os.rename(original_gfa, renamed_gfa) | def run_canu(self) | Runs canu instead of spades | 3.256578 | 2.999079 | 1.085859 |
'''Returns Fasta or Fastq sequence from pysam aligned read'''
if read.qual is None or ignore_quality:
if qual is None or ignore_quality:
seq = pyfastaq.sequences.Fasta(read.qname, common.decode(read.seq))
else:
seq = pyfastaq.sequences.Fastq(read.qname, common.decode(read.seq), qual * read.query_length)
else:
if qual is None:
seq = pyfastaq.sequences.Fastq(read.qname, common.decode(read.seq), common.decode(read.qual))
else:
seq = pyfastaq.sequences.Fastq(read.qname, common.decode(read.seq), qual * read.query_length)
if read.is_reverse and revcomp:
seq.revcomp()
return seq | def aligned_read_to_read(read, revcomp=True, qual=None, ignore_quality=False) | Returns Fasta or Fastq sequence from pysam aligned read | 2.329325 | 1.991512 | 1.169626 |
'''Gets the length of each reference sequence from the header of the bam. Returns dict name => length'''
sam_reader = pysam.Samfile(self.bam, "rb")
return dict(zip(sam_reader.references, sam_reader.lengths)) | def _get_ref_lengths(self) | Gets the length of each reference sequence from the header of the bam. Returns dict name => length | 5.349196 | 2.739371 | 1.952709 |
'''If contigs_to_use is a set, returns that set. If it's None, returns an empty set.
Otherwise, assumes it's a file name, and gets names from the file'''
if type(contigs_to_use) == set:
return contigs_to_use
elif contigs_to_use is None:
return set()
else:
f = pyfastaq.utils.open_file_read(contigs_to_use)
contigs_to_use = set([line.rstrip() for line in f])
pyfastaq.utils.close(f)
return contigs_to_use | def _get_contigs_to_use(self, contigs_to_use) | If contigs_to_use is a set, returns that set. If it's None, returns an empty set.
Otherwise, assumes it's a file name, and gets names from the file | 2.666662 | 1.692769 | 1.575326 |
'''Checks that the set of contigs to use are all in the reference
fasta lengths dict made by self._get_ref_lengths()'''
if self.contigs_to_use is None:
return True
for contig in self.contigs_to_use:
if contig not in ref_dict:
raise Error('Requested to use contig "' + contig + '", but not found in input BAM file "' + self.bam + '"')
return True | def _check_contigs_to_use(self, ref_dict) | Checks that the set of contigs to use are all in the reference
fasta lengths dict made by self._get_ref_lengths() | 4.663743 | 2.620947 | 1.779412 |
'''Gets all reads from contig called "contig" and writes to fout'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(contig):
print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout) | def _all_reads_from_contig(self, contig, fout) | Gets all reads from contig called "contig" and writes to fout | 5.61108 | 4.554563 | 1.231969 |
'''Writes all unmapped reads to fout'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(until_eof=True):
if read.is_unmapped:
print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout) | def _get_all_unmapped_reads(self, fout) | Writes all unmapped reads to fout | 4.336698 | 4.267573 | 1.016198 |
'''Get all reads from contig, but breaks them all at the given position (0-based) in the reference. Writes to fout. Currently approximate about where it breaks (ignores indels in the alignment)'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(contig):
seqs = []
if read.pos < position < read.reference_end - 1:
split_point = position - read.pos
if split_point - 1 >= min_read_length:
sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(0, split_point)
sequence.id += '.left'
seqs.append(sequence)
if read.query_length - split_point >= min_read_length:
sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(split_point, read.query_length)
sequence.id += '.right'
seqs.append(sequence)
else:
seqs.append(mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out))
for seq in seqs:
if read.is_reverse:
seq.revcomp()
print(seq, file=fout) | def _break_reads(self, contig, position, fout, min_read_length=250) | Get all reads from contig, but breaks them all at the given position (0-based) in the reference. Writes to fout. Currently approximate about where it breaks (ignores indels in the alignment) | 3.474294 | 2.297097 | 1.512472 |
'''Writes reads not mapping to the given region of contig, start and end as per python convention'''
sam_reader = pysam.Samfile(self.bam, "rb")
exclude_interval = pyfastaq.intervals.Interval(start, end - 1)
for read in sam_reader.fetch(contig):
read_interval = pyfastaq.intervals.Interval(read.pos, read.reference_end - 1)
if not read_interval.intersects(exclude_interval):
print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout) | def _exclude_region(self, contig, start, end, fout) | Writes reads not mapping to the given region of contig, start and end as per python convention | 4.931983 | 3.408373 | 1.44702 |
'''Writes reads mapping to given region of contig, trimming part of read not in the region'''
sam_reader = pysam.Samfile(self.bam, "rb")
trimming_end = (start == 0)
for read in sam_reader.fetch(contig, start, end):
read_interval = pyfastaq.intervals.Interval(read.pos, read.reference_end - 1)
seq = mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out, revcomp=False)
if trimming_end:
bases_off_start = 0
bases_off_end = max(0, read.reference_end - 1 - end)
#seq.seq = seq.seq[:read.query_alignment_end - bases_off_end]
seq = seq.subseq(0, read.query_alignment_end - bases_off_end)
else:
bases_off_start = max(0, start - read.pos + 1)
#seq.seq = seq.seq[bases_off_start + read.query_alignment_start:]
seq = seq.subseq(bases_off_start + read.query_alignment_start, len(seq))
if read.is_reverse:
seq.revcomp()
if len(seq) >= min_length:
print(seq, file=fout) | def _get_region(self, contig, start, end, fout, min_length=250) | Writes reads mapping to given region of contig, trimming part of read not in the region | 3.395677 | 2.887498 | 1.175993 |
'''Returns a set of names from file called filename. If filename is None, returns an empty set'''
if filename is None:
return set()
with open(filename) as f:
return {line.rstrip() for line in f} | def _get_contigs_to_keep(self, filename) | Returns a set of names from file called filename. If filename is None, returns an empty set | 4.872041 | 2.396878 | 2.032661 |
'''Writes a new file with small contigs removed.
Returns sets of all names and of the names of removed contigs'''
removed = set()
all_names = set()
if keep is None:
keep = set()
file_reader = pyfastaq.sequences.file_reader(infile)
fout = pyfastaq.utils.open_file_write(outfile)
for seq in file_reader:
all_names.add(seq.id)
if len(seq) >= self.min_contig_length or seq.id in keep:
print(seq, file=fout)
else:
removed.add(seq.id)
pyfastaq.utils.close(fout)
return all_names, removed | def _remove_small_contigs(self, infile, outfile, keep=None) | Writes a new file with small contigs removed.
Returns sets of all names and of the names of removed contigs | 3.188091 | 2.423959 | 1.315241 |
'''Returns two dictionaries:
1) name=>contig length.
2) dictionary of nucmer hits (ignoring self matches):
contig name => list of hits'''
hits = {}
lengths = {}
file_reader = pymummer.coords_file.reader(infile)
for al in file_reader:
if al.qry_name == al.ref_name:
continue
elif al.qry_name not in hits:
hits[al.qry_name] = []
hits[al.qry_name].append(al)
lengths[al.qry_name] = al.qry_length
lengths[al.ref_name] = al.ref_length
return lengths, hits | def _load_nucmer_hits(self, infile) | Returns two dictionaries:
1) name=>contig length.
2) dictionary of nucmer hits (ignoring self matches):
contig name => list of hits | 4.745626 | 2.540259 | 1.868166 |
'''Returns True iff (the query contig is contained in the reference contig and
the query contig is not flagged to be kept)'''
return (
hit.qry_name not in self.contigs_to_keep
and hit.qry_name != hit.ref_name
and (100 * hit.hit_length_qry / hit.qry_length >= self.min_contig_percent_match)
and hit.percent_identity >= self.nucmer_min_id
) | def _contains(self, hit) | Returns True iff (the query contig is contained in the reference contig and
the query contig is not flagged to be kept) | 7.209024 | 3.5866 | 2.009988 |
'''Given a list of hits, all with same query,
returns a set of the contigs containing that query'''
return {hit.ref_name for hit in hits if self._contains(hit)} | def _containing_contigs(self, hits) | Given a list of hits, all with same query,
returns a set of the contigs containing that query | 8.940447 | 3.370397 | 2.652639 |
'''Given dictionary of nucmer hits (made by self._load_nucmer_hits()), returns a dictionary.
key=contig name. Value = set of contigs that contain the key.'''
containing = {}
for qry_name in hits_dict:
d = self._containing_contigs(hits_dict[qry_name])
if len(d):
containing[qry_name] = d
return containing | def _get_containing_contigs(self, hits_dict) | Given dictionary of nucmer hits (made by self._load_nucmer_hits()), returns a dictionary.
key=contig name. Value = set of contigs that contain the key. | 6.017349 | 2.357439 | 2.552494 |
'''containing_contigs is a dict:
key=contig name. Value = set of contigs that contain the key.
Returns all contigs that contain the contig called "name"'''
contains_name = set()
# failsafe to prevent infinite recursion
if max_depth < 0:
return contains_name
if name in containing_contigs:
for containing_contig in containing_contigs[name]:
# if we have a contains b and b contains a, then this stops infinite recursion
if containing_contig==exclude:
continue
contains_name.add(containing_contig)
new_names = self._get_all_containing(containing_contigs, containing_contig, exclude=name,max_depth=max_depth-1)
new_names.discard(name)
contains_name.update(new_names)
return contains_name | def _get_all_containing(self, containing_contigs, name, exclude=None, max_depth=10) | containing_contigs is a dict:
key=contig name. Value = set of contigs that contain the key.
Returns all contigs that contain the contig called "name" | 4.75727 | 3.051735 | 1.558874 |
'''This uses a contained in b, and b contained in c to force a contained in c.
Just in case a contained in c wasn't already found by nucmer'''
for name in containing_contigs:
containing_contigs[name] = self._get_all_containing(containing_contigs, name)
return containing_contigs | def _expand_containing_using_transitivity(self, containing_contigs) | This uses a contained in b, and b contained in c to force a contained in c.
Just in case a contained in c wasn't already found by nucmer | 12.388788 | 2.523209 | 4.909933 |
'''Input is a list of sets. Merges any intersecting sets in the list'''
found = True
while found:
found = False
to_intersect = None
for i in range(len(sets)):
for j in range(len(sets)):
if i == j:
continue
elif sets[i].intersection(sets[j]):
to_intersect = i, j
break
if to_intersect is not None:
break
if to_intersect is not None:
found = True
sets[i].update(sets[j])
sets.pop(j)
return sets | def _collapse_list_of_sets(self, sets) | Input is a list of sets. Merges any intersecting sets in the list | 2.813408 | 2.272372 | 1.238093 |
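A tiny worked example of the merging behaviour (illustrative values only):

```python
# Illustrative only: intersecting sets are merged until all remaining sets are disjoint.
sets = [{'a', 'b'}, {'b', 'c'}, {'d'}]
# _collapse_list_of_sets(sets) merges the first two (they share 'b') and
# returns [{'a', 'b', 'c'}, {'d'}].
```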
'''Input is a dict:
key=contig name. Value = set of contigs that contain the key.
Returns a list of sets of contigs that are equivalent'''
equivalent_contigs = []
for qry_name, containing in hits_dict.items():
equivalent = set()
for containing_name in containing:
if containing_name in hits_dict and qry_name in hits_dict[containing_name]:
equivalent.add(containing_name)
equivalent.add(qry_name)
if len(equivalent):
equivalent_contigs.append(equivalent)
equivalent_contigs = self._collapse_list_of_sets(equivalent_contigs)
return equivalent_contigs | def _get_identical_contigs(self, hits_dict) | Input is a dict:
key=contig name. Value = set of contigs that contain the key.
Returns a list of sets of contigs that are equivalent | 3.543985 | 2.252579 | 1.573301 |
'''Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length.'''
longest_name = None
max_length = -1
for name in contig_set:
if contig_lengths[name] > max_length:
longest_name = name
max_length = contig_lengths[name]
assert max_length != -1
assert longest_name is not None
return longest_name | def _longest_contig(self, contig_set, contig_lengths) | Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length. | 2.838391 | 1.671529 | 1.69808 |
'''Input is dictionary of containing contigs made by self._expand_containing_using_transitivity().
Removes redundant identical contigs, leaving one representative (the longest) of
each set of identical contigs.
Returns new version of dictionary, and a dictionary of contig name => contig it was replaced with'''
identical_contigs = self._get_identical_contigs(containing_contigs)
to_replace = {} # contig name => name to replace it with
for contig_set in identical_contigs:
longest_contig = self._longest_contig(contig_set, contig_lengths)
for name in contig_set - {longest_contig}:
assert name not in to_replace
to_replace[name] = longest_contig
for name, replace_with in to_replace.items():
if replace_with not in containing_contigs:
containing_contigs[replace_with] = set()
if name in containing_contigs:
containing_contigs[replace_with].update(containing_contigs[name])
del containing_contigs[name]
to_delete = set()
for name, names_set in containing_contigs.items():
assert name not in to_replace
new_set = {to_replace.get(x, x) for x in names_set}
new_set.discard(name)
if len(new_set) > 0:
containing_contigs[name] = new_set
else:
to_delete.add(name)
for name in to_delete:
del containing_contigs[name]
return containing_contigs, to_replace | def _remove_identical_contigs(self, containing_contigs, contig_lengths) | Input is dictionary of containing contigs made by self._expand_containing_using_transitivity().
Removes redundant identical contigs, leaving one representative (the longest) of
each set of identical contigs.
Returns new version of dictionary, and a dictionary of contig name => contig it was replaced with | 3.008036 | 1.813605 | 1.658594 |
'''Dies if any files in the list of filenames does not exist'''
files_not_found = [x for x in filenames if not os.path.exists(x)]
if len(files_not_found):
for filename in files_not_found:
print('File not found: "', filename, '"', sep='', file=sys.stderr)
raise Error('File(s) not found. Cannot continue') | def check_files_exist(filenames) | Dies if any files in the list of filenames does not exist | 3.771079 | 2.908071 | 1.296763 |
'''Returns a dictionary of contig_name -> pyfastaq.Sequences.Fasta object'''
contigs = {}
pyfastaq.tasks.file_to_dict(self.contigs_fasta, contigs)
return contigs | def get_contigs(self) | Returns a dictionary of contig_name -> pyfastaq.Sequences.Fasta object | 6.239522 | 3.238374 | 1.926745 |
'''Returns a set of the contig names that are circular'''
if self.assembler == 'spades':
if self.contigs_fastg is not None:
return self._circular_contigs_from_spades_before_3_6_1(self.contigs_fastg)
elif None not in [self.contigs_paths, self.assembly_graph_fastg]:
return self._circular_contigs_from_spades_after_3_6_1(self.assembly_graph_fastg, self.contigs_paths)
else:
return set()
elif self.assembler == 'canu':
return self._circular_contigs_from_canu_gfa(self.contigs_gfa)
else:
return set() | def circular_contigs(self) | Returns a set of the contig names that are circular | 3.053811 | 2.875953 | 1.061843 |
'''Run nucmer of new assembly vs original assembly'''
n = pymummer.nucmer.Runner(
ref,
qry,
outfile,
min_id=self.nucmer_min_id,
min_length=self.nucmer_min_length,
diagdiff=self.nucmer_diagdiff,
maxmatch=True,
breaklen=self.nucmer_breaklen,
simplify=True,
verbose=self.verbose
)
n.run() | def _run_nucmer(self, ref, qry, outfile) | Run nucmer of new assembly vs original assembly | 4.951822 | 4.021499 | 1.231337 |
'''Returns dict ref name => list of nucmer hits from infile'''
hits = {}
file_reader = pymummer.coords_file.reader(infile)
for al in file_reader:
if al.ref_name not in hits:
hits[al.ref_name] = []
hits[al.ref_name].append(al)
return hits | def _load_nucmer_hits(self, infile) | Returns dict ref name => list of nucmer hits from infile | 5.090243 | 3.398654 | 1.497724 |
'''Input: list of nucmer hits. Output: dictionary, keys are query names, values are lists of hits'''
d = {}
for hit in hits:
if hit.qry_name not in d:
d[hit.qry_name] = []
d[hit.qry_name].append(hit)
return d | def _hits_hashed_by_query(self, hits) | Input: list of nucmer hits. Output: dictionary, keys are query names, values are lists of hits | 3.026726 | 1.936853 | 1.562703 |
'''Input: list of nucmer hits. Returns the longest hit, taking hit length on the reference'''
if len(nucmer_hits) == 0:
return None
max_length = None
longest_hit = None
for hit in nucmer_hits:
if max_length is None or hit.hit_length_ref > max_length:
max_length = hit.hit_length_ref
longest_hit = copy.copy(hit)
assert longest_hit is not None
return longest_hit | def _get_longest_hit_by_ref_length(self, nucmer_hits) | Input: list of nucmer hits. Returns the longest hit, taking hit length on the reference | 2.933447 | 2.01355 | 1.456853 |
'''Returns True iff the hit is "close enough" to the start of the reference sequence'''
hit_coords = nucmer_hit.ref_coords()
return hit_coords.start < self.ref_end_tolerance | def _is_at_ref_start(self, nucmer_hit) | Returns True iff the hit is "close enough" to the start of the reference sequence | 7.29959 | 4.198872 | 1.738465 |
'''Returns True iff the hit is "close enough" to the end of the reference sequence'''
hit_coords = nucmer_hit.ref_coords()
return hit_coords.end >= nucmer_hit.ref_length - self.ref_end_tolerance | def _is_at_ref_end(self, nucmer_hit) | Returns True iff the hit is "close enough" to the end of the reference sequence | 5.264777 | 3.419191 | 1.539773 |
'''Returns True iff the hit is "close enough" to the start of the query sequence'''
hit_coords = nucmer_hit.qry_coords()
return hit_coords.start < self.qry_end_tolerance | def _is_at_qry_start(self, nucmer_hit) | Returns True iff the hit is "close enough" to the start of the query sequence | 6.781253 | 4.004255 | 1.693512 |
'''Returns True iff the hit is "close enough" to the end of the query sequence'''
hit_coords = nucmer_hit.qry_coords()
return hit_coords.end >= nucmer_hit.qry_length - self.qry_end_tolerance | def _is_at_qry_end(self, nucmer_hit) | Returns True iff the hit is "close enough" to the end of the query sequence | 4.853111 | 3.262446 | 1.487568 |
'''Returns the hit nearest to the start of the ref sequence from the input list of hits'''
nearest_to_start = hits[0]
for hit in hits[1:]:
if hit.ref_coords().start < nearest_to_start.ref_coords().start:
nearest_to_start = hit
return nearest_to_start | def _get_hit_nearest_ref_start(self, hits) | Returns the hit nearest to the start of the ref sequence from the input list of hits | 3.035825 | 2.287696 | 1.327023 |
'''Returns the hit nearest to the end of the ref sequence from the input list of hits'''
nearest_to_end = hits[0]
for hit in hits[1:]:
if hit.ref_coords().end > nearest_to_end.ref_coords().end:
nearest_to_end = hit
return nearest_to_end | def _get_hit_nearest_ref_end(self, hits) | Returns the hit nearest to the end of the ref sequence from the input list of hits | 3.107542 | 2.332243 | 1.332426 |
'''Input: list of nucmer hits to the same reference. Returns the longest hit to the start of the reference, or None if there is no such hit'''
if hits_to_exclude is None:
hits_to_exclude = set()
hits_at_start = [hit for hit in nucmer_hits if self._is_at_ref_start(hit) and hit not in hits_to_exclude]
return self._get_longest_hit_by_ref_length(hits_at_start) | def _get_longest_hit_at_ref_start(self, nucmer_hits, hits_to_exclude=None) | Input: list of nucmer hits to the same reference. Returns the longest hit to the start of the reference, or None if there is no such hit | 2.92996 | 2.00586 | 1.4607 |
'''Input: list of nucmer hits to the same reference. Returns the longest hit to the end of the reference, or None if there is no such hit'''
if hits_to_exclude is None:
hits_to_exclude = set()
hits_at_end = [hit for hit in nucmer_hits if self._is_at_ref_end(hit) and hit not in hits_to_exclude]
return self._get_longest_hit_by_ref_length(hits_at_end) | def _get_longest_hit_at_ref_end(self, nucmer_hits, hits_to_exclude=None) | Input: list of nucmer hits to the same reference. Returns the longest hit to the end of the reference, or None if there is no such hit | 2.937057 | 2.015497 | 1.457237 |
'''Input: list of nucmer hits to the same query. Returns the longest hit to the start of the query, or None if there is no such hit'''
hits_at_start = [hit for hit in nucmer_hits if self._is_at_qry_start(hit)]
return self._get_longest_hit_by_ref_length(hits_at_start) | def _get_longest_hit_at_qry_start(self, nucmer_hits) | Input: list of nucmer hits to the same query. Returns the longest hit to the start of the query, or None if there is no such hit | 3.902137 | 2.421102 | 1.611719 |
'''Input: list of nucmer hits to the same query. Returns the longest hit to the end of the query, or None if there is no such hit'''
hits_at_end = [hit for hit in nucmer_hits if self._is_at_qry_end(hit)]
return self._get_longest_hit_by_ref_length(hits_at_end) | def _get_longest_hit_at_qry_end(self, nucmer_hits) | Input: list of nucmer hits to the same query. Returns the longest hit to the end of the query, or None if there is no such hit | 3.925383 | 2.429721 | 1.61557 |
'''Returns True iff list of nucmer_hits has a hit longer than min_length, not counting the hits in hits_to_exclude'''
if hits_to_exclude is None:
to_exclude = set()
else:
to_exclude = hits_to_exclude
long_hits = [hit.hit_length_qry for hit in nucmer_hits if hit not in to_exclude and hit.hit_length_qry > min_length]
return len(long_hits) > 0 | def _has_qry_hit_longer_than(self, nucmer_hits, min_length, hits_to_exclude=None) | Returns True iff list of nucmer_hits has a hit longer than min_length, not counting the hits in hits_to_exclude | 2.781496 | 2.025917 | 1.372956 |
'''Returns true iff the two hits can be used to circularise the reference sequence of the hits'''
if not(self._is_at_ref_start(start_hit) or self._is_at_ref_end(end_hit)):
return False
if self._is_at_qry_end(start_hit) \
and self._is_at_qry_start(end_hit) \
and start_hit.on_same_strand() \
and end_hit.on_same_strand():
return True
if self._is_at_qry_start(start_hit) \
and self._is_at_qry_end(end_hit) \
and (not start_hit.on_same_strand()) \
and (not end_hit.on_same_strand()):
return True
return False | def _can_circularise(self, start_hit, end_hit) | Returns true iff the two hits can be used to circularise the reference sequence of the hits | 2.341231 | 1.971283 | 1.187668 |
'''Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits)'''
writing_log_file = None not in [log_fh, log_outprefix]
maybe_circular = {}
all_nucmer_hits = []
for l in nucmer_hits.values():
all_nucmer_hits.extend(l)
nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits)
for ref_name, list_of_hits in nucmer_hits.items():
if writing_log_file:
print(log_outprefix, ref_name, 'Checking ' + str(len(list_of_hits)) + ' nucmer hits', sep='\t', file=log_fh)
longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits)
longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits)
if longest_start_hit == longest_end_hit:
second_longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits, hits_to_exclude={longest_start_hit})
second_longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits, hits_to_exclude={longest_end_hit})
if second_longest_start_hit is not None:
longest_start_hit = self._get_hit_nearest_ref_start([longest_start_hit, second_longest_start_hit])
if second_longest_end_hit is not None:
longest_end_hit = self._get_hit_nearest_ref_end([longest_end_hit, second_longest_end_hit])
if (
longest_start_hit is not None
and longest_end_hit is not None
and longest_start_hit != longest_end_hit
and self._hits_have_same_query(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, ref_name, 'potential pair of nucmer hits for circularization:', sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_start_hit, sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_end_hit, sep='\t', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
nucmer_hits_by_qry[longest_start_hit.qry_name],
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if writing_log_file and has_longer_hit:
print(log_outprefix, ref_name, 'cannot use this pair because longer match was found', sep='\t', file=log_fh)
can_circularise = self._can_circularise(longest_start_hit, longest_end_hit)
if writing_log_file and not can_circularise:
print(log_outprefix, ref_name, 'cannot use this pair because positions/orientations of matches no good', sep='\t', file=log_fh)
if (not has_longer_hit) and can_circularise:
print(log_outprefix, ref_name, 'can use this pair of hits', sep='\t', file=log_fh)
maybe_circular[ref_name] = (longest_start_hit, longest_end_hit)
return maybe_circular | def _get_possible_circular_ref_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None) | Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits) | 2.215576 | 1.992395 | 1.112016 |
'''Returns a new dictionary, with keys from input dict removed if their value was not unique'''
value_counts = collections.Counter(d.values())
new_d = {}
writing_log_file = None not in [log_fh, log_outprefix]
for key in d:
if value_counts[d[key]] == 1:
new_d[key] = d[key]
elif writing_log_file:
print(log_outprefix, 'Reject because non-unique:', d[key], sep='\t', file=log_fh)
return new_d | def _remove_keys_from_dict_with_nonunique_values(self, d, log_fh=None, log_outprefix=None) | Returns a new dictionary, with keys from input dict removed if their value was not unique | 3.765817 | 2.99803 | 1.256097 |
'''Given a nucmer ref_start_hit and ref_end_hit, returns a new contig. Assumes that these hits can be used to circularise the reference contig of the hits using the query contig'''
assert ref_start_hit.ref_name == ref_end_hit.ref_name
assert ref_start_hit.qry_name == ref_end_hit.qry_name
qry_name = ref_start_hit.qry_name
ref_name = ref_start_hit.ref_name
ref_start_coords = ref_start_hit.ref_coords()
ref_end_coords = ref_end_hit.ref_coords()
if ref_start_coords.intersects(ref_end_coords):
new_ctg = copy.copy(self.reassembly_contigs[qry_name])
new_ctg.id = ref_name
return new_ctg
if ref_start_hit.on_same_strand():
qry_start_coords = ref_end_hit.qry_coords()
qry_end_coords = ref_start_hit.qry_coords()
bases = self.original_contigs[ref_name][ref_start_coords.end+1:ref_end_coords.start] + \
self.reassembly_contigs[qry_name][qry_start_coords.start:qry_end_coords.end+1]
return pyfastaq.sequences.Fasta(ref_name, bases)
else:
qry_start_coords = ref_start_hit.qry_coords()
qry_end_coords = ref_end_hit.qry_coords()
tmp_seq = pyfastaq.sequences.Fasta('x', self.reassembly_contigs[qry_name][qry_start_coords.start:qry_end_coords.end+1])
tmp_seq.revcomp()
return pyfastaq.sequences.Fasta(ref_name, self.original_contigs[ref_name][ref_start_coords.end+1:ref_end_coords.start] + tmp_seq.seq) | def _make_circularised_contig(self, ref_start_hit, ref_end_hit) | Given a nucmer ref_start_hit and ref_end_hit, returns a new contig. Assumes that these hits can be used to circularise the reference contig of the hits using the query contig | 2.177003 | 1.827109 | 1.191502 |
'''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits'''
assert start_hit.qry_name == end_hit.qry_name
if start_hit.ref_name == end_hit.ref_name:
return False
if (
(self._is_at_ref_end(start_hit) and start_hit.on_same_strand())
or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand())
):
start_hit_ok = True
else:
start_hit_ok = False
if (
(self._is_at_ref_start(end_hit) and end_hit.on_same_strand())
or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand())
):
end_hit_ok = True
else:
end_hit_ok = False
return start_hit_ok and end_hit_ok | def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit) | Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits | 2.280079 | 1.757636 | 1.297242 |
'''Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit)'''
bridges = {}
writing_log_file = None not in [log_fh, log_outprefix]
for qry_name, hits_to_qry in nucmer_hits.items():
if len(hits_to_qry) < 2:
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': checking nucmer matches', sep='', file=log_fh)
longest_start_hit = self._get_longest_hit_at_qry_start(hits_to_qry)
longest_end_hit = self._get_longest_hit_at_qry_end(hits_to_qry)
if (
None in (longest_start_hit, longest_end_hit)
or longest_start_hit.ref_name == longest_end_hit.ref_name
or self._hits_have_same_reference(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': no potential pairs of hits to merge contigs', sep='', file=log_fh)
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': potential pair of hits to merge contigs...', sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_start_hit, sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_end_hit, sep='', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
hits_to_qry,
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if has_longer_hit and writing_log_file:
print(log_outprefix, '\t', qry_name, ': rejected - there is a longer hit to elsewhere', sep='', file=log_fh)
orientation_ok = self._orientation_ok_to_bridge_contigs(longest_start_hit, longest_end_hit)
if writing_log_file and not orientation_ok:
print(log_outprefix, '\t', qry_name, ': rejected - orientation/distance from ends not correct to make a merge', sep='', file=log_fh)
if orientation_ok and not has_longer_hit:
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': might be used - no longer hits elsewhere and orientation/distance to ends OK', sep='', file=log_fh)
bridges[qry_name] = (longest_start_hit, longest_end_hit)
return bridges | def _get_possible_query_bridging_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None) | Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit) | 2.444502 | 2.218318 | 1.101962 |
'''Input is dict qry_name -> tuple(start hit, end hit) made by _get_possible_query_bridging_contigs. Removes key/values where the value(==ref contig) has a hit at the start, or at the end, to more than one key(==qry contig)'''
ref_hits_start = {}
ref_hits_end = {}
for qry_name, (start_hit, end_hit) in bridges.items():
for hit in start_hit, end_hit:
assert self._is_at_ref_start(hit) or self._is_at_ref_end(hit)
if self._is_at_ref_start(hit):
ref_hits_start[hit.ref_name] = ref_hits_start.get(hit.ref_name, 0) + 1
elif self._is_at_ref_end(hit):
ref_hits_end[hit.ref_name] = ref_hits_end.get(hit.ref_name, 0) + 1
qry_names_to_remove = set()
for qry_name, (start_hit, end_hit) in bridges.items():
for hit in start_hit, end_hit:
remove = False
if (
(self._is_at_ref_start(hit) and ref_hits_start.get(hit.ref_name, 0) > 1)
or (self._is_at_ref_end(hit) and ref_hits_end.get(hit.ref_name, 0) > 1)
):
qry_names_to_remove.add(qry_name)
for name in qry_names_to_remove:
del bridges[name]
return bridges | def _filter_bridging_contigs(self, bridges) | Input is dict qry_name -> tuple(start hit, end hit) made by _get_possible_query_bridging_contigs. Removes key/values where the value(==ref contig) has a hit at the start, or at the end, to more than one key(==qry contig) | 2.45185 | 1.492749 | 1.642507 |
'''Input is dict of nucmer_hits. Makes any possible contig merges.
Returns True iff any merges were made'''
writing_log_file = None not in [log_fh, log_outprefix]
if len(nucmer_hits) == 0:
if writing_log_file:
print(log_outprefix, 'No nucmer hits, so will not make any merges', sep='\t', file=log_fh)
return
all_nucmer_hits = []
for l in nucmer_hits.values():
all_nucmer_hits.extend(l)
nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits)
bridges = self._get_possible_query_bridging_contigs(nucmer_hits_by_qry, log_fh=log_fh, log_outprefix=log_outprefix)
if writing_log_file:
print(log_outprefix, '\tPotential contigs to use for merging: ', ' '.join(sorted(bridges.keys())), sep='', file=log_fh)
bridges = self._filter_bridging_contigs(bridges)
if writing_log_file:
print(log_outprefix, '\tContigs to use for merging after uniqueness filtering: ', ' '.join(sorted(bridges.keys())), sep='', file=log_fh)
merged = set()
made_a_join = False
for qry_name, (start_hit, end_hit) in bridges.items():
if start_hit.ref_name in merged or end_hit.ref_name in merged:
continue
self._merge_bridged_contig_pair(start_hit, end_hit, ref_contigs, qry_contigs, log_fh=log_fh, log_outprefix=log_outprefix)
merged.add(start_hit.ref_name)
merged.add(end_hit.ref_name)
made_a_join = True
if writing_log_file:
print(log_outprefix, '\tMade at least one contig join: ', made_a_join, sep='', file=log_fh)
return made_a_join | def _merge_all_bridged_contigs(self, nucmer_hits, ref_contigs, qry_contigs, log_fh=None, log_outprefix=None) | Input is dict of nucmer_hits. Makes any possible contig merges.
Returns True iff any merges were made | 2.488587 | 2.175909 | 1.1437 |
'''Writes crunch file and shell script to start up ACT, showing comparison of ref and qry'''
if self.verbose:
print('Making ACT files from', ref_fasta, qry_fasta, coords_file)
ref_fasta = os.path.relpath(ref_fasta)
qry_fasta = os.path.relpath(qry_fasta)
coords_file = os.path.relpath(coords_file)
outprefix = os.path.relpath(outprefix)
self._index_fasta(ref_fasta)
self._index_fasta(qry_fasta)
crunch_file = outprefix + '.crunch'
pymummer.coords_file.convert_to_msp_crunch(
coords_file,
crunch_file,
ref_fai=ref_fasta + '.fai',
qry_fai=qry_fasta + '.fai'
)
bash_script = outprefix + '.start_act.sh'
with open(bash_script, 'w') as f:
print('#!/usr/bin/env bash', file=f)
print('act', ref_fasta, crunch_file, qry_fasta, file=f)
pyfastaq.utils.syscall('chmod +x ' + bash_script) | def _write_act_files(self, ref_fasta, qry_fasta, coords_file, outprefix) | Writes crunch file and shell script to start up ACT, showing comparison of ref and qry | 3.136227 | 2.484416 | 1.26236 |
'''Writes dictionary of contigs to file'''
f = pyfastaq.utils.open_file_write(fname)
for contig in sorted(contigs, key=lambda x:len(contigs[x]), reverse=True):
print(contigs[contig], file=f)
pyfastaq.utils.close(f) | def _contigs_dict_to_file(self, contigs, fname) | Writes dictionary of contigs to file | 2.865892 | 2.907844 | 0.985573 |
'''Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file'''
seq_reader = pyfastaq.sequences.file_reader(fastg)
names = set([x.id.rstrip(';') for x in seq_reader if ':' in x.id])
found_fwd = set()
found_rev = set()
for name in names:
l = name.split(':')
if len(l) != 2:
continue
if l[0] == l[1]:
if l[0][-1] == "'":
found_rev.add(l[0][:-1])
else:
found_fwd.add(l[0])
return found_fwd.intersection(found_rev) | def _get_spades_circular_nodes(self, fastg) | Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file | 3.653776 | 2.559076 | 1.427772 |
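For orientation, a hedged sketch of the kind of FASTG headers the parser above reacts to; the exact SPAdes node naming is an assumption:

```python
# Hedged sketch: headers of a self-linking node in a pre-3.6.1 SPAdes FASTG (naming assumed).
# >EDGE_3_length_5000_cov_12.3:EDGE_3_length_5000_cov_12.3;      forward self-link
# >EDGE_3_length_5000_cov_12.3':EDGE_3_length_5000_cov_12.3';    reverse self-link
# A node that appears in both found_fwd and found_rev is reported as circular.
```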
'''Tries to make a new circularised contig from the contig called original_contig. hits = list of nucmer hits, all with ref=original contig. circular_spades=set of query contig names that spades says are circular'''
writing_log_file = None not in [log_fh, log_outprefix]
hits_to_circular_contigs = [x for x in hits if x.qry_name in circular_spades]
if len(hits_to_circular_contigs) == 0:
if writing_log_file:
print(log_outprefix, original_contig, 'No matches to SPAdes circular contigs', sep='\t', file=log_fh)
return None, None
for hit in hits_to_circular_contigs:
print(log_outprefix, original_contig, 'Checking hit:', hit, sep='\t', file=log_fh)
percent_query_covered = 100 * (hit.hit_length_qry / hit.qry_length)
if self.min_spades_circular_percent <= percent_query_covered:
print(log_outprefix, '\t', original_contig, '\t\tHit is long enough. Percent of contig covered by hit is ', percent_query_covered, sep='', file=log_fh)
# the spades contig hit is long enough, but now check that
# the input contig is covered by hits from this spades contig
hit_intervals = [x.ref_coords() for x in hits_to_circular_contigs if x.qry_name == hit.qry_name]
if len(hit_intervals) > 0:
pyfastaq.intervals.merge_overlapping_in_list(hit_intervals)
percent_covered = 100 * pyfastaq.intervals.length_sum_from_list(hit_intervals) / hit.ref_length
if writing_log_file:
print(log_outprefix, '\t', original_contig, '\t\treference bases covered by spades contig:', ', '.join([str(x) for x in hit_intervals]), sep='', file=log_fh)
print(log_outprefix, '\t', original_contig, '\t\t ... which is ', percent_covered, ' percent of ', hit.ref_length, ' bases', sep='', file=log_fh)
if self.min_spades_circular_percent <= percent_covered:
if writing_log_file:
print(log_outprefix, original_contig, '\tUsing hit to call as circular (enough bases covered)', sep='\t', file=log_fh)
return pyfastaq.sequences.Fasta(original_contig, self.reassembly_contigs[hit.qry_name].seq), hit.qry_name
elif writing_log_file:
print(log_outprefix, original_contig, '\tNot using hit to call as circular (not enough bases covered)', sep='\t', file=log_fh)
else:
print(log_outprefix, original_contig, '\tNot using hit to call as circular (hit too short)', sep='\t', file=log_fh)
if writing_log_file:
print(log_outprefix, original_contig, 'No suitable matches to SPAdes circular contigs', sep='\t', file=log_fh)
return None, None | def _make_new_contig_from_nucmer_and_spades(self, original_contig, hits, circular_spades, log_fh=None, log_outprefix=None) | Tries to make a new circularised contig from the contig called original_contig. hits = list of nucmer hits, all with ref=original contig. circular_spades=set of query contig names that spades says are circular | 3.025702 | 2.561841 | 1.181065 |
header_info = RE_HUNK_HEADER.match(header)
hunk_info = header_info.groups()
hunk = Hunk(*hunk_info)
source_line_no = hunk.source_start
target_line_no = hunk.target_start
expected_source_end = source_line_no + hunk.source_length
expected_target_end = target_line_no + hunk.target_length
for diff_line_no, line in diff:
if encoding is not None:
line = line.decode(encoding)
valid_line = RE_HUNK_EMPTY_BODY_LINE.match(line)
if not valid_line:
valid_line = RE_HUNK_BODY_LINE.match(line)
if not valid_line:
raise UnidiffParseError('Hunk diff line expected: %s' % line)
line_type = valid_line.group('line_type')
if line_type == LINE_TYPE_EMPTY:
line_type = LINE_TYPE_CONTEXT
value = valid_line.group('value')
original_line = Line(value, line_type=line_type)
if line_type == LINE_TYPE_ADDED:
original_line.target_line_no = target_line_no
target_line_no += 1
elif line_type == LINE_TYPE_REMOVED:
original_line.source_line_no = source_line_no
source_line_no += 1
elif line_type == LINE_TYPE_CONTEXT:
original_line.target_line_no = target_line_no
target_line_no += 1
original_line.source_line_no = source_line_no
source_line_no += 1
elif line_type == LINE_TYPE_NO_NEWLINE:
pass
else:
original_line = None
# stop parsing if we got past expected number of lines
if (source_line_no > expected_source_end or
target_line_no > expected_target_end):
raise UnidiffParseError('Hunk is longer than expected')
if original_line:
original_line.diff_line_no = diff_line_no
hunk.append(original_line)
# if hunk source/target lengths are ok, hunk is complete
if (source_line_no == expected_source_end and
target_line_no == expected_target_end):
break
# report an error if we haven't got expected number of lines
if (source_line_no < expected_source_end or
target_line_no < expected_target_end):
raise UnidiffParseError('Hunk is shorter than expected')
self.append(hunk) | def _parse_hunk(self, header, diff, encoding) | Parse hunk details. | 2.100101 | 2.082341 | 1.008529 |
if (self.source_file.startswith('a/') and
self.target_file.startswith('b/')):
filepath = self.source_file[2:]
elif (self.source_file.startswith('a/') and
self.target_file == '/dev/null'):
filepath = self.source_file[2:]
elif (self.target_file.startswith('b/') and
self.source_file == '/dev/null'):
filepath = self.target_file[2:]
else:
filepath = self.source_file
return filepath | def path(self) | Return the file path abstracted from VCS. | 2.191136 | 2.032784 | 1.077899 |
greedy = '?' if greedy else ''  # '?' makes the '.+' in the pattern below non-greedy
if isinstance(post, (list, tuple)):
post = '(?=' + '|'.join(post) + ')'
tag_list = re.findall(r'{pre}(.+{greedy}){post}'.format(pre=pre, post=post, greedy=greedy),
string)
if len(tag_list) > 1:
raise ValueError('More than one matching pattern found... check filename')
elif len(tag_list) == 0:
return None
else:
return tagtype(tag_list[0]) | def get_tag_value(string, pre, post, tagtype=float, greedy=True) | Extracts the value of a tag from a string.
Parameters
-----------------
pre : str
regular expression to match before the tag value
post : str | list | tuple
regular expression to match after the tag value
if a list is given, the regular expressions will be combined into the regular expression (?=post[0]|post[1]|..)
tagtype : str | float | int
the type to which the tag value should be converted
greedy : bool
Whether the regular expression is greedy or not.
Returns
---------------
Tag value if found, None otherwise
Example
------------
get_tag_value('PID_23.5.txt', pre=r'PID_' , post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23.5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23_5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23
get_tag_value('PID_23.txt', pre=r'PID_', post='.txt') should return 23
get_tag_value('PID.txt', pre=r'PID_', post='.txt') should return None
TODO Make list/tuple input for pre | 3.266567 | 3.137955 | 1.040986 |
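The heart of get_tag_value is a single re.findall; this sketch spells out the pattern the function builds for the docstring's first example (greedy=True inserts the '?'), so the returned values can be checked directly.

import re

pattern = r'PID_(.+?)(?=_|\.txt)'            # '{pre}(.+{greedy}){post}' with pre=r'PID_'
print(re.findall(pattern, 'PID_23.5.txt'))   # ['23.5'] -> float('23.5') == 23.5
print(re.findall(pattern, 'PID.txt'))        # []       -> get_tag_value returns None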
# get dirname from user if not given
if dirname is None:
from FlowCytometryTools.gui import dialogs
dirname = dialogs.select_directory_dialog('Select a directory')
# find all files in dirname that match pattern
if recursive: # search subdirs
matches = []
for root, dirnames, filenames in os.walk(dirname):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
else:
matches = glob.glob(os.path.join(dirname, pattern))
return matches | def get_files(dirname=None, pattern='*.*', recursive=True) | Get all file names within a given directory whose names match a
given pattern.
Parameters
----------
dirname : str | None
Directory containing the datafiles.
If None is given, open a dialog box.
pattern : str
Return only files whose names match the specified pattern.
recursive : bool
True : Search recursively within all sub-directories.
False : Search only in given directory.
Returns
-------
matches: list
List of file names (including full path). | 2.708775 | 2.860841 | 0.946846 |
with open(path, 'wb') as f:
try:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Pickling failed for object {0}, path {1}'.format(obj, path))
print('Error message: {0}'.format(e)) | def save(obj, path) | Pickle (serialize) object to input file path
Parameters
----------
obj : any object
path : string
File path | 2.180764 | 2.4135 | 0.903569 |
f = open(path, 'rb')
try:
return pickle.load(f)
finally:
f.close() | def load(path) | Load pickled object from the specified file path.
Parameters
----------
path : string
File path
Returns
-------
unpickled : type of object stored in file | 2.428414 | 3.486274 | 0.696564 |
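A minimal roundtrip showing the same pickling pattern as save and load above (binary mode both ways, highest protocol on write); the payload and file name are made up.

import os
import pickle
import tempfile

payload = {'sample': 'A1', 'events': [1, 2, 3]}                   # hypothetical object
path = os.path.join(tempfile.gettempdir(), 'payload.pickle')

with open(path, 'wb') as f:
    pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)

with open(path, 'rb') as f:
    restored = pickle.load(f)

assert restored == payload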
if isinstance(obj, type(None)):
return None
elif isinstance(obj, six.string_types):
return [obj]
else:
# Nesting here since symmetry is broken in isinstance checks.
# Strings are iterables in python 3, so the relative order of if statements is important.
if isinstance(obj, collections.Iterable):
return obj
else:
return [obj] | def to_iter(obj) | Convert an object to a list if it is not already an iterable.
Nones are returned unaltered.
This is an awful function that proliferates an explosion of types; please do not use it anymore. | 6.022692 | 5.448632 | 1.105359 |
obj = to_iter(obj)
if isinstance(obj, type(None)):
return None
else:
return list(obj) | def to_list(obj) | Converts an object into a list if it is not an iterable, forcing tuples into lists.
Nones are returned unchanged. | 4.633328 | 4.211182 | 1.100244 |
from copy import copy, deepcopy
if deep:
return deepcopy(self)
else:
return copy(self) | def copy(self, deep=True) | Make a copy of this object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : type of caller | 3.175646 | 4.675265 | 0.679244 |
if th <= 0:
raise ValueError('Threshold value must be positive. %s given.' % th)
return where(x <= th, log10(th) * 1. * r / d, log10(x) * 1. * r / d) | def tlog(x, th=1, r=_display_max, d=_l_mmax) | Truncated log10 transform.
Parameters
----------
x : num | num iterable
values to be transformed.
th : num
values at or below th are transformed to log10(th) * r / d (0 for the default th=1).
Must be positive.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
tlog(10**d) = r
Returns
-------
Array of transformed values. | 4.796389 | 5.369011 | 0.893347 |
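A self-contained restatement of the formula above, using the defaults described in the docstring (r = 10**4, d = log10(2**18)), to show the fixed points of the transform. The default values are taken from the docstring, not from the module constants themselves.

import numpy as np

def tlog(x, th=1, r=10**4, d=np.log10(2**18)):
    # values at or below th map to log10(th)*r/d, everything else to log10(x)*r/d
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    return np.where(x <= th, np.log10(th) * 1. * r / d, np.log10(x) * 1. * r / d)

print(tlog(np.array([0.1, 1, 10, 2 ** 18])))   # [0, 0, ~1845.5, ~10000]: tlog(10**d) = r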
if th <= 0:
raise ValueError('Threshold value must be positive. %s given.' % th)
x = 10 ** (y * 1. * d / r)
try:
x[x < th] = th
except TypeError:
if x < th: x = th
return x | def tlog_inv(y, th=1, r=_display_max, d=_l_mmax) | Inverse truncated log10 transform.
Parameters
----------
y : num | num iterable
values to be transformed.
th : num
Inverse values below th are transformed to th.
Must be positive.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
tlog_inv(r) = 10**d
Returns
-------
Array of transformed values. | 4.307365 | 4.798099 | 0.897723 |
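The same kind of restatement for the inverse, confirming the two anchor points the docstring gives: values at or below 0 come back as th, and tlog_inv(r) = 10**d. Defaults are again taken from the docstring.

import numpy as np

def tlog_inv(y, th=1, r=10**4, d=np.log10(2**18)):
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    x = 10 ** (y * 1. * d / r)
    try:
        x[x < th] = th          # array input: clip in place
    except TypeError:
        if x < th:              # scalar input
            x = th
    return x

print(tlog_inv(0))        # 1: anything mapping below the threshold comes back as th
print(tlog_inv(10 ** 4))  # ~262144, i.e. 10**d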
aux = 1. * d / r * y
s = sign(y)
if s.shape: # to catch case where input is a single number
s[s == 0] = 1
elif s == 0:
s = 1
return s * 10 ** (s * aux) + b * aux - s | def hlog_inv(y, b=500, r=_display_max, d=_l_mmax) | Inverse of base 10 hyperlog transform. | 7.382378 | 7.077213 | 1.043119 |
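A sketch of the hyperlog inverse above with concrete defaults (b = 500 from the signature; r and d are assumed to match the sibling transforms), showing that it is anchored at the origin and antisymmetric.

import numpy as np

def hlog_inv(y, b=500, r=10**4, d=np.log10(2**18)):
    aux = 1. * d / r * y
    s = np.sign(y)
    if s.shape:              # array input
        s[s == 0] = 1
    elif s == 0:             # scalar zero
        s = 1
    return s * 10 ** (s * aux) + b * aux - s

print(hlog_inv(0))                          # 0.0
print(hlog_inv(5000), -hlog_inv(-5000))     # equal: antisymmetric around 0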
x = asarray(x)
xmin = min(x)
xmax = max(x)
if xmin == xmax:
return asarray([xmin] * nx)
if xmax <= 0: # all values<=0
return -_x_for_spln(-x, nx, log_spacing)[::-1]
if not log_spacing:
return linspace(xmin, xmax, nx)
# All code below is to handle log-spacing when x has potentially both negative
# and positive values.
if xmin > 0:
return logspace(log10(xmin), log10(xmax), nx)
else:
lxmax = max([log10(xmax), 0])
lxmin = max([log10(abs(xmin)), 0])
# All the code below is for log-spacing, when xmin < 0 and xmax > 0
if lxmax == 0 and lxmin == 0:
return linspace(xmin, xmax, nx) # Use linear spacing as fallback
if xmin > 0:
x_spln = logspace(lxmin, lxmax, nx)
elif xmin == 0:
x_spln = r_[0, logspace(-1, lxmax, nx - 1)]
else: # (xmin < 0)
f = lxmin / (lxmin + lxmax)
nx_neg = int(f * nx)
nx_pos = nx - nx_neg
if nx <= 1:
# If this ever triggers, the edge-case handling below needs fixing
raise AssertionError(u'nx should never be 0 or 1')
# Work-around various edge cases
if nx_neg == 0:
nx_neg = 1
nx_pos = nx_pos - 1
if nx_pos == 0:
nx_pos = 1
nx_neg = nx_neg - 1
x_spln_pos = logspace(-1, lxmax, nx_pos)
x_spln_neg = -logspace(lxmin, -1, nx_neg)
x_spln = r_[x_spln_neg, x_spln_pos]
return x_spln | def _x_for_spln(x, nx, log_spacing) | Create vector of values to be used in constructing a spline.
Parameters
----------
x : num | num iterable
Resulted values will span the range [min(x), max(x)]
nx : int
Length of returned vector.
log_spacing: bool
False - Create linearly spaced values.
True - Create logarithmically spaced values.
To extend to negative values, the spacing is done separately on the
negative and positive range, and these are later combined.
The number of points in the negative/positive range is proportional
to their relative range in log space. i.e., for data in the range
[-100, 1000] 2/5 of the resulting points will be in the negative range.
Returns
-------
x_spln : array | 3.235554 | 3.29495 | 0.981974 |
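To make the docstring's [-100, 1000] example concrete, here is the split computation from the mixed-sign branch above: knot counts are proportional to the log-space extent on each side of zero. A sketch using the same expressions as the function body.

import numpy as np

xmin, xmax, nx = -100, 1000, 50
lxmin = max(np.log10(abs(xmin)), 0)   # 2.0
lxmax = max(np.log10(xmax), 0)        # 3.0
f = lxmin / (lxmin + lxmax)           # 0.4 -> 2/5 of the points go negative
nx_neg = int(f * nx)                  # 20 knots on the negative side
nx_pos = nx - nx_neg                  # 30 knots on the positive side
x_spln = np.r_[-np.logspace(lxmin, -1, nx_neg), np.logspace(-1, lxmax, nx_pos)]
print(nx_neg, nx_pos, x_spln.min(), x_spln.max())   # 20 30 -100.0 1000.0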