code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
return dict(
reference_name=self.reference_name,
annotation_name=self.annotation_name,
annotation_version=self.annotation_version,
gtf_path_or_url=self._gtf_path_or_url,
transcript_fasta_paths_or_urls=self._transcript_fasta_paths_or_urls,
protein_fasta_paths_or_urls=self._protein_fasta_paths_or_urls,
decompress_on_download=self.decompress_on_download,
copy_local_files_to_cache=self.copy_local_files_to_cache,
cache_directory_path=self.cache_directory_path)
|
def to_dict(self)
|
Returns a dictionary of the essential fields of this Genome.
| 2.605056 | 2.346021 | 1.110415 |
self.gtf_path = None
self._protein_sequences = None
self._transcript_sequences = None
self._db = None
self.protein_fasta_paths = None
self.transcript_fasta_paths = None
# only memoizing the Gene, Transcript, and Exon objects
self._genes = {}
self._transcripts = {}
self._exons = {}
|
def _init_lazy_fields(self)
|
Member data that gets loaded or constructed on demand
| 4.959609 | 4.737117 | 1.046968 |
if len(field_name) == 0:
raise ValueError("Expected non-empty field name")
if len(path_or_url) == 0:
raise ValueError("Expected non-empty path_or_url")
return self.download_cache.local_path_or_install_error(
field_name=field_name,
path_or_url=path_or_url,
download_if_missing=download_if_missing,
overwrite=overwrite)
|
def _get_cached_path(
self,
field_name,
path_or_url,
download_if_missing=False,
overwrite=False)
|
Get the local path for a possibly remote file; if the file is missing,
either download it or raise an error whose message includes install
instructions.
| 2.434071 | 2.084559 | 1.167667 |
self._set_local_paths(download_if_missing=True, overwrite=overwrite)
|
def download(self, overwrite=False)
|
Download data files needed by this Genome instance.
Parameters
----------
overwrite : bool, optional
Download files regardless of whether a local copy already exists.
| 9.107336 | 10.995564 | 0.828274 |
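A minimal usage sketch for this download/index workflow, assuming the pyensembl package layout where EnsemblRelease is a concrete Genome subclass (release number and species are illustrative):

```python
# Sketch only: fetch the raw annotation files, then build the derived
# indices so that lookups work locally.
from pyensembl import EnsemblRelease

genome = EnsemblRelease(75, species="human")  # illustrative release/species
genome.download(overwrite=False)  # GTF + FASTA files, skipped if cached
genome.index(overwrite=False)     # GTF database + FASTA sequence indices
```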
if self.requires_gtf:
self.db.connect_or_create(overwrite=overwrite)
if self.requires_transcript_fasta:
self.transcript_sequences.index(overwrite=overwrite)
if self.requires_protein_fasta:
self.protein_sequences.index(overwrite=overwrite)
|
def index(self, overwrite=False)
|
Assuming that all necessary data for this Genome has been downloaded,
generate the GTF database and save an efficient representation of the
FASTA sequence files.
| 4.247283 | 3.478574 | 1.220984 |
args = [
"--reference-name", self.reference_name,
"--annotation-name", self.annotation_name]
if self.annotation_version:
args.extend(["--annotation-version", str(self.annotation_version)])
if self.requires_gtf:
args.append("--gtf")
args.append("\"%s\"" % self._gtf_path_or_url)
if self.requires_protein_fasta:
args += [
"--protein-fasta \"%s\"" %
path for path in self._protein_fasta_paths_or_urls]
if self.requires_transcript_fasta:
args += [
"--transcript-fasta \"%s\"" %
path for path in self._transcript_fasta_paths_or_urls]
return "pyensembl install %s" % " ".join(args)
|
def install_string(self)
|
Add every missing file to the install string shown to the user
in an error message.
| 2.853411 | 2.770332 | 1.029989 |
for maybe_fn in self.__dict__.values():
# clear cache associated with all memoization decorators,
# GTF and SequenceData objects
if hasattr(maybe_fn, "clear_cache"):
maybe_fn.clear_cache()
|
def clear_cache(self)
|
Clear any in-memory cached values and short-lived on-disk
materializations from MemoryCache
| 11.955848 | 12.180705 | 0.98154 |
self.clear_cache()
db_path = self.db.local_db_path()
if exists(db_path):
remove(db_path)
|
def delete_index_files(self)
|
Delete all data aside from source GTF and FASTA files
| 5.229081 | 4.903561 | 1.066384 |
return self.db.query_feature_values(
column=column,
feature=feature,
distinct=distinct,
contig=contig,
strand=strand)
|
def _all_feature_values(
self,
column,
feature,
distinct=True,
contig=None,
strand=None)
|
Cached lookup of all values for a particular feature property from
the database; caches repeated queries in memory and
stores them as a CSV.
Parameters
----------
column : str
Name of property (e.g. exon_id)
feature : str
Type of entry (e.g. exon)
distinct : bool, optional
Keep only unique values
contig : str, optional
Restrict query to particular contig
strand : str, optional
Restrict results to "+" or "-" strands
Returns a list constructed from query results.
| 2.311828 | 2.740046 | 0.843719 |
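A hedged sketch of how this cached lookup might be invoked; the public accessors shown later (gene_ids, gene_names, transcript_names) all delegate to it with a column/feature pair. `genome` is assumed to be an indexed Genome instance from the earlier sketch:

```python
# Sketch: all distinct exon IDs on the forward strand of chromosome 1.
exon_ids = genome._all_feature_values(
    column="exon_id",   # property to extract
    feature="exon",     # GTF feature type to filter on
    distinct=True,
    contig="1",
    strand="+")
```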
if self.transcript_sequences is None:
raise ValueError(
"No transcript FASTA supplied to this Genome: %s" % self)
return self.transcript_sequences.get(transcript_id)
|
def transcript_sequence(self, transcript_id)
|
Return cDNA nucleotide sequence of transcript, or None if
transcript doesn't have cDNA sequence.
| 5.581849 | 5.41019 | 1.031729 |
if self.protein_sequences is None:
raise ValueError(
"No protein FASTA supplied to this Genome: %s" % self)
return self.protein_sequences.get(protein_id)
|
def protein_sequence(self, protein_id)
|
Return amino acid sequence of protein, or None if
protein doesn't have a sequence.
| 6.010295 | 5.423644 | 1.108166 |
return self.db.query_locus(
filter_column="gene_id",
filter_value=gene_id,
feature="gene")
|
def locus_of_gene_id(self, gene_id)
|
Given a gene ID, return a Locus with chromosome, start, stop, and strand.
| 6.490004 | 5.895243 | 1.100888 |
gene_ids = self.gene_ids(contig=contig, strand=strand)
return [self.gene_by_id(gene_id) for gene_id in gene_ids]
|
def genes(self, contig=None, strand=None)
|
Returns all Gene objects in the database. Can be restricted to a
particular contig/chromosome and strand by the following arguments:
Parameters
----------
contig : str
Only return genes on the given contig.
strand : str
Only return genes on this strand.
| 2.352988 | 3.139601 | 0.749455 |
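A usage sketch for the contig/strand filtering described above (`genome` again assumed to be an indexed Genome instance):

```python
# Sketch: all genes, then only genes on the reverse strand of chr X.
all_genes = genome.genes()
chrX_minus = genome.genes(contig="X", strand="-")
print(len(all_genes), len(chrX_minus))
```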
if gene_id not in self._genes:
field_names = [
"seqname",
"start",
"end",
"strand",
]
optional_field_names = [
"gene_name",
"gene_biotype",
]
# Do not look for gene_name and gene_biotype if they are
# not in the database.
field_names.extend([
name for name in optional_field_names
if self.db.column_exists("gene", name)
])
result = self.db.query_one(
field_names,
filter_column="gene_id",
filter_value=gene_id,
feature="gene")
if not result:
raise ValueError("Gene not found: %s" % (gene_id,))
gene_name, gene_biotype = None, None
assert len(result) >= 4 and len(result) <= 6, \
"Result is not the expected length: %d" % len(result)
contig, start, end, strand = result[:4]
if len(result) == 5:
if "gene_name" in field_names:
gene_name = result[4]
else:
gene_biotype = result[4]
elif len(result) == 6:
gene_name, gene_biotype = result[4:]
self._genes[gene_id] = Gene(
gene_id=gene_id,
gene_name=gene_name,
contig=contig,
start=start,
end=end,
strand=strand,
biotype=gene_biotype,
genome=self)
return self._genes[gene_id]
|
def gene_by_id(self, gene_id)
|
Construct a Gene object for the given gene ID.
| 2.353365 | 2.295598 | 1.025164 |
gene_ids = self.gene_ids_of_gene_name(gene_name)
return [self.gene_by_id(gene_id) for gene_id in gene_ids]
|
def genes_by_name(self, gene_name)
|
Get all the unique genes with the given name (there might be multiple
due to copies in the genome); return a list containing a Gene object
for each distinct ID.
| 2.322154 | 2.347567 | 0.989175 |
gene_id = self.gene_id_of_protein_id(protein_id)
return self.gene_by_id(gene_id)
|
def gene_by_protein_id(self, protein_id)
|
Get the gene ID associated with the given protein ID and
return its Gene object.
| 2.643509 | 2.543805 | 1.039195 |
return self._all_feature_values(
column="gene_name",
feature="gene",
contig=contig,
strand=strand)
|
def gene_names(self, contig=None, strand=None)
|
Return all gene names in the database,
optionally restricted to a chromosome and/or strand.
| 5.447882 | 5.627033 | 0.968162 |
return self._all_feature_values(
column="gene_id",
feature="gene",
contig=contig,
strand=strand)
|
def gene_ids(self, contig=None, strand=None)
|
What are all the gene IDs
(optionally restrict to a given chromosome/contig and/or strand)
| 5.455131 | 5.163083 | 1.056565 |
results = self._query_gene_ids("gene_name", gene_name)
if len(results) == 0:
raise ValueError("Gene name not found: %s" % gene_name)
return results
|
def gene_ids_of_gene_name(self, gene_name)
|
What are the gene IDs associated with a given gene name?
(due to copy events, there might be multiple genes per name)
| 3.042545 | 2.690045 | 1.131039 |
results = self._query_gene_ids(
"protein_id",
protein_id,
feature="CDS")
if len(results) == 0:
raise ValueError("Protein ID not found: %s" % protein_id)
assert len(results) == 1, \
("Should have only one gene ID for a given protein ID, "
"but found %d: %s" % (len(results), results))
return results[0]
|
def gene_id_of_protein_id(self, protein_id)
|
What is the gene ID associated with a given protein ID?
| 3.091751 | 2.974198 | 1.039524 |
transcript_ids = self.transcript_ids(contig=contig, strand=strand)
return [
self.transcript_by_id(transcript_id)
for transcript_id in transcript_ids
]
|
def transcripts(self, contig=None, strand=None)
|
Construct Transcript object for every transcript entry in
the database. Optionally restrict to a particular
chromosome using the `contig` argument.
| 2.512839 | 2.752398 | 0.912964 |
if transcript_id not in self._transcripts:
optional_field_names = [
"transcript_name",
"transcript_biotype",
"transcript_support_level",
]
field_names = [
"seqname",
"start",
"end",
"strand",
"gene_id",
]
# Do not look for the optional fields if they are not in the database.
field_names.extend([
name for name in optional_field_names
if self.db.column_exists("transcript", name)
])
result = self.db.query_one(
select_column_names=field_names,
filter_column="transcript_id",
filter_value=transcript_id,
feature="transcript",
distinct=True)
if not result:
raise ValueError("Transcript not found: %s" % (transcript_id,))
transcript_name, transcript_biotype, tsl = None, None, None
assert 5 <= len(result) <= 5 + len(optional_field_names), \
"Result is not the expected length: %d" % len(result)
contig, start, end, strand, gene_id = result[:5]
if len(result) > 5:
extra_field_names = [f for f in optional_field_names if f in field_names]
extra_data = dict(zip(extra_field_names, result[5:]))
transcript_name = extra_data.get("transcript_name")
transcript_biotype = extra_data.get("transcript_biotype")
tsl = extra_data.get("transcript_support_level")
if not tsl or tsl == 'NA':
tsl = None
else:
tsl = int(tsl)
self._transcripts[transcript_id] = Transcript(
transcript_id=transcript_id,
transcript_name=transcript_name,
contig=contig,
start=start,
end=end,
strand=strand,
biotype=transcript_biotype,
gene_id=gene_id,
genome=self,
support_level=tsl)
return self._transcripts[transcript_id]
|
def transcript_by_id(self, transcript_id)
|
Construct Transcript object with given transcript ID
| 2.271442 | 2.286482 | 0.993422 |
return self._all_feature_values(
column="transcript_name",
feature="transcript",
contig=contig,
strand=strand)
|
def transcript_names(self, contig=None, strand=None)
|
What are all the transcript names in the database
(optionally, restrict to a given chromosome and/or strand)
| 5.573202 | 5.310194 | 1.049529 |
results = self._query_transcript_ids(
"protein_id",
protein_id,
feature="CDS")
if len(results) == 0:
raise ValueError("Protein ID not found: %s" % protein_id)
assert len(results) == 1, \
("Should have only one transcript ID for a given protein ID, "
"but found %d: %s" % (len(results), results))
return results[0]
|
def transcript_id_of_protein_id(self, protein_id)
|
What is the transcript ID associated with a given protein ID?
| 3.010732 | 2.856637 | 1.053943 |
# DataFrame with single column called "exon_id"
exon_ids = self.exon_ids(contig=contig, strand=strand)
return [
self.exon_by_id(exon_id)
for exon_id in exon_ids
]
|
def exons(self, contig=None, strand=None)
|
Create Exon objects for all exons in the database, optionally
restricted to a particular chromosome using the `contig` argument.
| 3.714512 | 3.445745 | 1.078 |
if exon_id not in self._exons:
field_names = [
"seqname",
"start",
"end",
"strand",
"gene_name",
"gene_id",
]
contig, start, end, strand, gene_name, gene_id = self.db.query_one(
select_column_names=field_names,
filter_column="exon_id",
filter_value=exon_id,
feature="exon",
distinct=True)
self._exons[exon_id] = Exon(
exon_id=exon_id,
contig=contig,
start=start,
end=end,
strand=strand,
gene_name=gene_name,
gene_id=gene_id)
return self._exons[exon_id]
|
def exon_by_id(self, exon_id)
|
Construct an Exon object from its ID by looking up the exon's
properties in the given Database.
| 2.160734 | 2.076278 | 1.040676 |
protein_ids = self._all_feature_values(
column="protein_id",
feature="CDS",
contig=contig,
strand=strand,
distinct=True)
# drop None values
return [protein_id for protein_id in protein_ids if protein_id]
|
def protein_ids(self, contig=None, strand=None)
|
What are all the protein IDs
(optionally restrict to a given chromosome and/or strand)
| 3.98563 | 4.099339 | 0.972262 |
try:
release = int(release)
except (ValueError, TypeError):
raise ValueError("Invalid Ensembl release: %s" % release)
if release < MIN_ENSEMBL_RELEASE or release > MAX_ENSEMBL_RELEASE:
raise ValueError(
"Invalid Ensembl releases %d, must be between %d and %d" % (
release, MIN_ENSEMBL_RELEASE, MAX_ENSEMBL_RELEASE))
return release
|
def check_release_number(release)
|
Check to make sure a release is in the valid range of
Ensembl releases.
| 2.415767 | 1.991991 | 1.21274 |
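A behavior sketch, assuming illustrative bounds MIN_ENSEMBL_RELEASE = 54 and MAX_ENSEMBL_RELEASE = 93:

```python
# Strings that parse as integers are coerced; everything else raises.
assert check_release_number("75") == 75
try:
    check_release_number("latest")     # non-numeric -> ValueError
except ValueError as e:
    print(e)
try:
    check_release_number(12)           # below the minimum supported release
except ValueError as e:
    print(e)
```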
species_list = args.species if args.species else ["human"]
release_list = args.release if args.release else [MAX_ENSEMBL_RELEASE]
genomes = []
for species in species_list:
# use Ensembl release information for each requested version
for version in release_list:
ensembl_release = EnsemblRelease(version, species=species)
if not args.custom_mirror:
genomes.append(ensembl_release)
else:
# if we're using a custom mirror then we expect the provided
# URL to be a directory with all the same filenames as
# would be provided by Ensembl
gtf_url = os.path.join(
args.custom_mirror,
os.path.basename(ensembl_release.gtf_url))
transcript_fasta_urls = [
os.path.join(
args.custom_mirror,
os.path.basename(transcript_fasta_url))
for transcript_fasta_url in ensembl_release.transcript_fasta_urls
]
protein_fasta_urls = [
os.path.join(
args.custom_mirror,
os.path.basename(protein_fasta_url))
for protein_fasta_url in
ensembl_release.protein_fasta_urls
]
reference_name = ensembl_release.reference_name
genome = Genome(
reference_name=reference_name,
annotation_name="ensembl",
annotation_version=version,
gtf_path_or_url=gtf_url,
transcript_fasta_paths_or_urls=transcript_fasta_urls,
protein_fasta_paths_or_urls=protein_fasta_urls)
genomes.append(genome)
return genomes
|
def all_combinations_of_ensembl_genomes(args)
|
Use all combinations of species and release versions specified by the
commandline arguments to return a list of EnsemblRelease or Genome objects.
The results will typically be of type EnsemblRelease unless the
--custom-mirror argument was given.
| 2.51748 | 2.308647 | 1.090457 |
return SPECIES_SUBDIR_TEMPLATE % {
"release": ensembl_release,
"filetype": filetype,
"species": species,
}
|
def _species_subdir(
ensembl_release,
species="homo_sapiens",
filetype="gtf",
server=ENSEMBL_FTP_SERVER)
|
Assumes ensembl_release has already been normalized by the calling
function, but species might be either a common name or a Latin name.
| 2.668007 | 2.835343 | 0.940982 |
ensembl_release = check_release_number(ensembl_release)
if not isinstance(species, Species):
species = find_species_by_name(species)
reference_name = species.which_reference(ensembl_release)
return ensembl_release, species.latin_name, reference_name
|
def normalize_release_properties(ensembl_release, species)
|
Make sure a given release is valid, normalize it to be an integer,
normalize the species name, and get its associated reference.
| 3.762885 | 3.225959 | 1.166439 |
ensembl_release, species, reference_name = normalize_release_properties(
ensembl_release, species)
return GTF_FILENAME_TEMPLATE % {
"Species": species.capitalize(),
"reference": reference_name,
"release": ensembl_release,
}
|
def make_gtf_filename(ensembl_release, species)
|
Return the GTF filename expected on the Ensembl FTP server for a
specific species/release combination.
| 4.468997 | 4.456864 | 1.002722 |
ensembl_release, species, _ = \
normalize_release_properties(ensembl_release, species)
subdir = _species_subdir(
ensembl_release,
species=species,
filetype="gtf",
server=server)
url_subdir = urllib_parse.urljoin(server, subdir)
filename = make_gtf_filename(
ensembl_release=ensembl_release,
species=species)
return join(url_subdir, filename)
|
def make_gtf_url(ensembl_release, species, server=ENSEMBL_FTP_SERVER)
|
Returns the full URL of the GTF file on the Ensembl FTP server.
| 3.443199 | 3.192177 | 1.078637 |
ensembl_release, species, _ = \
normalize_release_properties(ensembl_release, species)
subdir = _species_subdir(
ensembl_release,
species=species,
filetype="fasta",
server=server,)
server_subdir = urllib_parse.urljoin(server, subdir)
server_sequence_subdir = join(server_subdir, "dna")
filename = make_fasta_dna_filename(
ensembl_release=ensembl_release,
species=species,
contig=contig)
return join(server_sequence_subdir, filename)
|
def make_fasta_dna_url(
ensembl_release,
species,
contig,
server=ENSEMBL_FTP_SERVER)
|
Construct URL to FASTA file with full sequence of a particular chromosome.
Returns the full URL (server, subdirectory, and filename joined).
| 3.336846 | 3.351742 | 0.995556 |
ensembl_release, species, reference_name = normalize_release_properties(
ensembl_release, species)
subdir = _species_subdir(
ensembl_release,
species=species,
filetype="fasta",
server=server)
server_subdir = urllib_parse.urljoin(server, subdir)
server_sequence_subdir = join(server_subdir, sequence_type)
filename = make_fasta_filename(
ensembl_release=ensembl_release,
species=species,
sequence_type=sequence_type)
return join(server_sequence_subdir, filename)
|
def make_fasta_url(
ensembl_release,
species,
sequence_type,
server=ENSEMBL_FTP_SERVER)
|
Construct URL to FASTA file with cDNA transcript or protein sequences
Parameter examples:
ensembl_release = 75
species = "Homo_sapiens"
sequence_type = "cdna" (other option: "pep")
| 3.02182 | 3.328943 | 0.907742 |
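A usage sketch mirroring the parameter examples in the docstring above:

```python
# Sketch: URL of the human cDNA FASTA for release 75; passing
# sequence_type="pep" instead would give the protein FASTA URL.
cdna_url = make_fasta_url(
    ensembl_release=75,
    species="homo_sapiens",
    sequence_type="cdna")
print(cdna_url)
```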
if feature not in self._TRANSCRIPT_FEATURES:
raise ValueError("Invalid transcript feature: %s" % feature)
results = self.db.query(
select_column_names=["start", "end"],
filter_column="transcript_id",
filter_value=self.id,
feature=feature)
if required and len(results) == 0:
raise ValueError(
"Transcript %s does not contain feature %s" % (
self.id, feature))
return results
|
def _transcript_feature_position_ranges(self, feature, required=True)
|
Find start/end chromosomal position range of features
(such as start codon) for this transcript.
| 3.16047 | 3.201445 | 0.987201 |
ranges = self._transcript_feature_position_ranges(
feature, required=True)
results = []
# a feature (such as a stop codon) may be split over multiple
# contiguous ranges. Collect all the nucleotide positions into a
# single list.
for (start, end) in ranges:
# since ranges are [inclusive, inclusive] and
# Python ranges are [inclusive, exclusive) we have to increment
# the end position
for position in range(start, end + 1):
assert position not in results, \
"Repeated position %d for %s" % (position, feature)
results.append(position)
return results
|
def _transcript_feature_positions(self, feature)
|
Get unique positions for feature, raise an error if feature is absent.
| 5.631518 | 5.404763 | 1.041955 |
results = self._transcript_feature_positions(feature)
if len(results) != 3:
raise ValueError(
"Expected 3 positions for %s of %s but got %d" % (
feature,
self.id,
len(results)))
return results
|
def _codon_positions(self, feature)
|
Parameters
----------
feature : str
Possible values are "start_codon" or "stop_codon"
Returns list of three chromosomal positions.
| 3.896722 | 3.609179 | 1.07967 |
results = self.db.query(
select_column_names=["exon_number", "start", "end"],
filter_column="transcript_id",
filter_value=self.id,
feature="exon")
sorted_intervals = [None] * len(results)
for (exon_number, start, end) in results:
sorted_intervals[int(exon_number) - 1] = (start, end)
return sorted_intervals
|
def exon_intervals(self)
|
List of (start,end) tuples for each exon of this transcript,
in the order specified by the 'exon_number' column of the
exon table.
| 3.249202 | 2.91807 | 1.113476 |
# this code is performance sensitive, so switching from
# typechecks.require_integer to a simpler assertion
assert type(position) == int, \
"Position argument must be an integer, got %s : %s" % (
position, type(position))
if position < self.start or position > self.end:
raise ValueError(
"Invalid position: %d (must be between %d and %d)" % (
position,
self.start,
self.end))
# offset from beginning of unspliced transcript (including introns)
unspliced_offset = self.offset(position)
total_spliced_offset = 0
# traverse exons in order of their appearance on the strand
# Since absolute positions may decrease if on the negative strand,
# we instead use unspliced offsets to get always increasing indices.
#
# Example:
#
# Exon Name: exon 1 exon 2
# Spliced Offset: 123456 789...
# Intron vs. Exon: ...iiiiiieeeeeeiiiiiiiiiiiiiiiieeeeeeiiiiiiiiiii...
for exon in self.exons:
exon_unspliced_start, exon_unspliced_end = self.offset_range(
exon.start, exon.end)
# If the relative position is not within this exon, keep a running
# total of the total exonic length-so-far.
#
# Otherwise, if the relative position is within an exon, get its
# offset into that exon by subtracting the exon's relative start
# position from the relative position. Add that to the total exonic
# length-so-far.
if exon_unspliced_start <= unspliced_offset <= exon_unspliced_end:
# all offsets are base 0, can be used as indices into
# sequence string
exon_offset = unspliced_offset - exon_unspliced_start
return total_spliced_offset + exon_offset
else:
exon_length = len(exon) # exon_end_position - exon_start_position + 1
total_spliced_offset += exon_length
raise ValueError(
"Couldn't find position %d on any exon of %s" % (
position, self.id))
|
def spliced_offset(self, position)
|
Convert from an absolute chromosomal position to the offset into
this transcript"s spliced mRNA.
Position must be inside some exon (otherwise raise exception).
| 4.563417 | 4.435283 | 1.02889 |
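A worked example of the offset arithmetic above, with hypothetical coordinates:

```python
# Hypothetical '+' strand transcript starting at position 1, with
# exon 1 at absolute positions 7..12 and exon 2 at 31..36.
# spliced_offset(32) proceeds as follows:
#   unspliced_offset = 32 - 1 = 31
#   exon 1 unspliced offsets are 6..11: 31 not inside, so
#       total_spliced_offset += len(exon 1) = 6
#   exon 2 unspliced offsets are 30..35: 31 is inside, so
#       exon_offset = 31 - 30 = 1
#   result = 6 + 1 = 7, i.e. the 8th nucleotide of the spliced mRNA
```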
offsets.sort()
for i in range(len(offsets) - 1):
assert offsets[i] + 1 == offsets[i + 1], \
"Offsets not contiguous: %s" % (offsets,)
return offsets
|
def _contiguous_offsets(self, offsets)
|
Sorts the input list of integer offsets,
ensures that values are contiguous.
| 2.804458 | 2.573118 | 1.089906 |
offsets = [
self.spliced_offset(position)
for position
in self.start_codon_positions
]
return self._contiguous_offsets(offsets)
|
def start_codon_spliced_offsets(self)
|
Offsets from start of spliced mRNA transcript
of nucleotides in start codon.
| 5.774333 | 5.960439 | 0.968776 |
offsets = [
self.spliced_offset(position)
for position
in self.stop_codon_positions
]
return self._contiguous_offsets(offsets)
|
def stop_codon_spliced_offsets(self)
|
Offsets from start of spliced mRNA transcript
of nucleotides in stop codon.
| 5.650553 | 5.866688 | 0.963159 |
return (
self.contains_start_codon and
self.contains_stop_codon and
self.coding_sequence is not None and
len(self.coding_sequence) % 3 == 0
)
|
def complete(self)
|
Consider a transcript complete if it has start and stop codons and
a coding sequence whose length is divisible by 3
| 4.063523 | 2.330606 | 1.743548 |
if self.sequence is None:
return None
start = self.first_start_codon_spliced_offset
end = self.last_stop_codon_spliced_offset
# If the start codon is at nucleotide offsets [3,4,5] and
# stop codon is at nucleotide offsets [20,21,22]
# then start = 3 and end = 22.
#
# Adding 1 to end since Python uses non-inclusive ends in slices/ranges.
# pylint: disable=invalid-slice-index
# TODO(tavi) Figure out why pylint is not happy with this slice
return self.sequence[start:end + 1]
|
def coding_sequence(self)
|
cDNA coding sequence (from start codon to stop codon, without
any introns)
| 6.505629 | 6.157077 | 1.05661 |
best_distance = float("inf")
best_locus = None
for locus in loci:
distance = locus.distance_to_interval(start, end)
if best_distance > distance:
best_distance = distance
best_locus = locus
return best_distance, best_locus
|
def find_nearest_locus(start, end, loci)
|
Finds nearest locus (object with method `distance_to_interval`) to the
interval defined by the given `start` and `end` positions.
Returns the distance to that locus, along with the locus object itself.
| 1.933724 | 1.639306 | 1.179599 |
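A self-contained usage sketch with a minimal stand-in locus type (hypothetical; the real Locus class provides distance_to_interval with the same meaning):

```python
# Any object with distance_to_interval(start, end) works here.
class ToyLocus:
    def __init__(self, start, end):
        self.start, self.end = start, end

    def distance_to_interval(self, start, end):
        if self.end < start:    # locus lies entirely before the interval
            return start - self.end
        if end < self.start:    # locus lies entirely after the interval
            return self.start - end
        return 0                # overlapping intervals have distance zero

loci = [ToyLocus(100, 200), ToyLocus(500, 600)]
distance, nearest = find_nearest_locus(start=450, end=460, loci=loci)
print(distance, nearest.start)  # 40 500
```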
candidate_column_groups = [
['seqname', 'start', 'end'],
['gene_name'],
['gene_id'],
['transcript_id'],
['transcript_name'],
['exon_id'],
['protein_id'],
['ccds_id'],
]
indices = []
column_set = set(column_names)
# Since queries are often restricted by feature type
# we should include that column in combination with all
# other indices we anticipate might improve performance
for column_group in candidate_column_groups:
skip = False
for column_name in column_group:
# some columns, such as 'exon_id',
# are not available in all releases of Ensembl (or
# other GTFs)
if column_name not in column_set:
logger.info(
"Skipping database index for {%s}",
", ".join(column_group))
skip = True
if skip:
continue
indices.append(column_group)
return indices
|
def _all_possible_indices(self, column_names)
|
Create list of tuples containing all possible index groups
we might want to create over tables in this database.
If a set of genome annotations is missing some column we want
to index on, we have to drop any indices which use that column.
A specific table may later drop some of these indices if they're
missing values for that feature or are the same as the table's primary key.
| 4.666538 | 4.611023 | 1.01204 |
if feature_name not in self.PRIMARY_KEY_COLUMNS:
return None
primary_key = self.PRIMARY_KEY_COLUMNS[feature_name]
primary_key_values = feature_df[primary_key]
if primary_key_values.isnull().any():
raise ValueError(
"Column '%s' can't be primary key of table '%s'"
" because it contains nulls values" % (
primary_key, feature_name))
elif len(primary_key_values.unique()) < len(primary_key_values):
raise ValueError(
"Column '%s' can't be primary key of table '%s'"
" because it contains repeated values" % (
primary_key, feature_name))
else:
return primary_key
|
def _get_primary_key(self, feature_name, feature_df)
|
Name of primary key for a feature table (e.g. "gene" -> "gene_id")
Since we're potentially going to run this code over unseen data,
make sure that the primary key is unique and never null.
If a feature doesn't have a primary key, return None.
| 2.119364 | 2.128108 | 0.995891 |
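A behavior sketch with a toy DataFrame, assuming PRIMARY_KEY_COLUMNS maps "gene" to "gene_id" and has no entry for "CDS" (`db` is an instance of the database class this method belongs to):

```python
import pandas as pd

gene_df = pd.DataFrame({
    "gene_id": ["ENSG0001", "ENSG0002"],  # unique, non-null: a valid key
    "seqname": ["1", "1"],
})
print(db._get_primary_key("gene", gene_df))  # -> "gene_id"
print(db._get_primary_key("CDS", gene_df))   # -> None (no key defined)
# A gene_id column containing nulls or duplicates raises ValueError.
```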
# each feature only gets indices if they're *not* the
# primary key and have non-null values in the feature's
# subset of data
result = []
for index_group in all_index_groups:
# is the index group just a primary key?
if len(index_group) == 1 and index_group[0] == primary_key:
continue
index_column_values = feature_df[index_group]
if len(index_column_values.dropna()) == 0:
continue
result.append(index_group)
return result
|
def _feature_indices(self, all_index_groups, primary_key, feature_df)
|
Choose subset of index group tuples from `all_index_groups` which are
applicable to a particular feature (not same as its primary key, have
non-null values).
| 3.411123 | 3.169675 | 1.076174 |
logger.info("Creating database: %s", self.local_db_path)
df = self._load_gtf_as_dataframe(
usecols=self.restrict_gtf_columns,
features=self.restrict_gtf_features)
all_index_groups = self._all_possible_indices(df.columns)
if self.restrict_gtf_features:
feature_names = self.restrict_gtf_features
else:
# split single DataFrame into dictionary mapping each unique
# feature name onto that subset of the data
feature_names = df['feature'].unique()
dataframes = {}
# every table gets the same set of indices
indices_dict = {}
# if a feature has an ID then make it that table's primary key
primary_keys = {}
for feature in feature_names:
df_subset = df[df.feature == feature]
if len(df_subset) == 0:
continue
dataframes[feature] = df_subset
primary_key = self._get_primary_key(feature, df_subset)
if primary_key:
primary_keys[feature] = primary_key
indices_dict[feature] = self._feature_indices(
all_index_groups,
primary_key,
df_subset)
self._connection = datacache.db_from_dataframes_with_absolute_path(
db_path=self.local_db_path,
table_names_to_dataframes=dataframes,
table_names_to_primary_keys=primary_keys,
table_names_to_indices=indices_dict,
overwrite=overwrite,
version=DATABASE_SCHEMA_VERSION)
return self._connection
|
def create(
self,
overwrite=False)
|
Create the local database (including indexing) if it's not
already set up. If `overwrite` is True, always re-create
the database from scratch.
Returns a connection to the database.
| 4.07186 | 4.051816 | 1.004947 |
connection = self._get_connection()
if connection:
return connection
else:
message = "GTF database needs to be created"
if self.install_string:
message += ", run: %s" % self.install_string
raise ValueError(message)
|
def connection(self)
|
Get a connection to the database or raise an exception
| 6.311737 | 5.616241 | 1.123836 |
connection = self._get_connection()
if connection:
return connection
else:
return self.create(overwrite=overwrite)
|
def connect_or_create(self, overwrite=False)
|
Return a connection to the database if it exists, otherwise create it.
Overwrite the existing database if `overwrite` is True.
| 3.77112 | 3.091859 | 1.219693 |
# TODO: combine with the query method, since they overlap
# significantly
require_string(column_name, "column_name", nonempty=True)
contig = normalize_chromosome(contig)
require_integer(position, "position")
if end is None:
end = position
require_integer(end, "end")
if not self.column_exists(feature, column_name):
raise ValueError("Table %s doesn't have column %s" % (
feature, column_name,))
if distinct:
distinct_string = "DISTINCT "
else:
distinct_string = ""
query = """
    SELECT %s%s
    FROM %s
    WHERE seqname = ?
    AND start <= ?
    AND end >= ?
""" % (distinct_string, column_name, feature)
query_params = [contig, end, position]
if strand:
query += " AND strand = ?"
query_params.append(strand)
tuples = self.connection.execute(query, query_params).fetchall()
# each result is a tuple, so pull out its first element
results = [t[0] for t in tuples if t[0] is not None]
if sorted:
results.sort()
return results
|
def column_values_at_locus(
self,
column_name,
feature,
contig,
position,
end=None,
strand=None,
distinct=False,
sorted=False)
|
Get the non-null values of a column from the database
at a particular range of loci
| 3.38972 | 3.375608 | 1.00418 |
return self.column_values_at_locus(
column,
feature,
contig,
position,
end=end,
strand=strand,
distinct=True,
sorted=True)
|
def distinct_column_values_at_locus(
self,
column,
feature,
contig,
position,
end=None,
strand=None)
|
Gather all the distinct values for a property/column at some specified
locus.
Parameters
----------
column : str
Which property are we getting the values of.
feature : str
Which type of entry (e.g. transcript, exon, gene) is the property
associated with?
contig : str
Chromosome or unplaced contig name
position : int
Chromosomal position
end : int, optional
End position of a range, if unspecified assume we're only looking
at the single given position.
strand : str, optional
Either the positive ('+') or negative strand ('-'). If unspecified
then check for values on either strand.
| 2.325586 | 3.715493 | 0.625916 |
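A usage sketch for the locus query above (coordinates are illustrative; `db` is a database instance):

```python
# Sketch: distinct gene names overlapping chr1:1,000,000-1,100,000
# on the forward strand.
names = db.distinct_column_values_at_locus(
    column="gene_name",
    feature="gene",
    contig="1",
    position=1000000,
    end=1100000,
    strand="+")
```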
try:
cursor = self.connection.execute(sql, query_params)
except sqlite3.OperationalError as e:
error_message = e.message if hasattr(e, 'message') else str(e)
logger.warning(
"Encountered error \"%s\" from query \"%s\" with parameters %s",
error_message,
sql,
query_params)
raise
results = cursor.fetchall()
if required and not results:
raise ValueError(
"No results found for query:\n%s\nwith parameters: %s" % (
sql, query_params))
return results
|
def run_sql_query(self, sql, required=False, query_params=[])
|
Given an arbitrary SQL query, run it against the database
and return the results.
Parameters
----------
sql : str
SQL query
required : bool
Raise an error if no results found in the database
query_params : list
For each '?' in the query there must be a corresponding value in
this list.
| 2.515649 | 2.460693 | 1.022334 |
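A usage sketch showing the '?' placeholder/parameter pairing described above (table and column names assume the GTF-derived schema from earlier in this section):

```python
rows = db.run_sql_query(
    "SELECT gene_id, gene_name FROM gene WHERE seqname = ?",
    required=False,          # an empty result is acceptable
    query_params=["1"])      # one value per '?' in the SQL
for gene_id, gene_name in rows:
    print(gene_id, gene_name)
```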
sql = """
    SELECT %s%s
    FROM %s
    WHERE %s = ?
""" % ("distinct " if distinct else "",
       ", ".join(select_column_names),
       feature,
       filter_column)
query_params = [filter_value]
return self.run_sql_query(
sql, required=required, query_params=query_params)
|
def query(
self,
select_column_names,
filter_column,
filter_value,
feature,
distinct=False,
required=False)
|
Construct a SQL query and run against the sqlite3 database,
filtered both by the feature type and a user-provided column/value.
| 4.901154 | 5.127876 | 0.955786 |
query = """
    SELECT %s%s
    FROM %s
    WHERE 1=1
""" % ("DISTINCT " if distinct else "", column, feature)
query_params = []
if contig:
contig = normalize_chromosome(contig)
query += " AND seqname = ?"
query_params.append(contig)
if strand:
strand = normalize_strand(strand)
query += " AND strand = ?"
query_params.append(strand)
rows = self.run_sql_query(query, query_params=query_params)
return [row[0] for row in rows if row is not None]
|
def query_feature_values(
self,
column,
feature,
distinct=True,
contig=None,
strand=None)
|
Run a SQL query against the sqlite3 database, filtered
only on the feature type.
| 2.690854 | 2.661388 | 1.011072 |
# list of values containing (contig, start, stop, strand)
result_tuples = self.query(
select_column_names=["seqname", "start", "end", "strand"],
filter_column=filter_column,
filter_value=filter_value,
feature=feature,
distinct=True,
required=True)
return [
Locus(contig, start, end, strand)
for (contig, start, end, strand)
in result_tuples
]
|
def query_loci(self, filter_column, filter_value, feature)
|
Query for loci satisfying a given filter and feature type.
Parameters
----------
filter_column : str
Name of column to filter results by.
filter_value : str
Only return loci which have this value in their filter_column.
feature : str
Feature names such as 'transcript', 'gene', and 'exon'
Returns list of Locus objects
| 3.54793 | 3.726417 | 0.952102 |
loci = self.query_loci(
filter_column=filter_column,
filter_value=filter_value,
feature=feature)
if len(loci) == 0:
raise ValueError("Couldn't find locus for %s with %s = %s" % (
feature, filter_column, filter_value))
elif len(loci) > 1:
raise ValueError("Too many loci for %s with %s = %s: %s" % (
feature, filter_column, filter_value, loci))
return loci[0]
|
def query_locus(self, filter_column, filter_value, feature)
|
Query for a unique locus; raise an error if it is missing or if more
than one matching locus exists in the database.
Parameters
----------
filter_column : str
Name of column to filter results by.
filter_value : str
Only return loci which have this value in their filter_column.
feature : str
Feature names such as 'transcript', 'gene', and 'exon'
Returns single Locus object.
| 1.825702 | 1.941129 | 0.940536 |
logger.info("Reading GTF from %s", self.gtf_path)
df = read_gtf(
self.gtf_path,
column_converters={
"seqname": normalize_chromosome,
"strand": normalize_strand,
},
infer_biotype_column=True,
usecols=usecols,
features=features)
column_names = set(df.keys())
expect_gene_feature = features is None or "gene" in features
expect_transcript_feature = features is None or "transcript" in features
observed_features = set(df["feature"])
# older Ensembl releases don't have "gene" or "transcript"
# features, so fill in those rows if they're missing
if expect_gene_feature and "gene" not in observed_features:
# if we have to reconstruct gene feature rows then
# fill in values for 'gene_name' and 'gene_biotype'
# but only if they're actually present in the GTF
logger.info("Creating missing gene features...")
df = create_missing_features(
dataframe=df,
unique_keys={"gene": "gene_id"},
extra_columns={
"gene": {
"gene_name",
"gene_biotype"
}.intersection(column_names),
},
missing_value="")
logger.info("Done.")
if expect_transcript_feature and "transcript" not in observed_features:
logger.info("Creating missing transcript features...")
df = create_missing_features(
dataframe=df,
unique_keys={"transcript": "transcript_id"},
extra_columns={
"transcript": {
"gene_id",
"gene_name",
"gene_biotype",
"transcript_name",
"transcript_biotype",
"protein_id",
}.intersection(column_names)
},
missing_value="")
logger.info("Done.")
return df
|
def _load_gtf_as_dataframe(self, usecols=None, features=None)
|
Parse this genome source's GTF file and load it as a Pandas DataFrame
| 2.88316 | 2.877219 | 1.002065 |
transcript_id_results = self.db.query(
select_column_names=['transcript_id'],
filter_column='gene_id',
filter_value=self.id,
feature='transcript',
distinct=False,
required=False)
# We're doing a SQL query for each transcript ID to fetch its
# particular information; it might be more efficient to get all the
# columns here at once, but how would we keep that modular?
return [
self.genome.transcript_by_id(result[0])
for result in transcript_id_results
]
|
def transcripts(self)
|
Property which dynamically constructs Transcript objects for all
transcript IDs associated with this gene.
| 8.634128 | 8.074571 | 1.069299 |
from .answer import Answer
from zhihu import Post
if isinstance(something, Answer):
mapping = {
'up': 'vote_up',
'clear': 'vote_neutral',
'down': 'vote_down'
}
if vote not in mapping.keys():
raise ValueError('Invalid vote value: {0}'.format(vote))
if something.author.url == self.url:
return False
params = {'answer_id': str(something.aid)}
data = {
'_xsrf': something.xsrf,
'method': mapping[vote],
'params': json.dumps(params)
}
headers = dict(Default_Header)
headers['Referer'] = something.question.url[:-1]
res = self._session.post(Upvote_Answer_Url,
headers=headers, data=data)
return res.json()['r'] == 0
elif isinstance(something, Post):
mapping = {
'up': 'like',
'clear': 'none',
'down': 'dislike'
}
if vote not in mapping.keys():
raise ValueError('Invalid vote value: {0}'.format(vote))
if something.author.url == self.url:
return False
put_url = Upvote_Article_Url.format(
something.column_in_name, something.slug)
data = {'value': mapping[vote]}
headers = {
'Content-Type': 'application/json;charset=utf-8',
'Host': 'zhuanlan.zhihu.com',
'Referer': something.url[:-1],
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; '
'rv:39.0) Gecko/20100101 Firefox/39.0',
'X-XSRF-TOKEN': self._session.cookies.get('XSRF-TOKEN')
}
res = self._session.put(put_url, json.dumps(data), headers=headers)
return res.status_code == 204
else:
raise ValueError('argument something need to be '
'zhihu.Answer or zhihu.Post object.')
|
def vote(self, something, vote='up')
|
Upvote, downvote, or clear your vote on an answer or post.
:param Answer/Post something: the answer or post to vote on
:param str vote:
===== =========================== =======
value meaning                     default
===== =========================== =======
up    upvote                      √
down  downvote                    X
clear neither upvote nor downvote X
===== =========================== =======
:return: True on success, False on failure
:rtype: bool
| 2.41326 | 2.292348 | 1.052746 |
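A usage sketch following zhihu-py3's documented client flow (URLs and cookie file are illustrative):

```python
# Sketch: log in via a saved cookie file, then vote on an answer.
from zhihu import ZhihuClient

client = ZhihuClient('cookies.json')   # assumes prior login / saved cookies
me = client.me()                       # the account performing the vote
answer = client.answer(
    'https://www.zhihu.com/question/24825703/answer/30975949')
me.vote(answer, 'up')      # upvote
me.vote(answer, 'clear')   # withdraw the vote
```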
from .answer import Answer
if isinstance(answer, Answer) is False:
raise ValueError('argument answer need to be Zhihu.Answer object.')
if answer.author.url == self.url:
return False
data = {
'_xsrf': answer.xsrf,
'aid': answer.aid
}
res = self._session.post(Thanks_Url if thanks else Cancel_Thanks_Url,
data=data)
return res.json()['r'] == 0
|
def thanks(self, answer, thanks=True)
|
Thank or un-thank an answer.
:param Answer answer: the answer to thank or un-thank
:param thanks: True --> thank, False --> cancel the thanks
:return: True on success, False on failure
:rtype: bool
| 5.187813 | 4.358518 | 1.19027 |
from .question import Question
from .topic import Topic
from .collection import Collection
if isinstance(something, Author):
if something.url == self.url:
return False
data = {
'_xsrf': something.xsrf,
'method': 'follow_member' if follow else 'unfollow_member',
'params': json.dumps({'hash_id': something.hash_id})
}
res = self._session.post(Follow_Author_Url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Question):
data = {
'_xsrf': something.xsrf,
'method': 'follow_question' if follow else 'unfollow_question',
'params': json.dumps({'question_id': str(something.qid)})
}
res = self._session.post(Follow_Question_Url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Topic):
data = {
'_xsrf': something.xsrf,
'method': 'follow_topic' if follow else 'unfollow_topic',
'params': json.dumps({'topic_id': something.tid})
}
res = self._session.post(Follow_Topic_Url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Collection):
data = {
'_xsrf': something.xsrf,
'favlist_id': something.cid
}
res = self._session.post(
Follow_Collection_Url if follow else Unfollow_Collection_Url,
data=data)
return res.json()['r'] == 0
else:
raise ValueError('argument something need to be '
'zhihu.Author, zhihu.Question'
', Zhihu.Topic or Zhihu.Collection object.')
|
def follow(self, something, follow=True)
|
Follow or unfollow a user, question, topic, or collection.
:param Author/Question/Topic/Collection something: the object to follow
:param bool follow: True --> follow, False --> unfollow
:return: True on success, False on failure
:rtype: bool
| 2.215459 | 2.038098 | 1.087023 |
from .answer import Answer
if isinstance(answer, Answer) is False:
raise ValueError('argument answer need to be Zhihu.Answer object.')
if not content:
raise ValueError('answer content cannot be empty')
data = {
'method': 'add_comment',
'params': json.dumps({'answer_id': answer.aid, 'content': content}),
'_xsrf': answer.xsrf
}
res = self._session.post(Answer_Add_Comment_URL,
data=data)
return res.json()['r'] == 0
|
def add_comment(self, answer, content)
|
Add a comment to the given answer.
:param Answer answer: the answer object
:param string content: the comment text
:return: True on success, False on failure
:rtype: bool
| 4.409957 | 3.967502 | 1.11152 |
if isinstance(author, Author) is False:
raise ValueError('argument answer need to be Zhihu.Author object.')
if not content:
raise ValueError('answer content cannot be empty')
if author.url == self.url:
return False
data = {
'member_id': author.hash_id,
'content': content,
'token': '',
'_xsrf': author.xsrf
}
res = self._session.post(Send_Message_Url,
data=data)
return res.json()['r'] == 0
|
def send_message(self, author, content)
|
Send a private message to a user.
:param Author author: the user receiving the message
:param string content: the message body
:return: True on success, False on failure
:rtype: bool
| 5.560614 | 5.035258 | 1.104336 |
from .topic import Topic
if isinstance(something, Author):
if something.url == self.url:
return False
data = {
'_xsrf': something.xsrf,
'action': 'add' if block else 'cancel',
}
block_author_url = something.url + 'block'
res = self._session.post(block_author_url, data=data)
return res.json()['r'] == 0
elif isinstance(something, Topic):
tid = something.tid
data = {
'_xsrf': something.xsrf,
'method': 'add' if block else 'del',
'tid': tid,
}
block_topic_url = 'http://www.zhihu.com/topic/ignore'
res = self._session.post(block_topic_url, data=data)
return res.status_code == 200
else:
raise ValueError('argument something need to be '
'Zhihu.Author or Zhihu.Topic object.')
|
def block(self, something, block=True)
|
Block or unblock a user or topic.
:param Author/Topic something: the user or topic to block
:param block: True --> block, False --> unblock
:return: True on success, False on failure
:rtype: bool
| 3.260779 | 2.815515 | 1.158146 |
from .answer import Answer
if isinstance(answer, Answer) is False:
raise ValueError('argument answer need to be Zhihu.Answer object.')
if answer.author.url == self.url:
return False
data = {
'_xsrf': answer.xsrf,
'aid': answer.aid
}
res = self._session.post(Unhelpful_Url if unhelpful else Cancel_Unhelpful_Url,
data=data)
return res.json()['r'] == 0
|
def unhelpful(self, answer, unhelpful=True)
|
Mark an answer as "unhelpful", or clear that mark.
:param Answer answer: the answer to mark or unmark as unhelpful
:param unhelpful: True --> mark as unhelpful, False --> clear the mark
:return: True on success, False on failure
:rtype: bool
| 4.911932 | 4.18118 | 1.174772 |
from .column import Column
if 'column' in self.soup:
url = Column_Url + '/' + self.soup['column']['slug']
name = self.soup['column']['name']
return Column(url, name, session=self._session)
else:
return None
|
def column(self)
|
Get the column the post belongs to.
:return: the post's column
:rtype: Column
| 5.63668 | 4.940172 | 1.140988 |
from .author import Author
url = self.soup['author']['profileUrl']
name = self.soup['author']['name']
motto = self.soup['author']['bio']
template = self.soup['author']['avatar']['template']
photo_id = self.soup['author']['avatar']['id']
photo_url = template.format(id=photo_id, size='r')
return Author(url, name, motto, photo_url=photo_url,
session=self._session)
|
def author(self)
|
Get the post's author.
:return: the post's author
:rtype: Author
| 3.971525 | 4.028121 | 0.98595 |
if mode not in ["html", "md", "markdown"]:
raise ValueError("`mode` must be 'html', 'markdown' or 'md',"
" got {0}".format(mode))
self._make_soup()
file = get_path(filepath, filename, mode, self.column.name,
self.title + '-' + self.author.name)
with open(file, 'wb') as f:
if mode == "html":
f.write(self.soup['content'].encode('utf-8'))
else:
import html2text
h2t = html2text.HTML2Text()
h2t.body_width = 0
f.write(h2t.handle(self.soup['content']).encode('utf-8'))
|
def save(self, filepath=None, filename=None, mode="md")
|
Save the post as an HTML or markdown document.
:param str filepath: directory to save into; defaults to a directory
named after the column title under the current directory; use "."
for the current directory itself.
:param str filename: file name to save as; defaults to
<post title> - <author name>.html/md. If the file already exists,
a number is appended to make the name unique.
**Do not include a .html or .md suffix in a custom filename.**
:param str mode: output format, one of `html`, `markdown`, `md`.
:return: None
:rtype: None
| 3.019075 | 2.768025 | 1.090696 |
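A usage sketch for saving a post (URL illustrative; `client` is a logged-in ZhihuClient as in the earlier sketch):

```python
# Sketch: fetch a column post and save it as markdown in the
# current directory, letting save() pick the default filename.
post = client.post('https://zhuanlan.zhihu.com/p/19743362')
post.save(filepath='.', mode='md')
```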
from .author import Author, ANONYMOUS
self._make_soup()
headers = dict(Default_Header)
headers['Host'] = 'zhuanlan.zhihu.com'
json = self._session.get(
Post_Get_Upvoter.format(self.slug),
headers=headers
).json()
for au in json:
try:
yield Author(
au['profileUrl'],
au['name'],
au['bio'],
photo_url=au['avatar']['template'].format(
id=au['avatar']['id'], size='r'),
session=self._session
)
except ValueError: # invalid url
yield ANONYMOUS
|
def upvoters(self)
|
Get the users who upvoted the post.
:return: the post's upvoters, as a generator.
| 6.092627 | 5.781113 | 1.053885 |
from .author import Author
author = self.soup.find('div', class_='zm-item-answer-author-info')
url, name, motto, photo = parser_author_from_tag(author)
if name == '匿名用户':
return ANONYMOUS
else:
return Author(url, name, motto, photo_url=photo,
session=self._session)
|
def author(self)
|
Get the answer's author.
:return: the answer's author
:rtype: Author
| 5.591129 | 4.750951 | 1.176844 |
from .question import Question
question_link = self.soup.find(
"h2", class_="zm-item-title").a
url = Zhihu_URL + question_link["href"]
title = question_link.text.strip()
followers_num = int(self.soup.find(
'div', class_='zh-question-followers-sidebar').div.a.strong.text)
answers_num = int(re_get_number.match(self.soup.find(
'div', class_='zh-answers-title').h3.a.text).group(1))
return Question(url, title, followers_num, answers_num,
session=self._session)
|
def question(self)
|
Get the question the answer belongs to.
:return: the answer's question
:rtype: Question
| 3.968771 | 3.835109 | 1.034852 |
self._make_soup()
next_req = '/answer/' + str(self.aid) + '/voters_profile'
while next_req != '':
data = self._session.get(Zhihu_URL + next_req).json()
next_req = data['paging']['next']
for html in data['payload']:
soup = BeautifulSoup(html)
yield self._parse_author_soup(soup)
|
def upvoters(self)
|
Get the users who upvoted the answer, as a generator.
:return: upvoting users
:rtype: Author.Iterable
| 6.294541 | 5.810788 | 1.083251 |
answer_wrap = self.soup.find('div', id='zh-question-answer-wrap')
content = answer_wrap.find('div', class_='zm-editable-content')
content = answer_content_process(content)
return content
|
def content(self)
|
Return the answer content as processed HTML.
:return: the answer content
:rtype: str
| 6.170931 | 5.223493 | 1.18138 |
element = self.soup.find("a", {
"data-za-a": "click_answer_collected_count"
})
if element is None:
return 0
else:
return int(element.get_text())
|
def collect_num(self)
|
Get the number of times the answer has been collected.
:return: the answer's collect count
:rtype: int
| 7.400236 | 5.796441 | 1.276686 |
import time
gotten_feed_num = 20
offset = 0
data = {
'method':'next',
'_xsrf': self.xsrf
}
while gotten_feed_num >= 10:
data['params'] = "{\"answer_url\": %d,\"offset\": %d}" % (self.id, offset)
res = self._session.post(url=Get_Collection_Url, data=data)
gotten_feed_num = len(res.json()['msg'])
offset += gotten_feed_num
soup = BeautifulSoup(''.join(res.json()['msg']))
for zm_item in soup.find_all('div', class_='zm-item'):
url = Zhihu_URL + zm_item.h2.a['href']
name = zm_item.h2.a.text
links = zm_item.div.find_all('a')
owner = Author(links[0]['href'], session=self._session)
follower_num = int(links[1].text.split()[0])
yield Collection(url, owner=owner, name=name,
follower_num=follower_num,
session=self._session)
time.sleep(0.2)
|
def collections(self)
|
Get the collections containing this answer.
:return: collections containing this answer
:rtype: Collection.Iterable
collect_num does not necessarily equal len(collections); for example,
https://www.zhihu.com/question/20064699/answer/13855720
shows 38 collects but only 30 collections.
| 3.935322 | 3.564147 | 1.104141 |
comment = self.soup.select_one("div.answer-actions a.toggle-comment")
comment_num_string = comment.text
number = comment_num_string.split()[0]
return int(number) if number.isdigit() else 0
|
def comment_num(self)
|
:return: the number of comments under the answer
:rtype: int
| 4.586106 | 4.188812 | 1.094847 |
import math
from .author import Author, ANONYMOUS
from .comment import Comment
api_url = Get_Answer_Comment_URL.format(self.aid)
page = pages = 1
while page <= pages:
res = self._session.get(api_url + '?page=' + str(page))
if page == 1:
total = int(res.json()['paging']['totalCount'])
if total == 0:
return
pages = math.ceil(total / 30)
page += 1
comment_items = res.json()['data']
for comment_item in comment_items:
comment_id = comment_item['id']
content = comment_item['content']
upvote_num = comment_item['likesCount']
time_string = comment_item['createdTime'][:19]
time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
if comment_item['author'].get('url') is not None:
a_url = comment_item['author']['url']
a_name = comment_item['author']['name']
photo_url_tmp = comment_item['author']['avatar']['template']
photo_url_id = comment_item['author']['avatar']['id']
a_photo_url = photo_url_tmp.replace(
'{id}', photo_url_id).replace('_{size}', '')
author_obj = Author(a_url, a_name, photo_url=a_photo_url,
session=self._session)
else:
author_obj = ANONYMOUS
yield Comment(comment_id, self, author_obj, upvote_num, content, time)
|
def comments(self)
|
Get all comments under the answer.
:return: all comments under the answer, as a generator
:rtype: Comment.Iterable
| 2.795039 | 2.703223 | 1.033965 |
super().refresh()
self._html = None
self._upvote_num = None
self._content = None
self._collect_num = None
self._comment_num = None
|
def refresh(self)
|
Refresh the Answer object's attributes.
For example, if the upvote count has increased, call ``refresh()``
first and then read the upvote_num attribute to get the updated count.
:return: None
| 6.734808 | 4.711159 | 1.429544 |
answer_num_block = self.soup.find('h3', id='zh-question-answer-num')
# With 0 or 1 answers, answer_num_block can't be found; look for an
# answer's upvote-count block instead to tell whether any answer exists.
# (Thanks to Zhihu user 段晓晨 for reporting this issue)
if answer_num_block is None:
if self.soup.find('span', class_='count') is not None:
return 1
else:
return 0
return int(answer_num_block['data-num'])
|
def answer_num(self)
|
Get the number of answers to the question.
:return: the question's answer count
:rtype: int
| 7.488444 | 7.648565 | 0.979065 |
follower_num_block = self.soup.find('div', class_='zg-gray-normal')
# When nobody follows the question, the block is missing; return 0.
# (Thanks to Zhihu user 段晓晨 for reporting this issue)
if follower_num_block is None or follower_num_block.strong is None:
return 0
return int(follower_num_block.strong.text)
|
def follower_num(self)
|
Get the number of users following the question.
:return: the question's follower count
:rtype: int
| 7.532012 | 7.666164 | 0.982501 |
from .topic import Topic
for topic in self.soup.find_all('a', class_='zm-item-tag'):
yield Topic(Zhihu_URL + topic['href'], topic.text.replace('\n', ''),
session=self._session)
|
def topics(self)
|
Get the topics the question belongs to.
:return: the question's topics
:rtype: Topic.Iterable
| 6.053818 | 5.596897 | 1.081638 |
self._make_soup()
followers_url = self.url + 'followers'
for x in common_follower(followers_url, self.xsrf, self._session):
yield x
|
def followers(self)
|
Get the users following this question.
:return: users following this question
:rtype: Author.Iterable
:note: if someone follows the question while this is running, some
users may be yielded more than once
| 8.698111 | 9.223561 | 0.943032 |
for j, a in enumerate(self.answers):
if j == i - 1:
return a
|
def top_i_answer(self, i)
|
Get the answer at a given rank.
:param int i: rank of the answer to fetch
:return: the Answer object; see the answers method for the attributes
available on it
:rtype: Answer
| 4.995723 | 4.632547 | 1.078396 |
for j, a in enumerate(self.answers):
if j <= i - 1:
yield a
else:
return
|
def top_i_answers(self, i)
|
Get the top-ranked answers.
:param int i: how many of the top answers to fetch
:return: Answer objects, as a generator
:rtype: Answer.Iterable
| 4.469532 | 4.694953 | 0.951986 |
from .author import Author, ANONYMOUS
logs = self._query_logs()
author_a = logs[-1].find_all('div')[0].a
if author_a.text == '匿名用户':
return ANONYMOUS
else:
url = Zhihu_URL + author_a['href']
return Author(url, name=author_a.text, session=self._session)
|
def author(self)
|
Get the user who asked the question.
:return: the asker
:rtype: Author or zhihu.ANONYMOUS
| 5.227213 | 4.230649 | 1.235558 |
logs = self._query_logs()
time_string = logs[-1].find('div', class_='zm-item-meta').time[
'datetime']
return datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S")
|
def creation_time(self)
|
:return: the question's creation time
:rtype: datetime.datetime
| 5.863644 | 5.260046 | 1.114751 |
data = {'_xsrf': self.xsrf, 'offset': '1'}
res = self._session.post(self.url + 'log', data=data)
_, content = res.json()['msg']
soup = BeautifulSoup(content)
time_string = soup.find_all('time')[0]['datetime']
return datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S")
|
def last_edit_time(self)
|
:return: the question's last edit time
:rtype: datetime.datetime
| 3.913861 | 3.704146 | 1.056616 |
super().refresh()
self._html = None
self._title = None
self._details = None
self._answer_num = None
self._follower_num = None
self._topics = None
self._last_edit_time = None
self._logs = None
|
def refresh(self)
|
Refresh the Question object's attributes.
For example, if the answer count has increased, call ``refresh()``
first and then read the answer_num attribute to get the updated count.
:return: None
| 5.612347 | 4.36059 | 1.287061 |
return int(re_get_number.match(
self.soup.find('a', attrs={'name': 'focus'})['id']).group(1))
|
def cid(self)
|
Get the collection's internal id (rarely needed; safe to ignore).
:return: internal id
:rtype: int
| 13.186164 | 11.952086 | 1.103252 |
return re_del_empty_line.match(
self.soup.find('h2', id='zh-fav-head-title').text).group(1)
|
def name(self)
|
Get the collection's name.
:return: the collection's name
:rtype: str
| 18.105408 | 14.003441 | 1.292926 |
from .author import Author
a = self.soup.find('h2', class_='zm-list-content-title').a
name = a.text
url = Zhihu_URL + a['href']
motto = self.soup.find(
'div', id='zh-single-answer-author-info').div.text
photo_url = PROTOCOL + self.soup.find(
'img', class_='zm-list-avatar-medium')['src'].replace('_m', '_r')
return Author(url, name, motto, photo_url=photo_url,
session=self._session)
|
def owner(self)
|
Get the collection's owner as an Author object.
:return: the collection's owner
:rtype: Author
| 5.176143 | 4.81271 | 1.075515 |
href = re_collection_url_split.match(self.url).group(1)
return int(self.soup.find('a', href=href + 'followers').text)
|
def follower_num(self)
|
Get the number of users following this collection.
:return: the collection's follower count
:rtype: int
| 10.11902 | 10.140835 | 0.997849 |
self._make_soup()
# noinspection PyTypeChecker
for question in self._page_get_questions(self.soup):
yield question
i = 2
while True:
soup = BeautifulSoup(self._session.get(
self.url[:-1] + '?page=' + str(i)).text)
for question in self._page_get_questions(soup):
if question == 0:
return
yield question
i += 1
|
def questions(self)
|
Get all Question objects in the collection.
:return: all questions in the collection, as a generator
:rtype: Question.Iterable
| 4.175993 | 4.130267 | 1.011071 |
self._make_soup()
# noinspection PyTypeChecker
for answer in self._page_get_answers(self.soup):
yield answer
i = 2
while True:
soup = BeautifulSoup(self._session.get(
self.url[:-1] + '?page=' + str(i)).text)
for answer in self._page_get_answers(soup):
if answer == 0:
return
yield answer
i += 1
|
def answers(self)
|
Get all Answer objects in the collection.
:return: all answers in the collection, as a generator
:rtype: Answer.Iterable
| 4.199093 | 4.186428 | 1.003025 |
import time
from datetime import datetime
from .answer import Answer
from .question import Question
from .acttype import CollectActType
self._make_soup()
gotten_feed_num = 20
offset = 0
data = {
'start': 0,
'_xsrf': self.xsrf
}
api_url = self.url + 'log'
while gotten_feed_num == 20:
data['offset'] = offset
res = self._session.post(url=api_url, data=data)
gotten_feed_num = res.json()['msg'][0]
soup = BeautifulSoup(res.json()['msg'][1])
offset += gotten_feed_num
zm_items = soup.find_all('div', class_='zm-item')
for zm_item in zm_items:
act_time = datetime.strptime(zm_item.find('time').text, "%Y-%m-%d %H:%M:%S")
if zm_item.find('ins'):
link = zm_item.find('ins').a
act_type = CollectActType.INSERT_ANSWER
elif zm_item.find('del'):
link = zm_item.find('del').a
act_type = CollectActType.DELETE_ANSWER
else:
continue
try:
answer_url = Zhihu_URL + link['href']
question_url = re_a2q.match(answer_url).group(1)
question = Question(question_url, link.text)
answer = Answer(
answer_url, question, session=self._session)
yield CollectActivity(
act_type, act_time, self.owner, self, answer)
except AttributeError:
act_type = CollectActType.CREATE_COLLECTION
yield CollectActivity(
act_type, act_time, self.owner, self)
data['start'] = zm_items[-1]['id'][8:]
time.sleep(0.5)
|
def logs(self)
|
Get the collection's log.
:return: the operations in the collection's log, as a generator
:rtype: CollectActivity.Iterable
| 3.314245 | 3.013276 | 1.099881 |
origin_host = self._session.headers.get('Host')
for offset in range(0, (self.post_num - 1) // 10 + 1):
self._session.headers.update(Host='zhuanlan.zhihu.com')
res = self._session.get(
Column_Posts_Data.format(self._in_name, offset * 10))
soup = res.json()
self._session.headers.update(Host=origin_host)
for post in soup:
yield self._parse_post_data(post)
|
def posts(self)
|
Get all posts in the column.
:return: all posts in the column, as a generator
:rtype: Post.Iterable
| 4.617586 | 4.36317 | 1.05831 |
invalid_char_list = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n']
res = ''
for char in text:
if char not in invalid_char_list:
res += char
return res
|
def remove_invalid_char(text)
|
Remove invalid characters from a string; typically used to keep a
file name valid when saving.
:param str text: the string to process
:return: the processed string
:rtype: str
| 2.371584 | 2.483718 | 0.954852 |
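A quick usage sketch:

```python
# The characters '/', ':', '?', '"' and the newline are stripped,
# leaving a string that is safe to use as a filename.
title = 'Q: what is "A/B" testing?\n'
print(remove_invalid_char(title))   # -> 'Q what is AB testing'
```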