code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
# Use globals to load CJK tokenizers on demand, so that we can still run
# in environments that lack the CJK dependencies
global _mecab_tokenize, _jieba_tokenize
language = langcodes.get(lang)
info = get_language_info(language)
text = preprocess_text(text, language)
if info['tokenizer'] == 'mecab':
from wordfreq.mecab import mecab_tokenize as _mecab_tokenize
# Get just the language code out of the Language object, so we can
# use it to select a MeCab dictionary
tokens = _mecab_tokenize(text, language.language)
if not include_punctuation:
tokens = [token for token in tokens if not PUNCT_RE.match(token)]
elif info['tokenizer'] == 'jieba':
from wordfreq.chinese import jieba_tokenize as _jieba_tokenize
tokens = _jieba_tokenize(text, external_wordlist=external_wordlist)
if not include_punctuation:
tokens = [token for token in tokens if not PUNCT_RE.match(token)]
else:
# This is the default case where we use the regex tokenizer. First
# let's complain a bit if we ended up here because we don't have an
# appropriate tokenizer.
if info['tokenizer'] != 'regex' and lang not in _WARNED_LANGUAGES:
logger.warning(
"The language '{}' is in the '{}' script, which we don't "
"have a tokenizer for. The results will be bad."
.format(lang, info['script'])
)
_WARNED_LANGUAGES.add(lang)
tokens = simple_tokenize(text, include_punctuation=include_punctuation)
return tokens
|
def tokenize(text, lang, include_punctuation=False, external_wordlist=False)
|
Tokenize this text in a way that's relatively simple but appropriate for
the language. Strings that are looked up in wordfreq will be run through
this function first, so that they can be expected to match the data.
The text will be run through a number of pre-processing steps that vary
by language; see the docstring of `wordfreq.preprocess.preprocess_text`.
If `include_punctuation` is True, punctuation will be included as separate
tokens. Otherwise, punctuation will be omitted in the output.
CJK scripts
-----------
In the CJK languages, word boundaries can't usually be identified by a
regular expression. Instead, there needs to be some language-specific
handling. In Chinese, we use the Jieba tokenizer, with a custom word list
to match the words whose frequencies we can look up. In Japanese and
Korean, we use the MeCab tokenizer.
The `external_wordlist` option only affects Chinese tokenization. If it's
True, then wordfreq will not use its own Chinese wordlist for tokenization.
Instead, it will use the large wordlist packaged with the Jieba tokenizer,
and it will leave Traditional Chinese characters as is. This will probably
give more accurate tokenization, but the resulting tokens won't necessarily
have word frequencies that can be looked up.
If you end up seeing tokens that are entire phrases or sentences glued
together, that probably means you passed in CJK text with the wrong
language code.
| 3.72763 | 3.515698 | 1.060282 |
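A minimal usage sketch of the `tokenize` function described above, assuming it is importable from the top-level `wordfreq` package (as in recent releases); the example strings are illustrative and the exact token output depends on the installed CJK dependencies.

from wordfreq import tokenize

# Default regex tokenizer (no CJK dependency required)
tokens_fr = tokenize("l'heure d'été", 'fr')

# Chinese goes through Jieba; external_wordlist trades lookup-ability for accuracy
tokens_zh = tokenize('今天天气很好', 'zh', external_wordlist=False)

# Punctuation is dropped unless explicitly requested
tokens_en = tokenize('Hello, world!', 'en', include_punctuation=True)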
global _simplify_chinese
info = get_language_info(lang)
tokens = tokenize(text, lang, include_punctuation, external_wordlist)
if info['lookup_transliteration'] == 'zh-Hans':
from wordfreq.chinese import simplify_chinese as _simplify_chinese
tokens = [_simplify_chinese(token) for token in tokens]
return [smash_numbers(token) for token in tokens]
|
def lossy_tokenize(text, lang, include_punctuation=False, external_wordlist=False)
|
Get a list of tokens for this text, with largely the same results and
options as `tokenize`, but aggressively normalize some text in a lossy way
that's good for counting word frequencies.
In particular:
- Any sequence of 2 or more adjacent digits, possibly with intervening
punctuation such as a decimal point, will replace each digit with '0'
so that frequencies for numbers don't have to be counted separately.
This is similar to but not quite identical to the word2vec Google News
data, which replaces digits with '#' in tokens with more than one digit.
- In Chinese, unless Traditional Chinese is specifically requested using
'zh-Hant', all characters will be converted to Simplified Chinese.
| 6.000307 | 5.39549 | 1.112097 |
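The digit-smashing rule described above can be approximated with a small standalone helper; this is a sketch of the stated behavior, not wordfreq's actual `smash_numbers` implementation.

import re

MULTI_DIGIT_RE = re.compile(r'\d+(?:[.,]\d+)*')

def smash_numbers_sketch(token):
    # Replace every digit with '0' in any run of 2 or more digits
    # (possibly separated by '.' or ','); single digits are left alone.
    def replace(match):
        text = match.group(0)
        if sum(ch.isdigit() for ch in text) >= 2:
            return re.sub(r'\d', '0', text)
        return text
    return MULTI_DIGIT_RE.sub(replace, token)

# smash_numbers_sketch('3.14') -> '0.00'; smash_numbers_sketch('7') -> '7'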
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
|
def read_cBpack(filename)
|
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
| 3.163114 | 3.264735 | 0.968873 |
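The "red fish blue fish" example above can be built and round-tripped directly with gzip and msgpack; this sketch assumes the msgpack-python package and writes to a hypothetical temporary path.

import gzip
import msgpack

buckets = [[] for _ in range(61)]
buckets[30] = ['fish']          # -30 cB, about 50% of tokens
buckets[60] = ['blue', 'red']   # -60 cB, about 25% of tokens
data = [{'format': 'cB', 'version': 1}] + buckets

with gzip.open('/tmp/example.msgpack.gz', 'wb') as outfile:   # hypothetical path
    msgpack.pack(data, outfile)

# Reading it back mirrors read_cBpack above
with gzip.open('/tmp/example.msgpack.gz', 'rb') as infile:
    loaded = msgpack.load(infile, raw=False)
assert loaded[1:][30] == ['fish']
assert loaded[1:][60] == ['blue', 'red']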
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
|
def available_languages(wordlist='best')
|
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
| 3.565268 | 3.510666 | 1.015553 |
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best])
|
def get_frequency_list(lang, wordlist='best', match_cutoff=30)
|
Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list.
| 6.917068 | 5.336514 | 1.296177 |
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs
|
def get_frequency_dict(lang, wordlist='best', match_cutoff=30)
|
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
| 5.59791 | 5.57875 | 1.003435 |
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args]
|
def word_frequency(word, lang, wordlist='best', minimum=0.)
|
Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frequencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity.
| 2.214803 | 2.615117 | 0.846923 |
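A brief usage sketch of `word_frequency`, assuming it is importable from the top-level `wordfreq` package; the returned probabilities depend on the installed wordlists, so no specific values are asserted here.

from wordfreq import word_frequency

freq = word_frequency('the', 'en')                       # a probability between 0 and 1
rare = word_frequency('zyzzyva', 'en', wordlist='large')

# Using `minimum` avoids a hard discontinuity at zero for unknown words
smoothed = word_frequency('notarealword', 'en', minimum=1e-8)
assert smoothed >= 1e-8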
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2)
|
def zipf_frequency(word, lang, wordlist='best', minimum=0.)
|
Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at least as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization.
| 3.977384 | 5.505508 | 0.722437 |
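Given the definition above (a Zipf value of x means 10**x occurrences per billion words), the conversions reduce to simple base-10 logarithms; this is a sketch of that arithmetic, not necessarily wordfreq's exact `zipf_to_freq`/`freq_to_zipf` code.

import math

def freq_to_zipf_sketch(freq):
    # proportion of tokens -> occurrences per billion words, on a log scale
    return math.log10(freq) + 9.0

def zipf_to_freq_sketch(zipf):
    return 10.0 ** (zipf - 9.0)

# A word occurring once per million words sits at 3.0 on the Zipf scale
assert round(freq_to_zipf_sketch(1e-6), 2) == 3.0
assert math.isclose(zipf_to_freq_sketch(3.0), 1e-6)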
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results
|
def top_n_list(lang, n, wordlist='best', ascii_only=False)
|
Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only` is True, then only ASCII words are considered.
| 3.610487 | 3.694591 | 0.977236 |
n_choices = 2 ** bits_per_word
choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
if len(choices) < n_choices:
raise ValueError(
"There aren't enough words in the wordlist to provide %d bits of "
"entropy per word." % bits_per_word
)
return ' '.join([random.choice(choices) for i in range(nwords)])
|
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
ascii_only=False)
|
Returns a string of random, space separated words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
You can restrict the selection of words to those written in ASCII
characters by setting `ascii_only` to True.
| 3.293526 | 3.210435 | 1.025882 |
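A quick sketch of the entropy arithmetic implied above: each word is drawn uniformly from 2**bits_per_word candidates, so a passphrase carries roughly nwords * bits_per_word bits.

nwords = 5
bits_per_word = 12
n_choices = 2 ** bits_per_word        # 4096 candidate words
total_bits = nwords * bits_per_word   # 60 bits for the default settings
print(n_choices, total_bits)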
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
|
def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12)
|
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
| 2.792948 | 4.850792 | 0.575771 |
global jieba_tokenizer, jieba_orig_tokenizer
if external_wordlist:
if jieba_orig_tokenizer is None:
jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
return jieba_orig_tokenizer.lcut(text)
else:
if jieba_tokenizer is None:
jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
# Tokenize the Simplified Chinese version of the text, but return
# those spans from the original text, even if it's in Traditional
# Chinese
tokens = []
for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):
tokens.append(text[start:end])
return tokens
|
def jieba_tokenize(text, external_wordlist=False)
|
Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place.
| 3.78261 | 3.967599 | 0.953375 |
text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
return text.casefold()
|
def casefold_with_i_dots(text)
|
Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters.
| 3.642746 | 3.659752 | 0.995353 |
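A self-contained restatement of the function above with a worked Turkish example; dotted capital İ maps to 'i' and dotless capital I maps to 'ı' before case-folding.

import unicodedata

def casefold_with_i_dots(text):
    text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
    return text.casefold()

# Plain str.casefold() would turn 'DİYARBAKIR' into 'di̇yarbakir' (with a stray
# combining dot); the Turkish-aware version gives the expected form:
assert casefold_with_i_dots('DİYARBAKIR') == 'diyarbakır'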
matched = best_match(language, targets, min_score=min_score)
return matched[1] > 0
|
def _language_in_list(language, targets, min_score=80)
|
A helper function to determine whether this language matches one of the
target languages, with a match score above a certain threshold.
The languages can be given as strings (language tags) or as Language
objects. `targets` can be any iterable of such languages.
| 6.189904 | 9.854833 | 0.628108 |
suggested_pkg = names[0]
paths = [
os.path.expanduser('~/.local/lib/mecab/dic'),
'/var/lib/mecab/dic',
'/var/local/lib/mecab/dic',
'/usr/lib/mecab/dic',
'/usr/local/lib/mecab/dic',
'/usr/lib/x86_64-linux-gnu/mecab/dic',
]
full_paths = [os.path.join(path, name) for path in paths for name in names]
checked_paths = [path for path in full_paths if len(path) <= MAX_PATH_LENGTH]
for path in checked_paths:
if os.path.exists(path):
return path
error_lines = [
"Couldn't find the MeCab dictionary named %r." % suggested_pkg,
"You should download or use your system's package manager to install",
"the %r package." % suggested_pkg,
"",
"We looked in the following locations:"
] + ["\t%s" % path for path in checked_paths]
skipped_paths = [path for path in full_paths if len(path) > MAX_PATH_LENGTH]
if skipped_paths:
error_lines += [
"We had to skip these paths that are too long for MeCab to find:",
] + ["\t%s" % path for path in skipped_paths]
raise OSError('\n'.join(error_lines))
|
def find_mecab_dictionary(names)
|
Find a MeCab dictionary with a given name. The dictionary has to be
installed separately -- see wordfreq's README for instructions.
| 2.539567 | 2.499767 | 1.015922 |
if lang not in MECAB_DICTIONARY_NAMES:
raise ValueError("Can't run MeCab on language %r" % lang)
if lang not in MECAB_ANALYZERS:
MECAB_ANALYZERS[lang] = make_mecab_analyzer(MECAB_DICTIONARY_NAMES[lang])
analyzer = MECAB_ANALYZERS[lang]
text = unicodedata.normalize('NFKC', text.strip())
analyzed = analyzer.parse(text)
if not analyzed:
return []
return [line.split('\t')[0]
for line in analyzed.split('\n')
if line != '' and line != 'EOS']
|
def mecab_tokenize(text, lang)
|
Use the mecab-python3 package to tokenize the given text. The `lang`
must be 'ja' for Japanese or 'ko' for Korean.
The simplest output from mecab-python3 is the single-string form, which
contains the same table that the command-line version of MeCab would output.
We find the tokens in the first column of this table.
| 2.761717 | 2.972903 | 0.928963 |
if table == 'sr-Latn':
return text.translate(SR_LATN_TABLE)
elif table == 'az-Latn':
return text.translate(AZ_LATN_TABLE)
else:
raise ValueError("Unknown transliteration table: {!r}".format(table))
|
def transliterate(table, text)
|
Transliterate text according to one of the tables above.
`table` chooses the table. It looks like a language code but comes from a
very restricted set:
- 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the
Latin alphabet.
- 'az-Latn' means the same for Azerbaijani: Cyrillic to the Latin alphabet.
| 2.682949 | 2.277372 | 1.17809 |
'''Set the current exit code based on the Statistics.'''
for error_type in statistics.errors:
exit_code = app.ERROR_CODE_MAP.get(error_type)
if exit_code:
app.update_exit_code(exit_code)
|
def _update_exit_code_from_stats(cls, statistics: Statistics,
app: Application)
|
Set the current exit code based on the Statistics.
| 5.04246 | 3.528266 | 1.429161 |
'''Return whether the document is likely to be a Sitemap.'''
if response.body:
if cls.is_file(response.body):
return True
|
def is_response(cls, response)
|
Return whether the document is likely to be a Sitemap.
| 12.576195 | 5.007771 | 2.511336 |
'''Return whether the file is likely a Sitemap.'''
peeked_data = wpull.util.peek_file(file)
if is_gzip(peeked_data):
try:
peeked_data = wpull.decompression.gzip_uncompress(
peeked_data, truncated=True
)
except zlib.error:
pass
peeked_data = wpull.string.printable_bytes(peeked_data)
if b'<?xml' in peeked_data \
and (b'<sitemapindex' in peeked_data or b'<urlset' in peeked_data):
return True
|
def is_file(cls, file)
|
Return whether the file is likely a Sitemap.
| 4.923816 | 4.01192 | 1.227296 |
'''Parse and return a URLInfo.
This function logs a warning if the URL cannot be parsed and returns
None.
'''
try:
url_info = URLInfo.parse(url, encoding=encoding)
except ValueError as error:
_logger.warning(__(
_('Unable to parse URL ‘{url}’: {error}.'),
url=wpull.string.printable_str(url), error=error))
else:
return url_info
|
def parse_url_or_log(url, encoding='utf-8')
|
Parse and return a URLInfo.
This function logs a warning if the URL cannot be parsed and returns
None.
| 5.919345 | 4.03636 | 1.466506 |
'''Normalizes a hostname so that it is ASCII and a valid domain name.'''
try:
new_hostname = hostname.encode('idna').decode('ascii').lower()
except UnicodeError as error:
raise UnicodeError('Hostname {} rejected: {}'.format(hostname, error)) from error
if hostname != new_hostname:
# Check for round-trip. May raise UnicodeError
new_hostname.encode('idna')
return new_hostname
|
def normalize_hostname(hostname)
|
Normalizes a hostname so that it is ASCII and a valid domain name.
| 4.892896 | 4.225237 | 1.158017 |
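Hedged worked examples of `normalize_hostname` as defined above, relying only on Python's built-in 'idna' codec; the hostnames are illustrative.

assert normalize_hostname('EXAMPLE.COM') == 'example.com'
# Non-ASCII labels are punycode-encoded by the IDNA step
assert normalize_hostname('münchen.de') == 'xn--mnchen-3ya.de'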
'''Normalize a path string.
Flattens a path by removing dot parts,
percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
if not path.startswith('/'):
path = '/' + path
path = percent_encode(flatten_path(path, flatten_slashes=True), encoding=encoding)
return uppercase_percent_encoding(path)
|
def normalize_path(path, encoding='utf-8')
|
Normalize a path string.
Flattens a path by removing dot parts,
percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
| 8.786017 | 3.219705 | 2.728827 |
'''Normalize a query string.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode_plus(text, encoding=encoding)
return uppercase_percent_encoding(path)
|
def normalize_query(text, encoding='utf-8')
|
Normalize a query string.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
| 11.021081 | 4.553272 | 2.420475 |
'''Normalize a fragment.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=FRAGMENT_ENCODE_SET)
return uppercase_percent_encoding(path)
|
def normalize_fragment(text, encoding='utf-8')
|
Normalize a fragment.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
| 10.680976 | 4.665184 | 2.289508 |
'''Normalize a username
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=USERNAME_ENCODE_SET)
return uppercase_percent_encoding(path)
|
def normalize_username(text, encoding='utf-8')
|
Normalize a username
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
| 11.180542 | 5.325304 | 2.099513 |
'''Normalize a password
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=PASSWORD_ENCODE_SET)
return uppercase_percent_encoding(path)
|
def normalize_password(text, encoding='utf-8')
|
Normalize a password
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
| 12.06625 | 5.753571 | 2.097176 |
'''Percent encode text.
Unlike Python's ``quote``, this function accepts a blacklist instead of
a whitelist of safe characters.
'''
byte_string = text.encode(encoding)
try:
mapping = _percent_encoder_map_cache[encode_set]
except KeyError:
mapping = _percent_encoder_map_cache[encode_set] = PercentEncoderMap(
encode_set).__getitem__
return ''.join([mapping(char) for char in byte_string])
|
def percent_encode(text, encode_set=DEFAULT_ENCODE_SET, encoding='utf-8')
|
Percent encode text.
Unlike Python's ``quote``, this function accepts a blacklist instead of
a whitelist of safe characters.
| 4.822957 | 3.020801 | 1.596582 |
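A simplified standalone sketch of blacklist-based percent-encoding, to illustrate the idea described above; it is not wpull's PercentEncoderMap, and the default encode set used here is hypothetical.

def percent_encode_sketch(text, encode_set=frozenset(' "#<>'), encoding='utf-8'):
    # Encode control bytes, non-ASCII bytes, '%', and anything in the blacklist;
    # every other byte passes through unchanged.
    out = []
    for byte in text.encode(encoding):
        char = chr(byte)
        if byte < 0x20 or byte > 0x7e or char == '%' or char in encode_set:
            out.append('%{:02X}'.format(byte))
        else:
            out.append(char)
    return ''.join(out)

# percent_encode_sketch('a b/ç') -> 'a%20b/%C3%A7'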
'''Percent encode text for query strings.
Unlike Python's ``quote_plus``, this function accepts a blacklist instead
of a whitelist of safe characters.
'''
if ' ' not in text:
return percent_encode(text, encode_set, encoding)
else:
result = percent_encode(text, encode_set, encoding)
return result.replace(' ', '+')
|
def percent_encode_plus(text, encode_set=QUERY_ENCODE_SET,
encoding='utf-8')
|
Percent encode text for query strings.
Unlike Python's ``quote_plus``, this function accepts a blacklist instead
of a whitelist of safe characters.
| 4.605263 | 2.479007 | 1.857705 |
'''Return whether URL schemes are similar.
This function considers the following schemes to be similar:
* HTTP and HTTPS
'''
if scheme1 == scheme2:
return True
if scheme1 in ('http', 'https') and scheme2 in ('http', 'https'):
return True
return False
|
def schemes_similar(scheme1, scheme2)
|
Return whether URL schemes are similar.
This function considers the following schemes to be similar:
* HTTP and HTTPS
| 3.470663 | 2.38799 | 1.453383 |
'''Return whether a path is a subpath of another.
Args:
base_path: The base path
test_path: The path which we are testing
trailing_slash: If True, the trailing slash is treated with importance.
For example, ``/images/`` is a directory while ``/images`` is a
file.
wildcards: If True, globbing wildcards are matched against paths
'''
if trailing_slash:
base_path = base_path.rsplit('/', 1)[0] + '/'
test_path = test_path.rsplit('/', 1)[0] + '/'
else:
if not base_path.endswith('/'):
base_path += '/'
if not test_path.endswith('/'):
test_path += '/'
if wildcards:
return fnmatch.fnmatchcase(test_path, base_path)
else:
return test_path.startswith(base_path)
|
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False)
|
Return whether a path is a subpath of another.
Args:
base_path: The base path
test_path: The path which we are testing
trailing_slash: If True, the trailing slash is treated with importance.
For example, ``/images/`` is a directory while ``/images`` is a
file.
wildcards: If True, globbing wildcards are matched against paths
| 2.735394 | 1.487866 | 1.838469 |
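A few worked examples of `is_subdir` as defined above (the paths are illustrative):

assert is_subdir('/images/', '/images/cat.png', trailing_slash=True)
assert not is_subdir('/images/', '/imagesX/cat.png', trailing_slash=True)
# With wildcards=True, the base path is matched with fnmatch
assert is_subdir('/forum/*', '/forum/viewtopic.php', wildcards=True)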
'''Uppercases percent-encoded sequences.'''
if '%' not in text:
return text
return re.sub(
r'%[a-f0-9][a-f0-9]',
lambda match: match.group(0).upper(),
text)
|
def uppercase_percent_encoding(text)
|
Uppercases percent-encoded sequences.
| 3.48162 | 3.520421 | 0.988979 |
'''Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
'''
items = []
for pair in qs.split('&'):
name, delim, value = pair.partition('=')
if not delim and keep_blank_values:
value = None
if keep_blank_values or value:
items.append((name, value))
return items
|
def split_query(qs, keep_blank_values=False)
|
Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
| 3.60736 | 1.802721 | 2.001064 |
'''Return a key-values mapping from a query string.
Plus symbols are replaced with spaces.
'''
dict_obj = {}
for key, value in split_query(text, True):
if key not in dict_obj:
dict_obj[key] = []
if value:
dict_obj[key].append(value.replace('+', ' '))
else:
dict_obj[key].append('')
return dict_obj
|
def query_to_map(text)
|
Return a key-values mapping from a query string.
Plus symbols are replaced with spaces.
| 4.363529 | 2.768111 | 1.576356 |
'''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.'''
if url.startswith('//') and len(url) > 2:
scheme = base_url.partition(':')[0]
if scheme:
return urllib.parse.urljoin(
base_url,
'{0}:{1}'.format(scheme, url),
allow_fragments=allow_fragments
)
return urllib.parse.urljoin(
base_url, url, allow_fragments=allow_fragments)
|
def urljoin(base_url, url, allow_fragments=True)
|
Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.
| 3.35728 | 2.662867 | 1.260776 |
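Hedged examples of the scheme-relative behavior described above, using the `urljoin` wrapper as defined:

# A scheme-relative URL inherits the base URL's scheme
assert urljoin('https://example.com/page', '//cdn.example.net/app.js') == 'https://cdn.example.net/app.js'
# Otherwise it defers to urllib.parse.urljoin
assert urljoin('http://example.com/a/b', 'c') == 'http://example.com/a/c'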
'''Flatten an absolute URL path by removing the dot segments.
:func:`urllib.parse.urljoin` has some support for removing dot segments,
but it is conservative and only removes them as needed.
Arguments:
path (str): The URL path.
flatten_slashes (bool): If True, consecutive slashes are removed.
The path returned will always have a leading slash.
'''
# Based on posixpath.normpath
# Fast path
if not path or path == '/':
return '/'
# Take off leading slash
if path[0] == '/':
path = path[1:]
parts = path.split('/')
new_parts = collections.deque()
for part in parts:
if part == '.' or (flatten_slashes and not part):
continue
elif part != '..':
new_parts.append(part)
elif new_parts:
new_parts.pop()
# If the filename is empty string
if flatten_slashes and path.endswith('/') or not len(new_parts):
new_parts.append('')
# Put back leading slash
new_parts.appendleft('')
return '/'.join(new_parts)
|
def flatten_path(path, flatten_slashes=False)
|
Flatten an absolute URL path by removing the dot segments.
:func:`urllib.parse.urljoin` has some support for removing dot segments,
but it is conservative and only removes them as needed.
Arguments:
path (str): The URL path.
flatten_slashes (bool): If True, consecutive slashes are removed.
The path returned will always have a leading slash.
| 3.989214 | 2.418833 | 1.649231 |
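Worked examples of `flatten_path` as defined above:

assert flatten_path('/a/./b/../c') == '/a/c'
assert flatten_path('a//b/', flatten_slashes=True) == '/a/b/'
assert flatten_path('..') == '/'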
'''Parse a URL and return a URLInfo.'''
if url is None:
return None
url = url.strip()
if frozenset(url) & C0_CONTROL_SET:
raise ValueError('URL contains control codes: {}'.format(ascii(url)))
scheme, sep, remaining = url.partition(':')
if not scheme:
raise ValueError('URL missing scheme: {}'.format(ascii(url)))
scheme = scheme.lower()
if not sep and default_scheme:
# Likely something like example.com/mystuff
remaining = url
scheme = default_scheme
elif not sep:
raise ValueError('URI missing colon: {}'.format(ascii(url)))
if default_scheme and '.' in scheme or scheme == 'localhost':
# Maybe something like example.com:8080/mystuff or
# maybe localhost:8080/mystuff
remaining = '{}:{}'.format(scheme, remaining)
scheme = default_scheme
info = URLInfo()
info.encoding = encoding
if scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
info.raw = url
info.scheme = scheme
info.path = remaining
return info
if remaining.startswith('//'):
remaining = remaining[2:]
path_index = remaining.find('/')
query_index = remaining.find('?')
fragment_index = remaining.find('#')
try:
index_tuple = (path_index, query_index, fragment_index)
authority_index = min(num for num in index_tuple if num >= 0)
except ValueError:
authority_index = len(remaining)
authority = remaining[:authority_index]
resource = remaining[authority_index:]
try:
index_tuple = (query_index, fragment_index)
path_index = min(num for num in index_tuple if num >= 0)
except ValueError:
path_index = len(remaining)
path = remaining[authority_index + 1:path_index] or '/'
if fragment_index >= 0:
query_index = fragment_index
else:
query_index = len(remaining)
query = remaining[path_index + 1:query_index]
fragment = remaining[query_index + 1:]
userinfo, host = cls.parse_authority(authority)
hostname, port = cls.parse_host(host)
username, password = cls.parse_userinfo(userinfo)
if not hostname:
raise ValueError('Hostname is empty: {}'.format(ascii(url)))
info.raw = url
info.scheme = scheme
info.authority = authority
info.path = normalize_path(path, encoding=encoding)
info.query = normalize_query(query, encoding=encoding)
info.fragment = normalize_fragment(fragment, encoding=encoding)
info.userinfo = userinfo
info.username = percent_decode(username, encoding=encoding)
info.password = percent_decode(password, encoding=encoding)
info.host = host
info.hostname = hostname
info.port = port or RELATIVE_SCHEME_DEFAULT_PORTS[scheme]
info.resource = resource
return info
|
def parse(cls, url, default_scheme='http', encoding='utf-8')
|
Parse a URL and return a URLInfo.
| 2.533142 | 2.52003 | 1.005203 |
'''Parse the authority part and return userinfo and host.'''
userinfo, sep, host = authority.partition('@')
if not sep:
return '', userinfo
else:
return userinfo, host
|
def parse_authority(cls, authority)
|
Parse the authority part and return userinfo and host.
| 6.191607 | 3.902547 | 1.586556 |
'''Parse the userinfo and return username and password.'''
username, sep, password = userinfo.partition(':')
return username, password
|
def parse_userinfo(cls, userinfo)
|
Parse the userinfo and return username and password.
| 7.264726 | 4.846736 | 1.49889 |
'''Parse the host and return hostname and port.'''
if host.endswith(']'):
return cls.parse_hostname(host), None
else:
hostname, sep, port = host.rpartition(':')
if sep:
port = int(port)
if port < 0 or port > 65535:
raise ValueError('Port number invalid')
else:
hostname = port
port = None
return cls.parse_hostname(hostname), port
|
def parse_host(cls, host)
|
Parse the host and return hostname and port.
| 3.219844 | 2.937485 | 1.096123 |
'''Parse the hostname and normalize.'''
if hostname.startswith('['):
return cls.parse_ipv6_hostname(hostname)
else:
try:
new_hostname = normalize_ipv4_address(hostname)
except ValueError:
# _logger.debug('', exc_info=True)
new_hostname = hostname
new_hostname = normalize_hostname(new_hostname)
if any(char in new_hostname for char in FORBIDDEN_HOSTNAME_CHARS):
raise ValueError('Invalid hostname: {}'
.format(ascii(hostname)))
return new_hostname
|
def parse_hostname(cls, hostname)
|
Parse the hostname and normalize.
| 3.88736 | 3.689328 | 1.053677 |
'''Parse and normalize an IPv6 address.'''
if not hostname.startswith('[') or not hostname.endswith(']'):
raise ValueError('Invalid IPv6 address: {}'
.format(ascii(hostname)))
hostname = ipaddress.IPv6Address(hostname[1:-1]).compressed
return hostname
|
def parse_ipv6_hostname(cls, hostname)
|
Parse and normalize an IPv6 address.
| 4.768398 | 4.137809 | 1.152397 |
'''Return a dict of the attributes.'''
return dict(
raw=self.raw,
scheme=self.scheme,
authority=self.authority,
netloc=self.authority,
path=self.path,
query=self.query,
fragment=self.fragment,
userinfo=self.userinfo,
username=self.username,
password=self.password,
host=self.host,
hostname=self.hostname,
port=self.port,
resource=self.resource,
url=self.url,
encoding=self.encoding,
)
|
def to_dict(self)
|
Return a dict of the attributes.
| 2.425935 | 2.227289 | 1.089187 |
'''Return whether the URL is using the default port.'''
if self.scheme in RELATIVE_SCHEME_DEFAULT_PORTS:
return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port
|
def is_port_default(self)
|
Return whether the URL is using the default port.
| 6.418642 | 4.641787 | 1.382795 |
'''Return the host portion but omit default port if needed.'''
default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)
if not default_port:
return ''
assert '[' not in self.hostname
assert ']' not in self.hostname
if self.is_ipv6():
hostname = '[{}]'.format(self.hostname)
else:
hostname = self.hostname
if default_port != self.port:
return '{}:{}'.format(hostname, self.port)
else:
return hostname
|
def hostname_with_port(self)
|
Return the host portion but omit default port if needed.
| 3.731959 | 2.803531 | 1.331164 |
'''Return new empty URLRecord.'''
url_record = URLRecord()
url_record.url = request.url_info.url
url_record.status = Status.in_progress
url_record.try_count = 0
url_record.level = 0
return url_record
|
def _new_url_record(cls, request: Request) -> URLRecord
|
Return new empty URLRecord.
| 4.406719 | 3.942846 | 1.117649 |
'''Request callback handler.'''
self._item_session = self._new_item_session(request)
self._item_session.request = request
if self._cookie_jar:
self._cookie_jar.add_cookie_header(request)
verdict, reason = self._fetch_rule.check_subsequent_web_request(self._item_session)
self._file_writer_session.process_request(request)
if verdict:
_logger.info(__(
_('Fetching ‘{url}’.'),
url=request.url_info.url
))
return verdict
|
def _client_request_callback(self, request: Request)
|
Request callback handler.
| 7.757873 | 7.380856 | 1.05108 |
'''Pre-response callback handler.'''
self._item_session.response = response
if self._cookie_jar:
self._cookie_jar.extract_cookies(response, self._item_session.request)
action = self._result_rule.handle_pre_response(self._item_session)
self._file_writer_session.process_response(response)
return action == Actions.NORMAL
|
def _server_begin_response_callback(self, response: Response)
|
Pre-response callback handler.
| 8.527858 | 7.010046 | 1.216519 |
'''Response callback handler.'''
request = self._item_session.request
response = self._item_session.response
_logger.info(__(
_('Fetched ‘{url}’: {status_code} {reason}. '
'Length: {content_length} [{content_type}].'),
url=request.url,
status_code=response.status_code,
reason=wpull.string.printable_str(response.reason),
content_length=wpull.string.printable_str(
response.fields.get('Content-Length', _('none'))),
content_type=wpull.string.printable_str(
response.fields.get('Content-Type', _('none'))),
))
self._result_rule.handle_response(self._item_session)
if response.status_code in WebProcessor.DOCUMENT_STATUS_CODES:
filename = self._file_writer_session.save_document(response)
self._processing_rule.scrape_document(self._item_session)
self._result_rule.handle_document(self._item_session, filename)
elif response.status_code in WebProcessor.NO_DOCUMENT_STATUS_CODES:
self._file_writer_session.discard_document(response)
self._result_rule.handle_no_document(self._item_session)
else:
self._file_writer_session.discard_document(response)
self._result_rule.handle_document_error(self._item_session)
|
def _server_end_response_callback(self, response: Response)
|
Response callback handler.
| 3.282732 | 3.214109 | 1.02135 |
'''Create streams and commander.
Coroutine.
'''
assert not self._control_connection
self._control_connection = yield from self._acquire_request_connection(self._request)
self._control_stream = ControlStream(self._control_connection)
self._commander = Commander(self._control_stream)
read_callback = functools.partial(self.event_dispatcher.notify, self.Event.control_receive_data)
self._control_stream.data_event_dispatcher.add_read_listener(read_callback)
write_callback = functools.partial(self.event_dispatcher.notify, self.Event.control_send_data)
self._control_stream.data_event_dispatcher.add_write_listener(write_callback)
|
def _init_stream(self)
|
Create streams and commander.
Coroutine.
| 3.844867 | 3.110008 | 1.236289 |
'''Connect and login.
Coroutine.
'''
username = self._request.url_info.username or self._request.username or 'anonymous'
password = self._request.url_info.password or self._request.password or '-wpull@'
cached_login = self._login_table.get(self._control_connection)
if cached_login and cached_login == (username, password):
_logger.debug('Reusing existing login.')
return
try:
yield from self._commander.login(username, password)
except FTPServerError as error:
raise AuthenticationError('Login error: {}'.format(error)) \
from error
self._login_table[self._control_connection] = (username, password)
|
def _log_in(self)
|
Connect and login.
Coroutine.
| 5.385537 | 4.586727 | 1.174157 |
'''Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine.
'''
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = Response()
yield from self._prepare_fetch(request, response)
response.file_transfer_size = yield from self._fetch_size(request)
if request.restart_value:
try:
yield from self._commander.restart(request.restart_value)
response.restart_value = request.restart_value
except FTPServerError:
_logger.debug('Could not restart file.', exc_info=1)
yield from self._open_data_stream()
command = Command('RETR', request.file_path)
yield from self._begin_stream(command)
self._session_state = SessionState.file_request_sent
return response
|
def start(self, request: Request) -> Response
|
Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine.
| 6.579813 | 3.890371 | 1.691307 |
'''Fetch a file listing.
Args:
request: Request.
Returns:
A listing response populated with the initial data connection
reply.
Once the response is received, call :meth:`download_listing`.
Coroutine.
'''
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = ListingResponse()
yield from self._prepare_fetch(request, response)
yield from self._open_data_stream()
mlsd_command = Command('MLSD', self._request.file_path)
list_command = Command('LIST', self._request.file_path)
try:
yield from self._begin_stream(mlsd_command)
self._listing_type = 'mlsd'
except FTPServerError as error:
if error.reply_code in (ReplyCodes.syntax_error_command_unrecognized,
ReplyCodes.command_not_implemented):
self._listing_type = None
else:
raise
if not self._listing_type:
# This code is not in the exception handler to avoid incorrect
# exception chaining
yield from self._begin_stream(list_command)
self._listing_type = 'list'
_logger.debug('Listing type is %s', self._listing_type)
self._session_state = SessionState.directory_request_sent
return response
|
def start_listing(self, request: Request) -> ListingResponse
|
Fetch a file listing.
Args:
request: Request.
Returns:
A listing response populated with the initial data connection
reply.
Once the response is received, call :meth:`download_listing`.
Coroutine.
| 5.424972 | 3.698614 | 1.466758 |
'''Prepare for a fetch.
Coroutine.
'''
self._request = request
self._response = response
yield from self._init_stream()
connection_closed = self._control_connection.closed()
if connection_closed:
self._login_table.pop(self._control_connection, None)
yield from self._control_stream.reconnect()
request.address = self._control_connection.address
connection_reused = not connection_closed
self.event_dispatcher.notify(self.Event.begin_control, request, connection_reused=connection_reused)
if connection_closed:
yield from self._commander.read_welcome_message()
yield from self._log_in()
self._response.request = request
|
def _prepare_fetch(self, request: Request, response: Response)
|
Prepare for a fetch.
Coroutine.
| 6.585885 | 5.751999 | 1.144973 |
'''Start data stream transfer.'''
begin_reply = yield from self._commander.begin_stream(command)
self._response.reply = begin_reply
self.event_dispatcher.notify(self.Event.begin_transfer, self._response)
|
def _begin_stream(self, command: Command)
|
Start data stream transfer.
| 13.003174 | 10.076858 | 1.2904 |
'''Read the response content into file.
Args:
file: A file object or asyncio stream.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
A Response populated with the final data connection reply.
Be sure to call :meth:`start` first.
Coroutine.
'''
if self._session_state != SessionState.file_request_sent:
raise RuntimeError('File request not sent')
if rewind and file and hasattr(file, 'seek'):
original_offset = file.tell()
else:
original_offset = None
if not hasattr(file, 'drain'):
self._response.body = file
if not isinstance(file, Body):
self._response.body = Body(file)
read_future = self._commander.read_stream(file, self._data_stream)
try:
reply = yield from \
asyncio.wait_for(read_future, timeout=duration_timeout)
except asyncio.TimeoutError as error:
raise DurationTimeout(
'Did not finish reading after {} seconds.'
.format(duration_timeout)
) from error
self._response.reply = reply
if original_offset is not None:
file.seek(original_offset)
self.event_dispatcher.notify(self.Event.end_transfer, self._response)
self._session_state = SessionState.response_received
return self._response
|
def download(self, file: Optional[IO]=None, rewind: bool=True,
duration_timeout: Optional[float]=None) -> Response
|
Read the response content into file.
Args:
file: A file object or asyncio stream.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
A Response populated with the final data connection reply.
Be sure to call :meth:`start` first.
Coroutine.
| 4.987805 | 2.777398 | 1.795855 |
'''Read file listings.
Args:
file: A file object or asyncio stream.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
A Response populated with the file listings
Be sure to call :meth:`start_file_listing` first.
Coroutine.
'''
if self._session_state != SessionState.directory_request_sent:
raise RuntimeError('File request not sent')
self._session_state = SessionState.file_request_sent
yield from self.download(file=file, rewind=False,
duration_timeout=duration_timeout)
try:
if self._response.body.tell() == 0:
listings = ()
elif self._listing_type == 'mlsd':
self._response.body.seek(0)
machine_listings = wpull.protocol.ftp.util.parse_machine_listing(
self._response.body.read().decode('utf-8',
errors='surrogateescape'),
convert=True, strict=False
)
listings = list(
wpull.protocol.ftp.util.machine_listings_to_file_entries(
machine_listings
))
else:
self._response.body.seek(0)
file = io.TextIOWrapper(self._response.body, encoding='utf-8',
errors='surrogateescape')
listing_parser = ListingParser(file=file)
listings = list(listing_parser.parse_input())
_logger.debug('Listing detected as %s', listing_parser.type)
# We don't want the file to be closed when exiting this function
file.detach()
except (ListingError, ValueError) as error:
raise ProtocolError(*error.args) from error
self._response.files = listings
self._response.body.seek(0)
self._session_state = SessionState.response_received
return self._response
|
def download_listing(self, file: Optional[IO],
duration_timeout: Optional[float]=None) -> \
ListingResponse
|
Read file listings.
Args:
file: A file object or asyncio stream.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
A Response populated with the file listings
Be sure to call :meth:`start_file_listing` first.
Coroutine.
| 4.515748 | 3.230286 | 1.397941 |
'''Open the data stream connection.
Coroutine.
'''
@asyncio.coroutine
def connection_factory(address: Tuple[int, int]):
self._data_connection = yield from self._acquire_connection(address[0], address[1])
return self._data_connection
self._data_stream = yield from self._commander.setup_data_stream(
connection_factory
)
self._response.data_address = self._data_connection.address
read_callback = functools.partial(self.event_dispatcher.notify, self.Event.transfer_receive_data)
self._data_stream.data_event_dispatcher.add_read_listener(read_callback)
write_callback = functools.partial(self.event_dispatcher.notify, self.Event.transfer_send_data)
self._data_stream.data_event_dispatcher.add_write_listener(write_callback)
|
def _open_data_stream(self)
|
Open the data stream connection.
Coroutine.
| 3.790362 | 3.504998 | 1.081416 |
'''Return size of file.
Coroutine.
'''
try:
size = yield from self._commander.size(request.file_path)
return size
except FTPServerError:
return
|
def _fetch_size(self, request: Request) -> int
|
Return size of file.
Coroutine.
| 10.999022 | 7.067925 | 1.556188 |
'''Parses text for HTTP Refresh URL.
Returns:
str, None
'''
match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)
if match:
url = match.group(1)
if url.startswith('"'):
url = url.strip('"')
elif url.startswith("'"):
url = url.strip("'")
return clean_link_soup(url)
|
def parse_refresh(text)
|
Parses text for HTTP Refresh URL.
Returns:
str, None
| 3.863269 | 2.928662 | 1.319124 |
'''urljoin with warning log on error.
Returns:
str, None'''
try:
return wpull.url.urljoin(
base_url, url, allow_fragments=allow_fragments
)
except ValueError as error:
_logger.warning(__(
_('Unable to parse URL ‘{url}’: {error}.'),
url=url, error=error
))
|
def urljoin_safe(base_url, url, allow_fragments=True)
|
urljoin with warning log on error.
Returns:
str, None
| 7.255051 | 5.119178 | 1.41723 |
'''Return whether the link is likely to be inline.'''
file_type = mimetypes.guess_type(link, strict=False)[0]
if file_type:
top_level_type, subtype = file_type.split('/', 1)
return top_level_type in ('image', 'video', 'audio') or subtype == 'javascript'
|
def is_likely_inline(link)
|
Return whether the link is likely to be inline.
| 3.563092 | 3.539924 | 1.006545 |
'''Return whether the text is likely to be a link.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
'''
text = text.lower()
# Check for absolute or relative URLs
if (
text.startswith('http://')
or text.startswith('https://')
or text.startswith('ftp://')
or text.startswith('/')
or text.startswith('//')
or text.endswith('/')
or text.startswith('../')
):
return True
# Check if it has an alphanumeric file extension and not a decimal number
dummy, dot, file_extension = text.rpartition('.')
if dot and file_extension and len(file_extension) <= 4:
file_extension_set = frozenset(file_extension)
if file_extension_set \
and file_extension_set <= ALPHANUMERIC_CHARS \
and not file_extension_set <= NUMERIC_CHARS:
if file_extension in COMMON_TLD:
return False
file_type = mimetypes.guess_type(text, strict=False)[0]
if file_type:
return True
else:
return False
|
def is_likely_link(text)
|
Return whether the text is likely to be a link.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
| 3.425267 | 3.094922 | 1.106738 |
'''Return whether the text is likely to cause false positives.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
'''
# Check for string concatenation in JavaScript
if text[:1] in ',;+:' or text[-1:] in '.,;+:':
return True
# Check for unusual characters
if re.search(r'''[\\$()'"[\]{}|<>`]''', text):
return True
if text[:1] == '.' \
and not text.startswith('./') \
and not text.startswith('../'):
return True
if text in ('/', '//'):
return True
if '//' in text and '://' not in text and not text.startswith('//'):
return True
# Forbid strings like mimetypes
if text in MIMETYPES:
return True
tag_1, dummy, tag_2 = text.partition('.')
if tag_1 in HTML_TAGS and tag_2 != 'html':
return True
# Forbid things where the first part of the path looks like a domain name
if FIRST_PART_TLD_PATTERN.match(text):
return True
|
def is_unlikely_link(text)
|
Return whether the text is likely to cause false positives.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
| 5.712247 | 4.824005 | 1.18413 |
'''Return link type guessed by filename extension.
Returns:
str: A value from :class:`.item.LinkType`.
'''
mime_type = mimetypes.guess_type(filename)[0]
if not mime_type:
return
if mime_type == 'text/css':
return LinkType.css
elif mime_type == 'application/javascript':
return LinkType.javascript
elif mime_type == 'text/html' or mime_type.endswith('xml'):
return LinkType.html
elif mime_type.startswith('video') or \
mime_type.startswith('image') or \
mime_type.startswith('audio') or \
mime_type.endswith('shockwave-flash'):
return LinkType.media
|
def identify_link_type(filename)
|
Return link type guessed by filename extension.
Returns:
str: A value from :class:`.item.LinkType`.
| 2.654921 | 1.986208 | 1.336678 |
'''Return a stream writer.'''
if args.ascii_print:
return wpull.util.ASCIIStreamWriter(stream)
else:
return stream
|
def new_encoded_stream(args, stream)
|
Return a stream writer.
| 15.625262 | 12.799896 | 1.220734 |
'''Schedule check function.'''
if self._running:
_logger.debug('Schedule check function.')
self._call_later_handle = self._event_loop.call_later(
self._timeout, self._check)
|
def _schedule(self)
|
Schedule check function.
| 6.853693 | 6.249079 | 1.096753 |
'''Check and close connection if needed.'''
_logger.debug('Check if timeout.')
self._call_later_handle = None
if self._touch_time is not None:
difference = self._event_loop.time() - self._touch_time
_logger.debug('Time difference %s', difference)
if difference > self._timeout:
self._connection.close()
self._timed_out = True
if not self._connection.closed():
self._schedule()
|
def _check(self)
|
Check and close connection if needed.
| 5.884392 | 5.113838 | 1.15068 |
'''Stop running timers.'''
if self._call_later_handle:
self._call_later_handle.cancel()
self._running = False
|
def close(self)
|
Stop running timers.
| 9.346817 | 6.594716 | 1.417319 |
'''Return whether the connection is closed.'''
return not self.writer or not self.reader or self.reader.at_eof()
|
def closed(self) -> bool
|
Return whether the connection is closed.
| 9.140193 | 6.993078 | 1.307034 |
'''Establish a connection.'''
_logger.debug(__('Connecting to {0}.', self._address))
if self._state != ConnectionState.ready:
raise Exception('Closed connection must be reset before reusing.')
if self._sock:
connection_future = asyncio.open_connection(
sock=self._sock, **self._connection_kwargs()
)
else:
# TODO: maybe we don't want to ignore flow-info and scope-id?
host = self._address[0]
port = self._address[1]
connection_future = asyncio.open_connection(
host, port, **self._connection_kwargs()
)
self.reader, self.writer = yield from \
self.run_network_operation(
connection_future,
wait_timeout=self._connect_timeout,
name='Connect')
if self._timeout is not None:
self._close_timer = CloseTimer(self._timeout, self)
else:
self._close_timer = DummyCloseTimer()
self._state = ConnectionState.created
_logger.debug('Connected.')
|
def connect(self)
|
Establish a connection.
| 4.856978 | 4.855966 | 1.000208 |
'''Close the connection.'''
if self.writer:
_logger.debug('Closing connection.')
self.writer.close()
self.writer = None
self.reader = None
if self._close_timer:
self._close_timer.close()
self._state = ConnectionState.dead
|
def close(self)
|
Close the connection.
| 5.17013 | 5.24565 | 0.985603 |
'''Write data.'''
assert self._state == ConnectionState.created, \
'Expect conn created. Got {}.'.format(self._state)
self.writer.write(data)
if drain:
fut = self.writer.drain()
if fut:
yield from self.run_network_operation(
fut, close_timeout=self._timeout, name='Write')
|
def write(self, data: bytes, drain: bool=True)
|
Write data.
| 9.10297 | 8.848121 | 1.028803 |
'''Read data.'''
assert self._state == ConnectionState.created, \
'Expect conn created. Got {}.'.format(self._state)
data = yield from \
self.run_network_operation(
self.reader.read(amount),
close_timeout=self._timeout,
name='Read')
return data
|
def read(self, amount: int=-1) -> bytes
|
Read data.
| 11.208344 | 11.245153 | 0.996727 |
'''Read a line of data.'''
assert self._state == ConnectionState.created, \
'Expect conn created. Got {}.'.format(self._state)
with self._close_timer.with_timeout():
data = yield from \
self.run_network_operation(
self.reader.readline(),
close_timeout=self._timeout,
name='Readline')
return data
|
def readline(self) -> bytes
|
Read a line of data.
| 9.776624 | 9.892412 | 0.988295 |
'''Run the task and raise appropriate exceptions.
Coroutine.
'''
if wait_timeout is not None and close_timeout is not None:
raise Exception(
'Cannot use wait_timeout and close_timeout at the same time')
try:
if close_timeout is not None:
with self._close_timer.with_timeout():
data = yield from task
if self._close_timer.is_timeout():
raise NetworkTimedOut(
'{name} timed out.'.format(name=name))
else:
return data
elif wait_timeout is not None:
data = yield from asyncio.wait_for(task, wait_timeout)
return data
else:
return (yield from task)
except asyncio.TimeoutError as error:
self.close()
raise NetworkTimedOut(
'{name} timed out.'.format(name=name)) from error
except (tornado.netutil.SSLCertificateError, SSLVerificationError) \
as error:
self.close()
raise SSLVerificationError(
'{name} certificate error: {error}'
.format(name=name, error=error)) from error
except AttributeError as error:
self.close()
raise NetworkError(
'{name} network error: connection closed unexpectedly: {error}'
.format(name=name, error=error)) from error
except (socket.error, ssl.SSLError, OSError, IOError) as error:
self.close()
if isinstance(error, NetworkError):
raise
if error.errno == errno.ECONNREFUSED:
raise ConnectionRefused(
error.errno, os.strerror(error.errno)) from error
# XXX: This quality case brought to you by OpenSSL and Python.
# Example: _ssl.SSLError: [Errno 1] error:14094418:SSL
# routines:SSL3_READ_BYTES:tlsv1 alert unknown ca
error_string = str(error).lower()
if 'certificate' in error_string or 'unknown ca' in error_string:
raise SSLVerificationError(
'{name} certificate error: {error}'
.format(name=name, error=error)) from error
else:
if error.errno:
raise NetworkError(
error.errno, os.strerror(error.errno)) from error
else:
raise NetworkError(
'{name} network error: {error}'
.format(name=name, error=error)) from error
|
def run_network_operation(self, task, wait_timeout=None,
close_timeout=None,
name='Network operation')
|
Run the task and raise appropriate exceptions.
Coroutine.
| 3.022807 | 2.879392 | 1.049807 |
'''Start client TLS on this connection and return SSLConnection.
Coroutine
'''
sock = self.writer.get_extra_info('socket')
ssl_conn = SSLConnection(
self._address,
ssl_context=ssl_context,
hostname=self._hostname, timeout=self._timeout,
connect_timeout=self._connect_timeout, bind_host=self._bind_host,
bandwidth_limiter=self._bandwidth_limiter, sock=sock
)
yield from ssl_conn.connect()
return ssl_conn
|
def start_tls(self, ssl_context: Union[bool, dict, ssl.SSLContext]=True) \
-> 'SSLConnection'
|
Start client TLS on this connection and return SSLConnection.
Coroutine
| 4.744287 | 3.753605 | 1.263928 |
'''Check if certificate matches hostname.'''
# Based on tornado.iostream.SSLIOStream
# Needed for older OpenSSL (<0.9.8f) versions
verify_mode = self._ssl_context.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED,
ssl.CERT_OPTIONAL), \
'Unknown verify mode {}'.format(verify_mode)
if verify_mode == ssl.CERT_NONE:
return
cert = sock.getpeercert()
if not cert and verify_mode == ssl.CERT_OPTIONAL:
return
if not cert:
raise SSLVerificationError('No SSL certificate given')
try:
ssl.match_hostname(cert, self._hostname)
except ssl.CertificateError as error:
raise SSLVerificationError('Invalid SSL certificate') from error
|
def _verify_cert(self, sock: ssl.SSLSocket)
|
Check if certificate matches hostname.
| 3.529789 | 3.234478 | 1.091301 |
'''Remove items that are expired or exceed the max size.'''
now_time = time.time()
while self._seq and self._seq[0].expire_time < now_time:
item = self._seq.popleft()
del self._map[item.key]
if self._max_items:
while self._seq and len(self._seq) > self._max_items:
item = self._seq.popleft()
del self._map[item.key]
|
def trim(self)
|
Remove items that are expired or exceed the max size.
| 3.342668 | 2.540171 | 1.315923 |
'''Strip session ID from URL path.'''
for pattern in SESSION_ID_PATH_PATTERNS:
match = pattern.match(path)
if match:
path = match.group(1) + match.group(3)
return path
|
def strip_path_session_id(path)
|
Strip session ID from URL path.
| 3.42532 | 3.263795 | 1.04949 |
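SESSION_ID_PATH_PATTERNS is not shown in this row, so the single jsessionid pattern below is purely hypothetical; it only illustrates the group(1) + group(3) reassembly the function performs.

import re

HYPOTHETICAL_PATTERNS = (
    re.compile(r'^(.*)(;jsessionid=[^;/?#]+)(.*)$', re.IGNORECASE),
)

path = '/cart;jsessionid=ABC123/view'
for pattern in HYPOTHETICAL_PATTERNS:
    match = pattern.match(path)
    if match:
        path = match.group(1) + match.group(3)
print(path)  # /cart/view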
'''Rewrite the given URL.'''
if url_info.scheme not in ('http', 'https'):
return url_info
if self._session_id_enabled:
url = '{scheme}://{authority}{path}?{query}#{fragment}'.format(
scheme=url_info.scheme,
authority=url_info.authority,
path=strip_path_session_id(url_info.path),
query=strip_query_session_id(url_info.query),
fragment=url_info.fragment,
)
url_info = parse_url_or_log(url) or url_info
if self._hash_fragment_enabled and url_info.fragment.startswith('!'):
if url_info.query:
url = '{}&_escaped_fragment_={}'.format(url_info.url,
url_info.fragment[1:])
else:
url = '{}?_escaped_fragment_={}'.format(url_info.url,
url_info.fragment[1:])
url_info = parse_url_or_log(url) or url_info
return url_info
|
def rewrite(self, url_info: URLInfo) -> URLInfo
|
Rewrite the given URL.
| 2.478719 | 2.476195 | 1.001019 |
'''Parse PASV address.'''
match = re.search(
r'\('
r'(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*'
r'\)',
text)
if match:
return (
'{0}.{1}.{2}.{3}'.format(int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
int(match.group(4))
),
int(match.group(5)) << 8 | int(match.group(6))
)
else:
raise ValueError('No address found')
|
def parse_address(text: str) -> Tuple[str, int]
|
Parse PASV address.
| 1.944897 | 1.802513 | 1.078992 |
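Usage sketch for a typical 227 reply; the import path is an assumption (newer wpull releases keep the FTP helpers under wpull.protocol.ftp.util, older ones used wpull.ftp.util).

from wpull.protocol.ftp.util import parse_address  # module path is an assumption

host, port = parse_address('227 Entering Passive Mode (192,168,1,2,19,137)')
# The last two octets encode the port: 19 << 8 | 137 == 5001.
assert (host, port) == ('192.168.1.2', 5001)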
'''Return the reply code as a tuple.
Args:
code: The reply code.
Returns:
        Each item in the tuple is one digit of the code.
'''
return code // 100, code // 10 % 10, code % 10
|
def reply_code_tuple(code: int) -> Tuple[int, int, int]
|
Return the reply code as a tuple.
Args:
code: The reply code.
Returns:
        Each item in the tuple is one digit of the code.
| 3.409451 | 2.150044 | 1.585759 |
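A worked example of the digit split, using FTP reply 226 as a sample input.

code = 226  # e.g. "Closing data connection"
assert (code // 100, code // 10 % 10, code % 10) == (2, 2, 6)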
'''Parse machine listing.
Args:
text: The listing.
convert: Convert sizes and dates.
strict: Method of handling errors. ``True`` will raise
``ValueError``. ``False`` will ignore rows with errors.
Returns:
        list: A list of dicts of the facts defined in RFC 3659.
        Key names are lowercased, and the filename is stored under the
        key ``name``.
'''
# TODO: this function should be moved into the 'ls' package
listing = []
for line in text.splitlines(False):
facts = line.split(';')
row = {}
filename = None
for fact in facts:
name, sep, value = fact.partition('=')
if sep:
name = name.strip().lower()
value = value.strip().lower()
if convert:
try:
value = convert_machine_list_value(name, value)
except ValueError:
if strict:
raise
row[name] = value
else:
if name[0:1] == ' ':
# Is a filename
filename = name[1:]
else:
name = name.strip().lower()
row[name] = ''
if filename:
row['name'] = filename
listing.append(row)
elif strict:
raise ValueError('Missing filename.')
return listing
|
def parse_machine_listing(text: str, convert: bool=True, strict: bool=True) -> \
List[dict]
|
Parse machine listing.
Args:
text: The listing.
convert: Convert sizes and dates.
strict: Method of handling errors. ``True`` will raise
``ValueError``. ``False`` will ignore rows with errors.
Returns:
        list: A list of dicts of the facts defined in RFC 3659.
        Key names are lowercased, and the filename is stored under the
        key ``name``.
| 4.067794 | 2.20325 | 1.84627 |
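Usage sketch on one hypothetical MLSD line; the import path is an assumption, and the expected output follows the conversion rules shown above (size to int, modify to a UTC datetime, filename under 'name').

import datetime
from wpull.protocol.ftp.util import parse_machine_listing  # module path is an assumption

listing = parse_machine_listing(
    'type=file;size=1024;modify=20230415120000; example.txt\r\n')
assert listing == [{
    'type': 'file',
    'size': 1024,
    'modify': datetime.datetime(2023, 4, 15, 12, 0,
                                tzinfo=datetime.timezone.utc),
    'name': 'example.txt',
}]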
'''Convert sizes and time values.
Size will be ``int`` while time value will be :class:`datetime.datetime`.
'''
if name == 'modify':
return convert_machine_list_time_val(value)
elif name == 'size':
return int(value)
else:
return value
|
def convert_machine_list_value(name: str, value: str) -> \
Union[datetime.datetime, str, int]
|
Convert sizes and time values.
Size will be ``int`` while time value will be :class:`datetime.datetime`.
| 5.756972 | 3.173718 | 1.813952 |
'''Convert RFC 3659 time-val to datetime objects.'''
# TODO: implement fractional seconds
text = text[:14]
if len(text) != 14:
raise ValueError('Time value not 14 chars')
year = int(text[0:4])
month = int(text[4:6])
day = int(text[6:8])
hour = int(text[8:10])
minute = int(text[10:12])
second = int(text[12:14])
return datetime.datetime(year, month, day, hour, minute, second,
tzinfo=datetime.timezone.utc)
|
def convert_machine_list_time_val(text: str) -> datetime.datetime
|
Convert RFC 3659 time-val to datetime objects.
| 2.4284 | 2.024275 | 1.199639 |
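An equivalent self-contained check using strptime; the sample time-val is hypothetical and, as in the code above, any fractional seconds are ignored.

import datetime

text = '20230415120000'  # RFC 3659 time-val: YYYYMMDDHHMMSS
parsed = datetime.datetime.strptime(text[:14], '%Y%m%d%H%M%S').replace(
    tzinfo=datetime.timezone.utc)
assert parsed == datetime.datetime(2023, 4, 15, 12, 0,
                                   tzinfo=datetime.timezone.utc)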
'''Convert results from parsing machine listings to FileEntry list.'''
for listing in listings:
yield FileEntry(
listing['name'],
type=listing.get('type'),
size=listing.get('size'),
date=listing.get('modify')
)
|
def machine_listings_to_file_entries(listings: Iterable[dict]) -> \
Iterable[FileEntry]
|
Convert results from parsing machine listings to FileEntry list.
| 4.765667 | 3.618038 | 1.317196 |
'''Return reply code.'''
if len(self.args) >= 2 and isinstance(self.args[1], int):
return self.args[1]
|
def reply_code(self)
|
Return reply code.
| 4.612545 | 4.190355 | 1.100753 |
'''Run the producer; if it raises an exception, stop the engine.'''
try:
yield from self._producer.process()
except Exception as error:
if not isinstance(error, StopIteration):
# Stop the workers so the producer exception will be handled
# when we finally yield from this coroutine
_logger.debug('Producer died.', exc_info=True)
self.stop()
raise
else:
self.stop()
|
def _run_producer_wrapper(self)
|
Run the producer; if it raises an exception, stop the engine.
| 8.398386 | 6.107337 | 1.375131 |
'''Iterate CDX file.
Args:
        file: A binary file object.
encoding (str): The encoding of the file.
Returns:
iterator: Each item is a dict that maps from field key to value.
'''
with codecs.getreader(encoding)(file) as stream:
header_line = stream.readline()
separator = header_line[0]
field_keys = header_line.strip().split(separator)
if field_keys.pop(0) != 'CDX':
raise ValueError('CDX header not found.')
for line in stream:
yield dict(zip(field_keys, line.strip().split(separator)))
|
def read_cdx(file, encoding='utf8')
|
Iterate CDX file.
Args:
        file: A binary file object.
encoding (str): The encoding of the file.
Returns:
iterator: Each item is a dict that maps from field key to value.
| 3.102074 | 2.237926 | 1.386138 |
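A self-contained sketch of the same parsing on a tiny in-memory CDX file; the field letters follow the common CDX legend (a = original URL, b = date, m = MIME type), and io.TextIOWrapper stands in for the codecs reader.

import io

cdx_file = io.BytesIO(
    b' CDX a b m\n'
    b'http://example.com/ 20230101000000 text/html\n')
stream = io.TextIOWrapper(cdx_file, encoding='utf8')
header_line = stream.readline()
separator = header_line[0]                 # the first character names the separator
field_keys = header_line.strip().split(separator)
assert field_keys.pop(0) == 'CDX'
records = [dict(zip(field_keys, line.strip().split(separator))) for line in stream]
assert records == [{'a': 'http://example.com/',
                    'b': '20230101000000',
                    'm': 'text/html'}]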
'''Set the required fields for the record.'''
self.fields[self.WARC_TYPE] = warc_type
self.fields[self.CONTENT_TYPE] = content_type
self.fields[self.WARC_DATE] = wpull.util.datetime_str()
self.fields[self.WARC_RECORD_ID] = '<{0}>'.format(uuid.uuid4().urn)
|
def set_common_fields(self, warc_type: str, content_type: str)
|
Set the required fields for the record.
| 3.463691 | 2.981565 | 1.161702 |
'''Find and set the content length.
.. seealso:: :meth:`compute_checksum`.
'''
if not self.block_file:
self.fields['Content-Length'] = '0'
return
with wpull.util.reset_file_offset(self.block_file):
wpull.util.seek_file_end(self.block_file)
self.fields['Content-Length'] = str(self.block_file.tell())
|
def set_content_length(self)
|
Find and set the content length.
.. seealso:: :meth:`compute_checksum`.
| 4.914501 | 3.766743 | 1.304708 |
'''Compute and add the checksum data to the record fields.
This function also sets the content length.
'''
if not self.block_file:
self.fields['Content-Length'] = '0'
return
block_hasher = hashlib.sha1()
payload_hasher = hashlib.sha1()
with wpull.util.reset_file_offset(self.block_file):
if payload_offset is not None:
data = self.block_file.read(payload_offset)
block_hasher.update(data)
while True:
data = self.block_file.read(4096)
if data == b'':
break
block_hasher.update(data)
payload_hasher.update(data)
content_length = self.block_file.tell()
content_hash = block_hasher.digest()
self.fields['WARC-Block-Digest'] = 'sha1:{0}'.format(
base64.b32encode(content_hash).decode()
)
if payload_offset is not None:
payload_hash = payload_hasher.digest()
self.fields['WARC-Payload-Digest'] = 'sha1:{0}'.format(
base64.b32encode(payload_hash).decode()
)
self.fields['Content-Length'] = str(content_length)
|
def compute_checksum(self, payload_offset: Optional[int]=None)
|
Compute and add the checksum data to the record fields.
This function also sets the content length.
| 2.466726 | 2.079885 | 1.185991 |
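A self-contained sketch of how the block digest value is formed, run on an in-memory block; the sample payload is arbitrary.

import base64
import hashlib
import io

block_file = io.BytesIO(b'HTTP/1.1 200 OK\r\n\r\nhello world')
hasher = hashlib.sha1()
for chunk in iter(lambda: block_file.read(4096), b''):
    hasher.update(chunk)
# Same "sha1:<base32>" form as the WARC-Block-Digest value above.
print('sha1:' + base64.b32encode(hasher.digest()).decode())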
'''Return the HTTP header.
It only attempts to read the first 4 KiB of the payload.
Returns:
Response, None: Returns an instance of
:class:`.http.request.Response` or None.
'''
with wpull.util.reset_file_offset(self.block_file):
data = self.block_file.read(4096)
# re.DOTALL lets the lazy match span the header lines up to the blank line.
match = re.match(br'(.*?\r?\n\r?\n)', data, re.DOTALL)
if not match:
return
status_line, dummy, field_str = match.group(1).partition(b'\n')
try:
version, code, reason = Response.parse_status_line(status_line)
except ValueError:
return
response = Response(status_code=code, reason=reason, version=version)
try:
response.fields.parse(field_str, strict=False)
except ValueError:
return
return response
|
def get_http_header(self) -> Response
|
Return the HTTP header.
It only attempts to read the first 4 KiB of the payload.
Returns:
Response, None: Returns an instance of
:class:`.http.request.Response` or None.
| 4.711443 | 3.122276 | 1.508977 |
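A self-contained sketch of the head-splitting step on sample bytes, using re.DOTALL so the lazy match can span the header lines up to the blank line.

import re

data = b'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n<html>...'
match = re.match(br'(.*?\r?\n\r?\n)', data, re.DOTALL)
status_line, _, field_str = match.group(1).partition(b'\n')
assert status_line == b'HTTP/1.1 200 OK\r'
assert field_str == b'Content-Type: text/html\r\n\r\n'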
'''Write the parameters to a file for PhantomJS to read.'''
param_dict = {
'url': self._params.url,
'snapshot_paths': self._params.snapshot_paths,
'wait_time': self._params.wait_time,
'num_scrolls': self._params.num_scrolls,
'smart_scroll': self._params.smart_scroll,
'snapshot': self._params.snapshot,
'viewport_width': self._params.viewport_size[0],
'viewport_height': self._params.viewport_size[1],
'paper_width': self._params.paper_size[0],
'paper_height': self._params.paper_size[1],
'custom_headers': self._params.custom_headers,
'page_settings': self._params.page_settings,
}
if self._params.event_log_filename:
param_dict['event_log_filename'] = \
os.path.abspath(self._params.event_log_filename)
if self._params.action_log_filename:
param_dict['action_log_filename'] = \
os.path.abspath(self._params.action_log_filename)
config_text = json.dumps(param_dict)
self._config_file.write(config_text.encode('utf-8'))
# Close it so the phantomjs process can read it on Windows
self._config_file.close()
|
def _write_config(self)
|
Write the parameters to a file for PhantomJS to read.
| 2.396977 | 2.181736 | 1.098656 |
'''Clean closed connections.
Args:
force: Clean connected and idle connections too.
Coroutine.
'''
with (yield from self._lock):
for connection in tuple(self.ready):
if force or connection.closed():
connection.close()
self.ready.remove(connection)
|
def clean(self, force: bool=False)
|
Clean closed connections.
Args:
force: Clean connected and idle connections too.
Coroutine.
| 7.11576 | 3.164496 | 2.248623 |
'''Forcibly close all connections.
This instance will not be usable after calling this method.
'''
for connection in self.ready:
connection.close()
for connection in self.busy:
connection.close()
self._closed = True
|
def close(self)
|
Forcibly close all connections.
This instance will not be usable after calling this method.
| 5.388882 | 3.252485 | 1.656851 |
'''Register and return a connection.
Coroutine.
'''
assert not self._closed
yield from self._condition.acquire()
while True:
if self.ready:
connection = self.ready.pop()
break
elif len(self.busy) < self.max_connections:
connection = self._connection_factory()
break
else:
yield from self._condition.wait()
self.busy.add(connection)
self._condition.release()
return connection
|
def acquire(self) -> Connection
|
Register and return a connection.
Coroutine.
| 3.958705 | 3.482425 | 1.136767 |
'''Unregister a connection.
Args:
connection: Connection instance returned from :meth:`acquire`.
reuse: If True, the connection is made available for reuse.
Coroutine.
'''
yield from self._condition.acquire()
self.busy.remove(connection)
if reuse:
self.ready.add(connection)
self._condition.notify()
self._condition.release()
|
def release(self, connection: Connection, reuse: bool=True)
|
Unregister a connection.
Args:
connection: Connection instance returned from :meth:`acquire`.
reuse: If True, the connection is made available for reuse.
Coroutine.
| 5.61726 | 2.88639 | 1.946119 |
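A modern-asyncio sketch of the same acquire/release handshake around a condition variable; the MiniHostPool name and the connection factory are illustrative stand-ins, not wpull's API.

import asyncio

class MiniHostPool:
    def __init__(self, connection_factory, max_connections=6):
        self._connection_factory = connection_factory
        self._max_connections = max_connections
        self._condition = asyncio.Condition()
        self.ready = set()
        self.busy = set()

    async def acquire(self):
        async with self._condition:
            while True:
                if self.ready:
                    connection = self.ready.pop()      # reuse an idle connection
                    break
                if len(self.busy) < self._max_connections:
                    connection = self._connection_factory()
                    break
                await self._condition.wait()           # pool is full; wait for a release
            self.busy.add(connection)
            return connection

    async def release(self, connection, reuse=True):
        async with self._condition:
            self.busy.remove(connection)
            if reuse:
                self.ready.add(connection)
            self._condition.notify()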
'''Return an available connection.
Args:
host: A hostname or IP address.
port: Port number.
use_ssl: Whether to return a SSL connection.
host_key: If provided, it overrides the key used for per-host
connection pooling. This is useful for proxies for example.
Coroutine.
'''
assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))
assert not self._closed
yield from self._process_no_wait_releases()
if use_ssl:
connection_factory = functools.partial(
self._ssl_connection_factory, hostname=host)
else:
connection_factory = functools.partial(
self._connection_factory, hostname=host)
connection_factory = functools.partial(
HappyEyeballsConnection, (host, port), connection_factory,
self._resolver, self._happy_eyeballs_table,
is_ssl=use_ssl
)
key = host_key or (host, port, use_ssl)
with (yield from self._host_pools_lock):
if key not in self._host_pools:
host_pool = self._host_pools[key] = HostPool(
connection_factory,
max_connections=self._max_host_count
)
self._host_pool_waiters[key] = 1
else:
host_pool = self._host_pools[key]
self._host_pool_waiters[key] += 1
_logger.debug('Check out %s', key)
connection = yield from host_pool.acquire()
connection.key = key
# TODO: Verify this assert is always true
# assert host_pool.count() <= host_pool.max_connections
# assert key in self._host_pools
# assert self._host_pools[key] == host_pool
with (yield from self._host_pools_lock):
self._host_pool_waiters[key] -= 1
return connection
|
def acquire(self, host: str, port: int, use_ssl: bool=False,
host_key: Optional[Any]=None) \
-> Union[Connection, SSLConnection]
|
Return an available connection.
Args:
host: A hostname or IP address.
port: Port number.
use_ssl: Whether to return a SSL connection.
host_key: If provided, it overrides the key used for per-host
connection pooling. This is useful for proxies for example.
Coroutine.
| 3.436715 | 2.76295 | 1.243857 |