code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---|
'''Put a connection back in the pool.
Coroutine.
'''
assert not self._closed
key = connection.key
host_pool = self._host_pools[key]
_logger.debug('Check in %s', key)
yield from host_pool.release(connection)
force = self.count() > self._max_count
yield from self.clean(force=force)
|
def release(self, connection: Connection)
|
Put a connection back in the pool.
Coroutine.
| 8.76388 | 6.744192 | 1.299471 |
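A minimal synchronous sketch of the check-in pattern above, using a hypothetical SimplePool rather than wpull's ConnectionPool: release returns a connection to its per-host pool, then triggers a forced clean once the total count exceeds the configured maximum.
class SimplePool:
    def __init__(self, max_count=6):
        self._host_pools = {}  # key -> list of idle connections
        self._max_count = max_count

    def count(self):
        return sum(len(conns) for conns in self._host_pools.values())

    def release(self, key, connection):
        # Check the connection back in, then clean if over capacity.
        self._host_pools.setdefault(key, []).append(connection)
        if self.count() > self._max_count:
            self.clean(force=True)

    def clean(self, force=False):
        # Pruning of closed connections omitted; a forced clean drops idle ones.
        if force:
            self._host_pools.clear()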
'''Synchronous version of :meth:`release`.'''
_logger.debug('No wait check in.')
release_task = asyncio.get_event_loop().create_task(
self.release(connection)
)
self._release_tasks.add(release_task)
|
def no_wait_release(self, connection: Connection)
|
Synchronous version of :meth:`release`.
| 6.090673 | 4.766615 | 1.277777 |
'''Return a context manager that returns a connection.
Usage::
session = yield from connection_pool.session('example.com', 80)
with session as connection:
connection.write(b'blah')
connection.close()
Coroutine.
'''
connection = yield from self.acquire(host, port, use_ssl)
@contextlib.contextmanager
def context_wrapper():
try:
yield connection
finally:
self.no_wait_release(connection)
return context_wrapper()
|
def session(self, host: str, port: int, use_ssl: bool=False)
|
Return a context manager that returns a connection.
Usage::
session = yield from connection_pool.session('example.com', 80)
with session as connection:
connection.write(b'blah')
connection.close()
Coroutine.
| 4.55918 | 2.313808 | 1.970423 |
'''Clean all closed connections.
Args:
force: Clean connected and idle connections too.
Coroutine.
'''
assert not self._closed
with (yield from self._host_pools_lock):
for key, pool in tuple(self._host_pools.items()):
yield from pool.clean(force=force)
if not self._host_pool_waiters[key] and pool.empty():
del self._host_pools[key]
del self._host_pool_waiters[key]
|
def clean(self, force: bool=False)
|
Clean all closed connections.
Args:
force: Clean connected and idle connections too.
Coroutine.
| 4.953238 | 3.025405 | 1.637214 |
'''Close all the connections and clean up.
This instance will not be usable after calling this method.
'''
for key, pool in tuple(self._host_pools.items()):
pool.close()
del self._host_pools[key]
del self._host_pool_waiters[key]
self._closed = True
|
def close(self)
|
Close all the connections and clean up.
This instance will not be usable after calling this method.
| 5.03229 | 3.527801 | 1.426467 |
'''Return number of connections.'''
counter = 0
for pool in self._host_pools.values():
counter += pool.count()
return counter
|
def count(self) -> int
|
Return number of connections.
| 8.123512 | 6.67083 | 1.217766 |
'''Set the preferred address.'''
if addr_1 > addr_2:
addr_1, addr_2 = addr_2, addr_1
self._cache[(addr_1, addr_2)] = preferred_addr
|
def set_preferred(self, preferred_addr, addr_1, addr_2)
|
Set the preferred address.
| 3.114436 | 3.579438 | 0.870091 |
'''Return the preferred address.'''
if addr_1 > addr_2:
addr_1, addr_2 = addr_2, addr_1
return self._cache.get((addr_1, addr_2))
|
def get_preferred(self, addr_1, addr_2)
|
Return the preferred address.
| 3.076846 | 3.065624 | 1.003661 |
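The two methods above share one detail worth calling out: the address pair is sorted before being used as a cache key, so lookups are symmetric in the two addresses. A standalone sketch of that normalization (illustrative only, not wpull's class):
cache = {}

def set_preferred(preferred_addr, addr_1, addr_2):
    if addr_1 > addr_2:
        addr_1, addr_2 = addr_2, addr_1
    cache[(addr_1, addr_2)] = preferred_addr

def get_preferred(addr_1, addr_2):
    if addr_1 > addr_2:
        addr_1, addr_2 = addr_2, addr_1
    return cache.get((addr_1, addr_2))

set_preferred('10.0.0.1', '10.0.0.1', '2001:db8::1')
# Argument order does not matter on lookup.
assert get_preferred('2001:db8::1', '10.0.0.1') == '10.0.0.1'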
'''Connect using happy eyeballs.'''
self._primary_connection = self._connection_factory(primary_address)
self._secondary_connection = self._connection_factory(secondary_address)
@asyncio.coroutine
def connect_primary():
yield from self._primary_connection.connect()
return self._primary_connection
@asyncio.coroutine
def connect_secondary():
yield from self._secondary_connection.connect()
return self._secondary_connection
primary_fut = connect_primary()
secondary_fut = connect_secondary()
failed = False
for fut in asyncio.as_completed((primary_fut, secondary_fut)):
if not self._active_connection:
try:
self._active_connection = yield from fut
except NetworkError:
if not failed:
_logger.debug('Original dual stack exception', exc_info=True)
failed = True
else:
raise
else:
_logger.debug('Got first of dual stack.')
else:
@asyncio.coroutine
def cleanup():
try:
conn = yield from fut
except NetworkError:
pass
else:
conn.close()
_logger.debug('Closed abandoned connection.')
asyncio.get_event_loop().create_task(cleanup())
preferred_host = self._active_connection.host
self._happy_eyeballs_table.set_preferred(
preferred_host, primary_address[0], secondary_address[0])
|
def _connect_dual_stack(self, primary_address, secondary_address)
|
Connect using happy eyeballs.
| 3.182369 | 2.958933 | 1.075513 |
'''Get preferred host from DNS results.'''
host_1 = result.first_ipv4.ip_address if result.first_ipv4 else None
host_2 = result.first_ipv6.ip_address if result.first_ipv6 else None
if not host_2:
return host_1, None
elif not host_1:
return host_2, None
preferred_host = self._happy_eyeballs_table.get_preferred(
host_1, host_2)
if preferred_host:
return preferred_host, None
else:
return host_1, host_2
|
def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]
|
Get preferred host from DNS results.
| 2.990054 | 2.791624 | 1.071081 |
'''Check if any journal files exist and raise an error.'''
files = list(glob.glob(self._prefix_filename + '*-wpullinc'))
if files:
raise OSError('WARC file {} is incomplete.'.format(files[0]))
|
def _check_journals_and_maybe_raise(self)
|
Check if any journal files exist and raise an error.
| 17.441744 | 12.01732 | 1.451384 |
'''Create and set as current WARC file.'''
if self._params.max_size and not meta and self._params.appending:
while True:
self._warc_filename = self._generate_warc_filename()
if os.path.exists(self._warc_filename):
_logger.debug('Skip {0}', self._warc_filename)
self._sequence_num += 1
else:
break
else:
self._warc_filename = self._generate_warc_filename(meta=meta)
_logger.debug('WARC file at {0}', self._warc_filename)
if not self._params.appending:
wpull.util.truncate_file(self._warc_filename)
self._warcinfo_record = WARCRecord()
self._populate_warcinfo(self._params.extra_fields)
self.write_record(self._warcinfo_record)
|
def _start_new_warc_file(self, meta=False)
|
Create and set as current WARC file.
| 3.598566 | 3.390756 | 1.061287 |
'''Return a suitable WARC filename.'''
if self._params.max_size is None:
sequence_name = ''
elif meta:
sequence_name = '-meta'
else:
sequence_name = '-{0:05d}'.format(self._sequence_num)
if self._params.compress:
extension = 'warc.gz'
else:
extension = 'warc'
return '{0}{1}.{2}'.format(
self._prefix_filename, sequence_name, extension
)
|
def _generate_warc_filename(self, meta=False)
|
Return a suitable WARC filename.
| 3.346865 | 3.181932 | 1.051834 |
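To make the naming scheme concrete, these are the filenames the logic above would produce for an assumed prefix of 'crawl' with compression enabled (illustrative values, not taken from the source):
# max_size is None   -> 'crawl.warc.gz'
# meta=True          -> 'crawl-meta.warc.gz'
# sequence_num == 7  -> 'crawl-00007.warc.gz'
assert '{0}{1}.{2}'.format('crawl', '-{0:05d}'.format(7), 'warc.gz') == 'crawl-00007.warc.gz'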
'''Create and set current CDX file.'''
self._cdx_filename = '{0}.cdx'.format(self._prefix_filename)
if not self._params.appending:
wpull.util.truncate_file(self._cdx_filename)
self._write_cdx_header()
elif not os.path.exists(self._cdx_filename):
self._write_cdx_header()
|
def _start_new_cdx_file(self)
|
Create and set current CDX file.
| 4.5255 | 4.138933 | 1.093398 |
'''Add the metadata to the Warcinfo record.'''
self._warcinfo_record.set_common_fields(
WARCRecord.WARCINFO, WARCRecord.WARC_FIELDS)
info_fields = NameValueRecord(wrap_width=1024)
info_fields['Software'] = self._params.software_string \
or self.DEFAULT_SOFTWARE_STRING
info_fields['format'] = 'WARC File Format 1.0'
info_fields['conformsTo'] = \
'http://bibnum.bnf.fr/WARC/WARC_ISO_28500_version1_latestdraft.pdf'
if extra_fields:
for name, value in extra_fields:
info_fields.add(name, value)
self._warcinfo_record.block_file = io.BytesIO(
bytes(info_fields) + b'\r\n')
self._warcinfo_record.compute_checksum()
|
def _populate_warcinfo(self, extra_fields=None)
|
Add the metadata to the Warcinfo record.
| 3.734099 | 3.510607 | 1.063662 |
'''Set up the logging file.'''
logger = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self._log_temp_file = NamedTemporaryFile(
prefix='tmp-wpull-warc-',
dir=self._params.temp_dir,
suffix='.log.gz',
delete=False,
)
self._log_temp_file.close() # For Windows
self._log_handler = handler = logging.StreamHandler(
io.TextIOWrapper(
gzip.GzipFile(
filename=self._log_temp_file.name, mode='wb'
),
encoding='utf-8'
)
)
logger.setLevel(logging.DEBUG)
logger.debug('Wpull needs the root logger level set to DEBUG.')
handler.setFormatter(formatter)
logger.addHandler(handler)
handler.setLevel(logging.INFO)
|
def _setup_log(self)
|
Set up the logging file.
| 3.306236 | 3.207464 | 1.030794 |
'''Move the file to the ``move_to`` directory.'''
assert self._params.move_to
if os.path.isdir(self._params.move_to):
_logger.debug('Moved {} to {}.', self._warc_filename,
self._params.move_to)
shutil.move(filename, self._params.move_to)
else:
_logger.error('{} is not a directory; not moving {}.',
self._params.move_to, filename)
|
def _move_file_to_dest_dir(self, filename)
|
Move the file to the ``move_to`` directory.
| 3.678003 | 3.343346 | 1.100096 |
'''Set the content length and possibly the checksums.'''
if self._params.digests:
record.compute_checksum(payload_offset)
else:
record.set_content_length()
|
def set_length_and_maybe_checksums(self, record, payload_offset=None)
|
Set the content length and possibly the checksums.
| 7.588214 | 6.412765 | 1.183298 |
'''Append the record to the WARC file.'''
# FIXME: probably not a good idea to modify arguments passed to us
# TODO: add extra gzip headers that wget uses
record.fields['WARC-Warcinfo-ID'] = self._warcinfo_record.fields[
WARCRecord.WARC_RECORD_ID]
_logger.debug('Writing WARC record {0}.',
record.fields['WARC-Type'])
if self._params.compress:
open_func = gzip.GzipFile
else:
open_func = open
# Use getsize to get actual file size. Avoid tell() because it may
# not be the raw file position.
if os.path.exists(self._warc_filename):
before_offset = os.path.getsize(self._warc_filename)
else:
before_offset = 0
journal_filename = self._warc_filename + '-wpullinc'
with open(journal_filename, 'w') as file:
file.write('wpull-journal-version:1\n')
file.write('offset:{}\n'.format(before_offset))
try:
with open_func(self._warc_filename, mode='ab') as out_file:
for data in record:
out_file.write(data)
except (OSError, IOError) as error:
_logger.info(
_('Rolling back file {filename} to length {length}.'),
filename=self._warc_filename, length=before_offset
)
with open(self._warc_filename, mode='wb') as out_file:
out_file.truncate(before_offset)
raise error
finally:
os.remove(journal_filename)
after_offset = os.path.getsize(self._warc_filename)
if self._cdx_filename:
raw_file_offset = before_offset
raw_file_record_size = after_offset - before_offset
self._write_cdx_field(
record, raw_file_record_size, raw_file_offset
)
|
def write_record(self, record)
|
Append the record to the WARC file.
| 4.058313 | 3.965797 | 1.023329 |
'''Close the WARC file and clean up any logging handlers.'''
if self._log_temp_file:
self._log_handler.flush()
logger = logging.getLogger()
logger.removeHandler(self._log_handler)
self._log_handler.stream.close()
log_record = WARCRecord()
log_record.block_file = gzip.GzipFile(
filename=self._log_temp_file.name
)
log_record.set_common_fields('resource', 'text/plain')
log_record.fields['WARC-Target-URI'] = \
'urn:X-wpull:log'
if self._params.max_size is not None:
if self._params.move_to is not None:
self._move_file_to_dest_dir(self._warc_filename)
self._start_new_warc_file(meta=True)
self.set_length_and_maybe_checksums(log_record)
self.write_record(log_record)
log_record.block_file.close()
try:
os.remove(self._log_temp_file.name)
except OSError:
_logger.exception('Could not close log temp file.')
self._log_temp_file = None
self._log_handler.close()
self._log_handler = None
if self._params.move_to is not None:
self._move_file_to_dest_dir(self._warc_filename)
if self._cdx_filename and self._params.move_to is not None:
self._move_file_to_dest_dir(self._cdx_filename)
|
def close(self)
|
Close the WARC file and clean up any logging handlers.
| 3.639748 | 3.327615 | 1.093801 |
'''Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
'''
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR)
out_file.write(self.CDX_DELIMINATOR.join((
'CDX',
'a', 'b', 'm', 's',
'k', 'S', 'V', 'g',
'u'
)))
out_file.write('\n')
|
def _write_cdx_header(self)
|
Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
| 5.037392 | 1.856202 | 2.713816 |
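Assuming CDX_DELIMINATOR is a single space (the conventional CDX field delimiter; the constant itself is not shown in this row), the header written above would look like this:
delim = ' '
header = delim + delim.join(('CDX', 'a', 'b', 'm', 's', 'k', 'S', 'V', 'g', 'u')) + '\n'
assert header == ' CDX a b m s k S V g u\n'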
'''Write the CDX field if needed.'''
if record.fields[WARCRecord.WARC_TYPE] != WARCRecord.RESPONSE \
or not re.match(r'application/http; *msgtype *= *response',
record.fields[WARCRecord.CONTENT_TYPE]):
return
url = record.fields['WARC-Target-URI']
_logger.debug('Writing CDX record {0}.', url)
http_header = record.get_http_header()
if http_header:
mime_type = self.parse_mimetype(
http_header.fields.get('Content-Type', '')
) or '-'
response_code = str(http_header.status_code)
else:
mime_type = '-'
response_code = '-'
timestamp = str(int(
wpull.util.parse_iso8601_str(record.fields[WARCRecord.WARC_DATE])
))
checksum = record.fields.get('WARC-Payload-Digest', '')
if checksum.startswith('sha1:'):
checksum = checksum.replace('sha1:', '', 1)
else:
checksum = '-'
raw_file_record_size_str = str(raw_file_record_size)
raw_file_offset_str = str(raw_file_offset)
filename = os.path.basename(self._warc_filename)
record_id = record.fields[WARCRecord.WARC_RECORD_ID]
fields_strs = (
url,
timestamp,
mime_type,
response_code,
checksum,
raw_file_record_size_str,
raw_file_offset_str,
filename,
record_id
)
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR.join(fields_strs))
out_file.write('\n')
|
def _write_cdx_field(self, record, raw_file_record_size, raw_file_offset)
|
Write the CDX field if needed.
| 2.730956 | 2.697295 | 1.01248 |
'''Return the MIME type from a Content-Type string.
Returns:
str, None: A string in the form ``type/subtype`` or None.
'''
match = re.match(r'([a-zA-Z0-9-]+/[a-zA-Z0-9-]+)', value)
if match:
return match.group(1)
|
def parse_mimetype(cls, value)
|
Return the MIME type from a Content-Type string.
Returns:
str, None: A string in the form ``type/subtype`` or None.
| 3.442158 | 1.954694 | 1.76097 |
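A self-contained check of the Content-Type pattern used by parse_mimetype above; only the leading type/subtype is kept and any parameters are dropped.
import re

def parse_mimetype(value):
    match = re.match(r'([a-zA-Z0-9-]+/[a-zA-Z0-9-]+)', value)
    if match:
        return match.group(1)

assert parse_mimetype('text/html; charset=utf-8') == 'text/html'
assert parse_mimetype('bogus header') is None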
'''Return new temp file.'''
return wpull.body.new_temp_file(
directory=self._temp_dir, hint=hint
)
|
def _new_temp_file(self, hint='warcrecsess')
|
Return new temp file.
| 14.798188 | 11.782578 | 1.255938 |
'''Record the revisit if possible.'''
fields = self._response_record.fields
ref_record_id = self._url_table.get_revisit_id(
fields['WARC-Target-URI'],
fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '')
)
if ref_record_id:
try:
self._response_record.block_file.truncate(payload_offset)
except TypeError:
self._response_record.block_file.seek(0)
data = self._response_record.block_file.read(payload_offset)
self._response_record.block_file.truncate()
self._response_record.block_file.seek(0)
self._response_record.block_file.write(data)
self._recorder.set_length_and_maybe_checksums(
self._response_record
)
fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT
fields['WARC-Refers-To'] = ref_record_id
fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI
fields['WARC-Truncated'] = 'length'
|
def _record_revisit(self, payload_offset: int)
|
Record the revisit if possible.
| 3.78829 | 3.676503 | 1.030406 |
'''Increment the number of files downloaded.
Args:
size: The size of the file
'''
assert size >= 0, size
self.files += 1
self.size += size
self.bandwidth_meter.feed(size)
|
def increment(self, size: int)
|
Increment the number of files downloaded.
Args:
size: The size of the file
| 7.328775 | 5.72035 | 1.281176 |
'''Return whether the quota is exceeded.'''
if self.quota and self._url_table is not None:
return self.size >= self.quota and \
self._url_table.get_root_url_todo_count() == 0
|
def is_quota_exceeded(self) -> bool
|
Return whether the quota is exceeded.
| 9.929627 | 8.654328 | 1.14736 |
'''Increment the error counter preferring base exceptions.'''
_logger.debug('Increment error %s', error)
for error_class in ERROR_PRIORITIES:
if isinstance(error, error_class):
self.errors[error_class] += 1
return
self.errors[type(error)] += 1
|
def increment_error(self, error: Exception)
|
Increment the error counter preferring base exceptions.
| 5.17376 | 3.588367 | 1.441815 |
'''Add a node to the head. '''
assert not node.tail
old_head = self.head
if old_head:
assert old_head.tail == self
old_head.tail = node
node.head = old_head
node.tail = self
self.head = node
|
def link_head(self, node)
|
Add a node to the head.
| 3.669375 | 3.463351 | 1.059487 |
'''Add a node to the tail.'''
assert not node.head
old_tail = self.tail
if old_tail:
assert old_tail.head == self
old_tail.head = node
node.tail = old_tail
node.head = self
self.tail = node
|
def link_tail(self, node)
|
Add a node to the tail.
| 3.280923 | 3.233406 | 1.014696 |
'''Remove this node and link any head or tail.'''
old_head = self.head
old_tail = self.tail
self.head = None
self.tail = None
if old_head:
old_head.tail = old_tail
if old_tail:
old_tail.head = old_head
|
def unlink(self)
|
Remove this node and link any head or tail.
| 3.205808 | 2.126987 | 1.507206 |
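A minimal node sketch (illustrative, not wpull's class) showing what unlink() does with the head/tail fields used by the three methods above: the removed node's neighbours are spliced together.
class Node:
    def __init__(self, name):
        self.name = name
        self.head = None  # neighbour toward the front
        self.tail = None  # neighbour toward the back

    def unlink(self):
        old_head, old_tail = self.head, self.tail
        self.head = self.tail = None
        if old_head:
            old_head.tail = old_tail
        if old_tail:
            old_tail.head = old_head

a, b, c = Node('a'), Node('b'), Node('c')
a.tail, b.head = b, a
b.tail, c.head = c, b
b.unlink()
assert a.tail is c and c.head is a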
'''Parse the status line bytes.
Returns:
tuple: A tuple representing the method, URI, and
version.
'''
match = re.match(
br'([a-zA-Z]+)[ \t]+([^ \t]+)[ \t]+(HTTP/\d+\.\d+)',
data
)
if match:
groups = match.groups()
if len(groups) == 3:
return wpull.string.to_str(
(groups[0], groups[1], groups[2]),
encoding=self.encoding,
)
raise ProtocolError('Error parsing status line.')
|
def parse_status_line(self, data)
|
Parse the status line bytes.
Returns:
tuple: A tuple representing the method, URI, and
version.
| 5.237605 | 3.571794 | 1.466379 |
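A quick standalone check of the request-line pattern used above (the to_str decoding step is omitted):
import re

match = re.match(
    br'([a-zA-Z]+)[ \t]+([^ \t]+)[ \t]+(HTTP/\d+\.\d+)',
    b'GET /index.html HTTP/1.1'
)
assert match.groups() == (b'GET', b'/index.html', b'HTTP/1.1')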
'''Modify the request to be suitable for HTTP server.
Args:
full_url (bool): Use full URL as the URI. By default, only
the path of the URL is given to the server.
'''
assert self.url
assert self.method
assert self.version
url_info = self.url_info
if 'Host' not in self.fields:
self.fields['Host'] = url_info.hostname_with_port
if not full_url:
if url_info.query:
self.resource_path = '{0}?{1}'.format(url_info.path, url_info.query)
else:
self.resource_path = url_info.path
else:
self.resource_path = url_info.url
|
def prepare_for_send(self, full_url=False)
|
Modify the request to be suitable for HTTP server.
Args:
full_url (bool): Use full URL as the URI. By default, only
the path of the URL is given to the server.
| 3.586316 | 2.333339 | 1.536989 |
'''Parse the status line bytes.
Returns:
tuple: A tuple representing the version, code, and reason.
'''
match = re.match(
br'(HTTP/\d+\.\d+)[ \t]+([0-9]{1,3})[ \t]*([^\r\n]*)',
data
)
if match:
groups = match.groups()
if len(groups) == 3:
return wpull.string.to_str(
(groups[0], int(groups[1]), groups[2]),
encoding='latin-1',
)
raise ProtocolError(
'Error parsing status line {line}".'.format(line=ascii(data))
)
|
def parse_status_line(cls, data)
|
Parse the status line bytes.
Returns:
tuple: A tuple representing the version, code, and reason.
| 5.05749 | 3.830996 | 1.32015 |
'''Return whether the document is likely to be CSS.'''
if 'css' in response.fields.get('content-type', '').lower():
return True
if response.body:
# Stylesheet mistakenly served as HTML
if 'html' in response.fields.get('content-type', '').lower():
return cls.is_file(response.body)
|
def is_response(cls, response)
|
Return whether the document is likely to be CSS.
| 6.765862 | 4.759166 | 1.421649 |
'''Return whether the file is likely CSS.'''
peeked_data = wpull.string.printable_bytes(
wpull.util.peek_file(file)).lower()
if b'<html' in peeked_data:
return VeryFalse
if re.search(br'@import |color:|background[a-z-]*:|font[a-z-]*:',
peeked_data):
return True
|
def is_file(cls, file)
|
Return whether the file is likely CSS.
| 13.69078 | 10.549122 | 1.297812 |
'''Begin fetching the next request.'''
self._current_session = session = self._http_client.session()
request = self.next_request()
assert request
if request.url_info.password or \
request.url_info.hostname_with_port in self._hostnames_with_auth:
self._add_basic_auth_header(request)
response = yield from session.start(request)
self._process_response(response)
return response
|
def start(self)
|
Begin fetching the next request.
| 6.631562 | 5.736483 | 1.156033 |
'''Download content.
Args:
file: An optional file object for the document contents.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
Response: An instance of :class:`.http.request.Response`.
See :meth:`WebClient.session` for proper usage of this function.
Coroutine.
'''
yield from \
self._current_session.download(file, duration_timeout=duration_timeout)
|
def download(self, file: Optional[IO[bytes]]=None,
duration_timeout: Optional[float]=None)
|
Download content.
Args:
file: An optional file object for the document contents.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
Response: An instance of :class:`.http.request.Response`.
See :meth:`WebClient.session` for proper usage of this function.
Coroutine.
| 9.542941 | 2.108135 | 4.526722 |
'''Handle the response and update the internal state.'''
_logger.debug('Handling response')
self._redirect_tracker.load(response)
if self._redirect_tracker.is_redirect():
self._process_redirect()
self._loop_type = LoopType.redirect
elif response.status_code == http.client.UNAUTHORIZED and self._next_request.password:
self._process_authentication(response)
else:
self._next_request = None
self._loop_type = LoopType.normal
if self._cookie_jar:
self._extract_cookies(response)
if self._next_request:
self._add_cookies(self._next_request)
|
def _process_response(self, response: Response)
|
Handle the response and update the internal state.
| 4.585319 | 4.024821 | 1.13926 |
'''Update the Redirect Tracker.'''
_logger.debug('Handling redirect.')
if self._redirect_tracker.exceeded():
raise ProtocolError('Too many redirects.')
try:
url = self._redirect_tracker.next_location()
if not url:
raise ProtocolError('Redirect location missing.')
if self._redirect_tracker.is_repeat():
_logger.debug('Got redirect is repeat.')
request = self._original_request.copy()
request.url = url
else:
request = self._request_factory(url)
request.prepare_for_send()
except ValueError as error:
raise ProtocolError('Invalid redirect location.') from error
self._next_request = request
_logger.debug('Updated next redirect request to {0}.'.format(request))
|
def _process_redirect(self)
|
Update the Redirect Tracker.
| 4.59589 | 4.21595 | 1.09012 |
'''Return the referrer hostname.'''
referer = self._original_request.fields.get('Referer')
if referer:
return URLInfo.parse(referer).hostname
else:
return None
|
def _get_cookie_referrer_host(self)
|
Return the referrer hostname.
| 6.833744 | 5.929336 | 1.152531 |
'''Add the cookie headers to the Request.'''
self._cookie_jar.add_cookie_header(
request, self._get_cookie_referrer_host()
)
|
def _add_cookies(self, request: Request)
|
Add the cookie headers to the Request.
| 11.400079 | 7.026497 | 1.622441 |
'''Load the cookie headers from the Response.'''
self._cookie_jar.extract_cookies(
response, response.request, self._get_cookie_referrer_host()
)
|
def _extract_cookies(self, response: Response)
|
Load the cookie headers from the Response.
| 12.719192 | 7.113799 | 1.78796 |
'''Return a fetch session.
Args:
request: The request to be fetched.
Example usage::
client = WebClient()
session = client.session(Request('http://www.example.com'))
with session:
while not session.done():
request = session.next_request()
print(request)
response = yield from session.start()
print(response)
if session.done():
with open('myfile.html') as file:
yield from session.download(file)
else:
yield from session.download()
Returns:
WebSession
'''
return WebSession(
request,
http_client=self._http_client,
redirect_tracker=self._redirect_tracker_factory(),
request_factory=self._request_factory,
cookie_jar=self._cookie_jar,
)
|
def session(self, request: Request) -> WebSession
|
Return a fetch session.
Args:
request: The request to be fetched.
Example usage::
client = WebClient()
session = client.session(Request('http://www.example.com'))
with session:
while not session.done():
request = session.next_request()
print(request)
response = yield from session.start()
print(response)
if session.done():
with open('myfile.html') as file:
yield from session.download(file)
else:
yield from session.download()
Returns:
WebSession
| 3.944551 | 1.77149 | 2.226686 |
'''Return URL string with the path replaced with directory only.'''
dir_name = posixpath.dirname(url_info.path)
if not dir_name.endswith('/'):
url_template = 'ftp://{}{}/'
else:
url_template = 'ftp://{}{}'
return url_template.format(url_info.hostname_with_port, dir_name)
|
def to_dir_path_url(url_info: URLInfo) -> str
|
Return URL string with the path replaced with directory only.
| 4.757778 | 3.319978 | 1.433075 |
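A standalone variant of the rewrite above, with the URLInfo fields passed in directly (hypothetical signature) and a sample result:
import posixpath

def to_dir_path_url(hostname_with_port, path):
    dir_name = posixpath.dirname(path)
    if not dir_name.endswith('/'):
        template = 'ftp://{}{}/'
    else:
        template = 'ftp://{}{}'
    return template.format(hostname_with_port, dir_name)

assert to_dir_path_url('example.com:21', '/pub/files/readme.txt') == \
    'ftp://example.com:21/pub/files/'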
'''Process.
Coroutine.
'''
self._item_session.request = request = Request(self._item_session.url_record.url)
verdict = self._fetch_rule.check_ftp_request(self._item_session)[0]
if not verdict:
self._item_session.skip()
return
self._add_request_password(request)
dir_name, filename = self._item_session.url_record.url_info.split_path()
if self._processor.fetch_params.glob and frozenset(filename) & GLOB_CHARS:
request = self._to_directory_request(request)
is_file = False
self._glob_pattern = urllib.parse.unquote(filename)
else:
is_file = yield from self._prepare_request_file_vs_dir(request)
self._file_writer_session.process_request(request)
wait_time = yield from self._fetch(request, is_file)
if wait_time:
_logger.debug('Sleeping {0}.', wait_time)
yield from asyncio.sleep(wait_time)
|
def process(self)
|
Process.
Coroutine.
| 6.781769 | 6.47108 | 1.048012 |
'''Check if the URL is a file, modify the request, and return whether it is a file.
Coroutine.
'''
if self._item_session.url_record.link_type:
is_file = self._item_session.url_record.link_type == LinkType.file
elif request.url_info.path.endswith('/'):
is_file = False
else:
is_file = 'unknown'
if is_file == 'unknown':
files = yield from self._fetch_parent_path(request)
if not files:
return True
filename = posixpath.basename(request.file_path)
for file_entry in files:
if file_entry.name == filename:
_logger.debug('Found entry in parent. Type {}',
file_entry.type)
is_file = file_entry.type != 'dir'
break
else:
_logger.debug('Did not find entry. Assume file.')
return True
if not is_file:
request.url = append_slash_to_path_url(request.url_info)
_logger.debug('Request URL changed to {}. Path={}.',
request.url, request.file_path)
return is_file
|
def _prepare_request_file_vs_dir(self, request: Request) -> bool
|
Check if the URL is a file, modify the request, and return whether it is a file.
Coroutine.
| 4.852974 | 3.977931 | 1.219975 |
'''Fetch the parent directory and return a list of FileEntry.
Coroutine.
'''
directory_url = to_dir_path_url(request.url_info)
if use_cache:
if directory_url in self._processor.listing_cache:
return self._processor.listing_cache[directory_url]
directory_request = copy.deepcopy(request)
directory_request.url = directory_url
_logger.debug('Check if URL {} is file with {}.', request.url,
directory_url)
with self._processor.ftp_client.session() as session:
try:
yield from session.start_listing(directory_request)
except FTPServerError:
_logger.debug('Got an error. Assume is file.')
if use_cache:
self._processor.listing_cache[directory_url] = None
return
temp_file = tempfile.NamedTemporaryFile(
dir=self._item_session.app_session.root_path,
prefix='tmp-wpull-list'
)
with temp_file as file:
directory_response = yield from session.download_listing(
file, duration_timeout=self._fetch_rule.duration_timeout)
if use_cache:
self._processor.listing_cache[directory_url] = \
directory_response.files
return directory_response.files
|
def _fetch_parent_path(self, request: Request, use_cache: bool=True)
|
Fetch the parent directory and return a list of FileEntry.
Coroutine.
| 5.248778 | 4.527809 | 1.159231 |
'''Fetch the request.
Coroutine.
'''
_logger.info(_('Fetching ‘{url}’.'), url=request.url)
self._item_session.request = request
response = None
try:
with self._processor.ftp_client.session() as session:
if is_file:
response = yield from session.start(request)
else:
response = yield from session.start_listing(request)
self._item_session.response = response
action = self._result_rule.handle_pre_response(
self._item_session
)
if action in (Actions.RETRY, Actions.FINISH):
raise HookPreResponseBreak()
self._file_writer_session.process_response(response)
if not response.body:
response.body = Body(
directory=self._item_session.app_session.root_path,
hint='resp_cb')
duration_timeout = self._fetch_rule.duration_timeout
if is_file:
yield from session.download(
response.body, duration_timeout=duration_timeout)
else:
yield from session.download_listing(
response.body, duration_timeout=duration_timeout)
except HookPreResponseBreak:
if response:
response.body.close()
except REMOTE_ERRORS as error:
self._log_error(request, error)
self._result_rule.handle_error(self._item_session, error)
wait_time = self._result_rule.get_wait_time(
self._item_session, error=error
)
if response:
response.body.close()
return wait_time
else:
self._log_response(request, response)
self._handle_response(request, response)
wait_time = self._result_rule.get_wait_time(
self._item_session
)
if is_file and \
self._processor.fetch_params.preserve_permissions and \
hasattr(response.body, 'name'):
yield from self._apply_unix_permissions(request, response)
response.body.close()
return wait_time
|
def _fetch(self, request: Request, is_file: bool)
|
Fetch the request.
Coroutine.
| 4.272513 | 4.059051 | 1.052589 |
'''Add links from file listing response.'''
base_url = response.request.url_info.url
if self._glob_pattern:
level = self._item_session.url_record.level
else:
level = None
for file_entry in response.files:
if self._glob_pattern and \
not fnmatch.fnmatchcase(file_entry.name, self._glob_pattern):
continue
if file_entry.type == 'dir':
linked_url = urljoin_safe(base_url, file_entry.name + '/')
elif file_entry.type in ('file', 'symlink', None):
if not self._processor.fetch_params.retr_symlinks and \
file_entry.type == 'symlink':
self._make_symlink(file_entry.name, file_entry.dest)
linked_url = None
else:
linked_url = urljoin_safe(base_url, file_entry.name)
else:
linked_url = None
if linked_url:
linked_url_info = parse_url_or_log(linked_url)
if linked_url_info:
verdict = self._fetch_rule.check_ftp_request(self._item_session)[0]
if verdict:
if linked_url_info.path.endswith('/'):
self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.directory)
else:
self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.file, level=level)
|
def _add_listing_links(self, response: ListingResponse)
|
Add links from file listing response.
| 3.572457 | 3.355216 | 1.064747 |
'''Log response.'''
_logger.info(
_('Fetched ‘{url}’: {reply_code} {reply_text}. '
'Length: {content_length}.'),
url=request.url,
reply_code=response.reply.code,
reply_text=response.reply.text,
content_length=response.body.size(),
)
|
def _log_response(self, request: Request, response: Response)
|
Log response.
| 5.129367 | 4.603688 | 1.114187 |
'''Process a response.'''
self._item_session.update_record_value(status_code=response.reply.code)
is_listing = isinstance(response, ListingResponse)
if is_listing and not self._processor.fetch_params.remove_listing or \
not is_listing:
filename = self._file_writer_session.save_document(response)
action = self._result_rule.handle_document(self._item_session, filename)
else:
self._file_writer_session.discard_document(response)
action = self._result_rule.handle_no_document(self._item_session)
if isinstance(response, ListingResponse):
self._add_listing_links(response)
return action
|
def _handle_response(self, request: Request, response: Response)
|
Process a response.
| 6.506195 | 6.201765 | 1.049088 |
'''Make a symlink on the system.'''
path = self._file_writer_session.extra_resource_path('dummy')
if path:
dir_path = os.path.dirname(path)
symlink_path = os.path.join(dir_path, link_name)
_logger.debug('symlink {} -> {}', symlink_path, link_target)
os.symlink(link_target, symlink_path)
_logger.info(
_('Created symbolic link {symlink_path} to target {symlink_target}.'),
symlink_path=symlink_path,
symlink_target=link_target
)
|
def _make_symlink(self, link_name: str, link_target: str)
|
Make a symlink on the system.
| 4.072078 | 3.754415 | 1.084611 |
'''Fetch and apply Unix permissions.
Coroutine.
'''
files = yield from self._fetch_parent_path(request)
if not files:
return
filename = posixpath.basename(request.file_path)
for file_entry in files:
if file_entry.name == filename and file_entry.perm:
_logger.debug(
'Set chmod {} o{:o}.',
response.body.name, file_entry.perm
)
os.chmod(response.body.name, file_entry.perm)
|
def _apply_unix_permissions(self, request: Request, response: Response)
|
Fetch and apply Unix permissions.
Coroutine.
| 5.992356 | 4.742076 | 1.263657 |
'''Build URL rewriter if needed.'''
if session.args.escaped_fragment or session.args.strip_session_id:
return session.factory.new(
'URLRewriter',
hash_fragment=session.args.escaped_fragment,
session_id=session.args.strip_session_id
)
|
def _build_url_rewriter(cls, session: AppSession)
|
Build URL rewriter if needed.
| 8.339835 | 6.602797 | 1.263076 |
'''Create the URL filter instances.
Returns:
A list of URL filter instances
'''
args = session.args
filters = [
HTTPSOnlyFilter() if args.https_only else SchemeFilter(),
RecursiveFilter(
enabled=args.recursive, page_requisites=args.page_requisites
),
FollowFTPFilter(follow=args.follow_ftp),
]
if args.no_parent:
filters.append(ParentFilter())
if args.domains or args.exclude_domains:
filters.append(
BackwardDomainFilter(args.domains, args.exclude_domains)
)
if args.hostnames or args.exclude_hostnames:
filters.append(
HostnameFilter(args.hostnames, args.exclude_hostnames)
)
if args.tries:
filters.append(TriesFilter(args.tries))
if args.level and args.recursive or args.page_requisites_level:
filters.append(
LevelFilter(args.level,
inline_max_depth=args.page_requisites_level)
)
if args.accept_regex or args.reject_regex:
filters.append(RegexFilter(args.accept_regex, args.reject_regex))
if args.include_directories or args.exclude_directories:
filters.append(
DirectoryFilter(
args.include_directories, args.exclude_directories
)
)
if args.accept or args.reject:
filters.append(BackwardFilenameFilter(args.accept, args.reject))
return filters
|
def _build_url_filters(cls, session: AppSession)
|
Create the URL filter instances.
Returns:
A list of URL filter instances
| 3.10613 | 2.88805 | 1.075511 |
'''Build the Document Converter.'''
if not session.args.convert_links:
return
converter = session.factory.new(
'BatchDocumentConverter',
session.factory['HTMLParser'],
session.factory['ElementWalker'],
session.factory['URLTable'],
backup=session.args.backup_converted
)
return converter
|
def _build_document_converter(cls, session: AppSession)
|
Build the Document Converter.
| 14.508173 | 12.952223 | 1.12013 |
'''Set up the root logger if needed.
The root logger is set to the appropriate level so the file and WARC logs
work correctly.
'''
assert (
logging.CRITICAL >
logging.ERROR >
logging.WARNING >
logging.INFO >
logging.DEBUG >
logging.NOTSET
)
assert (
LOG_VERY_QUIET >
LOG_QUIET >
LOG_NO_VERBOSE >
LOG_VERBOSE >
LOG_DEBUG
)
assert args.verbosity
root_logger = logging.getLogger()
current_level = root_logger.getEffectiveLevel()
min_level = LOG_VERY_QUIET
if args.verbosity == LOG_QUIET:
min_level = logging.ERROR
if args.verbosity in (LOG_NO_VERBOSE, LOG_VERBOSE) \
or args.warc_file \
or args.output_file or args.append_output:
min_level = logging.INFO
if args.verbosity == LOG_DEBUG:
min_level = logging.DEBUG
if current_level > min_level:
root_logger.setLevel(min_level)
root_logger.debug(
'Wpull needs the root logger level set to {0}.'
.format(min_level)
)
if current_level <= logging.INFO:
logging.captureWarnings(True)
|
def _setup_logging(cls, args)
|
Set up the root logger if needed.
The root logger is set to the appropriate level so the file and WARC logs
work correctly.
| 3.768914 | 2.865078 | 1.315466 |
'''Set up the console logger.
A handler with a formatter is added to the root logger.
'''
stream = new_encoded_stream(args, stderr)
logger = logging.getLogger()
session.console_log_handler = handler = logging.StreamHandler(stream)
formatter = logging.Formatter('%(levelname)s %(message)s')
log_filter = logging.Filter('wpull')
handler.setFormatter(formatter)
handler.setLevel(args.verbosity or logging.INFO)
handler.addFilter(log_filter)
logger.addHandler(handler)
|
def _setup_console_logger(cls, session: AppSession, args, stderr)
|
Set up the console logger.
A handler with a formatter is added to the root logger.
| 4.468826 | 3.326422 | 1.343433 |
'''Set up the file message logger.
A file log handler with a formatter is added to the root logger.
'''
if not (args.output_file or args.append_output):
return
logger = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.output_file:
filename = args.output_file
mode = 'w'
else:
filename = args.append_output
mode = 'a'
session.file_log_handler = handler = logging.FileHandler(
filename, mode, encoding='utf-8')
handler.setFormatter(formatter)
logger.addHandler(handler)
if args.verbosity == logging.DEBUG:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
|
def _setup_file_logger(cls, session: AppSession, args)
|
Set up the file message logger.
A file log handler with a formatter is added to the root logger.
| 2.614063 | 1.982304 | 1.318699 |
'''Process PhantomJS.
Coroutine.
'''
if response.status_code != 200:
return
if not HTMLReader.is_supported(request=request, response=response):
return
_logger.debug('Starting PhantomJS processing.')
self._file_writer_session = file_writer_session
# FIXME: this is a quick hack for crashes. See #137.
attempts = int(os.environ.get('WPULL_PHANTOMJS_TRIES', 5))
for dummy in range(attempts):
try:
yield from self._run_driver(item_session, request, response)
except asyncio.TimeoutError:
_logger.warning(_('Waiting for page load timed out.'))
break
except PhantomJSCrashed as error:
_logger.exception(__('PhantomJS crashed: {}', error))
else:
break
else:
_logger.warning(__(
_('PhantomJS failed to fetch ‘{url}’. I am sorry.'),
url=request.url_info.url
))
|
def process(self, item_session: ItemSession, request, response, file_writer_session)
|
Process PhantomJS.
Coroutine.
| 5.718666 | 5.065488 | 1.128947 |
'''Start PhantomJS processing.'''
_logger.debug('Started PhantomJS processing.')
session = PhantomJSCoprocessorSession(
self._phantomjs_driver_factory, self._root_path,
self._processing_rule, self._file_writer_session,
request, response,
item_session, self._phantomjs_params, self._warc_recorder
)
with contextlib.closing(session):
yield from session.run()
_logger.debug('Ended PhantomJS processing.')
|
def _run_driver(self, item_session: ItemSession, request, response)
|
Start PhantomJS processing.
| 7.709269 | 6.487476 | 1.188331 |
'''Add the action log to the WARC file.'''
_logger.debug('Adding action log record.')
actions = []
with open(path, 'r', encoding='utf-8', errors='replace') as file:
for line in file:
actions.append(json.loads(line))
log_data = json.dumps(
{'actions': actions},
indent=4,
).encode('utf-8')
self._action_warc_record = record = WARCRecord()
record.set_common_fields('metadata', 'application/json')
record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \
.format(wpull.url.percent_encode_query_value(url))
record.block_file = io.BytesIO(log_data)
self._warc_recorder.set_length_and_maybe_checksums(record)
self._warc_recorder.write_record(record)
|
def _add_warc_action_log(self, path, url)
|
Add the action log to the WARC file.
| 4.890107 | 4.809161 | 1.016832 |
'''Add the snapshot to the WARC file.'''
_logger.debug('Adding snapshot record.')
extension = os.path.splitext(filename)[1]
content_type = {
'.pdf': 'application/pdf',
'.html': 'text/html',
'.png': 'image/png',
'.gif': 'image/gif'
}[extension]
record = WARCRecord()
record.set_common_fields('resource', content_type)
record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \
.format(wpull.url.percent_encode_query_value(url))
if self._action_warc_record:
record.fields['WARC-Concurrent-To'] = \
self._action_warc_record.fields[WARCRecord.WARC_RECORD_ID]
with open(filename, 'rb') as in_file:
record.block_file = in_file
self._warc_recorder.set_length_and_maybe_checksums(record)
self._warc_recorder.write_record(record)
|
def _add_warc_snapshot(self, filename, url)
|
Add the snapshot to the WARC file.
| 4.324939 | 4.128391 | 1.047609 |
'''Extract links from the DOM.'''
mock_response = self._new_mock_response(
self._response, self._get_temp_path('phantom', '.html')
)
self._item_session.request = self._request
self._item_session.response = mock_response
self._processing_rule.scrape_document(item_session)
if mock_response.body:
mock_response.body.close()
|
def _scrape_document(self)
|
Extract links from the DOM.
| 9.5626 | 8.380347 | 1.141074 |
'''Return a new mock Response with the content.'''
mock_response = copy.copy(response)
mock_response.body = Body(open(file_path, 'rb'))
mock_response.fields = NameValueRecord()
for name, value in response.fields.get_all():
mock_response.fields.add(name, value)
mock_response.fields['Content-Type'] = 'text/html; charset="utf-8"'
return mock_response
|
def _new_mock_response(self, response, file_path)
|
Return a new mock Response with the content.
| 4.392982 | 3.768934 | 1.165577 |
'''Clean up.'''
for path in self._temp_filenames:
if os.path.exists(path):
os.remove(path)
|
def close(self)
|
Clean up.
| 5.875703 | 5.152932 | 1.140264 |
'''Create the SSL options.
The options must be accepted by the `ssl` module.
'''
args = session.args
# Logic is based on tornado.netutil.ssl_options_to_context
ssl_context = ssl.SSLContext(args.secure_protocol)
if args.check_certificate:
ssl_context.verify_mode = ssl.CERT_REQUIRED
cls._load_ca_certs(session)
ssl_context.load_verify_locations(session.ca_certs_filename)
else:
ssl_context.verify_mode = ssl.CERT_NONE
if args.strong_crypto:
ssl_context.options |= ssl.OP_NO_SSLv2
ssl_context.options |= ssl.OP_NO_SSLv3 # POODLE
if hasattr(ssl, 'OP_NO_COMPRESSION'):
ssl_context.options |= ssl.OP_NO_COMPRESSION # CRIME
else:
_logger.warning(_('Unable to disable TLS compression.'))
if args.certificate:
ssl_context.load_cert_chain(args.certificate, args.private_key)
if args.edg_file:
ssl.RAND_egd(args.edg_file)
if args.random_file:
with open(args.random_file, 'rb') as in_file:
# Use 16KB because Wget
ssl.RAND_add(in_file.read(15360), 0.0)
return ssl_context
|
def _build_ssl_context(cls, session: AppSession) -> ssl.SSLContext
|
Create the SSL options.
The options must be accepted by the `ssl` module.
| 3.560763 | 3.206261 | 1.110566 |
'''Load the Certificate Authority certificates.
'''
args = session.args
if session.ca_certs_filename:
return session.ca_certs_filename
certs = set()
if args.use_internal_ca_certs:
pem_filename = os.path.join(
os.path.dirname(__file__), '..', '..', 'cert', 'ca-bundle.pem'
)
certs.update(cls._read_pem_file(pem_filename, from_package=True))
if args.ca_directory:
if os.path.isdir(args.ca_directory):
for filename in os.listdir(args.ca_directory):
if os.path.isfile(filename):
certs.update(cls._read_pem_file(filename))
else:
_logger.warning(__(
_('Certificate directory {path} does not exist.'),
path=args.ca_directory
))
if args.ca_certificate:
if os.path.isfile(args.ca_certificate):
certs.update(cls._read_pem_file(args.ca_certificate))
else:
_logger.warning(__(
_('Certificate file {path} does not exist.'),
path=args.ca_certificate
))
session.ca_certs_filename = certs_filename = tempfile.mkstemp(
suffix='.pem', prefix='tmp-wpull-')[1]
def clean_certs_file():
os.remove(certs_filename)
if clean:
atexit.register(clean_certs_file)
with open(certs_filename, 'w+b') as certs_file:
for cert in certs:
certs_file.write(cert)
_logger.debug('CA certs loaded.')
|
def _load_ca_certs(cls, session: AppSession, clean: bool=True)
|
Load the Certificate Authority certificates.
| 2.480714 | 2.412163 | 1.028419 |
'''Read the PEM file.
Returns:
iterable: An iterable of certificates. The certificate data
is :class:`byte`.
'''
_logger.debug('Reading PEM {0}.'.format(filename))
if from_package:
return wpull.util.filter_pem(wpull.util.get_package_data(filename))
with open(filename, 'rb') as in_file:
return wpull.util.filter_pem(in_file.read())
|
def _read_pem_file(cls, filename, from_package=False)
|
Read the PEM file.
Returns:
iterable: An iterable of certificates. The certificate data
is :class:`byte`.
| 4.804315 | 3.033884 | 1.583553 |
'''Begin an HTTP request.
Args:
request: Request information.
Returns:
A response populated with the HTTP headers.
Once the headers are received, call :meth:`download`.
Coroutine.
'''
if self._session_state != SessionState.ready:
raise RuntimeError('Session already started')
assert not self._request
self._request = request
_logger.debug(__('Client fetch request {0}.', request))
connection = yield from self._acquire_request_connection(request)
full_url = connection.proxied and not connection.tunneled
self._stream = stream = self._stream_factory(connection)
yield from self._stream.reconnect()
request.address = connection.address
self.event_dispatcher.notify(self.Event.begin_request, request)
write_callback = functools.partial(self.event_dispatcher.notify, self.Event.request_data)
stream.data_event_dispatcher.add_write_listener(write_callback)
yield from stream.write_request(request, full_url=full_url)
if request.body:
assert 'Content-Length' in request.fields
length = int(request.fields['Content-Length'])
yield from stream.write_body(request.body, length=length)
stream.data_event_dispatcher.remove_write_listener(write_callback)
self.event_dispatcher.notify(self.Event.end_request, request)
read_callback = functools.partial(self.event_dispatcher.notify, self.Event.response_data)
stream.data_event_dispatcher.add_read_listener(read_callback)
self._response = response = yield from stream.read_response()
response.request = request
self.event_dispatcher.notify(self.Event.begin_response, response)
self._session_state = SessionState.request_sent
return response
|
def start(self, request: Request) -> Response
|
Begin an HTTP request.
Args:
request: Request information.
Returns:
A response populated with the HTTP headers.
Once the headers are received, call :meth:`download`.
Coroutine.
| 3.746407 | 3.020566 | 1.2403 |
'''Read the response content into file.
Args:
file: A file object or asyncio stream.
raw: Whether chunked transfer encoding should be included.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Be sure to call :meth:`start` first.
Coroutine.
'''
if self._session_state != SessionState.request_sent:
raise RuntimeError('Request not sent')
if rewind and file and hasattr(file, 'seek'):
original_offset = file.tell()
else:
original_offset = None
if not hasattr(file, 'drain'):
self._response.body = file
if not isinstance(file, Body):
self._response.body = Body(file)
read_future = self._stream.read_body(self._request, self._response, file=file, raw=raw)
try:
yield from asyncio.wait_for(read_future, timeout=duration_timeout)
except asyncio.TimeoutError as error:
raise DurationTimeout(
'Did not finish reading after {} seconds.'
.format(duration_timeout)
) from error
self._session_state = SessionState.response_received
if original_offset is not None:
file.seek(original_offset)
self.event_dispatcher.notify(self.Event.end_response, self._response)
self.recycle()
|
def download(
self,
file: Union[IO[bytes], asyncio.StreamWriter, None]=None,
raw: bool=False, rewind: bool=True,
duration_timeout: Optional[float]=None)
|
Read the response content into file.
Args:
file: A file object or asyncio stream.
raw: Whether chunked transfer encoding should be included.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Be sure to call :meth:`start` first.
Coroutine.
| 4.407763 | 2.644009 | 1.667076 |
'''Convert an instance recursively to bytes.'''
if isinstance(instance, bytes):
return instance
elif hasattr(instance, 'encode'):
return instance.encode(encoding, error)
elif isinstance(instance, list):
return list([to_bytes(item, encoding, error) for item in instance])
elif isinstance(instance, tuple):
return tuple([to_bytes(item, encoding, error) for item in instance])
elif isinstance(instance, dict):
return dict(
[(to_bytes(key, encoding, error), to_bytes(value, encoding, error))
for key, value in instance.items()])
else:
return instance
|
def to_bytes(instance, encoding='utf-8', error='strict')
|
Convert an instance recursively to bytes.
| 1.715087 | 1.677815 | 1.022214 |
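A usage illustration, assuming the to_bytes defined above is in scope; non-string leaves such as ints pass through unchanged.
assert to_bytes('abc') == b'abc'
assert to_bytes(['a', b'b', 3]) == [b'a', b'b', 3]
assert to_bytes({'k': ('v', 1)}) == {b'k': (b'v', 1)}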
'''Convert an instance recursively to string.'''
if isinstance(instance, str):
return instance
elif hasattr(instance, 'decode'):
return instance.decode(encoding)
elif isinstance(instance, list):
return list([to_str(item, encoding) for item in instance])
elif isinstance(instance, tuple):
return tuple([to_str(item, encoding) for item in instance])
elif isinstance(instance, dict):
return dict(
[(to_str(key, encoding), to_str(value, encoding))
for key, value in instance.items()])
else:
return instance
|
def to_str(instance, encoding='utf-8')
|
Convert an instance recursively to string.
| 1.846904 | 1.792077 | 1.030594 |
'''Return the Python name of the encoder/decoder
Returns:
str, None
'''
name = UnicodeDammit.CHARSET_ALIASES.get(name.lower(), name)
try:
return codecs.lookup(name).name
except (LookupError, TypeError, ValueError):
# TypeError occurs when name contains \x00 (ValueError in Py3.5)
pass
|
def normalize_codec_name(name)
|
Return the Python name of the encoder/decoder
Returns:
str, None
| 7.784446 | 5.23848 | 1.486012 |
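The normalization above ultimately defers to codecs.lookup; a couple of sample results from the standard library:
import codecs

assert codecs.lookup('UTF8').name == 'utf-8'
assert codecs.lookup('latin-1').name == 'iso8859-1'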
'''Detect the character encoding of the data.
Returns:
str: The name of the codec
Raises:
ValueError: The codec could not be detected. This error can only
occur if fallback is not a "lossless" codec.
'''
if encoding:
encoding = normalize_codec_name(encoding)
bs4_detector = EncodingDetector(
data,
override_encodings=(encoding,) if encoding else (),
is_html=is_html
)
candidates = itertools.chain(bs4_detector.encodings, (fallback,))
for candidate in candidates:
if not candidate:
continue
candidate = normalize_codec_name(candidate)
if not candidate:
continue
if candidate == 'ascii' and fallback != 'ascii':
# it's never ascii :)
# Falling back on UTF-8/CP-1252/Latin-1 reduces chance of
# failure
continue
if try_decoding(data, candidate):
return candidate
raise ValueError('Unable to detect encoding.')
|
def detect_encoding(data, encoding=None, fallback='latin1', is_html=False)
|
Detect the character encoding of the data.
Returns:
str: The name of the codec
Raises:
ValueError: The codec could not be detected. This error can only
occur if fallback is not a "lossless" codec.
| 5.268528 | 3.848488 | 1.368986 |
'''Return whether the Python codec could decode the data.'''
try:
data.decode(encoding, 'strict')
except UnicodeError:
# Data under 16 bytes is very unlikely to be truncated
if len(data) > 16:
for trim in (1, 2, 3):
trimmed_data = data[:-trim]
if trimmed_data:
try:
trimmed_data.decode(encoding, 'strict')
except UnicodeError:
continue
else:
return True
return False
else:
return True
|
def try_decoding(data, encoding)
|
Return whether the Python codec could decode the data.
| 3.896964 | 3.215368 | 1.211981 |
'''Format the file size into a human readable text.
http://stackoverflow.com/a/1094933/1524507
'''
for unit in ('B', 'KiB', 'MiB', 'GiB'):
if -1024 < num < 1024:
return format_str.format(num=num, unit=unit)
num /= 1024.0
return format_str.format(num=num, unit='TiB')
|
def format_size(num, format_str='{num:.1f} {unit}')
|
Format the file size into a human readable text.
http://stackoverflow.com/a/1094933/1524507
| 2.284372 | 1.664645 | 1.372287 |
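A usage illustration, assuming format_size above is in scope:
assert format_size(500) == '500.0 B'
assert format_size(2048) == '2.0 KiB'
assert format_size(3 * 1024 ** 3) == '3.0 GiB'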
'''Escape any control or non-ASCII characters from string.
This function is intended for use with strings from an untrusted
source such as writing to a console or writing to logs. It is
designed to prevent things like ANSI escape sequences from
showing.
Use :func:`repr` or :func:`ascii` instead for things such as
Exception messages.
'''
if isinstance(text, str):
new_text = ascii(text)[1:-1]
else:
new_text = ascii(text)
if keep_newlines:
new_text = new_text.replace('\\r', '\r').replace('\\n', '\n')
return new_text
|
def printable_str(text, keep_newlines=False)
|
Escape any control or non-ASCII characters from string.
This function is intended for use with strings from an untrusted
source such as writing to a console or writing to logs. It is
designed to prevent things like ANSI escape sequences from
showing.
Use :func:`repr` or :func:`ascii` instead for things such as
Exception messages.
| 5.763983 | 1.748889 | 3.295796 |
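A usage illustration, assuming printable_str above is in scope; escape sequences are rendered visibly instead of being interpreted by the terminal:
assert printable_str('safe text') == 'safe text'
assert printable_str('\x1b[31mred\x1b[0m') == '\\x1b[31mred\\x1b[0m'
assert printable_str('a\nb', keep_newlines=True) == 'a\nb'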
'''Convenience function for the print function.
This function prints no newline.
'''
string = ' '.join([str(arg) for arg in args])
print(string, end='', file=self._stream)
|
def _print(self, *args)
|
Convenience function for the print function.
This function prints no newline.
| 5.995656 | 3.180701 | 1.885011 |
'''Convenience function for the print function.'''
string = ' '.join([str(arg) for arg in args])
print(string, file=self._stream)
|
def _println(self, *args)
|
Convenience function for the print function.
| 5.02705 | 3.85938 | 1.302554 |
'''Print an entire status line including bar and stats.'''
self._clear_line()
self._print(' ')
if self.max_value:
self._print_percent()
self._print(' ')
self._print_bar()
else:
self._print_throbber()
self._print(' ')
if self.measurement == Measurement.bytes:
self._print_size_downloaded()
else:
self._print(self.current_value)
self._print(' ')
self._print_duration()
self._print(' ')
if self.measurement == Measurement.bytes:
self._print_speed()
self._flush()
|
def _print_status(self)
|
Print an entire status line including bar and stats.
| 4.325681 | 3.549298 | 1.218743 |
'''Print an indefinite progress bar.'''
self._print('[')
for position in range(self._bar_width):
self._print('O' if position == self._throbber_index else ' ')
self._print(']')
self._throbber_index = next(self._throbber_iter)
|
def _print_throbber(self)
|
Print an indefinite progress bar.
| 4.977401 | 4.73222 | 1.051811 |
'''Print a progress bar.'''
self._print('[')
for position in range(self._bar_width):
position_fraction = position / (self._bar_width - 1)
position_bytes = position_fraction * self.max_value
if position_bytes < (self.continue_value or 0):
self._print('+')
elif position_bytes <= (self.continue_value or 0) + self.current_value:
self._print('=')
else:
self._print(' ')
self._print(']')
|
def _print_bar(self)
|
Print a progress bar.
| 3.501693 | 3.624364 | 0.966154 |
'''Print the elapsed download time.'''
duration = int(time.time() - self._start_time)
self._print(datetime.timedelta(seconds=duration))
|
def _print_duration(self)
|
Print the elapsed download time.
| 6.63668 | 4.474001 | 1.483388 |
'''Print the current speed.'''
if self._bandwidth_meter.num_samples:
speed = self._bandwidth_meter.speed()
if self._human_format:
file_size_str = wpull.string.format_size(speed)
else:
file_size_str = '{:.1f} b'.format(speed * 8)
speed_str = _('{preformatted_file_size}/s').format(
preformatted_file_size=file_size_str
)
else:
speed_str = _('-- B/s')
self._print(speed_str)
|
def _print_speed(self)
|
Print the current speed.
| 5.260028 | 5.310452 | 0.990505 |
'''Print how much is done in percentage.'''
# Parenthesize continue_value before adding; `or` binds looser than `+`.
fraction_done = (((self.continue_value or 0) + self.current_value) /
self.max_value)
self._print('{fraction_done:.1%}'.format(fraction_done=fraction_done))
|
def _print_percent(self)
|
Print how much is done in percentage.
| 8.174711 | 6.509369 | 1.255838 |
'''Register hooks that can be connected.'''
if name in self._callbacks:
raise ValueError('Hook already registered')
self._callbacks[name] = None
if self._event_dispatcher is not None:
self._event_dispatcher.register(name)
|
def register(self, name: str)
|
Register hooks that can be connected.
| 5.950173 | 3.973991 | 1.497279 |
'''Unregister hook.'''
del self._callbacks[name]
if self._event_dispatcher is not None:
self._event_dispatcher.unregister(name)
|
def unregister(self, name: str)
|
Unregister hook.
| 7.525638 | 6.238605 | 1.206301 |
'''Add callback to hook.'''
if not self._callbacks[name]:
self._callbacks[name] = callback
else:
raise HookAlreadyConnectedError('Callback hook already connected.')
|
def connect(self, name, callback)
|
Add callback to hook.
| 7.454328 | 5.7646 | 1.293122 |
'''Invoke the callback.'''
if self._event_dispatcher is not None:
self._event_dispatcher.notify(name, *args, **kwargs)
if self._callbacks[name]:
return self._callbacks[name](*args, **kwargs)
else:
raise HookDisconnected('No callback is connected.')
|
def call(self, name: str, *args, **kwargs)
|
Invoke the callback.
| 5.632816 | 5.190449 | 1.085227 |
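Taken together, register, unregister, connect, and call implement a one-callback-per-hook table. A minimal self-contained version of the same pattern (the dispatcher class name, exception base classes, and the event-dispatcher plumbing are illustrative; the exception names follow the ones raised above):

class HookAlreadyConnectedError(ValueError):
    pass

class HookDisconnected(RuntimeError):
    pass

class MiniHookDispatcher:
    def __init__(self):
        self._callbacks = {}

    def register(self, name):
        if name in self._callbacks:
            raise ValueError('Hook already registered')
        self._callbacks[name] = None

    def connect(self, name, callback):
        if self._callbacks[name]:
            raise HookAlreadyConnectedError('Callback hook already connected.')
        self._callbacks[name] = callback

    def call(self, name, *args, **kwargs):
        if self._callbacks[name]:
            return self._callbacks[name](*args, **kwargs)
        raise HookDisconnected('No callback is connected.')

hooks = MiniHookDispatcher()
hooks.register('resolve_dns')
hooks.connect('resolve_dns', lambda host: '127.0.0.1')
print(hooks.call('resolve_dns', 'example.com'))  # 127.0.0.1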
if PY_MAJOR_VERSION < 3:
# The robot rules are stored internally as Unicode. The two lines
# below ensure that the parameters passed to this function are
# also Unicode. If those lines were not present and the caller
# passed a non-Unicode user agent or URL string to this function,
# Python would silently convert it to Unicode before comparing it
# to the robot rules. Such conversions use the default encoding
# (usually US-ASCII) and if the string couldn't be converted using
# that encoding, Python would raise a UnicodeError later on in the
# guts of this code which would be confusing.
# Converting the strings to Unicode here doesn't make the problem
# go away but it does make the conversion explicit so that
# failures are easier to understand.
if not isinstance(user_agent, unicode):
user_agent = user_agent.decode()
if not isinstance(url, unicode):
url = url.decode()
if syntax not in (MK1996, GYM2008):
_raise_error(ValueError, "Syntax must be MK1996 or GYM2008")
for ruleset in self.__rulesets:
if ruleset.does_user_agent_match(user_agent):
return ruleset.is_url_allowed(url, syntax)
return True
|
def is_allowed(self, user_agent, url, syntax=GYM2008)
|
True if the user agent is permitted to visit the URL. The syntax
parameter can be GYM2008 (the default) or MK1996 for strict adherence
to the traditional standard.
| 5.651988 | 5.321328 | 1.062139 |
# See is_allowed() comment about the explicit unicode conversion.
if (PY_MAJOR_VERSION < 3) and (not isinstance(user_agent, unicode)):
user_agent = user_agent.decode()
for ruleset in self.__rulesets:
if ruleset.does_user_agent_match(user_agent):
return ruleset.crawl_delay
return None
|
def get_crawl_delay(self, user_agent)
|
Returns a float representing the crawl delay specified for this
user agent, or None if the crawl delay was unspecified or not a float.
| 5.615258 | 5.014761 | 1.119746 |
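These two methods come from the robots.txt parser bundled with wpull (robotexclusionrulesparser). A hedged usage sketch against the standalone PyPI package of the same name, assuming its usual RobotExclusionRulesParser class with a parse() method for feeding it robots.txt text:

import robotexclusionrulesparser

parser = robotexclusionrulesparser.RobotExclusionRulesParser()
parser.parse(
    'User-agent: *\n'
    'Disallow: /private/\n'
    'Crawl-delay: 2\n'
)
print(parser.is_allowed('MyBot/1.0', 'http://example.com/index.html'))  # expected: True
print(parser.is_allowed('MyBot/1.0', 'http://example.com/private/a'))   # expected: False
print(parser.get_crawl_delay('MyBot/1.0'))                              # expected: 2.0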
'''Decorator to close stream on error.'''
@asyncio.coroutine
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
with wpull.util.close_on_error(self.close):
return (yield from func(self, *args, **kwargs))
return wrapper
|
def close_stream_on_error(func)
|
Decorator to close stream on error.
| 3.930496 | 3.8134 | 1.030706 |
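A sketch of the decorator in use: any old-style coroutine method on an object that exposes close() gets cleaned up when it raises. The class and method below are illustrative; the decorator itself relies on wpull.util.close_on_error invoking close() when an exception escapes.

import asyncio

class DemoStream:
    def __init__(self):
        self.closed = False

    def close(self):
        self.closed = True

    @close_stream_on_error
    def read_header(self):
        # Simulate an I/O failure partway through the coroutine.
        yield from asyncio.sleep(0)
        raise IOError('connection lost')

stream = DemoStream()
try:
    asyncio.get_event_loop().run_until_complete(stream.read_header())
except IOError:
    pass
print(stream.closed)  # True -- close() was invoked by the decorator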
'''Return a filename from a URL.
Args:
url (str): The URL.
index (str): If a filename could not be derived from the URL path,
use index instead. For example, ``/images/`` will return
``index.html``.
alt_char (bool): If True, the character for the query delimiter
will be ``@`` instead of ``?``.
This function does not include the directories and does not sanitize
the filename.
Returns:
str
'''
assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))
url_split_result = urllib.parse.urlsplit(url)
filename = url_split_result.path.split('/')[-1]
if not filename:
filename = index
if url_split_result.query:
if alt_char:
query_delim = '@'
else:
query_delim = '?'
filename = '{0}{1}{2}'.format(
filename, query_delim, url_split_result.query
)
return filename
|
def url_to_filename(url, index='index.html', alt_char=False)
|
Return a filename from a URL.
Args:
url (str): The URL.
index (str): If a filename could not be derived from the URL path,
use index instead. For example, ``/images/`` will return
``index.html``.
alt_char (bool): If True, the character for the query delimiter
will be ``@`` instead of ``?``.
This function does not include the directories and does not sanitize
the filename.
Returns:
str
| 3.594404 | 1.714337 | 2.096673 |
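A few concrete calls showing how the path, the index fallback, and the query delimiter interact:

print(url_to_filename('http://example.com/images/'))
# 'index.html' -- an empty last path component falls back to the index argument
print(url_to_filename('http://example.com/search?q=cats'))
# 'search?q=cats'
print(url_to_filename('http://example.com/search?q=cats', alt_char=True))
# 'search@q=cats'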
'''Return a list of directory parts from a URL.
Args:
url (str): The URL.
include_protocol (bool): If True, the scheme from the URL will be
included.
include_hostname (bool): If True, the hostname from the URL will be
included.
alt_char (bool): If True, the character for the port delimiter
will be ``+`` instead of ``:``.
This function does not include the filename and the paths are not
sanitized.
Returns:
list
'''
assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))
url_split_result = urllib.parse.urlsplit(url)
parts = []
if include_protocol:
parts.append(url_split_result.scheme)
if include_hostname:
hostname = url_split_result.hostname
if url_split_result.port:
if alt_char:
port_delim = '+'
else:
port_delim = ':'
hostname = '{0}{1}{2}'.format(
hostname, port_delim, url_split_result.port
)
parts.append(hostname)
for path_part in url_split_result.path.split('/'):
if path_part:
parts.append(path_part)
if not url.endswith('/') and parts:
parts.pop()
return parts
|
def url_to_dir_parts(url, include_protocol=False, include_hostname=False,
alt_char=False)
|
Return a list of directory parts from a URL.
Args:
url (str): The URL.
include_protocol (bool): If True, the scheme from the URL will be
included.
include_hostname (bool): If True, the hostname from the URL will be
included.
alt_char (bool): If True, the character for the port delimiter
will be ``+`` instead of ``:``.
This function does not include the filename and the paths are not
sanitized.
Returns:
list
| 2.563679 | 1.655615 | 1.548475 |
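Concrete calls; note that the last path component is dropped when the URL does not end in a slash, because it is treated as a filename:

print(url_to_dir_parts('http://example.com/a/b/c.html'))
# ['a', 'b']
print(url_to_dir_parts('http://example.com/a/b/c.html', include_hostname=True))
# ['example.com', 'a', 'b']
print(url_to_dir_parts('http://example.com:8080/a/', include_hostname=True, alt_char=True))
# ['example.com+8080', 'a']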
'''Return a safe filename or path part.
Args:
filename (str): The filename or path component.
os_type (str): If ``unix``, escape the slash. If ``windows``, escape
extra Windows characters.
no_control (bool): If True, escape control characters.
ascii_only (bool): If True, escape non-ASCII characters.
case (str): If ``lower``, lowercase the string. If ``upper``, uppercase
the string.
encoding (str): The character encoding.
max_length (int): The maximum length of the filename.
This function assumes that `filename` has not already been percent-encoded.
Returns:
str
'''
assert isinstance(filename, str), \
'Expect str. Got {}.'.format(type(filename))
if filename in ('.', os.curdir):
new_filename = '%2E'
elif filename in ('..', os.pardir):
new_filename = '%2E%2E'
else:
unix = os_type == 'unix'
windows = os_type == 'windows'
encoder_args = (unix, no_control, windows, ascii_only)
if encoder_args not in _encoder_cache:
_encoder_cache[encoder_args] = PercentEncoder(
unix=unix, control=no_control, windows=windows,
ascii_=ascii_only
)
encoder = _encoder_cache[encoder_args]
encoded_filename = filename.encode(encoding)
new_filename = encoder.quote(encoded_filename).decode(encoding)
if os_type == 'windows':
if new_filename[-1] in ' .':
new_filename = '{0}{1:02X}'.format(
new_filename[:-1], ord(new_filename[-1])
)
if max_length and len(new_filename) > max_length:
hash_obj = hashlib.sha1(new_filename.encode(encoding))
new_length = max(0, max_length - 8)
new_filename = '{0}{1}'.format(
new_filename[:new_length], hash_obj.hexdigest()[:8]
)
if case == 'lower':
new_filename = new_filename.lower()
elif case == 'upper':
new_filename = new_filename.upper()
return new_filename
|
def safe_filename(filename, os_type='unix', no_control=True, ascii_only=True,
case=None, encoding='utf8', max_length=None)
|
Return a safe filename or path part.
Args:
filename (str): The filename or path component.
os_type (str): If ``unix``, escape the slash. If ``windows``, escape
extra Windows characters.
no_control (bool): If True, escape control characters.
ascii_only (bool): If True, escape non-ASCII characters.
case (str): If ``lower``, lowercase the string. If ``upper``, uppercase
the string.
encoding (str): The character encoding.
max_length (int): The maximum length of the filename.
This function assumes that `filename` has not already been percent-encoded.
Returns:
str
| 2.550309 | 1.949946 | 1.307887 |
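Illustrative calls. The exact escaping is delegated to wpull's PercentEncoder, which is not shown in this entry, so the outputs in the comments are what the surrounding logic implies rather than verified values:

print(safe_filename('.'))
# '%2E' -- the current-directory name is always escaped
print(safe_filename('r\u00e9sum\u00e9.pdf'))
# non-ASCII bytes are percent-encoded, e.g. 'r%C3%A9sum%C3%A9.pdf'
print(len(safe_filename('a' * 300, max_length=100)))
# 100 -- truncated to max_length - 8 characters plus an 8-character SHA-1 prefix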
'''Return a directory path free of filenames.
Args:
dir_path (str): A directory path.
suffix (str): The suffix to append to the part of the path that is
a file.
Returns:
str
'''
dir_path = os.path.normpath(dir_path)
parts = dir_path.split(os.sep)
for index in range(len(parts)):
test_path = os.sep.join(parts[:index + 1])
if os.path.isfile(test_path):
parts[index] += suffix
return os.sep.join(parts)
return dir_path
|
def anti_clobber_dir_path(dir_path, suffix='.d')
|
Return a directory path free of filenames.
Args:
dir_path (str): A directory path.
suffix (str): The suffix to append to the part of the path that is
a file.
Returns:
str
| 2.818325 | 1.811555 | 1.555749 |
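A quick scenario, assuming a regular file named 'blog' already exists in the working directory and would otherwise clobber the directory of the same name:

import os

open('blog', 'w').close()  # simulate the conflicting file
print(anti_clobber_dir_path(os.path.join('blog', 'posts', '2014')))
# 'blog.d/posts/2014' on POSIX -- the conflicting component receives the suffix
os.remove('blog')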
'''Parse a Content-Disposition header value.'''
match = re.search(r'filename\s*=\s*(.+)', text, re.IGNORECASE)
if not match:
return
filename = match.group(1)
if filename[0] in '"\'':
match = re.match(r'(.)(.+)(?!\\)\1', filename)
if match:
filename = match.group(2).replace('\\"', '"')
return filename
else:
filename = filename.partition(';')[0].strip()
return filename
|
def parse_content_disposition(text)
|
Parse a Content-Disposition header value.
| 3.469827 | 3.520371 | 0.985642 |
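Representative header values and what the function extracts:

print(parse_content_disposition('attachment; filename="report.pdf"'))
# 'report.pdf'
print(parse_content_disposition('attachment; filename=data.csv; size=512'))
# 'data.csv'
print(parse_content_disposition('inline'))
# None -- no filename parameter present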
'''Return a safe filename or file part.'''
return safe_filename(
part,
os_type=self._os_type, no_control=self._no_control,
ascii_only=self._ascii_only, case=self._case,
max_length=self._max_filename_length,
)
|
def safe_filename(self, part)
|
Return a safe filename or file part.
| 5.432997 | 4.530481 | 1.19921 |