code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
'''Return the document encoding from an HTTP header. Args: response (Response): An instance of :class:`.http.Response`. Returns: ``str``, ``None``: The codec name. ''' encoding = wpull.protocol.http.util.parse_charset( response.fields.get('content-type', '')) if encoding: return wpull.string.normalize_codec_name(encoding) else: return None
def get_heading_encoding(response)
Return the document encoding from an HTTP header. Args: response (Response): An instance of :class:`.http.Response`. Returns: ``str``, ``None``: The codec name.
6.415457
3.211436
1.997691
'''Return the likely encoding of the response document. Args: response (Response): An instance of :class:`.http.Response`. is_html (bool): See :func:`.util.detect_encoding`. peek (int): The maximum number of bytes of the document to be analyzed. Returns: ``str``, ``None``: The codec name. ''' encoding = get_heading_encoding(response) encoding = wpull.string.detect_encoding( wpull.util.peek_file(response.body, peek), encoding=encoding, is_html=is_html ) _logger.debug(__('Got encoding: {0}', encoding)) return encoding
def detect_response_encoding(response, is_html=False, peek=131072)
Return the likely encoding of the response document. Args: response (Response): An instance of :class:`.http.Response`. is_html (bool): See :func:`.util.detect_encoding`. peek (int): The maximum number of bytes of the document to be analyzed. Returns: ``str``, ``None``: The codec name.
5.24546
2.627507
1.996364
'''Return whether the URL is in the table.''' try: self.get_one(url) except NotFound: return False else: return True
def contains(self, url: str)
Return whether the URL is in the table.
7.313952
4.493268
1.627758
'''Add a single URL to the table. Args: url: The URL to be added url_properties: Additional values to be saved url_data: Additional data to be saved ''' self.add_many([AddURLInfo(url, url_properties, url_data)])
def add_one(self, url: str, url_properties: Optional[URLProperties]=None, url_data: Optional[URLData]=None)
Add a single URL to the table. Args: url: The URL to be added url_properties: Additional values to be saved url_data: Additional data to be saved
4.162097
2.480285
1.678072
'''Update record for processed URL. Args: url: The URL. new_status: Update the item status to `new_status`. increment_try_count: Whether to increment the try counter for the URL. url_result: Additional values. '''
def check_in(self, url: str, new_status: Status, increment_try_count: bool=True, url_result: Optional[URLResult]=None)
Update record for processed URL. Args: url: The URL. new_status: Update the item status to `new_status`. increment_try_count: Whether to increment the try counter for the URL. url_result: Additional values.
7.128164
1.910034
3.731957
'''Iterate the file stream. Returns: iterator: Each item is a tuple: 1. None, regex match 2. str ''' chunk_a = None chunk_b = None chunk_a_index = 0 chunk_b_index = 0 search_start_index = 0 while True: chunk_a = chunk_b chunk_a_index = chunk_b_index chunk_b = self._file.read(self._read_size) if chunk_a is None: continue chunk_b_index = chunk_a_index + len(chunk_a) if not chunk_a: break current_chunk = chunk_a + chunk_b[:self._overlap_size] offset_end = len(chunk_a) + self._overlap_size while True: offset_start = search_start_index - chunk_a_index match = self._pattern.search( current_chunk, offset_start, offset_end) if not match: unmatched_part = chunk_a[offset_start:] if unmatched_part: yield (None, unmatched_part) search_start_index += len(unmatched_part) break start_index, end_index = match.span(match.lastindex) unmatched_part = current_chunk[offset_start:start_index] if unmatched_part: yield (None, unmatched_part) yield (match, match.group(match.lastindex)) search_start_index += len(unmatched_part) + \ len(match.group(match.lastindex))
def stream(self)
Iterate the file stream. Returns: iterator: Each item is a tuple: 1. None, regex match 2. str
2.648618
2.24295
1.180864
'''Call all the callback handlers with given arguments.''' for handler in tuple(self.handlers): handler(*args, **kwargs)
def notify(self, *args, **kwargs)
Call all the callback handlers with given arguments.
8.446522
4.635988
1.821947
'''Create an instance. Args: name (str): The name of the class args: The arguments to pass to the class. kwargs: The keyword arguments to pass to the class. Returns: instance ''' if name in self._instance_map: raise ValueError('Instance {0} is already initialized' .format(name)) instance = self._class_map[name](*args, **kwargs) self._instance_map[name] = instance return instance
def new(self, name, *args, **kwargs)
Create an instance. Args: name (str): The name of the class args: The arguments to pass to the class. kwargs: The keyword arguments to pass to the class. Returns: instance
2.65677
2.132887
1.245621
'''Return whether all the instances have been initialized. Returns: bool ''' return frozenset(self._class_map.keys()) == \ frozenset(self._instance_map.keys())
def is_all_initialized(self)
Return whether all the instances have been initialized. Returns: bool
6.049797
4.865908
1.243303
'''Normalize the key name to title case. For example, ``normalize_name('content-id')`` will become ``Content-Id`` Args: name (str): The name to normalize. overrides (set, sequence): A set or sequence containing keys that should be cased to themselves. For example, passing ``{'WARC-Type'}`` will normalize any key named "warc-type" to ``WARC-Type`` instead of the default ``Warc-Type``. Returns: str ''' normalized_name = name.title() if overrides: override_map = dict([(name.title(), name) for name in overrides]) return override_map.get(normalized_name, normalized_name) else: return normalized_name
def normalize_name(name, overrides=None)
Normalize the key name to title case. For example, ``normalize_name('content-id')`` will become ``Content-Id`` Args: name (str): The name to normalize. overrides (set, sequence): A set or sequence containing keys that should be cased to themselves. For example, passing ``{'WARC-Type'}`` will normalize any key named "warc-type" to ``WARC-Type`` instead of the default ``Warc-Type``. Returns: str
4.816845
1.695166
2.841519
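As a quick illustration of the normalization above, here is a self-contained sketch (the helper is redefined locally instead of importing wpull, and the override set is an example value):

def normalize_name(name, overrides=None):
    # Title-case the field name: 'content-id' -> 'Content-Id'.
    normalized_name = name.title()
    if overrides:
        # Map each title-cased override back to its preferred spelling.
        override_map = {key.title(): key for key in overrides}
        return override_map.get(normalized_name, normalized_name)
    return normalized_name

print(normalize_name('content-id'))                           # Content-Id
print(normalize_name('warc-type', overrides={'WARC-Type'}))   # WARC-Type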
'''Return the most likely line delimiter from the string.''' assert isinstance(string, str), 'Expect str. Got {}'.format(type(string)) crlf_count = string.count('\r\n') lf_count = string.count('\n') if crlf_count >= lf_count: return '\r\n' else: return '\n'
def guess_line_ending(string)
Return the most likely line delimiter from the string.
2.856048
2.35925
1.210575
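For example, the tie-breaking rule above (CRLF wins when the counts are equal) behaves like this; a minimal sketch with the function repeated so the snippet runs on its own:

def guess_line_ending(string):
    # Every '\r\n' also counts toward the '\n' total, so CRLF wins only
    # when there are no bare LF line endings.
    crlf_count = string.count('\r\n')
    lf_count = string.count('\n')
    return '\r\n' if crlf_count >= lf_count else '\n'

print(repr(guess_line_ending('a\r\nb\r\n')))  # '\r\n'
print(repr(guess_line_ending('a\nb\n')))      # '\n'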
'''Join lines that are wrapped. Any line that starts with a space or tab is joined to the previous line. ''' assert isinstance(string, str), 'Expect str. Got {}'.format(type(string)) lines = string.splitlines() line_buffer = io.StringIO() for line_number in range(len(lines)): line = lines[line_number] if line and line[0:1] in (' ', '\t'): line_buffer.write(' ') elif line_number != 0: line_buffer.write('\r\n') line_buffer.write(line.strip()) line_buffer.write('\r\n') return line_buffer.getvalue()
def unfold_lines(string)
Join lines that are wrapped. Any line that starts with a space or tab is joined to the previous line.
3.101724
2.369334
1.309112
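A small standalone sketch of the unfolding rule above, showing a header value wrapped onto a continuation line being rejoined:

import io

def unfold_lines(string):
    # A line starting with a space or tab continues the previous line.
    lines = string.splitlines()
    buf = io.StringIO()
    for index, line in enumerate(lines):
        if line and line[0:1] in (' ', '\t'):
            buf.write(' ')
        elif index != 0:
            buf.write('\r\n')
        buf.write(line.strip())
    buf.write('\r\n')
    return buf.getvalue()

print(repr(unfold_lines('Subject: hello\r\n world\r\nFrom: a@example.com')))
# 'Subject: hello world\r\nFrom: a@example.com\r\n'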
'''Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored Raises: :class:`ValueError` if the record is malformed. ''' if isinstance(string, bytes): errors = 'strict' if strict else 'replace' string = string.decode(self.encoding, errors=errors) if not self.raw: self.raw = string else: self.raw += string lines = unfold_lines(string).splitlines() for line in lines: if line: if ':' not in line: if strict: raise ValueError('Field missing colon.') else: continue name, value = line.split(':', 1) name = name.strip() value = value.strip() self.add(name, value)
def parse(self, string, strict=True)
Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored Raises: :class:`ValueError` if the record is malformed.
3.067532
2.273729
1.349119
'''Append the name-value pair to the record.''' normalized_name = normalize_name(name, self._normalize_overrides) self._map[normalized_name].append(value)
def add(self, name, value)
Append the name-value pair to the record.
7.671743
6.841027
1.121431
'''Return all the values for given name.''' normalized_name = normalize_name(name, self._normalize_overrides) return self._map[normalized_name]
def get_list(self, name)
Return all the values for given name.
9.448408
6.866709
1.375973
'''Return an iterator of name-value pairs.''' for name, values in self._map.items(): for value in values: yield (name, value)
def get_all(self)
Return an iterator of name-value pairs.
6.28374
3.91706
1.604198
'''Convert to string.''' pairs = [] for name, value in self.get_all(): if value and self._wrap_width: pairs.append('{0}:{1}'.format( name, '\r\n'.join(textwrap.wrap( value, width=self._wrap_width, drop_whitespace=False, initial_indent=' ', subsequent_indent=' ' )) )) elif value: pairs.append('{0}: {1}'.format(name, value)) else: pairs.append('{0}:'.format(name)) pairs.append('') return '\r\n'.join(pairs)
def to_str(self)
Convert to string.
2.914507
2.89256
1.007587
'''Convert to bytes.''' return str(self).encode(self.encoding, errors=errors)
def to_bytes(self, errors='strict')
Convert to bytes.
5.617267
6.799517
0.826127
'''Clean up and return connections back to the pool. Connections should be kept alive if supported. ''' for connection in self._connections: self._connection_pool.no_wait_release(connection) self._connections.clear()
def recycle(self)
Clean up and return connections back to the pool. Connections should be kept alive if supported.
10.123113
3.760515
2.691949
'''Return a connection.''' host = request.url_info.hostname port = request.url_info.port use_ssl = request.url_info.scheme == 'https' tunnel = request.url_info.scheme != 'http' connection = yield from self._acquire_connection(host, port, use_ssl, tunnel) return connection
def _acquire_request_connection(self, request)
Return a connection.
3.78013
3.541367
1.067421
'''Return a connection.''' if hasattr(self._connection_pool, 'acquire_proxy'): connection = yield from \ self._connection_pool.acquire_proxy(host, port, use_ssl, tunnel=tunnel) else: connection = yield from \ self._connection_pool.acquire(host, port, use_ssl) self._connections.add(connection) return connection
def _acquire_connection(self, host, port, use_ssl=False, tunnel=True)
Return a connection.
3.150662
3.031886
1.039175
'''Return a new session.''' session = self._session_class()( connection_pool=self._connection_pool, ) self.event_dispatcher.notify(self.ClientEvent.new_session, session) return session
def session(self) -> SessionT
Return a new session.
7.920622
7.375857
1.073858
'''Return the number of cookies for the given domain.''' cookies = self.cookie_jar._cookies if domain in cookies: return sum( [len(cookie) for cookie in cookies[domain].values()] ) else: return 0
def count_cookies(self, domain)
Return the number of cookies for the given domain.
3.760595
3.552955
1.058441
'''Return approximate length of all cookie key-values for a domain.''' cookies = self.cookie_jar._cookies if domain not in cookies: return 0 length = 0 for path in cookies[domain]: for name in cookies[domain][path]: cookie = cookies[domain][path][name] length += len(path) + len(name) + len(cookie.value or '') return length
def cookie_length(self, domain)
Return approximate length of all cookie key-values for a domain.
3.680903
2.724667
1.350955
'''Guess the style of directory listing. Returns: str: ``unix``, ``msdos``, ``nlst``, ``unknown``. ''' scores = { 'unix': 0, 'msdos': 0, 'nlst': 0, } for line in lines: if not line: continue if re.search(r'---|r--|rw-|rwx', line): scores['unix'] += 1 if '<DIR>' in line or re.search(r'^.{0,4}\d\d', line): scores['msdos'] += 1 words = line.split(' ', 1) if len(words) == 1: scores['nlst'] += 1 if max(scores.values()) > threshold: break top = max(scores.items(), key=lambda item: item[1]) if top[1]: return top[0] else: return 'unknown'
def guess_listing_type(lines, threshold=100)
Guess the style of directory listing. Returns: str: ``unix``, ``msdos``, ``nlst``, ``unknown``.
3.545032
2.765467
1.281893
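A trimmed, self-contained sketch of the scoring heuristic above (the threshold early-exit is omitted), run against two hypothetical listing lines:

import re

def guess_listing_type(lines):
    scores = {'unix': 0, 'msdos': 0, 'nlst': 0}
    for line in lines:
        if not line:
            continue
        # Unix listings carry permission triads such as 'rw-' or 'rwx'.
        if re.search(r'---|r--|rw-|rwx', line):
            scores['unix'] += 1
        # MS-DOS listings start with a date and may contain '<DIR>'.
        if '<DIR>' in line or re.search(r'^.{0,4}\d\d', line):
            scores['msdos'] += 1
        # NLST output is just one bare name per line.
        if len(line.split(' ', 1)) == 1:
            scores['nlst'] += 1
    style, count = max(scores.items(), key=lambda item: item[1])
    return style if count else 'unknown'

print(guess_listing_type(['-rw-r--r-- 1 user group 123 Jan 01 12:00 readme.txt']))  # unix
print(guess_listing_type(['01-01-99  12:00PM  <DIR>  docs']))                       # msdos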
'''Parse a Unix permission string and return integer value.''' # Based on ftp-ls.c symperms if len(text) != 9: return 0 perms = 0 for triad_index in range(3): string_index = triad_index * 3 perms <<= 3 if text[string_index] == 'r': perms |= 1 << 2 if text[string_index + 1] == 'w': perms |= 1 << 1 if text[string_index + 2] in 'xs': perms |= 1 return perms
def parse_unix_perm(text)
Parse a Unix permission string and return integer value.
3.821501
3.681427
1.038049
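Each permission triad becomes one octal digit; a short sketch of the same bit arithmetic:

def parse_unix_perm(text):
    # 'rwxr-xr-x' -> 0o755; anything not exactly nine characters yields 0.
    if len(text) != 9:
        return 0
    perms = 0
    for triad_index in range(3):
        i = triad_index * 3
        perms <<= 3
        if text[i] == 'r':
            perms |= 4
        if text[i + 1] == 'w':
            perms |= 2
        if text[i + 2] in 'xs':
            perms |= 1
    return perms

print(oct(parse_unix_perm('rwxr-xr-x')))  # 0o755
print(oct(parse_unix_perm('rw-r--r--')))  # 0o644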
'''Parse the lines.''' if self.type == 'msdos': return self.parse_msdos(lines) elif self.type == 'unix': return self.parse_unix(lines) elif self.type == 'nlst': return self.parse_nlst(lines) else: raise UnknownListingError('Unsupported listing type.')
def parse(self, lines)
Parse the lines.
3.561174
3.567948
0.998101
'''Parse datetime from line of text.''' return parse_datetime(text, date_format=self.date_format, is_day_period=self.is_day_period)
def parse_datetime(self, text)
Parse datetime from line of text.
6.465922
5.060005
1.277849
'''Parse lines from a MS-DOS format.''' for line in lines: fields = line.split(None, 4) date_str = fields[0] time_str = fields[1] datetime_str = '{} {}'.format(date_str, time_str) file_datetime = self.parse_datetime(datetime_str)[0] if fields[2] == '<DIR>': file_size = None file_type = 'dir' else: file_size = parse_int(fields[2]) file_type = 'file' filename = fields[3] yield FileEntry(filename, file_type, file_size, file_datetime)
def parse_msdos(self, lines)
Parse lines from a MS-DOS format.
2.817982
2.668874
1.055869
'''Parse listings from a Unix ls command format.''' # This method uses some Filezilla parsing algorithms for line in lines: original_line = line fields = line.split(' ') after_perm_index = 0 # Search for the permissions field by checking the file type for field in fields: after_perm_index += len(field) if not field: continue # If the filesystem goes corrupt, it may show ? instead # but I don't really care in that situation. if field[0] in 'bcdlps-': if field[0] == 'd': file_type = 'dir' elif field[0] == '-': file_type = 'file' elif field[0] == 'l': file_type = 'symlink' else: file_type = 'other' perms = parse_unix_perm(field[1:]) break else: raise ListingError('Failed to parse file type.') line = line[after_perm_index:] # We look for the position of the date and use the integer # before it as the file size. # We look for the position of the time and use the text # after it as the filename while line: try: datetime_obj, start_index, end_index = self.parse_datetime(line) except ValueError: line = line[4:] else: break else: raise ListingError( 'Could not parse a date from {}'.format(repr(original_line))) file_size = int(line[:start_index].rstrip().rpartition(' ')[-1]) filename = line[end_index:].strip() if file_type == 'symlink': filename, sep, symlink_dest = filename.partition(' -> ') else: symlink_dest = None yield FileEntry(filename, file_type, file_size, datetime_obj, symlink_dest, perm=perms)
def parse_unix(self, lines)
Parse listings from a Unix ls command format.
4.905752
4.632464
1.058994
'''Parse the listings. Returns: iter: An iterable of :class:`.ftp.ls.listing.FileEntry` ''' if self._text: lines = iter(self._text.splitlines()) elif self._file: lines = self._file else: lines = () sample_lines = [] for line in lines: if len(sample_lines) > 100: break sample_lines.append(line) lines = itertools.chain(sample_lines, lines) self.guess_type(sample_lines) datetime_format = wpull.protocol.ftp.ls.date.guess_datetime_format( sample_lines) self.set_datetime_format(datetime_format) return self.parse(lines)
def parse_input(self)
Parse the listings. Returns: iter: An iterable of :class:`.ftp.ls.listing.FileEntry`
5.335406
3.511883
1.519244
'''Open a file object onto the Response Body. Args: filename: The path where the file is to be saved response: Response mode: The file mode This function will create the directories if they do not exist. ''' _logger.debug('Saving file to {0}, mode={1}.', filename, mode) dir_path = os.path.dirname(filename) if dir_path and not os.path.exists(dir_path): os.makedirs(dir_path) response.body = Body(open(filename, mode))
def open_file(cls, filename: str, response: BaseResponse, mode='wb+')
Open a file object onto the Response Body. Args: filename: The path where the file is to be saved response: Response mode: The file mode This function will create the directories if they do not exist.
4.489051
2.266611
1.980513
'''Set the Last-Modified timestamp onto the given file. Args: filename: The path of the file response: Response ''' last_modified = response.fields.get('Last-Modified') if not last_modified: return try: last_modified = email.utils.parsedate(last_modified) except ValueError: _logger.exception('Failed to parse date.') return last_modified = time.mktime(last_modified) os.utime(filename, (time.time(), last_modified))
def set_timestamp(cls, filename: str, response: HTTPResponse)
Set the Last-Modified timestamp onto the given file. Args: filename: The path of the file response: Response
3.259104
2.601446
1.252805
'''Prepend the HTTP response header to the file. Args: filename: The path of the file response: Response ''' new_filename = filename + '-new' with open(new_filename, 'wb') as new_file: new_file.write(response.header()) with wpull.util.reset_file_offset(response.body): response.body.seek(0) shutil.copyfileobj(response.body, new_file) os.remove(filename) os.rename(new_filename, filename)
def save_headers(cls, filename: str, response: HTTPResponse)
Prepend the HTTP response header to the file. Args: filename: The path of the file response: Response
5.204457
4.11961
1.263337
'''Get the appropriate filename from the request.''' path = self._path_namer.get_filename(request.url_info) if os.path.isdir(path): path += '.f' else: dir_name, name = os.path.split(path) path = os.path.join(anti_clobber_dir_path(dir_name), name) return path
def _compute_filename(self, request: BaseRequest)
Get the appropriate filename from the request.
6.264019
5.18339
1.208479
'''Modify the request to resume downloading file.''' if os.path.exists(self._filename): size = os.path.getsize(self._filename) request.set_continue(size) self._file_continue_requested = True _logger.debug('Continue file from {0}.', size) else: _logger.debug('No file to continue.')
def _process_file_continue_request(self, request: BaseRequest)
Modify the request to resume downloading file.
6.700779
4.930898
1.358937
'''Process a partial content response.''' code = response.status_code if code == http.client.PARTIAL_CONTENT: self.open_file(self._filename, response, mode='ab+') else: self._raise_cannot_continue_error()
def _process_file_continue_response(self, response: HTTPResponse)
Process a partial content response.
8.806062
7.022558
1.253968
'''Process a restarted content response.''' if response.request.restart_value and response.restart_value: self.open_file(self._filename, response, mode='ab+') else: self._raise_cannot_continue_error()
def _process_file_continue_ftp_response(self, response: FTPResponse)
Process a restarted content response.
17.024786
10.210422
1.667393
'''Append an HTML/CSS file suffix as needed.''' if not self._filename: return if response.request.url_info.scheme not in ('http', 'https'): return if not re.search(r'\.[hH][tT][mM][lL]?$', self._filename) and \ HTMLReader.is_response(response): self._filename += '.html' elif not re.search(r'\.[cC][sS][sS]$', self._filename) and \ CSSReader.is_response(response): self._filename += '.css'
def _append_filename_extension(self, response: BaseResponse)
Append an HTML/CSS file suffix as needed.
3.538013
2.802504
1.262447
'''Rename using the Content-Disposition header.''' if not self._filename: return if response.request.url_info.scheme not in ('http', 'https'): return header_value = response.fields.get('Content-Disposition') if not header_value: return filename = parse_content_disposition(header_value) if filename: dir_path = os.path.dirname(self._filename) new_filename = self._path_namer.safe_filename(filename) self._filename = os.path.join(dir_path, new_filename)
def _rename_with_content_disposition(self, response: HTTPResponse)
Rename using the Content-Disposition header.
3.385975
3.136914
1.079397
'''Return the File Writer Session.''' return self.session_class( self._path_namer, self._file_continuing, self._headers_included, self._local_timestamping, self._adjust_extension, self._content_disposition, self._trust_server_names, )
def session(self) -> BaseFileWriterSession
Return the File Writer Session.
11.778156
10.28643
1.145019
'''Parse a "Content-Type" string for the document encoding. Returns: str, None ''' match = re.search( r'''charset[ ]?=[ ]?["']?([a-z0-9_-]+)''', header_string, re.IGNORECASE ) if match: return match.group(1)
def parse_charset(header_string)
Parse a "Content-Type" string for the document encoding. Returns: str, None
4.363602
2.839627
1.536682
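The charset regular expression above can be exercised directly; a quick sketch with a made-up Content-Type value:

import re

header_value = 'text/html; charset="UTF-8"'  # hypothetical Content-Type header
match = re.search(
    r'''charset[ ]?=[ ]?["']?([a-z0-9_-]+)''',
    header_value,
    re.IGNORECASE
)
print(match.group(1) if match else None)  # UTF-8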
'''Return whether the connection should be closed. Args: http_version (str): The HTTP version string like ``HTTP/1.0``. connection_field (str): The value for the ``Connection`` header. ''' connection_field = (connection_field or '').lower() if http_version == 'HTTP/1.0': return connection_field.replace('-', '') != 'keepalive' else: return connection_field == 'close'
def should_close(http_version, connection_field)
Return whether the connection should be closed. Args: http_version (str): The HTTP version string like ``HTTP/1.0``. connection_field (str): The value for the ``Connection`` header.
3.142678
2.365653
1.328461
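A short check of the keep-alive rules above: HTTP/1.0 closes unless keep-alive is requested, while HTTP/1.1 stays open unless told to close. Minimal sketch:

def should_close(http_version, connection_field):
    connection_field = (connection_field or '').lower()
    if http_version == 'HTTP/1.0':
        # HTTP/1.0 defaults to closing the connection.
        return connection_field.replace('-', '') != 'keepalive'
    # HTTP/1.1 defaults to keeping the connection alive.
    return connection_field == 'close'

print(should_close('HTTP/1.0', None))          # True
print(should_close('HTTP/1.0', 'Keep-Alive'))  # False
print(should_close('HTTP/1.1', 'close'))       # True
print(should_close('HTTP/1.1', None))          # False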
'''Seek to the end of the file.''' try: file.seek(0, 2) except ValueError: # gzip files don't support seek from end while True: data = file.read(4096) if not data: break
def seek_file_end(file)
Seek to the end of the file.
3.715808
4.244593
0.875421
'''Parse a fixed ISO8601 datetime string. .. Note:: This function only parses dates in the format ``%Y-%m-%dT%H:%M:%SZ``. You must use a library like ``dateutil`` to properly parse dates and times. Returns: float: A UNIX timestamp. ''' datetime_obj = datetime.datetime.strptime(string, "%Y-%m-%dT%H:%M:%SZ") return int(calendar.timegm(datetime_obj.utctimetuple()))
def parse_iso8601_str(string)
Parse a fixed ISO8601 datetime string. .. Note:: This function only parses dates in the format ``%Y-%m-%dT%H:%M:%SZ``. You must use a library like ``dateutil`` to properly parse dates and times. Returns: float: A UNIX timestamp.
4.236467
1.586348
2.670578
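The parse reduces to strptime plus timegm; a self-contained check against two well-known timestamps:

import calendar
import datetime

def parse_iso8601_str(string):
    # Only the exact '%Y-%m-%dT%H:%M:%SZ' layout is accepted.
    datetime_obj = datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
    return int(calendar.timegm(datetime_obj.utctimetuple()))

print(parse_iso8601_str('1970-01-01T00:00:01Z'))  # 1
print(parse_iso8601_str('2000-01-01T00:00:00Z'))  # 946684800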
'''Return the Python version as a string.''' major, minor, patch = sys.version_info[0:3] return '{0}.{1}.{2}'.format(major, minor, patch)
def python_version()
Return the Python version as a string.
2.967744
2.95784
1.003348
'''Processes the bytes for PEM certificates. Returns: ``set`` containing each certificate ''' assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data)) certs = set() new_list = [] in_pem_block = False for line in re.split(br'[\r\n]+', data): if line == b'-----BEGIN CERTIFICATE-----': assert not in_pem_block in_pem_block = True elif line == b'-----END CERTIFICATE-----': assert in_pem_block in_pem_block = False content = b''.join(new_list) content = rewrap_bytes(content) certs.add(b'-----BEGIN CERTIFICATE-----\n' + content + b'\n-----END CERTIFICATE-----\n') new_list = [] elif in_pem_block: new_list.append(line) return certs
def filter_pem(data)
Processes the bytes for PEM certificates. Returns: ``set`` containing each certificate
2.90402
2.389466
1.215343
'''Rewrap characters to 70 character width. Intended to rewrap base64 content. ''' return b'\n'.join( data[index:index+70] for index in range(0, len(data), 70) )
def rewrap_bytes(data)
Rewrap characters to 70 character width. Intended to rewrap base64 content.
6.801651
3.045839
2.233096
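Re-wrapping simply slices the byte string into 70-byte rows; for example:

def rewrap_bytes(data):
    # Join 70-byte slices with newlines (intended for base64 payloads).
    return b'\n'.join(
        data[index:index + 70] for index in range(0, len(data), 70)
    )

wrapped = rewrap_bytes(b'A' * 150)
print([len(row) for row in wrapped.split(b'\n')])  # [70, 70, 10]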
'''Return the contents of a real file or a zip file.''' if os.path.exists(filename): with open(filename, mode=mode) as in_file: return in_file.read() else: parts = os.path.normpath(filename).split(os.sep) for part, index in zip(parts, range(len(parts))): if part.endswith('.zip'): zip_path = os.sep.join(parts[:index + 1]) member_path = os.sep.join(parts[index + 1:]) break if platform.system() == 'Windows': member_path = member_path.replace('\\', '/') with zipfile.ZipFile(zip_path) as zip_file: return zip_file.read(member_path)
def get_package_data(filename, mode='rb')
Return the contents of a real file or a zip file.
2.42211
2.17927
1.111432
'''Return the filename of the data file.''' if getattr(sys, 'frozen', False): package_dir = os.path.join( sys._MEIPASS, os.path.basename(os.path.dirname(__file__)) ) elif not package_dir: package_dir = os.path.dirname(__file__) return os.path.join(package_dir, filename)
def get_package_filename(filename, package_dir=None)
Return the filename of the data file.
2.520008
2.328206
1.082382
'''Try to get the exception message or the class name.''' args = getattr(instance, 'args', None) if args: return str(instance) try: return type(instance).__name__ except AttributeError: return str(instance)
def get_exception_message(instance)
Try to get the exception message or the class name.
4.230756
3.228966
1.310251
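The fallback order is the exception's own message first, then its class name; a small sketch:

def get_exception_message(instance):
    # Prefer the exception's own args; fall back to the class name.
    if getattr(instance, 'args', None):
        return str(instance)
    return type(instance).__name__

print(get_exception_message(ValueError('bad value')))  # bad value
print(get_exception_message(ValueError()))             # ValueError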
'''Pickle an object.''' pickle.dump(obj, self._file, protocol=self._protocol)
def dump(self, obj)
Pickle an object.
6.404327
5.372455
1.192067
quote_with = '"' if '"' in value: if "'" in value: # The string contains both single and double # quotes. Turn the double quotes into # entities. We quote the double quotes rather than # the single quotes because the entity name is # "&quot;" whether this is HTML or XML. If we # quoted the single quotes, we'd have to decide # between &apos; and &squot;. replace_with = "&quot;" value = value.replace('"', replace_with) else: # There are double quotes but no single quotes. # We can use single quotes to quote the attribute. quote_with = "'" return quote_with + value + quote_with
def quoted_attribute_value(self, value)
Make a value into a quoted XML attribute, possibly escaping it. Most strings will be quoted using double quotes. Bob's Bar -> "Bob's Bar" If a string contains double quotes, it will be quoted using single quotes. Welcome to "my bar" -> 'Welcome to "my bar"' If a string contains both single and double quotes, the double quotes will be escaped, and the string will be quoted using double quotes. Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;
5.055864
4.921728
1.027254
# Escape angle brackets and ampersands. value = cls.AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value
def substitute_xml(cls, value, make_quoted_attribute=False)
Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands will become &amp;. If you want ampersands that appear to be part of an entity definition to be left alone, use substitute_xml_containing_entities() instead. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value.
4.604748
6.195684
0.743219
# Escape angle brackets, and ampersands that aren't part of # entities. value = cls.BARE_AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value
def substitute_xml_containing_entities( cls, value, make_quoted_attribute=False)
Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands that are not part of an entity defition will become &amp;. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value.
4.825114
5.595525
0.862317
tried = set() for e in self.override_encodings: if self._usable(e, tried): yield e # Did the document originally start with a byte-order mark # that indicated its encoding? if self._usable(self.sniffed_encoding, tried): yield self.sniffed_encoding # Look within the document for an XML or HTML encoding # declaration. if self.declared_encoding is None: self.declared_encoding = self.find_declared_encoding( self.markup, self.is_html) if self._usable(self.declared_encoding, tried): yield self.declared_encoding # Use third-party character set detection to guess at the # encoding. if self.chardet_encoding is None: self.chardet_encoding = chardet_dammit(self.markup) if self._usable(self.chardet_encoding, tried): yield self.chardet_encoding # As a last-ditch effort, try utf-8 and windows-1252. for e in ('utf-8', 'windows-1252'): if self._usable(e, tried): yield e
def encodings(self)
Yield a number of encodings that might work for this markup.
3.439005
3.280665
1.048265
encoding = None if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ and (data[2:4] != b'\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ and (data[2:4] != b'\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == b'\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == b'\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == b'\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] return data, encoding
def strip_byte_order_mark(cls, data)
If a byte-order mark is present, strip it and return the encoding it implies.
1.453414
1.413958
1.027905
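A reduced sketch of the byte-order-mark check for the common UTF-8 and UTF-16 cases (the UTF-32 branches from the record above are omitted here):

def sniff_bom(data):
    # Return (payload, encoding) based on a leading byte-order mark.
    if data[:3] == b'\xef\xbb\xbf':
        return data[3:], 'utf-8'
    if data[:2] == b'\xfe\xff':
        return data[2:], 'utf-16be'
    if data[:2] == b'\xff\xfe':
        return data[2:], 'utf-16le'
    return data, None

print(sniff_bom(b'\xef\xbb\xbfhello'))  # (b'hello', 'utf-8')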
if search_entire_document: xml_endpos = html_endpos = len(markup) else: xml_endpos = 1024 html_endpos = max(2048, int(len(markup) * 0.05)) declared_encoding = None declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) if not declared_encoding_match and is_html: declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) if declared_encoding_match is not None: declared_encoding = declared_encoding_match.groups()[0].decode( 'ascii', 'replace') if declared_encoding: return declared_encoding.lower() return None
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False)
Given a document, tries to find its declared encoding. An XML encoding is declared at the beginning of the document. An HTML encoding is declared in a <meta> tag, hopefully near the beginning of the document.
2.593895
2.424003
1.070087
orig = match.group(1) if self.smart_quotes_to == 'ascii': sub = self.MS_CHARS_TO_ASCII.get(orig).encode() else: sub = self.MS_CHARS.get(orig) if type(sub) == tuple: if self.smart_quotes_to == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub
def _sub_ms_char(self, match)
Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.
2.909774
2.688188
1.08243
if embedded_encoding.replace('_', '-').lower() not in ( 'windows-1252', 'windows_1252'): raise NotImplementedError( "Windows-1252 and ISO-8859-1 are the only currently supported " "embedded encodings.") if main_encoding.lower() not in ('utf8', 'utf-8'): raise NotImplementedError( "UTF-8 is the only currently supported main encoding.") byte_chunks = [] chunk_start = 0 pos = 0 while pos < len(in_bytes): byte = in_bytes[pos] if not isinstance(byte, int): # Python 2.x byte = ord(byte) if (byte >= cls.FIRST_MULTIBYTE_MARKER and byte <= cls.LAST_MULTIBYTE_MARKER): # This is the start of a UTF-8 multibyte character. Skip # to the end. for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: if byte >= start and byte <= end: pos += size break elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: # We found a Windows-1252 character! # Save the string up to this point as a chunk. byte_chunks.append(in_bytes[chunk_start:pos]) # Now translate the Windows-1252 character into UTF-8 # and add it as another, one-byte chunk. byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) pos += 1 chunk_start = pos else: # Go on to the next character. pos += 1 if chunk_start == 0: # The string is unchanged. return in_bytes else: # Store the final chunk. byte_chunks.append(in_bytes[chunk_start:]) return b''.join(byte_chunks)
def detwingle(cls, in_bytes, main_encoding="utf8", embedded_encoding="windows-1252")
Fix characters from one encoding embedded in some other encoding. Currently the only situation supported is Windows-1252 (or its subset ISO-8859-1), embedded in UTF-8. The input must be a bytestring. If you've already converted the document to Unicode, you're too late. The output is a bytestring in which `embedded_encoding` characters have been converted to their `main_encoding` equivalents.
2.533497
2.560495
0.989456
'''Scrape a file for links. See :meth:`scrape` for the return value. ''' elements = self.iter_elements(file, encoding=encoding) link_contexts = set() link_infos = self._element_walker.iter_links(elements) for link_info in link_infos: element_base_url = base_url if link_info.base_link: clean_base_url = clean_link_soup(link_info.base_link) if element_base_url and base_url: element_base_url = urljoin_safe( base_url, clean_base_url ) or base_url if element_base_url: url = urljoin_safe( element_base_url, clean_link_soup(link_info.link), allow_fragments=False ) else: url = clean_link_soup(link_info.link) if url: link_contexts.add(LinkContext( url, inline=link_info.inline, linked=link_info.linked, link_type=link_info.link_type, extra=link_info )) scrape_result = ScrapeResult(link_contexts, encoding) scrape_result['base_url'] = base_url return scrape_result
def scrape_file(self, file, encoding=None, base_url=None)
Scrape a file for links. See :meth:`scrape` for the return value.
3.058363
2.887403
1.059209
'''Return if the link is accepted by the filters.''' element_tag = element_tag.lower() if self._ignored_tags is not None \ and element_tag in self._ignored_tags: return False if self._followed_tags is not None: return element_tag in self._followed_tags else: return True
def _is_accepted(self, element_tag)
Return if the link is accepted by the filters.
3.430966
2.653978
1.292763
'''Iterate the document root for links. Returns: iterable: An iterator of :class:`LinkInfo`. ''' for element in elements: if not isinstance(element, Element): continue for link_infos in self.iter_links_element(element): yield link_infos
def iter_links(self, elements)
Iterate the document root for links. Returns: iterable: An iterator of :class:`LinkInfo`.
7.287094
3.42179
2.129614
'''Iterate a HTML element.''' # reference: lxml.html.HtmlMixin.iterlinks() attrib = element.attrib tag = element.tag if tag == 'link': iterable = self.iter_links_link_element(element) elif tag == 'meta': iterable = self.iter_links_meta_element(element) elif tag in ('object', 'applet'): iterable = self.iter_links_object_element(element) elif tag == 'param': iterable = self.iter_links_param_element(element) elif tag == 'style': iterable = self.iter_links_style_element(element) elif tag == 'script': iterable = self.iter_links_script_element(element) else: iterable = self.iter_links_plain_element(element) # RSS/Atom if tag in ('link', 'url', 'icon'): iterable = itertools.chain( iterable, self.iter_links_element_text(element) ) for link_info in iterable: yield link_info if 'style' in attrib and self.css_scraper: for link in self.css_scraper.scrape_links(attrib['style']): yield LinkInfo( element=element, tag=element.tag, attrib='style', link=link, inline=True, linked=False, base_link=None, value_type='css', link_type=LinkType.media, )
def iter_links_element(self, element)
Iterate a HTML element.
3.11222
3.058099
1.017697
'''Get the element text as a link.''' if element.text: link_type = identify_link_type(element.text) yield LinkInfo( element=element, tag=element.tag, attrib=None, link=element.text, inline=False, linked=True, base_link=None, value_type='plain', link_type=link_type )
def iter_links_element_text(cls, element)
Get the element text as a link.
5.82364
5.438762
1.070766
'''Iterate a ``link`` for URLs. This function handles stylesheets and icons in addition to standard scraping rules. ''' rel = element.attrib.get('rel', '') stylesheet = 'stylesheet' in rel icon = 'icon' in rel inline = stylesheet or icon if stylesheet: link_type = LinkType.css elif icon: link_type = LinkType.media else: link_type = None for attrib_name, link in self.iter_links_by_attrib(element): yield LinkInfo( element=element, tag=element.tag, attrib=attrib_name, link=link, inline=inline, linked=not inline, base_link=None, value_type='plain', link_type=link_type )
def iter_links_link_element(self, element)
Iterate a ``link`` for URLs. This function handles stylesheets and icons in addition to standard scraping rules.
5.639349
3.440711
1.639007
'''Iterate the ``meta`` element for links. This function handles refresh URLs. ''' if element.attrib.get('http-equiv', '').lower() == 'refresh': content_value = element.attrib.get('content') if content_value: link = parse_refresh(content_value) if link: yield LinkInfo( element=element, tag=element.tag, attrib='http-equiv', link=link, inline=False, linked=True, base_link=None, value_type='refresh', link_type=None # treat it as a redirect ) else: for link_info in cls.iter_links_open_graph_meta(element): yield link_info
def iter_links_meta_element(cls, element)
Iterate the ``meta`` element for links. This function handles refresh URLs.
5.910778
4.467669
1.323011
'''Iterate ``object`` and ``embed`` elements. This function also looks at ``codebase`` and ``archive`` attributes. ''' base_link = element.attrib.get('codebase', None) if base_link: # lxml returns codebase as inline link_type = element.attrib.get(base_link) yield LinkInfo( element=element, tag=element.tag, attrib='codebase', link=base_link, inline=True, linked=False, base_link=None, value_type='plain', link_type=link_type ) for attribute in ('code', 'src', 'classid', 'data'): if attribute in element.attrib: link_type = identify_link_type(element.attrib.get(attribute)) yield LinkInfo( element=element, tag=element.tag, attrib=attribute, link=element.attrib.get(attribute), inline=True, linked=False, base_link=base_link, value_type='plain', link_type=link_type ) if 'archive' in element.attrib: for match in re.finditer(r'[^ ]+', element.attrib.get('archive')): value = match.group(0) link_type = identify_link_type(value) yield LinkInfo( element=element, tag=element.tag, attrib='archive', link=value, inline=True, linked=False, base_link=base_link, value_type='list', link_type=link_type )
def iter_links_object_element(cls, element)
Iterate ``object`` and ``embed`` elements. This function also looks at ``codebase`` and ``archive`` attributes.
2.995203
2.546542
1.176184
'''Iterate a ``param`` element.''' valuetype = element.attrib.get('valuetype', '') if valuetype.lower() == 'ref' and 'value' in element.attrib: link_type = identify_link_type(element.attrib.get('value')) yield LinkInfo( element=element, tag=element.tag, attrib='value', link=element.attrib.get('value'), inline=True, linked=False, base_link=None, value_type='plain', link_type=link_type )
def iter_links_param_element(cls, element)
Iterate a ``param`` element.
5.328643
4.944437
1.077705
'''Iterate a ``style`` element.''' if self.css_scraper and element.text: link_iter = self.css_scraper.scrape_links(element.text, context=True) for link, context in link_iter: if context == 'import': link_type = LinkType.css else: link_type = LinkType.media yield LinkInfo( element=element, tag=element.tag, attrib=None, link=link, inline=True, linked=False, base_link=None, value_type='css', link_type=link_type )
def iter_links_style_element(self, element)
Iterate a ``style`` element.
5.62505
5.367342
1.048014
'''Iterate a ``script`` element.''' if self.javascript_scraper and element.text: link_iter = self.javascript_scraper.scrape_links(element.text, context=True) for link, context in link_iter: inline = is_likely_inline(link) if context is True: link_type = None else: link_type = context yield LinkInfo( element=element, tag=element.tag, attrib=None, link=link, inline=inline, linked=not inline, base_link=None, value_type='script', link_type=link_type ) for link in self.iter_links_plain_element(element): yield link
def iter_links_script_element(self, element)
Iterate a ``script`` element.
5.320519
5.092275
1.044822
'''Iterate any element for links using generic rules.''' for attrib_name, link in self.iter_links_by_attrib(element): if attrib_name in self.LINK_ATTRIBUTES: inline = self.is_link_inline(element.tag, attrib_name) linked = self.is_html_link(element.tag, attrib_name) else: inline = is_likely_inline(link) linked = not inline link_type = identify_link_type(link) yield LinkInfo( element=element, tag=element.tag, attrib=attrib_name, link=link, inline=inline, linked=linked, base_link=None, value_type='plain', link_type=link_type )
def iter_links_plain_element(self, element)
Iterate any element for links using generic rules.
4.530453
3.895387
1.16303
'''Iterate an element by looking at its attributes for links.''' for attrib_name in element.attrib.keys(): attrib_value = element.attrib.get(attrib_name) if attrib_name in self.LINK_ATTRIBUTES: if self.javascript_scraper and \ attrib_value.lstrip().startswith('javascript:'): for link in self.iter_links_by_js_attrib( attrib_name, percent_decode(attrib_value)): yield link else: yield attrib_name, attrib_value elif self.javascript_scraper and \ attrib_name[:5] in self.DYNAMIC_ATTRIBUTES: for link in self.iter_links_by_js_attrib(attrib_name, attrib_value): yield link elif attrib_name.startswith('data-'): if is_likely_link(attrib_value) \ and not is_unlikely_link(attrib_value): yield attrib_name, attrib_value elif attrib_name == 'srcset': items = self.iter_links_by_srcset_attrib( attrib_name, attrib_value) for item in items: yield item
def iter_links_by_attrib(self, element)
Iterate an element by looking at its attributes for links.
3.00969
2.786951
1.079922
'''Iterate links of a JavaScript pseudo-link attribute.''' links = self.javascript_scraper.scrape_links(attrib_value) for link in links: yield attrib_name, link
def iter_links_by_js_attrib(self, attrib_name, attrib_value)
Iterate links of a JavaScript pseudo-link attribute.
12.78779
6.581281
1.943055
'''Return whether the link is likely to be an inline object.''' if tag in cls.TAG_ATTRIBUTES \ and attribute in cls.TAG_ATTRIBUTES[tag]: attr_flags = cls.TAG_ATTRIBUTES[tag][attribute] return attr_flags & cls.ATTR_INLINE return attribute != 'href'
def is_link_inline(cls, tag, attribute)
Return whether the link is likely to be an inline object.
5.956699
4.068377
1.464146
'''Return whether the link is likely to be an external object.''' if tag in cls.TAG_ATTRIBUTES \ and attribute in cls.TAG_ATTRIBUTES[tag]: attr_flags = cls.TAG_ATTRIBUTES[tag][attribute] return attr_flags & cls.ATTR_HTML return attribute == 'href'
def is_html_link(cls, tag, attribute)
Return whether the link is likely to be an external object.
6.625923
3.992575
1.659561
'''Return whether we cannot follow links due to robots.txt directives. ''' return ( element.tag == 'meta' and element.attrib.get('name', '').lower() == 'robots' and 'nofollow' in element.attrib.get('value', '').lower() )
def robots_cannot_follow(cls, element)
Return whether we cannot follow links due to robots.txt directives.
4.050589
2.723187
1.487444
'''Return the file text and processed absolute links. Args: file: A file object containing the document. encoding (str): The encoding of the document. base_url (str): The URL at which the document is located. Returns: iterator: Each item is a tuple: 1. str: The text 2. bool: Whether the text is a link ''' for text, is_link in self.iter_text(file, encoding): if is_link and base_url: new_link = urljoin_safe(base_url, text, allow_fragments=False) if new_link: yield (new_link, is_link) else: yield (new_link, False) else: yield (text, is_link)
def iter_processed_text(self, file, encoding=None, base_url=None)
Return the file text and processed absolute links. Args: file: A file object containing the document. encoding (str): The encoding of the document. base_url (str): The URL at which the document is located. Returns: iterator: Each item is a tuple: 1. str: The text 2. bool: Whether the text is a link
3.643075
1.898187
1.919239
'''Return the links. This function is a convenience function for calling :meth:`iter_processed_text` and returning only the links. ''' if context: return [item for item in self.iter_processed_text(file, encoding, base_url) if item[1]] else: return [item[0] for item in self.iter_processed_text(file, encoding, base_url) if item[1]]
def iter_processed_links(self, file, encoding=None, base_url=None, context=False)
Return the links. This function is a convenience function for calling :meth:`iter_processed_text` and returning only the links.
3.179467
1.906613
1.6676
'''Convenience function for scraping from a text string.''' return self.iter_processed_links(io.StringIO(text), context=context)
def scrape_links(self, text, context=False)
Convenience function for scraping from a text string.
10.842299
7.329967
1.479174
'''Return the links. Returns: iterator: Each item is a str which represents a link. ''' for link in self.iter_links(file, encoding): new_link = urljoin_safe(base_url, link, allow_fragments=False) if new_link: yield new_link
def iter_processed_links(self, file, encoding=None, base_url=None)
Return the links. Returns: iterator: Each item is a str which represents a link.
4.54026
2.812235
1.614467
'''Iterate the scrapers, returning the first of the results.''' for scraper in self._document_scrapers: scrape_result = scraper.scrape(request, response, link_type) if scrape_result is None: continue if scrape_result.link_contexts: return scrape_result
def scrape(self, request, response, link_type=None)
Iterate the scrapers, returning the first of the results.
5.003508
3.346623
1.495092
'''Iterate the scrapers and return a dict of results. Returns: dict: A dict where the keys are the scraper instances and the values are the results. That is, a mapping from :class:`BaseDocumentScraper` to :class:`ScrapeResult`. ''' info = {} for scraper in self._document_scrapers: scrape_result = scraper.scrape(request, response, link_type) info[scraper] = scrape_result return info
def scrape_info(self, request, response, link_type=None)
Iterate the scrapers and return a dict of results. Returns: dict: A dict where the keys are the scraper instances and the values are the results. That is, a mapping from :class:`BaseDocumentScraper` to :class:`ScrapeResult`.
4.35237
1.702004
2.557203
'''Update the bandwidth meter. Args: data_len (int): The number of bytes transferred since the last call to :func:`feed`. feed_time (float): Current time. ''' self._bytes_transferred += data_len self._collected_bytes_transferred += data_len time_now = feed_time or time.time() time_diff = time_now - self._last_feed_time if time_diff < self._sample_min_time: return self._last_feed_time = time.time() if data_len == 0 and time_diff >= self._stall_time: self._stalled = True return self._samples.append((time_diff, self._collected_bytes_transferred)) self._collected_bytes_transferred = 0
def feed(self, data_len, feed_time=None)
Update the bandwidth meter. Args: data_len (int): The number of bytes transferred since the last call to :func:`feed`. feed_time (float): Current time.
3.038721
2.368744
1.282841
'''Return the current transfer speed. Returns: int: The speed in bytes per second. ''' if self._stalled: return 0 time_sum = 0 data_len_sum = 0 for time_diff, data_len in self._samples: time_sum += time_diff data_len_sum += data_len if time_sum: return data_len_sum / time_sum else: return 0
def speed(self)
Return the current transfer speed. Returns: int: The speed in bytes per second.
3.229913
2.650168
1.218758
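The speed is just the collected sample sums divided: with samples of (0.5 s, 1000 bytes) and (1.0 s, 3000 bytes) the rate is (1000 + 3000) / (0.5 + 1.0), roughly 2667 bytes per second. The arithmetic in miniature:

samples = [(0.5, 1000), (1.0, 3000)]  # (seconds, bytes) pairs as collected by feed()
time_sum = sum(duration for duration, _ in samples)
byte_sum = sum(size for _, size in samples)
print(round(byte_sum / time_sum))  # 2667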
'''Return ResourceInfo instances.''' if self._min_disk: for path in self._resource_paths: usage = psutil.disk_usage(path) yield ResourceInfo(path, usage.free, self._min_disk) if self._min_memory: usage = psutil.virtual_memory() yield ResourceInfo(None, usage.available, self._min_memory)
def get_info(self)
Return ResourceInfo instances.
4.319926
3.551675
1.216307
'''Check resource levels. Returns: None, ResourceInfo: If None is returned, no limits are exceeded. Otherwise, the first ResourceInfo exceeding its limit is returned. ''' for info in self.get_info(): if info.free < info.limit: return info
def check(self)
Check resource levels. Returns: None, ResourceInfo: If None is returned, no limits are exceeded. Otherwise, the first ResourceInfo exceeding its limit is returned.
15.991046
2.837376
5.635856
'''Load the response and increment the counter. Args: response (:class:`.http.request.Response`): The response from a previous request. ''' self._response = response if self.next_location(raw=True): self._num_redirects += 1
def load(self, response)
Load the response and increment the counter. Args: response (:class:`.http.request.Response`): The response from a previous request.
8.810186
4.280033
2.058439
'''Returns the next location. Args: raw (bool): If True, the original string contained in the Location field will be returned. Otherwise, the URL will be normalized to a complete URL. Returns: str, None: If str, the location. Otherwise, no next location. ''' if self._response: location = self._response.fields.get('location') if not location or raw: return location return wpull.url.urljoin(self._response.request.url_info.url, location)
def next_location(self, raw=False)
Returns the next location. Args: raw (bool): If True, the original string contained in the Location field will be returned. Otherwise, the URL will be normalized to a complete URL. Returns: str, None: If str, the location. Otherwise, no next location.
6.654618
2.688106
2.475578
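Resolving a relative Location header against the request URL is ordinary URL joining; the standard-library equivalent of the wpull.url.urljoin call looks like this (the URLs are hypothetical):

from urllib.parse import urljoin

request_url = 'http://example.com/a/b.html'  # original request URL
location = '/c/d.html'                       # raw Location header value
print(urljoin(request_url, location))        # http://example.com/c/d.html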
'''Return whether the response contains a redirect code.''' if self._response: status_code = self._response.status_code return status_code in self._codes \ or status_code in self._repeat_codes
def is_redirect(self)
Return whether the response contains a redirect code.
6.033013
4.468596
1.350092
'''Build resolver.''' args = session.args dns_timeout = args.dns_timeout if args.timeout: dns_timeout = args.timeout if args.inet_family == 'IPv4': family = IPFamilyPreference.ipv4_only elif args.inet_family == 'IPv6': family = IPFamilyPreference.ipv6_only elif args.prefer_family == 'IPv6': family = IPFamilyPreference.prefer_ipv6 elif args.prefer_family == 'IPv4': family = IPFamilyPreference.prefer_ipv4 else: family = IPFamilyPreference.any return session.factory.new( 'Resolver', family=family, timeout=dns_timeout, rotate=args.rotate_dns, cache=session.factory.class_map['Resolver'].new_cache() if args.dns_cache else None, )
def _build_resolver(cls, session: AppSession)
Build resolver.
3.73801
3.635873
1.028091
'''Create connection pool.''' args = session.args connect_timeout = args.connect_timeout read_timeout = args.read_timeout if args.timeout: connect_timeout = read_timeout = args.timeout if args.limit_rate: bandwidth_limiter = session.factory.new('BandwidthLimiter', args.limit_rate) else: bandwidth_limiter = None connection_factory = functools.partial( Connection, timeout=read_timeout, connect_timeout=connect_timeout, bind_host=session.args.bind_address, bandwidth_limiter=bandwidth_limiter, ) ssl_connection_factory = functools.partial( SSLConnection, timeout=read_timeout, connect_timeout=connect_timeout, bind_host=session.args.bind_address, ssl_context=session.ssl_context, ) if not session.args.no_proxy: if session.args.https_proxy: http_proxy = session.args.http_proxy.split(':', 1) proxy_ssl = True elif session.args.http_proxy: http_proxy = session.args.http_proxy.split(':', 1) proxy_ssl = False else: http_proxy = None proxy_ssl = None if http_proxy: http_proxy[1] = int(http_proxy[1]) if session.args.proxy_user: authentication = (session.args.proxy_user, session.args.proxy_password) else: authentication = None session.factory.class_map['ConnectionPool'] = \ HTTPProxyConnectionPool host_filter = session.factory.new( 'ProxyHostFilter', accept_domains=session.args.proxy_domains, reject_domains=session.args.proxy_exclude_domains, accept_hostnames=session.args.proxy_hostnames, reject_hostnames=session.args.proxy_exclude_hostnames ) return session.factory.new( 'ConnectionPool', http_proxy, proxy_ssl=proxy_ssl, authentication=authentication, resolver=session.factory['Resolver'], connection_factory=connection_factory, ssl_connection_factory=ssl_connection_factory, host_filter=host_filter, ) return session.factory.new( 'ConnectionPool', resolver=session.factory['Resolver'], connection_factory=connection_factory, ssl_connection_factory=ssl_connection_factory )
def _build_connection_pool(cls, session: AppSession)
Create connection pool.
2.195393
2.16903
1.012154
'''Convert all links in URL table.''' for url_record in self._url_table.get_all(): if url_record.status != Status.done: continue self.convert_by_record(url_record)
def convert_all(self)
Convert all links in URL table.
7.730319
5.870794
1.316742
'''Convert using given URL Record.''' filename = url_record.filename if not os.path.exists(filename): return if url_record.link_type: if url_record.link_type not in ('css', 'html'): return else: link_type = url_record.link_type else: with open(filename, 'rb') as in_file: if HTMLScraper.is_supported( file=in_file, url_info=url_record.url_info): link_type = 'html' elif CSSScraper.is_supported( file=in_file, url_info=url_record.url_info): link_type = 'css' else: link_type = None _logger.info(__( _('Converting links in file ‘{filename}’ (type={type}).'), filename=filename, type=link_type )) if self._backup_enabled: shutil.copy2(filename, filename + '.orig') temp_filename = filename + '-new' if link_type == 'css': self._css_converter.convert( filename, temp_filename, base_url=url_record.url) elif link_type == 'html': self._html_converter.convert( filename, temp_filename, base_url=url_record.url) else: raise Exception('Unknown link type.') os.remove(filename) os.rename(temp_filename, filename)
def convert_by_record(self, url_record)
Convert using given URL Record.
2.787416
2.729005
1.021404
'''Read a single chunk's header. Returns: tuple: 2-item tuple with the size of the content in the chunk and the raw header byte string. Coroutine. ''' # _logger.debug('Reading chunk.') try: chunk_size_hex = yield from self._connection.readline() except ValueError as error: raise ProtocolError( 'Invalid chunk size: {0}'.format(error)) from error if not chunk_size_hex.endswith(b'\n'): raise NetworkError('Connection closed.') try: chunk_size = int(chunk_size_hex.split(b';', 1)[0].strip(), 16) except ValueError as error: raise ProtocolError( 'Invalid chunk size: {0}'.format(error)) from error if chunk_size < 0: raise ProtocolError('Chunk size cannot be negative.') self._chunk_size = self._bytes_left = chunk_size return chunk_size, chunk_size_hex
def read_chunk_header(self)
Read a single chunk's header. Returns: tuple: 2-item tuple with the size of the content in the chunk and the raw header byte string. Coroutine.
3.420877
2.455548
1.393122
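The chunk-size line is hexadecimal and may carry extensions after a semicolon; the parsing step boils down to:

chunk_size_hex = b'1a;name=value\r\n'  # example chunk header line
chunk_size = int(chunk_size_hex.split(b';', 1)[0].strip(), 16)
print(chunk_size)  # 26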
'''Read a fragment of a single chunk. Call :meth:`read_chunk_header` first. Returns: tuple: 2-item tuple with the content data and raw data. First item is empty bytes string when chunk is fully read. Coroutine. ''' # chunk_size = self._chunk_size bytes_left = self._bytes_left # _logger.debug(__('Getting chunk size={0}, remain={1}.', # chunk_size, bytes_left)) if bytes_left > 0: size = min(bytes_left, self._read_size) data = yield from self._connection.read(size) self._bytes_left -= len(data) return (data, data) elif bytes_left < 0: raise ProtocolError('Chunked-transfer overrun.') elif bytes_left: raise NetworkError('Connection closed.') newline_data = yield from self._connection.readline() if len(newline_data) > 2: # Should be either CRLF or LF # This could our problem or the server's problem raise ProtocolError('Error reading newline after chunk.') self._chunk_size = self._bytes_left = None return (b'', newline_data)
def read_chunk_body(self)
Read a fragment of a single chunk. Call :meth:`read_chunk_header` first. Returns: tuple: 2-item tuple with the content data and raw data. First item is empty bytes string when chunk is fully read. Coroutine.
5.841275
3.69026
1.58289
'''Read the HTTP trailer fields. Returns: bytes: The trailer data. Coroutine. ''' _logger.debug('Reading chunked trailer.') trailer_data_list = [] while True: trailer_data = yield from self._connection.readline() trailer_data_list.append(trailer_data) if not trailer_data.strip(): break return b''.join(trailer_data_list)
def read_trailer(self)
Read the HTTP trailer fields. Returns: bytes: The trailer data. Coroutine.
4.25485
2.959725
1.437583
'''Return a version tuple from a string.''' match = re.match(r'(\d+)\.(\d+)\.?(\d*)([abc]?)(\d*)', string) major = int(match.group(1)) minor = int(match.group(2)) patch = int(match.group(3) or 0) level = RELEASE_LEVEL_MAP.get(match.group(4), 'final') serial = int(match.group(5) or 0) return major, minor, patch, level, serial
def get_version_tuple(string)
Return a version tuple from a string.
2.476979
2.462891
1.00572
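A runnable sketch of the same regex with an illustrative release-level map (the real RELEASE_LEVEL_MAP lives elsewhere in wpull; the mapping below is assumed):

import re

RELEASE_LEVEL_MAP = {'a': 'alpha', 'b': 'beta', 'c': 'candidate'}  # assumed mapping

def get_version_tuple(string):
    match = re.match(r'(\d+)\.(\d+)\.?(\d*)([abc]?)(\d*)', string)
    major = int(match.group(1))
    minor = int(match.group(2))
    patch = int(match.group(3) or 0)
    level = RELEASE_LEVEL_MAP.get(match.group(4), 'final')
    serial = int(match.group(5) or 0)
    return major, minor, patch, level, serial

print(get_version_tuple('1.2.3'))    # (1, 2, 3, 'final', 0)
print(get_version_tuple('2.0.1b4'))  # (2, 0, 1, 'beta', 4)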
'''Read the URLs provided by the user.''' url_string_iter = session.args.urls or () # FIXME: url rewriter isn't created yet url_rewriter = session.factory.get('URLRewriter') if session.args.input_file: if session.args.force_html: lines = cls._input_file_as_html_links(session) else: lines = cls._input_file_as_lines(session) url_string_iter = itertools.chain(url_string_iter, lines) base_url = session.args.base for url_string in url_string_iter: _logger.debug(__('Parsing URL {0}', url_string)) if base_url: url_string = wpull.url.urljoin(base_url, url_string) try: url_info = wpull.url.URLInfo.parse( url_string, default_scheme=default_scheme) _logger.debug(__('Parsed URL {0}', url_info)) if url_rewriter: # TODO: this logic should be a hook url_info = url_rewriter.rewrite(url_info) _logger.debug(__('Rewritten URL {0}', url_info)) yield url_info except ValueError as e: _logger.info(__('Invalid URL {0}: {1}', url_string, e))
def _read_input_urls(cls, session: AppSession, default_scheme='http')
Read the URLs provided by the user.
3.193889
3.110906
1.026675
'''Read lines from input file and return them.''' if session.args.input_file == sys.stdin: input_file = session.args.input_file else: reader = codecs.getreader(session.args.local_encoding or 'utf-8') input_file = reader(session.args.input_file) return input_file
def _input_file_as_lines(cls, session: AppSession)
Read lines from input file and return them.
3.598359
3.033051
1.186382