code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
'''Print warnings about any enabled hazardous options.
This function will print messages complaining about:
* ``--save-headers``
* ``--no-iri``
* ``--output-document``
* ``--ignore-fatal-errors``
'''
enabled_options = []
for option_name in cls.UNSAFE_OPTIONS:
if getattr(args, option_name):
enabled_options.append(option_name)
if enabled_options:
_logger.warning(__(
_('The following unsafe options are enabled: {list}.'),
list=enabled_options
))
_logger.warning(
_('The use of unsafe options may lead to unexpected behavior '
'or file corruption.'))
if not args.retr_symlinks:
_logger.warning(
_('The --retr-symlinks=off option is a security risk.')
)
|
def _warn_unsafe_options(cls, args)
|
Print warnings about any enabled hazardous options.
This function will print messages complaining about:
* ``--save-headers``
* ``--no-iri``
* ``--output-document``
* ``--ignore-fatal-errors``
| 6.039191 | 3.130624 | 1.92907 |
'''Print warnings about any options that may be silly.'''
if 'page-requisites' in args.span_hosts_allow \
and not args.page_requisites:
_logger.warning(
_('Spanning hosts is allowed for page requisites, '
'but the page requisites option is not on.')
)
if 'linked-pages' in args.span_hosts_allow \
and not args.recursive:
_logger.warning(
_('Spanning hosts is allowed for linked pages, '
'but the recursive option is not on.')
)
if args.warc_file and \
(args.http_proxy or args.https_proxy):
_logger.warning(_('WARC specifications do not handle proxies.'))
if (args.password or args.ftp_password or
args.http_password or args.proxy_password) and \
args.warc_file:
_logger.warning(
_('Your password is recorded in the WARC file.'))
|
def _warn_silly_options(cls, args)
|
Print warnings about any options that may be silly.
| 4.920646 | 4.522014 | 1.088154 |
'''Guess the order of year, month and day, and whether 12-hour or 24-hour time is used.
Returns:
tuple: First item is either str ``ymd``, ``dmy``, ``mdy``
or ``None``.
Second item is either True for 12-hour time or False for 24-hour time
or None.
'''
time_12_score = 0
time_24_score = 0
date_ymd_score = 0
date_dmy_score = 0
date_mdy_score = 0
for line in lines:
line = unicodedata.normalize('NFKD', line).lower()
if DAY_PERIOD_PATTERN.search(line):
time_12_score += 1
else:
time_24_score += 1
if ISO_8601_DATE_PATTERN.search(line):
date_ymd_score += 1
if MMM_DD_YY_PATTERN.search(line):
date_mdy_score += 1
match = NN_NN_NNNN_PATTERN.search(line)
if match:
num_1 = int(match.group(1))
num_2 = int(match.group(2))
if num_1 > 12:
date_dmy_score += 1
elif num_2 > 12:
date_mdy_score += 1
time_score = time_12_score + time_24_score
date_score = date_ymd_score + date_dmy_score + date_mdy_score
if time_score >= threshold and date_score >= threshold:
break
if date_ymd_score or date_dmy_score or date_mdy_score:
top = max([
(date_ymd_score, 'ymd'),
(date_dmy_score, 'dmy'),
(date_mdy_score, 'mdy'),
],
key=lambda item: item[0]
)
date_format = top[1]
else:
date_format = None
if time_12_score or time_24_score:
day_period = True if time_12_score > time_24_score else False
else:
day_period = None
return date_format, day_period
|
def guess_datetime_format(lines: Iterable[str], threshold: int=5) \
-> Tuple[Optional[str], Optional[bool]]
|
Guess the order of year, month and day, and whether 12-hour or 24-hour time is used.
Returns:
tuple: First item is either str ``ymd``, ``dmy``, ``mdy``
or ``None``.
Second item is either True for 12-hour time or False for 24-hour time
or None.
| 2.295828 | 1.838226 | 1.248937 |
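The numeric-date disambiguation above rests on one observation: in an ``NN/NN/NNNN`` date, a component larger than 12 cannot be a month. A standalone sketch of that voting rule (the regex and helper name are simplified stand-ins, not wpull's own ``NN_NN_NNNN_PATTERN``):

```python
import re

# Simplified stand-in for the NN_NN_NNNN date pattern.
NN_NN_NNNN = re.compile(r'(\d{1,2})[-/](\d{1,2})[-/](\d{4})')

def guess_numeric_date_order(lines):
    '''Vote dmy vs mdy: a component over 12 cannot be a month.'''
    dmy_score = mdy_score = 0
    for line in lines:
        match = NN_NN_NNNN.search(line)
        if not match:
            continue
        num_1, num_2 = int(match.group(1)), int(match.group(2))
        if num_1 > 12:
            dmy_score += 1      # first field must be the day
        elif num_2 > 12:
            mdy_score += 1      # second field must be the day
    if dmy_score == mdy_score == 0:
        return None
    return 'dmy' if dmy_score >= mdy_score else 'mdy'

print(guess_numeric_date_order(['31/01/2016 report.txt']))  # dmy
print(guess_numeric_date_order(['01/31/2016 report.txt']))  # mdy
```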
'''Parse date/time from a line of text into datetime object.'''
datetime_now = datetime_now or \
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
year = datetime_now.year
month = datetime_now.month
day = datetime_now.day
hour = 0
minute = 0
second = 0
date_ok = False
start_index = float('+inf')
end_index = float('-inf')
ambiguous_year = False
text = unicodedata.normalize('NFKD', text).lower()
# Let's do time first
match = TIME_PATTERN.search(text)
if match:
hour_str = match.group(1)
hour = int(hour_str)
minute = int(match.group(2))
day_period = match.group(4)
if match.group(3):
second = int(match.group(3))
if day_period and is_day_period and hour < 13:
if day_period.lower() in PM_STRINGS:
if hour != 12:
hour += 12
elif hour == 12:
hour = 0
start_index = match.start()
end_index = match.end()
# Now try dates
if date_format == 'ymd' or not date_format:
match = ISO_8601_DATE_PATTERN.search(text)
if match:
date_ok = True
year = int(match.group(1))
month = int(match.group(2))
day = int(match.group(3))
start_index = min(start_index, match.start())
end_index = max(end_index, match.end())
if not date_ok and (date_format == 'mdy' or not date_format):
match = MMM_DD_YY_PATTERN.search(text)
if match:
date_ok = True
month_str = match.group(1)
month = parse_month(month_str)
day = int(match.group(2))
year_str = match.group(3)
if year_str and len(year_str) == 4:
year = int(year_str)
else:
ambiguous_year = True
start_index = min(start_index, match.start())
end_index = max(end_index, match.end())
if not date_ok:
match = NN_NN_NNNN_PATTERN.search(text)
if match:
date_ok = True
num_1 = int(match.group(1))
num_2 = int(match.group(2))
year = int(match.group(3))
if year < 100:
year = y2k(year)
if date_format == 'mdy' or num_2 > 12:
month = num_1
day = num_2
else:
day = num_1
month = num_2
start_index = min(start_index, match.start())
end_index = max(end_index, match.end())
if date_ok:
guess_date = datetime.datetime(year, month, day, hour, minute, second,
tzinfo=datetime.timezone.utc)
if ambiguous_year and guess_date > datetime_now:
# The year was omitted from the listing (common for recent entries),
# so a guessed date in the future means it was actually last year.
guess_date = guess_date.replace(year=year - 1)
return guess_date, start_index, end_index
else:
raise ValueError('Failed to parse date from {}'.format(repr(text)))
|
def parse_datetime(text: str, date_format: str=None,
is_day_period: Optional[bool]=None,
datetime_now: datetime.datetime=None) \
-> Tuple[datetime.datetime, int, int]
|
Parse date/time from a line of text into datetime object.
| 2.122995 | 2.083925 | 1.018748 |
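When the year is missing from a listing, ``parse_datetime`` assumes the current year and then rolls back a year if the guess lands in the future. A standalone sketch of just that correction (hypothetical helper, not wpull's API):

```python
import datetime

def resolve_omitted_year(month, day, hour, minute, now=None):
    '''Assume the current year, then roll back if that lands in the future.'''
    now = now or datetime.datetime.now(datetime.timezone.utc)
    guess = datetime.datetime(now.year, month, day, hour, minute,
                              tzinfo=datetime.timezone.utc)
    if guess > now:
        guess = guess.replace(year=now.year - 1)
    return guess

now = datetime.datetime(2016, 3, 1, tzinfo=datetime.timezone.utc)
print(resolve_omitted_year(12, 25, 10, 30, now=now))  # 2015-12-25 10:30
print(resolve_omitted_year(2, 14, 8, 0, now=now))     # 2016-02-14 08:00
```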
'''Parse month string into integer.'''
text = text.lower()
try:
return MONTH_MAP[text]
except KeyError:
pass
try:
return MONTH_MAP[text[:3]]
except KeyError:
pass
raise ValueError('Month {} not found.'.format(repr(text)))
|
def parse_month(text: str) -> int
|
Parse month string into integer.
| 3.180573 | 2.942173 | 1.081029 |
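A standalone sketch of the same two-step lookup, with a toy month map standing in for the CLDR-derived ``MONTH_MAP``:

```python
# Toy stand-in for the CLDR-derived MONTH_MAP (full and abbreviated names).
MONTH_MAP = {'january': 1, 'jan': 1, 'february': 2, 'feb': 2, 'march': 3, 'mar': 3}

def parse_month(text: str) -> int:
    '''Try the exact string first, then the three-letter abbreviation.'''
    text = text.lower()
    for key in (text, text[:3]):
        try:
            return MONTH_MAP[key]
        except KeyError:
            pass
    raise ValueError('Month {} not found.'.format(repr(text)))

print(parse_month('March'))    # 3
print(parse_month('Febuary'))  # 2 -- misspelling still matches via 'feb'
```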
'''Convert two digit year to four digit year.'''
assert 0 <= year <= 99, 'Not a two digit year {}'.format(year)
return year + 1900 if year >= 69 else year + 2000
|
def y2k(year: int) -> int
|
Convert two digit year to four digit year.
| 4.451751 | 3.788875 | 1.174953 |
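The pivot rule maps 69 through 99 to the 1900s and 00 through 68 to the 2000s. A standalone restatement with a few spot checks:

```python
def y2k(year: int) -> int:
    '''Convert a two digit year to a four digit year (pivot at 69).'''
    assert 0 <= year <= 99, 'Not a two digit year {}'.format(year)
    return year + 1900 if year >= 69 else year + 2000

assert y2k(69) == 1969
assert y2k(99) == 1999
assert y2k(0) == 2000
assert y2k(68) == 2068
```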
'''Parse CLDR JSON datasets for date and time strings.'''
am_strings = set()
pm_strings = set()
month_to_int = {}
for lang in language_codes:
path = os.path.join(directory, 'main', lang, 'ca-gregorian.json')
with open(path) as in_file:
doc = json.load(in_file)
months_dict = doc['main'][lang]['dates']['calendars']['gregorian']['months']['format']['abbreviated']
day_periods_dict = doc['main'][lang]['dates']['calendars']['gregorian']['dayPeriods']['format']['abbreviated']
for month, month_str in months_dict.items():
if massage:
month_str = unicodedata.normalize('NFKD', month_str).lower().strip('.')
month_to_int[month_str] = int(month)
am_str = day_periods_dict['am']
pm_str = day_periods_dict['pm']
if massage:
am_str = unicodedata.normalize('NFKD', am_str).lower().strip('.')
pm_str = unicodedata.normalize('NFKD', pm_str).lower().strip('.')
am_strings.add(am_str)
pm_strings.add(pm_str)
print(pprint.pformat(am_strings))
print(pprint.pformat(pm_strings))
print(pprint.pformat(month_to_int))
|
def parse_cldr_json(directory, language_codes=DEFAULT_LANGUAGE_CODES,
massage=True)
|
Parse CLDR JSON datasets for date and time strings.
| 2.179244 | 1.966964 | 1.107922 |
'''Check out a connection.
This function is the same as acquire but with extra arguments
concerning proxies.
Coroutine.
'''
if self._host_filter and not self._host_filter.test(host):
connection = yield from \
super().acquire(host, port, use_ssl, host_key)
return connection
host_key = host_key or (host, port, use_ssl)
proxy_host, proxy_port = self._proxy_address
connection = yield from super().acquire(
proxy_host, proxy_port, self._proxy_ssl, host_key=host_key
)
connection.proxied = True
_logger.debug('Request for proxy connection.')
if connection.closed():
_logger.debug('Connecting to proxy.')
yield from connection.connect()
if tunnel:
yield from self._establish_tunnel(connection, (host, port))
if use_ssl:
ssl_connection = yield from connection.start_tls(self._ssl_context)
ssl_connection.proxied = True
ssl_connection.tunneled = True
self._connection_map[ssl_connection] = connection
connection.wrapped_connection = ssl_connection
return ssl_connection
if connection.wrapped_connection:
ssl_connection = connection.wrapped_connection
self._connection_map[ssl_connection] = connection
return ssl_connection
else:
return connection
|
def acquire_proxy(self, host, port, use_ssl=False, host_key=None,
tunnel=True)
|
Check out a connection.
This function is the same as acquire but with extra arguments
concerning proxies.
Coroutine.
| 3.72249 | 3.003156 | 1.239526 |
'''Establish a TCP tunnel.
Coroutine.
'''
host = '[{}]'.format(address[0]) if ':' in address[0] else address[0]
port = address[1]
request = RawRequest('CONNECT', '{0}:{1}'.format(host, port))
self.add_auth_header(request)
stream = Stream(connection, keep_alive=True)
_logger.debug('Sending Connect.')
yield from stream.write_request(request)
_logger.debug('Read proxy response.')
response = yield from stream.read_response()
if response.status_code != 200:
debug_file = io.BytesIO()
_logger.debug('Read proxy response body.')
yield from stream.read_body(request, response, file=debug_file)
debug_file.seek(0)
_logger.debug(ascii(debug_file.read()))
if response.status_code == 200:
connection.tunneled = True
else:
raise NetworkError(
'Proxy does not support CONNECT: {} {}'
.format(response.status_code,
wpull.string.printable_str(response.reason))
)
|
def _establish_tunnel(self, connection, address)
|
Establish a TCP tunnel.
Coroutine.
| 4.049041 | 3.829849 | 1.057232 |
'''Return whether the file is likely to be HTML.'''
peeked_data = wpull.string.printable_bytes(
wpull.util.peek_file(file)).lower()
if b'<!doctype html' in peeked_data \
or b'<head' in peeked_data \
or b'<title' in peeked_data \
or b'<html' in peeked_data \
or b'<script' in peeked_data \
or b'<table' in peeked_data \
or b'<a href' in peeked_data:
return True
|
def is_file(cls, file)
|
Return whether the file is likely to be HTML.
| 4.241719 | 3.50971 | 1.208567 |
'''Convert an HTTP request.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): The referring hostname or IP address.
Returns:
Request: An instance of :class:`urllib.request.Request`
'''
new_request = urllib.request.Request(
request.url_info.url,
origin_req_host=referrer_host,
)
for name, value in request.fields.get_all():
new_request.add_header(name, value)
return new_request
|
def convert_http_request(request, referrer_host=None)
|
Convert an HTTP request.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): The referring hostname or IP address.
Returns:
Request: An instance of :class:`urllib.request.Request`
| 3.669879 | 2.3208 | 1.581299 |
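The conversion targets the standard-library request type so that ``http.cookiejar`` can work with it. A minimal sketch using only ``urllib.request``, with a plain list of pairs standing in for wpull's header fields:

```python
import urllib.request

def build_stdlib_request(url, header_fields, referrer_host=None):
    '''Build a urllib.request.Request carrying the same URL and headers.'''
    new_request = urllib.request.Request(url, origin_req_host=referrer_host)
    for name, value in header_fields:
        new_request.add_header(name, value)
    return new_request

request = build_stdlib_request(
    'http://example.com/page',
    [('User-Agent', 'Wget/1.0'), ('Accept', '*/*')],
    referrer_host='example.com',
)
print(request.get_full_url())            # http://example.com/page
print(request.get_header('User-agent'))  # Wget/1.0
```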
'''Return the header fields as a Message.
Returns:
Message: An instance of :class:`email.message.Message`. If
Python 2, returns an instance of :class:`mimetools.Message`.
'''
if sys.version_info[0] == 2:
return mimetools.Message(io.StringIO(str(self._response.fields)))
else:
return email.message_from_string(str(self._response.fields))
|
def info(self)
|
Return the header fields as a Message.
Returns:
Message: An instance of :class:`email.message.Message`. If
Python 2, returns an instance of :class:`mimetools.Message`.
| 5.040745 | 2.16404 | 2.329321 |
'''Wrapped ``add_cookie_header``.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL.
'''
new_request = convert_http_request(request, referrer_host)
self._cookie_jar.add_cookie_header(new_request)
request.fields.clear()
for name, value in new_request.header_items():
request.fields.add(name, value)
|
def add_cookie_header(self, request, referrer_host=None)
|
Wrapped ``add_cookie_header``.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL.
| 4.311406 | 2.721541 | 1.584178 |
'''Wrapped ``extract_cookies``.
Args:
response: An instance of :class:`.http.request.Response`.
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL.
'''
new_response = HTTPResponseInfoWrapper(response)
new_request = convert_http_request(request, referrer_host)
self._cookie_jar.extract_cookies(new_response, new_request)
|
def extract_cookies(self, response, request, referrer_host=None)
|
Wrapped ``extract_cookies``.
Args:
response: An instance of :class:`.http.request.Response`.
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL.
| 4.378606 | 2.522736 | 1.735658 |
'''Save the cookie jar if needed.'''
if self._save_filename:
self._cookie_jar.save(
self._save_filename,
ignore_discard=self._keep_session_cookies
)
|
def close(self)
|
Save the cookie jar if needed.
| 8.068757 | 4.958349 | 1.627307 |
'''Start the executable.
Args:
use_atexit (bool): If True, the process will automatically be
terminated at exit.
'''
assert not self._process
_logger.debug('Starting process %s', self._proc_args)
process_future = asyncio.create_subprocess_exec(
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
*self._proc_args
)
self._process = yield from process_future
self._stderr_reader = asyncio.async(self._read_stderr())
self._stdout_reader = asyncio.async(self._read_stdout())
if use_atexit:
atexit.register(self.close)
|
def start(self, use_atexit=True)
|
Start the executable.
Args:
use_atexit (bool): If True, the process will automatically be
terminated at exit.
| 2.887882 | 2.580177 | 1.119258 |
'''Terminate or kill the subprocess.
This function is blocking.
'''
if not self._process:
return
if self._process.returncode is not None:
return
_logger.debug('Terminate process.')
try:
self._process.terminate()
except OSError as error:
if error.errno != errno.ESRCH:
raise
for dummy in range(10):
if self._process.returncode is not None:
return
time.sleep(0.05)
_logger.debug('Failed to terminate. Killing.')
try:
self._process.kill()
except OSError as error:
if error.errno != errno.ESRCH:
raise
|
def close(self)
|
Terminate or kill the subprocess.
This function is blocking.
| 2.562576 | 2.1918 | 1.169165 |
'''Continuously read the stdout for messages.'''
try:
while self._process.returncode is None:
line = yield from self._process.stdout.readline()
_logger.debug('Read stdout line %s', repr(line))
if not line:
break
if self._stdout_callback:
yield from self._stdout_callback(line)
except Exception:
_logger.exception('Unhandled read stdout exception.')
raise
|
def _read_stdout(self)
|
Continuously read the stdout for messages.
| 4.306691 | 3.597849 | 1.197018 |
'''Continuously read stderr for error messages.'''
try:
while self._process.returncode is None:
line = yield from self._process.stderr.readline()
if not line:
break
if self._stderr_callback:
yield from self._stderr_callback(line)
except Exception:
_logger.exception('Unhandled read stderr exception.')
raise
|
def _read_stderr(self)
|
Continuously read stderr for error messages.
| 4.683593 | 3.862346 | 1.212629 |
'''Read from connection to file.
Args:
file: A file object or a writer stream.
'''
if file:
file_is_async = hasattr(file, 'drain')
while True:
data = yield from self._connection.read(4096)
if not data:
break
if file:
file.write(data)
if file_is_async:
yield from file.drain()
self._data_event_dispatcher.notify_read(data)
|
def read_file(self, file: Union[IO, asyncio.StreamWriter]=None)
|
Read from connection to file.
Args:
file: A file object or a writer stream.
| 4.337477 | 3.261185 | 1.330031 |
'''Connect the stream if needed.
Coroutine.
'''
if self._connection.closed():
self._connection.reset()
yield from self._connection.connect()
|
def reconnect(self)
|
Connect the stream if needed.
Coroutine.
| 15.811276 | 6.106815 | 2.58912 |
'''Write a command to the stream.
Args:
command: The command.
Coroutine.
'''
_logger.debug('Write command.')
data = command.to_bytes()
yield from self._connection.write(data)
self._data_event_dispatcher.notify_write(data)
|
def write_command(self, command: Command)
|
Write a command to the stream.
Args:
command: The command.
Coroutine.
| 7.280462 | 4.736557 | 1.537079 |
'''Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine.
'''
_logger.debug('Read reply')
reply = Reply()
while True:
line = yield from self._connection.readline()
if line[-1:] != b'\n':
raise NetworkError('Connection closed.')
self._data_event_dispatcher.notify_read(line)
reply.parse(line)
if reply.code is not None:
break
return reply
|
def read_reply(self) -> Reply
|
Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine.
| 6.868052 | 4.386003 | 1.565902 |
'''Return whether the request can be fetched based on the pool.'''
url_info = request.url_info
user_agent = request.fields.get('User-agent', '')
if self._robots_txt_pool.has_parser(url_info):
return self._robots_txt_pool.can_fetch(url_info, user_agent)
else:
raise NotInPoolError()
|
def can_fetch_pool(self, request: Request)
|
Return whether the request can be fetched based on the pool.
| 5.717547 | 4.404008 | 1.29826 |
'''Fetch the robots.txt file for the request.
Coroutine.
'''
url_info = request.url_info
url = URLInfo.parse('{0}://{1}/robots.txt'.format(
url_info.scheme, url_info.hostname_with_port)).url
if not file:
file = wpull.body.new_temp_file(os.getcwd(), hint='robots')
with contextlib.closing(file):
request = Request(url)
session = self._web_client.session(request)
while not session.done():
wpull.util.truncate_file(file.name)
try:
response = yield from session.start()
yield from session.download(file=file)
except ProtocolError:
self._accept_as_blank(url_info)
return
status_code = response.status_code
if 500 <= status_code <= 599:
raise ServerError('Server returned error for robots.txt.')
if status_code == 200:
self._read_content(response, url_info)
else:
self._accept_as_blank(url_info)
|
def fetch_robots_txt(self, request: Request, file=None)
|
Fetch the robots.txt file for the request.
Coroutine.
| 5.311103 | 4.875911 | 1.089254 |
'''Return whether the request can be fetched.
Args:
request: Request.
file: A file object to where the robots.txt contents are written.
Coroutine.
'''
try:
return self.can_fetch_pool(request)
except NotInPoolError:
pass
yield from self.fetch_robots_txt(request, file=file)
return self.can_fetch_pool(request)
|
def can_fetch(self, request: Request, file=None) -> bool
|
Return whether the request can be fetched.
Args:
request: Request.
file: A file object to where the robots.txt contents are written.
Coroutine.
| 6.85532 | 2.998532 | 2.286226 |
'''Read response and parse the contents into the pool.'''
data = response.body.read(4096)
url_info = original_url_info
try:
self._robots_txt_pool.load_robots_txt(url_info, data)
except ValueError:
_logger.warning(__(
_('Failed to parse {url} for robots exclusion rules. '
'Ignoring.'), url_info.url))
self._accept_as_blank(url_info)
else:
_logger.debug(__('Got a good robots.txt for {0}.',
url_info.url))
|
def _read_content(self, response: Response, original_url_info: URLInfo)
|
Read response and parse the contents into the pool.
| 6.997296 | 5.566297 | 1.257083 |
'''Mark the URL as OK in the pool.'''
_logger.debug(__('Got empty robots.txt for {0}.', url_info.url))
self._robots_txt_pool.load_robots_txt(url_info, '')
|
def _accept_as_blank(self, url_info: URLInfo)
|
Mark the URL as OK in the pool.
| 11.749733 | 7.259795 | 1.618466 |
'''Format the detached DNS information as text.'''
return '\n'.join(itertools.chain(
(self.fetch_date.strftime('%Y%m%d%H%M%S'), ),
(rr.to_text() for rr in self.resource_records),
(),
))
|
def to_text_format(self)
|
Format the detached DNS information as text.
| 9.236078 | 5.649911 | 1.63473 |
'''The first IPv4 address.'''
for info in self._address_infos:
if info.family == socket.AF_INET:
return info
|
def first_ipv4(self) -> Optional[AddressInfo]
|
The first IPv4 address.
| 4.226957 | 4.263265 | 0.991484 |
'''The first IPv6 address.'''
for info in self._address_infos:
if info.family == socket.AF_INET6:
return info
|
def first_ipv6(self) -> Optional[AddressInfo]
|
The first IPv6 address.
| 4.399712 | 4.117081 | 1.068648 |
'''Move the first address to the last position.'''
item = self._address_infos.pop(0)
self._address_infos.append(item)
|
def rotate(self)
|
Move the first address to the last position.
| 11.234889 | 6.316999 | 1.778517 |
'''Resolve hostname.
Args:
host: Hostname.
Returns:
Resolved IP addresses.
Raises:
DNSNotFound if the hostname could not be resolved or
NetworkError if there was an error connecting to DNS servers.
Coroutine.
'''
_logger.debug(__('Lookup address {0}.', host))
try:
host = self.hook_dispatcher.call(PluginFunctions.resolve_dns, host
) or host
except HookDisconnected:
pass
cache_key = (host, self._family)
if self._cache and cache_key in self._cache:
resolve_result = self._cache[cache_key]
_logger.debug(__('Return by cache {0}.', resolve_result))
if self._rotate:
resolve_result.rotate()
return resolve_result
address_infos = []
dns_infos = []
if not self.dns_python_enabled:
families = ()
elif self._family == IPFamilyPreference.any:
families = (socket.AF_INET, socket.AF_INET6)
elif self._family == IPFamilyPreference.ipv4_only:
families = (socket.AF_INET, )
else:
families = (socket.AF_INET6, )
for family in families:
datetime_now = datetime.datetime.utcnow()
try:
answer = yield from self._query_dns(host, family)
except DNSNotFound:
continue
else:
dns_infos.append(DNSInfo(datetime_now, answer.response.answer))
address_infos.extend(self._convert_dns_answer(answer))
if not address_infos:
# Maybe the address is defined in hosts file or mDNS
if self._family == IPFamilyPreference.any:
family = socket.AF_UNSPEC
elif self._family == IPFamilyPreference.ipv4_only:
family = socket.AF_INET
else:
family = socket.AF_INET6
results = yield from self._getaddrinfo(host, family)
address_infos.extend(self._convert_addrinfo(results))
_logger.debug(__('Resolved addresses: {0}.', address_infos))
resolve_result = ResolveResult(address_infos, dns_infos)
if self._cache:
self._cache[cache_key] = resolve_result
self.event_dispatcher.notify(PluginFunctions.resolve_dns_result, host, resolve_result)
if self._rotate:
resolve_result.shuffle()
return resolve_result
|
def resolve(self, host: str) -> ResolveResult
|
Resolve hostname.
Args:
host: Hostname.
Returns:
Resolved IP addresses.
Raises:
DNSNotFound if the hostname could not be resolved or
NetworkError if there was an error connecting to DNS servers.
Coroutine.
| 3.316786 | 2.940119 | 1.128113 |
'''Query DNS using Python.
Coroutine.
'''
record_type = {socket.AF_INET: 'A', socket.AF_INET6: 'AAAA'}[family]
event_loop = asyncio.get_event_loop()
query = functools.partial(
self._dns_resolver.query, host, record_type,
source=self._bind_address)
try:
answer = yield from event_loop.run_in_executor(None, query)
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as error:
# dnspython doesn't raise an instance with a message, so use the
# class name instead.
raise DNSNotFound(
'DNS resolution failed: {error}'
.format(error=wpull.util.get_exception_message(error))
) from error
except dns.exception.DNSException as error:
raise NetworkError(
'DNS resolution error: {error}'
.format(error=wpull.util.get_exception_message(error))
) from error
else:
return answer
|
def _query_dns(self, host: str, family: int=socket.AF_INET) \
-> dns.resolver.Answer
|
Query DNS using Python.
Coroutine.
| 3.183479 | 2.930892 | 1.086181 |
'''Query DNS using system resolver.
Coroutine.
'''
event_loop = asyncio.get_event_loop()
query = event_loop.getaddrinfo(host, 0, family=family,
proto=socket.IPPROTO_TCP)
if self._timeout:
query = asyncio.wait_for(query, self._timeout)
try:
results = yield from query
except socket.error as error:
if error.errno in (
socket.EAI_FAIL,
socket.EAI_NODATA,
socket.EAI_NONAME):
raise DNSNotFound(
'DNS resolution failed: {error}'.format(error=error)
) from error
else:
raise NetworkError(
'DNS resolution error: {error}'.format(error=error)
) from error
except asyncio.TimeoutError as error:
raise NetworkError('DNS resolve timed out.') from error
else:
return results
|
def _getaddrinfo(self, host: str, family: int=socket.AF_UNSPEC) \
-> List[tuple]
|
Query DNS using system resolver.
Coroutine.
| 2.990604 | 2.619635 | 1.141611 |
'''Convert the DNS answer to address info.'''
assert answer.rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA)
if answer.rdtype == dns.rdatatype.A:
family = socket.AF_INET
else:
family = socket.AF_INET6
for record in answer:
ip_address = record.to_text()
if family == socket.AF_INET6:
flow_info, control_id = cls._get_ipv6_info(ip_address)
else:
flow_info = control_id = None
yield AddressInfo(ip_address, family, flow_info, control_id)
|
def _convert_dns_answer(cls, answer: dns.resolver.Answer) \
-> Iterable[AddressInfo]
|
Convert the DNS answer to address info.
| 2.432552 | 2.398297 | 1.014283 |
'''Convert the result list to address info.'''
for result in results:
family = result[0]
address = result[4]
ip_address = address[0]
if family == socket.AF_INET6:
flow_info = address[2]
control_id = address[3]
else:
flow_info = None
control_id = None
yield AddressInfo(ip_address, family, flow_info, control_id)
|
def _convert_addrinfo(cls, results: List[tuple]) -> Iterable[AddressInfo]
|
Convert the result list to address info.
| 3.114991 | 2.92907 | 1.063474 |
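The tuple indices used above follow the shape of ``socket.getaddrinfo()`` results, ``(family, type, proto, canonname, sockaddr)``, where an IPv6 ``sockaddr`` is ``(host, port, flowinfo, scope_id)``. A runnable sketch of the same unpacking:

```python
import socket

def convert_addrinfo(results):
    '''Yield (ip, family, flow_info, scope_id) from getaddrinfo() results.'''
    for family, _type, _proto, _canonname, sockaddr in results:
        ip_address = sockaddr[0]
        if family == socket.AF_INET6:
            flow_info, scope_id = sockaddr[2], sockaddr[3]
        else:
            flow_info = scope_id = None
        yield ip_address, family, flow_info, scope_id

results = socket.getaddrinfo('localhost', 0, proto=socket.IPPROTO_TCP)
for info in convert_addrinfo(results):
    print(info)
```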
'''Extract the flow info and control id.'''
results = socket.getaddrinfo(
ip_address, 0, proto=socket.IPPROTO_TCP,
flags=socket.AI_NUMERICHOST)
flow_info = results[0][4][2]
control_id = results[0][4][3]
return flow_info, control_id
|
def _get_ipv6_info(cls, ip_address: str) -> tuple
|
Extract the flow info and control id.
| 3.914187 | 2.615057 | 1.496788 |
'''Raise FTPServerError if the reply code is not expected.
Args:
action: Label to use in the exception message.
expected_code: Expected 3 digit code.
reply: Reply from the server.
'''
if isinstance(expected_code, int):
expected_codes = (expected_code,)
else:
expected_codes = expected_code
if reply.code not in expected_codes:
raise FTPServerError(
'Failed action {action}: {reply_code} {reply_text}'
.format(action=action, reply_code=reply.code,
reply_text=ascii(reply.text)
),
reply.code
)
|
def raise_if_not_match(cls, action: str,
expected_code: Union[int, Sequence[int]],
reply: Reply)
|
Raise FTPServerError if the reply code is not expected.
Args:
action: Label to use in the exception message.
expected_code: Expected 3 digit code.
reply: Reply from the server.
| 3.266941 | 2.238412 | 1.45949 |
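The only subtle step is normalizing ``expected_code`` so membership testing always runs against a sequence. A standalone sketch with ``ValueError`` standing in for ``FTPServerError``:

```python
from typing import Sequence, Union

def raise_if_not_match(action: str,
                       expected_code: Union[int, Sequence[int]],
                       reply_code: int,
                       reply_text: str = ''):
    '''Raise ValueError unless reply_code is one of the expected codes.'''
    if isinstance(expected_code, int):
        expected_codes = (expected_code,)   # wrap a single code
    else:
        expected_codes = expected_code
    if reply_code not in expected_codes:
        raise ValueError('Failed action {action}: {code} {text}'.format(
            action=action, code=reply_code, text=ascii(reply_text)))

raise_if_not_match('Begin stream', (125, 150), 150)  # ok
raise_if_not_match('Binary mode', 200, 200)          # ok
# raise_if_not_match('Login password', 230, 530)     # would raise ValueError
```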
'''Read the welcome message.
Coroutine.
'''
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Server ready', ReplyCodes.service_ready_for_new_user, reply)
|
def read_welcome_message(self)
|
Read the welcome message.
Coroutine.
| 14.705783 | 11.987078 | 1.226803 |
'''Log in.
Coroutine.
'''
yield from self._control_stream.write_command(Command('USER', username))
reply = yield from self._control_stream.read_reply()
if reply.code == ReplyCodes.user_logged_in_proceed:
return
self.raise_if_not_match(
'Login username', ReplyCodes.user_name_okay_need_password, reply)
yield from self._control_stream.write_command(Command('PASS', password))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Login password', ReplyCodes.user_logged_in_proceed, reply)
|
def login(self, username: str='anonymous', password: str='-wpull-lib@')
|
Log in.
Coroutine.
| 3.792711 | 3.423512 | 1.107842 |
'''Enable passive mode.
Returns:
The address (IP address, port) of the passive port.
Coroutine.
'''
yield from self._control_stream.write_command(Command('PASV'))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Passive mode', ReplyCodes.entering_passive_mode, reply)
try:
return wpull.protocol.ftp.util.parse_address(reply.text)
except ValueError as error:
raise ProtocolError(str(error)) from error
|
def passive_mode(self) -> Tuple[str, int]
|
Enable passive mode.
Returns:
The address (IP address, port) of the passive port.
Coroutine.
| 7.311674 | 5.56839 | 1.313068 |
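``parse_address`` itself is not shown in this table; here is a standalone sketch of parsing the conventional ``227 Entering Passive Mode (h1,h2,h3,h4,p1,p2)`` reply, where the port is split across two bytes:

```python
import re

def parse_pasv_address(reply_text: str):
    '''Parse "227 Entering Passive Mode (h1,h2,h3,h4,p1,p2)" into (ip, port).'''
    match = re.search(r'\((\d+),(\d+),(\d+),(\d+),(\d+),(\d+)\)', reply_text)
    if not match:
        raise ValueError('Could not parse passive reply: ' + repr(reply_text))
    numbers = [int(group) for group in match.groups()]
    ip_address = '.'.join(str(number) for number in numbers[:4])
    port = numbers[4] * 256 + numbers[5]   # high byte, low byte
    return ip_address, port

print(parse_pasv_address('227 Entering Passive Mode (192,168,1,10,19,137)'))
# ('192.168.1.10', 5001)
```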
'''Create and setup a data stream.
This function will set up passive and binary mode and handle
connecting to the data connection.
Args:
connection_factory: A coroutine callback that returns a connection
data_stream_factory: A callback that returns a data stream
Coroutine.
Returns:
DataStream
'''
yield from self._control_stream.write_command(Command('TYPE', 'I'))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)
address = yield from self.passive_mode()
connection = yield from connection_factory(address)
# TODO: unit test for following line for connections that have
# the same port over time but within pool cleaning intervals
connection.reset()
yield from connection.connect()
data_stream = data_stream_factory(connection)
return data_stream
|
def setup_data_stream(
self,
connection_factory: Callable[[tuple], Connection],
data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \
DataStream
|
Create and setup a data stream.
This function will set up passive and binary mode and handle
connecting to the data connection.
Args:
connection_factory: A coroutine callback that returns a connection
data_stream_factory: A callback that returns a data stream
Coroutine.
Returns:
DataStream
| 8.39632 | 5.14132 | 1.633106 |
'''Start sending content on the data stream.
Args:
command: A command that tells the server to send data over the
data connection.
Coroutine.
Returns:
The begin reply.
'''
yield from self._control_stream.write_command(command)
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Begin stream',
(
ReplyCodes.file_status_okay_about_to_open_data_connection,
ReplyCodes.data_connection_already_open_transfer_starting,
),
reply
)
return reply
|
def begin_stream(self, command: Command) -> Reply
|
Start sending content on the data stream.
Args:
command: A command that tells the server to send data over the
data connection.
Coroutine.
Returns:
The begin reply.
| 7.802737 | 4.348727 | 1.794258 |
'''Read from the data stream.
Args:
file: A destination file object or a stream writer.
data_stream: The stream to read from.
Coroutine.
Returns:
Reply: The final reply.
'''
yield from data_stream.read_file(file=file)
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'End stream',
ReplyCodes.closing_data_connection,
reply
)
data_stream.close()
return reply
|
def read_stream(self, file: IO, data_stream: DataStream) -> Reply
|
Read from the data stream.
Args:
file: A destination file object or a stream writer.
data_stream: The stream to read from.
Coroutine.
Returns:
Reply: The final reply.
| 7.571666 | 3.957274 | 1.913354 |
'''Get size of file.
Coroutine.
'''
yield from self._control_stream.write_command(Command('SIZE', filename))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('File size', ReplyCodes.file_status, reply)
try:
return int(reply.text.strip())
except ValueError:
return
|
def size(self, filename: str) -> int
|
Get size of file.
Coroutine.
| 6.991732 | 6.122795 | 1.141918 |
'''Send restart command.
Coroutine.
'''
yield from self._control_stream.write_command(Command('REST', str(offset)))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('Restart', ReplyCodes.requested_file_action_pending_further_information, reply)
|
def restart(self, offset: int)
|
Send restart command.
Coroutine.
| 12.149048 | 9.050671 | 1.342337 |
'''Get the version string of youtube-dl.'''
process = subprocess.Popen(
[exe_path, '--version'],
stdout=subprocess.PIPE
)
version_string = process.communicate()[0]
version_string = version_string.decode().strip()
assert ' ' not in version_string, version_string
return version_string
|
def get_version(exe_path='youtube-dl')
|
Get the version string of youtube-dl.
| 2.672759 | 2.803789 | 0.953267 |
'''Return the path prefix and output template.'''
path = self._file_writer_session.extra_resource_path('.youtube-dl')
if not path:
self._temp_dir = tempfile.TemporaryDirectory(
dir=self._root_path, prefix='tmp-wpull-youtubedl'
)
path = '{}/tmp'.format(self._temp_dir.name)
return path, '{}.%(id)s.%(format_id)s.%(ext)s'.format(path)
|
def _get_output_template(self)
|
Return the path prefix and output template.
| 7.84901 | 6.415105 | 1.22352 |
'''Write the JSON metadata to WARC.
Uses pywb spec.
'''
uri = 'metadata://{}{}'.format(self._item_session.url_record.url_info.authority,
self._item_session.url_record.url_info.resource)
glob_pattern = self._path_prefix + '*.info.json'
filenames = list(glob.glob(glob_pattern))
if not filenames:
_logger.warning(__(
_('Could not find external process metadata file: {filename}'),
filename=glob_pattern
))
return
for filename in filenames:
record = WARCRecord()
record.set_common_fields('metadata', 'application/vnd.youtube-dl_formats+json')
record.fields['WARC-Target-URI'] = uri
record.block_file = open(filename, 'rb')
self._warc_recorder.set_length_and_maybe_checksums(record)
self._warc_recorder.write_record(record)
record.block_file.close()
|
def _write_warc_metadata(self)
|
Write the JSON metadata to WARC.
Uses pywb spec.
| 6.752751 | 5.669173 | 1.191135 |
'''Populate the visits from the CDX into the URL table.'''
if not session.args.warc_dedup:
return
iterable = wpull.warc.format.read_cdx(
session.args.warc_dedup,
encoding=session.args.local_encoding or 'utf-8'
)
missing_url_msg = _('The URL ("a") is missing from the CDX file.')
missing_id_msg = _('The record ID ("u") is missing from the CDX file.')
missing_checksum_msg = \
_('The SHA1 checksum ("k") is missing from the CDX file.')
counter = 0
def visits():
nonlocal counter
checked_fields = False
for record in iterable:
if not checked_fields:
if 'a' not in record:
raise ValueError(missing_url_msg)
if 'u' not in record:
raise ValueError(missing_id_msg)
if 'k' not in record:
raise ValueError(missing_checksum_msg)
checked_fields = True
yield record['a'], record['u'], record['k']
counter += 1
url_table = session.factory['URLTable']
url_table.add_visits(visits())
_logger.info(__(
gettext.ngettext(
'Loaded {num} record from CDX file.',
'Loaded {num} records from CDX file.',
counter
),
num=counter
))
|
def process(self, session: AppSession)
|
Populate the visits from the CDX into the URL table.
| 4.145125 | 3.479849 | 1.19118 |
'''Check if lxml supports the specified encoding.
Returns:
str, None
'''
# XXX: Workaround lxml not liking utf-16-le
try:
lxml.html.HTMLParser(encoding=encoding)
except LookupError:
encoding = encoding.replace('-', '')
else:
return encoding
try:
lxml.html.HTMLParser(encoding=encoding)
except LookupError:
encoding = encoding.replace('_', '')
else:
return encoding
try:
lxml.html.HTMLParser(encoding=encoding)
except LookupError:
pass
else:
return encoding
|
def to_lxml_encoding(encoding)
|
Check if lxml supports the specified encoding.
Returns:
str, None
| 3.150594 | 2.643881 | 1.191655 |
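The probe relies on ``lxml.html.HTMLParser`` raising ``LookupError`` for encoding names libxml2 does not recognize. The same check factored into a small helper (a sketch, assuming lxml is installed):

```python
import lxml.html

def lxml_supports_encoding(encoding: str) -> bool:
    '''Return whether libxml2 accepts the given encoding name.'''
    try:
        lxml.html.HTMLParser(encoding=encoding)
    except LookupError:
        return False
    return True

for candidate in ('utf-8', 'utf-16-le', 'not-a-real-encoding'):
    print(candidate, lxml_supports_encoding(candidate))
```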
'''Return an iterator of elements found in the document.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
target_class: A class to be used for target parsing.
parser_type (str): The type of parser to use. Accepted values:
``html``, ``xhtml``, ``xml``.
Returns:
iterator: Each item is an element from
:mod:`.document.htmlparse.element`
'''
if encoding:
lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
else:
lxml_encoding = encoding
elements = []
callback_func = elements.append
target = target_class(callback_func)
if parser_type == 'html':
parser = lxml.html.HTMLParser(
encoding=lxml_encoding, target=target
)
elif parser_type == 'xhtml':
parser = lxml.html.XHTMLParser(
encoding=lxml_encoding, target=target, recover=True
)
else:
parser = lxml.etree.XMLParser(
encoding=lxml_encoding, target=target, recover=True
)
if parser_type == 'html':
# XXX: Force libxml2 to do full read in case of early "</html>"
# See https://github.com/chfoo/wpull/issues/104
# See https://bugzilla.gnome.org/show_bug.cgi?id=727935
for dummy in range(3):
parser.feed('<html>'.encode(encoding) if encoding else b'<html>')  # avoid encode(None) when no encoding was given
while True:
data = file.read(self.BUFFER_SIZE)
if not data:
break
parser.feed(data)
for element in elements:
yield element
del elements[:]
parser.close()
for element in elements:
yield element
|
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget,
parser_type='html')
|
Return an iterator of elements found in the document.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
target_class: A class to be used for target parsing.
parser_type (str): The type of parser to use. Accepted values:
``html``, ``xhtml``, ``xml``.
Returns:
iterator: Each item is an element from
:mod:`.document.htmlparse.element`
| 3.465718 | 2.532896 | 1.368283 |
'''Get the doctype from the document.
Returns:
str, None
'''
if encoding:
lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
else:
lxml_encoding = encoding
try:
parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True)
tree = lxml.etree.parse(
io.BytesIO(wpull.util.peek_file(file)), parser=parser
)
if tree.getroot() is not None:
return tree.docinfo.doctype
except lxml.etree.LxmlError:
pass
|
def parse_doctype(cls, file, encoding=None)
|
Get the doctype from the document.
Returns:
str, None
| 3.72657 | 3.319064 | 1.122777 |
'''Get the suitable parser type for the document.
Returns:
str
'''
is_xml = XMLDetector.is_file(file)
doctype = cls.parse_doctype(file, encoding=encoding) or ''
if not doctype and is_xml:
return 'xml'
if 'XHTML' in doctype:
return 'xhtml'
return 'html'
|
def detect_parser_type(cls, file, encoding=None)
|
Get the suitable parser type for the document.
Returns:
str
| 5.235341 | 3.974987 | 1.317071 |
'''Return a new temporary file.'''
return tempfile.NamedTemporaryFile(
prefix='tmp-wpull-{0}-'.format(hint), suffix='.tmp', dir=directory)
|
def new_temp_file(directory=None, hint='')
|
Return a new temporary file.
| 6.241229 | 6.400947 | 0.975048 |
'''Return the content of the file.
If this function is invoked, the contents of the entire file are read
and cached.
Returns:
``bytes``: The entire content of the file.
'''
if not self._content_data:
if is_seekable(self.file):
with wpull.util.reset_file_offset(self.file):
self._content_data = self.file.read()
else:
self._content_data = self.file.read()
return self._content_data
|
def content(self)
|
Return the content of the file.
If this function is invoked, the contents of the entire file are read
and cached.
Returns:
``bytes``: The entire content of the file.
| 4.799145 | 2.645949 | 1.813771 |
'''Return the size of the file.'''
try:
return os.fstat(self.file.fileno()).st_size
except io.UnsupportedOperation:
pass
if is_seekable(self.file):
with wpull.util.reset_file_offset(self.file):
self.file.seek(0, os.SEEK_END)
return self.file.tell()
raise OSError('Unsupported operation.')
|
def size(self)
|
Return the size of the file.
| 4.310344 | 4.142633 | 1.040484 |
'''Convert the body to a :class:`dict`.
Returns:
dict: The items are:
* ``filename`` (string, None): The path of the file.
* ``length`` (int, None): The size of the file.
'''
try:
name = self.file.name
except AttributeError:
name = None
try:
size = self.size()
except OSError:
size = None
return {
'filename': name,
'length': size,
'content_size': size,
}
|
def to_dict(self)
|
Convert the body to a :class:`dict`.
Returns:
dict: The items are:
* ``filename`` (string, None): The path of the file.
* ``length`` (int, None): The size of the file.
| 4.10712 | 2.215971 | 1.853418 |
# Taken from the session docs.
session = self._session_maker()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
|
def _session(self)
|
Provide a transactional scope around a series of operations.
| 4.379582 | 2.975161 | 1.472049 |
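This is the transactional-scope pattern from the SQLAlchemy session documentation: commit on success, roll back on any exception, always close. A self-contained sketch against an in-memory SQLite engine (illustrative names, not wpull's models):

```python
import contextlib

import sqlalchemy
from sqlalchemy.orm import sessionmaker

engine = sqlalchemy.create_engine('sqlite://')
Session = sessionmaker(bind=engine)

@contextlib.contextmanager
def session_scope():
    '''Provide a transactional scope around a series of operations.'''
    session = Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

with session_scope() as session:
    session.execute(sqlalchemy.text('SELECT 1'))
```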
'''Set SQLite pragmas.
Write-ahead logging, synchronous=NORMAL is used.
'''
_logger.debug('Setting pragmas.')
connection.execute('PRAGMA journal_mode=WAL')
connection.execute('PRAGMA synchronous=NORMAL')
|
def _apply_pragmas_callback(cls, connection, record)
|
Set SQLite pragmas.
Write-ahead logging, synchronous=NORMAL is used.
| 8.549463 | 2.964961 | 2.8835 |
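A callback with this signature fits SQLAlchemy's ``connect`` engine event, which fires for every new DBAPI connection. A sketch of that wiring (illustrative engine URL, not wpull's setup):

```python
import sqlalchemy
from sqlalchemy import event

engine = sqlalchemy.create_engine('sqlite:///wpull.db')

@event.listens_for(engine, 'connect')
def apply_pragmas(dbapi_connection, connection_record):
    # Write-ahead logging with synchronous=NORMAL trades a little durability
    # for much better write performance on SQLite.
    dbapi_connection.execute('PRAGMA journal_mode=WAL')
    dbapi_connection.execute('PRAGMA synchronous=NORMAL')

with engine.connect() as connection:
    pass  # pragmas have been applied to the underlying DBAPI connection
```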
'''Return whether a parser has been created for the URL.'''
key = self.url_info_key(url_info)
return key in self._parsers
|
def has_parser(self, url_info: URLInfo)
|
Return whether a parser has been created for the URL.
| 7.123025 | 3.858423 | 1.846098 |
'''Return whether the URL can be fetched.'''
key = self.url_info_key(url_info)
parser = self._parsers[key]
return parser.is_allowed(user_agent, url_info.url)
|
def can_fetch(self, url_info: URLInfo, user_agent: str)
|
Return whether the URL can be fetched.
| 6.468658 | 5.188527 | 1.246723 |
'''Load the robots.txt file.'''
key = self.url_info_key(url_info)
parser = robotexclusionrulesparser.RobotExclusionRulesParser()
parser.parse(text)
self._parsers[key] = parser
|
def load_robots_txt(self, url_info: URLInfo, text: str)
|
Load the robots.txt file.
| 5.507194 | 4.73897 | 1.162108 |
'''Create demux document scraper.'''
session.factory.new(
'DemuxDocumentScraper', cls._build_document_scrapers(session))
|
def _build_demux_document_scraper(cls, session: AppSession)
|
Create demux document scraper.
| 15.127653 | 10.777643 | 1.403614 |
'''Create the document scrapers.
Returns:
A list of document scrapers
'''
html_parser = session.factory['HTMLParser']
element_walker = session.factory.new('ElementWalker')
scrapers = [
session.factory.new(
'HTMLScraper',
html_parser,
element_walker,
followed_tags=session.args.follow_tags,
ignored_tags=session.args.ignore_tags,
only_relative=session.args.relative,
robots=session.args.robots,
encoding_override=session.args.remote_encoding,
),
]
if 'css' in session.args.link_extractors:
css_scraper = session.factory.new(
'CSSScraper',
encoding_override=session.args.remote_encoding,
)
scrapers.append(css_scraper)
element_walker.css_scraper = css_scraper
if 'javascript' in session.args.link_extractors:
javascript_scraper = session.factory.new(
'JavaScriptScraper',
encoding_override=session.args.remote_encoding,
)
scrapers.append(javascript_scraper)
element_walker.javascript_scraper = javascript_scraper
if session.args.sitemaps:
scrapers.append(session.factory.new(
'SitemapScraper', html_parser,
encoding_override=session.args.remote_encoding,
))
return scrapers
|
def _build_document_scrapers(cls, session: AppSession)
|
Create the document scrapers.
Returns:
A list of document scrapers
| 2.451197 | 2.326387 | 1.05365 |
'''Create the request factory.
A request factory is any callable object that returns a
:class:`.http.Request`. The callable must accept the same
arguments as Request.
Returns:
A callable object
'''
def request_factory(*args, **kwargs):
request = session.factory.class_map['Request'](*args, **kwargs)
user_agent = session.args.user_agent or session.default_user_agent
request.fields['User-Agent'] = user_agent
if session.args.referer:
request.fields['Referer'] = session.args.referer
for header_string in session.args.header:
request.fields.parse(header_string)
if session.args.http_compression:
request.fields['Accept-Encoding'] = 'gzip, deflate'
if session.args.no_cache:
request.fields['Cache-Control'] = 'no-cache, must-revalidate'
request.fields['Pragma'] = 'no-cache'
return request
return request_factory
|
def _build_request_factory(cls, session: AppSession)
|
Create the request factory.
A request factory is any callable object that returns a
:class:`.http.Request`. The callable must accept the same
arguments as Request.
Returns:
A callable object
| 3.195158 | 2.325691 | 1.373853 |
'''Create the HTTP client.
Returns:
Client: An instance of :class:`.http.Client`.
'''
# TODO:
# recorder = self._build_recorder()
stream_factory = functools.partial(
HTTPStream,
ignore_length=session.args.ignore_length,
keep_alive=session.args.http_keep_alive)
return session.factory.new(
'HTTPClient',
connection_pool=session.factory['ConnectionPool'],
stream_factory=stream_factory
)
|
def _build_http_client(cls, session: AppSession)
|
Create the HTTP client.
Returns:
Client: An instance of :class:`.http.Client`.
| 7.477415 | 5.584149 | 1.339043 |
'''Build Web Client.'''
cookie_jar = cls._build_cookie_jar(session)
http_client = cls._build_http_client(session)
redirect_factory = functools.partial(
session.factory.class_map['RedirectTracker'],
max_redirects=session.args.max_redirect
)
return session.factory.new(
'WebClient',
http_client,
redirect_tracker_factory=redirect_factory,
cookie_jar=cookie_jar,
request_factory=cls._build_request_factory(session),
)
|
def _build_web_client(cls, session: AppSession)
|
Build Web Client.
| 4.448474 | 4.186852 | 1.062487 |
'''Build the cookie jar'''
if not session.args.cookies:
return
if session.args.load_cookies or session.args.save_cookies:
session.factory.set('CookieJar', BetterMozillaCookieJar)
cookie_jar = session.factory.new('CookieJar')
if session.args.load_cookies:
cookie_jar.load(session.args.load_cookies, ignore_discard=True)
else:
cookie_jar = session.factory.new('CookieJar')
policy = session.factory.new('CookiePolicy', cookie_jar=cookie_jar)
cookie_jar.set_policy(policy)
_logger.debug(__('Loaded cookies: {0}', list(cookie_jar)))
cookie_jar_wrapper = session.factory.new(
'CookieJarWrapper',
cookie_jar,
save_filename=session.args.save_cookies,
keep_session_cookies=session.args.keep_session_cookies,
)
return cookie_jar_wrapper
|
def _build_cookie_jar(cls, session: AppSession)
|
Build the cookie jar
| 3.254334 | 3.211534 | 1.013327 |
'''Build FTP client.'''
return session.factory.new(
'FTPClient',
connection_pool=session.factory['ConnectionPool'],
# TODO: recorder
# recorder=session.factory['DemuxRecorder'],
)
|
def _build_ftp_client(cls, session: AppSession)
|
Build FTP client.
| 13.932327 | 12.159746 | 1.145775 |
'''Build MITM proxy server.'''
args = session.args
if not (args.phantomjs or args.youtube_dl or args.proxy_server):
return
proxy_server = session.factory.new(
'HTTPProxyServer',
session.factory['HTTPClient'],
)
cookie_jar = session.factory.get('CookieJarWrapper')
proxy_coprocessor = session.factory.new(
'ProxyCoprocessor',
session
)
proxy_socket = tornado.netutil.bind_sockets(
session.args.proxy_server_port,
address=session.args.proxy_server_address
)[0]
proxy_port = proxy_socket.getsockname()[1]
proxy_async_server = yield from asyncio.start_server(proxy_server, sock=proxy_socket)
session.async_servers.append(proxy_async_server)
session.proxy_server_port = proxy_port
|
def process(self, session: AppSession)
|
Build MITM proxy server.
| 4.955908 | 4.28089 | 1.157682 |
'''Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`.
'''
web_processor = cls._build_web_processor(session)
ftp_processor = cls._build_ftp_processor(session)
delegate_processor = session.factory.new('Processor')
delegate_processor.register('http', web_processor)
delegate_processor.register('https', web_processor)
delegate_processor.register('ftp', ftp_processor)
return delegate_processor
|
def _build_processor(cls, session: AppSession)
|
Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`.
| 4.273889 | 2.953026 | 1.447291 |
'''Build WebProcessor.'''
args = session.args
url_filter = session.factory['DemuxURLFilter']
document_scraper = session.factory['DemuxDocumentScraper']
file_writer = session.factory['FileWriter']
post_data = cls._get_post_data(session.args)
web_client = session.factory['WebClient']
robots_txt_checker = cls._build_robots_txt_checker(session)
http_username = args.user or args.http_user
http_password = args.password or args.http_password
ftp_username = args.user or args.ftp_user
ftp_password = args.password or args.ftp_password
fetch_rule = session.factory.new(
'FetchRule',
url_filter=url_filter, robots_txt_checker=robots_txt_checker,
http_login=(http_username, http_password),
ftp_login=(ftp_username, ftp_password),
duration_timeout=args.session_timeout,
)
waiter = session.factory.new(
'Waiter',
wait=args.wait,
random_wait=args.random_wait,
max_wait=args.waitretry)
result_rule = session.factory.new(
'ResultRule',
ssl_verification=args.check_certificate,
retry_connrefused=args.retry_connrefused,
retry_dns_error=args.retry_dns_error,
waiter=waiter,
statistics=session.factory['Statistics'],
)
processing_rule = session.factory.new(
'ProcessingRule',
fetch_rule,
document_scraper=document_scraper,
sitemaps=session.args.sitemaps,
url_rewriter=session.factory.get('URLRewriter'),
)
web_processor_fetch_params = session.factory.new(
'WebProcessorFetchParams',
post_data=post_data,
strong_redirects=args.strong_redirects,
content_on_error=args.content_on_error,
)
processor = session.factory.new(
'WebProcessor',
web_client,
web_processor_fetch_params,
)
return processor
|
def _build_web_processor(cls, session: AppSession)
|
Build WebProcessor.
| 3.371771 | 3.322777 | 1.014745 |
'''Build FTPProcessor.'''
ftp_client = session.factory['FTPClient']
fetch_params = session.factory.new(
'FTPProcessorFetchParams',
remove_listing=session.args.remove_listing,
retr_symlinks=session.args.retr_symlinks,
preserve_permissions=session.args.preserve_permissions,
glob=session.args.glob,
)
return session.factory.new(
'FTPProcessor',
ftp_client,
fetch_params,
)
|
def _build_ftp_processor(cls, session: AppSession)
|
Build FTPProcessor.
| 4.733621 | 4.367434 | 1.083845 |
'''Return the post data.'''
if args.post_data:
return args.post_data
elif args.post_file:
return args.post_file.read()
|
def _get_post_data(cls, args)
|
Return the post data.
| 4.028565 | 3.517935 | 1.145151 |
'''Build robots.txt checker.'''
if session.args.robots:
robots_txt_pool = session.factory.new('RobotsTxtPool')
robots_txt_checker = session.factory.new(
'RobotsTxtChecker',
web_client=session.factory['WebClient'],
robots_txt_pool=robots_txt_pool
)
return robots_txt_checker
|
def _build_robots_txt_checker(cls, session: AppSession)
|
Build robots.txt checker.
| 4.323327 | 4.059565 | 1.064973 |
'''Build the proxy server and the PhantomJS client, controller, and coprocessor.'''
page_settings = {}
default_headers = NameValueRecord()
for header_string in session.args.header:
default_headers.parse(header_string)
# Since we can only pass a one-to-one mapping to PhantomJS,
# we put these last since NameValueRecord.items() will use only the
# first value added for each key.
default_headers.add('Accept-Language', '*')
if not session.args.http_compression:
default_headers.add('Accept-Encoding', 'identity')
default_headers = dict(default_headers.items())
if session.args.read_timeout:
page_settings['resourceTimeout'] = session.args.read_timeout * 1000
page_settings['userAgent'] = session.args.user_agent \
or session.default_user_agent
# Test early for executable
wpull.driver.phantomjs.get_version(session.args.phantomjs_exe)
phantomjs_params = PhantomJSParams(
wait_time=session.args.phantomjs_wait,
num_scrolls=session.args.phantomjs_scroll,
smart_scroll=session.args.phantomjs_smart_scroll,
snapshot=session.args.phantomjs_snapshot,
custom_headers=default_headers,
page_settings=page_settings,
load_time=session.args.phantomjs_max_time,
)
extra_args = [
'--proxy',
'{}:{}'.format(session.args.proxy_server_address, proxy_port),
'--ignore-ssl-errors=true'
]
phantomjs_driver_factory = functools.partial(
session.factory.class_map['PhantomJSDriver'],
exe_path=session.args.phantomjs_exe,
extra_args=extra_args,
)
phantomjs_coprocessor = session.factory.new(
'PhantomJSCoprocessor',
phantomjs_driver_factory,
session.factory['ProcessingRule'],
phantomjs_params,
root_path=session.args.directory_prefix,
warc_recorder=session.factory.get('WARCRecorder'),
)
return phantomjs_coprocessor
|
def _build_phantomjs_coprocessor(cls, session: AppSession, proxy_port: int)
|
Build the proxy server and the PhantomJS client, controller, and coprocessor.
| 4.543847 | 4.127168 | 1.10096 |
'''Build youtube-dl coprocessor.'''
# Test early for executable
wpull.processor.coprocessor.youtubedl.get_version(session.args.youtube_dl_exe)
coprocessor = session.factory.new(
'YoutubeDlCoprocessor',
session.args.youtube_dl_exe,
(session.args.proxy_server_address, proxy_port),
root_path=session.args.directory_prefix,
user_agent=session.args.user_agent or session.default_user_agent,
warc_recorder=session.factory.get('WARCRecorder'),
inet_family=session.args.inet_family,
# Proxy will always present an invalid MITM cert
#check_certificate=session.args.check_certificate
check_certificate=False
)
return coprocessor
|
def _build_youtube_dl_coprocessor(cls, session: AppSession, proxy_port: int)
|
Build youtube-dl coprocessor.
| 7.809433 | 7.714942 | 1.012248 |
'''Put the application together.
'''
pipelines = self._build_pipelines()
self._factory.new('Application', pipelines)
return self._factory['Application']
|
def build(self) -> Application
|
Put the application together.
| 17.324858 | 13.666281 | 1.267708 |
'''Given the hints, return whether the document is supported.
Args:
file: A file object containing the document.
request (:class:`.http.request.Request`): An HTTP request.
response (:class:`.http.request.Response`): An HTTP response.
url_info (:class:`.url.URLInfo`): A URLInfo.
Returns:
bool: If True, the reader should be able to read it.
'''
tests = (
(response, cls.is_response),
(file, cls.is_file),
(request, cls.is_request),
(url_info, cls.is_url)
)
for instance, method in tests:
if instance:
try:
result = method(instance)
except NotImplementedError:
pass
else:
if result:
return True
elif result is VeryFalse:
return VeryFalse
|
def is_supported(cls, file=None, request=None, response=None,
url_info=None)
|
Given the hints, return whether the document is supported.
Args:
file: A file object containing the document.
request (:class:`.http.request.Request`): An HTTP request.
response (:class:`.http.request.Response`): An HTTP response.
url_info (:class:`.url.URLInfo`): A URLInfo.
Returns:
bool: If True, the reader should be able to read it.
| 3.759959 | 2.134978 | 1.761123 |
'''Return the links.
This function is a convenience function for calling :meth:`iter_text`
and returning only the links.
'''
if context:
return [item for item in self.iter_text(file, encoding) if item[1]]
else:
return [item[0] for item in self.iter_text(file, encoding) if item[1]]
|
def iter_links(self, file, encoding=None, context=False)
|
Return the links.
This function is a convenience function for calling :meth:`iter_text`
and returning only the links.
| 3.580039 | 2.108466 | 1.697935 |
'''Log the final statistics to the user.'''
time_length = datetime.timedelta(
seconds=int(stats.stop_time - stats.start_time)
)
file_size = wpull.string.format_size(stats.size)
if stats.bandwidth_meter.num_samples:
speed = stats.bandwidth_meter.speed()
if human_format_speed:
speed_size_str = wpull.string.format_size(speed)
else:
speed_size_str = '{:.1f} b'.format(speed * 8)
else:
speed_size_str = _('-- B')
_logger.info(_('FINISHED.'))
_logger.info(__(
_(
'Duration: {preformatted_timedelta}. '
'Speed: {preformatted_speed_size}/s.'
),
preformatted_timedelta=time_length,
preformatted_speed_size=speed_size_str,
))
_logger.info(__(
gettext.ngettext(
'Downloaded: {num_files} file, {preformatted_file_size}.',
'Downloaded: {num_files} files, {preformatted_file_size}.',
stats.files
),
num_files=stats.files,
preformatted_file_size=file_size
))
if stats.is_quota_exceeded:
_logger.info(_('Download quota exceeded.'))
|
def _print_stats(cls, stats: Statistics, human_format_speed: bool=True)
|
Log the final statistics to the user.
| 3.485043 | 3.255297 | 1.070576 |
'''Return whether a content body is not expected.'''
if 'Content-Length' not in response.fields \
and 'Transfer-Encoding' not in response.fields \
and (
response.status_code in no_content_codes
or request.method.upper() == 'HEAD'
):
return True
else:
return False
|
def is_no_body(request, response, no_content_codes=DEFAULT_NO_CONTENT_CODES)
|
Return whether a content body is not expected.
| 3.932461 | 3.283056 | 1.197805 |
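The rule: no ``Content-Length``, no ``Transfer-Encoding``, and either a no-content status code or a ``HEAD`` request means no body follows. A standalone sketch over plain values, with the conventional 1xx/204/304 set assumed for the default codes:

```python
DEFAULT_NO_CONTENT_CODES = frozenset(range(100, 200)) | frozenset((204, 304))

def is_no_body(method, status_code, response_fields,
               no_content_codes=DEFAULT_NO_CONTENT_CODES):
    '''Return whether a content body is not expected.'''
    return ('Content-Length' not in response_fields
            and 'Transfer-Encoding' not in response_fields
            and (status_code in no_content_codes
                 or method.upper() == 'HEAD'))

print(is_no_body('GET', 304, {}))                         # True
print(is_no_body('HEAD', 200, {}))                        # True
print(is_no_body('GET', 200, {'Content-Length': '512'}))  # False
```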
'''Send the request's HTTP status line and header fields.
This class will automatically connect the connection if the
connection is closed.
Coroutine.
'''
_logger.debug('Sending headers.')
if hasattr(request, 'prepare_for_send'):
request.prepare_for_send(full_url=full_url)
if self._ignore_length:
request.fields['Connection'] = 'close'
data = request.to_bytes()
self._data_event_dispatcher.notify_write(data)
# XXX: Connection lost is raised too early on Python 3.2, 3.3 so
# don't flush but check for connection closed on reads
yield from self._connection.write(data, drain=False)
|
def write_request(self, request, full_url=False)
|
Send the request's HTTP status line and header fields.
This class will automatically open the connection if the
connection is closed.
Coroutine.
| 9.886933 | 6.343119 | 1.558686 |
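A sketch of the general shape of the bytes that ``request.to_bytes()`` puts on the wire: a status line, header fields, and a blank line, CRLF-delimited. The helper name and exact formatting are illustrative, not wpull's implementation.
def request_to_bytes(method, resource, headers):
    # Build the request line, one "Name: value" line per header field, and
    # terminate the header block with an empty line.
    lines = ['{} {} HTTP/1.1'.format(method, resource)]
    lines.extend('{}: {}'.format(name, value) for name, value in headers)
    return ('\r\n'.join(lines) + '\r\n\r\n').encode('ascii')

data = request_to_bytes('GET', '/index.html',
                        [('Host', 'example.com'), ('Connection', 'close')])
print(data)
# b'GET /index.html HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n'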
'''Send the request's content body.
Coroutine.
'''
_logger.debug('Sending body.')
file_is_async = (asyncio.iscoroutine(file.read) or
asyncio.iscoroutinefunction(file.read))
_logger.debug(__('Body is async: {0}', file_is_async))
if length is not None:
bytes_left = length
while True:
if length is not None:
if bytes_left <= 0:
break
read_size = min(bytes_left, self._read_size)
else:
read_size = self._read_size
if file_is_async:
data = yield from file.read(read_size)
else:
data = file.read(read_size)
if not data:
break
self._data_event_dispatcher.notify_write(data)
if bytes_left <= self._read_size:
# XXX: Connection lost is raised too early on Python 3.2, 3.3
# so don't flush on the last chunk but check for connection
# closed on reads
drain = False
else:
drain = True
yield from self._connection.write(data, drain=drain)
if length is not None:
bytes_left -= len(data)
|
def write_body(self, file, length=None)
|
Send the request's content body.
Coroutine.
| 4.017354 | 3.713218 | 1.081906 |
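A plain synchronous version of the bounded copy loop above, without the asyncio plumbing or drain handling: stop after ``length`` bytes when a length is given, otherwise copy until the file is exhausted. ``write`` stands in for the connection.
import io

def copy_body(file, write, length=None, read_size=4096):
    bytes_left = length
    while True:
        if bytes_left is not None:
            if bytes_left <= 0:
                break
            data = file.read(min(bytes_left, read_size))
        else:
            data = file.read(read_size)
        if not data:
            break
        write(data)
        if bytes_left is not None:
            bytes_left -= len(data)

sent = bytearray()
copy_body(io.BytesIO(b'x' * 10000), sent.extend, length=8192)
print(len(sent))  # 8192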
'''Read the response's HTTP status line and header fields.
Coroutine.
'''
_logger.debug('Reading header.')
if response is None:
response = Response()
header_lines = []
bytes_read = 0
while True:
try:
data = yield from self._connection.readline()
except ValueError as error:
raise ProtocolError(
'Invalid header: {0}'.format(error)) from error
self._data_event_dispatcher.notify_read(data)
if not data.endswith(b'\n'):
raise NetworkError('Connection closed.')
elif data in (b'\r\n', b'\n'):
break
header_lines.append(data)
assert data.endswith(b'\n')
bytes_read += len(data)
if bytes_read > 32768:
raise ProtocolError('Header too big.')
if not header_lines:
raise ProtocolError('No header received.')
response.parse(b''.join(header_lines))
return response
|
def read_response(self, response=None)
|
Read the response's HTTP status line and header fields.
Coroutine.
| 3.702469 | 3.219682 | 1.149949 |
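A standalone sketch of the header-reading guard rails: a truncated line means the connection died, a bare CRLF ends the header block, and an overall size cap rejects absurd headers. ``io.BytesIO`` stands in for the connection, and ``ValueError`` stands in for the protocol/network errors raised above.
import io

def read_header_lines(stream, limit=32768):
    lines, total = [], 0
    while True:
        line = stream.readline()
        if not line.endswith(b'\n'):
            raise ValueError('Connection closed.')
        if line in (b'\r\n', b'\n'):
            return b''.join(lines)
        lines.append(line)
        total += len(line)
        if total > limit:
            raise ValueError('Header too big.')

raw = b'HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello'
print(read_header_lines(io.BytesIO(raw)))
# b'HTTP/1.1 200 OK\r\nContent-Length: 5\r\n'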
'''Read the response's content body.
Coroutine.
'''
if is_no_body(request, response):
return
if not raw:
self._setup_decompressor(response)
read_strategy = self.get_read_strategy(response)
if self._ignore_length and read_strategy == 'length':
read_strategy = 'close'
if read_strategy == 'chunked':
yield from self._read_body_by_chunk(response, file, raw=raw)
elif read_strategy == 'length':
yield from self._read_body_by_length(response, file)
else:
yield from self._read_body_until_close(response, file)
should_close = wpull.protocol.http.util.should_close(
request.version, response.fields.get('Connection'))
if not self._keep_alive or should_close:
_logger.debug('Not keep-alive. Closing connection.')
self.close()
|
def read_body(self, request, response, file=None, raw=False)
|
Read the response's content body.
Coroutine.
| 4.284655 | 3.953483 | 1.083767 |
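For the keep-alive decision at the end of ``read_body``, the usual HTTP rule looks roughly like the sketch below: HTTP/1.0 closes unless the peer asked for keep-alive, HTTP/1.1 stays open unless told to close. This is a restatement of the common convention, not necessarily the exact behaviour of ``wpull.protocol.http.util.should_close``.
def should_close(http_version, connection_field):
    connection = (connection_field or '').lower()
    if http_version == 'HTTP/1.0':
        return 'keep-alive' not in connection
    return 'close' in connection

print(should_close('HTTP/1.1', None))      # False
print(should_close('HTTP/1.0', None))      # True
print(should_close('HTTP/1.1', 'close'))   # True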
'''Read the response until the connection closes.
Coroutine.
'''
_logger.debug('Reading body until close.')
file_is_async = hasattr(file, 'drain')
while True:
data = yield from self._connection.read(self._read_size)
if not data:
break
self._data_event_dispatcher.notify_read(data)
content_data = self._decompress_data(data)
if file:
file.write(content_data)
if file_is_async:
yield from file.drain()
content_data = self._flush_decompressor()
if file:
file.write(content_data)
if file_is_async:
yield from file.drain()
|
def _read_body_until_close(self, response, file)
|
Read the response until the connection closes.
Coroutine.
| 4.015377 | 3.499984 | 1.147256 |
'''Read the response body as specified by the content length.
Coroutine.
'''
_logger.debug('Reading body by length.')
file_is_async = hasattr(file, 'drain')
try:
body_size = int(response.fields['Content-Length'])
if body_size < 0:
raise ValueError('Content length cannot be negative.')
except ValueError as error:
_logger.warning(__(
_('Invalid content length: {error}'), error=error
))
yield from self._read_body_until_close(response, file)
return
bytes_left = body_size
while bytes_left > 0:
data = yield from self._connection.read(self._read_size)
if not data:
break
bytes_left -= len(data)
if bytes_left < 0:
data = data[:bytes_left]
_logger.warning(_('Content overrun.'))
self.close()
self._data_event_dispatcher.notify_read(data)
content_data = self._decompress_data(data)
if file:
file.write(content_data)
if file_is_async:
yield from file.drain()
if bytes_left > 0:
raise NetworkError('Connection closed.')
content_data = self._flush_decompressor()
if file and content_data:
file.write(content_data)
if file_is_async:
yield from file.drain()
|
def _read_body_by_length(self, response, file)
|
Read the response body as specified by the content length.
Coroutine.
| 3.733983 | 3.394757 | 1.099927 |
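A standalone sketch of the overrun trim used above: when the peer sends more than ``Content-Length`` promised, the remainder goes negative and the negative slice drops the excess bytes. ``ConnectionError`` stands in for wpull's ``NetworkError``.
import io

def read_body_by_length(stream, body_size, read_size=4096):
    out = bytearray()
    bytes_left = body_size
    while bytes_left > 0:
        data = stream.read(read_size)
        if not data:
            raise ConnectionError('Connection closed early.')
        bytes_left -= len(data)
        if bytes_left < 0:
            data = data[:bytes_left]   # negative index trims the overrun
        out.extend(data)
    return bytes(out)

print(read_body_by_length(io.BytesIO(b'hello world, extra junk'), 11))
# b'hello world'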
'''Read the response body using chunked transfer encoding.
Coroutine.
'''
reader = ChunkedTransferReader(self._connection)
file_is_async = hasattr(file, 'drain')
while True:
chunk_size, data = yield from reader.read_chunk_header()
self._data_event_dispatcher.notify_read(data)
if file and raw:
file.write(data)
if not chunk_size:
break
while True:
content, data = yield from reader.read_chunk_body()
self._data_event_dispatcher.notify_read(data)
if not content:
if file and raw:
file.write(data)
break
content = self._decompress_data(content)
if file:
file.write(content)
if file_is_async:
yield from file.drain()
content = self._flush_decompressor()
if file:
file.write(content)
if file_is_async:
yield from file.drain()
trailer_data = yield from reader.read_trailer()
self._data_event_dispatcher.notify_read(trailer_data)
if file and raw:
file.write(trailer_data)
if file_is_async:
yield from file.drain()
response.fields.parse(trailer_data)
|
def _read_body_by_chunk(self, response, file, raw=False)
|
Read the response body using chunked transfer encoding.
Coroutine.
| 3.16885 | 2.909384 | 1.089182 |
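A minimal chunked-transfer decoder for a fully buffered body, shown only to make the wire format concrete; wpull's ``ChunkedTransferReader`` does the same job incrementally against a live connection and also parses trailer fields.
import io

def decode_chunked(stream):
    body = bytearray()
    while True:
        size_line = stream.readline()                  # e.g. b'5\r\n'
        chunk_size = int(size_line.split(b';')[0], 16)  # ignore chunk extensions
        if chunk_size == 0:
            break
        body.extend(stream.read(chunk_size))
        stream.readline()                              # consume the trailing CRLF
    # Anything after the zero-size chunk is the (possibly empty) trailer.
    return bytes(body), stream.read()

raw = b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n'
print(decode_chunked(io.BytesIO(raw)))
# (b'hello world', b'\r\n')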
'''Return the appropriate algorithm for reading the response.
Returns:
str: ``chunked``, ``length``, ``close``.
'''
chunked_match = re.match(
r'chunked($|;)',
response.fields.get('Transfer-Encoding', '')
)
if chunked_match:
return 'chunked'
elif 'Content-Length' in response.fields:
return 'length'
else:
return 'close'
|
def get_read_strategy(cls, response)
|
Return the appropriate algorithm for reading the response.
Returns:
str: ``chunked``, ``length``, ``close``.
| 5.017975 | 2.589772 | 1.937613 |
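The same precedence, exercised against plain dicts standing in for ``response.fields``: chunked transfer encoding wins, then an explicit content length, otherwise read until the connection closes.
import re

def get_read_strategy(fields):
    if re.match(r'chunked($|;)', fields.get('Transfer-Encoding', '')):
        return 'chunked'
    if 'Content-Length' in fields:
        return 'length'
    return 'close'

print(get_read_strategy({'Transfer-Encoding': 'chunked'}))  # chunked
print(get_read_strategy({'Content-Length': '42'}))          # length
print(get_read_strategy({}))                                # close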
'''Set up the content encoding decompressor.'''
encoding = response.fields.get('Content-Encoding', '').lower()
if encoding == 'gzip':
self._decompressor = wpull.decompression.GzipDecompressor()
elif encoding == 'deflate':
self._decompressor = wpull.decompression.DeflateDecompressor()
else:
self._decompressor = None
|
def _setup_decompressor(self, response)
|
Set up the content encoding decompressor.
| 3.461384 | 3.086381 | 1.121503 |
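The same Content-Encoding selection with plain ``zlib`` objects; wpull's ``GzipDecompressor``/``DeflateDecompressor`` classes are assumed to add error tolerance on top of roughly this choice of window bits.
import gzip
import zlib

def make_decompressor(content_encoding):
    # 16 + MAX_WBITS selects gzip framing; plain MAX_WBITS expects a
    # zlib-wrapped deflate stream.
    encoding = (content_encoding or '').lower()
    if encoding == 'gzip':
        return zlib.decompressobj(16 + zlib.MAX_WBITS)
    if encoding == 'deflate':
        return zlib.decompressobj(zlib.MAX_WBITS)
    return None  # identity encoding: pass data through untouched

decompressor = make_decompressor('gzip')
blob = gzip.compress(b'hello')
print(decompressor.decompress(blob) + decompressor.flush())  # b'hello'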
'''Decompress the given data and return the uncompressed data.'''
if self._decompressor:
try:
return self._decompressor.decompress(data)
except zlib.error as error:
raise ProtocolError(
'zlib error: {0}.'.format(error)
) from error
else:
return data
|
def _decompress_data(self, data)
|
Decompress the given data and return the uncompressed data.
| 3.219438 | 2.955981 | 1.089127 |
'''Return any data left in the decompressor.'''
if self._decompressor:
try:
return self._decompressor.flush()
except zlib.error as error:
raise ProtocolError(
'zlib flush error: {0}.'.format(error)
) from error
else:
return b''
|
def _flush_decompressor(self)
|
Return any data left in the decompressor.
| 3.965215 | 3.205002 | 1.237196 |
'''Uncompress gzip data.
Args:
data (bytes): The gzip data.
truncated (bool): If True, the decompressor is not flushed.
This is a convenience function.
Returns:
bytes: The inflated data.
Raises:
zlib.error
'''
decompressor = SimpleGzipDecompressor()
inflated_data = decompressor.decompress(data)
if not truncated:
inflated_data += decompressor.flush()
return inflated_data
|
def gzip_uncompress(data, truncated=False)
|
Uncompress gzip data.
Args:
data (bytes): The gzip data.
truncated (bool): If True, the decompressor is not flushed.
This is a convenience function.
Returns:
bytes: The inflated data.
Raises:
zlib.error
| 3.434039 | 2.19813 | 1.562255 |
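A quick check of the ``truncated`` flag using only the standard library; for this illustration ``SimpleGzipDecompressor`` is assumed to behave like a gzip-mode ``zlib.decompressobj``.
import gzip
import zlib

def gzip_uncompress(data, truncated=False):
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    inflated = decompressor.decompress(data)
    if not truncated:
        inflated += decompressor.flush()  # skipped for truncated input
    return inflated

blob = gzip.compress(b'hello world')
print(gzip_uncompress(blob))                  # b'hello world'
print(gzip_uncompress(blob, truncated=True))  # b'hello world' (flush skipped)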
'''Mark the item as processed without download.'''
_logger.debug(__(_('Skipping ‘{url}’.'), url=self.url_record.url))
self.app_session.factory['URLTable'].check_in(self.url_record.url, Status.skipped)
self._processed = True
|
def skip(self)
|
Mark the item as processed without download.
| 20.841627 | 15.489302 | 1.34555 |
'''Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
'''
url = self.url_record.url
assert not self._try_count_incremented, (url, status)
if increment_try_count:
self._try_count_incremented = True
_logger.debug(__('Marking URL {0} status {1}.', url, status))
url_result = URLResult()
url_result.filename = filename
self.app_session.factory['URLTable'].check_in(
url,
status,
increment_try_count=increment_try_count,
url_result=url_result,
)
self._processed = True
|
def set_status(self, status: Status, increment_try_count: bool=True,
filename: str=None)
|
Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
| 5.385348 | 4.369468 | 1.232495 |
'''Add links scraped from the document with automatic values.
Args:
url: A full URL. (It can't be a relative path.)
inline: Whether the URL is an embedded object.
link_type: Expected link type.
post_data: URL encoded form data. The request will be made using
POST. (Don't use this to upload files.)
level: The child depth of this URL.
replace: Whether to replace the existing entry in the database
table so it will be downloaded again.
This function provides values automatically for:
* ``inline``
* ``level``
* ``parent``: The referring page.
* ``root``
See also :meth:`add_url`.
'''
url_properties = URLProperties()
url_properties.level = self.url_record.level + 1 if level is None else level
url_properties.inline_level = (self.url_record.inline_level or 0) + 1 if inline else None
url_properties.parent_url = self.url_record.url
url_properties.root_url = self.url_record.root_url or self.url_record.url
url_properties.link_type = link_type
url_data = URLData()
url_data.post_data = post_data
if replace:
self.app_session.factory['URLTable'].remove_many([url])
self.add_url(url, url_properties, url_data)
|
def add_child_url(self, url: str, inline: bool=False,
link_type: Optional[LinkType]=None,
post_data: Optional[str]=None,
level: Optional[int]=None,
replace: bool=False)
|
Add links scraped from the document with automatic values.
Args:
url: A full URL. (It can't be a relative path.)
inline: Whether the URL is an embedded object.
link_type: Expected link type.
post_data: URL encoded form data. The request will be made using
POST. (Don't use this to upload files.)
level: The child depth of this URL.
replace: Whether to replace the existing entry in the database
table so it will be downloaded again.
This function provides values automatically for:
* ``inline``
* ``level``
* ``parent``: The referrering page.
* ``root``
See also :meth:`add_url`.
| 4.806215 | 1.825543 | 2.63276 |
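A rough illustration of the automatic values (child level, inline level, parent and root URL) computed above, using a plain dict in place of ``URLProperties``/``URLRecord``; the field names mirror the attributes set in the original code.
def child_properties(parent, url, inline=False, level=None):
    return {
        'url': url,
        'level': parent['level'] + 1 if level is None else level,
        'inline_level': (parent.get('inline_level') or 0) + 1 if inline else None,
        'parent_url': parent['url'],
        'root_url': parent.get('root_url') or parent['url'],
    }

page = {'url': 'http://example.com/a.html', 'level': 0}
print(child_properties(page, 'http://example.com/style.css', inline=True))
# {'url': 'http://example.com/style.css', 'level': 1, 'inline_level': 1,
#  'parent_url': 'http://example.com/a.html', 'root_url': 'http://example.com/a.html'}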
'''Return a child URLRecord.
This function is useful for testing filters before adding to the table.
'''
url_record = URLRecord()
url_record.url = url
url_record.status = Status.todo
url_record.try_count = 0
url_record.level = self.url_record.level + 1 if level is None else level
url_record.root_url = self.url_record.root_url or self.url_record.url
url_record.parent_url = self.url_record.url
url_record.inline_level = (self.url_record.inline_level or 0) + 1 if inline else 0
url_record.link_type = link_type
url_record.post_data = post_data
return url_record
|
def child_url_record(self, url: str, inline: bool=False,
link_type: Optional[LinkType]=None,
post_data: Optional[str]=None,
level: Optional[int]=None)
|
Return a child URLRecord.
This function is useful for testing filters before adding to the table.
| 2.628778 | 1.99103 | 1.32031 |