code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
'''Read input file as HTML and return the links.'''
scrape_result = session.factory['HTMLScraper'].scrape_file(
session.args.input_file,
encoding=session.args.local_encoding or 'utf-8'
)
for context in scrape_result.link_contexts:
yield context.link
|
def _input_file_as_html_links(cls, session: AppSession)
|
Read input file as HTML and return the links.
| 7.4694 | 5.871157 | 1.272219 |
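The row above scrapes an input file as HTML and yields the discovered links. A minimal standard-library sketch of the same idea (wpull's HTMLScraper, factory, and AppSession are not assumed here):

```python
# Self-contained sketch: extract href/src links from an HTML file,
# roughly what the scraper-backed generator above yields.
from html.parser import HTMLParser


class LinkExtractor(HTMLParser):
    def __init__(self):
        super().__init__()
        self.links = []

    def handle_starttag(self, tag, attrs):
        # Collect href/src attributes, approximating a link scraper's output.
        for name, value in attrs:
            if name in ('href', 'src') and value:
                self.links.append(value)


def input_file_as_html_links(path, encoding='utf-8'):
    parser = LinkExtractor()
    with open(path, encoding=encoding) as html_file:
        parser.feed(html_file.read())
    yield from parser.links
```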
'''Return a new Request to be passed to the Web Client.'''
url_record = self._item_session.url_record
url_info = url_record.url_info
request = self._item_session.app_session.factory['WebClient'].request_factory(url_info.url)
self._populate_common_request(request)
if with_body:
if url_record.post_data or self._processor.fetch_params.post_data:
self._add_post_data(request)
if self._file_writer_session:
request = self._file_writer_session.process_request(request)
return request
|
def _new_initial_request(self, with_body: bool=True)
|
Return a new Request to be passed to the Web Client.
| 5.696903 | 4.833519 | 1.178624 |
'''Populate the Request with common fields.'''
url_record = self._item_session.url_record
# Note that referrer may have already been set by the --referer option
if url_record.parent_url and not request.fields.get('Referer'):
self._add_referrer(request, url_record)
if self._fetch_rule.http_login:
request.username, request.password = self._fetch_rule.http_login
|
def _populate_common_request(self, request)
|
Populate the Request with common fields.
| 8.235021 | 7.86419 | 1.047154 |
'''Add referrer URL to request.'''
# Prohibit leak of referrer from HTTPS to HTTP
# rfc7231 section 5.5.2.
if url_record.parent_url.startswith('https://') and \
url_record.url_info.scheme == 'http':
return
request.fields['Referer'] = url_record.parent_url
|
def _add_referrer(cls, request: Request, url_record: URLRecord)
|
Add referrer URL to request.
| 6.152467 | 5.329999 | 1.154309 |
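A hedged sketch of the RFC 7231 section 5.5.2 rule the method above enforces; a plain dict and strings stand in for wpull's Request and URLRecord types:

```python
# Never leak a Referer header when moving from an https:// parent
# to an http:// target.
def add_referrer(fields, parent_url, target_scheme):
    if parent_url.startswith('https://') and target_scheme == 'http':
        return  # prohibit leaking the HTTPS referrer to plain HTTP
    fields['Referer'] = parent_url


fields = {}
add_referrer(fields, 'https://example.com/page', 'http')
assert 'Referer' not in fields
add_referrer(fields, 'https://example.com/page', 'https')
assert fields['Referer'] == 'https://example.com/page'
```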
'''Process robots.txt.
Coroutine.
'''
try:
self._item_session.request = request = self._new_initial_request(with_body=False)
verdict, reason = (yield from self._should_fetch_reason_with_robots(
request))
except REMOTE_ERRORS as error:
_logger.error(
_('Fetching robots.txt for ‘{url}’ '
'encountered an error: {error}'),
url=self._next_url_info.url, error=error
)
self._result_rule.handle_error(self._item_session, error)
wait_time = self._result_rule.get_wait_time(
self._item_session, error=error
)
if wait_time:
_logger.debug('Sleeping {0}.', wait_time)
yield from asyncio.sleep(wait_time)
return False
else:
_logger.debug('Robots filter verdict {} reason {}', verdict, reason)
if not verdict:
self._item_session.skip()
return False
return True
|
def _process_robots(self)
|
Process robots.txt.
Coroutine.
| 5.697531 | 5.305345 | 1.073923 |
'''Fetch URL including redirects.
Coroutine.
'''
while not self._web_client_session.done():
self._item_session.request = self._web_client_session.next_request()
verdict, reason = self._should_fetch_reason()
_logger.debug('Filter verdict {} reason {}', verdict, reason)
if not verdict:
self._item_session.skip()
break
exit_early, wait_time = yield from self._fetch_one(cast(Request, self._item_session.request))
if wait_time:
_logger.debug('Sleeping {}', wait_time)
yield from asyncio.sleep(wait_time)
if exit_early:
break
|
def _process_loop(self)
|
Fetch URL including redirects.
Coroutine.
| 6.063748 | 4.858045 | 1.248187 |
'''Process one iteration of the loop.
Coroutine.
Returns:
tuple: (exit_early, wait_time). If exit_early is True, stop processing any further requests.
'''
_logger.info(_('Fetching ‘{url}’.'), url=request.url)
response = None
try:
response = yield from self._web_client_session.start()
self._item_session.response = response
action = self._result_rule.handle_pre_response(self._item_session)
if action in (Actions.RETRY, Actions.FINISH):
raise HookPreResponseBreak()
self._file_writer_session.process_response(response)
if not response.body:
response.body = Body(
directory=self._item_session.app_session.root_path,
hint='resp_cb'
)
yield from \
self._web_client_session.download(
file=response.body,
duration_timeout=self._fetch_rule.duration_timeout
)
except HookPreResponseBreak:
_logger.debug('Hook pre-response break.')
return True, None
except REMOTE_ERRORS as error:
self._log_error(request, error)
self._result_rule.handle_error(self._item_session, error)
wait_time = self._result_rule.get_wait_time(
self._item_session, error=error
)
if request.body:
request.body.close()
if response:
response.body.close()
return True, wait_time
else:
self._log_response(request, response)
action = self._handle_response(request, response)
wait_time = self._result_rule.get_wait_time(self._item_session)
yield from self._run_coprocessors(request, response)
response.body.close()
if request.body:
request.body.close()
return action != Actions.NORMAL, wait_time
|
def _fetch_one(self, request: Request) -> Tuple[bool, float]
|
Process one iteration of the loop.
Coroutine.
Returns:
tuple: (exit_early, wait_time). If exit_early is True, stop processing any further requests.
| 5.112914 | 4.417592 | 1.157398 |
'''Return the next URLInfo to be processed.
This returns either the original URLInfo or the next URLInfo
containing the redirect link.
'''
if not self._web_client_session:
return self._item_session.url_record.url_info
return self._web_client_session.next_request().url_info
|
def _next_url_info(self) -> URLInfo
|
Return the next URLInfo to be processed.
This returns either the original URLInfo or the next URLInfo
containing the redirect link.
| 8.678754 | 4.205795 | 2.063523 |
'''Return info about whether the URL should be fetched.
Returns:
tuple: A two item tuple:
1. bool: If True, the URL should be fetched.
2. str: A short reason string explaining the verdict.
'''
is_redirect = False
if self._strong_redirects:
try:
is_redirect = self._web_client_session.redirect_tracker\
.is_redirect()
except AttributeError:
pass
return self._fetch_rule.check_subsequent_web_request(
self._item_session, is_redirect=is_redirect)
|
def _should_fetch_reason(self) -> Tuple[bool, str]
|
Return info about whether the URL should be fetched.
Returns:
tuple: A two item tuple:
1. bool: If True, the URL should be fetched.
2. str: A short reason string explaining the verdict.
| 7.021034 | 3.792976 | 1.851062 |
'''Return info about whether the URL should be fetched, including checking
robots.txt.
Coroutine.
'''
result = yield from \
self._fetch_rule.check_initial_web_request(self._item_session, request)
return result
|
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]
|
Return info about whether the URL should be fetched, including checking
robots.txt.
Coroutine.
| 18.759775 | 7.703486 | 2.435232 |
'''Add data to the payload.'''
if self._item_session.url_record.post_data:
data = wpull.string.to_bytes(self._item_session.url_record.post_data)
else:
data = wpull.string.to_bytes(
self._processor.fetch_params.post_data
)
request.method = 'POST'
request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
request.fields['Content-Length'] = str(len(data))
_logger.debug('Posting with data {0}.', data)
if not request.body:
request.body = Body(io.BytesIO())
with wpull.util.reset_file_offset(request.body):
request.body.write(data)
|
def _add_post_data(self, request: Request)
|
Add data to the payload.
| 4.619382 | 4.217897 | 1.095186 |
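For reference, the wire format the method above writes can be built with the standard library alone; a small sketch:

```python
# Build an application/x-www-form-urlencoded POST payload and the two
# headers the method above sets on the request.
from urllib.parse import urlencode

data = urlencode({'user': 'alice', 'q': 'a b'}).encode('ascii')
headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Content-Length': str(len(data)),
}
assert data == b'user=alice&q=a+b'
```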
'''Log response.'''
_logger.info(
_('Fetched ‘{url}’: {status_code} {reason}. '
'Length: {content_length} [{content_type}].'),
url=request.url,
status_code=response.status_code,
reason=wpull.string.printable_str(response.reason),
content_length=wpull.string.printable_str(
response.fields.get('Content-Length', _('unspecified'))),
content_type=wpull.string.printable_str(
response.fields.get('Content-Type', _('unspecified'))),
)
|
def _log_response(self, request: Request, response: Response)
|
Log response.
| 3.812608 | 3.601463 | 1.058628 |
'''Process the response.
Returns:
A value from :class:`.hook.Actions`.
'''
self._item_session.update_record_value(status_code=response.status_code)
if self._web_client_session.redirect_tracker.is_redirect() or \
self._web_client_session.loop_type() == LoopType.authentication:
self._file_writer_session.discard_document(response)
return self._result_rule.handle_intermediate_response(
self._item_session
)
elif (response.status_code in self._document_codes
or self._processor.fetch_params.content_on_error):
filename = self._file_writer_session.save_document(response)
self._processing_rule.scrape_document(self._item_session)
return self._result_rule.handle_document(
self._item_session, filename
)
elif response.status_code in self._no_document_codes:
self._file_writer_session.discard_document(response)
return self._result_rule.handle_no_document(
self._item_session
)
else:
self._file_writer_session.discard_document(response)
return self._result_rule.handle_document_error(
self._item_session
)
|
def _handle_response(self, request: Request, response: Response) -> Actions
|
Process the response.
Returns:
A value from :class:`.hook.Actions`.
| 4.293138 | 3.777161 | 1.136604 |
'''Convert string to int.
If ``inf`` is supplied, it returns ``0``.
'''
if string == 'inf':
return 0
try:
value = int(string)
except ValueError as error:
raise argparse.ArgumentTypeError(error)
if value < 0:
raise argparse.ArgumentTypeError(_('Value must not be negative.'))
else:
return value
|
def int_0_inf(cls, string)
|
Convert string to int.
If ``inf`` is supplied, it returns ``0``.
| 3.640235 | 2.628599 | 1.384858 |
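A hedged usage sketch: a converter like this plugs into argparse as a `type=` callable, so 'inf' maps to 0 (conventionally meaning "no limit"). The `--tries` option name is illustrative:

```python
import argparse


def int_0_inf(string):
    # 'inf' maps to 0, negative values are rejected.
    if string == 'inf':
        return 0
    try:
        value = int(string)
    except ValueError as error:
        raise argparse.ArgumentTypeError(error)
    if value < 0:
        raise argparse.ArgumentTypeError('Value must not be negative.')
    return value


parser = argparse.ArgumentParser()
parser.add_argument('--tries', type=int_0_inf, default=20)
assert parser.parse_args(['--tries', 'inf']).tries == 0
```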
'''Convert string describing size to int.'''
if string[-1] in ('k', 'm'):
value = cls.int_0_inf(string[:-1])
unit = string[-1]
if unit == 'k':
value *= 2 ** 10
else:
value *= 2 ** 20
return value
else:
return cls.int_0_inf(string)
|
def int_bytes(cls, string)
|
Convert string describing size to int.
| 3.42973 | 2.899446 | 1.182891 |
'''Convert a comma-separated string to a list.'''
items = string.split(',')
items = [item.strip() for item in items]
return items
|
def comma_list(cls, string)
|
Convert a comma-separated string to a list.
| 5.095642 | 4.453029 | 1.144309 |
'''Convert a comma-separated string to `CommaChoiceListArgs`.'''
items = string.split(',')
items = CommaChoiceListArgs([item.strip() for item in items])
return items
|
def comma_choice_list(cls, string)
|
Convert a comma-separated string to `CommaChoiceListArgs`.
| 7.712875 | 4.365656 | 1.766716 |
'''Return an iterator of links found in the document.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
Returns:
iterable: str
'''
return [item[0] for item in self.iter_text(file, encoding) if item[1]]
|
def read_links(self, file, encoding=None)
|
Return an iterator of links found in the document.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
Returns:
iterable: str
| 5.568027 | 2.908546 | 1.914368 |
'''Create the File Writer.
Returns:
FileWriter: An instance of :class:`.writer.BaseFileWriter`.
'''
args = session.args
if args.delete_after:
return session.factory.new('FileWriter') # is a NullWriter
elif args.output_document:
session.factory.class_map['FileWriter'] = SingleDocumentWriter
return session.factory.new('FileWriter', args.output_document,
headers_included=args.save_headers)
use_dir = (len(args.urls) != 1 or args.page_requisites
or args.recursive)
if args.use_directories == 'force':
use_dir = True
elif args.use_directories == 'no':
use_dir = False
os_type = 'windows' if 'windows' in args.restrict_file_names \
else 'unix'
ascii_only = 'ascii' in args.restrict_file_names
no_control = 'nocontrol' not in args.restrict_file_names
if 'lower' in args.restrict_file_names:
case = 'lower'
elif 'upper' in args.restrict_file_names:
case = 'upper'
else:
case = None
path_namer = session.factory.new(
'PathNamer',
args.directory_prefix,
index=args.default_page,
use_dir=use_dir,
cut=args.cut_dirs,
protocol=args.protocol_directories,
hostname=args.host_directories,
os_type=os_type,
ascii_only=ascii_only,
no_control=no_control,
case=case,
max_filename_length=args.max_filename_length,
)
if args.recursive or args.page_requisites or args.continue_download:
if args.clobber_method == 'disable':
file_class = OverwriteFileWriter
else:
file_class = IgnoreFileWriter
elif args.timestamping:
file_class = TimestampingFileWriter
else:
file_class = AntiClobberFileWriter
session.factory.class_map['FileWriter'] = file_class
return session.factory.new(
'FileWriter',
path_namer,
file_continuing=args.continue_download,
headers_included=args.save_headers,
local_timestamping=args.use_server_timestamps,
adjust_extension=args.adjust_extension,
content_disposition=args.content_disposition,
trust_server_names=args.trust_server_names,
)
|
def _build_file_writer(cls, session: AppSession)
|
Create the File Writer.
Returns:
FileWriter: An instance of :class:`.writer.BaseFileWriter`.
| 4.029624 | 3.729072 | 1.080597 |
'''Set up Ctrl+C and SIGTERM handlers.'''
if platform.system() == 'Windows':
_logger.warning(_(
'Graceful stopping with Unix signals is not supported '
'on this OS.'
))
return
event_loop = asyncio.get_event_loop()
graceful_called = False
def graceful_stop_callback():
nonlocal graceful_called
if graceful_called:
forceful_stop_callback()
return
graceful_called = True
_logger.info(_('Stopping once all requests complete...'))
_logger.info(_('Interrupt again to force stopping immediately.'))
self.stop()
def forceful_stop_callback():
_logger.info(_('Forcing immediate stop...'))
logging.raiseExceptions = False
event_loop.stop()
event_loop.add_signal_handler(signal.SIGINT, graceful_stop_callback)
event_loop.add_signal_handler(signal.SIGTERM, forceful_stop_callback)
|
def setup_signal_handlers(self)
|
Set up Ctrl+C and SIGTERM handlers.
| 3.560772 | 3.353818 | 1.061707 |
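A minimal sketch of the two-stage interrupt pattern above: the first SIGINT requests a graceful stop, a second SIGINT (or SIGTERM) stops the loop immediately. The `app` object and its `stop()` method are stand-ins:

```python
import asyncio
import signal


def install_handlers(loop, app):
    graceful_called = False

    def graceful_stop():
        nonlocal graceful_called
        if graceful_called:
            # Second interrupt: escalate to an immediate stop.
            forceful_stop()
            return
        graceful_called = True
        app.stop()  # assumed to let in-flight requests finish

    def forceful_stop():
        loop.stop()

    # Unix only, as the original warns for Windows.
    loop.add_signal_handler(signal.SIGINT, graceful_stop)
    loop.add_signal_handler(signal.SIGTERM, forceful_stop)
```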
'''Run the application.
This function is blocking.
Returns:
int: The exit status.
'''
exit_status = asyncio.get_event_loop().run_until_complete(self.run())
# The following loop close procedure should avoid deadlock while
# allowing all callbacks to process before close()
asyncio.get_event_loop().stop()
asyncio.get_event_loop().run_forever()
asyncio.get_event_loop().close()
return exit_status
|
def run_sync(self) -> int
|
Run the application.
This function is blocking.
Returns:
int: The exit status.
| 5.289728 | 4.223352 | 1.252495 |
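The shutdown sequence above in isolation, as a sketch: after `run_until_complete()`, calling `stop()` and then `run_forever()` lets already-queued callbacks drain before `close()`:

```python
import asyncio


async def main():
    return 0


# new_event_loop() is used here so the sketch runs on modern Python;
# the original's get_event_loop() reflects its vintage.
loop = asyncio.new_event_loop()
exit_status = loop.run_until_complete(main())
loop.stop()
loop.run_forever()  # runs callbacks already scheduled, then exits
loop.close()
```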
'''Set the exit code based on the error type.
Args:
error (:class:`Exception`): An exception instance.
'''
for error_type, exit_code in self.ERROR_CODE_MAP.items():
if isinstance(error, error_type):
self.update_exit_code(exit_code)
break
else:
self.update_exit_code(ExitStatus.generic_error)
|
def _update_exit_code_from_error(self, error)
|
Set the exit code based on the error type.
Args:
error (:class:`Exception`): An exception instance.
| 3.072769 | 2.449517 | 1.254438 |
'''Set the exit code if it is more serious than before.
Args:
code: The exit code.
'''
if code:
if self._exit_code:
self._exit_code = min(self._exit_code, code)
else:
self._exit_code = code
|
def update_exit_code(self, code: int)
|
Set the exit code if it is more serious than before.
Args:
code: The exit code.
| 4.581692 | 2.431791 | 1.884082 |
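A behavior sketch of the rule above, assuming lower nonzero codes are the more serious ones (that is what `min()` preserves): the tracked code only ratchets downward, and a zero update never clears it:

```python
class ExitTracker:
    def __init__(self):
        self._exit_code = 0

    def update_exit_code(self, code):
        # Zero is ignored; otherwise keep the lowest (most serious) code.
        if code:
            if self._exit_code:
                self._exit_code = min(self._exit_code, code)
            else:
                self._exit_code = code


tracker = ExitTracker()
tracker.update_exit_code(8)
tracker.update_exit_code(4)
tracker.update_exit_code(0)
assert tracker._exit_code == 4
```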
'''Consult robots.txt, fetching it as needed.
Args:
request: The request to be made
to get the file.
Returns:
True if the URL can be fetched.
Coroutine
'''
if not self._robots_txt_checker:
return True
result = yield from self._robots_txt_checker.can_fetch(request)
return result
|
def consult_robots_txt(self, request: HTTPRequest) -> bool
|
Consult robots.txt, fetching it as needed.
Args:
request: The request to be made
to get the file.
Returns:
True if the URL can be fetched.
Coroutine
| 9.218379 | 2.912479 | 3.165132 |
'''Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns:
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info`
'''
if not self._url_filter:
return True, 'nofilters', None
test_info = self._url_filter.test_info(url_info, url_record)
verdict = test_info['verdict']
if verdict:
reason = 'filters'
elif is_redirect and self.is_only_span_hosts_failed(test_info):
verdict = True
reason = 'redirect'
else:
reason = 'filters'
return verdict, reason, test_info
|
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \
-> Tuple[bool, str, dict]
|
Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns:
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info`
| 5.338534 | 1.996012 | 2.674601 |
'''Consult the scripting hook.
Returns:
tuple: (bool, str)
'''
try:
reasons = {
'filters': test_info['map'],
'reason': reason,
}
verdict = self.hook_dispatcher.call(
PluginFunctions.accept_url, item_session, verdict, reasons,
)
reason = 'callback_hook'
except HookDisconnected:
pass
return verdict, reason
|
def consult_hook(self, item_session: ItemSession, verdict: bool,
reason: str, test_info: dict)
|
Consult the scripting hook.
Returns:
tuple: (bool, str)
| 11.582754 | 8.905802 | 1.300585 |
'''Check robots.txt, URL filters, and scripting hook.
Returns:
tuple: (bool, str)
Coroutine.
'''
verdict, reason, test_info = self.consult_filters(item_session.request.url_info, item_session.url_record)
if verdict and self._robots_txt_checker:
can_fetch = yield from self.consult_robots_txt(request)
if not can_fetch:
verdict = False
reason = 'robotstxt'
verdict, reason = self.consult_hook(
item_session, verdict, reason, test_info
)
return verdict, reason
|
def check_initial_web_request(self, item_session: ItemSession, request: HTTPRequest) -> Tuple[bool, str]
|
Check robots.txt, URL filters, and scripting hook.
Returns:
tuple: (bool, str)
Coroutine.
| 6.809271 | 4.300473 | 1.583377 |
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record, is_redirect=is_redirect)
# TODO: provide an option to change this
if item_session.is_virtual:
verdict = True
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason
|
def check_subsequent_web_request(self, item_session: ItemSession,
is_redirect: bool=False) -> Tuple[bool, str]
|
Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
| 6.794128 | 4.941014 | 1.375047 |
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record)
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason
|
def check_generic_request(self, item_session: ItemSession) -> Tuple[bool, str]
|
Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
| 7.773744 | 4.814968 | 1.614495 |
'''Process a response that is starting.'''
action = self.consult_pre_response_hook(item_session)
if action == Actions.RETRY:
item_session.set_status(Status.skipped)
elif action == Actions.FINISH:
item_session.set_status(Status.done)
elif action == Actions.STOP:
raise HookStop('Script requested immediate stop.')
return action
|
def handle_pre_response(self, item_session: ItemSession) -> Actions
|
Process a response that is starting.
| 6.782657 | 5.714981 | 1.186821 |
'''Process a successful document response.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
if action == Actions.NORMAL:
self._statistics.increment(item_session.response.body.size())
item_session.set_status(Status.done, filename=filename)
return action
|
def handle_document(self, item_session: ItemSession, filename: str) -> Actions
|
Process a successful document response.
Returns:
A value from :class:`.hook.Actions`.
| 9.312389 | 5.599063 | 1.663205 |
'''Callback for successful responses containing no useful document.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
if action == Actions.NORMAL:
item_session.set_status(Status.skipped)
return action
|
def handle_no_document(self, item_session: ItemSession) -> Actions
|
Callback for successful responses containing no useful document.
Returns:
A value from :class:`.hook.Actions`.
| 11.863239 | 4.779058 | 2.482339 |
'''Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
return action
|
def handle_intermediate_response(self, item_session: ItemSession) -> Actions
|
Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
| 12.848272 | 5.289739 | 2.428905 |
'''Callback for when the document only describes a server error.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.increment()
self._statistics.errors[ServerError] += 1
action = self.handle_response(item_session)
if action == Actions.NORMAL:
item_session.set_status(Status.error)
return action
|
def handle_document_error(self, item_session: ItemSession) -> Actions
|
Callback for when the document only describes a server error.
Returns:
A value from :class:`.hook.Actions`.
| 12.459411 | 5.594485 | 2.227088 |
'''Generic handler for a response.
Returns:
A value from :class:`.hook.Actions`.
'''
action = self.consult_response_hook(item_session)
if action == Actions.RETRY:
item_session.set_status(Status.error)
elif action == Actions.FINISH:
item_session.set_status(Status.done)
elif action == Actions.STOP:
raise HookStop('Script requested immediate stop.')
return action
|
def handle_response(self, item_session: ItemSession) -> Actions
|
Generic handler for a response.
Returns:
A value from :class:`.hook.Actions`.
| 6.065252 | 4.17787 | 1.451757 |
'''Process an error.
Returns:
A value from :class:`.hook.Actions`.
'''
if not self._ssl_verification and \
isinstance(error, SSLVerificationError):
# Change it into a different error since the user doesn't care
# about verifying certificates
self._statistics.increment_error(ProtocolError())
else:
self._statistics.increment_error(error)
self._waiter.increment()
action = self.consult_error_hook(item_session, error)
if action == Actions.RETRY:
item_session.set_status(Status.error)
elif action == Actions.FINISH:
item_session.set_status(Status.done)
elif action == Actions.STOP:
raise HookStop('Script requested immediate stop.')
elif self._ssl_verification and isinstance(error, SSLVerificationError):
raise
elif isinstance(error, ConnectionRefused) and \
not self.retry_connrefused:
item_session.set_status(Status.skipped)
elif isinstance(error, DNSNotFound) and \
not self.retry_dns_error:
item_session.set_status(Status.skipped)
else:
item_session.set_status(Status.error)
return action
|
def handle_error(self, item_session: ItemSession, error: BaseException) -> Actions
|
Process an error.
Returns:
A value from :class:`.hook.Actions`.
| 4.604653 | 4.102708 | 1.122345 |
'''Return the wait time in seconds between requests.'''
seconds = self._waiter.get()
try:
return self.hook_dispatcher.call(PluginFunctions.wait_time, seconds,
item_session, error)
except HookDisconnected:
return seconds
|
def get_wait_time(self, item_session: ItemSession, error=None)
|
Return the wait time in seconds between requests.
| 12.507126 | 9.316849 | 1.34242 |
'''Return the wait time between requests.
Args:
seconds: The original time in seconds.
item_session:
error:
Returns:
The time in seconds.
'''
return seconds
|
def plugin_wait_time(seconds: float, item_session: ItemSession, error: Optional[Exception]=None) -> float
|
Return the wait time between requests.
Args:
seconds: The original time in seconds.
item_session:
error:
Returns:
The time in seconds.
| 6.630971 | 2.315163 | 2.864148 |
'''Return scripting action when a response begins.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_pre_response,
item_session
)
except HookDisconnected:
return Actions.NORMAL
|
def consult_pre_response_hook(self, item_session: ItemSession) -> Actions
|
Return scripting action when a response begins.
| 19.845675 | 11.709604 | 1.69482 |
'''Return scripting action when a response ends.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_response, item_session
)
except HookDisconnected:
return Actions.NORMAL
|
def consult_response_hook(self, item_session: ItemSession) -> Actions
|
Return scripting action when a response ends.
| 22.475983 | 13.138756 | 1.710663 |
'''Return scripting action when an error occurred.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_error, item_session, error)
except HookDisconnected:
return Actions.NORMAL
|
def consult_error_hook(self, item_session: ItemSession, error: BaseException)
|
Return scripting action when an error occurred.
| 26.619019 | 11.891483 | 2.238494 |
'''Add additional URLs such as robots.txt and sitemap.xml.'''
if item_session.url_record.level == 0 and self._sitemaps:
extra_url_infos = (
self.parse_url(
'{0}://{1}/robots.txt'.format(
item_session.url_record.url_info.scheme,
item_session.url_record.url_info.hostname_with_port)
),
self.parse_url(
'{0}://{1}/sitemap.xml'.format(
item_session.url_record.url_info.scheme,
item_session.url_record.url_info.hostname_with_port)
)
)
for url_info in extra_url_infos:
item_session.add_child_url(url_info.url)
|
def add_extra_urls(self, item_session: ItemSession)
|
Add additional URLs such as robots.txt and sitemap.xml.
| 3.022271 | 2.584629 | 1.169325 |
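A sketch of deriving the extra root URLs enqueued above, using `urllib.parse` in place of wpull's URLInfo:

```python
from urllib.parse import urlsplit


def extra_urls(url):
    # netloc keeps the port, matching hostname_with_port above.
    parts = urlsplit(url)
    base = '{0}://{1}'.format(parts.scheme, parts.netloc)
    return (base + '/robots.txt', base + '/sitemap.xml')


assert extra_urls('http://example.com:8080/deep/page.html') == (
    'http://example.com:8080/robots.txt',
    'http://example.com:8080/sitemap.xml',
)
```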
'''Process document for links.'''
self.event_dispatcher.notify(
PluginFunctions.get_urls, item_session
)
if not self._document_scraper:
return
demux_info = self._document_scraper.scrape_info(
item_session.request, item_session.response,
item_session.url_record.link_type
)
num_inline_urls = 0
num_linked_urls = 0
for scraper, scrape_result in demux_info.items():
new_inline, new_linked = self._process_scrape_info(
scraper, scrape_result, item_session
)
num_inline_urls += new_inline
num_linked_urls += new_linked
_logger.debug('Candidate URLs: inline={0} linked={1}',
num_inline_urls, num_linked_urls
)
|
def scrape_document(self, item_session: ItemSession)
|
Process document for links.
| 5.270863 | 4.814843 | 1.094711 |
'''Collect the URLs from the scrape info dict.'''
if not scrape_result:
return 0, 0
num_inline = 0
num_linked = 0
for link_context in scrape_result.link_contexts:
url_info = self.parse_url(link_context.link)
if not url_info:
continue
url_info = self.rewrite_url(url_info)
child_url_record = item_session.child_url_record(
url_info.url, inline=link_context.inline
)
if not self._fetch_rule.consult_filters(item_session.request.url_info, child_url_record)[0]:
continue
if link_context.inline:
num_inline += 1
else:
num_linked += 1
item_session.add_child_url(url_info.url, inline=link_context.inline,
link_type=link_context.link_type)
return num_inline, num_linked
|
def _process_scrape_info(self, scraper: BaseScraper,
scrape_result: ScrapeResult,
item_session: ItemSession)
|
Collect the URLs from the scrape info dict.
| 3.767226 | 3.264922 | 1.153848 |
'''Return a rewritten URL such as escaped fragment.'''
if self._url_rewriter:
return self._url_rewriter.rewrite(url_info)
else:
return url_info
|
def rewrite_url(self, url_info: URLInfo) -> URLInfo
|
Return a rewritten URL such as escaped fragment.
| 6.306219 | 2.90606 | 2.170023 |
'''Log exceptions during a fetch.'''
_logger.error(
_('Fetching ‘{url}’ encountered an error: {error}'),
url=request.url, error=error
)
|
def _log_error(self, request, error)
|
Log exceptions during a fetch.
| 10.147425 | 6.556963 | 1.54758 |
repo = get_repo(token=token, org=org, name=name)
try:
repo.create_label(name=system.strip(), color=SYSTEM_LABEL_COLOR)
click.secho("Successfully added new system {}".format(system), fg="green")
if prompt and click.confirm("Run update to re-generate the page?"):
run_update(name=name, token=token, org=org)
except GithubException as e:
if e.status == 422:
click.secho(
"Unable to add new system {}, it already exists.".format(system), fg="yellow")
return
raise
|
def run_add_system(name, token, org, system, prompt)
|
Adds a new system to the repo.
| 3.141444 | 2.965998 | 1.059152 |
repo = get_repo(token=token, org=org, name=name)
try:
label = repo.get_label(name=system.strip())
label.delete()
click.secho("Successfully deleted {}".format(system), fg="green")
if prompt and click.confirm("Run update to re-generate the page?"):
run_update(name=name, token=token, org=org)
except UnknownObjectException:
click.secho("Unable to remove system {}, it does not exist.".format(system), fg="yellow")
|
def run_remove_system(name, token, org, system, prompt)
|
Removes a system from the repo.
| 3.330037 | 3.190214 | 1.043829 |
files = get_files(repo)
config = DEFAULT_CONFIG
if "config.json" in files:
# get the config file, parse JSON and merge it with the default config
config_file = repo.get_file_contents('/config.json', ref="gh-pages")
try:
repo_config = json.loads(config_file.decoded_content.decode("utf-8"))
config.update(repo_config)
except ValueError:
click.secho("WARNING: Unable to parse config file. Using defaults.", fg="yellow")
return config
|
def get_config(repo)
|
Get the config for the repo, merged with the default config. Returns the default config if
no config file is found.
| 3.172462 | 3.020574 | 1.050284 |
if not self.registration_allowed():
return HttpResponseRedirect(force_text(self.disallowed_url))
return super(RegistrationView, self).dispatch(*args, **kwargs)
|
def dispatch(self, *args, **kwargs)
|
Check that user signup is allowed before even bothering to
dispatch or do other processing.
| 4.597105 | 3.70956 | 1.239259 |
extra_context = {}
try:
activated_user = self.activate(*args, **kwargs)
except ActivationError as e:
extra_context['activation_error'] = {
'message': e.message,
'code': e.code,
'params': e.params
}
else:
signals.user_activated.send(
sender=self.__class__,
user=activated_user,
request=self.request
)
return HttpResponseRedirect(
force_text(
self.get_success_url(activated_user)
)
)
context_data = self.get_context_data()
context_data.update(extra_context)
return self.render_to_response(context_data)
|
def get(self, *args, **kwargs)
|
The base activation logic; subclasses should leave this method
alone and implement activate(), which is called from this
method.
| 2.300556 | 2.248507 | 1.023148 |
return signing.dumps(
obj=user.get_username(),
salt=REGISTRATION_SALT
)
|
def get_activation_key(self, user)
|
Generate the activation key which will be emailed to the user.
| 8.633016 | 8.224144 | 1.049716 |
scheme = 'https' if self.request.is_secure() else 'http'
return {
'scheme': scheme,
'activation_key': activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': get_current_site(self.request)
}
|
def get_email_context(self, activation_key)
|
Build the template context used for the activation email.
| 2.303771 | 2.222526 | 1.036556 |
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context['user'] = user
subject = render_to_string(
template_name=self.email_subject_template,
context=context,
request=self.request
)
# Force subject to a single line to avoid header-injection
# issues.
subject = ''.join(subject.splitlines())
message = render_to_string(
template_name=self.email_body_template,
context=context,
request=self.request
)
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
|
def send_activation_email(self, user)
|
Send the activation email. The activation key is the username,
signed using TimestampSigner.
| 1.9597 | 1.984867 | 0.987321 |
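The header-injection guard above in isolation: collapsing the rendered subject to a single line keeps CR/LF out of the outgoing mail headers:

```python
# A hostile template value cannot smuggle an extra header once the
# subject is flattened to one line.
subject = 'Activate your account\r\nBcc: attacker@example.com'
safe_subject = ''.join(subject.splitlines())
assert '\r' not in safe_subject and '\n' not in safe_subject
```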
try:
username = signing.loads(
activation_key,
salt=REGISTRATION_SALT,
max_age=settings.ACCOUNT_ACTIVATION_DAYS * 86400
)
return username
except signing.SignatureExpired:
raise ActivationError(
self.EXPIRED_MESSAGE,
code='expired'
)
except signing.BadSignature:
raise ActivationError(
self.INVALID_KEY_MESSAGE,
code='invalid_key',
params={'activation_key': activation_key}
)
|
def validate_key(self, activation_key)
|
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or raising ``ActivationError`` if not.
| 2.410344 | 2.243006 | 1.074604 |
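A hedged round-trip sketch of the signing scheme used by `get_activation_key()` and `validate_key()`: `dumps()` signs the username with a salt, and `loads()` enforces the same salt plus a `max_age` window. Running it requires configured Django settings (a SECRET_KEY); the salt value here is illustrative:

```python
from django.core import signing

REGISTRATION_SALT = 'registration'  # illustrative, not the project's value

key = signing.dumps(obj='alice', salt=REGISTRATION_SALT)
# Raises SignatureExpired past max_age, BadSignature if tampered with.
username = signing.loads(key, salt=REGISTRATION_SALT, max_age=7 * 86400)
assert username == 'alice'
```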
User = get_user_model()
try:
user = User.objects.get(**{
User.USERNAME_FIELD: username,
})
if user.is_active:
raise ActivationError(
self.ALREADY_ACTIVATED_MESSAGE,
code='already_activated'
)
return user
except User.DoesNotExist:
raise ActivationError(
self.BAD_USERNAME_MESSAGE,
code='bad_username'
)
|
def get_user(self, username)
|
Given the verified username, look up and return the
corresponding user account if it exists, or raise
``ActivationError`` if it doesn't.
| 2.620408 | 2.357385 | 1.111574 |
if not isinstance(value, six.text_type):
return
if confusables.is_dangerous(value):
raise ValidationError(CONFUSABLE, code='invalid')
|
def validate_confusables(value)
|
Validator which disallows 'dangerous' usernames likely to
represent homograph attacks.
A username is 'dangerous' if it is mixed-script (as defined by
Unicode 'Script' property) and contains one or more characters
appearing in the Unicode Visually Confusable Characters file.
| 5.205336 | 6.024405 | 0.864041 |
if '@' not in value:
return
local_part, domain = value.split('@')
if confusables.is_dangerous(local_part) or \
confusables.is_dangerous(domain):
raise ValidationError(CONFUSABLE_EMAIL, code='invalid')
|
def validate_confusables_email(value)
|
Validator which disallows 'dangerous' email addresses likely to
represent homograph attacks.
An email address is 'dangerous' if either the local-part or the
domain, considered on their own, are mixed-script and contain one
or more characters appearing in the Unicode Visually Confusable
Characters file.
| 3.304685 | 3.298599 | 1.001845 |
for k, v in JS_FILE_MAPPING.items():
input_files = " ".join(v["input_files"])
output_file = v["output_file"]
uglifyjs_command = "uglifyjs {input_files} -o {output_file}".format(
input_files=input_files,
output_file=output_file
)
local(uglifyjs_command)
|
def minify_js_files()
|
Minify JS files with UglifyJS.
| 2.693926 | 2.547615 | 1.057431 |
for k, v in CSS_FILE_MAPPING.items():
input_files = " ".join(v["input_files"])
output_file = v["output_file"]
uglifyjs_command = "uglifycss {input_files} > {output_file}".format(
input_files=input_files,
output_file=output_file
)
local(uglifyjs_command)
|
def minify_css_files()
|
Minify CSS files with UglifyCSS.
| 2.994328 | 2.894411 | 1.034521 |
dt = dt or datetime.now()
if timezone is None:
return dt.strftime('%Y-%m-%d %H:%M%z')
if not dt.tzinfo:
tz = timezone.get_current_timezone()
if not tz:
tz = timezone.utc
dt = dt.replace(tzinfo=timezone.get_current_timezone())
return dt.strftime("%Y-%m-%d %H:%M%z")
|
def timestamp_with_timezone(dt=None)
|
Return a timestamp with a timezone for the configured locale. If all else
fails, consider localtime to be UTC.
| 2.236816 | 2.216631 | 1.009106 |
fn_path = getattr(settings, 'ROSETTA_ACCESS_CONTROL_FUNCTION', None)
if fn_path is None:
return is_superuser_staff_or_in_translators_group
# Dynamically load a permissions function
perm_module, perm_func = fn_path.rsplit('.', 1)
perm_module = importlib.import_module(perm_module)
return getattr(perm_module, perm_func)
|
def get_access_control_function()
|
Return a predicate for determining if a user can
access the Rosetta views
| 4.120236 | 3.914958 | 1.052435 |
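The dotted-path loading pattern above, in isolation: split into module path and attribute name, import the module, and return the attribute. `json.loads` serves as a stand-in target:

```python
import importlib


def load_callable(dotted_path):
    # 'package.module.func' -> import package.module, return func
    module_path, func_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, func_name)


json_loads = load_callable('json.loads')
assert json_loads('{"a": 1}') == {'a': 1}
```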
# (Formerly known as "rosetta_i18n_lang_code")
lang_id = self.kwargs['lang_id']
if lang_id not in {l[0] for l in rosetta_settings.ROSETTA_LANGUAGES}:
raise Http404
if not can_translate_language(self.request.user, lang_id):
raise Http404
return lang_id
|
def language_id(self)
|
Determine/return the language id from the url kwargs, after
validating that:
1. the language is in rosetta_settings.ROSETTA_LANGUAGES, and
2. the current user is permitted to translate that language
(If either of the above fail, throw a 404.)
| 4.277371 | 2.754805 | 1.552695 |
# This was formerly referred to as 'rosetta_i18n_fn'
idx = self.kwargs['idx']
idx = int(idx) # idx matched url re expression; calling int() is safe
third_party_apps = self.po_filter in ('all', 'third-party')
django_apps = self.po_filter in ('all', 'django')
project_apps = self.po_filter in ('all', 'project')
po_paths = find_pos(self.language_id,
project_apps=project_apps,
django_apps=django_apps,
third_party_apps=third_party_apps,
)
po_paths.sort(key=get_app_name)
try:
path = po_paths[idx]
except IndexError:
raise Http404
return path
|
def po_file_path(self)
|
Based on the url kwargs, infer and return the path to the .po file to
be shown/updated.
Throw a 404 if a file isn't found.
| 5.532571 | 4.871686 | 1.135658 |
if self.po_file_is_writable:
# If we can write changes to file, then we pull it up fresh with
# each request.
# XXX: brittle; what if this path doesn't exist? Isn't a .po file?
po_file = pofile(self.po_file_path,
wrapwidth=rosetta_settings.POFILE_WRAP_WIDTH)
for entry in po_file:
# Entry is an object representing a single entry in the catalog.
# We iterate through the *entire catalog*, pasting a hashed
# value of the meat of each entry on its side in an attribute
# called "md5hash".
str_to_hash = (
six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or '')
).encode('utf8')
entry.md5hash = hashlib.md5(str_to_hash).hexdigest()
else:
storage = get_storage(self.request)
po_file = storage.get(self.po_file_cache_key, None)
if not po_file:
po_file = pofile(self.po_file_path)
for entry in po_file:
# Entry is an object representing a single entry in the
# catalog. We iterate through the entire catalog, pasting
# a hashed value of the meat of each entry on its side in
# an attribute called "md5hash".
str_to_hash = (
six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or '')
).encode('utf8')
entry.md5hash = hashlib.new('md5', str_to_hash).hexdigest()
storage.set(self.po_file_cache_key, po_file)
return po_file
|
def po_file(self)
|
Return the parsed .po file that is currently being translated/viewed.
(Note that this parsing also involves marking up each entry with a hash
of its contents.)
| 3.078038 | 2.952383 | 1.042561 |
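The per-entry hashing scheme above, reduced to a sketch: msgid, msgstr, and msgctxt are hashed together so each catalog entry gets a stable key:

```python
import hashlib


def entry_hash(msgid, msgstr, msgctxt=None):
    # Concatenate the entry's identifying parts and hash them, as the
    # view does for each polib entry.
    str_to_hash = (msgid + msgstr + (msgctxt or '')).encode('utf8')
    return hashlib.md5(str_to_hash).hexdigest()


assert entry_hash('Hello', 'Bonjour') == entry_hash('Hello', 'Bonjour', None)
```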
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if 0 == len(out_):
pass
elif "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
|
def fix_nls(self, in_, out_)
|
Fixes submitted translations by filtering carriage returns and pairing
newlines at the beginning and end of the translated string with the original.
| 1.933424 | 1.821579 | 1.0614 |
ref_lang = self._request_request('ref_lang', 'msgid')
if ref_lang != 'msgid':
allowed_languages = {l[0] for l in rosetta_settings.ROSETTA_LANGUAGES}
if ref_lang not in allowed_languages:
raise Http404
return ref_lang
|
def ref_lang(self)
|
Return the language id for the "reference language" (the language to
be translated *from*, if not English).
Throw a 404 if it's not in rosetta_settings.ROSETTA_LANGUAGES.
| 5.42379 | 3.944837 | 1.374908 |
ref_pofile = None
if rosetta_settings.ENABLE_REFLANG and self.ref_lang != 'msgid':
replacement = '{separator}locale{separator}{ref_lang}'.format(
separator=os.sep,
ref_lang=self.ref_lang
)
pattern = '\{separator}locale\{separator}[a-z]{{2}}'.format(separator=os.sep)
ref_fn = re.sub(pattern, replacement, self.po_file_path,)
try:
ref_pofile = pofile(ref_fn)
except IOError:
# there's a syntax error in the PO file and polib can't
# open it. Let's just do nothing and thus display msgids.
# XXX: :-/
pass
return ref_pofile
|
def ref_lang_po_file(self)
|
Return a parsed .po file object for the "reference language", if one
exists, otherwise None.
| 6.506901 | 6.190748 | 1.051069 |
if self.query:
msg_filter = None
else:
msg_filter = self._request_request('msg_filter', 'all')
available_msg_filters = {'untranslated', 'translated', 'fuzzy', 'all'}
if msg_filter not in available_msg_filters:
msg_filter = 'all'
return msg_filter
|
def msg_filter(self)
|
Validate/return msg_filter from request (e.g. 'fuzzy', 'untranslated'),
or a default.
If a query is also specified in the request, then return None.
| 5.447537 | 3.49381 | 1.559197 |
if self.query:
# Scenario #1: terms matching a search query
rx = re.compile(re.escape(self.query), re.IGNORECASE)
def concat_entry(e):
return (six.text_type(e.msgstr) +
six.text_type(e.msgid) +
six.text_type(e.msgctxt) +
six.text_type(e.comment) +
u''.join([o[0] for o in e.occurrences]) +
six.text_type(e.msgid_plural) +
u''.join(e.msgstr_plural.values())
)
entries = [e_ for e_ in self.po_file
if not e_.obsolete and rx.search(concat_entry(e_))]
else:
# Scenario #2: filtered list of messages
if self.msg_filter == 'untranslated':
entries = self.po_file.untranslated_entries()
elif self.msg_filter == 'translated':
entries = self.po_file.translated_entries()
elif self.msg_filter == 'fuzzy':
entries = [e_ for e_ in self.po_file.fuzzy_entries()
if not e_.obsolete]
else:
# ("all")
entries = [e_ for e_ in self.po_file if not e_.obsolete]
return entries
|
def get_entries(self)
|
Return a list of the entries (messages) that would be part of the
current "view"; that is, all of the ones from this .po file matching the
current query or msg_filter.
| 2.797476 | 2.548099 | 1.097868 |
converted_gps_time = None
gps_timestamp = float(gps_time)
if gps_week is not None:
# image date
converted_gps_time = GPS_START + datetime.timedelta(seconds=int(gps_week) *
SECS_IN_WEEK + gps_timestamp)
else:
# TAI scale with 1970-01-01 00:00:10 (TAI) epoch
os.environ['TZ'] = 'right/UTC'
# by definition
gps_time_as_gps = GPS_START + \
datetime.timedelta(seconds=gps_timestamp)
# constant offset
gps_time_as_tai = gps_time_as_gps + \
datetime.timedelta(seconds=19)
tai_epoch_as_tai = datetime.datetime(1970, 1, 1, 0, 0, 10)
# by definition
tai_timestamp = (gps_time_as_tai - tai_epoch_as_tai).total_seconds()
converted_gps_time = (
datetime.datetime.utcfromtimestamp(tai_timestamp))
# "right" timezone is in effect
return converted_gps_time
|
def convert_from_gps_time(gps_time, gps_week=None)
|
Convert gps time in ticks to standard time.
| 3.753556 | 3.714213 | 1.010592 |
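A worked example of the gps_week branch above: the GPS epoch (1980-01-06) plus whole weeks plus in-week seconds. `GPS_START` and `SECS_IN_WEEK` mirror the constants the function assumes:

```python
import datetime

GPS_START = datetime.datetime(1980, 1, 6)  # GPS week 0 epoch
SECS_IN_WEEK = 604800

# Week 2000 plus half a week (302400 s) of in-week seconds.
t = GPS_START + datetime.timedelta(seconds=2000 * SECS_IN_WEEK + 302400)
assert t == datetime.datetime(2018, 5, 9, 12, 0)
```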
try:
return float(FFProbe(video_file).video[0].duration)
except Exception as e:
print("could not extract duration from video {} due to {}".format(video_file, e))
return None
|
def get_video_duration(video_file)
|
Get video duration in seconds
| 3.320182 | 3.191192 | 1.040421 |
if not os.path.isfile(video_file):
print("Error, video file {} does not exist".format(video_file))
return None
try:
time_string = FFProbe(video_file).video[0].creation_time
try:
creation_time = datetime.datetime.strptime(
time_string, TIME_FORMAT)
except ValueError:
creation_time = datetime.datetime.strptime(
time_string, TIME_FORMAT_2)
except Exception:
return None
return creation_time
|
def get_video_end_time(video_file)
|
Get video end time as a datetime.
| 2.402246 | 2.325373 | 1.033058 |
if not os.path.isfile(video_file):
print("Error, video file {} does not exist".format(video_file))
return None
video_end_time = get_video_end_time(video_file)
duration = get_video_duration(video_file)
if video_end_time is None or duration is None:
return None
else:
video_start_time = (
video_end_time - datetime.timedelta(seconds=duration))
return video_start_time
|
def get_video_start_time(video_file)
|
Get video start time as a datetime.
| 2.001729 | 2.007789 | 0.996982 |
'''
Format time string with invalid time elements in hours/minutes/seconds
Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S"
e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11
'''
subseconds = False
data = time_string.split("_")
hours, minutes, seconds = int(data[3]), int(data[4]), int(data[5])
date = datetime.datetime.strptime("_".join(data[:3]), "%Y_%m_%d")
subsec = 0.0
if len(data) == 7:
if float(data[6]) != 0:
subsec = float(data[6]) / 10**len(data[6])
subseconds = True
date_time = date + \
datetime.timedelta(hours=hours, minutes=minutes,
seconds=seconds + subsec)
return date_time, subseconds
|
def format_time(time_string)
|
Format time string with invalid time elements in hours/minutes/seconds
Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S"
e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11
| 3.092477 | 1.962948 | 1.575425 |
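A worked example of the overflow handling documented above: an out-of-range hour such as 24 rolls into the next day via timedelta arithmetic rather than failing in strptime:

```python
import datetime

# Parse only the date, then add the (possibly invalid) time components.
date = datetime.datetime.strptime('2014_03_31', '%Y_%m_%d')
rolled = date + datetime.timedelta(hours=24, minutes=10, seconds=11)
assert rolled == datetime.datetime(2014, 4, 1, 0, 10, 11)
```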
'''
Extract a value for a list of ordered fields.
Return the value of the first existing field in the list.
'''
for field in fields:
if field in self.tags:
if field_type is float:
value = eval_frac(self.tags[field].values[0])
if field_type is str:
value = str(self.tags[field].values)
if field_type is int:
value = int(self.tags[field].values[0])
return value, field
return default, None
|
def _extract_alternative_fields(self, fields, default=None, field_type=float)
|
Extract a value for a list of ordered fields.
Return the value of the first existing field in the list.
| 3.967167 | 2.622128 | 1.512957 |
'''
Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash}
'''
mapillary_description = json.loads(self.extract_image_description())
lat = None
lon = None
ca = None
date_time = None
if "MAPLatitude" in mapillary_description:
lat = mapillary_description["MAPLatitude"]
if "MAPLongitude" in mapillary_description:
lon = mapillary_description["MAPLongitude"]
if "MAPCompassHeading" in mapillary_description:
if 'TrueHeading' in mapillary_description["MAPCompassHeading"]:
ca = mapillary_description["MAPCompassHeading"]['TrueHeading']
if "MAPCaptureTime" in mapillary_description:
date_time = datetime.datetime.strptime(
mapillary_description["MAPCaptureTime"], "%Y_%m_%d_%H_%M_%S_%f").strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]
filename = '{}_{}_{}_{}_{}'.format(
lat, lon, ca, date_time, uuid.uuid4())
return filename
|
def exif_name(self)
|
Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash}
| 2.688666 | 2.066146 | 1.301295 |
'''
Extract altitude
'''
altitude_ref = {
0: 1,
1: -1}
fields = ['GPS GPSAltitude', 'EXIF GPS GPSAltitude']
refs = ['GPS GPSAltitudeRef', 'EXIF GPS GPSAltitudeRef']
altitude, _ = self._extract_alternative_fields(fields, 0, float)
present_refs = [self.tags[x].values for x in refs if x in self.tags]
ref = present_refs[0][0] if present_refs else 0
return altitude * altitude_ref[ref]
|
def extract_altitude(self)
|
Extract altitude
| 5.13908 | 4.822637 | 1.065616 |
'''
Extract capture time from EXIF
return a datetime object
TODO: handle GPS DateTime
'''
time_string = exif_datetime_fields()[0]
capture_time, time_field = self._extract_alternative_fields(
time_string, 0, str)
if time_field in exif_gps_date_fields()[0]:
capture_time = self.extract_gps_time()
return capture_time
if capture_time == 0:
# try interpret the filename
try:
capture_time = datetime.datetime.strptime(os.path.basename(
self.filename)[:-4] + '000', '%Y_%m_%d_%H_%M_%S_%f')
except ValueError:
return None
else:
capture_time = capture_time.replace(" ", "_")
capture_time = capture_time.replace(":", "_")
capture_time = capture_time.replace(".", "_")
capture_time = capture_time.replace("-", "_")
capture_time = capture_time.replace(",", "_")
capture_time = "_".join(
[ts for ts in capture_time.split("_") if ts.isdigit()])
capture_time, subseconds = format_time(capture_time)
sub_sec = "0"
if not subseconds:
sub_sec = self.extract_subsec()
capture_time = capture_time + \
datetime.timedelta(seconds=float("0." + sub_sec))
return capture_time
|
def extract_capture_time(self)
|
Extract capture time from EXIF
return a datetime object
TODO: handle GPS DateTime
| 3.687655 | 3.25454 | 1.13308 |
'''
Extract image direction (i.e. compass, heading, bearing)
'''
fields = ['GPS GPSImgDirection',
'EXIF GPS GPSImgDirection',
'GPS GPSTrack',
'EXIF GPS GPSTrack']
direction, _ = self._extract_alternative_fields(fields)
if direction is not None:
direction = normalize_bearing(direction, check_hex=True)
return direction
|
def extract_direction(self)
|
Extract image direction (i.e. compass, heading, bearing)
| 6.47623 | 4.248909 | 1.52421 |
'''
Extract geo-related information from exif
'''
altitude = self.extract_altitude()
dop = self.extract_dop()
lon, lat = self.extract_lon_lat()
d = {}
if lon is not None and lat is not None:
d['latitude'] = lat
d['longitude'] = lon
if altitude is not None:
d['altitude'] = altitude
if dop is not None:
d['dop'] = dop
return d
|
def extract_geo(self)
|
Extract geo-related information from exif
| 2.943413 | 2.34335 | 1.25607 |
'''
Extract timestamp from GPS field.
'''
gps_date_field = "GPS GPSDate"
gps_time_field = "GPS GPSTimeStamp"
gps_time = 0
if gps_date_field in self.tags and gps_time_field in self.tags:
date = str(self.tags[gps_date_field].values).split(":")
if int(date[0]) == 0 or int(date[1]) == 0 or int(date[2]) == 0:
return None
t = self.tags[gps_time_field]
gps_time = datetime.datetime(
year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
hour=int(eval_frac(t.values[0])),
minute=int(eval_frac(t.values[1])),
second=int(eval_frac(t.values[2])),
)
microseconds = datetime.timedelta(
microseconds=int((eval_frac(t.values[2]) % 1) * 1e6))
gps_time += microseconds
return gps_time
|
def extract_gps_time(self)
|
Extract timestamp from GPS field.
| 2.465314 | 2.272519 | 1.084837 |
'''
Extract a list of exif infos
'''
width, height = self.extract_image_size()
make, model = self.extract_make(), self.extract_model()
orientation = self.extract_orientation()
geo = self.extract_geo()
capture = self.extract_capture_time()
direction = self.extract_direction()
d = {
'width': width,
'height': height,
'orientation': orientation,
'direction': direction,
'make': make,
'model': model,
'capture_time': capture
}
d['gps'] = geo
return d
|
def extract_exif(self)
|
Extract a list of exif infos
| 2.995027 | 2.659517 | 1.126155 |
'''
Extract image height and width
'''
width, _ = self._extract_alternative_fields(
['Image ImageWidth', 'EXIF ExifImageWidth'], -1, int)
height, _ = self._extract_alternative_fields(
['Image ImageLength', 'EXIF ExifImageLength'], -1, int)
return width, height
|
def extract_image_size(self)
|
Extract image height and width
| 4.514529 | 3.909804 | 1.154669 |
'''
Extract camera make
'''
fields = ['EXIF LensMake', 'Image Make']
make, _ = self._extract_alternative_fields(
fields, default='none', field_type=str)
return make
|
def extract_make(self)
|
Extract camera make
| 12.58847 | 8.988137 | 1.400565 |
'''
Extract camera model
'''
fields = ['EXIF LensModel', 'Image Model']
model, _ = self._extract_alternative_fields(
fields, default='none', field_type=str)
return model
|
def extract_model(self)
|
Extract camera model
| 13.553288 | 9.841224 | 1.377195 |
'''
Extract camera firmware (tag is called 'software' in EXIF)
'''
fields = ['Image Software']
software, _ = self._extract_alternative_fields(
fields, default="", field_type=str)
return software
|
def extract_firmware(self)
|
Extract camera firmware (tag is called 'software' in EXIF)
| 21.641893 | 7.680771 | 2.817672 |
'''
Extract image orientation
'''
fields = ['Image Orientation']
orientation, _ = self._extract_alternative_fields(
fields, default=1, field_type=int)
if orientation not in range(1, 9):
return 1
return orientation
|
def extract_orientation(self)
|
Extract image orientation
| 7.55509 | 6.024837 | 1.253991 |
'''
Extract microseconds
'''
fields = [
'Image SubSecTimeOriginal',
'EXIF SubSecTimeOriginal',
'Image SubSecTimeDigitized',
'EXIF SubSecTimeDigitized',
'Image SubSecTime',
'EXIF SubSecTime'
]
sub_sec, _ = self._extract_alternative_fields(
fields, default='', field_type=str)
return sub_sec
|
def extract_subsec(self)
|
Extract microseconds
| 4.996644 | 4.452295 | 1.122262 |
'''
Check existence of a list fields in exif
'''
for rexif in fields:
vflag = False
for subrexif in rexif:
if subrexif in self.tags:
vflag = True
if not vflag:
print("Missing required EXIF tag: {0} for image {1}".format(
rexif[0], self.filename))
return False
return True
|
def fields_exist(self, fields)
|
Check existence of a list fields in exif
| 5.610631 | 3.846395 | 1.458673 |
'''
Check existence of required Mapillary tags
'''
description_tag = "Image ImageDescription"
if description_tag not in self.tags:
return False
for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
return False
return True
|
def mapillary_tag_exists(self)
|
Check existence of required Mapillary tags
| 7.070605 | 5.475812 | 1.291243 |
'''
Send query to the search API and get dict with image data.
'''
# Create URL
params = urllib.urlencode(zip(
['client_id', 'bbox', 'per_page'],
[CLIENT_ID, ','.join([str(min_lon), str(min_lat), str(
max_lon), str(max_lat)]), str(max_results)]
))
print(MAPILLARY_API_IM_SEARCH_URL + params)
# Get data from server, then parse JSON
query = urllib2.urlopen(MAPILLARY_API_IM_SEARCH_URL + params).read()
query = json.loads(query)['features']
print("Result: {0} images in area.".format(len(query)))
return query
|
def query_search_api(min_lat, max_lat, min_lon, max_lon, max_results)
|
Send query to the search API and get dict with image data.
| 4.100819 | 3.452658 | 1.187728 |
'''
Download images in query result to path.
Return list of downloaded images with lat,lon.
There are four sizes available: 320, 640, 1024 (default), or 2048.
'''
im_size = "thumb-{0}.jpg".format(size)
im_list = []
for im in query:
# Use key to create url to download from and filename to save into
key = im['properties']['key']
url = MAPILLARY_API_IM_RETRIEVE_URL + key + '/' + im_size
filename = key + ".jpg"
try:
# Get image and save to disk
image = urllib.URLopener()
image.retrieve(url, path + filename)
# Log filename and GPS location
coords = ",".join(map(str, im['geometry']['coordinates']))
im_list.append([filename, coords])
print("Successfully downloaded: {0}".format(filename))
except KeyboardInterrupt:
break
except Exception as e:
print("Failed to download: {} due to {}".format(filename, e))
return im_list
|
def download_images(query, path, size=1024)
|
Download images in query result to path.
Return list of downloaded images with lat,lon.
There are four sizes available: 320, 640, 1024 (default), or 2048.
| 4.665938 | 3.385575 | 1.378182 |
'''
Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lon, elevation).
GPX stores time in UTC; by default we assume your camera used the local time
and convert accordingly.
'''
with open(gpx_file, 'r') as f:
gpx = gpxpy.parse(f)
points = []
if len(gpx.tracks) > 0:
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
t = utc_to_localtime(point.time) if local_time else point.time
points.append((t, point.latitude, point.longitude, point.elevation))
if len(gpx.waypoints) > 0:
for point in gpx.waypoints:
t = utc_to_localtime(point.time) if local_time else point.time
points.append((t, point.latitude, point.longitude, point.elevation))
# sort by time just in case
points.sort()
return points
|
def get_lat_lon_time_from_gpx(gpx_file, local_time=True)
|
Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lon, elevation).
GPX stores time in UTC; by default we assume your camera used the local time
and convert accordingly.
| 2.487591 | 1.588807 | 1.565698 |
'''
Read location and time stamps from a track in a NMEA file.
Returns a list of tuples (time, lat, lon, elevation).
GPX stores time in UTC; by default we assume your camera used the local time
and convert accordingly.
'''
with open(nmea_file, "r") as f:
lines = f.readlines()
lines = [l.rstrip("\n\r") for l in lines]
# Get initial date
for l in lines:
if "GPRMC" in l:
data = pynmea2.parse(l)
date = data.datetime.date()
break
# Parse GPS trace
points = []
for l in lines:
if "GPRMC" in l:
data = pynmea2.parse(l)
date = data.datetime.date()
if "$GPGGA" in l:
data = pynmea2.parse(l)
timestamp = datetime.datetime.combine(date, data.timestamp)
lat, lon, alt = data.latitude, data.longitude, data.altitude
points.append((timestamp, lat, lon, alt))
points.sort()
return points
|
def get_lat_lon_time_from_nmea(nmea_file, local_time=True)
|
Read location and time stamps from a track in a NMEA file.
Returns a list of tuples (time, lat, lon, elevation).
GPX stores time in UTC; by default we assume your camera used the local time
and convert accordingly.
| 3.130302 | 1.971723 | 1.587598 |
'''
Compute ECEF XYZ from latitude, longitude and altitude.
All using the WGS84 model.
Altitude is the distance to the WGS84 ellipsoid.
Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm
'''
a2 = WGS84_a ** 2
b2 = WGS84_b ** 2
lat = math.radians(lat)
lon = math.radians(lon)
L = 1.0 / math.sqrt(a2 * math.cos(lat) ** 2 + b2 * math.sin(lat) ** 2)
x = (a2 * L + alt) * math.cos(lat) * math.cos(lon)
y = (a2 * L + alt) * math.cos(lat) * math.sin(lon)
z = (b2 * L + alt) * math.sin(lat)
return x, y, z
|
def ecef_from_lla(lat, lon, alt)
|
Compute ECEF XYZ from latitude, longitude and altitude.
All using the WGS84 model.
Altitude is the distance to the WGS84 ellipsoid.
Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm
| 2.425774 | 1.537908 | 1.57732 |
'''
Distance between two (lat,lon) pairs.
>>> p1 = (42.1, -11.1)
>>> p2 = (42.2, -11.3)
>>> 19000 < gps_distance(p1, p2) < 20000
True
'''
x1, y1, z1 = ecef_from_lla(latlon_1[0], latlon_1[1], 0.)
x2, y2, z2 = ecef_from_lla(latlon_2[0], latlon_2[1], 0.)
dis = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)
return dis
|
def gps_distance(latlon_1, latlon_2)
|
Distance between two (lat,lon) pairs.
>>> p1 = (42.1, -11.1)
>>> p2 = (42.2, -11.3)
>>> 19000 < gps_distance(p1, p2) < 20000
True
| 2.004842 | 1.469445 | 1.364353 |
'''
Returns the radius of an entire GPS track. Used to determine whether the entire sequence was just a stationary video
Takes a sequence of points as input
'''
latlon_list = []
# Remove timestamps from list
for idx, point in enumerate(latlon_track):
lat = latlon_track[idx][1]
lon = latlon_track[idx][2]
alt = latlon_track[idx][3]
latlon_list.append([lat, lon, alt])
start_position = latlon_list[0]
max_distance = 0
for position in latlon_list:
distance = gps_distance(start_position, position)
if distance > max_distance:
max_distance = distance
return max_distance
|
def get_max_distance_from_start(latlon_track)
|
Returns the radius of an entire GPS track. Used to determine whether the entire sequence was just a stationary video
Takes a sequence of points as input
| 3.815135 | 2.043617 | 1.866854 |
'''
Returns the total distance traveled over a GPS track. Used to determine whether the entire sequence was just a stationary video
Takes a sequence of points as input
'''
latlon_list = []
# Remove timestamps from list
for idx, point in enumerate(latlon_track):
lat = latlon_track[idx][1]
lon = latlon_track[idx][2]
alt = latlon_track[idx][3]
latlon_list.append([lat, lon, alt])
total_distance = 0
last_position = latlon_list[0]
for position in latlon_list:
total_distance += gps_distance(last_position, position)
last_position = position
return total_distance
|
def get_total_distance_traveled(latlon_track)
|
Returns the total distance traveled over a GPS track. Used to determine whether the entire sequence was just a stationary video
Takes a sequence of points as input
| 3.614199 | 2.034278 | 1.77665 |
'''
Convert from degrees, minutes, seconds to decimal degrees.
@author: mprins
'''
dms = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
if hemisphere in "WwSs":
dms = -1 * dms
return dms
|
def dms_to_decimal(degrees, minutes, seconds, hemisphere)
|
Convert from degrees, minutes, seconds to decimal degrees.
@author: mprins
| 3.990396 | 2.839958 | 1.40509 |
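A worked example for the conversion above: 52° 30' 36" in the southern hemisphere becomes a negative decimal value:

```python
def dms_to_decimal(degrees, minutes, seconds, hemisphere):
    # 1 degree = 60 minutes = 3600 seconds; W/S hemispheres are negative.
    dms = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
    if hemisphere in 'WwSs':
        dms = -1 * dms
    return dms


# 52 + 30/60 + 36/3600 = 52.51, negated for 'S'.
assert abs(dms_to_decimal(52, 30, 36, 'S') + 52.51) < 1e-9
```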