code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def get_contributors(gh, repo_id):
    """Get list of contributors to a repository.

    :param gh: an authenticated github3-style client (provides
        ``repository_with_id``) — assumed, TODO confirm against caller.
    :param repo_id: numeric GitHub repository id.
    :returns: iterable of ``{'name': ..., 'affiliation': ...}`` dicts ordered
        by descending contribution count, or ``None`` on any failure.
    """
    try:
        # FIXME: Use `github3.Repository.contributors` to get this information
        contrib_url = gh.repository_with_id(repo_id).contributors_url
        r = requests.get(contrib_url)
        if r.status_code == 200:
            contributors = r.json()

            def get_author(contributor):
                # Fetch the full user record; returns None (implicitly) when
                # the per-user request does not succeed.
                r = requests.get(contributor['url'])
                if r.status_code == 200:
                    data = r.json()
                    return dict(
                        name=(data['name'] if 'name' in data and data['name']
                              else data['login']),
                        affiliation=data.get('company') or '',
                    )

            # Sort according to number of contributions
            contributors.sort(key=itemgetter('contributions'))
            # Keep real users only, most contributions first.
            contributors = [get_author(x) for x in reversed(contributors)
                            if x['type'] == 'User']
            # Drop entries whose per-user lookup failed (get_author -> None).
            # NOTE(review): this returns a lazy `filter` object, not a list.
            contributors = filter(lambda x: x is not None, contributors)
            return contributors
        # Non-200 responses fall through and return None.
    except Exception:
        # Deliberate best-effort: any error yields "no contributor info".
        return None
2.939245
2.967424
0.990504
def account_setup(remote, token=None, response=None, account_setup=None):
    """Setup user account.

    Initializes the GitHub account data inside a nested DB transaction and
    links the local user to their GitHub identity.

    :param remote: the OAuth remote application (unused here).
    :param token: remote token carrying ``remote_account`` — assumed non-None
        when this handler fires; TODO confirm with the oauthclient flow.
    """
    gh = GitHubAPI(user_id=token.remote_account.user_id)
    # Nested transaction: account init + external-id link commit atomically.
    with db.session.begin_nested():
        gh.init_account()

        # Create user <-> external id link.
        oauth_link_external_id(
            token.remote_account.user,
            dict(id=str(gh.account.extra_data['id']), method="github")
        )
9.653158
9.465898
1.019783
def account_post_init(remote, token=None):
    """Perform post initialization.

    Collect all repositories the user administers and schedule the webhook
    synchronization task for them.
    """
    user_id = token.remote_account.user_id
    api = GitHubAPI(user_id=user_id)
    admin_repo_ids = []
    for repo in api.api.repositories():
        if repo.permissions['admin']:
            admin_repo_ids.append(repo.id)
    sync_hooks.delay(user_id, admin_repo_ids)
7.169497
7.452936
0.96197
def disconnect(remote):
    """Disconnect callback handler for GitHub.

    Unlinks the external identity, revokes webhook tokens, disables all
    repository hooks, and finally deletes the remote account.

    :param remote: the OAuth remote application being disconnected.
    :returns: a redirect to the oauthclient settings page.
    """
    # User must be authenticated
    if not current_user.is_authenticated:
        return current_app.login_manager.unauthorized()

    external_method = 'github'
    external_ids = [i.id for i in current_user.external_identifiers
                    if i.method == external_method]
    if external_ids:
        oauth_unlink_external_id(dict(id=external_ids[0],
                                      method=external_method))

    user_id = int(current_user.get_id())
    token = RemoteToken.get(user_id, remote.consumer_key)
    if token:
        extra_data = token.remote_account.extra_data

        # Delete the token that we issued for GitHub to deliver webhooks
        webhook_token_id = extra_data.get('tokens', {}).get('webhook')
        ProviderToken.query.filter_by(id=webhook_token_id).delete()

        # Disable GitHub webhooks from our side
        db_repos = Repository.query.filter_by(user_id=user_id).all()
        # Keep repositories with hooks to pass to the celery task later on
        repos_with_hooks = [(r.github_id, r.hook) for r in db_repos if r.hook]
        for repo in db_repos:
            try:
                Repository.disable(user_id=user_id,
                                   github_id=repo.github_id,
                                   name=repo.name)
            except NoResultFound:
                # If the repository doesn't exist, no action is necessary
                pass
        db.session.commit()

        # Send Celery task for webhooks removal and token revocation
        disconnect_github.delay(token.access_token, repos_with_hooks)
        # Delete the RemoteAccount (along with the associated RemoteToken)
        token.remote_account.delete()

    return redirect(url_for('invenio_oauthclient_settings.index'))
4.423737
4.36184
1.01419
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """
    Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField`
        in the parent model which stores the publication date.
    :type publication_date_field: str
    :param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField`
        in the parent model which stores whether comments are enabled.
    :type enable_comments_field: str
    """
    # The publication date drives both auto-close and auto-moderate policies.
    moderator_attrs = dict(
        auto_close_field=publication_date_field,
        auto_moderate_field=publication_date_field,
        enable_field=enable_comments_field,
    )
    # Build a model-specific moderator class on the fly and register it.
    dynamic_moderator = type(
        ParentModel.__name__ + 'Moderator',
        (FluentCommentsModerator,),
        moderator_attrs,
    )
    moderator.register(ParentModel, dynamic_moderator)
4.151262
4.548418
0.912683
def comments_are_open(content_object):
    """Return whether comments are still open for a given target object."""
    mod = get_model_moderator(content_object.__class__)
    # Without a registered moderator, comments are considered open.
    if mod is None:
        return True

    # Reuse the basic Django policies: checks the 'enable_field',
    # 'auto_close_field' and 'close_after' settings.
    return CommentModerator.allow(mod, None, content_object, None)
9.394444
8.953374
1.049263
def comments_are_moderated(content_object):
    """Return whether comments are moderated for a given target object."""
    mod = get_model_moderator(content_object.__class__)
    # Without a registered moderator, nothing is moderated.
    if mod is None:
        return False

    # Reuse the basic Django policies: checks 'auto_moderate_field'
    # and 'moderate_after'.
    return CommentModerator.moderate(mod, None, content_object, None)
8.434027
8.009002
1.053068
def allow(self, comment, content_object, request):
    """
    Determine whether a given comment is allowed to be posted on a given object.

    Returns ``True`` if the comment should be allowed, ``False`` otherwise.
    """
    # Parent class check comes first.
    if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
        return False

    # Akismet check
    if self.akismet_check:
        status = akismet_check(comment, content_object, request)
        action = self.akismet_check_action
        spam_statuses = (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam)
        if action == 'delete' and status in spam_statuses:
            return False  # Akismet marked the comment as spam.
        if action == 'auto' and status == SpamStatus.DefiniteSpam:
            return False  # Clearly spam

    return True
3.545572
3.586064
0.988708
def moderate(self, comment, content_object, request):
    """
    Determine whether a given comment on a given object should be allowed
    to show up immediately, or should be marked non-public and await approval.

    Returns ``True`` if the comment should be moderated (marked non-public),
    ``False`` otherwise.
    """
    # Soft delete checks are done first, so these comments are not mistakenly "just moderated"
    # for expiring the `close_after` date, but correctly get marked as spam instead.
    # This helps staff to quickly see which comments need real moderation.
    if self.akismet_check:
        akismet_result = akismet_check(comment, content_object, request)
        if akismet_result:
            # Typically action=delete never gets here, unless the service was having problems.
            if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
               self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
                comment.is_removed = True  # Set extra marker

            # SpamStatus.Unknown or action=moderate will end up in the moderation queue
            return True

    # Parent class check
    if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
        return True

    # Bad words check
    if self.moderate_bad_words:
        input_words = split_words(comment.comment)
        if self.moderate_bad_words.intersection(input_words):
            return True

    # Akismet check (only when the result was not already consumed above)
    if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
        # Return True if akismet marks this comment as spam and we want to moderate it.
        if akismet_check(comment, content_object, request):
            return True

    return False
6.415975
6.277433
1.02207
def email(self, comment, content_object, request):
    """Overwritten for a better email notification."""
    # Bail out early when notifications are disabled for this moderator.
    if not self.email_notification:
        return
    send_comment_posted(comment, request)
10.979377
8.303303
1.32229
def send_comment_posted(comment, request):
    """
    Send the email to staff that a comment was posted.

    While the django_comments module has email support,
    it doesn't pass the 'request' to the context.
    This also changes the subject to show the page title.
    """
    # Notify the site managers configured in settings.MANAGERS.
    recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
    site = get_current_site(request)
    content_object = comment.content_object
    content_title = force_text(content_object)

    # Subject reflects the comment's moderation state.
    if comment.is_removed:
        subject = u'[{0}] Spam comment on "{1}"'.format(site.name, content_title)
    elif not comment.is_public:
        subject = u'[{0}] Moderated comment on "{1}"'.format(site.name, content_title)
    else:
        subject = u'[{0}] New comment posted on "{1}"'.format(site.name, content_title)

    context = {
        'site': site,
        'comment': comment,
        'content_object': content_object
    }
    message = render_to_string("comments/comment_notification_email.txt", context, request=request)
    if appsettings.FLUENT_COMMENTS_MULTIPART_EMAILS:
        html_message = render_to_string("comments/comment_notification_email.html", context, request=request)
    else:
        html_message = None

    # Best-effort delivery; failures must never break comment posting.
    send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list,
              fail_silently=True, html_message=html_message)
2.230723
2.231821
0.999508
def parse(cls, parser, token):
    """Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag."""
    # Process the template line.
    tag_name, args, kwargs = parse_token_kwargs(
        parser, token,
        allowed_kwargs=cls.allowed_kwargs,
        compile_args=False,  # Only overrule here, keep at render() phase.
        compile_kwargs=cls.compile_kwargs,
    )

    # Remove the "for" keyword, so all other args can be resolved in render().
    if args[0] == 'for':
        args.pop(0)

    # Apply the compilation afterwards.
    args = [parser.compile_filter(arg) for arg in args]

    cls.validate_args(tag_name, *args, **kwargs)
    return cls(tag_name, *args, **kwargs)
6.097346
5.907145
1.032198
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
    """The main logic for the inclusion node, analogous to ``@register.inclusion_node``."""
    context = {
        'STATIC_URL': parent_context.get('STATIC_URL', None),
        'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
        'target_object': tag_args[0],  # moved one spot due to .pop(0)
    }

    # Be configuration independent: fill STATIC_URL from the request's
    # context processor, or fall back to the settings value.
    if context['STATIC_URL'] is None:
        try:
            request = parent_context['request']
        except KeyError:
            context['STATIC_URL'] = settings.STATIC_URL
        else:
            context.update(context_processors.static(request))

    return context
4.184677
4.190144
0.998695
def get_comment_template_name(comment):
    """Internal function for the rendering of comments."""
    ctype = ContentType.objects.get_for_id(comment.content_type_id)
    app, model = ctype.app_label, ctype.model
    # Most specific template first, generic fallback last.
    return [
        "comments/{0}/{1}/comment.html".format(app, model),
        "comments/{0}/comment.html".format(app),
        "comments/comment.html",
    ]
1.88657
1.862339
1.013011
def render_layout(self, form, context, template_pack=TEMPLATE_PACK):
    """
    Copy any field label to the ``placeholder`` attribute.
    Note, this method is called when :attr:`layout` is defined.
    """
    # Writing the label values into the field placeholders.
    # This is done at rendering time, so the Form.__init__() could update any labels before.
    # Django 1.11 no longer lets EmailInput or URLInput inherit from TextInput,
    # so checking for `Input` instead while excluding `HiddenInput`.
    for field in form.fields.values():
        if not field.label:
            continue
        widget = field.widget
        if isinstance(widget, (Input, forms.Textarea)) and \
                not isinstance(widget, forms.HiddenInput):
            widget.attrs['placeholder'] = u"{0}:".format(field.label)

    return super(CompactLabelsCommentFormHelper, self).render_layout(
        form, context, template_pack=template_pack)
7.456183
7.163785
1.040816
def get_comments_for_model(content_object, include_moderated=False):
    """Return the QuerySet with all comments for a given model."""
    queryset = get_comments_model().objects.for_model(content_object)
    if include_moderated:
        return queryset
    # By default hide comments that are moderated or removed.
    return queryset.filter(is_public=True, is_removed=False)
2.538637
2.319546
1.094454
def _reorder_fields(self, ordering):
    """
    Test that the 'captcha' field is really present.
    This could be broken by a bad FLUENT_COMMENTS_FIELD_ORDER configuration.

    :param ordering: iterable of field names defining the desired order.
    :raises ImproperlyConfigured: when 'captcha' is missing from the ordering.
    """
    # Fail fast before the parent reorders: a missing captcha entry would
    # silently drop the field from the form.
    if 'captcha' not in ordering:
        raise ImproperlyConfigured(
            "When using 'FLUENT_COMMENTS_FIELD_ORDER', "
            "make sure the 'captcha' field included too to use '{}' form. ".format(
                self.__class__.__name__
            )
        )
    super(CaptchaFormMixin, self)._reorder_fields(ordering)

    # Avoid making captcha required for previews.
    if self.is_preview:
        self.fields.pop('captcha')
8.07149
5.05553
1.596567
def post_comment_ajax(request, using=None):
    """Post a comment, via an Ajax call.

    :param request: the incoming HTTP request (must be an Ajax request).
    :param using: database alias to query the target object on
        — presumably a Django DB alias; TODO confirm with callers.
    :returns: a JSON/Ajax result, or a bad-request response on invalid input.
    """
    if not request.is_ajax():
        return HttpResponseBadRequest("Expecting Ajax call")

    # This is copied from django_comments.
    # Basically that view does too much, and doesn't offer a hook to change the rendering.
    # The request object is not passed to next_redirect for example.
    #
    # This is a separate view to integrate both features. Previously this used django-ajaxcomments
    # which is unfortunately not thread-safe (it it changes the comment view per request).

    # Fill out some initial data fields from an authenticated user, if present
    data = request.POST.copy()
    if request.user.is_authenticated:
        if not data.get('name', ''):
            data["name"] = request.user.get_full_name() or request.user.username
        if not data.get('email', ''):
            data["email"] = request.user.email

    # Look up the object we're trying to comment about
    ctype = data.get("content_type")
    object_pk = data.get("object_pk")
    if ctype is None or object_pk is None:
        return CommentPostBadRequest("Missing content_type or object_pk field.")

    try:
        model = apps.get_model(*ctype.split(".", 1))
        target = model._default_manager.using(using).get(pk=object_pk)
    except ValueError:
        return CommentPostBadRequest("Invalid object_pk value: {0}".format(escape(object_pk)))
    except (TypeError, LookupError):
        return CommentPostBadRequest("Invalid content_type value: {0}".format(escape(ctype)))
    except AttributeError:
        return CommentPostBadRequest("The given content-type {0} does not resolve to a valid model.".format(escape(ctype)))
    except ObjectDoesNotExist:
        return CommentPostBadRequest("No object matching content-type {0} and object PK {1} exists.".format(escape(ctype), escape(object_pk)))
    except (ValueError, ValidationError) as e:
        return CommentPostBadRequest("Attempting go get content-type {0!r} and object PK {1!r} exists raised {2}".format(escape(ctype), escape(object_pk), e.__class__.__name__))

    # Do we want to preview the comment?
    is_preview = "preview" in data

    # Construct the comment form
    form = django_comments.get_form()(target, data=data, is_preview=is_preview)

    # Check security information
    if form.security_errors():
        return CommentPostBadRequest("The comment form failed security verification: {0}".format(form.security_errors()))

    # If there are errors or if we requested a preview show the comment
    if is_preview:
        comment = form.get_comment_object() if not form.errors else None
        return _ajax_result(request, form, "preview", comment, object_id=object_pk)
    if form.errors:
        return _ajax_result(request, form, "post", object_id=object_pk)

    # Otherwise create the comment
    comment = form.get_comment_object()
    comment.ip_address = request.META.get("REMOTE_ADDR", None)
    if request.user.is_authenticated:
        comment.user = request.user

    # Signal that the comment is about to be saved
    responses = signals.comment_will_be_posted.send(
        sender=comment.__class__,
        comment=comment,
        request=request
    )
    for (receiver, response) in responses:
        if response is False:
            return CommentPostBadRequest("comment_will_be_posted receiver {0} killed the comment".format(receiver.__name__))

    # Save the comment and signal that it was saved
    comment.save()
    signals.comment_was_posted.send(
        sender = comment.__class__,
        comment = comment,
        request = request
    )

    return _ajax_result(request, form, "post", comment, object_id=object_pk)
2.879929
2.863876
1.005605
def _render_errors(field):
    """Render form errors in crispy-forms style."""
    template_name = '{0}/layout/field_errors.html'.format(appsettings.CRISPY_TEMPLATE_PACK)
    context = {
        'field': field,
        'form_show_errors': True,
    }
    return render_to_string(template_name, context)
4.180565
3.337553
1.252584
def get_form():
    """Return the form to use for commenting."""
    global form_class
    from fluent_comments import appsettings

    # Resolve lazily and cache in the module-level `form_class`.
    if form_class is None:
        custom_path = appsettings.FLUENT_COMMENTS_FORM_CLASS
        if custom_path:
            from django.utils.module_loading import import_string
            form_class = import_string(custom_path)
        else:
            from fluent_comments.forms import FluentCommentForm
            form_class = FluentCommentForm

    return form_class
2.212231
2.041393
1.083687
def load_default_moderator():
    """Find a moderator object."""
    choice = appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR
    if choice == 'default':
        # Perform spam checks
        return moderation.FluentCommentsModerator(None)
    if choice == 'deny':
        # Deny all comments not from known registered models.
        return moderation.AlwaysDeny(None)
    if str(choice).lower() == 'none':
        # Disables the default moderator
        return moderation.NullModerator(None)
    if '.' in choice:
        # Dotted path to a custom moderator class.
        return import_string(choice)(None)
    raise ImproperlyConfigured(
        "Bad FLUENT_COMMENTS_DEFAULT_MODERATOR value. Provide default/deny/none or a dotted path"
    )
3.818441
3.788697
1.007851
def on_comment_will_be_posted(sender, comment, request, **kwargs):
    """
    Make sure both the Ajax and regular comments are checked for moderation.
    This signal is also used to link moderators to the comment posting.
    """
    content_object = comment.content_object
    moderator = moderation.get_model_moderator(content_object.__class__)
    if moderator and comment.__class__ is not CommentModel:
        # Help with some hard to diagnose problems. The default Django moderator connects
        # to the configured comment model. When this model differs from the signal sender,
        # the form stores a different model than COMMENTS_APP provides.
        #
        # FIX: capture the moderator class name *before* clearing the reference;
        # previously `moderator = None` ran first, so the warning always
        # reported 'NoneType' instead of the actual moderator class.
        moderator_name = moderator.__class__.__name__
        moderator = None
        logger.warning(
            "Comment of type '%s' was not moderated by '%s', "
            "because the parent '%s' has a moderator installed for '%s' instead",
            comment.__class__.__name__, moderator_name,
            content_object.__class__.__name__, CommentModel.__name__
        )

    if moderator is None:
        logger.info(
            "Using default moderator for comment '%s' on parent '%s'",
            comment.__class__.__name__, content_object.__class__.__name__
        )
        _run_default_moderator(comment, content_object, request)
5.752176
5.691848
1.010599
def _run_default_moderator(comment, content_object, request):
    """Run the default moderator."""
    # The default moderator will likely not check things like "auto close";
    # it can still provide akismet and bad word checking.
    allowed = default_moderator.allow(comment, content_object, request)
    if not allowed:
        # Comment will be disallowed outright (HTTP 403 response)
        return False

    needs_moderation = default_moderator.moderate(comment, content_object, request)
    if needs_moderation:
        comment.is_public = False
8.571913
8.790621
0.97512
def on_comment_posted(sender, comment, request, **kwargs):
    """
    Send email notification of a new comment to site staff
    when email notifications have been requested.
    """
    target = comment.content_object
    registered_moderator = moderation.get_model_moderator(target.__class__)
    # No custom moderator (or a mismatching comment model) means no email
    # would be sent otherwise; run the default moderator's email step then.
    if registered_moderator is None or comment.__class__ is not CommentModel:
        default_moderator.email(comment, target, request)
6.528943
6.329392
1.031528
def akismet_check(comment, content_object, request):
    """
    Connects to Akismet and evaluates to True if Akismet marks this comment as spam.

    :raises ImproperlyConfigured: when AKISMET_API_KEY is not configured.
    :rtype: akismet.SpamStatus
    """
    # Return previously cached response (stored on the comment instance so a
    # single request never hits Akismet twice for the same comment).
    akismet_result = getattr(comment, '_akismet_result_', None)
    if akismet_result is not None:
        return akismet_result

    # Get Akismet data
    AKISMET_API_KEY = appsettings.AKISMET_API_KEY
    if not AKISMET_API_KEY:
        raise ImproperlyConfigured('You must set AKISMET_API_KEY to use comment moderation with Akismet.')

    # Derive the blog URL from the current site unless explicitly configured.
    current_domain = get_current_site(request).domain
    auto_blog_url = '{0}://{1}/'.format(request.is_secure() and 'https' or 'http', current_domain)
    blog_url = appsettings.AKISMET_BLOG_URL or auto_blog_url

    akismet = Akismet(
        AKISMET_API_KEY,
        blog=blog_url,
        is_test=int(bool(appsettings.AKISMET_IS_TEST)),
        application_user_agent='django-fluent-comments/{0}'.format(fluent_comments.__version__),
    )

    akismet_data = _get_akismet_data(blog_url, comment, content_object, request)
    akismet_result = akismet.check(**akismet_data)  # raises AkismetServerError when key is invalid
    # Cache the verdict on the comment for subsequent calls in this request.
    setattr(comment, "_akismet_result_", akismet_result)
    return akismet_result
2.5646
2.602924
0.985277
def _handle_remark_msg(self, msg):
    """
    Try to parse the message provided with the remark tag or element.

    :param str msg: The message
    :raises overpy.exception.OverpassRuntimeError: If message starts with 'runtime error:'
    :raises overpy.exception.OverpassRuntimeRemark: If message starts with 'runtime remark:'
    :raises overpy.exception.OverpassUnknownError: If we are unable to identify the error
    """
    msg = msg.strip()
    # Map known message prefixes to their exception types.
    known_prefixes = (
        ("runtime error:", exception.OverpassRuntimeError),
        ("runtime remark:", exception.OverpassRuntimeRemark),
    )
    for prefix, exc_cls in known_prefixes:
        if msg.startswith(prefix):
            raise exc_cls(msg=msg)
    raise exception.OverpassUnknownError(msg=msg)
4.491766
2.071954
2.167889
def query(self, query):
    """
    Query the Overpass API.

    :param String|Bytes query: The query string in Overpass QL
    :return: The parsed result
    :rtype: overpy.Result
    :raises overpy.exception.MaxRetriesReached: when all retries are exhausted.
    """
    if not isinstance(query, bytes):
        query = query.encode("utf-8")

    retry_num = 0
    retry_exceptions = []
    do_retry = True if self.max_retry_count > 0 else False
    while retry_num <= self.max_retry_count:
        if retry_num > 0:
            time.sleep(self.retry_timeout)
        retry_num += 1
        try:
            f = urlopen(self.url, query)
        except HTTPError as e:
            # Error responses still carry a body/status; process them below.
            f = e

        # Read the whole response in chunks.
        response = f.read(self.read_chunk_size)
        while True:
            data = f.read(self.read_chunk_size)
            if len(data) == 0:
                break
            response = response + data
        f.close()

        if f.code == 200:
            if PY2:
                http_info = f.info()
                content_type = http_info.getheader("content-type")
            else:
                content_type = f.getheader("Content-Type")

            if content_type == "application/json":
                return self.parse_json(response)

            if content_type == "application/osm3s+xml":
                return self.parse_xml(response)

            e = exception.OverpassUnknownContentType(content_type)
            if not do_retry:
                raise e
            retry_exceptions.append(e)
            continue

        if f.code == 400:
            # Extract human-readable error messages from the HTML body.
            msgs = []
            for msg in self._regex_extract_error_msg.finditer(response):
                tmp = self._regex_remove_tag.sub(b"", msg.group("msg"))
                try:
                    tmp = tmp.decode("utf-8")
                except UnicodeDecodeError:
                    tmp = repr(tmp)
                msgs.append(tmp)

            e = exception.OverpassBadRequest(
                query,
                msgs=msgs
            )
            if not do_retry:
                raise e
            retry_exceptions.append(e)
            continue

        if f.code == 429:
            # FIX: instantiate the exception (previously the bare class was
            # raised/collected, leaving classes mixed with instances in
            # retry_exceptions handed to MaxRetriesReached).
            e = exception.OverpassTooManyRequests()
            if not do_retry:
                raise e
            retry_exceptions.append(e)
            continue

        if f.code == 504:
            # FIX: instantiate the exception (see the 429 branch above).
            e = exception.OverpassGatewayTimeout()
            if not do_retry:
                raise e
            retry_exceptions.append(e)
            continue

        e = exception.OverpassUnknownHTTPStatusCode(f.code)
        if not do_retry:
            raise e
        retry_exceptions.append(e)
        continue

    raise exception.MaxRetriesReached(retry_count=retry_num, exceptions=retry_exceptions)
2.175782
2.139175
1.017113
def parse_json(self, data, encoding="utf-8"):
    """
    Parse raw response from Overpass service.

    :param data: Raw JSON Data
    :type data: String or Bytes
    :param encoding: Encoding to decode byte string
    :type encoding: String
    :return: Result object
    :rtype: overpy.Result
    """
    if isinstance(data, bytes):
        data = data.decode(encoding)
    # Keep floats as Decimal to avoid precision loss in coordinates.
    parsed = json.loads(data, parse_float=Decimal)
    if "remark" in parsed:
        # Server-side remarks usually signal runtime errors.
        self._handle_remark_msg(msg=parsed.get("remark"))
    return Result.from_json(parsed, api=self)
4.667569
3.878443
1.203465
def expand(self, other):
    """
    Add all elements from an other result to the list of elements of this result object.

    It is used by the auto resolve feature.

    :param other: Expand the result with the elements from this result.
    :type other: overpy.Result
    :raises ValueError: If provided parameter is not instance of :class:`overpy.Result`
    """
    if not isinstance(other, Result):
        raise ValueError("Provided argument has to be instance of overpy:Result()")

    other_collections = {
        Node: other.nodes,
        Way: other.ways,
        Relation: other.relations,
        Area: other.areas,
    }
    for element_cls, own_collection in self._class_collection_map.items():
        for candidate in other_collections[element_cls]:
            # Skip wrong types and elements we already hold.
            if not is_valid_type(candidate, element_cls):
                continue
            if candidate.id in own_collection:
                continue
            own_collection[candidate.id] = candidate
4.616379
3.803428
1.213741
def append(self, element):
    """
    Append a new element to the result.

    :param element: The element to append
    :type element: overpy.Element
    """
    # Silently ignore anything that is not a valid overpy element.
    if not is_valid_type(element, Element):
        return
    # setdefault keeps the first instance seen for a given id.
    collection = self._class_collection_map[element.__class__]
    collection.setdefault(element.id, element)
10.068097
11.245807
0.895276
def get_elements(self, filter_cls, elem_id=None):
    """
    Get a list of elements from the result and filter the element type by a class.

    :param filter_cls: Element class to filter by.
    :param elem_id: ID of the object
    :type elem_id: Integer
    :return: List of available elements
    :rtype: List
    """
    if elem_id is not None:
        # Single-element lookup; missing ids (or unknown classes) yield [].
        try:
            return [self._class_collection_map[filter_cls][elem_id]]
        except KeyError:
            return []
    # No id given: return every element of the requested class.
    return list(self._class_collection_map[filter_cls].values())
2.424708
2.738659
0.885363
def from_json(cls, data, api=None):
    """
    Create a new instance and load data from json object.

    :param data: JSON data returned by the Overpass API
    :type data: Dict
    :param api:
    :type api: overpy.Overpass
    :return: New instance of Result object
    :rtype: overpy.Result
    """
    result = cls(api=api)
    # Group appends per element class (Node first, then Way, Relation, Area).
    for elem_cls in [Node, Way, Relation, Area]:
        type_name = elem_cls._type_value
        for raw_element in data.get("elements", []):
            e_type = raw_element.get("type")
            if hasattr(e_type, "lower") and e_type.lower() == type_name:
                result.append(elem_cls.from_json(raw_element, result=result))
    return result
4.129535
3.574597
1.155245
def from_xml(cls, data, api=None, parser=None):
    """
    Create a new instance and load data from xml data or object.

    .. note::
        If parser is set to None, the functions tries to find the best parse.
        By default the SAX parser is chosen if a string is provided as data.
        The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.

    :param data: Root element
    :type data: str | xml.etree.ElementTree.Element
    :param api: The instance to query additional information if required.
    :type api: Overpass
    :param parser: Specify the parser to use(DOM or SAX)(Default: None = autodetect, defaults to SAX)
    :type parser: Integer | None
    :return: New instance of Result object
    :rtype: Result
    """
    if parser is None:
        # Autodetect: strings stream well through SAX; an Element needs DOM.
        if isinstance(data, str):
            parser = XML_PARSER_SAX
        else:
            parser = XML_PARSER_DOM

    result = cls(api=api)
    if parser == XML_PARSER_DOM:
        import xml.etree.ElementTree as ET
        if isinstance(data, str):
            root = ET.fromstring(data)
        elif isinstance(data, ET.Element):
            root = data
        else:
            raise exception.OverPyException("Unable to detect data type.")

        for elem_cls in [Node, Way, Relation, Area]:
            for child in root:
                if child.tag.lower() == elem_cls._type_value:
                    result.append(elem_cls.from_xml(child, result=result))

    elif parser == XML_PARSER_SAX:
        if PY2:
            from StringIO import StringIO
        else:
            from io import StringIO
        source = StringIO(data)
        sax_handler = OSMSAXHandler(result)
        # NOTE(review): rebinds the `parser` selector argument with the SAX
        # parser object; harmless here since the selector is no longer read.
        parser = make_parser()
        parser.setContentHandler(sax_handler)
        parser.parse(source)
    else:
        # ToDo: better exception
        raise Exception("Unknown XML parser")
    return result
2.966142
2.58076
1.149329
def get_area(self, area_id, resolve_missing=False):
    """
    Get an area by its ID.

    :param area_id: The area ID
    :type area_id: Integer
    :param resolve_missing: Query the Overpass API if the area is missing in the result set.
    :return: The area
    :rtype: overpy.Area
    :raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
    :raises overpy.exception.DataIncomplete: If resolve_missing is True and the area can't be resolved.
    """
    areas = self.get_areas(area_id=area_id)
    if len(areas) == 0:
        if resolve_missing is False:
            raise exception.DataIncomplete("Resolve missing area is disabled")

        # Fetch the missing area from the API and merge it into this result.
        fetched = self.api.query(
            "\n[out:json];\narea({area_id});\nout body;\n".format(area_id=area_id)
        )
        self.expand(fetched)

        areas = self.get_areas(area_id=area_id)
        if len(areas) == 0:
            raise exception.DataIncomplete("Unable to resolve requested areas")

    return areas[0]
3.192134
2.721116
1.173097
def get_areas(self, area_id=None, **kwargs):
    """
    Alias for get_elements() but filter the result by Area.

    :param area_id: The Id of the area
    :type area_id: Integer
    :return: List of elements
    """
    return self.get_elements(filter_cls=Area, elem_id=area_id, **kwargs)
7.119244
8.122379
0.876497
def get_node(self, node_id, resolve_missing=False):
    """
    Get a node by its ID.

    :param node_id: The node ID
    :type node_id: Integer
    :param resolve_missing: Query the Overpass API if the node is missing in the result set.
    :return: The node
    :rtype: overpy.Node
    :raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
    :raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
    """
    nodes = self.get_nodes(node_id=node_id)
    if len(nodes) == 0:
        if not resolve_missing:
            raise exception.DataIncomplete("Resolve missing nodes is disabled")

        # Fetch the missing node from the API and merge it into this result.
        fetched = self.api.query(
            "\n[out:json];\nnode({node_id});\nout body;\n".format(node_id=node_id)
        )
        self.expand(fetched)

        nodes = self.get_nodes(node_id=node_id)
        if len(nodes) == 0:
            raise exception.DataIncomplete("Unable to resolve all nodes")

    return nodes[0]
3.006658
2.577747
1.16639
def get_nodes(self, node_id=None, **kwargs):
    """
    Alias for get_elements() but filter the result by Node().

    :param node_id: The Id of the node
    :type node_id: Integer
    :return: List of elements
    """
    return self.get_elements(filter_cls=Node, elem_id=node_id, **kwargs)
7.616557
8.679691
0.877515
def get_relation(self, rel_id, resolve_missing=False):
    """
    Get a relation by its ID.

    :param rel_id: The relation ID
    :type rel_id: Integer
    :param resolve_missing: Query the Overpass API if the relation is missing in the result set.
    :return: The relation
    :rtype: overpy.Relation
    :raises overpy.exception.DataIncomplete: The requested relation is not available in the result cache.
    :raises overpy.exception.DataIncomplete: If resolve_missing is True and the relation can't be resolved.
    """
    relations = self.get_relations(rel_id=rel_id)
    if len(relations) == 0:
        if resolve_missing is False:
            raise exception.DataIncomplete("Resolve missing relations is disabled")

        # Fetch the missing relation from the API and merge it into this result.
        fetched = self.api.query(
            "\n[out:json];\nrelation({relation_id});\nout body;\n".format(relation_id=rel_id)
        )
        self.expand(fetched)

        relations = self.get_relations(rel_id=rel_id)
        if len(relations) == 0:
            raise exception.DataIncomplete("Unable to resolve requested reference")

    return relations[0]
3.25958
2.815489
1.157731
def get_relations(self, rel_id=None, **kwargs):
    """
    Alias for get_elements() but filter the result by Relation.

    :param rel_id: Id of the relation
    :type rel_id: Integer
    :return: List of elements
    """
    return self.get_elements(filter_cls=Relation, elem_id=rel_id, **kwargs)
7.357014
7.455112
0.986841
def get_way(self, way_id, resolve_missing=False):
    """
    Get a way by its ID.

    :param way_id: The way ID
    :type way_id: Integer
    :param resolve_missing: Query the Overpass API if the way is missing in the result set.
    :return: The way
    :rtype: overpy.Way
    :raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
    :raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
    """
    ways = self.get_ways(way_id=way_id)
    if len(ways) == 0:
        if resolve_missing is False:
            raise exception.DataIncomplete("Resolve missing way is disabled")

        # Fetch the missing way from the API and merge it into this result.
        fetched = self.api.query(
            "\n[out:json];\nway({way_id});\nout body;\n".format(way_id=way_id)
        )
        self.expand(fetched)

        ways = self.get_ways(way_id=way_id)
        if len(ways) == 0:
            raise exception.DataIncomplete("Unable to resolve requested way")

    return ways[0]
2.833235
2.641169
1.07272
def get_ways(self, way_id=None, **kwargs):
    """
    Alias for get_elements() but filter the result by Way.

    :param way_id: The Id of the way
    :type way_id: Integer
    :return: List of elements
    """
    return self.get_elements(filter_cls=Way, elem_id=way_id, **kwargs)
6.076969
7.268125
0.836112
def get_center_from_json(cls, data):
    """
    Get center information from json data.

    :param data: json data
    :return: tuple with two elements: lat and lon
    :rtype: tuple
    :raises ValueError: if the center lat/lon can not be extracted.
    """
    lat = lon = None
    center = data.get("center")
    if isinstance(center, dict):
        lat = center.get("lat")
        lon = center.get("lon")

    if lat is None or lon is None:
        raise ValueError("Unable to get lat or lon of way center.")

    # Decimal keeps full coordinate precision.
    return (Decimal(lat), Decimal(lon))
2.226508
2.362587
0.942403
if child.tag.lower() != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=child.tag.lower() ) tags = {} for sub_child in child: if sub_child.tag.lower() == "tag": name = sub_child.attrib.get("k") if name is None: raise ValueError("Tag without name/key.") value = sub_child.attrib.get("v") tags[name] = value area_id = child.attrib.get("id") if area_id is not None: area_id = int(area_id) attributes = {} ignore = ["id"] for n, v in child.attrib.items(): if n in ignore: continue attributes[n] = v return cls(area_id=area_id, attributes=attributes, tags=tags, result=result)
def from_xml(cls, child, result=None)
Create new way element from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this node belongs to :type result: overpy.Result :return: New Way oject :rtype: overpy.Way :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match :raises ValueError: If the ref attribute of the xml node is not provided :raises ValueError: If a tag doesn't have a name
2.617192
2.338133
1.119351
if data.get("type") != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=data.get("type") ) tags = data.get("tags", {}) node_id = data.get("id") lat = data.get("lat") lon = data.get("lon") attributes = {} ignore = ["type", "id", "lat", "lon", "tags"] for n, v in data.items(): if n in ignore: continue attributes[n] = v return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result)
def from_json(cls, data, result=None)
Create new Node element from JSON data :param data: Element data from JSON :type data: Dict :param result: The result this element belongs to :type result: overpy.Result :return: New instance of Node :rtype: overpy.Node :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
2.590113
2.165703
1.195969
if child.tag.lower() != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=child.tag.lower() ) tags = {} for sub_child in child: if sub_child.tag.lower() == "tag": name = sub_child.attrib.get("k") if name is None: raise ValueError("Tag without name/key.") value = sub_child.attrib.get("v") tags[name] = value node_id = child.attrib.get("id") if node_id is not None: node_id = int(node_id) lat = child.attrib.get("lat") if lat is not None: lat = Decimal(lat) lon = child.attrib.get("lon") if lon is not None: lon = Decimal(lon) attributes = {} ignore = ["id", "lat", "lon"] for n, v in child.attrib.items(): if n in ignore: continue attributes[n] = v return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result)
def from_xml(cls, child, result=None)
Create new way element from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this node belongs to :type result: overpy.Result :return: New Way oject :rtype: overpy.Node :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match :raises ValueError: If a tag doesn't have a name
2.176021
1.96866
1.105331
result = [] resolved = False for node_id in self._node_ids: try: node = self._result.get_node(node_id) except exception.DataIncomplete: node = None if node is not None: result.append(node) continue if not resolve_missing: raise exception.DataIncomplete("Resolve missing nodes is disabled") # We tried to resolve the data but some nodes are still missing if resolved: raise exception.DataIncomplete("Unable to resolve all nodes") query = ("\n" "[out:json];\n" "way({way_id});\n" "node(w);\n" "out body;\n" ) query = query.format( way_id=self.id ) tmp_result = self._result.api.query(query) self._result.expand(tmp_result) resolved = True try: node = self._result.get_node(node_id) except exception.DataIncomplete: node = None if node is None: raise exception.DataIncomplete("Unable to resolve all nodes") result.append(node) return result
def get_nodes(self, resolve_missing=False)
Get the nodes defining the geometry of the way :param resolve_missing: Try to resolve missing nodes. :type resolve_missing: Boolean :return: List of nodes :rtype: List of overpy.Node :raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache. :raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
3.010983
2.581548
1.166348
if data.get("type") != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=data.get("type") ) tags = data.get("tags", {}) way_id = data.get("id") node_ids = data.get("nodes") (center_lat, center_lon) = cls.get_center_from_json(data=data) attributes = {} ignore = ["center", "id", "nodes", "tags", "type"] for n, v in data.items(): if n in ignore: continue attributes[n] = v return cls( attributes=attributes, center_lat=center_lat, center_lon=center_lon, node_ids=node_ids, tags=tags, result=result, way_id=way_id )
def from_json(cls, data, result=None)
Create new Way element from JSON data :param data: Element data from JSON :type data: Dict :param result: The result this element belongs to :type result: overpy.Result :return: New instance of Way :rtype: overpy.Way :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
2.816895
2.352754
1.197276
if child.tag.lower() != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=child.tag.lower() ) tags = {} node_ids = [] center_lat = None center_lon = None for sub_child in child: if sub_child.tag.lower() == "tag": name = sub_child.attrib.get("k") if name is None: raise ValueError("Tag without name/key.") value = sub_child.attrib.get("v") tags[name] = value if sub_child.tag.lower() == "nd": ref_id = sub_child.attrib.get("ref") if ref_id is None: raise ValueError("Unable to find required ref value.") ref_id = int(ref_id) node_ids.append(ref_id) if sub_child.tag.lower() == "center": (center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child) way_id = child.attrib.get("id") if way_id is not None: way_id = int(way_id) attributes = {} ignore = ["id"] for n, v in child.attrib.items(): if n in ignore: continue attributes[n] = v return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon, attributes=attributes, node_ids=node_ids, tags=tags, result=result)
def from_xml(cls, child, result=None)
Create new way element from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this node belongs to :type result: overpy.Result :return: New Way oject :rtype: overpy.Way :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match :raises ValueError: If the ref attribute of the xml node is not provided :raises ValueError: If a tag doesn't have a name
2.376032
2.116454
1.122647
if data.get("type") != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=data.get("type") ) tags = data.get("tags", {}) rel_id = data.get("id") (center_lat, center_lon) = cls.get_center_from_json(data=data) members = [] supported_members = [RelationNode, RelationWay, RelationRelation] for member in data.get("members", []): type_value = member.get("type") for member_cls in supported_members: if member_cls._type_value == type_value: members.append( member_cls.from_json( member, result=result ) ) attributes = {} ignore = ["id", "members", "tags", "type"] for n, v in data.items(): if n in ignore: continue attributes[n] = v return cls( rel_id=rel_id, attributes=attributes, center_lat=center_lat, center_lon=center_lon, members=members, tags=tags, result=result )
def from_json(cls, data, result=None)
Create new Relation element from JSON data :param data: Element data from JSON :type data: Dict :param result: The result this element belongs to :type result: overpy.Result :return: New instance of Relation :rtype: overpy.Relation :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
2.651139
2.298977
1.153182
if child.tag.lower() != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=child.tag.lower() ) tags = {} members = [] center_lat = None center_lon = None supported_members = [RelationNode, RelationWay, RelationRelation, RelationArea] for sub_child in child: if sub_child.tag.lower() == "tag": name = sub_child.attrib.get("k") if name is None: raise ValueError("Tag without name/key.") value = sub_child.attrib.get("v") tags[name] = value if sub_child.tag.lower() == "member": type_value = sub_child.attrib.get("type") for member_cls in supported_members: if member_cls._type_value == type_value: members.append( member_cls.from_xml( sub_child, result=result ) ) if sub_child.tag.lower() == "center": (center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child) rel_id = child.attrib.get("id") if rel_id is not None: rel_id = int(rel_id) attributes = {} ignore = ["id"] for n, v in child.attrib.items(): if n in ignore: continue attributes[n] = v return cls( rel_id=rel_id, attributes=attributes, center_lat=center_lat, center_lon=center_lon, members=members, tags=tags, result=result )
def from_xml(cls, child, result=None)
Create new way element from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this node belongs to :type result: overpy.Result :return: New Way oject :rtype: overpy.Relation :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match :raises ValueError: If a tag doesn't have a name
2.41029
2.220741
1.085354
if data.get("type") != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=data.get("type") ) ref = data.get("ref") role = data.get("role") attributes = {} ignore = ["geometry", "type", "ref", "role"] for n, v in data.items(): if n in ignore: continue attributes[n] = v geometry = data.get("geometry") if isinstance(geometry, list): geometry_orig = geometry geometry = [] for v in geometry_orig: geometry.append( RelationWayGeometryValue( lat=v.get("lat"), lon=v.get("lon") ) ) else: geometry = None return cls( attributes=attributes, geometry=geometry, ref=ref, role=role, result=result )
def from_json(cls, data, result=None)
Create new RelationMember element from JSON data :param child: Element data from JSON :type child: Dict :param result: The result this element belongs to :type result: overpy.Result :return: New instance of RelationMember :rtype: overpy.RelationMember :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
2.967635
2.518195
1.178477
if child.attrib.get("type") != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=child.tag.lower() ) ref = child.attrib.get("ref") if ref is not None: ref = int(ref) role = child.attrib.get("role") attributes = {} ignore = ["geometry", "ref", "role", "type"] for n, v in child.attrib.items(): if n in ignore: continue attributes[n] = v geometry = None for sub_child in child: if sub_child.tag.lower() == "nd": if geometry is None: geometry = [] geometry.append( RelationWayGeometryValue( lat=Decimal(sub_child.attrib["lat"]), lon=Decimal(sub_child.attrib["lon"]) ) ) return cls( attributes=attributes, geometry=geometry, ref=ref, role=role, result=result )
def from_xml(cls, child, result=None)
Create new RelationMember from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this element belongs to :type result: overpy.Result :return: New relation member oject :rtype: overpy.RelationMember :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
2.883575
2.434872
1.184282
if name in self.ignore_start: return try: handler = getattr(self, '_handle_start_%s' % name) except AttributeError: raise KeyError("Unknown element start '%s'" % name) handler(attrs)
def startElement(self, name, attrs)
Handle opening elements. :param name: Name of the element :type name: String :param attrs: Attributes of the element :type attrs: Dict
3.766956
4.249985
0.886346
if name in self.ignore_end: return try: handler = getattr(self, '_handle_end_%s' % name) except AttributeError: raise KeyError("Unknown element end '%s'" % name) handler()
def endElement(self, name)
Handle closing elements :param name: Name of the element :type name: String
3.962454
4.430992
0.894259
center_lat = attrs.get("lat") center_lon = attrs.get("lon") if center_lat is None or center_lon is None: raise ValueError("Unable to get lat or lon of way center.") self._curr["center_lat"] = Decimal(center_lat) self._curr["center_lon"] = Decimal(center_lon)
def _handle_start_center(self, attrs)
Handle opening center element :param attrs: Attributes of the element :type attrs: Dict
3.050517
3.700111
0.824439
try: tag_key = attrs['k'] except KeyError: raise ValueError("Tag without name/key.") self._curr['tags'][tag_key] = attrs.get('v')
def _handle_start_tag(self, attrs)
Handle opening tag element :param attrs: Attributes of the element :type attrs: Dict
7.335098
8.951973
0.819383
self._curr = { 'attributes': dict(attrs), 'lat': None, 'lon': None, 'node_id': None, 'tags': {} } if attrs.get('id', None) is not None: self._curr['node_id'] = int(attrs['id']) del self._curr['attributes']['id'] if attrs.get('lat', None) is not None: self._curr['lat'] = Decimal(attrs['lat']) del self._curr['attributes']['lat'] if attrs.get('lon', None) is not None: self._curr['lon'] = Decimal(attrs['lon']) del self._curr['attributes']['lon']
def _handle_start_node(self, attrs)
Handle opening node element :param attrs: Attributes of the element :type attrs: Dict
1.852912
1.908716
0.970764
self._result.append(Node(result=self._result, **self._curr)) self._curr = {}
def _handle_end_node(self)
Handle closing node element
9.579788
8.916809
1.074352
self._curr = { 'center_lat': None, 'center_lon': None, 'attributes': dict(attrs), 'node_ids': [], 'tags': {}, 'way_id': None } if attrs.get('id', None) is not None: self._curr['way_id'] = int(attrs['id']) del self._curr['attributes']['id']
def _handle_start_way(self, attrs)
Handle opening way element :param attrs: Attributes of the element :type attrs: Dict
3.321794
3.267685
1.016559
self._result.append(Way(result=self._result, **self._curr)) self._curr = {}
def _handle_end_way(self)
Handle closing way element
9.976416
7.39932
1.348288
self._curr = { 'attributes': dict(attrs), 'tags': {}, 'area_id': None } if attrs.get('id', None) is not None: self._curr['area_id'] = int(attrs['id']) del self._curr['attributes']['id']
def _handle_start_area(self, attrs)
Handle opening area element :param attrs: Attributes of the element :type attrs: Dict
3.395813
3.668847
0.92558
self._result.append(Area(result=self._result, **self._curr)) self._curr = {}
def _handle_end_area(self)
Handle closing area element
9.437071
8.757668
1.077578
if isinstance(self.cur_relation_member, RelationWay): if self.cur_relation_member.geometry is None: self.cur_relation_member.geometry = [] self.cur_relation_member.geometry.append( RelationWayGeometryValue( lat=Decimal(attrs["lat"]), lon=Decimal(attrs["lon"]) ) ) else: try: node_ref = attrs['ref'] except KeyError: raise ValueError("Unable to find required ref value.") self._curr['node_ids'].append(int(node_ref))
def _handle_start_nd(self, attrs)
Handle opening nd element :param attrs: Attributes of the element :type attrs: Dict
4.140232
4.372373
0.946907
self._curr = { 'attributes': dict(attrs), 'members': [], 'rel_id': None, 'tags': {} } if attrs.get('id', None) is not None: self._curr['rel_id'] = int(attrs['id']) del self._curr['attributes']['id']
def _handle_start_relation(self, attrs)
Handle opening relation element :param attrs: Attributes of the element :type attrs: Dict
3.52659
3.644725
0.967587
self._result.append(Relation(result=self._result, **self._curr)) self._curr = {}
def _handle_end_relation(self)
Handle closing relation element
8.991593
7.457947
1.205639
params = { # ToDo: Parse attributes 'attributes': {}, 'ref': None, 'result': self._result, 'role': None } if attrs.get('ref', None): params['ref'] = int(attrs['ref']) if attrs.get('role', None): params['role'] = attrs['role'] cls_map = { "area": RelationArea, "node": RelationNode, "relation": RelationRelation, "way": RelationWay } cls = cls_map.get(attrs["type"]) if cls is None: raise ValueError("Undefined type for member: '%s'" % attrs['type']) self.cur_relation_member = cls(**params) self._curr['members'].append(self.cur_relation_member)
def _handle_start_member(self, attrs)
Handle opening member element :param attrs: Attributes of the element :type attrs: Dict
3.876762
3.864934
1.00306
if api is None: api = overpy.Overpass() query = data = api.query(query % (areacode, street)) return data
def get_street(street, areacode, api=None)
Retrieve streets in a given bounding area :param overpy.Overpass api: First street of intersection :param String street: Name of street :param String areacode: The OSM id of the bounding area :return: Parsed result :raises overpy.exception.OverPyException: If something bad happens.
5.379318
6.491738
0.828641
if api is None: api = overpy.Overpass() query = data = api.query(query % (areacode, street1, street2)) return data.get_nodes()
def get_intersection(street1, street2, areacode, api=None)
Retrieve intersection of two streets in a given bounding area :param overpy.Overpass api: First street of intersection :param String street1: Name of first street of intersection :param String street2: Name of second street of intersection :param String areacode: The OSM id of the bounding area :return: List of intersections :raises overpy.exception.OverPyException: If something bad happens.
4.500415
5.324904
0.845163
def print_tree(current_node, childattr='children', nameattr='name', indent='', last='updown'): if hasattr(current_node, nameattr): name = lambda node: getattr(node, nameattr) else: name = lambda node: str(node) children = lambda node: getattr(node, childattr) nb_children = lambda node: sum(nb_children(child) for child in children(node)) + 1 size_branch = {child: nb_children(child) for child in children(current_node)} up = sorted(children(current_node), key=lambda node: nb_children(node)) down = [] while up and sum(size_branch[node] for node in down) < sum(size_branch[node] for node in up): down.append(up.pop()) for child in up: next_last = 'up' if up.index(child) is 0 else '' next_indent = '{0}{1}{2}'.format(indent, ' ' if 'up' in last else '│', ' ' * len(name(current_node))) print_tree(child, childattr, nameattr, next_indent, next_last) if last == 'up': start_shape = '┌' elif last == 'down': start_shape = '└' elif last == 'updown': start_shape = ' ' else: start_shape = '├' if up: end_shape = '┤' elif down: end_shape = '┐' else: end_shape = '' print('{0}{1}{2}{3}'.format(indent, start_shape, name(current_node), end_shape)) for child in down: next_last = 'down' if down.index(child) is len(down) - 1 else '' next_indent = '{0}{1}{2}'.format(indent, ' ' if 'down' in last else '│', ' ' * len(name(current_node))) print_tree(child, childattr, nameattr, next_indent, next_last)
Creation of balanced lists for "up" branch and "down" branch.
null
null
null
if len(platforms) > 0: return all(platform in PLATFORM_IDS for platform in platforms) return True
def check_platforms(platforms)
Checks if the platforms have a valid platform code
5.22761
4.683544
1.116165
if not check_platforms(platforms): raise IncorrectParametersException('Invlaid code for platform. Please check the platform ids') try: if active: active_challenges = active_contests(platforms) if goto: webbrowser.open(active_challenges[goto - 1]["contest_url"], new=2) else: writers.write_contests(active_challenges, "active") return if upcoming: upcoming_challenges = upcoming_contests(platforms, time) if goto: goto = int(goto) webbrowser.open(upcoming_challenges[goto - 1]["contest_url"], new=2) else: writers.write_contests(upcoming_challenges, "upcoming") return if hiring: hiring_challenges = hiring_contests() if goto: webbrowser.open(hiring_challenges[goto - 1]["contest_url"], new=2) else: writers.write_contests(hiring_challenges, "hiring") return if short: short_challenges = short_contests(platforms) if goto: goto = int(goto) webbrowser.open(short_challenges[goto - 1]["contest_url"], new=2) else: writers.write_contests(short_challenges, "short") return all_contests = get_all_contests(platforms, time) if goto: webbrowser.open(all_contests[goto - 1]["contest_url"], new=2) else: writers.write_contests(all_contests, "all") except IncorrectParametersException as e: click.secho(e.message, fg="red", bold=True)
def main(active, upcoming, hiring, short, goto, platforms, time)
A CLI for active and upcoming programming challenges from various platforms
1.933648
1.912681
1.010962
enums = dict( TIME_LEFT="red", CONTEST_NAME="yellow", HOST="green", MISC="blue", TIME_TO_START="green", ) return type('Enum', (), enums)
def colors()
Creates an enum for colors
12.87646
11.531578
1.116626
enums = dict( ACTIVE="active", UPCOMING="upcoming", HIRING="hiring", ALL="all", SHORT="short", ) return type('Enum', (), enums)
def challenge()
Creates an enum for contest type
9.533315
6.899948
1.38165
if contest_type == challenge().ACTIVE: time_diff = time_difference(contest["end"]) elif contest_type == challenge().UPCOMING: time_diff = time_difference(contest["start"]) elif contest_type in [challenge().HIRING, challenge().SHORT, challenge().ALL]: try: time_diff = time_difference(contest["start"]) except: time_diff = time_difference(contest["end"]) time_diff_string = "" if time_diff.days > 0: time_diff_string = "{0} days {1} hours".format(time_diff.days, time_diff.hours) elif time_diff.hours > 0: time_diff_string = "{0} hours {1} minutes".format(time_diff.hours, time_diff.minutes) else: time_diff_string = "{0} minutes".format(time_diff.minutes) return time_diff_string
def get_time_string(contest, contest_type)
Return a string with time for the contest to begin/end
2.194325
2.207815
0.99389
TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"]) time_diff = format_date(target_time) - datetime.utcnow() hours, remainder = divmod(time_diff.seconds, 3600) minutes, seconds = divmod(remainder, 60) return TimeDiff(days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds)
def time_difference(target_time)
Calculate the difference between the current time and the given time
2.162027
2.105503
1.026846
''' Checks whether a value is: - int, or - long, or - float without a fractional part, or - str or unicode composed only of digits ''' def fn(value): if not any([ isinstance(value, numbers.Integral), (isinstance(value, float) and value.is_integer()), (isinstance(value, basestring) and value.isdigit()) ]): raise Invalid(msg or ( 'Invalid input <{0}>; expected an integer'.format(value)) ) else: return value return fn
def IntegerLike(msg=None)
Checks whether a value is: - int, or - long, or - float without a fractional part, or - str or unicode composed only of digits
4.566205
2.727872
1.673907
''' Checks whether a value is: - int, or - long, or - float without a fractional part, or - str or unicode composed only of alphanumeric characters ''' def fn(value): if not any([ isinstance(value, numbers.Integral), (isinstance(value, float) and value.is_integer()), (isinstance(value, basestring) and value.isalnum()) ]): raise Invalid(msg or ( 'Invalid input <{0}>; expected an integer'.format(value)) ) else: return value return fn
def Alphanumeric(msg=None)
Checks whether a value is: - int, or - long, or - float without a fractional part, or - str or unicode composed only of alphanumeric characters
4.695483
2.787464
1.6845
''' Checks whether a value is: - str or unicode, and - composed of both alphabets and digits ''' def fn(value): if not ( isinstance(value, basestring) and value.isalnum() and not value.isdigit() and not value.isalpha() ): raise Invalid(msg or ( 'Invalid input <{0}>; expected an integer'.format(value)) ) else: return value return fn
def StrictlyAlphanumeric(msg=None)
Checks whether a value is: - str or unicode, and - composed of both alphabets and digits
5.234499
3.149012
1.662267
''' Checks whether a value is : - a valid castable datetime object with timezone. ''' def fn(value): try: date = parse_datetime(value) or parse_date(value) if date is not None: return date else: raise ValueError except ValueError: raise Invalid('<{0}> is not a valid datetime.'.format(value)) return fn
def DatetimeWithTZ(msg=None)
Checks whether a value is : - a valid castable datetime object with timezone.
6.056317
3.337971
1.814371
''' Checks whether a value is list of integers. Returns list of integers or just one integer in list if there is only one element in given CSV string. ''' def fn(value): try: if isinstance(value, basestring): if ',' in value: value = list(map( int, filter( bool, list(map( lambda x: x.strip(), value.split(',') )) ) )) return value else: return [int(value)] else: raise ValueError except ValueError: raise Invalid( '<{0}> is not a valid csv of integers'.format(value) ) return fn
def CSVofIntegers(msg=None)
Checks whether a value is list of integers. Returns list of integers or just one integer in list if there is only one element in given CSV string.
4.216976
2.564914
1.6441
query_params = self.request.query_params url_params = self.kwargs # get queryset_filters from FilterMixin queryset_filters = self.get_db_filters(url_params, query_params) # This dict will hold filter kwargs to pass in to Django ORM calls. db_filters = queryset_filters['db_filters'] # This dict will hold exclude kwargs to pass in to Django ORM calls. db_excludes = queryset_filters['db_excludes'] queryset = Team.objects.prefetch_related( 'players' ).all() return queryset.filter(**db_filters).exclude(**db_excludes)
def get_queryset(self)
Optionally restricts the queryset by filtering against query parameters in the URL.
4.029297
3.707735
1.086727
parent_name = "CMSPLUGIN_NEWS_{0}".format(name) return getattr(django_settings, parent_name, default)
def get_setting(name, default)
A little helper for fetching global settings with a common prefix.
8.092972
7.248284
1.116536
rows_updated = queryset.update(is_published=True) self.message_user(request, ungettext('%(count)d newsitem was published', '%(count)d newsitems were published', rows_updated) % {'count': rows_updated})
def make_published(self, request, queryset)
Marks selected news items as published
2.486456
2.497666
0.995512
rows_updated = queryset.update(is_published=False) self.message_user(request, ungettext('%(count)d newsitem was unpublished', '%(count)d newsitems were unpublished', rows_updated) % {'count': rows_updated})
def make_unpublished(self, request, queryset)
Marks selected news items as unpublished
2.447137
2.422255
1.010272
if value is None: value = '' value = smart_unicode(value) final_attrs = self.build_attrs(attrs) final_attrs['name'] = name assert 'id' in final_attrs, \ "TinyMCE widget attributes must contain 'id'" mce_config = cms.plugins.text.settings.TINYMCE_CONFIG.copy() mce_config.update(get_language_config(self.content_language)) if tinymce.settings.USE_FILEBROWSER: mce_config['file_browser_callback'] = "djangoFileBrowser" mce_config.update(self.mce_attrs) mce_config['mode'] = 'exact' mce_config['elements'] = final_attrs['id'] mce_config['strict_loading_mode'] = 1 json = simplejson.dumps(mce_config) html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))] if tinymce.settings.USE_COMPRESSOR: compressor_config = { 'plugins': mce_config.get('plugins', ''), 'themes': mce_config.get('theme', 'advanced'), 'languages': mce_config.get('language', ''), 'diskcache': True, 'debug': False, } c_json = simplejson.dumps(compressor_config) html.append( (u'<script type="text/javascript">' 'tinyMCE_GZ.init(%s);</script>') % (c_json)) html.append( (u'<script type="text/javascript">%s;\ntinyMCE.init(%s);' '</script>') % ( self.render_additions( name, value, attrs), json)) return mark_safe(u'\n'.join(html))
def render(self, name, value, attrs=None)
plugins = mce_config.get("plugins", "") if len(plugins): plugins += "," plugins += "-cmsplugins" mce_config['plugins'] = plugins adv2 = mce_config.get('theme_advanced_buttons1', "") if len(adv2): adv2 = "," + adv2 adv2 = "cmsplugins,cmspluginsedit" + adv2 mce_config['theme_advanced_buttons1'] = adv2
3.212549
3.282926
0.978563
wk_args = (WK_PATH,) + args return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def _execute_wk(*args, input=None)
Generate path for the wkhtmltopdf binary and execute command. :param args: args to pass straight to subprocess.Popen :return: stdout, stderr
3.360648
4.649816
0.722749
if not cache_dir.exists(): Path.mkdir(cache_dir) py_args = dict( cache_dir=cache_dir, grayscale=grayscale, lowquality=lowquality, margin_bottom=margin_bottom, margin_left=margin_left, margin_right=margin_right, margin_top=margin_top, orientation=orientation, page_height=page_height, page_width=page_width, page_size=page_size, image_dpi=image_dpi, image_quality=image_quality, ) py_args.update(extra_kwargs) cmd_args = _convert_args(**py_args) p = _execute_wk(*cmd_args, input=html.encode()) pdf_content = p.stdout # it seems wkhtmltopdf's error codes can be false, we'll ignore them if we # seem to have generated a pdf if p.returncode != 0 and pdf_content[:4] != b'%PDF': raise RuntimeError('error running wkhtmltopdf, command: {!r}\n' 'response: "{}"'.format(cmd_args, p.stderr.decode().strip())) return pdf_content
def generate_pdf(html, *, cache_dir: Path=DFT_CACHE_DIR, grayscale: bool=False, lowquality: bool=False, margin_bottom: str=None, margin_left: str=None, margin_right: str=None, margin_top: str=None, orientation: str=None, page_height: str=None, page_width: str=None, page_size: str=None, image_dpi: str=None, image_quality: str=None, **extra_kwargs)
Generate a pdf from either a url or a html string. After the html and url arguments all other arguments are passed straight to wkhtmltopdf For details on extra arguments see the output of get_help() and get_extended_help() All arguments whether specified or caught with extra_kwargs are converted to command line args with "'--' + original_name.replace('_', '-')" Arguments which are True are passed with no value eg. just --quiet, False and None arguments are missed, everything else is passed with str(value). :param html: html string to generate pdf from :param grayscale: bool :param lowquality: bool :param margin_bottom: string eg. 10mm :param margin_left: string eg. 10mm :param margin_right: string eg. 10mm :param margin_top: string eg. 10mm :param orientation: Portrait or Landscape :param page_height: string eg. 10mm :param page_width: string eg. 10mm :param page_size: string: A4, Letter, etc. :param image_dpi: int default 600 :param image_quality: int default 94 :param extra_kwargs: any exotic extra options for wkhtmltopdf :return: string representing pdf
2.835771
2.837434
0.999414
try: wk_version = _string_execute('-V') except Exception as e: # we catch all errors here to make sure we get a version no matter what wk_version = '%s: %s' % (e.__class__.__name__, e) return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)
def get_version()
Get version of pydf and wkhtmltopdf binary :return: version string
5.915164
4.718647
1.253572
'''Perform sctring escape - for regexp literals''' self.index = 0 self.length = len(string) self.source = string self.lineNumber = 0 self.lineStart = 0 octal = False st = '' inside_square = 0 while (self.index < self.length): template = '[%s]' if not inside_square else '%s' ch = self.source[self.index] self.index += 1 if ch == '\\': ch = self.source[self.index] self.index += 1 if (not isLineTerminator(ch)): if ch == 'u': digs = self.source[self.index:self.index + 4] if len(digs) == 4 and all(isHexDigit(d) for d in digs): st += template % unichr(int(digs, 16)) self.index += 4 else: st += 'u' elif ch == 'x': digs = self.source[self.index:self.index + 2] if len(digs) == 2 and all(isHexDigit(d) for d in digs): st += template % unichr(int(digs, 16)) self.index += 2 else: st += 'x' # special meaning - single char. elif ch == '0': st += '\\0' elif ch == 'n': st += '\\n' elif ch == 'r': st += '\\r' elif ch == 't': st += '\\t' elif ch == 'f': st += '\\f' elif ch == 'v': st += '\\v' # unescape special single characters like . so that they are interpreted literally elif ch in REGEXP_SPECIAL_SINGLE: st += '\\' + ch # character groups elif ch == 'b': st += '\\b' elif ch == 'B': st += '\\B' elif ch == 'w': st += '\\w' elif ch == 'W': st += '\\W' elif ch == 'd': st += '\\d' elif ch == 'D': st += '\\D' elif ch == 's': st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff' elif ch == 'S': st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff' else: if isDecimalDigit(ch): num = ch while self.index < self.length and isDecimalDigit( self.source[self.index]): num += self.source[self.index] self.index += 1 st += '\\' + num else: st += ch # DONT ESCAPE!!! 
else: self.lineNumber += 1 if (ch == '\r' and self.source[self.index] == '\n'): self.index += 1 self.lineStart = self.index else: if ch == '[': inside_square = True elif ch == ']': inside_square = False st += ch # print string, 'was transformed to', st return st
def _interpret_regexp(self, string, flags)
Perform string escape - for regexp literals
2.110542
2.022409
1.043578
''' Search Crossref licenses :param query: [String] A query string :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.licenses() cr.licenses(query = "creative") ''' check_kwargs(["ids", "filter", "works"], kwargs) res = request(self.mailto, self.base_url, "/licenses/", None, query, None, offset, limit, None, sort, order, facet, None, None, None, None, **kwargs) return res
def licenses(self, query = None, offset = None, limit = None, sample = None, sort = None, order = None, facet = None, **kwargs)
Search Crossref licenses :param query: [String] A query string :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.licenses() cr.licenses(query = "creative")
6.364017
1.551612
4.101551
''' Determine registration agency for DOIs :param ids: [Array] DOIs (digital object identifier) or other identifiers :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: list of DOI minting agencies Usage:: from habanero import Crossref cr = Crossref() cr.registration_agency('10.1371/journal.pone.0033693') cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993']) ''' check_kwargs(["query", "filter", "offset", "limit", "sample", "sort", "order", "facet", "works"], kwargs) res = request(self.mailto, self.base_url, "/works/", ids, None, None, None, None, None, None, None, None, None, None, None, None, True, **kwargs) if res.__class__ != list: k = [] k.append(res) else: k = res return [ z['message']['agency']['label'] for z in k ]
def registration_agency(self, ids, **kwargs)
Determine registration agency for DOIs :param ids: [Array] DOIs (digital object identifier) or other identifiers :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: list of DOI minting agencies Usage:: from habanero import Crossref cr = Crossref() cr.registration_agency('10.1371/journal.pone.0033693') cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
6.548873
2.238151
2.926019
''' Get a random set of DOIs :param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100 :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: [Array] of DOIs Usage:: from habanero import Crossref cr = Crossref() cr.random_dois(1) cr.random_dois(10) cr.random_dois(50) cr.random_dois(100) ''' res = request(self.mailto, self.base_url, "/works/", None, None, None, None, None, sample, None, None, None, None, True, None, None, None, **kwargs) return [ z['DOI'] for z in res['message']['items'] ]
def random_dois(self, sample = 10, **kwargs)
Get a random set of DOIs :param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100 :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: [Array] of DOIs Usage:: from habanero import Crossref cr = Crossref() cr.random_dois(1) cr.random_dois(10) cr.random_dois(50) cr.random_dois(100)
4.3941
1.915763
2.293655
''' Get list of styles from https://github.com/citation-style-language/styles :param kwargs: any additional arguments will be passed on to `requests.get` :return: list, of CSL styles Usage:: from habanero import cn cn.csl_styles() ''' base = "https://api.github.com/repos/citation-style-language/styles" tt = requests.get(base + '/commits?per_page=1', **kwargs) tt.raise_for_status() check_json(tt) commres = tt.json() sha = commres[0]['sha'] sty = requests.get(base + "/git/trees/" + sha, **kwargs) sty.raise_for_status() check_json(sty) res = sty.json() files = [ z['path'] for z in res['tree'] ] matches = [ re.search(".csl", g) for g in files ] csls = [ x.string for x in filter(None, matches) ] return [ re.sub(".csl", "", x) for x in csls ]
def csl_styles(**kwargs)
Get list of styles from https://github.com/citation-style-language/styles :param kwargs: any additional arguments will be passed on to `requests.get` :return: list, of CSL styles Usage:: from habanero import cn cn.csl_styles()
3.455679
2.370501
1.457784
''' Get a citation count with a DOI

:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param key: [String] your API key

See http://labs.crossref.org/openurl/ for more info on this
Crossref API service.

Usage::

    from habanero import counts
    counts.citation_count(doi = "10.1371/journal.pone.0042793")
    counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
    # DOI not found ## FIXME
    counts.citation_count(doi = "10.1016/j.fbr.2012")
'''
# Build the query params, then drop any falsy entries so they are not
# sent to the OpenURL endpoint at all.
args = {"id": "doi:" + doi, "pid": key, "noredirect": True}
args = dict((k, v) for k, v in args.items() if v)
res = requests.get(url, params = args, headers = make_ua(), **kwargs)
# The endpoint answers with XML; the count lives in the 'fl_count'
# attribute of the <query> element.
# NOTE(review): if the DOI is unknown or the response is not the
# expected XML this raises (IndexError/KeyError/parse error) rather than
# returning a sentinel - see the FIXME in the usage example above.
xmldoc = minidom.parseString(res.content)
val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value
return int(str(val))
def citation_count(doi, url = "http://www.crossref.org/openurl/", key = "[email protected]", **kwargs)
Get a citation count with a DOI :param doi: [String] DOI, digital object identifier :param url: [String] the API url for the function (should be left to default) :param key: [String] your API key See http://labs.crossref.org/openurl/ for more info on this Crossref API service. Usage:: from habanero import counts counts.citation_count(doi = "10.1371/journal.pone.0042793") counts.citation_count(doi = "10.1016/j.fbr.2012.01.001") # DOI not found ## FIXME counts.citation_count(doi = "10.1016/j.fbr.2012")
4.348143
1.797403
2.419125
hierarchy = [] defaults = TOC.default_hierarchy + tuple( '%ssection' % ('sub'*i) for i in range(2, max_subs)) for level in defaults: if getattr(self.source, level, False): hierarchy.append(level) return tuple(hierarchy)
def findHierarchy(self, max_subs=10)
Find hierarchy for the LaTeX source. >>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy() ('section', 'subsection') >>> TOC.fromLatex( ... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy() ('subsubsection', 'subsubsubsection') >>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\ ... \subsection{subh2}').findHierarchy() ('section', 'subsection')
7.704581
6.508502
1.183772
try: return hierarchy.index(ts.name)+1 except ValueError: if ts.name.endswith('section'): i, name = 0, ts.name while name.startswith('sub'): name, i = name[3:], i+1 if name == 'section': return i+2 return float('inf') except (AttributeError, TypeError): return float('inf')
def getHeadingLevel(ts, hierarchy=default_hierarchy)
Extract heading level for a particular Tex element, given a specified hierarchy. >>> ts = TexSoup(r'\section{Hello}').section >>> TOC.getHeadingLevel(ts) 2 >>> ts2 = TexSoup(r'\chapter{hello again}').chapter >>> TOC.getHeadingLevel(ts2) 1 >>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection >>> TOC.getHeadingLevel(ts3) 6
3.743455
4.007822
0.934037
descendants = list(descendants) or \ list(getattr(self.source, 'descendants', descendants)) if not descendants: return -1 return min(TOC.getHeadingLevel(e, self.hierarchy) for e in descendants)
def parseTopDepth(self, descendants=())
Parse tex for highest tag in hierarchy >>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth() 1 >>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}' >>> TOC.fromLatex(s).parseTopDepth() 1 >>> h = ('section', 'subsubsection', 'subsubsubsection') >>> TOC.fromLatex(s, hierarchy=h).parseTopDepth() 2
8.159696
7.652165
1.066325
# Group descendants into top-level branches: each element at the top
# heading level opens a new branch; deeper elements are attached to the
# most recently opened branch as its descendants.
i, branches = self.parseTopDepth(descendants), []
for descendant in descendants:
    # Element at the top level -> start a new branch.
    if self.getHeadingLevel(descendant, self.hierarchy) == i:
        branches.append({'source': descendant})
    # Deeper element -> nest under the last branch (elements deeper
    # than i that appear before any branch exists are dropped).
    if self.getHeadingLevel(descendant, self.hierarchy) > i \
            and branches:
        branches[-1].setdefault('descendants', []).append(descendant)
# NOTE(review): `descendant` below is the leaked loop variable (the
# last element iterated), not the branch's own source - presumably TOC
# derives its identity from the source=... kwarg in **branch, so the
# string argument may be unused; verify against TOC.__init__.
return [TOC(str(descendant), depth=i, hierarchy=self.hierarchy,
    **branch) for branch in branches]
def parseBranches(self, descendants)
Parse top level of latex :param list elements: list of source objects :return: list of filtered TreeOfContents objects >>> toc = TOC.fromLatex(r'\section{h1}\subsection{subh1}\section{h2}\ ... \subsection{subh2}') >>> toc.parseTopDepth(toc.descendants) 1 >>> toc.parseBranches(toc.descendants) [h1, h2] >>> len(toc.branches) 2 >>> len(toc.section.branches) 1
5.443511
4.694856
1.159463
return TOC.fromLatex(open(path_or_buffer).read() if isinstance(path_or_buffer, str) else path_or_buffer)
def fromFile(path_or_buffer)
Creates abstraction using path to file :param str path_or_buffer: path to tex file or buffer :return: TreeOfContents object
4.879929
5.765104
0.84646
source = TexSoup(tex) return TOC('[document]', source=source, descendants=list(source.descendants), *args, **kwargs)
def fromLatex(tex, *args, **kwargs)
Creates abstraction using Latex :param str tex: Latex :return: TreeOfContents object
15.433957
15.249139
1.01212