code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def clean(self):
    """Validate that at least one of the 'tag' / 'tooter' fields is filled.

    :return: None; raises ValidationError when both fields are empty
    """
    data = super(MastodonForm, self).clean()
    # an empty field is set to None by clean()
    if data.get("tag") is None and data.get("tooter") is None:
        raise ValidationError(_("You have to fill ONE of the fields (or tag + tooter or tooter + fav)"))
8.704661
7.456034
1.167465
def update_result(trigger_id, msg, status):
    """Update the result counters of the trigger after it has been handled.

    :param trigger_id: trigger id
    :param msg: result msg
    :param status: status of the handling of the current trigger
    :return: None
    """
    service = TriggerService.objects.get(id=trigger_id)
    if status:
        # success: reset the *_failed counters and bump the OK counter
        provider_failed = 0
        consumer_failed = 0
        counter_ok = service.counter_ok + 1
        counter_ko = service.counter_ko
    else:
        # failure: add 1 to consumer_failed and to the KO counter
        provider_failed = service.provider_failed
        consumer_failed = service.consumer_failed + 1
        # BUGFIX: previous code read service.counter_ko here, which
        # corrupted the OK counter on every failure
        counter_ok = service.counter_ok
        counter_ko = service.counter_ko + 1
        # disable the trigger once the ceiling of failed tries is reached
        status = False if consumer_failed > settings.DJANGO_TH.get('failed_tries', 5) else True
        warn_user_and_admin('consumer', service)
    TriggerService.objects.filter(id=trigger_id).update(
        result=msg, date_result=now(), provider_failed=provider_failed,
        consumer_failed=consumer_failed, counter_ok=counter_ok,
        counter_ko=counter_ko, status=status)
    UserService.objects.filter(user=service.user, name=service.consumer.name).update(
        counter_ok=counter_ok, counter_ko=counter_ko)
3.47743
3.520825
0.987675
def import_from_path(path):
    """Import an attribute dynamically, given its dotted path.

    :param path: the dotted path, e.g. 'package.module.ClassName'
    :type path: string
    :return: Return the value of the named attribute of object.
    :rtype: object
    """
    module_name, class_name = path.rsplit('.', 1)
    module = __import__(module_name, fromlist=[class_name])
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Unable to import %s' % path)
2.096318
2.42424
0.864732
def load_services(self, services=settings.TH_SERVICES):
    """Register every service class listed in the settings.

    :param services: iterable of dotted paths of the service classes
    """
    kwargs = {}
    for class_path in services:
        # ex: 'th_rss.my_rss.ServiceRss' -> 'ServiceRss'
        class_name = class_path.rsplit('.', 1)[1]
        klass = import_from_path(class_path)
        self.register(class_name, klass(None, **kwargs))
3.443592
3.208504
1.07327
def recycle():
    """Move the cached data stored with version=2 back into the main cache."""
    # http://niwinz.github.io/django-redis/latest/#_scan_delete_keys_in_bulk
    for key in cache.iter_keys('th_*'):
        try:
            # get the value from the cache version=2
            data = cache.get(key, version=2)
            # put it in the default version then drop the version=2 copy
            cache.set(key, data)
            cache.delete_pattern(key, version=2)
        except ValueError:
            pass
    logger.info('recycle of cache done!')
7.034196
6.292641
1.117845
def provider(self, service_provider, **kwargs):
    """Initialize the provider service then read its data.

    :param service_provider: the service object offering the data
    :param kwargs: keyword args handed to read_data (token, trigger_id, ...)
    :return: the data read from the provider
    """
    service_provider.__init__(kwargs.get('token'))
    return service_provider.read_data(**kwargs)
8.542371
8.435262
1.012698
def is_ceil_reached(self, service):
    """Check if the ceiling of failed tries of the provider is reached.

    :param service: the trigger service to check
    """
    failed = service.provider_failed + 1
    ceil = settings.DJANGO_TH.get('failed_tries', 10)
    if failed > ceil:
        # too many failures: disable the trigger
        TriggerService.objects.filter(id=service.id).update(date_result=now(), status=False)
    else:
        TriggerService.objects.filter(id=service.id).update(date_result=now(), provider_failed=failed)
    warn_user_and_admin('provider', service)
6.964576
6.762027
1.029954
def reading(self, service):
    """Get the data from the provider service and put them in cache.

    :param service: service object to read
    :type service: object
    """
    # provider - the service that offers data
    provider_token = service.provider.token
    default_provider.load_services()
    service_provider = default_provider.get_service(str(service.provider.name.name))
    # fall back on the creation date the first time the trigger runs
    date_triggered = service.date_triggered if service.date_triggered else service.date_created
    # get the data from the provider service
    kwargs = {'token': provider_token, 'trigger_id': service.id, 'date_triggered': date_triggered}
    data = self.provider(service_provider, **kwargs)
    # BUGFIX: check the failure sentinel BEFORE calling len(); the previous
    # order raised TypeError (len(False)) whenever the provider returned False
    if data is False:
        # something went wrong
        self.is_ceil_reached(service)
    elif len(data) > 0:
        logger.info("{} - {} new data".format(service, len(data)))
7.676714
7.708951
0.995818
def clean(self):
    """Check the content of each field of the user-service form."""
    cleaned_data = super(UserServiceForm, self).clean()
    sa = ServicesActivated.objects.get(name=self.initial['name'])
    # set the name of the service, related to ServicesActivated model
    cleaned_data['name'] = sa
    if sa.auth_required and sa.self_hosted:
        fields = ('host', 'username', 'password', 'client_id', 'client_secret')
        if any(cleaned_data.get(field) == '' for field in fields):
            self.add_error('username', 'All the five fields are altogether mandatory')
        elif cleaned_data.get('host') is None:
            self.add_error('host', 'Check its protocol and its name')
        elif cleaned_data.get('host').endswith('/'):
            # drop the trailing slash of the host
            cleaned_data['host'] = cleaned_data['host'][:-1]
3.763388
3.802288
0.989769
published = None if hasattr(entry, 'published_parsed'): if entry.published_parsed is not None: published = datetime.datetime.utcfromtimestamp(time.mktime(entry.published_parsed)) elif hasattr(entry, 'created_parsed'): if entry.created_parsed is not None: published = datetime.datetime.utcfromtimestamp(time.mktime(entry.created_parsed)) elif hasattr(entry, 'updated_parsed'): if entry.updated_parsed is not None: published = datetime.datetime.utcfromtimestamp(time.mktime(entry.updated_parsed)) return published
def _get_published(self, entry)
get the 'published' attribute :param entry: :return:
1.462131
1.536195
0.951788
def read_data(self, **kwargs):
    """Get the data from the RSS service.

    :param kwargs: contain keyword args : trigger_id and model name
    :type kwargs: dict
    :rtype: dict
    """
    date_triggered = kwargs.get('date_triggered')
    trigger_id = kwargs.get('trigger_id')
    kwargs['model_name'] = 'Rss'
    kwargs['app_label'] = 'django_th'
    # get the URL from the trigger id
    rss = super(ServiceRss, self).read_data(**kwargs)
    logger.debug("RSS Feeds from %s : url %s", rss.name, rss.url)
    now = arrow.utcnow().to(settings.TIME_ZONE)
    my_feeds = []
    # retrieve the data
    feeds = Feeds(url_to_parse=rss.url).datas()
    for entry in feeds.entries:
        # entry.*_parsed may be None when the date in a RSS Feed is invalid
        published = self._get_published(entry)
        if published:
            published = arrow.get(str(published)).to(settings.TIME_ZONE)
            date_triggered = arrow.get(str(date_triggered)).to(settings.TIME_ZONE)
            if date_triggered is not None and published is not None and now >= published >= date_triggered:
                my_feeds.append(entry)
                # digester
                self.send_digest_event(trigger_id, entry.title, entry.link)
    cache.set('th_rss_' + str(trigger_id), my_feeds)
    cache.set('th_rss_uuid_{}'.format(rss.uuid), my_feeds)
    # return the data
    return my_feeds
4.824435
4.663751
1.034454
def activated_services(self, user, provider=None):
    """Build the choices list of the services activated by the administrator.

    :param user: current user
    :param provider: name of a provider to exclude from the list
    :type user: current user
    :type provider: string
    :return: list of (name, pretty name) tuples
    :rtype: list
    """
    services = UserService.objects.filter(name__status=1, user=user)
    if provider is not None:
        services = services.exclude(name__exact=provider)
    # ex: 'ServiceRss' -> pretty name 'Rss'
    return [(service.name, service.name.name.rsplit('Service', 1)[1])
            for service in services]
4.791271
5.736748
0.835189
def handle(self, *args, **options):
    """Gather today's digest data and mail them to each user."""
    today = arrow.utcnow().to(settings.TIME_ZONE).date()
    digest = Digest.objects.filter(date_end=str(today)).order_by('user', 'date_end')
    users = digest.distinct('user')
    subject = 'Your digester'
    msg_plain = render_to_string('digest/email.txt', {'digest': digest, 'subject': subject})
    msg_html = render_to_string('digest/email.html', {'digest': digest, 'subject': subject})
    recipient_list = tuple(d.user.email for d in users)
    send_mail(subject, msg_plain, settings.ADMINS, recipient_list, html_message=msg_html)
3.036654
2.76946
1.096479
def save_data(self, trigger_id, **data):
    """Create a user story in the Taiga project from the given data.

    :param trigger_id: id of the trigger
    :params data, dict
    :rtype: dict
    """
    status = False
    taiga = Taiga.objects.get(trigger_id=trigger_id)
    title = self.set_title(data)
    body = self.set_content(data)
    if taiga.project_name:
        # add a 'story' to the project
        api = self.taiga_api()
        project = api.projects.get_by_slug(taiga.project_name)
        if project.add_user_story(title, description=body):
            status = True
    return status
4.118941
4.516543
0.911968
def get_notebook(note_store, my_notebook):
    """Return the GUID of the notebook matching the given name, 0 if not found.

    :param note_store: Evernote NoteStore instance
    :param my_notebook: name of the notebook to look up
    :return: the notebook GUID or 0
    """
    wanted = my_notebook.lower()
    for notebook in note_store.listNotebooks():
        if notebook.name.lower() == wanted:
            return notebook.guid
    return 0
4.004762
3.599596
1.112559
def set_notebook(note_store, my_notebook, notebook_id):
    """Create the notebook when it does not exist yet and return its GUID.

    :param note_store: Evernote NoteStore instance
    :param my_notebook: name of the notebook to create
    :param notebook_id: 0 when the notebook does not exist yet
    :return: the notebook GUID
    """
    if notebook_id == 0:
        notebook = Types.Notebook()
        notebook.name = my_notebook
        notebook.defaultNotebook = False
        notebook_id = note_store.createNotebook(notebook).guid
    return notebook_id
3.255039
3.120531
1.043104
def get_tag(note_store, my_tags):
    """Get the GUIDs of the tags from the Evernote account.

    :param note_store Evernote Instance
    :param my_tags string of comma-separated tag names
    :return: array of matching tag GUIDs
    """
    tag_id = []
    listtags = note_store.listTags()
    # the tag names are cut on commas; spaces around each name are dropped
    # so that "foo bar" still matches, but " foo bar" / "foo bar " do too
    for my_tag in my_tags.split(','):
        wanted = my_tag.lower().strip()
        for tag in listtags:
            if tag.name.lower() == wanted:
                tag_id.append(tag.guid)
                break
    return tag_id
5.774781
5.702035
1.012758
def set_tag(note_store, my_tags, tag_id):
    """Create each tag of a comma-separated list if it does not exist.

    :param note_store evernote instance
    :param my_tags string of comma-separated tag names
    :param tag_id list the created tag ids are appended to
    :return: the list of tag ids, or False when a creation failed
    """
    new_tag = Types.Tag()
    for my_tag in my_tags.split(','):
        new_tag.name = my_tag
        note_tag_id = EvernoteMgr.create_tag(note_store, new_tag)
        if note_tag_id is False:
            return False
        tag_id.append(note_tag_id)
    return tag_id
3.207832
3.179324
1.008967
def create_note(note_store, note, trigger_id, data):
    """Create a note on Evernote.

    :param note_store Evernote Instance
    :param note: Note instance to create
    :param trigger_id: id of the trigger
    :param data: data to save or to put in cache
    :return boolean (True on success or ignored validation error)
    :rtype boolean
    """
    try:
        # create the note !
        created_note = note_store.createNote(note)
        sentence = str('note %s created') % created_note.guid
        logger.debug(sentence)
        return True
    except EDAMSystemException as e:
        return error(trigger_id, data, e)
    except EDAMUserException as e:
        if e.errorCode == EDAMErrorCode.ENML_VALIDATION:
            # the note content did not validate: drop the data
            sentence = "Data ignored due to validation error : err {code} {msg}".format(code=e.errorCode, msg=e.parameter)
            logger.warning(sentence)
            update_result(trigger_id, msg=sentence, status=True)
            return True
    except Exception as e:
        logger.critical(e)
        update_result(trigger_id, msg=e, status=False)
        return False
4.246738
4.134268
1.027204
def create_tag(note_store, new_tag):
    """Create a tag on Evernote and return its GUID.

    :param note_store Evernote instance
    :param new_tag: create this new tag
    :return: new tag id, or False on a known Evernote error
    """
    try:
        return note_store.createTag(new_tag).guid
    except EDAMUserException as e:
        if e.errorCode == EDAMErrorCode.DATA_CONFLICT:
            logger.info("Evernote Data Conflict Err {0}".format(e))
        elif e.errorCode == EDAMErrorCode.BAD_DATA_FORMAT:
            logger.critical("Evernote Err {0}".format(e))
        return False
3.726848
3.438818
1.083758
def set_note_attribute(data):
    """Build the note attributes carrying the URL of the data source, if any.

    :param data: dict possibly containing a 'link' key
    :return: a NoteAttributes instance, or False when no link is provided
    """
    if not data.get('link'):
        return False
    attribute = Types.NoteAttributes()
    # add the url of the source to the note
    attribute.sourceURL = data.get('link')
    return attribute
12.119377
9.223772
1.313929
def set_note_footer(data, trigger):
    """Build the footer of the note with a link to the data source.

    :param data: dict possibly containing a 'link' key
    :param trigger: trigger object giving the description
    :return: the footer markup, empty string when no link is available
    """
    link = data.get('link')
    if not link:
        return ''
    provided_by = _('Provided by')
    provided_from = _('from')
    footer_from = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>"
    return footer_from.format(provided_by, trigger.trigger.description,
                              provided_from, link, link)
4.299684
4.188291
1.026596
def set_evernote_spec():
    """Build the metadata result spec used to query Evernote notes.

    :return: spec including title and attributes
    """
    result_spec = NoteStore.NotesMetadataResultSpec()
    result_spec.includeTitle = True
    result_spec.includeAttributes = True
    return result_spec
4.863919
7.101772
0.684888
def trigger_on_off(request, trigger_id):
    """Enable/disable the status of the trigger then go back home.

    :param request: request object
    :param trigger_id: the trigger ID to switch the status to True or False
    :type request: HttpRequest object
    :type trigger_id: int
    :return render
    :rtype HttpResponse
    """
    now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
    trigger = get_object_or_404(TriggerService, pk=trigger_id)
    if trigger.status:
        title = 'disabled'
        title_trigger = _('Set this trigger on')
        btn = 'success'
        trigger.status = False
    else:
        title = _('Edit your service')
        title_trigger = _('Set this trigger off')
        btn = 'primary'
        trigger.status = True
        # set the trigger to the current date when it is back online
        trigger.date_triggered = now
    trigger.save()
    context = {'trigger': trigger,
               'title': title,
               'title_trigger': title_trigger,
               'btn': btn,
               'fire': settings.DJANGO_TH.get('fire', False)}
    return render(request, 'triggers/trigger_line.html', context)
4.324996
4.349823
0.994293
def fire_trigger(request, trigger_id):
    """Start the handling of only ONE trigger.

    :param request: request object
    :param trigger_id: the trigger ID to fire
    :type request: HttpRequest object
    :type trigger_id: int
    :return render
    :rtype HttpResponse
    """
    date = ''
    if cache.get('django_th' + '_fire_trigger_' + str(trigger_id)):
        # the trigger is already firing
        template = 'triggers/fire_trigger_ko.html'
        trigger = TriggerService.objects.get(id=trigger_id)
        kwargs = {'trigger': trigger}
    else:
        now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
        # flag the trigger as firing then run it
        cache.set('django_th' + '_fire_trigger_' + str(trigger_id), '*')
        management.call_command('read_n_pub', trigger_id=trigger_id)
        trigger = TriggerService.objects.get(id=trigger_id)
        date_result = arrow.get(trigger.date_result).to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
        date_triggered = arrow.get(trigger.date_triggered).to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
        if date_result < date_triggered and date_triggered > now:
            date = '*'
        template = 'triggers/fire_trigger.html'
        kwargs = {'trigger': trigger, 'date': date}
    return render(request, template, kwargs)
2.710634
2.841471
0.953954
def service_related_triggers_switch_to(request, user_service_id, switch):
    """Switch the status of all the triggers related to the service, then go back home.

    :param request: request object
    :param user_service_id: the service ID whose related triggers are switched
    :param switch: the switch value, 'off' or 'on'
    :type request: HttpRequest object
    :type user_service_id: int
    :type switch: string off or on
    """
    status = switch != 'off'
    TriggerService.objects.filter(provider__id=user_service_id).update(status=status)
    TriggerService.objects.filter(consumer__id=user_service_id).update(status=status)
    # ex: 'ServiceRss' -> 'Rss'
    service = UserService.objects.get(id=user_service_id).name.name.split('Service')[1]
    messages.warning(request, _('All triggers of %s are now %s') % (service, switch))
    return HttpResponseRedirect(reverse('user_services'))
3.256594
3.370743
0.966135
def trigger_switch_all_to(request, switch):
    """Switch the status of all the "my" triggers then go back home.

    :param request: request object
    :param switch: the switch value, 'off' or 'on'
    :type request: HttpRequest object
    :type switch: string off or on
    """
    status = switch != 'off'
    if status:
        # re-enabling: also reset the triggered date
        now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ss')
        TriggerService.objects.filter(user=request.user).update(status=status, date_triggered=now)
    else:
        TriggerService.objects.filter(user=request.user).update(status=status)
    return HttpResponseRedirect(reverse('base'))
2.640601
2.762274
0.955952
def list_services(request, step):
    """Get the activated services added from the administrator.

    :param request: request object
    :param step: the step which is proceeded
    :type request: HttpRequest object
    :type step: string
    :return: the activated services added from the administrator
    """
    all_datas = []
    if step == '0':
        services = ServicesActivated.objects.filter(status=1)
    elif step == '3':
        services = ServicesActivated.objects.filter(status=1, id__iexact=request.id)
    else:
        # BUGFIX: any other step left 'services' unbound and raised
        # UnboundLocalError; return no service instead
        services = []
    for class_name in services:
        all_datas.append({class_name: class_name.name.rsplit('Service', 1)[1]})
    return all_datas
5.05124
4.942681
1.021964
def trigger_edit(request, trigger_id, edit_what):
    """Edit the provider or the consumer of a trigger.

    :param request: request object
    :param trigger_id: ID of the trigger to edit
    :param edit_what: edit a 'Provider' or 'Consumer' ?
    :type request: HttpRequest object
    :type trigger_id: int
    :type edit_what: string
    :return render
    :rtype HttpResponse
    """
    if edit_what not in ('Provider', 'Consumer'):
        # bad request
        return redirect('base')
    form_name = edit_what + 'Form'
    # get the trigger object
    service = TriggerService.objects.get(id=trigger_id)
    if can_modify_trigger(request, service.provider.name.status, service.consumer.name.status):
        return HttpResponseRedirect(reverse('base'))
    my_service = service.consumer.name.name if edit_what == 'Consumer' else service.provider.name.name
    # ex: 'ServiceRss' -> 'Rss'
    service_name = str(my_service).split('Service')[1]
    # the model of this service and its data linked to that trigger
    model = get_service(my_service)
    data = model.objects.get(trigger_id=trigger_id)
    template = service_name.lower() + '/edit_' + edit_what.lower() + ".html"
    form_class = get_service(my_service, 'forms', form_name)
    if request.method == 'POST':
        form = form_class(request.POST, instance=data)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('trigger_edit_thanks'))
    else:
        form = form_class(instance=data)
    context = {'description': service.description, 'edit_what': edit_what, 'data': data,
               'is_secure': request.is_secure(), 'host': request.get_host()}
    return render(request, template, {'form': form, 'context': context})
3.062153
2.938045
1.042242
def read_data(self, **kwargs):
    """Get the data from the TodoIst service.

    :param kwargs: contain keyword args : trigger_id at least
    :type kwargs: dict
    :rtype: list
    """
    trigger_id = kwargs.get('trigger_id')
    date_triggered = kwargs.get('date_triggered')
    data = []
    project_name = 'Main Project'
    items = self.todoist.sync()
    try:
        for item in items.get('items'):
            date_added = arrow.get(item.get('date_added'), 'ddd DD MMM YYYY HH:mm:ss ZZ')
            if date_added > date_triggered:
                # look for the name of the project of this item
                for project in items.get('projects'):
                    if item.get('project_id') == project.get('id'):
                        project_name = project.get('name')
                title = 'From TodoIst Project {0}:'.format(project_name)
                data.append({'title': title, 'content': item.get('content')})
                # digester
                self.send_digest_event(trigger_id, title, '')
        cache.set('th_todoist_' + str(trigger_id), data)
    except AttributeError:
        logger.error(items)
    return data
3.990185
3.976847
1.003354
def save_data(self, trigger_id, **data):
    """Save the data to TodoIst.

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    title, content = super(ServiceTodoist, self).save_data(trigger_id, **data)
    status = False
    if self.token:
        if title or content or data.get('link'):
            content = title + ' ' + content + ' ' + data.get('link')
            self.todoist.add_item(content)
            sentence = str('todoist {} created').format(data.get('link'))
            logger.debug(sentence)
            status = True
    else:
        logger.critical("no token or link provided for trigger ID {} ".format(trigger_id))
    return status
5.296222
5.496648
0.963537
def read_data(self, **kwargs):
    """Get the data from the service.

    As the service does not provide any date in its API linked to the
    note, the triggered date is added to the data so the trigger fires
    when data are found.

    :param kwargs: contain keyword args : trigger_id at least
    :type kwargs: dict
    :rtype: list
    """
    trigger_id = kwargs.get('trigger_id')
    data = list()
    cache.set('th_joplin_' + str(trigger_id), data)
    # BUGFIX: the docstring advertises a list but the previous
    # implementation implicitly returned None
    return data
10.121796
9.445809
1.071565
def save_data(self, trigger_id, **data):
    """Save the data to Joplin.

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    from th_joplin.models import Joplin

    data['output_format'] = 'markdown_github'
    title, content = super(ServiceJoplin, self).save_data(trigger_id, **data)
    # get the data of this trigger
    trigger = Joplin.objects.get(trigger_id=trigger_id)
    response = self.joplin.create_note(title=title, body=content, parent_id=trigger.folder)
    # BUGFIX: the previous code leaked the raw HTTP status code on
    # non-200 responses although the docstring promises a boolean
    return response.status_code == 200
4.841393
4.874439
0.993221
def get_service(service, model_form='models', form_name=''):
    """Get the service name then load the model or form class.

    :param service: the service name, e.g. 'ServiceRss'
    :param model_form: could be 'models' or 'forms'
    :param form_name: the name of the form when model_form is 'forms'
    :type service: string
    :type model_form: string
    :type form_name: string
    :return: the object of the spotted Class.
    :rtype: object

    :Example: for 'ServiceRss' the module is th_rss.models (or
    th_rss.forms) and the class is Rss (or Rss + form_name, e.g.
    RssProviderForm)
    """
    service_name = str(service).split('Service')[1]
    module_path = 'th_' + service_name.lower() + '.' + model_form
    suffix = form_name if model_form == 'forms' else ''
    return class_for_name(module_path, service_name + suffix)
3.815159
3.529075
1.081065
def to_datetime(data):
    """Convert a feedparser 9-tuple date to a datetime.

    feedparser provides this 9-tuple in one of the *_parsed keys; an
    already-parsed 'my_date' value is handed to arrow instead.

    :param data: data to be checked
    :type data: dict
    :return: the converted date, or None when no date key is found
    """
    for key in ('published_parsed', 'created_parsed', 'updated_parsed'):
        if key in data:
            return datetime.datetime.utcfromtimestamp(time.mktime(data.get(key)))
    if 'my_date' in data:
        return arrow.get(data['my_date'])
    return None
1.853051
1.851643
1.000761
def get_tags(model, trigger_id):
    """Build the hashtag string from the trigger's comma-separated tags.

    :param model: the model object to request
    :param trigger_id: the id of the related trigger
    :return: tags string like ' #foo,#bar', empty string when no tag
    """
    # get the data of this trigger
    trigger = model.objects.get(trigger_id=trigger_id)
    if not trigger.tag:
        return ''
    # there may be several comma-separated tags
    hashtags = ["#" + tag.strip() for tag in trigger.tag.split(',')]
    return ' ' + ','.join(hashtags)
4.69636
4.803208
0.977755
def _create_content(self, site_title, content, pelican_path, url, **data):
    """Create the file in the 'content' directory of pelican.

    :param site_title: title of the website
    :param content: the content of the post
    :param pelican_path: where the files are created
    :param url: url of the datasource
    :param data: the data to check to be used and save
    :type content: string
    :type pelican_path: string
    :type url: string
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    published = to_datetime(data)
    category = data.get('category') or ''
    tags = data.get('tags') or ''
    filename = self._set_filename(data.get('title'), pelican_path)
    full_content = self._set_full_content(site_title, data.get('title'), published,
                                          content, url, category, tags)
    try:
        with open(filename, 'w') as f:
            f.write(full_content)
        return True
    except Exception as e:
        logger.critical(e)
        return False
2.875696
3.026424
0.950196
# cleaning the special char name = title.replace('/', '_').replace('\\', '_').\ replace(' ', '_').replace(':', '_').replace('&', '').\ replace('?', '').replace('!', '') return "{}/{}.html".format(pelican_path, name)
def _set_filename(title, pelican_path)
build the filename :param title: the title of the post :param pelican_path: where the files are created :type title: string :type pelican_path: string :return: the filename :rtype: string
4.983517
5.029151
0.990926
def _set_full_content(self, site_title, title, published, content, url, category='', tags=''):
    """Generate the full content of the pelican file.

    :param site_title: title of the website
    :param title: the title of the post
    :param published: the date when the data has been published by the provider
    :param content: the content of the post
    :param url: url of the datasource
    :param category: category of this data
    :param tags: tags of this data
    :return: the complete content
    :rtype: string
    """
    parts = (self._set_html_begin(),
             self._set_title(title),
             self._set_meta(title, published, category, tags),
             self._set_content(content),
             self._set_footer(url, site_title),
             self._set_html_end())
    return ''.join(parts)
2.823313
3.104244
0.909501
def _set_meta(self, title, published, category='', tags=''):
    """Build the <meta> header of the pelican page.

    :param title: the title of the post
    :param published: the date when the data has been published by the provider
    :param category: category of this data
    :param tags: the tags
    :type title: string
    :type published: string
    :return: the complete head
    :rtype: string
    """
    slug_published = slugify(arrow.get(published).format('YYYY-MM-DD HH:mm'))
    slug_title = slugify(title)
    lines = ['\n\t\t<meta name="date" content="{}" />\n'.format(published)]
    if tags:
        lines.append('\t\t<meta name="tags" content="{}" />\n'.format(tags))
    if category:
        lines.append('\t\t<meta name="category" content="{}" />\n'.format(category))
    if self.AUTHOR:
        lines.append('\t\t<meta name="authors" content="{}" />\n'.format(self.AUTHOR))
    lines.append('\t\t<meta name="slug" content="{}"/>\n'.format(slug_published + '-' + slug_title))
    lines.append('\t</head>')
    return ''.join(lines)
2.238549
2.25735
0.991671
def save_data(self, trigger_id, **data):
    """Save the data to a pelican content file.

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    from th_pelican.models import Pelican

    title, content = super(ServicePelican, self).save_data(trigger_id, **data)
    trigger = Pelican.objects.get(trigger_id=trigger_id)
    params = {'tags': trigger.tags.lower(), 'category': trigger.category.lower()}
    # the trigger's own tags supersede the ones carried by the data
    data.pop('tags', None)
    params.update(data)
    status = self._create_content(trigger.title, content, trigger.path, trigger.url, **params)
    sentence = 'pelican {} created' if status else 'pelican {} not created'
    logger.debug(sentence.format(title))
    return status
4.180663
4.187198
0.998439
def update_trigger(self, service):
    """Update the date when the trigger occurred and reset the failure counters.

    :param service: service object to update
    """
    now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
    TriggerService.objects.filter(id=service.id).update(date_triggered=now,
                                                        consumer_failed=0,
                                                        provider_failed=0)
4.823757
4.950843
0.97433
def log_update(self, service, to_update, status, count):
    """Log everything at the end of the handling of a trigger.

    :param service: service object
    :param to_update: boolean to check if we have to update
    :param status: is everything worked fine ?
    :param count: number of data to update
    :type service: service object
    :type to_update: boolean
    :type status: boolean
    :type count: integer
    """
    if not to_update:
        logger.debug("{} nothing new ".format(service))
    elif status:
        msg = "{} - {} new data".format(service, count)
        update_result(service.id, msg="OK", status=status)
        logger.info(msg)
    else:
        msg = "{} AN ERROR OCCURS ".format(service)
        update_result(service.id, msg=msg, status=status)
        logger.warning(msg)
4.060523
3.688813
1.100767
def provider(self, service):
    """Get the data from (the cache of) the service provider.

    :param service: the service to read
    :return: data
    """
    service_provider = default_provider.get_service(str(service.provider.name.name))
    # ex: 'ServiceRss' -> 'th_rss', the cache stack to read from
    module_name = 'th_' + service.provider.name.name.split('Service')[1].lower()
    kwargs = {'trigger_id': str(service.id), 'cache_stack': module_name}
    return service_provider.process_data(**kwargs)
8.510758
7.770996
1.095195
def consumer(self, service, data, to_update, status):
    """Call the consumer service and hand it the data to save.

    :param service: the service to use
    :param data: the data to save
    :param to_update: flag raised once the consumer handled data
    :param status: the status of the save action
    :return: (to_update, status)
    """
    # consumer - the service which uses the data
    service_consumer = default_provider.get_service(str(service.consumer.name.name))
    kwargs = {'user': service.user}
    service_consumer.__init__(service.consumer.token, **kwargs)
    save = service_consumer.save_data
    for each_data in data:
        each_data['userservice_id'] = service.consumer.id
        # the consumer will save the data and return if success or not
        status = save(service.id, **each_data)
        to_update = True
    return to_update, status
7.947919
8.058726
0.98625
def publishing(self, service):
    """Get the data from the cache then publish them.

    :param service: service object where we will publish
    :type service: object
    """
    # to_update: do we have something to update ?
    to_update = False
    # status: did everything work fine ?
    status = False
    # when date_triggered is None, the trigger never ran yet
    if service.date_triggered is None:
        logger.debug("first run {}".format(service))
        to_update = True
        status = True
    # run run run
    data = self.provider(service)
    count_new_data = len(data) if data else 0
    if count_new_data > 0:
        to_update, status = self.consumer(service, data, to_update, status)
    # let's log
    self.log_update(service, to_update, status, count_new_data)
    # let's update
    if to_update and status:
        self.update_trigger(service)
5.353498
5.233747
1.02288
def name(function):
    """Retrieve a pretty name for the function.

    :param function: function to get name from
    :return: pretty name
    """
    if isinstance(function, types.FunctionType):
        return function.__name__
    return str(function)
2.635075
3.966642
0.664309
def map_t(func):
    """Transformation for Sequence.map

    :param func: map function
    :return: transformation
    """
    label = 'map({0})'.format(name(func))
    return Transformation(label, partial(map, func), {ExecutionStrategies.PARALLEL})
19.014265
20.346457
0.934525
def starmap_t(func):
    """Transformation for Sequence.starmap and Sequence.smap

    :param func: starmap function
    :return: transformation
    """
    label = 'starmap({})'.format(name(func))
    return Transformation(label, partial(starmap, func), {ExecutionStrategies.PARALLEL})
17.247505
19.739071
0.873775
def filter_t(func):
    """Transformation for Sequence.filter

    :param func: filter function
    :return: transformation
    """
    label = 'filter({0})'.format(name(func))
    return Transformation(label, partial(filter, func), {ExecutionStrategies.PARALLEL})
21.122198
20.522255
1.029234
def filter_not_t(func):
    """Transformation for Sequence.filter_not

    :param func: filter_not function
    :return: transformation
    """
    label = 'filter_not({0})'.format(name(func))
    return Transformation(label, partial(six.moves.filterfalse, func),
                          {ExecutionStrategies.PARALLEL})
18.567131
16.820671
1.103828
def slice_t(start, until):
    """Transformation for Sequence.slice

    :param start: start index
    :param until: until index (does not include element at until)
    :return: transformation
    """
    def slicer(sequence):
        return islice(sequence, start, until)
    return Transformation('slice({0}, {1})'.format(start, until), slicer, None)
5.709064
5.025651
1.135985
def distinct_by_t(func):
    """
    Transformation for Sequence.distinct_by

    :param func: distinct_by function
    :return: transformation
    """
    def distinct_by(sequence):
        # keep only the first element seen for each key
        seen = {}
        for item in sequence:
            seen.setdefault(func(item), item)
        return seen.values()
    return Transformation('distinct_by({0})'.format(name(func)), distinct_by, None)
3.55257
3.336676
1.064703
def sorted_t(key=None, reverse=False):
    """
    Transformation for Sequence.sorted

    :param key: key to sort by
    :param reverse: reverse or not
    :return: transformation
    """
    def do_sort(sequence):
        return sorted(sequence, key=key, reverse=reverse)
    return Transformation('sorted', do_sort, None)
6.10745
5.328202
1.14625
def order_by_t(func):
    """
    Transformation for Sequence.order_by

    :param func: order_by function
    :return: transformation
    """
    def ordering(sequence):
        return sorted(sequence, key=func)
    return Transformation('order_by({0})'.format(name(func)), ordering, None)
7.313952
7.30296
1.001505
def drop_right_t(n):
    """
    Transformation for Sequence.drop_right

    :param n: number to drop from right
    :return: transformation
    """
    # a non-positive n drops nothing: slice up to the very end
    end_index = -n if n > 0 else None
    def chop_tail(sequence):
        return sequence[:end_index]
    return Transformation('drop_right({0})'.format(n), chop_tail, None)
5.072686
4.292897
1.181646
def flat_map_t(func):
    """
    Transformation for Sequence.flat_map

    :param func: function to flat_map
    :return: transformation
    """
    label = 'flat_map({0})'.format(name(func))
    return Transformation(label, partial(flat_map_impl, func),
                          {ExecutionStrategies.PARALLEL})
9.630212
10.343616
0.931029
def cartesian_t(iterables, repeat):
    """
    Transformation for Sequence.cartesian

    :param iterables: elements for cartesian product
    :param repeat: how many times to repeat iterables
    :return: transformation
    """
    def make_product(sequence):
        return product(sequence, *iterables, repeat=repeat)
    return Transformation('cartesian', make_product, None)
8.942971
8.730598
1.024325
def inits_t(wrap):
    """
    Transformation for Sequence.inits

    :param wrap: wrap children values with this
    :return: transformation
    """
    def all_inits(sequence):
        # longest prefix first, down to the empty prefix
        return [wrap(sequence[:i]) for i in range(len(sequence), -1, -1)]
    return Transformation('inits', all_inits, {ExecutionStrategies.PRE_COMPUTE})
10.341558
9.145201
1.130818
def tails_t(wrap):
    """
    Transformation for Sequence.tails

    :param wrap: wrap children values with this
    :return: transformation
    """
    def all_tails(sequence):
        # every suffix, from the full sequence down to the empty one
        return [wrap(sequence[i:]) for i in range(len(sequence) + 1)]
    return Transformation('tails', all_tails, {ExecutionStrategies.PRE_COMPUTE})
8.946104
8.458
1.057709
def group_by_key_impl(sequence):
    """
    Implementation for group_by_key_t

    :param sequence: sequence of (key, value) pairs to group
    :return: grouped sequence as (key, [values]) items
    """
    grouped = {}
    for element in sequence:
        grouped.setdefault(element[0], []).append(element[1])
    return six.viewitems(grouped)
2.312099
2.646132
0.873765
def reduce_by_key_impl(func, sequence):
    """
    Implementation for reduce_by_key_t

    :param func: reduce function
    :param sequence: sequence of (key, value) pairs to reduce
    :return: reduced sequence as (key, reduced_value) items
    """
    totals = {}
    for key, value in sequence:
        # fold the new value into the running total for this key
        totals[key] = func(totals[key], value) if key in totals else value
    return six.viewitems(totals)
2.113535
2.650512
0.797406
iterator = iter(sequence) total = next(iterator) yield total for element in iterator: total = func(total, element) yield total
def _accumulate(sequence, func)
Python2 accumulate implementation taken from https://docs.python.org/3/library/itertools.html#itertools.accumulate
2.172944
1.809822
1.200639
def accumulate_impl(func, sequence):
    """
    Implementation for accumulate

    :param sequence: sequence to accumulate
    :param func: accumulate function
    """
    # pylint: disable=no-name-in-module
    if not six.PY3:
        # Python 2 has no itertools.accumulate; use the local backport
        return _accumulate(sequence, func)
    from itertools import accumulate
    return accumulate(sequence, func)
3.310552
4.056721
0.816066
def count_by_key_impl(sequence):
    """
    Implementation for count_by_key_t

    :param sequence: sequence of (key, value) pairs
    :return: counts by key
    """
    key_counts = collections.Counter(key for key, _ in sequence)
    return six.viewitems(key_counts)
3.991034
5.065471
0.78789
def count_by_value_impl(sequence):
    """
    Implementation for count_by_value_t

    :param sequence: sequence of values
    :return: counts by value
    """
    value_counts = collections.Counter(sequence)
    return six.viewitems(value_counts)
4.08415
5.498045
0.742837
def group_by_impl(func, sequence):
    """
    Implementation for group_by_t

    :param func: grouping function
    :param sequence: sequence to group
    :return: grouped sequence as (key, [elements]) items
    """
    grouped = {}
    for element in sequence:
        # compute the key exactly once per element
        # (the original called func(element) up to three times)
        grouped.setdefault(func(element), []).append(element)
    return six.viewitems(grouped)
2.456512
2.79826
0.877871
def grouped_impl(wrap, size, sequence):
    """
    Implementation for grouped_t

    :param wrap: wrap children values with this
    :param size: size of groups
    :param sequence: sequence to group
    :return: grouped sequence
    """
    iterator = iter(sequence)
    while True:
        chunk = islice(iterator, size)
        try:
            head = wrap(next(chunk))
        except StopIteration:
            # source exhausted: no more groups
            return
        yield list(chain((head,), chunk))
3.259459
4.349283
0.749424
def grouped_t(wrap, size):
    """
    Transformation for Sequence.grouped

    :param wrap: wrap children values with this
    :param size: size of groups
    :return: transformation
    """
    label = 'grouped({0})'.format(size)
    return Transformation(label, partial(grouped_impl, wrap, size), None)
10.637727
8.8346
1.204098
def sliding_impl(wrap, size, step, sequence):
    """
    Implementation for sliding_t

    :param wrap: wrap children values with this
    :param size: size of window
    :param step: step size
    :param sequence: sequence to create sliding windows from
    :return: sequence of sliding windows
    """
    length = len(sequence)
    offset = 0
    # with step == 1 only full windows are emitted;
    # with larger steps a trailing partial window is allowed
    while offset + size <= length or (step != 1 and offset < length):
        yield wrap(sequence[offset:offset + size])
        offset += step
3.011377
3.993242
0.754118
def sliding_t(wrap, size, step):
    """
    Transformation for Sequence.sliding

    :param wrap: wrap children values with this
    :param size: size of window
    :param step: step size
    :return: transformation
    """
    label = 'sliding({0}, {1})'.format(size, step)
    return Transformation(label, partial(sliding_impl, wrap, size, step),
                          {ExecutionStrategies.PRE_COMPUTE})
9.734434
9.193368
1.058854
def partition_t(wrap, func):
    """
    Transformation for Sequence.partition

    :param wrap: wrap children values with this
    :param func: partition function
    :return: transformation
    """
    label = 'partition({0})'.format(name(func))
    return Transformation(label, partial(partition_impl, wrap, func), None)
10.151655
10.40166
0.975965
def inner_join_impl(other, sequence):
    """
    Implementation for part of join_impl

    :param other: other sequence to join with
    :param sequence: first sequence to join with
    :return: joined sequence
    """
    seq_kv = {}
    for element in sequence:
        seq_kv[element[0]] = element[1]
    other_kv = dict(other)
    # iterate over the smaller side to minimize lookups
    smaller = seq_kv if len(seq_kv) < len(other_kv) else other_kv
    joined = {k: (seq_kv[k], other_kv[k])
              for k in smaller
              if k in seq_kv and k in other_kv}
    return six.viewitems(joined)
2.490145
2.659224
0.936418
def join_impl(other, join_type, sequence):
    """
    Implementation for join_t

    :param other: other sequence to join with
    :param join_type: join type (inner, outer, left, right)
    :param sequence: first sequence to join with
    :return: joined sequence
    """
    if join_type == "inner":
        return inner_join_impl(other, sequence)
    seq_kv = {}
    for element in sequence:
        seq_kv[element[0]] = element[1]
    other_kv = dict(other)
    # the join type decides which side provides the keys
    if join_type == "left":
        keys = seq_kv.keys()
    elif join_type == "right":
        keys = other_kv.keys()
    elif join_type == "outer":
        keys = set(list(seq_kv.keys()) + list(other_kv.keys()))
    else:
        raise TypeError("Wrong type of join specified")
    joined = {k: (seq_kv.get(k), other_kv.get(k)) for k in keys}
    return six.viewitems(joined)
2.313465
2.446107
0.945774
def join_t(other, join_type):
    """
    Transformation for Sequence.join, Sequence.inner_join, Sequence.outer_join,
    Sequence.right_join, and Sequence.left_join

    :param other: other sequence to join with
    :param join_type: join type from left, right, inner, and outer
    :return: transformation
    """
    label = '{0}_join'.format(join_type)
    return Transformation(label, partial(join_impl, other, join_type), None)
7.038313
7.504109
0.937928
def is_primitive(val):
    """
    Check whether the passed value is a primitive type
    (str/bytes/bool/numeric, including the six py2/py3 variants).

    :param val: value to check
    :return: True if value is a primitive, else False
    """
    primitive_types = ((str, bool, float, complex, bytes, six.text_type)
                       + six.string_types
                       + six.integer_types)
    return isinstance(val, primitive_types)
5.176116
8.733293
0.592688
def is_namedtuple(val):
    """
    Use duck typing to check if val is a named tuple. Checks that val is of
    type tuple and contains the attribute _fields which is defined for
    named tuples.

    :param val: value to check type of
    :return: True if val is a namedtuple
    """
    val_type = type(val)
    bases = val_type.__bases__
    # namedtuple classes subclass tuple directly and only tuple
    if len(bases) != 1 or bases[0] != tuple:
        return False
    fields = getattr(val_type, '_fields', None)
    # a plain tuple subclass has no _fields; the original iterated None
    # here and raised TypeError instead of returning False
    if not isinstance(fields, tuple):
        return False
    return all(isinstance(n, str) for n in fields)
2.005283
2.303769
0.870436
def is_iterable(val):
    """
    Check if val is not a list, but is a collections Iterable type.
    This is used to determine when list() should be called on val.

    :param val: value to check
    :return: True if it is not a list, but is iterable
    """
    if isinstance(val, list):
        return False
    # collections.Iterable was removed in Python 3.10; resolve the ABC
    # through collections.abc when available, falling back for Python 2
    iterable_base = getattr(collections, 'abc', collections).Iterable
    return isinstance(val, iterable_base)
4.14937
5.491452
0.755605
def split_every(parts, iterable):
    """
    Split an iterable into chunks of length parts (the last chunk may be
    shorter).

    :param iterable: iterable to split
    :param parts: number of elements per chunk
    :return: lazy iterable of chunks
    """
    # keep slicing the iterator until a slice comes back empty
    chunks = (list(islice(iterable, parts)) for _ in count())
    return takewhile(bool, chunks)
4.227597
8.220018
0.514305
def unpack(packed):
    """
    Unpack the function and args then apply the function to the arguments
    and return result.

    :param packed: input packed tuple of (func, args)
    :return: list of results if the result is iterable, otherwise None
    """
    func, args = serializer.loads(packed)
    result = func(*args)
    # collections.Iterable was removed in Python 3.10; resolve the ABC
    # through collections.abc when available, falling back for Python 2
    iterable_base = getattr(collections, 'abc', collections).Iterable
    if isinstance(result, iterable_base):
        return list(result)
    return None
5.365486
6.02222
0.890948
def parallelize(func, result, processes=None, partition_size=None):
    """
    Creates an iterable which is lazily computed in parallel from applying
    func on result.

    :param func: Function to apply
    :param result: Data to apply to
    :param processes: Number of processes to use in parallel
    :param partition_size: Size of partitions for each parallel process
    :return: Iterable of applying func on result
    """
    chunks = lazy_parallelize(func, result,
                              processes=processes,
                              partition_size=partition_size)
    # flatten the per-process chunks back into a single stream
    return chain.from_iterable(chunks)
3.603128
4.676231
0.77052
def lazy_parallelize(func, result, processes=None, partition_size=None):
    """
    Lazily computes an iterable in parallel, and returns them in pool chunks.

    :param func: Function to apply
    :param result: Data to apply to
    :param processes: Number of processes to use in parallel
    :param partition_size: Size of partitions for each parallel process
    :return: Iterable of chunks where each chunk has func applied to it
    """
    if processes is None or processes < 1:
        processes = CPU_COUNT
    else:
        processes = min(processes, CPU_COUNT)
    partition_size = partition_size or compute_partition_size(result, processes)
    pool = Pool(processes=processes)
    try:
        partitions = split_every(partition_size, iter(result))
        packed_partitions = (pack(func, (partition, )) for partition in partitions)
        for pool_result in pool.imap(unpack, packed_partitions):
            yield pool_result
    finally:
        # terminate the workers even if the consumer stops iterating early
        # or an exception is raised mid-stream (the original leaked the pool)
        pool.terminate()
2.943087
3.138734
0.937667
def compute_partition_size(result, processes):
    """
    Attempts to compute the partition size to evenly distribute work across
    processes. Defaults to 1 if the length of result cannot be determined.

    :param result: Result to compute on
    :param processes: Number of processes to use
    :return: Best partition size
    """
    try:
        length = len(result)
    except TypeError:
        # generators and other un-sized iterables fall back to size 1
        return 1
    return max(math.ceil(length / processes), 1)
3.960466
3.780948
1.04748
def compose(*functions):
    """
    Compose all the function arguments together, right-to-left:
    compose(f, g)(x) == f(g(x)).

    :param functions: Functions to compose
    :return: Single composed function
    """
    # pylint: disable=undefined-variable
    def chain_two(outer, inner):
        return lambda x: outer(inner(x))
    return reduce(chain_two, functions, lambda x: x)
2.883603
3.861946
0.746671
def evaluate(self, sequence, transformations):
    """
    Execute the sequence of transformations in serial.

    :param sequence: Sequence to evaluate
    :param transformations: Transformations to apply
    :return: Resulting sequence or value
    """
    # pylint: disable=no-self-use
    result = sequence
    for transform in transformations:
        strategies = transform.execution_strategies or ()
        # pre-compute strategies need the sequence materialized first
        needs_list = ExecutionStrategies.PRE_COMPUTE in strategies
        result = transform.function(list(result) if needs_list else result)
    return iter(result)
5.275676
5.660595
0.932
def evaluate(self, sequence, transformations):
    """
    Execute the sequence of transformations in parallel.

    :param sequence: Sequence to evaluate
    :param transformations: Transformations to apply
    :return: Resulting sequence or value
    """
    result = sequence
    parallel = partial(parallelize,
                       processes=self.processes,
                       partition_size=self.partition_size)
    staged = []
    for transform in transformations:
        strategies = transform.execution_strategies or {}
        if ExecutionStrategies.PARALLEL in strategies:
            # parallel-safe transforms are composed (right-to-left)
            # and run later in one parallel pass
            staged.insert(0, transform.function)
            continue
        # a serial transform flushes any staged parallel work first
        if staged:
            result = parallel(compose(*staged), result)
            staged = []
        if ExecutionStrategies.PRE_COMPUTE in strategies:
            result = list(result)
        result = transform.function(result)
    # flush whatever parallel work remains staged at the end
    if staged:
        result = parallel(compose(*staged), result)
    return iter(result)
4.788402
4.780601
1.001632
def evaluate(self, sequence):
    """
    Compute the lineage on the sequence.

    :param sequence: Sequence to compute
    :return: Evaluated sequence
    """
    # skip everything up to (and including) the most recent cache point
    start = self.cache_scan()
    pending = self.transformations[start:]
    return self.engine.evaluate(sequence, pending)
8.990556
10.57059
0.850525
def cache_scan(self):
    """
    Scan the lineage for the index of the most recent cache.

    :return: Index just past the most recent cache, or 0 if none exists
    """
    reversed_lineage = self.transformations[::-1]
    try:
        offset_from_end = reversed_lineage.index(CACHE_T)
    except ValueError:
        # no cache transformation anywhere in the lineage
        return 0
    return len(self.transformations) - offset_from_end
7.076616
6.00093
1.179253
def _wrap(value):
    """
    Wraps the passed value in a Sequence if it is not a primitive. If it is
    a string argument it is expanded to a list of characters.

    :param value: value to wrap
    :return: wrapped or not wrapped value
    """
    if is_primitive(value):
        return value
    if isinstance(value, (dict, set)) or is_namedtuple(value):
        return value
    # collections.Iterable was removed in Python 3.10; resolve the ABC
    # through collections.abc when available, falling back for Python 2
    iterable_base = getattr(collections, 'abc', collections).Iterable
    if isinstance(value, iterable_base):
        # only touch pandas if the value already looks like a DataFrame
        try:
            if type(value).__name__ == 'DataFrame':
                import pandas
                if isinstance(value, pandas.DataFrame):
                    return Sequence(value.values)
        except ImportError:  # pragma: no cover
            pass
        return Sequence(value)
    return value
3.630299
3.745762
0.969175
def _transform(self, *transforms):
    """
    Copies the given Sequence and appends new transformation.

    :param transforms: transform to apply or list of transforms to apply
    :return: transformed sequence
    """
    sequence = None
    for transform in transforms:
        # the first transform chains off self; later ones chain off the
        # previously built sequence (same truthiness check as before)
        source = sequence if sequence else self
        sequence = Sequence(source, transform=transform)
    return sequence
3.622339
3.807235
0.951436
def cache(self, delete_lineage=False):
    """
    Caches the result of the Sequence so far. This means that any functions
    applied on the pipeline before cache() are evaluated, and the result is
    stored in the Sequence. This is primarily used internally and is no more
    helpful than to_list() externally. delete_lineage allows for cache() to
    be used in internal initialization calls without the caller having
    knowledge of the internals via the lineage.

    :param delete_lineage: If set to True, it will cache then erase the lineage
    """
    already_cached = (len(self._lineage) == 0
                      or self._lineage[-1] == transformations.CACHE_T)
    if already_cached:
        # nothing pending: just make sure the base sequence is concrete
        if not isinstance(self._base_sequence, list):
            self._base_sequence = list(self._base_sequence)
    else:
        # evaluate pending transformations and store the result
        self._base_sequence = list(self._evaluate())
    self._lineage.apply(transformations.CACHE_T)
    if delete_lineage:
        self._lineage = Lineage(engine=self.engine)
    return self
3.715467
3.651393
1.017548
def cartesian(self, *iterables, **kwargs):
    """
    Returns the cartesian product of the passed iterables with the specified
    number of repetitions. The keyword argument `repeat` is read from kwargs
    to pass to itertools.product.

    :param iterables: elements for cartesian product
    :param kwargs: the variable `repeat` is read from kwargs
    :return: cartesian product
    """
    repeat = kwargs.get('repeat', 1)
    return self._transform(transformations.cartesian_t(iterables, repeat))
14.63568
15.649772
0.935201
def drop(self, n):
    """
    Drop the first n elements of the sequence.

    :param n: number of elements to drop
    :return: sequence without first n elements
    """
    # clamp negative counts to zero: never drop from the right
    how_many = n if n > 0 else 0
    return self._transform(transformations.drop_t(how_many))
4.719912
5.373263
0.878407
def drop_right(self, n):
    """
    Drops the last n elements of the sequence.

    :param n: number of elements to drop
    :return: sequence with last n elements dropped
    """
    # caching first forces evaluation so the length is known before slicing
    cache_step = transformations.CACHE_T
    drop_step = transformations.drop_right_t(n)
    return self._transform(cache_step, drop_step)
20.751015
38.58149
0.537849
def take(self, n):
    """
    Take the first n elements of the sequence.

    :param n: number of elements to take
    :return: first n elements of sequence
    """
    # clamp negative counts to zero: taking less than nothing means nothing
    how_many = n if n > 0 else 0
    return self._transform(transformations.take_t(how_many))
4.85168
5.521576
0.878677
def count(self, func):
    """
    Counts the number of elements in the sequence which satisfy the
    predicate func.

    :param func: predicate to count elements on
    :return: count of elements that satisfy predicate
    """
    return sum(1 for element in self if func(element))
3.390268
4.842073
0.700169
def reduce(self, func, *initial):
    """
    Reduce sequence of elements using func. API mirrors functools.reduce.

    :param func: two parameter, associative reduce function
    :param initial: single optional argument acting as initial value
    :return: reduced value using func
    """
    if len(initial) > 1:
        raise ValueError('reduce takes exactly one optional parameter for initial value')
    # with zero extra args this is reduce(func, self);
    # with one it is reduce(func, self, initial[0])
    return _wrap(reduce(func, self, *initial))
3.208573
3.606494
0.889665
def product(self, projection=None):
    """
    Takes product of elements in sequence.

    :param projection: function to project on the sequence before taking
        the product
    :return: product of elements in sequence
    """
    # empty sequence: the multiplicative identity (optionally projected)
    if self.empty():
        return projection(1) if projection else 1
    # single element: no reduction needed
    if self.size() == 1:
        only = self.first()
        return projection(only) if projection else only
    if projection:
        return self.map(projection).reduce(mul)
    return self.reduce(mul)
2.53999
2.495187
1.017956
def sum(self, projection=None):
    """
    Takes sum of elements in sequence.

    :param projection: function to project on the sequence before taking
        the sum
    :return: sum of elements in sequence
    """
    values = self.map(projection) if projection else self
    return sum(values)
4.124669
4.478183
0.921059
def average(self, projection=None):
    """
    Takes the average of elements in the sequence.

    :param projection: function to project on the sequence before taking
        the average
    :return: average of elements in the sequence
    """
    length = self.size()
    values = self.map(projection) if projection else self
    return sum(values) / length
3.859826
3.940735
0.979468