Schema:
    code                    string
    signature               string
    docstring               string
    loss_without_docstring  float64
    loss_with_docstring     float64
    factor                  float64
# if this note was automatically filed, don't update the auto-classification information
if not self.user:
    return

# if the failure type isn't intermittent, ignore
if self.failure_classification.name not in ["intermittent", "intermittent needs filing"]:
    return

# if the linked Job has more than one TextLogError, ignore
text_log_error = self.job.get_manual_classification_line()
if not text_log_error:
    return

# evaluate the QuerySet here so it can be used when creating new_bugs below
existing_bugs = list(ClassifiedFailure.objects.filter(error_matches__text_log_error=text_log_error)
                                              .values_list('bug_number', flat=True))

new_bugs = (self.job.bugjobmap_set.exclude(bug_id__in=existing_bugs)
                                  .values_list('bug_id', flat=True))

if not new_bugs:
    return

# Create Match instances for each new bug
for bug_number in new_bugs:
    classification, _ = ClassifiedFailure.objects.get_or_create(bug_number=bug_number)
    text_log_error.create_match("ManualDetector", classification)

# if there's only one new bug and no existing ones, verify it
if len(new_bugs) == 1 and not existing_bugs:
    text_log_error.verify_classification(classification)
def _ensure_classification(self)
Ensures a single TextLogError's related bugs have Classifications.

If the linked Job has a single meaningful TextLogError:
 - find the bugs currently related to it via a Classification
 - find the bugs mapped to the job related to this note
 - find the bugs that are mapped but not classified
 - link this subset of bugs to Classifications
 - if there's only one new bug and no existing ones, verify it
6.200332
4.812278
1.28844
# Only insert bugs for verified failures since these are automatically
# mirrored to ES and the mirroring can't be undone
# TODO: Decide whether this should change now that we're no longer mirroring.
bug_numbers = set(ClassifiedFailure.objects
                  .filter(best_for_errors__text_log_error__step__job=job,
                          best_for_errors__best_is_verified=True)
                  .exclude(bug_number=None)
                  .exclude(bug_number=0)
                  .values_list('bug_number', flat=True))

# flat=True so the set contains bug ids rather than 1-tuples, making the
# set difference against bug_numbers below behave as intended
existing_maps = set(BugJobMap.objects.filter(bug_id__in=bug_numbers)
                                     .values_list('bug_id', flat=True))

for bug_number in (bug_numbers - existing_maps):
    BugJobMap.objects.create(job_id=job.id,
                             bug_id=bug_number,
                             user=user)

# if user is not specified, then this is an autoclassified job note and
# we should mark it as such
classification_name = 'intermittent' if user else 'autoclassified intermittent'
classification = FailureClassification.objects.get(name=classification_name)
return JobNote.objects.create(job=job,
                              failure_classification=classification,
                              user=user,
                              text="")
def create_autoclassify_job_note(self, job, user=None)
Create a JobNote, possibly via auto-classification. Create mappings from the given Job to Bugs via verified Classifications of this Job. Also creates a JobNote.
5.837388
5.56088
1.049724
components = self._serialized_components() if not components: return [] from treeherder.model.error_summary import get_useful_search_results job = Job.objects.get(guid=self.job_guid) rv = [] ids_seen = set() for item in get_useful_search_results(job): if all(component in item["search"] for component in components): for suggestion in itertools.chain(item["bugs"]["open_recent"], item["bugs"]["all_others"]): if suggestion["id"] not in ids_seen: ids_seen.add(suggestion["id"]) rv.append(suggestion) return rv
def unstructured_bugs(self)
Get bugs that match this line in the Bug Suggestions artifact for this job.
6.09486
5.555965
1.096994
data = { "action": self.action, "line_number": self.line, "test": self.test, "subtest": self.subtest, "status": self.status, "expected": self.expected, "message": self.message, "signature": self.signature, "level": self.level, "stack": self.stack, "stackwalk_stdout": self.stackwalk_stdout, "stackwalk_stderr": self.stackwalk_stderr, } # Remove empty values data = {k: v for k, v in data.items() if v} return data
def to_mozlog_format(self)
Convert a FailureLine into a mozlog formatted dictionary.
2.700786
2.493135
1.083289
if bug_number == self.bug_number:
    return self

other = ClassifiedFailure.objects.filter(bug_number=bug_number).first()
if not other:
    self.bug_number = bug_number
    self.save(update_fields=['bug_number'])
    return self

self.replace_with(other)
return other
def set_bug(self, bug_number)
Set the bug number of this Classified Failure If an existing ClassifiedFailure exists with the same bug number replace this instance with the existing one.
3.290671
2.413797
1.363276
match_ids_to_delete = list(self.update_matches(other))
TextLogErrorMatch.objects.filter(id__in=match_ids_to_delete).delete()

# Update best classifications
self.best_for_errors.update(best_classification=other)

self.delete()
def replace_with(self, other)
Replace this instance with the given other. Deletes stale Match objects and updates related TextLogErrorMetadatas' best_classifications to point to the given other.
9.349099
4.435562
2.10776
for match in self.error_matches.all():
    other_matches = TextLogErrorMatch.objects.filter(
        classified_failure=other,
        text_log_error=match.text_log_error,
    )

    if not other_matches:
        match.classified_failure = other
        match.save(update_fields=['classified_failure'])
        continue

    # if any of our matches have higher scores than other's matches,
    # overwrite with our score.
    other_matches.filter(score__lt=match.score).update(score=match.score)

    yield match.id
def update_matches(self, other)
Update this instance's Matches to point to the given other's Matches. Find Matches with the same TextLogError as our Matches, updating their score if less than ours and mark our matches for deletion. If there are no other matches, update ours to point to the other ClassifiedFailure.
5.047113
3.422959
1.474488
if classification is None:
    classification = ClassifiedFailure.objects.create()

TextLogErrorMatch.objects.create(
    text_log_error=self,
    classified_failure=classification,
    matcher_name=matcher_name,
    score=1,
)
def create_match(self, matcher_name, classification)
Create a TextLogErrorMatch instance.

Typically used for manual "matches" or tests.
6.308585
4.051741
1.557006
if classification not in self.classified_failures.all():
    self.create_match("ManualDetector", classification)

# create a TextLogErrorMetadata instance for this TextLogError if it
# doesn't exist.  We can't use update_or_create here since OneToOne
# relations don't use an object manager so a missing relation is simply
# None as opposed to RelatedManager.
if self.metadata is None:
    TextLogErrorMetadata.objects.create(text_log_error=self,
                                        best_classification=classification,
                                        best_is_verified=True)
else:
    self.metadata.best_classification = classification
    self.metadata.best_is_verified = True
    self.metadata.save(update_fields=['best_classification', 'best_is_verified'])

self.metadata.failure_line.elastic_search_insert()

# Send event to NewRelic when verifying an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
if not match:
    return

newrelic.agent.record_custom_event('user_verified_classification', {
    'matcher': match.matcher_name,
    'job_id': self.id,
})
def verify_classification(self, classification)
Mark the given ClassifiedFailure as verified. Handles the classification not currently being related to this TextLogError and no Metadata existing.
6.828479
5.618361
1.215386
if settings.BUGFILER_API_KEY is None: return Response({"failure": "Bugzilla API key not set!"}, status=HTTP_400_BAD_REQUEST) params = request.data # Arbitrarily cap crash signatures at 2048 characters to prevent perf issues on bmo crash_signature = params.get("crash_signature") if crash_signature and len(crash_signature) > 2048: return Response({"failure": "Crash signature can't be more than 2048 characters."}, status=HTTP_400_BAD_REQUEST) description = u"**Filed by:** {}\n{}".format( request.user.email.replace('@', " [at] "), params.get("comment", "") ).encode("utf-8") summary = params.get("summary").encode("utf-8").strip() url = settings.BUGFILER_API_URL + "/rest/bug" headers = { 'x-bugzilla-api-key': settings.BUGFILER_API_KEY, 'Accept': 'application/json' } data = { 'product': params.get("product"), 'component': params.get("component"), 'summary': summary, 'keywords': params.get("keywords"), 'blocks': params.get("blocks"), 'depends_on': params.get("depends_on"), 'see_also': params.get("see_also"), 'version': params.get("version"), 'cf_crash_signature': params.get("crash_signature"), 'severity': params.get("severity"), 'priority': params.get("priority"), 'description': description, 'comment_tags': "treeherder", } try: response = make_request(url, method='POST', headers=headers, json=data) except requests.exceptions.HTTPError as e: try: message = e.response.json()['message'] except (ValueError, KeyError): message = e.response.text return Response({"failure": message}, status=HTTP_400_BAD_REQUEST) return Response({"success": response.json()["id"]})
def create_bug(self, request)
Create a bugzilla bug with passed params
2.749194
2.707821
1.015279
min_id = 0

while True:
    chunk = qs.filter(id__gt=min_id).order_by('id')

    if fields is not None:
        chunk = chunk.only(*fields)

    # Cast to a list to execute the QuerySet here and allow us to get the
    # last ID when updating min_id.  We can't use .last() later as it
    # ignores the slicing we do.
    rows = list(chunk[:chunk_size])

    total = len(rows)
    if total < 1:
        break

    yield rows

    # update the minimum ID for next iteration
    min_id = rows[-1].id
def chunked_qs(qs, chunk_size=10000, fields=None)
Generator to iterate over the given QuerySet, chunk_size rows at a time

Usage:

    >>> qs = FailureLine.objects.filter(action='test_result')
    >>> for rows in chunked_qs(qs, chunk_size=10000, fields=['id', 'message']):
    ...     for line in rows:
    ...         print(line.message)

Note: While Django 2.0 provides chunking [1] via QuerySet.iterator(), we
can't make use of this while using MySQL, which doesn't support streaming
results.

[1]: https://docs.djangoproject.com/en/2.0/ref/models/querysets/#iterator
5.336995
6.220594
0.857956
if not qs:
    return

qs = qs.order_by('-id')

# Can't use .only() here in case the query used select_related
max_id = qs.first().id

while True:
    chunk = qs.filter(id__lte=max_id)  # upper bound of this chunk

    rows = chunk[:chunk_size]

    if len(rows) < 1:
        break

    yield rows

    # update the maximum ID for next iteration
    max_id = max_id - chunk_size
def chunked_qs_reverse(qs, chunk_size=10000)
Generator to iterate over the given QuerySet in reverse, chunk_size rows
at a time

Usage:

    >>> qs = FailureLine.objects.filter(action='test_result')
    >>> for rows in chunked_qs_reverse(qs, chunk_size=100):
    ...     for line in rows:
    ...         print(line.message)

Note: This method is just different enough that it seemed easier to keep
this function separate from chunked_qs.
4.60999
4.784137
0.963599
job_id = int(request.data['job_id']) bug_id = int(request.data['bug_id']) try: BugJobMap.create( job_id=job_id, bug_id=bug_id, user=request.user, ) message = "Bug job map saved" except IntegrityError: message = "Bug job map skipped: mapping already exists" return Response({"message": message})
def create(self, request, project)
Add a new relation between a job and a bug.
3.308303
2.828607
1.169587
job_id, bug_id = map(int, pk.split("-")) job = Job.objects.get(repository__name=project, id=job_id) BugJobMap.objects.filter(job=job, bug_id=bug_id).delete() return Response({"message": "Bug job map deleted"})
def destroy(self, request, project, pk=None)
Delete a bug-job-map entry. pk is a composite key in the form job_id-bug_id, matching the unpacking order in the code.
4.376632
2.82737
1.547952
job_id, bug_id = map(int, pk.split("-")) job = Job.objects.get(repository__name=project, id=job_id) try: bug_job_map = BugJobMap.objects.get(job=job, bug_id=bug_id) serializer = BugJobMapSerializer(bug_job_map) return Response(serializer.data) except BugJobMap.DoesNotExist: return Response("Object not found", status=HTTP_404_NOT_FOUND)
def retrieve(self, request, project, pk=None)
Retrieve a bug-job-map entry. pk is a composite key in the form job_id-bug_id, matching the unpacking order in the code.
2.747061
2.137282
1.285306
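Both destroy (above) and retrieve parse the same composite key. A quick illustration, with a hypothetical URL and ids:

    # GET /api/project/mozilla-inbound/bug-job-map/123-456789/
    pk = "123-456789"
    job_id, bug_id = map(int, pk.split("-"))  # job_id=123, bug_id=456789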
match = match_obj.group(0)
code_point = ord(match)
hex_repr = hex(code_point)
hex_code_point = hex_repr[2:]
hex_value = hex_code_point.zfill(6).upper()
return '<U+{}>'.format(hex_value)
def convert_unicode_character_to_ascii_repr(match_obj)
Convert a matched unicode character to an ASCII representation. For example, the emoji 🍆 gets converted to the literal <U+01F346>.
3.110009
3.170081
0.98105
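A minimal usage sketch for the converter above: it is meant to be used as a re.sub callback, and the non-ASCII pattern shown here is an assumption for illustration:

    import re

    text = u"I like \U0001F346"
    ascii_text = re.sub(u'[^\x00-\x7F]', convert_unicode_character_to_ascii_repr, text)
    # ascii_text == "I like <U+01F346>"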
for repo in Repository.objects.filter(dvcs_type='hg', active_status="active"): fetch_hg_push_log.apply_async( args=(repo.name, repo.url), queue='pushlog' )
def fetch_push_logs()
Run several fetch_hg_push_log subtasks, one per repository
8.205929
5.342367
1.53601
newrelic.agent.add_custom_parameter("repo_name", repo_name) process = HgPushlogProcess() process.run(repo_url + '/json-pushes/?full=1&version=2', repo_name)
def fetch_hg_push_log(repo_name, repo_url)
Run a HgPushlog etl process
8.448538
6.980814
1.210251
newrelic.agent.add_custom_parameter("exchange", exchange) newrelic.agent.add_custom_parameter("routing_key", routing_key) JobLoader().process_job(pulse_job)
def store_pulse_jobs(pulse_job, exchange, routing_key)
Fetches the jobs pending from pulse exchanges and loads them.
3.342905
3.300565
1.012828
newrelic.agent.add_custom_parameter("exchange", exchange) newrelic.agent.add_custom_parameter("routing_key", routing_key) PushLoader().process(body, exchange)
def store_pulse_pushes(body, exchange, routing_key)
Fetches the pushes pending from pulse exchanges and loads them.
3.925619
3.930312
0.998806
logger.debug('Running store_failure_lines for job %s', job_log.job.id) failureline.store_failure_lines(job_log)
def store_failure_lines(job_log)
Store the failure lines from a log corresponding to the structured errorsummary file.
4.533306
4.556267
0.994961
try: serializer = JobNoteSerializer(JobNote.objects.get(id=pk)) return Response(serializer.data) except JobNote.DoesNotExist: return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
def retrieve(self, request, project, pk=None)
GET method implementation for a note detail
2.884963
2.909244
0.991654
job_id = request.query_params.get('job_id') if not job_id: raise ParseError(detail="The job_id parameter is mandatory for this endpoint") try: job_id = int(job_id) except ValueError: raise ParseError(detail="The job_id parameter must be an integer") job = Job.objects.get(repository__name=project, id=job_id) serializer = JobNoteSerializer(JobNote.objects.filter(job=job), many=True) return Response(serializer.data)
def list(self, request, project)
GET method implementation for list view

job_id -- Mandatory filter indicating which job these notes belong to.
2.516751
2.156811
1.166885
JobNote.objects.create( job=Job.objects.get(repository__name=project, id=int(request.data['job_id'])), failure_classification_id=int(request.data['failure_classification_id']), user=request.user, text=request.data.get('text', '')) return Response( {'message': 'note stored for job {0}'.format( request.data['job_id'] )} )
def create(self, request, project)
POST method implementation
4.223063
4.154098
1.016602
try: note = JobNote.objects.get(id=pk) note.delete() return Response({"message": "Note deleted"}) except JobNote.DoesNotExist: return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
def destroy(self, request, project, pk=None)
Delete a note entry
2.92039
2.704852
1.079686
# parse a log given its url artifact_bc = ArtifactBuilderCollection(job_log.url) artifact_bc.parse() artifact_list = [] for name, artifact in artifact_bc.artifacts.items(): artifact_list.append({ "job_guid": job_log.job.guid, "name": name, "type": 'json', "blob": json.dumps(artifact) }) return artifact_list
def extract_text_log_artifacts(job_log)
Generate a set of artifacts by parsing from the raw text log.
4.844983
4.708055
1.029084
logger.debug("Downloading/parsing log for log %s", job_log.id) try: artifact_list = extract_text_log_artifacts(job_log) except LogSizeException as e: job_log.update_status(JobLog.SKIPPED_SIZE) logger.warning('Skipping parsing log for %s: %s', job_log.id, e) return except Exception as e: job_log.update_status(JobLog.FAILED) # Unrecoverable http error (doesn't exist or permission denied). # Apparently this can happen somewhat often with taskcluster if # the job fails (bug 1154248), so just warn rather than raising, # to prevent the noise/load from retrying. if isinstance(e, HTTPError) and e.response.status_code in (403, 404): logger.warning("Unable to retrieve log for %s: %s", job_log.id, e) return logger.error("Failed to download/parse log for %s: %s", job_log.id, e) raise try: serialized_artifacts = serialize_artifact_json_blobs(artifact_list) store_job_artifacts(serialized_artifacts) job_log.update_status(JobLog.PARSED) logger.debug("Stored artifact for %s %s", job_log.job.repository.name, job_log.job.id) except Exception as e: logger.error("Failed to store parsed artifact for %s: %s", job_log.id, e) raise
def post_log_artifacts(job_log)
Post a list of artifacts to a job.
3.957648
3.973348
0.996049
cache_key = 'error-summary-{}'.format(job.id) cached_error_summary = cache.get(cache_key) if cached_error_summary is not None: return cached_error_summary # don't cache or do anything if we have no text log errors to get # results for errors = TextLogError.objects.filter(step__job=job) if not errors: return [] # cache terms generated from error line to save excessive querying term_cache = {} error_summary = [bug_suggestions_line(err, term_cache) for err in errors] cache.set(cache_key, error_summary, BUG_SUGGESTION_CACHE_TIMEOUT) return error_summary
def get_error_summary(job)
Create a list of bug suggestions for a job. Caches the results if there are any.
5.131773
4.644391
1.10494
if term_cache is None: term_cache = {} # remove the mozharness prefix clean_line = get_mozharness_substring(err.line) # get a meaningful search term out of the error line search_term = get_error_search_term(clean_line) bugs = dict(open_recent=[], all_others=[]) # collect open recent and all other bugs suggestions search_terms = [] if search_term: search_terms.append(search_term) if search_term not in term_cache: term_cache[search_term] = Bugscache.search(search_term) bugs = term_cache[search_term] if not bugs or not (bugs['open_recent'] or bugs['all_others']): # no suggestions, try to use # the crash signature as search term crash_signature = get_crash_signature(clean_line) if crash_signature: search_terms.append(crash_signature) if crash_signature not in term_cache: term_cache[crash_signature] = Bugscache.search(crash_signature) bugs = term_cache[crash_signature] # TODO: Rename 'search' to 'error_text' or similar, since that's # closer to what it actually represents (bug 1091060). return { "search": clean_line, "search_terms": search_terms, "bugs": bugs, }
def bug_suggestions_line(err, term_cache=None)
Get Bug suggestions for a given TextLogError (err). Tries to extract a search term from a clean version of the given error's line. We build a search term from the cleaned line and use that to search for bugs. Returns a dictionary with the cleaned line, the generated search term, and any bugs found with said search term.
4.065295
3.872999
1.04965
if not error_line:
    return None

# This is strongly inspired by
# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73
tokens = error_line.split(" | ")
search_term = None

if len(tokens) >= 3:
    # If this is process output then discard the token with the PID
    if len(tokens) > 3 and OUTPUT_RE.match(tokens[0]):
        tokens = tokens[1:]

    # it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
    test_name_or_path = tokens[1]
    message = tokens[2]

    # Leak failure messages are of the form:
    # leakcheck | .*\d+ bytes leaked (Object-1, Object-2, Object-3, ...)
    match = LEAK_RE.search(message)
    if match:
        search_term = match.group(1) if match.group(1) is not None else match.group(2)
    else:
        # For reftests, remove the reference path from the tokens as this is
        # not very unique
        test_name_or_path = REFTEST_RE.sub("", test_name_or_path)
        for splitter in ("/", "\\"):
            # if this is a path, we are interested in the last part
            test_name_or_path = test_name_or_path.split(splitter)[-1]
        search_term = test_name_or_path

# If the failure line was not in the pipe symbol delimited format or the search term
# will likely return too many (or irrelevant) results (eg: too short or matches terms
# on the blacklist), then we fall back to searching for the entire failure line if
# it is suitable.
if not (search_term and is_helpful_search_term(search_term)):
    if is_helpful_search_term(error_line):
        search_term = error_line
    else:
        search_term = None

# Searching for extremely long search terms is undesirable, since:
# a) Bugzilla's max summary length is 256 characters, and once "Intermittent "
#    and platform/suite information is prefixed, there are even fewer characters
#    left for us to use for the failure string against which we need to match.
# b) For long search terms, the additional length does little to prevent against
#    false positives, but means we're more susceptible to false negatives due to
#    run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
    search_term = search_term[:100]

return search_term
def get_error_search_term(error_line)
Generate a search term from the given error_line string. Attempt to build a search term that will yield meaningful results when used in a MySQL FTS query.
7.588298
7.559613
1.003795
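A worked example of the pipe-delimited path above (assuming the module's LEAK_RE and REFTEST_RE regexes do not match this line):

    line = "TEST-UNEXPECTED-FAIL | dom/media/test/test_playback.html | Test timed out"
    # tokens[1] is the test path; only its last path component is kept, so:
    get_error_search_term(line)  # -> "test_playback.html"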
search_term = None match = CRASH_RE.match(error_line) if match and is_helpful_search_term(match.group(1)): search_term = match.group(1) return search_term
def get_crash_signature(error_line)
Try to get a crash signature from the given error_line string.
3.860795
3.921077
0.984626
# Search terms that will match too many bug summaries # and so not result in useful suggestions. search_term = search_term.strip() blacklist = [ 'automation.py', 'remoteautomation.py', 'Shutdown', 'undefined', 'Main app process exited normally', 'Traceback (most recent call last):', 'Return code: 0', 'Return code: 1', 'Return code: 2', 'Return code: 9', 'Return code: 10', 'mozalloc_abort(char const*)', 'mozalloc_abort', 'Exiting 1', 'Exiting 9', 'CrashingThread(void *)', 'libSystem.B.dylib + 0xd7a', 'linux-gate.so + 0x424', 'TypeError: content is null', 'leakcheck', 'ImportError: No module named pygtk', '# TBPL FAILURE #' ] return len(search_term) > 4 and search_term not in blacklist
def is_helpful_search_term(search_term)
Decide if the given search_term string is helpful or not. We define "helpful" here as search terms that won't match an excessive number of bug summaries. Very short terms and those matching generic strings (listed in the blacklist) are deemed unhelpful since they wouldn't result in useful suggestions.
9.508535
8.292622
1.146626
for match in matches:
    # generate a new score from the current match
    dividend, divisor = score_multiplier
    score = match.score * dividend / divisor

    yield (score, match.classified_failure_id)
def score_matches(matches, score_multiplier=(1, 1))
Get scores for the given matches. Given a QuerySet of TextLogErrorMatches produce a score for each one until Good Enough™. An optional score multiplier can be passed in.
9.281891
8.116714
1.143553
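A sketch of how the generator might be consumed; the queryset and the (1, 2) down-weighting are illustrative assumptions:

    matches = TextLogErrorMatch.objects.filter(text_log_error=error)

    # halve each match's score via the multiplier (dividend=1, divisor=2)
    for score, classified_failure_id in score_matches(matches, score_multiplier=(1, 2)):
        print(score, classified_failure_id)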
time_budget = time_budget / 1000  # convert the budget from milliseconds to seconds

start = time.time()

for thing in iterable:
    yield func(thing, *args)

    end = time.time() - start
    if end > time_budget:
        # Putting the condition at the end of the loop ensures that we
        # always run it once, which is useful for testing
        return
def time_boxed(func, iterable, time_budget, *args)
Apply a function to the items of an iterable within a given time budget. Loop the given iterable, calling the given function on each item. The expended time is compared to the given time budget after each iteration.
5.699295
5.83808
0.976227
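A usage sketch, assuming a hypothetical match_line helper: the budget is passed in milliseconds, and at least one item is always processed:

    def match_line(line, matcher):
        return matcher(line)

    # spend at most 500 ms matching lines
    for result in time_boxed(match_line, lines, 500, precise_matcher):
        print(result)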
'''This structure helps with finding data from the job priorities table'''
jp_index = {}
# Creating this data structure reduces how many times we iterate through the DB rows
for jp in job_priorities:
    key = jp.unique_identifier()

    # This is guaranteed by a unique composite index for these 3 fields in models.py
    if key in jp_index:
        msg = '"{}" should be a unique job priority and that is unexpected.'.format(key)
        raise DuplicateKeyError(msg)

    # (testtype, buildtype, platform)
    jp_index[key] = {'pk': jp.id, 'build_system_type': jp.buildsystem}

return jp_index
def job_priority_index(job_priorities)
This structure helps with finding data from the job priorities table
11.97682
9.753647
1.227933
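The shape of the returned index, sketched with illustrative values:

    # {
    #     ('web-platform-tests-1', 'opt', 'windows8-64'): {
    #         'pk': 1,
    #         'build_system_type': 'taskcluster',
    #     },
    #     ...
    # }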
job_guid = pulse_job["taskId"] x = { "job": { "job_guid": job_guid, "name": pulse_job["display"].get("jobName", "unknown"), "job_symbol": self._get_job_symbol(pulse_job), "group_name": pulse_job["display"].get("groupName", "unknown"), "group_symbol": pulse_job["display"].get("groupSymbol"), "product_name": pulse_job.get("productName", "unknown"), "state": pulse_job["state"], "result": self._get_result(pulse_job), "reason": pulse_job.get("reason", "unknown"), "who": pulse_job.get("owner", "unknown"), "build_system_type": pulse_job["buildSystem"], "tier": pulse_job.get("tier", 1), "machine": self._get_machine(pulse_job), "option_collection": self._get_option_collection(pulse_job), "log_references": self._get_log_references(pulse_job), "artifacts": self._get_artifacts(pulse_job, job_guid), }, "superseded": pulse_job.get("coalesced", []), "revision": pulse_job["origin"]["revision"] } # some or all the time fields may not be present in some cases for k, v in self.TIME_FIELD_MAP.items(): if v in pulse_job: x["job"][k] = to_timestamp(pulse_job[v]) # if only one platform is given, use it. default_platform = pulse_job.get( "buildMachine", pulse_job.get("runMachine", {})) for k, v in self.PLATFORM_FIELD_MAP.items(): platform_src = pulse_job[v] if v in pulse_job else default_platform x["job"][k] = self._get_platform(platform_src) # add some taskcluster metadata if it's available # currently taskcluster doesn't pass the taskId directly, so we'll # derive it from the guid, where it is stored in uncompressed # guid form of a slug (see: https://github.com/taskcluster/slugid) # FIXME: add support for processing the taskcluster information # properly, when it's available: # https://bugzilla.mozilla.org/show_bug.cgi?id=1323110#c7 try: (decoded_task_id, retry_id) = job_guid.split('/') # As of slugid v2, slugid.encode() returns a string not bytestring under Python 3. real_task_id = slugid.encode(uuid.UUID(decoded_task_id)) x["job"].update({ "taskcluster_task_id": real_task_id, "taskcluster_retry_id": int(retry_id) }) # TODO: Figure out what exception types we actually expect here. except Exception: pass return x
def transform(self, pulse_job)
Transform a pulse job into a job that can be written to disk. Log References and artifacts will also be transformed and loaded with the job. We can rely on the structure of ``pulse_job`` because it will already have been validated against the JSON Schema at this point.
4.276568
4.23673
1.009403
try: push = Push.objects.get(repository__name=project, id=pk) serializer = PushSerializer(push) return Response(serializer.data) except Push.DoesNotExist: return Response("No push with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
def retrieve(self, request, project, pk=None)
GET method implementation for detail view of ``push``
2.646844
2.502431
1.057709
try: push = Push.objects.get(id=pk) except Push.DoesNotExist: return Response("No push with id: {0}".format(pk), status=HTTP_404_NOT_FOUND) return Response(push.get_status())
def status(self, request, project, pk=None)
Return a count of the jobs belonging to this push grouped by job status.
2.855919
2.447019
1.167101
revision = request.query_params.get('revision') try: push = Push.objects.get(revision=revision, repository__name=project) except Push.DoesNotExist: return Response("No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND) push_health_test_failures = get_push_health_test_failures(push, REPO_GROUPS['trunk']) test_result = 'fail' if len(push_health_test_failures['needInvestigation']) else 'pass' return Response({ 'revision': revision, 'id': push.id, 'result': test_result, 'metrics': [ { 'name': 'Tests', 'result': test_result, 'failures': push_health_test_failures, }, { 'name': 'Builds (Not yet implemented)', 'result': 'pass', 'details': ['Wow, everything passed!'], }, { 'name': 'Linting (Not yet implemented)', 'result': 'pass', 'details': ['Gosh, this code is really nicely formatted.'], }, { 'name': 'Coverage (Not yet implemented)', 'result': 'pass', 'details': [ 'Covered 42% of the tests that are needed for feature ``foo``.', 'Covered 100% of the tests that are needed for feature ``bar``.', 'The ratio of people to cake is too many...', ], }, { 'name': 'Performance (Not yet implemented)', 'result': 'pass', 'details': ['Ludicrous Speed'], }, ], })
def health(self, request, project)
Return a calculated assessment of the health of this push.
4.030907
3.848023
1.047527
push_ids = request.query_params.getlist('push_ids') job_type = JobType.objects.get(name='Gecko Decision Task') decision_jobs = Job.objects.filter( push_id__in=push_ids, job_type=job_type ).select_related('taskcluster_metadata') if decision_jobs: return Response( {job.push_id: job.taskcluster_metadata.task_id for job in decision_jobs} ) else: return Response("No decision tasks found for pushes: {}".format(push_ids), status=HTTP_404_NOT_FOUND)
def decisiontask(self, request, project)
Return the decision task ids for the pushes.
3.549741
3.118267
1.13837
filename = '{0}.js'.format(language_code) path = os.path.join('tinymce', 'js', 'tinymce', 'langs', filename) return finders.find(path) is not None
def language_file_exists(language_code)
Check if TinyMCE has a language file for the specified lang code

:param language_code: language code
:type language_code: str
:return: check result
:rtype: bool
4.555304
4.431204
1.028006
language_code = convert_language_code(get_language() or settings.LANGUAGE_CODE) if not language_file_exists(language_code): language_code = language_code[:2] if not language_file_exists(language_code): # Fall back to English if Tiny MCE 4 does not have required translation language_code = 'en' config = {'language': language_code} if get_language_bidi(): config['directionality'] = 'rtl' else: config['directionality'] = 'ltr' return config
def get_language_config()
Creates a language configuration for TinyMCE4 based on Django project settings

:return: language- and locale-related parameters for TinyMCE 4
:rtype: dict
3.234429
3.272619
0.98833
config = {} if mce_settings.USE_SPELLCHECKER: from enchant import list_languages enchant_languages = list_languages() if settings.DEBUG: logger.info('Enchant languages: {0}'.format(enchant_languages)) lang_names = [] for lang, name in settings.LANGUAGES: lang = convert_language_code(lang) if lang not in enchant_languages: lang = lang[:2] if lang not in enchant_languages: logger.warning('Missing {0} spellchecker dictionary!'.format(lang)) continue if config.get('spellchecker_language') is None: config['spellchecker_language'] = lang lang_names.append('{0}={1}'.format(name, lang)) config['spellchecker_languages'] = ','.join(lang_names) return config
def get_spellcheck_config()
Create TinyMCE spellchecker config based on Django settings

:return: spellchecker parameters for TinyMCE
:rtype: dict
2.952448
2.994308
0.98602
lang_and_country = django_lang.split('-')

try:
    return '_'.join((lang_and_country[0], lang_and_country[1].upper()))
except IndexError:
    return lang_and_country[0]
def convert_language_code(django_lang)
Converts Django language codes "ll-cc" into ISO codes "ll_CC" or "ll"

:param django_lang: Django language code as ll-cc
:type django_lang: str
:return: ISO language code as ll_CC
:rtype: str
2.638319
2.943265
0.896392
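For example:

    convert_language_code('en-us')  # -> 'en_US'
    convert_language_code('de')     # -> 'de' (no country part, IndexError path)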
if mce_settings.USE_FILEBROWSER and 'file_browser_callback' not in callbacks: callbacks['file_browser_callback'] = 'djangoFileBrowser' if mce_settings.USE_SPELLCHECKER and 'spellchecker_callback' not in callbacks: callbacks['spellchecker_callback'] = 'tinymce4_spellcheck' if id_: mce_config['selector'] = mce_config.get('selector', 'textarea') + '#{0}'.format(id_) config = json.dumps(mce_config, cls=DjangoJSONEncoder)[1:-1] return render_to_string('tinymce/tinymce_init.js', context={'callbacks': callbacks, 'tinymce_config': config, 'is_admin_inline': '__prefix__' in id_})
def render_tinymce_init_js(mce_config, callbacks, id_)
Renders TinyMCE.init() JavaScript code

:param mce_config: TinyMCE 4 configuration
:type mce_config: dict
:param callbacks: TinyMCE callbacks
:type callbacks: dict
:param id_: HTML element's ID to which TinyMCE is attached.
:type id_: str
:return: TinyMCE.init() code
:rtype: str
3.217269
3.322925
0.968204
for item in sys.argv: if re.search(r'manage.py|django-admin|django', item) is not None: return True return False
def is_managed()
Check if a Django project is being managed with ``manage.py`` or ``django-admin`` scripts

:return: Check result
:rtype: bool
5.649064
4.732367
1.193708
data = json.loads(request.body.decode('utf-8'))
output = {'id': data['id']}
error = None
status = 200
try:
    if data['params']['lang'] not in list_languages():
        error = 'Missing {0} dictionary!'.format(data['params']['lang'])
        raise LookupError(error)
    spell_checker = checker.SpellChecker(data['params']['lang'])
    spell_checker.set_text(strip_tags(data['params']['text']))
    output['result'] = {spell_checker.word: spell_checker.suggest()
                        for err in spell_checker}
except NameError:
    error = 'The pyenchant package is not installed!'
    logger.exception(error)
except LookupError:
    logger.exception(error)
except Exception:
    error = 'Unknown error!'
    logger.exception(error)
if error is not None:
    output['error'] = error
    status = 500
return JsonResponse(output, status=status)
def spell_check(request)
Implements the TinyMCE 4 spellchecker protocol

:param request: Django http request with JSON-RPC payload from TinyMCE 4
    containing a language code and a text to check for errors.
:type request: django.http.request.HttpRequest
:return: Django http response containing JSON-RPC payload with spellcheck
    results for TinyMCE 4
:rtype: django.http.JsonResponse
3.051659
2.979917
1.024075
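A sketch of the JSON-RPC round trip the view handles, with field names taken from the code above and illustrative values:

    # request body:  {"id": "1", "params": {"lang": "en_US", "text": "<p>A speling error</p>"}}
    # response body: {"id": "1", "result": {"speling": ["spelling", ...]}}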
if 'grappelli' in settings.INSTALLED_APPS:
    margin_left = 0
elif VERSION[:2] <= (1, 8):
    margin_left = 110  # For old style admin
else:
    margin_left = 170  # For Django >= 1.9 style admin

# For Django >= 2.0 responsive admin
responsive_admin = VERSION[:2] >= (2, 0)

return HttpResponse(render_to_string('tinymce/tinymce4.css',
                                     context={'margin_left': margin_left,
                                              'responsive_admin': responsive_admin},
                                     request=request),
                    content_type='text/css; charset=utf-8')
def css(request)
Custom CSS for TinyMCE 4 widget

By default it fixes the widget's position in Django Admin

:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with CSS file for TinyMCE 4
:rtype: django.http.HttpResponse
3.667949
3.464633
1.058683
try:
    fb_url = reverse('fb_browse')
except:
    fb_url = reverse('filebrowser:fb_browse')

return HttpResponse(jsmin(render_to_string('tinymce/filebrowser.js',
                                           context={'fb_url': fb_url},
                                           request=request)),
                    content_type='application/javascript; charset=utf-8')
def filebrowser(request)
JavaScript callback function for `django-filebrowser`_

:param request: Django http request
:type request: django.http.request.HttpRequest
:return: Django http response with filebrowser JavaScript code for TinyMCE 4
:rtype: django.http.HttpResponse

.. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser
3.380924
3.1104
1.086974
err = "glaad.offensive_terms" msg = "Offensive term. Remove it or consider the context." list = [ "fag", "faggot", "dyke", "sodomite", "homosexual agenda", "gay agenda", "transvestite", "homosexual lifestyle", "gay lifestyle" # homo - may create false positives without additional context # FIXME use topic detetor to decide whether "homo" is offensive ] return existence_check(text, list, err, msg, join=True, ignore_case=False)
def check(text)
Flag offensive words based on the GLAAD reference guide.
13.461788
11.319368
1.18927
tp = 0
fp = 0

parent_directory = os.path.dirname(proselint_path)
path_to_corpus = os.path.join(parent_directory, "corpora", "0.1.0")
for root, _, files in os.walk(path_to_corpus):
    files = [f for f in files if f.endswith(".md")]
    for f in files:
        fullpath = os.path.join(root, f)

        # Run the linter.
        print("Linting {}".format(f))
        out = subprocess.check_output(["proselint", fullpath])

        # Determine the number of errors.
        regex = r".+?:(?P<line>\d+):(?P<col>\d+): (?P<message>.+)"
        num_errors = len(tuple(re.finditer(regex, out)))
        print("Found {} errors.".format(num_errors))

        # Open the document.
        subprocess.call(["open", fullpath])

        # Ask the scorer how many of the errors were false alarms?
        input_val = None
        while not isinstance(input_val, int):
            try:
                input_val = input("# of false alarms? ")
                if input_val == "exit":
                    return
                else:
                    input_val = int(input_val)
                    fp += input_val
                    tp += (num_errors - input_val)
            except ValueError:
                pass

        print("Currently {} hits and {} false alarms\n---".format(tp, fp))

if (tp + fp) > 0:
    return tp * (1.0 * tp / (tp + fp)) ** 2
else:
    return 0
def score(check=None)
Compute the linter's score on the corpus.

Proselint's score reflects the desire to have a linter that catches many
errors, but which takes false alarms seriously. It is better not to say
something than to say the wrong thing, and the harm from saying the wrong
thing is greater than the benefit of saying the right thing. Thus our
score metric is defined as:

    TP * (TP / (FP + TP)) ^ k,

where TP is the number of true positives (hits), FP is the number of
false positives (false alarms), and k > 0 is a temperature parameter that
determines the penalty for imprecision. In general, we should choose a
large value of k, one that strongly discourages the creation of rules that
can't be trusted. Suppose that k = 2. Then if the linter detects 100
errors, of which 10 are false positives (TP = 90, FP = 10), the score is
72.9.
3.355248
3.179037
1.055429
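The arithmetic for the example above, as a quick check (k = 2):

    tp, fp = 90, 10                            # 100 detections, 10 false alarms
    score = tp * (1.0 * tp / (tp + fp)) ** 2   # 90 * 0.81 = 72.9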
err = "oxford.venery_terms" msg = "The venery term is '{}'." term_list = [ ["alligators", "congregation"], ["antelopes", "herd"], ["baboons", "troop"], ["badgers", "cete"], ["bats", "colony"], ["bears", "sloth"], ["buffalo", "herd"], ["bullfinches", "bellowing"], ["caribou", "herd"], ["cats", "glaring"], ["caterpillars", "army"], ["cockroaches", "intrusion"], ["coyotes", "pack"], ["crows", "murder"], ["dogs", "pack"], ["eagles", "convocation"], ["emus", "mob"], ["flamingos", "stand"], ["frogs", "army"], ["goldfinches", "charm"], ["gorillas", "band"], ["guineafowl", "rasp"], ["hedgehogs", "array"], ["herons", "siege"], ["hogs", "parcel"], ["hyenas", "cackle"], ["ibex", "herd"], ["iguanas", "mess"], ["lions", "pride"], ["locusts", "plague"], ["mackerel", "shoal"], ["mares", "stud"], ["minnows", "shoal"], ["moose", "herd"], ["mosquitoes", "scourge"], ["nightingales", "watch"], ["oysters", "bed"], ["partridges", "covey"], ["pelicans", "pod"], ["raccoons", "gaze"], ["ravens", "unkindness"], ["rhinoceroses", "crash"], ["sea urchins", "sea"], ["starlings", "murmuration"], ["toads", "knot"], ["wombats", "wisdom"], ["woodcocks", "fall"], ["woodpeckers", "descent"], ["wrens", "herd"], ] generic_terms = [ "group", "bunch", ] list = [] for term_pair in term_list: for generic in generic_terms: wrong = "a {} of {}".format(generic, term_pair[0]) right = "a {} of {}".format(term_pair[1], term_pair[0]) list += [[right, [wrong]]] return preferred_forms_check(text, list, err, msg)
def check(text)
Check the text.
4.668755
4.671267
0.999462
err = "links.valid" msg = u"Broken link: {}" regex = re.compile( r, re.U | re.X) errors = [] for m in re.finditer(regex, text): url = m.group(0).strip() if "http://" not in url and "https://" not in url: url = "http://" + url if is_broken_link(url): errors.append((m.start(), m.end(), err, msg.format(url), None)) return errors
def check(text)
Check the text.
3.838065
3.977308
0.964991
try:
    request = urllib_request.Request(
        url, headers={'User-Agent': 'Mozilla/5.0'})
    urllib_request.urlopen(request).read()
    return False
except urllib_request.URLError:
    return True
except SocketError:
    return True
def is_broken_link(url)
Determine whether the link returns a 404 error.
2.516671
2.455321
1.024987
err = "security.credit_card" msg = u"Don't put credit card numbers in plain text." credit_card_numbers = [ "4\d{15}", "5[1-5]\d{14}", "3[4,7]\d{13}", "3[0,6,8]\d{12}", "6011\d{12}", ] return existence_check(text, credit_card_numbers, err, msg)
def check(text)
Check the text.
4.289152
4.239241
1.011774
err = "misc.annotations" msg = u"Annotation left in text." annotations = [ "FIXME", "FIX ME", "TODO", "todo", "ERASE THIS", "FIX THIS", ] return existence_check( text, annotations, err, msg, ignore_case=False, join=True)
def check(text)
Check the text.
10.978914
11.335146
0.968573
err = "misc.suddenly" msg = u"Suddenly is nondescript, slows the action, and warns your reader." regex = "Suddenly," return existence_check(text, [regex], err, msg, max_errors=3, require_padding=False, offset=-1, ignore_case=False)
def check(text)
Advice on sudden vs suddenly.
22.165125
19.893539
1.114187
err = "weasel_words.very" msg = ("Substitute 'damn' every time you're " "inclined to write 'very'; your editor will delete it " "and the writing will be just as it should be.") regex = "very" return existence_check(text, [regex], err, msg, max_errors=1)
def check(text)
Avoid 'very'.
23.986689
20.770266
1.154857
err = "psychology.p_equals_zero" msg = "Unless p really equals zero, you should use more decimal places." list = [ "p = 0.00", "p = 0.000", "p = 0.0000", ] return existence_check(text, list, err, msg, join=True)
def check_p_equals_zero(text)
Check for p = 0.000.
7.265971
6.642661
1.093834
err = "garner.phrasal_adjectives.ly" msg = u regex = "\s[^\s-]+ly-" return existence_check(text, [regex], err, msg, require_padding=False, offset=-1)
def check_ly(text)
Check the text.
36.062756
35.962135
1.002798
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
    return "60/minute"
else:
    return "600/minute"
def rate()
Set rate limits for authenticated and nonauthenticated users.
4.877757
4.391913
1.110622
if 'text' in request.values: text = unquote(request.values['text']) job = q.enqueue(worker_function, text) return jsonify(job_id=job.id), 202 elif 'job_id' in request.values: job = q.fetch_job(request.values['job_id']) if not job: return jsonify( status="error", message="No job with requested job_id."), 404 elif job.result is None: return jsonify( status="error", message="Job is not yet ready."), 202 else: errors = [] for i, e in enumerate(job.result): app.logger.debug(e) errors.append({ "check": e[0], "message": e[1], "line": e[2], "column": e[3], "start": e[4], "end": e[5], "extent": e[5] - e[4], "severity": e[7], "replacements": e[8], "source_name": "", "source_url": "", }) return jsonify( status="success", data={"errors": errors})
def lint()
Run linter on the provided text and return the results.
2.742808
2.663967
1.029596
err = "MAU102" msg = "Months should be capitalized. '{}' is the preferred form." list = [ ["January", ["january"]], ["February", ["february"]], # ["March", ["march"]], ["April", ["april"]], # ["May", ["may"]], ["June", ["june"]], ["July", ["july"]], ["August", ["august"]], ["September", ["september"]], ["October", ["october"]], ["November", ["november"]], ["December", ["december"]], ] return preferred_forms_check(text, list, err, msg, ignore_case=False)
def check_months(text)
Suggest the preferred forms.
3.162834
2.734136
1.156795
err = "MAU102" msg = "Days of the week should be capitalized. '{}' is the preferred form." list = [ ["Monday", ["monday"]], ["Tuesday", ["tuesday"]], ["Wednesday", ["wednesday"]], ["Thursday", ["thursday"]], ["Friday", ["friday"]], ["Saturday", ["saturday"]], ["Sunday", ["sunday"]], ] return preferred_forms_check(text, list, err, msg, ignore_case=False)
def check_days(text)
Suggest the preferred forms.
4.426268
3.5859
1.234353
err = "skunked_terms.misc" msg = u skunked_terms = [ "bona fides", "deceptively", "decimate", "effete", "fulsome", "hopefully", "impassionate", "Thankfully,", ] return existence_check(text, skunked_terms, err, msg)
def check(text)
Check the text.
15.694012
15.503902
1.012262
err = "misc.illogic" msg = u"'{}' is illogical." illogics = [ "preplan", "more than .{1,10} all", "appraisal valuations?", "(?:i|you|he|she|it|y'all|all y'all|you all|they) could care less", "least worst", "much-needed gaps?", "much-needed voids?", "no longer requires oxygen", "without scarcely", ] return existence_check(text, illogics, err, msg, offset=1)
def check(text)
Check the text.
19.28051
19.229511
1.002652
err = "misc.illogic.coin" msg = "You can't coin an existing phrase. Did you mean 'borrow'?" regex = "to coin a phrase from" return existence_check(text, [regex], err, msg, offset=1)
def check_coin_a_phrase_from(text)
Check the text.
20.879539
21.50918
0.970727
err = "misc.illogic.collusion" msg = "It's impossible to defraud yourself. Try 'aquiescence'." regex = "without your collusion" return existence_check( text, [regex], err, msg, require_padding=False, offset=-1)
def check_without_your_collusion(text)
Check the text.
23.977152
24.857704
0.964576
err = "leonard.exclamation.multiple" msg = u"Stop yelling. Keep your exclamation points under control." regex = r"[\!]\s*?[\!]{1,}" return existence_check( text, [regex], err, msg, require_padding=False, ignore_case=False, max_errors=1, dotall=True)
def check_repeated_exclamations(text)
Check the text.
15.493361
15.64305
0.990431
err = "leonard.exclamation.30ppm" msg = u"More than 30 ppm of exclamations. Keep them under control." regex = r"\w!" count = len(re.findall(regex, text)) num_words = len(text.split(" ")) ppm = (count*1.0 / num_words) * 1e6 if ppm > 30 and count > 1: loc = re.search(regex, text).start() + 1 return [(loc, loc+1, err, msg, ".")] else: return []
def check_exclamations_ppm(text)
Make sure that the exclamation ppm is under 30.
5.85971
5.650705
1.036987
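Worked numbers for the threshold above:

    # e.g. 2 exclamations in a 10-word text:
    # ppm = (2 / 10.0) * 1e6 = 200000 > 30, and count > 1,
    # so the check flags the position of the first match.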
err = "strunk_white.composition" msg = "Try '{}' instead of '{}'." bad_forms = [ # Put statements in positive form ["dishonest", ["not honest"]], ["trifling", ["not important"]], ["forgot", ["did not remember"]], ["ignored", ["did not pay (any )?attention to"]], ["distrusted", ["did not have much confidence in"]], # Omit needless words ["whether", ["the question as to whether"]], ["no doubt", ["there is no doubt but that"]], ["used for fuel", ["used for fuel purposes"]], ["he", ["he is a man who"]], ["hastily", ["in a hasty manner"]], ["this subject", ["this is a subject that"]], ["Her story is strange.", ["Her story is a strange one."]], ["because", ["the reason why is that"]], ["because / since", ["owing to the fact that"]], ["although / though", ["in spite of the fact that"]], ["remind you / notify you", ["call your attention to the fact that"]], ["I did not know that / I was unaware that", ["I was unaware of the fact that"]], ["his failure", ["the fact that he had not succeeded"]], ["my arrival", ["the fact that i had arrived"]] ] return preferred_forms_check(text, bad_forms, err, msg)
def check(text)
Suggest the preferred forms.
11.244614
10.978909
1.024201
err = "consistency.spelling" msg = "Inconsistent spelling of '{}' (vs. '{}')." word_pairs = [ ["advisor", "adviser"], # ["analyse", "analyze"], ["centre", "center"], ["colour", "color"], ["emphasise", "emphasize"], ["finalise", "finalize"], ["focussed", "focused"], ["labour", "labor"], ["learnt", "learned"], ["organise", "organize"], ["organised", "organized"], ["organising", "organizing"], ["recognise", "recognize"], ] return consistency_check(text, word_pairs, err, msg)
def check(text)
Check the text.
4.784875
4.794759
0.997939
err = "uncomparables.misc" msg = "Comparison of an uncomparable: '{}' is not comparable." comparators = [ "most", "more", "less", "least", "very", "quite", "largely", "extremely", "increasingly", "kind of", "mildly" ] uncomparables = [ "absolute", "adequate", "chief", "complete", "correct", "devoid", "entire", "false", "fatal", "favorite", "final", "ideal", "impossible", "inevitable", "infinite", "irrevocable", "main", "manifest", "only", "paramount", "perfect", "perpetual", "possible", "preferable", "principal", "singular", "stationary", "sufficient", "true", "unanimous", "unavoidable", "unbroken", "uniform", "unique", "universal", "void", "whole", ] exceptions = [ ("more", "perfect"), ("more", "possible") # FIXME ] all = ["\\b" + i[0] + "\s" + i[1] + "[\W$]" for i in itertools.product( comparators, uncomparables) if i not in exceptions] occ = re.finditer("|".join(all), text.lower()) return [(o.start(), o.end(), err, msg.format(o.group(0)), None) for o in occ]
def check(text)
Check the text.
4.825146
4.823952
1.000248
err = "consistency.spacing" msg = "Inconsistent spacing after period (1 vs. 2 spaces)." regex = ["[\.\?!] [A-Z]", "[\.\?!] [A-Z]"] return consistency_check(text, [regex], err, msg)
def check(text)
Check the text.
14.491836
14.554904
0.995667
err = "pinker.latin" msg = "Use English. '{}' is the preferred form." list = [ ["other things being equal", ["ceteris paribus"]], ["among other things", ["inter alia"]], ["in and of itself", ["simpliciter"]], ["having made the necessary changes", ["mutatis mutandis"]], ] return preferred_forms_check(text, list, err, msg)
def check(text)
Suggest the preferred forms.
21.874271
19.782476
1.10574
err = "hyperbolic.misc" msg = u"'{}' is hyperbolic." words = [ "[a-z]*[!]{2,}", "[a-z]*\?{2,}" ] return existence_check(text, words, err, msg)
def check(text)
Check the text.
11.002844
11.453397
0.960662
err = "glaad.terms" msg = "Possibly offensive term. Consider using '{}' instead of '{}'." list = [ ["gay man", ["homosexual man"]], ["gay men", ["homosexual men"]], ["lesbian", ["homosexual woman"]], ["lesbians", ["homosexual women"]], ["gay people", ["homosexual people"]], ["gay couple", ["homosexual couple"]], ["sexual orientation", ["sexual preference"]], ["openly gay", ["admitted homosexual", "avowed homosexual"]], ["equal rights", ["special rights"]] ] return preferred_forms_check(text, list, err, msg, ignore_case=False)
def check(text)
Suggest preferred forms given the reference document.
6.472817
6.089002
1.063034
err = "MSC104" msg = u"Don't fail to capitalize roman numeral abbreviations." pwd_regex = " (I(i*)|i*)" password = [ "World War{}".format(pwd_regex), ] return blacklist(text, password, err, msg)
def check(text)
Check the text.
33.673737
35.320377
0.95338
for path, _, files in os.walk(os.getcwd()):
    for fname in [f for f in files if os.path.splitext(f)[1] == ".pyc"]:
        try:
            os.remove(os.path.join(path, fname))
        except OSError:
            pass
def _delete_compiled_python_files()
Remove files with a 'pyc' extension.
2.24698
1.930749
1.163787
if output_json: click.echo(errors_to_json(errors)) else: for error in errors: (check, message, line, column, start, end, extent, severity, replacements) = error if compact: filename = "-" click.echo( filename + ":" + str(1 + line) + ":" + str(1 + column) + ": " + check + " " + message)
def print_errors(filename, errors, output_json=False, compact=False)
Print the errors resulting from lint for the given filename.
4.500093
4.214088
1.067869
if time: click.echo(timing_test()) return # In debug or clean mode, delete cache & *.pyc files before running. if debug or clean: clear_cache() # Use the demo file by default. if demo: paths = [demo_file] # Expand the list of directories and files. filepaths = extract_files(list(paths)) # Lint the files num_errors = 0 # Use stdin if no paths were specified if len(paths) == 0: filepaths.append('-') for fp in filepaths: try: if fp == '-': fp = '<stdin>' f = sys.stdin else: f = click.open_file( fp, 'r', encoding="utf-8", errors="replace") errors = lint(f, debug=debug) num_errors += len(errors) print_errors(fp, errors, output_json, compact=compact) except Exception: traceback.print_exc() # Return an exit code close_cache_shelves() if num_errors > 0: sys.exit(1) else: sys.exit(0)
def proselint(paths=None, version=None, clean=None, debug=None, output_json=None, time=None, demo=None, compact=None)
A CLI for proselint, a linter for prose.
4.045667
4.08882
0.989446
expanded_files = [] legal_extensions = [".md", ".txt", ".rtf", ".html", ".tex", ".markdown"] for f in files: # If it's a directory, recursively walk through it and find the files. if os.path.isdir(f): for dir_, _, filenames in os.walk(f): for filename in filenames: fn, file_extension = os.path.splitext(filename) if file_extension in legal_extensions: joined_file = os.path.join(dir_, filename) expanded_files.append(joined_file) # Otherwise add the file directly. else: expanded_files.append(f) return expanded_files
def extract_files(files)
Expand list of paths to include all text files matching the pattern.
2.663642
2.511143
1.060729
err = "misc.not_guilty" msg = u"'not guilty beyond a reasonable doubt' is an ambiguous phrasing." regex = r"not guilty beyond (a |any )?reasonable doubt" return existence_check(text, [regex], err, msg)
def check(text)
Check the text.
15.047781
15.589814
0.965232
err = "hedging.misc" msg = "Hedging. Just say it." narcissism = [ "I would argue that", ", so to speak", "to a certain degree", ] return existence_check(text, narcissism, err, msg)
def check(text)
Suggest the preferred forms.
20.015615
19.332626
1.035328
err = "misc.tense_present" msg = u"'{}'." illogics = [ u"up to \d{1,3}% ?[-\u2014\u2013]{0,3} ?(?:or|and) more\W?", "between you and I", "on accident", "somewhat of a", "all it's own", "reason is because", "audible to the ear", "in regards to", "would of", # "and so", "i ?(?:feel|am feeling|am|'m|'m feeling) nauseous", ] errors = [] for i in illogics: for m in re.finditer(u"\s{}\s".format(i), text, flags=re.U | re.I): txt = m.group(0).strip() errors.append(( m.start() + 1, m.end(), err, msg.format(txt), None)) return errors
def check(text)
Check the text.
10.480495
10.619309
0.986928
err = "terms.denizen_labels.garner" msg = "'{}' is the preferred denizen label." preferences = [ ["Afrikaner", ["Afrikaaner"]], ["Afrikaner", ["Afrikander"]], ["Alabamian", ["Alabaman"]], ["Albuquerquean", ["Albuquerquian"]], ["Anchorageite", ["Anchoragite"]], ["Angeleno", ["Los Angelean"]], ["Arizonan", ["Arizonian"]], ["Arkansan", ["Arkansawyer"]], ["Belarusian", ["Belarusan"]], ["Caymanian", ["Cayman Islander"]], ["Coloradan", ["Coloradoan"]], ["Fairbanksan", ["Fairbanksian"]], ["Fort Worthian", ["Fort Worther"]], ["Grenadan", ["Grenadian"]], ["Hong Konger", ["Hong Kongite", "Hong Kongian"]], ["Hoosier", ["Indianan", "Indianian"]], ["Illinoisan", ["Illinoisian"]], ["Iowan", ["Iowegian"]], ["Louisianian", ["Louisianan"]], ["Michigander", ["Michiganite", "Michiganian"]], ["Missourian", ["Missouran"]], ["Monegasque", ["Monacan"]], ["Neapolitan", ["Neopolitan"]], ["New Hampshirite", ["New Hampshireite", "New Hampshireman"]], ["New Jerseyan", ["New Jerseyite"]], ["New Orleanian", ["New Orleansian"]], ["Nutmegger", ["Connecticuter"]], ["Oklahoma Cityan", ["Oklahoma Citian"]], ["Oklahoman", ["Oklahomian"]], ["Seattleite", ["Seattlite"]], ["Surinamese", ["Surinamer"]], ["Tallahasseean", ["Tallahassean"]], ["Tennessean", ["Tennesseean"]], ["Tusconan", ["Tusconian", "Tusconite"]], ["Utahn", ["Utahan"]], ["Saudi", ["Saudi Arabian"]], ] return preferred_forms_check(text, preferences, err, msg)
def check(text)
Suggest the preferred forms.

source:     Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
4.383043
4.292191
1.021167
err = "terms.denizen_labels.norris" msg = "Would you like '{}'?" preferences = [ ["Mancunian", ["Manchesterian"]], ["Mancunians", ["Manchesterians"]], ["Vallisoletano", ["Valladolidian"]], ["Wulfrunian", ["Wolverhamptonian", "Wolverhamptonite"]], ["Novocastrian", ["Newcastleite", "Newcastlite"]], ["Trifluvian", [u"Trois-Rivièrester"]], ["Leodenisian", ["Leedsian"]], ["Minneapolitan", ["Minneapolisian"]], ["Hartlepudlian", ["Hartlepoolian"]], ["Liverpudlian", ["Liverpoolian"]], ["Haligonian", ["Halifaxer"]], ["Varsovian", ["Warsawer", "Warsawian"]], ["Providentian", ["Providencian", "Providencer"]], ["Tridentine", ["Trentian", "Trentonian"]], ] return preferred_forms_check(text, preferences, err, msg)
def check_denizen_labels_norris(text)
Suggest the preferred forms.

source:     Mary Norris
source_url: http://nyr.kr/1rGienj
8.563713
8.394503
1.020157
err = "institution.vtech" msg = "Incorrect name. Use '{}' instead of '{}'." institution = [ ["Virginia Polytechnic Institute and State University", ["Virginia Polytechnic and State University"]], ] return preferred_forms_check(text, institution, err, msg)
def check_vtech(text)
Suggest the correct name.

source:     Virginia Tech Division of Student Affairs
source_url: http://bit.ly/2en1zbv
11.093228
9.530507
1.16397
err = "malapropisms.misc" msg = u"'{}' is a malapropism." illogics = [ "the infinitesimal universe", "a serial experience", "attack my voracity", ] return existence_check(text, illogics, err, msg, offset=1)
def check(text)
Check the text.
24.551813
25.541832
0.961239
err = "dates_times.am_pm.midnight_noon" msg = (u"'a.m.' is always morning; 'p.m.' is always night.") list = [ "\d{1,2} ?a\.?m\.? in the morning", "\d{1,2} ?p\.?m\.? in the evening", "\d{1,2} ?p\.?m\.? at night", "\d{1,2} ?p\.?m\.? in the afternoon", ] return existence_check(text, list, err, msg, join=True)
def check_redundancy(text)
Check the text.
5.085111
5.081238
1.000762
err = "sexism.misc" msg = "Gender bias. Use '{}' instead of '{}'." sexism = [ ["anchor", ["anchorman", "anchorwoman"]], ["chair", ["chairman", "chairwoman"]], ["drafter", ["draftman", "draftwoman"]], ["ombuds", ["ombudsman", "ombudswoman"]], ["tribe member", ["tribesman", "tribeswoman"]], ["police officer", ["policeman", "policewoman"]], ["firefighter", ["fireman", "firewoman"]], ["mail carrier", ["mailman", "mailwoman"]], ["history", ["herstory"]], ["women", ["womyn"]], ["poet", ["poetess"]], ["author", ["authoress"]], ["waiter", ["waitress"]], ["lawyer", ["lady lawyer"]], ["doctor", ["woman doctor"]], ["bookseller", ["female booksalesman"]], ["air pilot", ["female airman"]], ["executor", ["executrix"]], ["prosecutor", ["prosecutrix"]], ["testator", ["testatrix"]], ["husband and wife", ["man and wife"]], ["chairs", ["chairmen and chairs"]], ["men and women", ["men and girls"]], ["comedian", ["comedienne"]], ["confidant", ["confidante"]], ["scientist", ["woman scientist"]], ["scientists", ["women scientists"]] # ["hero", ["heroine"]] ] errors = preferred_forms_check(text, sexism, err, msg, ignore_case=False) msg = "Not a preferred form. Use '{}' instead of '{}'." pref = [ ["anchor", ["anchorperson"]], ["chair", ["chairperson"]], ["drafter", ["draftperson"]], ["ombuds", ["ombudsperson"]], ["tribe member", ["tribesperson"]], ["police officer", ["policeperson"]], ["firefighter", ["fireperson"]], ["mail carrier", ["mailperson"]], ] for x in preferred_forms_check(text, pref, err, msg, ignore_case=False): errors.append(x) return errors
def check(text)
Suggest the preferred forms.
3.796034
3.672098
1.033751
err = "dates_times.dates" msg = u"Apostrophes aren't needed for decades." regex = "\d0\'s" return existence_check( text, [regex], err, msg, excluded_topics=["50 Cent"])
def check_decade_apostrophes_short(text)
Check the text for dates of the form X0's.
24.259533
20.565775
1.179607
err = "dates_times.dates" msg = u"Apostrophes aren't needed for decades." regex = "\d\d\d0\'s" return existence_check(text, [regex], err, msg)
def check_decade_apostrophes_long(text)
Check the text for dates of the form XXX0's.
13.351347
11.710026
1.140164
err = "dates_times.dates" msg = u"When specifying a date range, write 'from X to Y'." regex = "[fF]rom \d+[^ \t\n\r\f\va-zA-Z0-9_\.]\d+" return existence_check(text, [regex], err, msg)
def check_dash_and_from(text)
Check the text.
13.72939
14.014288
0.979671
err = "dates_times.dates" msg = u"When specifying a month and year, no comma is needed." regex = "(?:" + "|".join(calendar.month_name[1:]) + "), \d{3,}" return existence_check(text, [regex], err, msg)
def check_month_year_comma(text)
Check the text.
12.650112
12.831022
0.985901
err = "dates_times.dates" msg = u"When specifying a month and year, 'of' is unnecessary." regex = "(?:" + "|".join(calendar.month_name[1:]) + ") of \d{3,}" return existence_check(text, [regex], err, msg)
def check_month_of_year(text)
Check the text.
12.820539
12.880151
0.995372
err = "redundancy.wallace" msg = "Redundancy. Use '{}' instead of '{}'." redundancies = [ ["rectangular", ["rectangular in shape"]], ["audible", ["audible to the ear"]], ] return preferred_forms_check(text, redundancies, err, msg)
def check(text)
Suggest the preferred forms.
17.369579
14.630193
1.187242
err = "redundancy.nordquist" msg = "Redundancy. Use '{}' instead of '{}'." redundancies = [ ["essential", ["absolutely essential"]], ["necessary", ["absolutely necessary"]], ["a.m.", ["a.m. in the morning"]], ["p.m.", ["p.m. at night"]], ] return preferred_forms_check(text, redundancies, err, msg)
def check_nordquist(text)
Suggest the preferred forms.

source:     Richard Nordquist
source_url: http://grammar.about.com/bio/Richard-Nordquist-22176.htm
7.491718
6.611572
1.133122
@functools.wraps(f)
def wrapped(*args, **kwargs):
    f(*args, **kwargs)
    close_cache_shelves()
return wrapped
def close_cache_shelves_after(f)
Decorator that ensures cache shelves are closed after the call.
2.120782
1.928928
1.099462
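A usage sketch, with a hypothetical entry point:

    @close_cache_shelves_after
    def lint_files(paths):
        ...  # any cache shelves opened during linting are closed on return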
# Determine the location of the cache.
cache_dirname = os.path.join(_get_xdg_cache_home(), 'proselint')
legacy_cache_dirname = os.path.join(os.path.expanduser("~"), ".proselint")

if not os.path.isdir(cache_dirname):
    # Migrate the cache from the legacy path to the XDG-compliant location.
    if os.path.isdir(legacy_cache_dirname):
        os.rename(legacy_cache_dirname, cache_dirname)
    # Create the cache if it does not already exist.
    else:
        os.makedirs(cache_dirname)

cache_filename = f.__module__ + "." + f.__name__
cachepath = os.path.join(cache_dirname, cache_filename)

@functools.wraps(f)
def wrapped(*args, **kwargs):
    # handle instance methods
    if hasattr(f, '__self__'):
        args = args[1:]

    signature = (f.__module__ + '.' + f.__name__).encode("utf-8")
    tempargdict = inspect.getcallargs(f, *args, **kwargs)

    for item in list(tempargdict.items()):
        signature += item[1].encode("utf-8")

    key = hashlib.sha256(signature).hexdigest()

    try:
        cache = _get_cache(cachepath)
        return cache[key]
    except KeyError:
        value = f(*args, **kwargs)
        cache[key] = value
        cache.sync()
        return value
    except TypeError:
        call_to = f.__module__ + '.' + f.__name__
        print('Warning: could not disk cache call to %s;'
              'it probably has unhashable args. Error: %s' %
              (call_to, traceback.format_exc()))
        return f(*args, **kwargs)

return wrapped
def memoize(f)
Cache results of computations on disk.
2.931182
2.900421
1.010606
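A usage sketch; note that the wrapper builds the cache key by UTF-8-encoding each argument, so the decorated function should take string arguments (the function below is hypothetical):

    @memoize
    def expensive_check(text):
        return run_all_checks(text)  # computed once per distinct text, then served from disk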