code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# Determine the number of failures we're going to process # A failure is a revision + all of the jobs that were fixed by it number_of_failures = int((target / 100) * len(failures)) low_value_jobs = [] for jobtype in active_jobs: # Determine if removing an active job will reduce the number of failures we would catch # or stay the same remaining_failures = check_removal(failures, [jobtype]) if len(remaining_failures) >= number_of_failures: low_value_jobs.append(jobtype) failures = remaining_failures else: failed_revisions = [] for revision in failures: if revision not in remaining_failures: failed_revisions.append(revision) logger.info("jobtype: %s is the root failure(s) of these %s revisions", jobtype, failed_revisions) return low_value_jobs
def build_removals(active_jobs, failures, target)
active_jobs - all possible desktop & android jobs on Treeherder (no PGO); failures - list of all failures; target - percentage of failures we're going to process. Return list of jobs to remove and list of revisions that are regressed
5.581114
5.146904
1.084363
total = len(fixed_by_commit_jobs) logger.info("Processing %s revision(s)", total) active_jobs = job_priorities_to_jobtypes() low_value_jobs = build_removals( active_jobs=active_jobs, failures=fixed_by_commit_jobs, target=target) # Only return high value jobs for low_value_job in low_value_jobs: try: active_jobs.remove(low_value_job) except ValueError: logger.warning("%s is missing from the job list", low_value_job) total = len(fixed_by_commit_jobs) total_detected = check_removal(fixed_by_commit_jobs, low_value_jobs) percent_detected = 100 * len(total_detected) / total logger.info("We will detect %.2f%% (%s) of the %s failures", percent_detected, len(total_detected), total) return active_jobs
def get_high_value_jobs(fixed_by_commit_jobs, target=100)
fixed_by_commit_jobs: Revisions and jobs that have been starred that are fixed with a push or a bug; target: Percentage of failures to analyze
3.894063
3.964729
0.982176
self.state = self.STATES['step_in_progress'] self.stepnum += 1 self.steps.append({ "name": name, "started": timestamp, "started_linenumber": lineno, "errors": [], })
def start_step(self, lineno, name="Unnamed step", timestamp=None)
Create a new step and update the state to reflect we're now in the middle of a step.
4.690453
4.031925
1.163328
self.state = self.STATES['step_finished'] step_errors = self.sub_parser.get_artifact() step_error_count = len(step_errors) if step_error_count > settings.PARSER_MAX_STEP_ERROR_LINES: step_errors = step_errors[:settings.PARSER_MAX_STEP_ERROR_LINES] self.artifact["errors_truncated"] = True self.current_step.update({ "finished": timestamp, "finished_linenumber": lineno, # Whilst the result code is present on both the start and end buildbot-style step # markers, for Taskcluster logs the start marker line lies about the result, since # the log output is unbuffered, so Taskcluster does not know the real result at # that point. As such, we only set the result when ending a step. "result": self.RESULT_DICT.get(result_code, "unknown"), "errors": step_errors }) # reset the sub_parser for the next step self.sub_parser.clear()
def end_step(self, lineno, timestamp=None, result_code=None)
Fill in the current step's summary and update the state to show the current step has ended.
7.457176
7.410628
1.006281
if self.state == self.STATES['step_in_progress']: # We've reached the end of the log without seeing the final "step finish" # marker, which would normally have triggered updating the step. As such we # must manually close out the current step, so things like result, finish # time are set for it. This ensures that the error summary for Taskcluster # infra failures actually lists the error that occurs at the # end of the log. self.end_step(last_lineno_seen)
def finish_parse(self, last_lineno_seen)
Clean-up/summary tasks run at the end of parsing.
20.725492
19.085985
1.085901
match = self.RE_TINDERBOXPRINT.match(line) if line else None if match: line = match.group('line') for regexp_item in self.TINDERBOX_REGEXP_TUPLE: match = regexp_item['re'].match(line) if match: artifact = match.groupdict() # handle duplicate fields for to_field, from_field in regexp_item['duplicates_fields'].items(): # if to_field not present or None copy form from_field if to_field not in artifact or artifact[to_field] is None: artifact[to_field] = artifact[from_field] artifact.update(regexp_item['base_dict']) self.artifact.append(artifact) return # default case: consider it html content # try to detect title/value splitting on <br/> artifact = {"content_type": "raw_html", } if "<br/>" in line: title, value = line.split("<br/>", 1) artifact["title"] = title artifact["value"] = value # or similar long lines if they contain a url elif "href" in line and "title" in line: def parse_url_line(line_data): class TpLineParser(HTMLParser): def handle_starttag(self, tag, attrs): d = dict(attrs) artifact["url"] = d['href'] artifact["title"] = d['title'] def handle_data(self, data): artifact["value"] = data p = TpLineParser() p.feed(line_data) p.close() # strip ^M returns on windows lines otherwise # handle_data will yield no data 'value' parse_url_line(line.replace('\r', '')) else: artifact["value"] = line self.artifact.append(artifact)
def parse_line(self, line, lineno)
Parse a single line of the log
4.670633
4.696713
0.994447
# TaskCluster logs are a bit wonky. # # TaskCluster logs begin with output coming from TaskCluster itself, # before it has transitioned control of the task to the configured # process. These "internal" logs look like the following: # # [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b # # If an error occurs during this "setup" phase, TaskCluster may emit # lines beginning with ``[taskcluster:error]``. # # Once control has transitioned from TaskCluster to the configured # task process, lines can be whatever the configured process emits. # The popular ``run-task`` wrapper prefixes output to emulate # TaskCluster's "internal" logs. e.g. # # [vcs 2016-09-09T17:45:02.842230Z] adding changesets # # This prefixing can confuse error parsing. So, we strip it. # # Because regular expression matching and string manipulation can be # expensive when performed on every line, we only strip the TaskCluster # log prefix if we know we're in a TaskCluster log. # First line of TaskCluster logs almost certainly has this. if line.startswith('[taskcluster '): self.is_taskcluster = True # For performance reasons, only do this if we have identified as # a TC task. if self.is_taskcluster: line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line) if self.is_error_line(line): self.add(line, lineno)
def parse_line(self, line, lineno)
Check a single line for an error. Keeps track of the line number.
9.338872
9.437988
0.989498
log = JobLog.objects.get(id=pk) return Response(self._log_as_dict(log))
def retrieve(self, request, project, pk=None)
Returns a job_log_url object given its ID
7.013665
5.004616
1.401439
job_ids = request.query_params.getlist('job_id') if not job_ids: raise ParseError( detail="The job_id parameter is mandatory for this endpoint") try: job_ids = [int(job_id) for job_id in job_ids] except ValueError: raise ParseError(detail="The job_id parameter(s) must be integers") logs = JobLog.objects.filter(job__repository__name=project, job_id__in=job_ids) return Response([self._log_as_dict(log) for log in logs])
def list(self, request, project)
GET method implementation for the list view. job_id -- Mandatory filter indicating which job these logs belong to.
2.756822
2.514707
1.096279
if weight_fn is None: weight_fn = default_weights # get a weighted average for the full set of data -- this is complicated # by the fact that we might have multiple data points from each revision # which we would want to weight equally -- do this by creating a set of # weights only for each bucket containing (potentially) multiple results # for each value num_revisions = len(revision_data) weights = [weight_fn(i, num_revisions) for i in range(num_revisions)] weighted_sum = 0 sum_of_weights = 0 for i in range(num_revisions): weighted_sum += sum(value * weights[i] for value in revision_data[i].values) sum_of_weights += weights[i] * len(revision_data[i].values) weighted_avg = weighted_sum / sum_of_weights if num_revisions > 0 else 0.0 # now that we have a weighted average, we can calculate the variance of the # whole series all_data = [v for datum in revision_data for v in datum.values] variance = (sum(pow(d-weighted_avg, 2) for d in all_data) / (len(all_data)-1)) if len(all_data) > 1 else 0.0 return {"avg": weighted_avg, "n": len(all_data), "variance": variance}
def analyze(revision_data, weight_fn=None)
Returns the average and sample variance (s**2) of a list of floats. `weight_fn` is a function that takes a list index and a window width, and returns a weight that is used to calculate a weighted average. For example, see `default_weights` or `linear_weights` below. If no function is passed, `default_weights` is used and the average will be uniformly weighted.
3.336623
3.241956
1.029201
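A minimal usage sketch for analyze() above. It assumes the function (and its module-level default_weights) is importable in the current scope; RevisionDatum is a hypothetical stand-in for the revision datum objects, which only need to expose a .values list.

class RevisionDatum:
    # Hypothetical stand-in: the real datum objects only need a `.values` list.
    def __init__(self, values):
        self.values = values

revision_data = [
    RevisionDatum([10.0, 11.0]),
    RevisionDatum([10.5]),
    RevisionDatum([30.0, 29.5]),
]

# With the default (uniform) weights this reduces to the plain mean and
# sample variance over all five values.
print(analyze(revision_data))
# -> {'avg': 18.2, 'n': 5, 'variance': 111.325} (approximately)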
if i >= n: return 0.0 return float(n - i) / float(n)
def linear_weights(i, n)
A window function that falls off arithmetically. This is used to calculate a weighted moving average (WMA) that gives higher weight to changes near the point being analyzed, and smooths out changes at the opposite edge of the moving window. See bug 879903 for details.
3.65075
5.049783
0.722952
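A quick illustration of the arithmetic fall-off, assuming linear_weights() from the record above is in scope.

# Weights for a 5-revision window: index 0 gets full weight and the weight
# falls off linearly to 0.2 at the far edge of the window.
print([linear_weights(i, 5) for i in range(5)])
# -> [1.0, 0.8, 0.6, 0.4, 0.2]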
if not w1 or not w2: return 0 s1 = analyze(w1, weight_fn) s2 = analyze(w2, weight_fn) delta_s = s2['avg'] - s1['avg'] if delta_s == 0: return 0 if s1['variance'] == 0 and s2['variance'] == 0: return float('inf') return delta_s / (((s1['variance'] / s1['n']) + (s2['variance'] / s2['n'])) ** 0.5)
def calc_t(w1, w2, weight_fn=None)
Perform a Student's t-test on the two sets of revision data. See the analyze() function for a description of the `weight_fn` argument.
2.411589
2.383897
1.011616
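A minimal sketch comparing two windows with calc_t(), reusing the hypothetical RevisionDatum stand-in from the analyze() example above.

base_window = [RevisionDatum([10.0, 10.2]), RevisionDatum([9.9])]
new_window = [RevisionDatum([15.0, 15.1]), RevisionDatum([14.9])]

# A large absolute t value means the two windows' averages differ by far more
# than their pooled noise, i.e. a likely regression or improvement.
t = calc_t(base_window, new_window)
print(t > 10)  # True for this data (t is roughly 47)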
new_data = [] guids = [datum['job']['job_guid'] for datum in data] state_map = { guid: state for (guid, state) in Job.objects.filter( guid__in=guids).values_list('guid', 'state') } for datum in data: job = datum['job'] if not state_map.get(job['job_guid']): new_data.append(datum) else: # should not transition from running to pending, # or completed to any other state current_state = state_map[job['job_guid']] if current_state == 'completed' or ( job['state'] == 'pending' and current_state == 'running'): continue new_data.append(datum) return new_data
def _remove_existing_jobs(data)
Remove jobs from data where we already have them in the same state. 1. split the incoming jobs into pending, running and complete. 2. fetch the ``job_guids`` from the db that are in the same state as they are in ``data``. 3. build a new list of jobs in ``new_data`` that are not already in the db and pass that back. It could end up empty at that point.
3.042575
2.807775
1.083625
# importing here to avoid an import loop from treeherder.log_parser.tasks import parse_logs task_types = { "errorsummary_json", "buildbot_text", "builds-4h" } job_log_ids = [] for job_log in job_logs: # a log can be submitted already parsed. So only schedule # a parsing task if it's ``pending`` # the submitter is then responsible for submitting the # text_log_summary artifact if job_log.status != JobLog.PENDING: continue # if this is not a known type of log, abort parse if job_log.name not in task_types: continue job_log_ids.append(job_log.id) # TODO: Replace the use of different queues for failures vs not with the # RabbitMQ priority feature (since the idea behind separate queues was # only to ensure failures are dealt with first if there is a backlog). if result != 'success': queue = 'log_parser_fail' priority = 'failures' else: queue = 'log_parser' priority = "normal" parse_logs.apply_async(queue=queue, args=[job.id, job_log_ids, priority])
def _schedule_log_parsing(job, job_logs, result)
Kick off the initial task that parses the log data. job_logs is a list of job log objects and result is the overall result for that job.
8.873776
8.870222
1.000401
exchange = Exchange(name, type="topic", passive=not create) # bind the exchange to our connection so operations can be performed on it bound_exchange = exchange(connection) # ensure the exchange exists. Throw an error if it was created with # passive=True and it doesn't exist. bound_exchange.declare() return bound_exchange
def get_exchange(connection, name, create=False)
Get a Kombu Exchange object using the passed-in name. Can create an Exchange, but this is typically not wanted in production-like environments and is only useful for testing.
5.680971
6.142656
0.92484
'''Return list of ref_data_name for job_priorities''' jobs = [] # we cache the reference data names in order to reduce API calls cache_key = '{}-{}-ref_data_names_cache'.format(project, build_system) ref_data_names_map = cache.get(cache_key) if not ref_data_names_map: # cache expired so re-build the reference data names map; the map # contains the ref_data_name of every treeherder *test* job for this project ref_data_names_map = self._build_ref_data_names(project, build_system) # update the cache cache.set(cache_key, ref_data_names_map, SETA_REF_DATA_NAMES_CACHE_TIMEOUT) # now check the JobPriority table against the list of valid runnable for jp in job_priorities: # if this JobPriority entry is no longer supported in SETA then ignore it if not valid_platform(jp.platform): continue if is_job_blacklisted(jp.testtype): continue key = jp.unique_identifier() if key in ref_data_names_map: # e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name jobs.append(ref_data_names_map[key]) else: logger.warning('Job priority (%s) not found in accepted jobs list', jp) return jobs
def _process(self, project, build_system, job_priorities)
Return list of ref_data_name for job_priorities
6.461244
5.807559
1.112558
''' We want all reference data names for every task that runs on a specific project. For example: * Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1" * TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1" ''' ignored_jobs = [] ref_data_names = {} runnable_jobs = list_runnable_jobs(project) for job in runnable_jobs: # get testtype e.g. web-platform-tests-4 testtype = parse_testtype( build_system_type=job['build_system_type'], job_type_name=job['job_type_name'], platform_option=job['platform_option'], ref_data_name=job['ref_data_name'] ) if not valid_platform(job['platform']): continue if is_job_blacklisted(testtype): ignored_jobs.append(job['ref_data_name']) continue key = unique_key(testtype=testtype, buildtype=job['platform_option'], platform=job['platform']) if build_system == '*': ref_data_names[key] = job['ref_data_name'] elif job['build_system_type'] == build_system: ref_data_names[key] = job['ref_data_name'] for ref_data_name in sorted(ignored_jobs): logger.info('Ignoring %s', ref_data_name) return ref_data_names
def _build_ref_data_names(self, project, build_system)
We want all reference data names for every task that runs on a specific project. For example: * Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1" * TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1"
5.175502
2.461832
2.102297
try: user = authenticate(request) if not user: raise AuthenticationFailed("User not authenticated.") if not user.is_active: raise AuthenticationFailed("This user has been disabled.") login(request, user) return Response(UserSerializer(user).data) except AuthError as ex: # This indicates an error that may require attention by the # Treeherder or Taskcluster teams. Logging this to New Relic to # increase visibility. newrelic.agent.record_exception() logger.exception("Error", exc_info=ex) raise AuthenticationFailed(str(ex))
def login(self, request)
Verify credentials
6.631744
6.644792
0.998036
fields = set() for leaf in node: if leaf.get('kind', None) == "Field": fields.add(leaf["name"]["value"]) if leaf.get("selection_set", None): fields = fields.union(collect_fields(leaf["selection_set"]["selections"])) return fields
def collect_fields(node)
Get all the unique field names that are eligible for optimization. A function like this has been requested upstream in graphene_django: https://github.com/graphql-python/graphene-django/issues/230
3.243596
3.035366
1.068601
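A minimal sketch of collect_fields() with a hand-built node shaped like the selection AST it walks; in practice the node comes from graphene's info object, so the dict below is only a hypothetical stand-in.

node = [
    {
        'kind': 'Field',
        'name': {'value': 'jobs'},
        'selection_set': {
            'selections': [
                {'kind': 'Field', 'name': {'value': 'result'}, 'selection_set': None},
            ],
        },
    },
]
print(collect_fields(node))
# -> {'jobs', 'result'}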
fields = collect_fields(info_dict) for field in fields: if field in field_map: field_name, opt = field_map[field] qs = (qs.prefetch_related(field_name) if opt == "prefetch" else qs.select_related(field_name)) return qs
def optimize(qs, info_dict, field_map)
Add either select_related or prefetch_related to fields of the qs
3.230329
2.741871
1.178148
username = os.environ.get('ELASTICSEARCH_USERNAME') password = os.environ.get('ELASTICSEARCH_PASSWORD') if username and password: return Elasticsearch(url, http_auth=(username, password)) return Elasticsearch(url)
def build_connection(url)
Build an Elasticsearch connection with the given URL. Elastic.co's Heroku addon doesn't create credentials with access to the cluster by default, so they aren't exposed in the URL it provides either. This function works around that by grabbing our credentials from the environment via Django settings and building a connection with them.
2.099077
1.95429
1.074087
# The parser may only need to run until it has seen a specific line. # Once that's occurred, it can mark itself as complete, to save # being run against later log lines. if self.parser.complete: return # Perf data is stored in a json structure contained in a single line, # if the MAX_LINE_LENGTH is applied the data structure could be # truncated, preventing it from being ingested. if 'PERFHERDER_DATA' not in line: line = line[:self.MAX_LINE_LENGTH] self.parser.parse_line(line, self.lineno) self.lineno += 1
def parse_line(self, line)
Parse a single line of the log.
11.012386
10.509747
1.047826
self.artifact[self.parser.name] = self.parser.get_artifact() return self.artifact
def get_artifact(self)
Return the job artifact built by the parser.
7.209158
4.053075
1.778689
failure_line = text_log_error.metadata.failure_line logger.debug("Looking for test match in failure %d", failure_line.id) if failure_line.action != "test_result" or failure_line.message is None: return f = { 'text_log_error___metadata__failure_line__action': 'test_result', 'text_log_error___metadata__failure_line__test': failure_line.test, 'text_log_error___metadata__failure_line__subtest': failure_line.subtest, 'text_log_error___metadata__failure_line__status': failure_line.status, 'text_log_error___metadata__failure_line__expected': failure_line.expected, 'text_log_error___metadata__failure_line__message': failure_line.message } qwargs = ( Q(text_log_error___metadata__best_classification=None) & (Q(text_log_error___metadata__best_is_verified=True) | Q(text_log_error__step__job=text_log_error.step.job)) ) qs = (TextLogErrorMatch.objects.filter(**f) .exclude(qwargs) .order_by('-score', '-classified_failure')) if not qs: return # chunk through the QuerySet because it could potentially be very large # time bound each call to the scoring function to avoid job timeouts # returns an iterable of (score, classified_failure_id) tuples chunks = chunked_qs_reverse(qs, chunk_size=20000) return chain.from_iterable(time_boxed(score_matches, chunks, time_budget=500))
def precise_matcher(text_log_error)
Query for TextLogErrorMatches identical to matches of the given TextLogError.
4.941144
4.756872
1.038738
# Note: Elasticsearch is currently disabled in all environments (see bug 1527868). if not settings.ELASTICSEARCH_URL: return [] failure_line = text_log_error.metadata.failure_line if failure_line.action != "test_result" or not failure_line.message: logger.debug("Skipped elasticsearch matching") return filters = [ {'term': {'test': failure_line.test}}, {'term': {'status': failure_line.status}}, {'term': {'expected': failure_line.expected}}, {'exists': {'field': 'best_classification'}} ] if failure_line.subtest: query = filters.append({'term': {'subtest': failure_line.subtest}}) query = { 'query': { 'bool': { 'filter': filters, 'must': [{ 'match_phrase': { 'message': failure_line.message[:1024], }, }], }, }, } try: results = search(query) except Exception: logger.error("Elasticsearch lookup failed: %s %s %s %s %s", failure_line.test, failure_line.subtest, failure_line.status, failure_line.expected, failure_line.message) raise if len(results) > 1: args = ( text_log_error.id, failure_line.id, len(results), ) logger.info('text_log_error=%i failure_line=%i Elasticsearch produced %i results' % args) newrelic.agent.record_custom_event('es_matches', { 'num_results': len(results), 'text_log_error_id': text_log_error.id, 'failure_line_id': failure_line.id, }) scorer = MatchScorer(failure_line.message) matches = [(item, item['message']) for item in results] best_match = scorer.best_match(matches) if not best_match: return score, es_result = best_match # TODO: score all results and return # TODO: just return results with score above cut off? return [(score, es_result['best_classification'])]
def elasticsearch_matcher(text_log_error)
Query Elasticsearch and score the results. Uses a filtered search checking test, status, expected, and the message as a phrase query with non-alphabet tokens removed.
3.634986
3.46913
1.047809
failure_line = text_log_error.metadata.failure_line if (failure_line.action != "crash" or failure_line.signature is None or failure_line.signature == "None"): return f = { 'text_log_error___metadata__failure_line__action': 'crash', 'text_log_error___metadata__failure_line__signature': failure_line.signature, } qwargs = ( Q(text_log_error___metadata__best_classification=None) & (Q(text_log_error___metadata__best_is_verified=True) | Q(text_log_error__step__job=text_log_error.step.job)) ) qs = (TextLogErrorMatch.objects.filter(**f) .exclude(qwargs) .select_related('text_log_error', 'text_log_error___metadata') .order_by('-score', '-classified_failure')) size = 20000 time_budget = 500 # See if we can get any matches when filtering by the same test first_attempt = qs.filter(text_log_error___metadata__failure_line__test=failure_line.test) chunks = chunked_qs_reverse(first_attempt, chunk_size=size) scored_matches = chain.from_iterable(time_boxed(score_matches, chunks, time_budget)) if scored_matches: return scored_matches # try again without filtering to the test but applying a .8 score multiplyer chunks = chunked_qs_reverse(qs, chunk_size=size) scored_matches = chain.from_iterable(time_boxed( score_matches, chunks, time_budget, score_multiplier=(8, 10), )) return scored_matches
def crash_signature_matcher(text_log_error)
Query for TextLogErrorMatches with the same crash signature. Produces two queries, first checking if the same test produces matches and secondly checking without the same test but lowering the produced scores.
4.578927
4.385738
1.044049
best_match = None for match, message in matches: self.matcher.set_seq1(message) ratio = self.matcher.quick_ratio() if best_match is None or ratio >= best_match[0]: new_ratio = self.matcher.ratio() if best_match is None or new_ratio > best_match[0]: best_match = (new_ratio, match) return best_match
def best_match(self, matches)
Find the most similar string to self.target. Given a list of candidate strings find the closest match to self.target, returning the best match with a score indicating closeness of match. :param matches: A list of candidate matches :returns: A tuple of (score, best_match)
3.018346
3.362082
0.897761
if not pushes: logger.info("No new pushes to store") return for push in pushes: store_push(repository, push)
def store_push_data(repository, pushes)
Stores push data in the treeherder database pushes = [ { "revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80", "push_timestamp": 1378293517, "author": "[email protected]", "revisions": [ { "comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer", "author": "John Doe <[email protected]>", "revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80" }, ... ] }, ... ] returns = { }
3.357425
3.097896
1.083776
max_timestamp = datetime.datetime.now() - cycle_interval # seperate datums into chunks while True: perf_datums_to_cycle = list(self.filter( repository=repository, push_timestamp__lt=max_timestamp).values_list('id', flat=True)[:chunk_size]) if not perf_datums_to_cycle: # we're done! break self.filter(id__in=perf_datums_to_cycle).delete() if sleep_time: # Allow some time for other queries to get through time.sleep(sleep_time) # also remove any signatures which are (no longer) associated with # a job for signature in PerformanceSignature.objects.filter( repository=repository): if not self.filter(signature=signature).exists(): signature.delete()
def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time)
Delete data older than cycle_interval, splitting the target data into chunks of chunk_size size.
4.393202
4.365158
1.006425
try: matches = failure_line.error.matches.all() except AttributeError: # failure_line.error can return None matches = [] tle_serializer = TextLogErrorMatchSerializer(matches, many=True) classified_failures = models.ClassifiedFailure.objects.filter(error_matches__in=matches) cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True) response = super().to_representation(failure_line) response['matches'] = tle_serializer.data response['classified_failures'] = cf_serializer.data return response
def to_representation(self, failure_line)
Manually add matches via our wrapper of the TLEMetadata -> TLE relation. I could not work out how to do this multiple-relation jump with DRF (or even whether it was possible), so I'm using this manual method instead.
3.996667
3.84427
1.039642
bug_stats, bug_ids = self.get_bug_stats(startday, endday) alt_date_bug_totals = self.get_alt_date_bug_totals(alt_startday, alt_endday, bug_ids) test_run_count = self.get_test_runs(startday, endday) # if fetch_bug_details fails, None is returned bug_info = self.fetch_all_bug_details(bug_ids) all_bug_changes = [] template = Template(self.open_file('comment.template', False)) if self.weekly_mode: top_bugs = [bug[0] for bug in sorted(bug_stats.items(), key=lambda x: x[1]['total'], reverse=True)][:50] for bug_id, counts in bug_stats.items(): change_priority = None change_whiteboard = None priority = 0 rank = top_bugs.index(bug_id)+1 if self.weekly_mode and bug_id in top_bugs else None if bug_info and bug_id in bug_info: if self.weekly_mode: priority = self.assign_priority(counts) if priority == 2: change_priority, change_whiteboard = self.check_needswork_owner(bug_info[bug_id]) # change [stockwell needswork] to [stockwell unknown] when failures drop below 20 failures/week # if this block is true, it implies a priority of 0 (mutually exclusive to previous block) if (counts['total'] < 20): change_whiteboard = self.check_needswork(bug_info[bug_id]['whiteboard']) else: change_priority, change_whiteboard = self.check_needswork_owner(bug_info[bug_id]) # recommend disabling when more than 150 failures tracked over 21 days and # takes precedence over any prevous change_whiteboard assignments if (bug_id in alt_date_bug_totals and not self.check_whiteboard_status(bug_info[bug_id]['whiteboard'])): priority = 3 change_whiteboard = self.update_whiteboard(bug_info[bug_id]['whiteboard'], '[stockwell disable-recommended]') comment = template.render(bug_id=bug_id, total=counts['total'], test_run_count=test_run_count, rank=rank, priority=priority, failure_rate=round(counts['total']/float(test_run_count), 3), repositories=counts['per_repository'], platforms=counts['per_platform'], startday=startday, endday=endday.split()[0], weekly_mode=self.weekly_mode) bug_changes = {'bug_id': bug_id, 'changes': { 'comment': {'body': comment} } } if change_whiteboard: bug_changes['changes']['whiteboard'] = change_whiteboard if change_priority: bug_changes['changes']['priority'] = change_priority all_bug_changes.append(bug_changes) return all_bug_changes
def generate_bug_changes(self, startday, endday, alt_startday, alt_endday)
Returns a list of dicts containing a bug id, a bug comment (only for bugs whose total number of daily or weekly occurrences meet the appropriate threshold) and potentially an updated whiteboard or priority status.
3.842246
3.768207
1.019649
yesterday = date.today() - timedelta(days=1) endday = datetime(yesterday.year, yesterday.month, yesterday.day, 23, 59, 59, 999) if mode: startday = yesterday - timedelta(days=numDays) else: # daily mode startday = yesterday return startday.isoformat(), endday.strftime('%Y-%m-%d %H:%M:%S.%f')
def calculate_date_strings(self, mode, numDays)
Returns a tuple of start (in YYYY-MM-DD format) and end date strings (in YYYY-MM-DD HH:MM:SS format for an inclusive day).
2.713535
2.657152
1.021219
stockwell_text = re.search(r'\[stockwell (.+?)\]', whiteboard) if stockwell_text is not None: text = stockwell_text.group(1).split(':')[0] if text == 'fixed' or text == 'disable-recommended' or text == 'infra' or text == 'disabled': return True return False
def check_whiteboard_status(self, whiteboard)
Extracts stockwell text from a bug's whiteboard status to determine whether it matches specified stockwell text; returns a boolean.
6.55368
4.839516
1.354202
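A few illustrative whiteboard values for check_whiteboard_status() above. It is a method, so in context it is called on its class instance as self.check_whiteboard_status(...); the calls below drop self for brevity.

check_whiteboard_status('[stockwell disable-recommended]')  # True: 'disable-recommended' is one of the terminal states
check_whiteboard_status('[stockwell needswork]')            # False: 'needswork' still needs attention
check_whiteboard_status('no stockwell tag here')            # False: no [stockwell ...] text at all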
params = {'include_fields': 'product, component, priority, whiteboard, id'} params['id'] = bug_ids try: response = self.session.get(settings.BZ_API_URL + '/rest/bug', headers=self.session.headers, params=params, timeout=30) response.raise_for_status() except RequestException as e: logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e)) return None if response.headers['Content-Type'] == 'text/html; charset=UTF-8': return None data = response.json() if 'bugs' not in data: return None return data['bugs']
def fetch_bug_details(self, bug_ids)
Fetches bug metadata from bugzilla and returns an encoded dict if successful, otherwise returns None.
3.524623
3.057847
1.152649
# Min required failures per bug in order to post a comment threshold = 1 if self.weekly_mode else 15 bug_ids = (BugJobMap.failures.by_date(startday, endday) .values('bug_id') .annotate(total=Count('bug_id')) .filter(total__gte=threshold) .values_list('bug_id', flat=True)) bugs = (BugJobMap.failures.by_date(startday, endday) .filter(bug_id__in=bug_ids) .values('job__repository__name', 'job__machine_platform__platform', 'bug_id')) bug_map = dict() for bug in bugs: platform = bug['job__machine_platform__platform'] repo = bug['job__repository__name'] bug_id = bug['bug_id'] if bug_id in bug_map: bug_map[bug_id]['total'] += 1 bug_map[bug_id]['per_platform'][platform] += 1 bug_map[bug_id]['per_repository'][repo] += 1 else: bug_map[bug_id] = {} bug_map[bug_id]['total'] = 1 bug_map[bug_id]['per_platform'] = Counter([platform]) bug_map[bug_id]['per_repository'] = Counter([repo]) return bug_map, bug_ids
def get_bug_stats(self, startday, endday)
Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... }
2.559826
2.368881
1.080606
bugs = (BugJobMap.failures.by_date(startday, endday) .filter(bug_id__in=bug_ids) .values('bug_id') .annotate(total=Count('id')) .values('bug_id', 'total')) return {bug['bug_id']: bug['total'] for bug in bugs if bug['total'] >= 150}
def get_alt_date_bug_totals(self, startday, endday, bug_ids)
Use previously fetched bug_ids to check for total failures exceeding 150 in 21 days.
3.60102
3.252485
1.107159
min = 0 max = 600 bugs_list = [] bug_ids_length = len(bug_ids) while bug_ids_length >= min and bug_ids_length > 0: data = self.fetch_bug_details(bug_ids[min:max]) if data: bugs_list += data min = max max = max + 600 return {bug['id']: bug for bug in bugs_list} if len(bugs_list) else None
def fetch_all_bug_details(self, bug_ids)
Batch requests for Bugzilla data in groups of 1200 (the safe limit for not hitting the maximum URL length).
2.84147
2.750964
1.0329
''' A lot of these transformations are from tasks before task labels and some of them are if we grab data directly from Treeherder jobs endpoint instead of runnable jobs API. ''' # XXX: Evaluate which of these transformations are still valid if testtype.startswith('[funsize'): return None testtype = testtype.split('/opt-')[-1] testtype = testtype.split('/debug-')[-1] # this is plain-reftests for android testtype = testtype.replace('plain-', '') testtype = testtype.strip() # https://bugzilla.mozilla.org/show_bug.cgi?id=1313844 testtype = testtype.replace('browser-chrome-e10s', 'e10s-browser-chrome') testtype = testtype.replace('devtools-chrome-e10s', 'e10s-devtools-chrome') testtype = testtype.replace('[TC] Android 4.3 API15+ ', '') # mochitest-gl-1 <-- Android 4.3 armv7 API 15+ mozilla-inbound opt test mochitest-gl-1 # mochitest-webgl-9 <-- test-android-4.3-arm7-api-15/opt-mochitest-webgl-9 testtype = testtype.replace('webgl-', 'gl-') return testtype
def transform(testtype)
A lot of these transformations date from tasks before task labels, and some of them apply when we grab data directly from the Treeherder jobs endpoint instead of the runnable jobs API.
9.875319
6.230085
1.585102
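A few illustrative calls to transform() above, assuming it is in scope; the first input mirrors the example given in the function's own comments.

print(transform('test-android-4.3-arm7-api-15/opt-mochitest-webgl-9'))
# -> 'mochitest-gl-9'
print(transform('browser-chrome-e10s'))
# -> 'e10s-browser-chrome'
print(transform('[funsize] update generating task'))
# -> None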
subject = user_info['sub'] email = user_info['email'] if "Mozilla-LDAP" in subject: return "mozilla-ldap/" + email elif "email" in subject: return "email/" + email elif "github" in subject: return "github/" + email elif "google" in subject: return "google/" + email # Firefox account elif "oauth2" in subject: return "oauth2/" + email else: raise AuthenticationFailed("Unrecognized identity")
def _get_username_from_userinfo(self, user_info)
Get the user's username from the jwt sub property
3.742995
3.52999
1.060341
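An illustrative mapping for _get_username_from_userinfo() above, using the sub and email values shown in the _get_user_info docstring in the next record; the method is called on the authentication backend instance.

user_info = {'sub': 'ad|Mozilla-LDAP|fsurname', 'email': '[email protected]'}
# 'Mozilla-LDAP' appears in the sub, so the first branch wins:
# self._get_username_from_userinfo(user_info) -> 'mozilla-ldap/[email protected]'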
# JWT Validator # Per https://auth0.com/docs/quickstart/backend/python/01-authorization#create-the-jwt-validation-decorator try: unverified_header = jwt.get_unverified_header(id_token) except jwt.JWTError: raise AuthError('Unable to decode the Id token header') if 'kid' not in unverified_header: raise AuthError('Id token header missing RSA key ID') rsa_key = None for key in jwks["keys"]: if key["kid"] == unverified_header["kid"]: rsa_key = { "kty": key["kty"], "kid": key["kid"], "use": key["use"], "n": key["n"], "e": key["e"] } break if not rsa_key: raise AuthError('Id token using unrecognised RSA key ID') try: # https://python-jose.readthedocs.io/en/latest/jwt/api.html#jose.jwt.decode user_info = jwt.decode( id_token, rsa_key, algorithms=['RS256'], audience=AUTH0_CLIENTID, access_token=access_token, issuer="https://"+AUTH0_DOMAIN+"/" ) return user_info except jwt.ExpiredSignatureError: raise AuthError('Id token is expired') except jwt.JWTClaimsError: raise AuthError("Incorrect claims: please check the audience and issuer") except jwt.JWTError: raise AuthError("Invalid header: Unable to parse authentication")
def _get_user_info(self, access_token, id_token)
Extracts the user info payload from the Id Token. Example return value: { "at_hash": "<HASH>", "aud": "<HASH>", "email_verified": true, "email": "[email protected]", "exp": 1551259495, "family_name": "Surname", "given_name": "Firstname", "https://sso.mozilla.com/claim/groups": [ "all_scm_level_1", "all_scm_level_2", "all_scm_level_3", # ... ], "iat": 1550654695, "iss": "https://auth.mozilla.auth0.com/", "name": "Firstname Surname", "nickname": "Firstname Surname", "nonce": "<HASH>", "picture": "<GRAVATAR_URL>", "sub": "ad|Mozilla-LDAP|fsurname", "updated_at": "2019-02-20T09:24:55.449Z", }
2.573336
2.497782
1.030248
access_token_expiry_timestamp = self._get_access_token_expiry(request) id_token_expiry_timestamp = self._get_id_token_expiry(user_info) now_in_seconds = int(time.time()) # The session length is set to match whichever token expiration time is closer. earliest_expiration_timestamp = min(access_token_expiry_timestamp, id_token_expiry_timestamp) seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds if seconds_until_expiry <= 0: raise AuthError('Session expiry time has already passed!') return seconds_until_expiry
def _calculate_session_expiry(self, request, user_info)
Returns the number of seconds after which the Django session should expire.
3.003227
2.884582
1.041131
return unique_key(testtype=str(job['testtype']), buildtype=str(job['platform_option']), platform=str(job['platform']))
def _unique_key(job)
Return a key for querying our uniqueness mapping system. This makes sure that we use a consistent key between our code and the jobs selected from the table.
10.064597
12.154963
0.828024
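A sketch of the intended call to _unique_key() above, assuming (as the rest of this module suggests) that unique_key builds a tuple from the testtype, buildtype and platform fields; the job dict mirrors the shape produced by _sanitize_data in the next record.

job = {
    'build_system_type': 'taskcluster',
    'platform': 'windows8-64',
    'platform_option': 'opt',
    'testtype': 'web-platform-tests-1',
}
key = _unique_key(job)
# e.g. ('web-platform-tests-1', 'opt', 'windows8-64'), used to detect the same
# job appearing under both buildbot and taskcluster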
job_build_system_type = {} sanitized_list = [] for job in runnable_jobs_data: if not valid_platform(job['platform']): logger.info('Invalid platform %s', job['platform']) continue testtype = parse_testtype( build_system_type=job['build_system_type'], job_type_name=job['job_type_name'], platform_option=job['platform_option'], ref_data_name=job['ref_data_name'] ) if not testtype: continue # NOTE: This is *all* the data we need from the runnable API new_job = { 'build_system_type': job['build_system_type'], # e.g. {buildbot,taskcluster,*} 'platform': job['platform'], # e.g. windows8-64 'platform_option': job['platform_option'], # e.g. {opt,debug} 'testtype': testtype, # e.g. web-platform-tests-1 } key = _unique_key(new_job) # Let's build a map of all the jobs and if duplicated change the build_system_type to * if key not in job_build_system_type: job_build_system_type[key] = job['build_system_type'] sanitized_list.append(new_job) elif new_job['build_system_type'] != job_build_system_type[key]: new_job['build_system_type'] = job_build_system_type[key] # This will *replace* the previous build system type with '*' # This guarantees that we don't have duplicates sanitized_list[sanitized_list.index(new_job)]['build_system_type'] = '*' return sanitized_list
def _sanitize_data(runnable_jobs_data)
We receive data from the runnable jobs API and return sanitized data that meets our needs. This is a loop to remove duplicates (including buildsystem -> * transformations if needed), which allows us to use a single database query. It returns sanitized_list, a subset that excludes: * jobs that don't specify the platform * jobs that don't specify the testtype * duplicate jobs, whose build_system_type we replace with '*'. That way, if a job appears under both 'buildbot' and 'taskcluster', its build_system_type will be '*'.
3.652459
2.985456
1.223417
jp_index, priority, expiration_date = _initialize_values() total_jobs = len(data) new_jobs, failed_changes, updated_jobs = 0, 0, 0 # Loop through sanitized jobs, add new jobs and update the build system if needed for job in data: key = _unique_key(job) if key in jp_index: # We already know about this job, we might need to update the build system # We're seeing the job again with another build system (e.g. buildbot vs # taskcluster). We need to change it to '*' if jp_index[key]['build_system_type'] != '*' and jp_index[key]['build_system_type'] != job["build_system_type"]: db_job = JobPriority.objects.get(pk=jp_index[key]['pk']) db_job.buildsystem = '*' db_job.save() logger.info('Updated %s/%s from %s to %s', db_job.testtype, db_job.buildtype, job['build_system_type'], db_job.buildsystem) updated_jobs += 1 else: # We have a new job from runnablejobs to add to our master list try: jobpriority = JobPriority( testtype=str(job["testtype"]), buildtype=str(job["platform_option"]), platform=str(job["platform"]), priority=priority, expiration_date=expiration_date, buildsystem=job["build_system_type"] ) jobpriority.save() logger.info('New job was found (%s,%s,%s,%s)', job['testtype'], job['platform_option'], job['platform'], job["build_system_type"]) new_jobs += 1 except Exception as error: logger.warning(str(error)) failed_changes += 1 logger.info('We have %s new jobs and %s updated jobs out of %s total jobs processed.', new_jobs, updated_jobs, total_jobs) if failed_changes != 0: logger.warning('We have failed %s changes out of %s total jobs processed.', failed_changes, total_jobs) return new_jobs, failed_changes, updated_jobs
def _update_table(data)
Add new jobs to the priority table and update the build system if required. data is a list of dictionaries that describe a job type. Returns the number of new, failed and updated jobs.
3.691609
3.462925
1.066038
if not JobPriority.objects.exists(): return preseed = preseed_data() for job in preseed: queryset = JobPriority.objects.all() for field in ('testtype', 'buildtype', 'platform'): if job[field] != '*': queryset = queryset.filter(**{field: job[field]}) # Deal with the case where we have a new entry in preseed if not queryset: create_new_entry(job) else: # We can have wildcards, so loop on all returned values in data for jp in queryset: process_job_priority(jp, job)
def load_preseed()
Update JobPriority information from preseed.json. The preseed data has these fields: buildtype, testtype, platform, priority, expiration_date. The expiration_date field defaults to 2 weeks when inserted in the table. The expiration_date field has the format "YYYY-MM-DD"; however, it can be "*" to indicate that the entry never expires. The default priority is 1; however, if we want to force coalescing we can do that. The fields buildtype, testtype and platform can have "*", which makes it match all flavors of that field. For example: (linux64, pgo, *) matches all Linux 64 pgo tests.
7.102615
5.004345
1.41929
''' Helper method to return all possible valid time intervals for data stored by Perfherder ''' return [PerformanceTimeInterval.DAY, PerformanceTimeInterval.WEEK, PerformanceTimeInterval.TWO_WEEKS, PerformanceTimeInterval.SIXTY_DAYS, PerformanceTimeInterval.NINETY_DAYS, PerformanceTimeInterval.ONE_YEAR]
def all_valid_time_intervals()
Helper method to return all possible valid time intervals for data stored by Perfherder
6.779343
2.964559
2.286796
''' Returns a filtered subset of this collection of signatures, based on a set of key/value tuples This is useful when you only want a subset of the signatures in a project. Example usage: :: pc = PerfherderClient() signatures = pc.get_signatures('mozilla-central') signatures = signatures.filter(('suite', 'tp5o'), ('machine_platform', 'windowsxp')) ''' filtered_signatures = {} for (signature, signature_value) in self.items(): skip = False for (key, val) in args: if signature_value.get(key) != val: skip = True break if not skip: filtered_signatures[signature] = signature_value return PerformanceSignatureCollection(filtered_signatures)
def filter(self, *args)
Returns a filtered subset of this collection of signatures, based on a set of key/value tuples This is useful when you only want a subset of the signatures in a project. Example usage: :: pc = PerfherderClient() signatures = pc.get_signatures('mozilla-central') signatures = signatures.filter(('suite', 'tp5o'), ('machine_platform', 'windowsxp'))
6.074154
1.659515
3.660199
''' Returns all property names in this collection of signatures ''' property_names = set() for signature_value in self.values(): for property_name in signature_value.keys(): property_names.add(property_name) return property_names
def get_property_names(self)
Returns all property names in this collection of signatures
4.321405
2.642781
1.635173
''' Returns all property values for a particular property name in this collection ''' property_values = set() for signature_value in self.values(): if signature_value.get(property_name): property_values.add(signature_value[property_name]) return property_values
def get_property_values(self, property_name)
Returns all property values for a particular property name in this collection
3.940799
2.817029
1.39892
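A small sketch of the two property helpers above, assuming PerformanceSignatureCollection wraps a plain dict (as filter() suggests); the hash keys are made up, and the suite/platform values are illustrative ('tp5o' and 'windowsxp' come from the filter() docstring example).

signatures = PerformanceSignatureCollection({
    'a1b2c3': {'suite': 'tp5o', 'machine_platform': 'windowsxp'},
    'd4e5f6': {'suite': 'tp5o', 'machine_platform': 'linux64'},
})
print(signatures.get_property_names())
# -> {'suite', 'machine_platform'}
print(signatures.get_property_values('machine_platform'))
# -> {'windowsxp', 'linux64'}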
''' Gets a set of performance signatures associated with a project and time range ''' results = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT, project, **params) return PerformanceSignatureCollection(results)
def get_performance_signatures(self, project, **params)
Gets a set of performance signatures associated with a project and time range
6.690293
5.427223
1.232729
''' Gets a dictionary of PerformanceSeries objects You can specify which signatures to get by passing signature to this function ''' results = self._get_json(self.PERFORMANCE_DATA_ENDPOINT, project, **params) return {k: PerformanceSeries(v) for k, v in results.items()}
def get_performance_data(self, project, **params)
Gets a dictionary of PerformanceSeries objects You can specify which signatures to get by passing signature to this function
8.801908
3.341377
2.634215
from . import matchers def is_matcher_func(member): return inspect.isfunction(member) and member.__name__.endswith("_matcher") members = inspect.getmembers(matchers, is_matcher_func) for name, func in members: yield func
def get_matchers()
Get matcher functions from treeherder.autoclassify.matchers. We classify matchers as any function in treeherder.autoclassify.matchers with a name ending in _matcher. This is currently overkill but protects against the unwary engineer adding new functions to the matchers module that shouldn't be treated as matchers.
3.535126
3.236061
1.092416
for text_log_error in errors: matches = find_all_matches(text_log_error, matchers) # TextLogErrorMatch instances, unsaved! best_match = first(matches, key=lambda m: (-m.score, -m.classified_failure_id)) if not best_match: continue newrelic.agent.record_custom_event('highest_scored_matcher', { 'matcher': best_match.matcher_name, 'score': best_match.score, 'text_log_error': best_match.text_log_error_id, }) yield best_match
def find_best_matches(errors, matchers)
Find the best match for each error. We use the Good Enough™ ratio as a watershed level for match scores.
5.276407
5.491186
0.960887
for matcher_func in matchers: matches = matcher_func(text_log_error) # matches: iterator of (score, ClassifiedFailure.id) if not matches: continue for score, classified_failure_id in matches: yield TextLogErrorMatch( score=score, matcher_name=matcher_func.__name__, classified_failure_id=classified_failure_id, text_log_error=text_log_error, )
def find_all_matches(text_log_error, matchers)
Find matches for the given error using the given matcher functions. Returns *unsaved* TextLogErrorMatch instances.
3.474066
3.287342
1.056801
score_cut_off = 0.7 return (text_log_error.matches.filter(score__gt=score_cut_off) .order_by("-score", "-classified_failure_id") .select_related('classified_failure') .first())
def get_best_match(text_log_error)
Get the best TextLogErrorMatch for a given TextLogError. Matches are further filtered by the score cut-off.
6.527027
4.64353
1.405617
text_log_error.metadata.best_classification = classified_failure text_log_error.metadata.save(update_fields=['best_classification']) text_log_error.metadata.failure_line.elastic_search_insert()
def mark_best_classification(text_log_error, classified_failure)
Wrapper for setting best_classification on both TextLogError and FailureLine. Set the given ClassifiedFailure as best_classification for the given TextLogError. Handles the duplication of best_classification on FailureLine so you don't have to!
5.405962
4.66382
1.159127
for text_log_error in errors: best_match = get_best_match(text_log_error) if not best_match: continue mark_best_classification(text_log_error, best_match.classified_failure)
def mark_best_classifications(errors)
Convenience wrapper around mark_best_classification. Finds the best match for each TextLogError in errors, handles the case where no match meets the cut-off score, and then calls mark_best_classification to save that information.
4.420884
3.057552
1.44589
for match in matches: try: match.save() except IntegrityError: args = (match.text_log_error_id, match.matcher_name, match.classified_failure_id) logger.warning( "Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i", args, )
def update_db(matches)
Save TextLogErrorMatch instances to the DB. We loop over each Match instance instead of calling bulk_create() so we can catch any potential IntegrityErrors and continue.
7.119721
4.737544
1.502829
file_path = os.path.join("schemas", filename) with open(file_path) as f: schema = yaml.load(f) return schema
def get_json_schema(filename)
Get a JSON Schema by filename.
2.7546
2.556231
1.077602
headers = headers or {} headers['User-Agent'] = 'treeherder/{}'.format(settings.SITE_HOSTNAME) # Work around bug 1305768. if 'queue.taskcluster.net' in url: headers['x-taskcluster-skip-cache'] = 'true' response = requests.request(method, url, headers=headers, timeout=timeout, **kwargs) if response.history: params = { 'url': url, 'redirects': len(response.history), 'duration': sum(r.elapsed.total_seconds() for r in response.history) } newrelic.agent.record_custom_event('RedirectedRequest', params=params) response.raise_for_status() return response
def make_request(url, method='GET', headers=None, timeout=30, **kwargs)
A wrapper around requests to set defaults & call raise_for_status().
4.015986
3.975062
1.010295
job_details = json.loads(job_info_artifact['blob'])['job_details'] for job_detail in job_details: job_detail_dict = { 'title': job_detail.get('title'), 'value': job_detail['value'], 'url': job_detail.get('url') } for (k, v) in job_detail_dict.items(): max_field_length = JobDetail._meta.get_field(k).max_length if v is not None and len(v) > max_field_length: logger.warning("Job detail '%s' for job_guid %s too long, truncating", v[:max_field_length], job.guid) job_detail_dict[k] = v[:max_field_length] # move the url field to be updated in defaults now that it's # had its size trimmed, if necessary job_detail_dict['defaults'] = {'url': job_detail_dict['url']} del job_detail_dict['url'] JobDetail.objects.update_or_create( job=job, **job_detail_dict)
def store_job_info_artifact(job, job_info_artifact)
Store the contents of the job info artifact in job details
3.208526
3.143541
1.020673
step_data = json.loads( text_log_summary_artifact['blob'])['step_data'] result_map = {v: k for (k, v) in TextLogStep.RESULTS} with transaction.atomic(): for step in step_data['steps']: name = step['name'][:TextLogStep._meta.get_field('name').max_length] # process start/end times if we have them # we currently don't support timezones in treeherder, so # just ignore that when importing/updating the bug to avoid # a ValueError (though by default the text log summaries # we produce should have time expressed in UTC anyway) time_kwargs = {} for tkey in ('started', 'finished'): if step.get(tkey): time_kwargs[tkey] = dateutil.parser.parse( step[tkey], ignoretz=True) log_step = TextLogStep.objects.create( job=job, started_line_number=step['started_linenumber'], finished_line_number=step['finished_linenumber'], name=name, result=result_map[step['result']], **time_kwargs) if step.get('errors'): for error in step['errors']: TextLogError.objects.create( step=log_step, line_number=error['linenumber'], line=astral_filter(error['line'])) # get error summary immediately (to warm the cache) error_summary.get_error_summary(job)
def store_text_log_summary_artifact(job, text_log_summary_artifact)
Store the contents of the text log summary artifact
5.146551
5.118269
1.005526
for artifact in artifact_data: # Determine what type of artifact we have received if artifact: artifact_name = artifact.get('name') if not artifact_name: logger.error("load_job_artifacts: Unnamed job artifact, skipping") continue job_guid = artifact.get('job_guid') if not job_guid: logger.error("load_job_artifacts: Artifact '%s' with no " "job guid set, skipping", artifact_name) continue try: job = Job.objects.get(guid=job_guid) except Job.DoesNotExist: logger.error('load_job_artifacts: No job_id for guid %s', job_guid) continue if artifact_name == 'performance_data': store_performance_artifact(job, artifact) elif artifact_name == 'Job Info': store_job_info_artifact(job, artifact) elif artifact_name == 'text_log_summary': try: store_text_log_summary_artifact(job, artifact) except IntegrityError: logger.warning("Couldn't insert text log information " "for job with guid %s, this probably " "means the job was already parsed", job_guid) else: logger.warning("Unknown artifact type: %s submitted with job %s", artifact_name, job.guid) else: logger.error('store_job_artifacts: artifact type %s not understood', artifact_name)
def store_job_artifacts(artifact_data)
Store a list of job artifacts. All of the datums in artifact_data need to be in the following format: { 'type': 'json', 'name': 'my-artifact-name', # blob can be any kind of structured data 'blob': { 'stuff': [1, 2, 3, 4, 5] }, 'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33' }
3.087538
3.023197
1.021282
for artifact in artifacts: blob = artifact['blob'] if (artifact['type'].lower() == 'json' and not isinstance(blob, str)): artifact['blob'] = json.dumps(blob) return artifacts
def serialize_artifact_json_blobs(artifacts)
Ensure that JSON artifact blobs passed as dicts are converted to JSON
3.425543
2.977386
1.15052
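A minimal sketch of serialize_artifact_json_blobs() above, using the artifact shape documented in store_job_artifacts; only dict blobs on 'json'-typed artifacts are serialised, string blobs pass through untouched.

artifacts = [{
    'type': 'json',
    'name': 'Job Info',
    'blob': {'job_details': []},
    'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33',
}]
serialize_artifact_json_blobs(artifacts)
print(artifacts[0]['blob'])
# -> '{"job_details": []}'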
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT) ret = {} for result in resp: ret[result['option_collection_hash']] = result['options'] return ret
def get_option_collection_hash(self)
Gets option collection hash, a mapping of hash values to build properties Returns a dictionary with the following structure: { hashkey1: [ { key: value }, { key: value }, ... ], hashkey2: [ { key: value }, { key: value }, ... ], ... }
5.097984
4.86232
1.048467
return self._get_json_list(self.PUSH_ENDPOINT, project, **params)
def get_pushes(self, project, **params)
Gets pushes from project, filtered by parameters By default this method will just return the latest 10 pushes (if they exist) :param project: project (repository name) to query data for :param params: keyword arguments to filter results
7.298689
12.544583
0.58182
return self._get_json_list(self.JOBS_ENDPOINT, project, **params)
def get_jobs(self, project, **params)
Gets jobs from project, filtered by parameters :param project: project (repository name) to query data for :param params: keyword arguments to filter results
7.234625
10.394057
0.696035
return self._get_json(self.JOB_LOG_URL_ENDPOINT, project, **params)
def get_job_log_url(self, project, **params)
Gets job log url, filtered by parameters :param project: project (repository name) to query data for :param params: keyword arguments to filter results
9.23096
10.529729
0.876657
query = { 'query': { 'match_all': {} } } for result in raw_query(query, index=index): yield result
def all_documents(index=INDEX_NAME)
Get all documents from the given index. Returns full Elasticsearch objects so you can get metadata too.
3.439988
4.085732
0.841951
actions = compact(dict_to_op( to_dict(model), index_name=INDEX_NAME, doc_type=DOC_TYPE, op_type=action, ) for model in iterable) # fail fast if there are no actions if not actions: return 0 items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index) return items
def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index')
Wrapper of elasticsearch's bulk method. Converts an iterable of models to document operations and submits them to Elasticsearch. Returns a count of operations when done. https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
5.042525
5.035824
1.001331
refresh_index() # Refresh the index so we can get a correct count query = { 'query': { 'match_all': {} } } result = es_conn.count(index=index, doc_type=DOC_TYPE, body=query) return result['count']
def count_index(index=INDEX_NAME)
Return a document count for the given index. https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.count https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html
3.431998
4.029686
0.851679
result = es_conn.get(index=index, doc_type=doc_type, id=id, **kwargs) return result['_source']
def get_document(id, index=INDEX_NAME, doc_type=DOC_TYPE, **kwargs)
Thin wrapper to get a single document by ID. https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
2.381536
2.648418
0.89923
doc = to_dict(obj) if doc is None: return id = doc.pop('id') return es_conn.index(index, doc_type, doc, id=id)
def index(obj, index=INDEX_NAME, doc_type=DOC_TYPE)
Index the given document. https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.index https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
4.289249
4.834901
0.887143
result = es_conn.search(index=index, doc_type=DOC_TYPE, body=query) return result['hits']['hits']
def raw_query(query, index=INDEX_NAME, doc_type=DOC_TYPE)
Thin wrapper of the search function to provide useful defaults https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
2.661195
2.892717
0.919964
es_conn.indices.delete(index, ignore=404)

try:
    es_conn.indices.create(index, INDEX_SETTINGS.get(index, None))
except TransportError as e:
    raise Exception('Failed to create index, got: {}'.format(e.error))
def reinit_index(index=INDEX_NAME)
Delete and then initialise the given index name Gets settings if they exist in the mappings module. https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
4.123706
4.664145
0.884129
results = raw_query(query, index=index, doc_type=doc_type)
return [r['_source'] for r in results]
def search(query, index=INDEX_NAME, doc_type=DOC_TYPE)
Thin wrapper of the main query function to provide just the resulting objects
2.971991
2.784519
1.067327
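A small, hypothetical query showing how the two wrappers differ in what they return (the `test` field name is an assumption about the index mapping):

query = {
    'query': {
        'match': {'test': 'browser_test_example.js'}
    }
}
docs = search(query)      # just the _source dicts
hits = raw_query(query)   # full hits, including _id, _score and other metadata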
'''Clear the expiration date of every job that has expired.'''
# Only select rows where there is an expiration date set
for job in JobPriority.objects.filter(expiration_date__isnull=False):
    if job.has_expired():
        job.expiration_date = None
        job.save()
def clear_expiration_field_for_expired_jobs(self)
Clear the expiration date of every job that has expired.
5.914834
4.895511
1.208216
# Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100
# for jobs updated via load_preseed) are updated
for jp in JobPriority.objects.filter(expiration_date__isnull=True):
    if jp.unique_identifier() not in high_value_jobs:
        if jp.priority != SETA_LOW_VALUE_PRIORITY:
            logger.warning('Decreasing priority of %s', jp.unique_identifier())
            jp.priority = SETA_LOW_VALUE_PRIORITY
            jp.save(update_fields=['priority'])
    elif jp.priority != priority:
        logger.warning('Increasing priority of %s', jp.unique_identifier())
        jp.priority = priority
        jp.save(update_fields=['priority'])
def adjust_jobs_priority(self, high_value_jobs, priority=1)
For every job priority determine if we need to increase or decrease the job priority Currently, high value jobs have a priority of 1 and a timeout of 0.
4.938
4.801813
1.028361
try:
    report = json.loads(request.body)['csp-report']
except (KeyError, TypeError, ValueError):
    return HttpResponseBadRequest('Invalid CSP violation report')

logger.warning('CSP violation: %s', report)
newrelic.agent.record_custom_event('CSP violation', report)
return HttpResponse()
def csp_report_collector(request)
Accepts the Content-Security-Policy violation reports generated via the `report-uri` feature: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/report-uri This is written as a standard Django view rather than as a django-rest-framework APIView, since the latter ends up being more of a hindrance than a help, thanks to: * CSP violation reports being submitted with a Content-Type of `application/csp-report`, which d-r-f is unable to recognise as JSON without use of a custom parser class. * Needing to accept reports from unauthenticated users too, which requires overriding permission_classes.
3.663709
3.752913
0.976231
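For reference, a trimmed example of the JSON body a browser POSTs to this endpoint with a Content-Type of `application/csp-report`; the values are illustrative:

example_body = {
    "csp-report": {
        "document-uri": "https://treeherder.mozilla.org/",
        "violated-directive": "script-src",
        "blocked-uri": "https://example.com/evil.js",
    }
}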
'''
Routing to /api/project/{project}/seta/job-priorities/

This API can potentially have these consumers:
    * Buildbot
      * build_system_type=buildbot
      * priority=5
      * format=json
    * TaskCluster (Gecko decision task)
      * build_system_type=taskcluster
      * format=json
'''
build_system_type = request.query_params.get('build_system_type', '*')
priority = request.query_params.get('priority')
try:
    return Response(seta_job_scheduling(project, build_system_type, priority))
except SetaError as e:
    return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
def list(self, request, project)
Routing to /api/project/{project}/seta/job-priorities/ This API can potentially have these consumers: * Buildbot * build_system_type=buildbot * priority=5 * format=json * TaskCluster (Gecko decision task) * build_system_type=taskcluster * format=json
7.652375
1.968192
3.888022
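A hedged example request against the route named in the docstring; the host, project and parameter values are illustrative:

import requests

resp = requests.get(
    'https://treeherder.mozilla.org/api/project/mozilla-inbound/seta/job-priorities/',
    params={'build_system_type': 'taskcluster', 'format': 'json'},
)
priorities = resp.json()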
'''
custom method to serialize + format jobs information

It's worth doing this big ugly thing (as opposed to using the django rest
framework serializer or whatever) as this function is often in the critical path
'''
option_collection_map = OptionCollection.objects.get_option_collection_map()
results = []
for values in job_qs[offset:(offset + count)].values_list(
        *[pq[1] for pq in self._property_query_mapping]):
    platform_option = option_collection_map.get(
        values[self._option_collection_hash_idx], "")
    # some values need to be transformed
    values = list(values)
    for (i, _) in enumerate(values):
        func = self._property_query_mapping[i][2]
        if func:
            values[i] = func(values[i])
    # append results differently depending on if we are returning
    # a dictionary or a list
    if return_type == 'dict':
        results.append(dict(zip(
            [pq[0] for pq in self._property_query_mapping] + ['platform_option'],
            values + [platform_option])))
    else:
        results.append(values + [platform_option])

response_dict = {
    'results': results
}
if return_type == 'list':
    response_dict.update({
        'job_property_names': [pq[0] for pq in self._property_query_mapping] + ['platform_option']
    })

return response_dict
def _get_job_list_response(self, job_qs, offset, count, return_type)
custom method to serialize + format jobs information It's worth doing this big ugly thing (as opposed to using the django rest framework serializer or whatever) as this function is often in the critical path
4.499527
2.992344
1.50368
try:
    job = Job.objects.select_related(
        *self._default_select_related + ['taskcluster_metadata']).get(
            repository__name=project, id=pk)
except Job.DoesNotExist:
    return Response("No job with id: {0}".format(pk),
                    status=HTTP_404_NOT_FOUND)

resp = serializers.JobSerializer(job, read_only=True).data

resp["resource_uri"] = reverse("jobs-detail",
                               kwargs={"project": project, "pk": pk})
resp["logs"] = []
for (name, url) in JobLog.objects.filter(job=job).values_list(
        'name', 'url'):
    resp["logs"].append({'name': name, 'url': url})

platform_option = job.get_platform_option()
if platform_option:
    resp["platform_option"] = platform_option

try:
    resp['taskcluster_metadata'] = {
        'task_id': job.taskcluster_metadata.task_id,
        'retry_id': job.taskcluster_metadata.retry_id
    }
except ObjectDoesNotExist:
    pass

status_map = {k: v for k, v in Job.AUTOCLASSIFY_STATUSES}
resp["autoclassify_status"] = status_map[job.autoclassify_status]

return Response(resp)
def retrieve(self, request, project, pk=None)
GET method implementation for detail view Return a single job with log_references and artifact names and links to the artifact blobs.
2.94743
2.891133
1.019472
MAX_JOBS_COUNT = 2000

# make a mutable copy of these params
filter_params = request.query_params.copy()

# various hacks to ensure API backwards compatibility
# (iterate over a snapshot of the keys, since we mutate filter_params below)
for param_key in list(filter_params.keys()):
    # replace `result_set_id` with `push_id`
    if param_key.startswith('result_set_id'):
        new_param_key = param_key.replace('result_set_id', 'push_id')
        filter_params[new_param_key] = filter_params[param_key]
        del filter_params[param_key]
    # convert legacy timestamp parameters to time ones
    elif param_key in ['submit_timestamp', 'start_timestamp', 'end_timestamp']:
        new_param_key = param_key.replace('timestamp', 'time')
        filter_params[new_param_key] = datetime.datetime.fromtimestamp(
            float(filter_params[param_key]))
        del filter_params[param_key]
    # sanity check 'last modified'
    elif param_key.startswith('last_modified'):
        datestr = filter_params[param_key]
        try:
            parser.parse(datestr)
        except ValueError:
            return Response(
                "Invalid date value for `last_modified`: {}".format(datestr),
                status=HTTP_400_BAD_REQUEST)

try:
    offset = int(filter_params.get("offset", 0))
    count = int(filter_params.get("count", 10))
except ValueError:
    return Response(
        "Invalid value for offset or count",
        status=HTTP_400_BAD_REQUEST)

return_type = filter_params.get("return_type", "dict").lower()

if count > MAX_JOBS_COUNT:
    msg = "Specified count exceeds API MAX_JOBS_COUNT value: {}".format(MAX_JOBS_COUNT)
    return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)

try:
    repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
    return Response({
        "detail": "No project with name {}".format(project)
    }, status=HTTP_404_NOT_FOUND)

jobs = JobFilter({k: v for (k, v) in filter_params.items()},
                 queryset=Job.objects.filter(
                     repository=repository).select_related(
                         *self._default_select_related)).qs

response_body = self._get_job_list_response(jobs, offset, count, return_type)
response_body["meta"] = dict(repository=project, offset=offset, count=count)

return Response(response_body)
def list(self, request, project)
GET method implementation for list view Optional parameters (default): - offset (0) - count (10) - return_type (dict)
2.646385
2.576164
1.027258
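A hedged sketch of calling this list endpoint over HTTP with the optional parameters described above; the host and project name are illustrative:

import requests

resp = requests.get(
    'https://treeherder.mozilla.org/api/project/mozilla-central/jobs/',
    params={'count': 5, 'offset': 0, 'return_type': 'dict'},
)
jobs = resp.json()['results']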
try:
    job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
    return Response("No job with id: {0}".format(pk),
                    status=HTTP_404_NOT_FOUND)

textlog_steps = TextLogStep.objects.filter(job=job).order_by(
    'started_line_number').prefetch_related('errors')

return Response(serializers.TextLogStepSerializer(textlog_steps,
                                                  many=True,
                                                  read_only=True).data)
def text_log_steps(self, request, project, pk=None)
Gets a list of steps associated with this job
3.378607
3.240114
1.042743
try:
    job = Job.objects.get(repository__name=project, id=pk)
except Job.DoesNotExist:
    return Response("No job with id: {0}".format(pk),
                    status=HTTP_404_NOT_FOUND)

textlog_errors = (TextLogError.objects
                  .filter(step__job=job)
                  .select_related("_metadata", "_metadata__failure_line")
                  .prefetch_related("classified_failures", "matches")
                  .order_by('id'))

return Response(serializers.TextLogErrorSerializer(textlog_errors,
                                                   many=True,
                                                   read_only=True).data)
def text_log_errors(self, request, project, pk=None)
Gets a list of text log errors associated with this job
4.327862
3.917022
1.104886
try:
    job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
    return Response("No job with id: {0}".format(pk),
                    status=HTTP_404_NOT_FOUND)

return Response(get_error_summary(job))
def bug_suggestions(self, request, project, pk=None)
Gets a set of bug suggestions for this job
3.751872
3.413789
1.099035
try:
    repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
    return Response({
        "detail": "No project with name {}".format(project)
    }, status=HTTP_404_NOT_FOUND)

try:
    job = Job.objects.get(repository=repository, id=pk)
except ObjectDoesNotExist:
    return Response("No job with id: {0}".format(pk),
                    status=HTTP_404_NOT_FOUND)

filter_params = request.query_params.copy()

try:
    offset = int(filter_params.get("offset", 0))
    # we don't need a big page size on this endpoint,
    # let's cap it to 50 elements
    count = int(filter_params.get("count", 50))
except ValueError:
    return Response("Invalid value for offset or count",
                    status=HTTP_400_BAD_REQUEST)

return_type = filter_params.get("return_type", "dict").lower()

jobs = JobFilter({k: v for (k, v) in filter_params.items()},
                 queryset=Job.objects.filter(
                     job_type_id=job.job_type_id,
                     repository=repository).exclude(
                         id=job.id).select_related(
                             *self._default_select_related)).qs

# similar jobs we want in descending order from most recent
jobs = jobs.order_by('-start_time')

response_body = self._get_job_list_response(jobs, offset, count, return_type)
response_body["meta"] = dict(offset=offset, count=count, repository=project)

return Response(response_body)
def similar_jobs(self, request, project, pk=None)
Get a list of jobs similar to the one selected.
3.055312
3.04452
1.003545
logger.debug("Crossreference %s: started", job.id)

if job.autoclassify_status >= Job.CROSSREFERENCED:
    logger.info("Job %i already crossreferenced", job.id)
    return False

try:
    rv = _crossreference(job)
except IntegrityError:
    job.autoclassify_status = Job.FAILED
    job.save(update_fields=['autoclassify_status'])
    logger.warning("IntegrityError crossreferencing error lines for job %s", job.id)
    return False

job.autoclassify_status = Job.CROSSREFERENCED
job.save(update_fields=['autoclassify_status'])

return rv
def crossreference_job(job)
Try to match the unstructured error lines with the corresponding structured error lines, relying on the fact that serialization of mozlog (and hence errorsummary files) is deterministic, so we can reserialize each structured error line and perform an in-order textual match. :job: - Job for which to perform the crossreferencing
3.492698
3.47434
1.005284
summary = partial(failure_line_summary, TbplFormatter())

for failure_line in failure_lines:
    repr_str = summary(failure_line)
    if repr_str:
        yield failure_line, repr_str

while True:
    yield None, None
def structured_iterator(failure_lines)
Create FailureLine, Tbpl-formatted-string tuples.
6.423127
4.340936
1.479664
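A sketch of why the generator pads with `(None, None)` forever: it lets a caller zip the structured lines against an arbitrarily long sequence of unstructured errors without exhausting the iterator. The `text_log_errors` input here is hypothetical:

structured = structured_iterator(failure_lines)
for text_log_error, (failure_line, repr_str) in zip(text_log_errors, structured):
    if failure_line is None:
        break  # no structured lines left; remaining errors stay unmatched
    # ...compare text_log_error.line against repr_str here...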
if failure_line.action == "test_result":
    action = "test_status" if failure_line.subtest is not None else "test_end"
elif failure_line.action == "truncated":
    return
else:
    action = failure_line.action

try:
    mozlog_func = getattr(formatter, action)
except AttributeError:
    logger.warning('Unknown mozlog function "%s"', action)
    return

formatted_log = mozlog_func(failure_line.to_mozlog_format())
split_log = first(formatted_log.split("\n", 1))

if not split_log:
    logger.debug('Failed to split log: %s', formatted_log)
    return

return split_log.strip()
def failure_line_summary(formatter, failure_line)
Create a mozlog formatted error summary string from the given failure_line. Create a string which can be compared to a TextLogError.line string to see if they match.
4.061197
3.818825
1.063468
url = furl(redis_url)
url.port += 1
url.scheme += 's'
# Disable TLS certificate validation (restoring the behaviour of the older redis-py 2.x),
# since for now Heroku Redis uses self-signed certificates:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1510000
url.args['ssl_cert_reqs'] = 'none'
return str(url)
def get_tls_redis_url(redis_url)
Returns the TLS version of a Heroku REDIS_URL string. Whilst the Redis server (like memcached) doesn't natively support TLS, Heroku runs a stunnel daemon on their Redis instances, which can be connected to directly by Redis clients that support TLS (avoiding the need for stunnel on the client). The stunnel port is one higher than the Redis server port, and the informal `rediss://` scheme is used to instruct clients to wrap the connection with TLS. Will convert 'redis://h:[email protected]:8409' ...to: 'rediss://h:[email protected]:8410?ssl_cert_reqs=none' See: https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel
5.828296
5.758591
1.012105
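An illustrative call using the example URL from the docstring above:

redis_url = 'redis://h:[email protected]:8409'
assert get_tls_redis_url(redis_url) == (
    'rediss://h:[email protected]:8410?ssl_cert_reqs=none'
)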
with make_request(self.url, stream=True) as response:
    download_size_in_bytes = int(response.headers.get('Content-Length', -1))

    # Temporary annotation of log size to help set thresholds in bug 1295997.
    newrelic.agent.add_custom_parameter(
        'unstructured_log_size',
        download_size_in_bytes
    )
    newrelic.agent.add_custom_parameter(
        'unstructured_log_encoding',
        response.headers.get('Content-Encoding', 'None')
    )

    if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
        raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)

    # Lines must be explicitly decoded since `iter_lines()` returns bytes by default
    # and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline
    # characters such as `\u0085` (which can appear in test output) are treated the same
    # as `\n` or `\r`, and so split into unwanted additional lines by `iter_lines()`.
    for line in response.iter_lines():
        for builder in self.builders:
            # Using `replace` to prevent malformed unicode (which might possibly exist
            # in test message output) from breaking parsing of the rest of the log.
            builder.parse_line(line.decode('utf-8', 'replace'))

# gather the artifacts from all builders
for builder in self.builders:
    # Run end-of-parsing actions for this parser,
    # in case the artifact needs clean-up/summarising.
    builder.finish_parse()
    name = builder.name
    artifact = builder.get_artifact()
    if name == 'performance_data' and not artifact[name]:
        continue
    self.artifacts[name] = artifact
def parse(self)
Iterate over each line of the log, running each parser against it. Stream lines from the gzip file and run each parser against it, building the ``artifact`` as we go.
6.732206
6.400601
1.051808
'''
Gets a summary of what passed/failed for the push
'''
jobs = Job.objects.filter(push=self).filter(
    Q(failure_classification__isnull=True) |
    Q(failure_classification__name='not classified')).exclude(tier=3)

status_dict = {}
for (state, result, total) in jobs.values_list(
        'state', 'result').annotate(total=Count('result')):
    if state == 'completed':
        status_dict[result] = total
    else:
        status_dict[state] = total
if 'superseded' in status_dict:
    # backward compatibility for API consumers
    status_dict['coalesced'] = status_dict['superseded']

return status_dict
def get_status(self)
Gets a summary of what passed/failed for the push
5.94355
4.643053
1.280095
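A hypothetical example of the summary this returns: completed jobs are keyed by their result, unfinished jobs by their state; the counts are made up:

status = {'success': 42, 'testfailed': 3, 'running': 5, 'pending': 2}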
options = sorted(list(options))
sha_hash = sha1()
# equivalent to looping over the options and calling sha_hash.update() for each one
sha_hash.update(''.join(options).encode('utf-8'))
return sha_hash.hexdigest()
def calculate_hash(options)
returns an option_collection_hash given a list of options
4.61522
4.142479
1.11412
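A quick check of the equivalence, using an illustrative option list:

from hashlib import sha1

options = ['e10s', 'debug']
expected = sha1(''.join(sorted(options)).encode('utf-8')).hexdigest()
assert calculate_hash(options) == expected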
# Retrieve list of jobs to delete
jobs_max_timestamp = datetime.datetime.now() - cycle_interval

jobs_cycled = 0
while True:
    jobs_chunk = list(self.filter(repository=repository,
                                  submit_time__lt=jobs_max_timestamp)
                      .values_list('guid', flat=True)[:chunk_size])
    if not jobs_chunk:
        # no more jobs to cycle, we're done!
        return jobs_cycled

    # Remove ORM entries for these jobs that don't currently have a
    # foreign key relation
    lines = FailureLine.objects.filter(job_guid__in=jobs_chunk)
    if settings.ELASTICSEARCH_URL:
        # To delete the data from elasticsearch we need the document
        # id. However selecting all this data can be rather slow, so
        # split the job into multiple smaller chunks.
        failures = itertools.chain.from_iterable(
            chunked_qs(
                lines,
                chunk_size=chunk_size,
                fields=['id', 'test'],
            ),
        )
        bulk(failures, action='delete')

    lines.delete()

    # cycle jobs *after* related data has been deleted, to be sure
    # we don't have any orphan data
    try:
        self.filter(guid__in=jobs_chunk).delete()
    except UnicodeDecodeError as e:
        # Some TextLogError `line` fields contain invalid Unicode, which causes a
        # UnicodeDecodeError since Django's .delete() fetches all fields (even those
        # not required for the delete). As such we delete the offending `TextLogError`s
        # separately (using only() to prevent pulling in `line`), before trying again.
        # This can likely be removed once all pre-Python 3 migration `TextLogError`s
        # have expired (check New Relic Insights at that point to confirm). See:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1528710
        newrelic.agent.record_custom_event('cycle_data UnicodeDecodeError workaround', {
            'exception': str(e),
        })
        TextLogError.objects.filter(step__job__guid__in=jobs_chunk).only('id').delete()
        self.filter(guid__in=jobs_chunk).delete()

    jobs_cycled += len(jobs_chunk)

    if sleep_time:
        # Allow some time for other queries to get through
        time.sleep(sleep_time)
def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time)
Delete data older than cycle_interval, processing the target data in chunks of chunk_size rows. Returns the number of jobs deleted
6.955956
6.860084
1.013975
if FailureLine.objects.filter(job_guid=self.guid,
                              action="truncated").count() > 0:
    return False

classified_error_count = TextLogError.objects.filter(
    _metadata__best_classification__isnull=False,
    step__job=self).count()

if classified_error_count == 0:
    return False

from treeherder.model.error_summary import get_useful_search_results

return classified_error_count == len(get_useful_search_results(self))
def is_fully_autoclassified(self)
Returns whether a job is fully autoclassified (i.e. we have classification information for all failure lines)
8.939213
7.331954
1.219213
unverified_errors = TextLogError.objects.filter(
    _metadata__best_is_verified=False,
    step__job=self).count()

if unverified_errors:
    logger.error("Job %r has unverified TextLogErrors", self)
    return False

logger.info("Job %r is fully verified", self)
return True
def is_fully_verified(self)
Determine if this Job is fully verified based on the state of its Errors. An Error (TextLogError or FailureLine) is considered Verified once its related TextLogErrorMetadata has best_is_verified set to True. A Job is then considered Verified once all of its Errors' TextLogErrorMetadata instances are set to True.
6.904137
3.746223
1.842959
if not self.is_fully_verified():
    return

classification = 'autoclassified intermittent'

already_classified = (JobNote.objects.filter(job=self)
                      .exclude(failure_classification__name=classification)
                      .exists())
if already_classified:
    # Don't add an autoclassification note if a Human already
    # classified this job.
    return

JobNote.create_autoclassify_job_note(job=self, user=user)
def update_after_verification(self, user)
Updates a job's state after being verified by a sheriff
8.178568
7.709654
1.060822
try:
    text_log_error = TextLogError.objects.get(step__job=self)
except (TextLogError.DoesNotExist, TextLogError.MultipleObjectsReturned):
    return None

# Can this TextLogError be converted into a single "useful search"?
# FIXME: what is the significance of only one search result here?
from treeherder.model.error_summary import get_useful_search_results
search_results = get_useful_search_results(self)
if len(search_results) != 1:
    return None

# Check that we have a related FailureLine
failure_line = text_log_error.get_failure_line()
if failure_line is None:
    return None

# Check our FailureLine is in a state we expect for
# auto-classification.
if not (failure_line.action == "test_result" and
        failure_line.test and
        failure_line.status and
        failure_line.expected):
    return None

return text_log_error
def get_manual_classification_line(self)
If this Job has a single TextLogError line, return that TextLogError. Some Jobs only have one related [via TextLogStep] TextLogError. This method checks if this Job is one of those (returning None if not) by: * checking the number of related TextLogErrors * counting the number of search results for the single TextLogError * checking there is a related FailureLine * checking the related FailureLine is in a given state If all these checks pass the TextLogError is returned, any failure returns None.
6.564936
4.048279
1.621661
# update the job classification
note = JobNote.objects.filter(job=self.job).order_by('-created').first()
if note:
    self.job.failure_classification_id = note.failure_classification.id
else:
    self.job.failure_classification_id = FailureClassification.objects.get(
        name='not classified').id

self.job.save()
def _update_failure_type(self)
Updates the failure type of this Note's Job. Set the linked Job's failure type to that of the most recent JobNote or set to Not Classified if there are no JobNotes. This is called when JobNotes are created (via .save()) and deleted (via .delete()) and is used to resolve the FailureClassification which has been denormalised onto Job.
3.529579
2.561791
1.377778