code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
sys.path.append(proselint_path)
checks = []
check_names = [key for (key, val)
in list(options["checks"].items()) if val]
for check_name in check_names:
module = importlib.import_module("checks." + check_name)
for d in dir(module):
if re.match("check", d):
checks.append(getattr(module, d))
return checks | def get_checks(options) | Extract the checks. | 3.560186 | 3.521917 | 1.010866 |
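The loop above discovers checks purely by attribute name. A minimal self-contained sketch of the same pattern, using a stand-in module (`fake_checks` is illustrative, not part of proselint):

```python
import re
import types

# Stand-in for a module loaded with importlib.import_module("checks." + name).
fake_checks = types.ModuleType("fake_checks")
fake_checks.check_a = lambda text: []   # collected: name starts with "check"
fake_checks.check_b = lambda text: []   # collected
fake_checks.helper = lambda text: []    # skipped by re.match("check", ...)

checks = [getattr(fake_checks, d) for d in dir(fake_checks)
          if re.match("check", d)]
print(len(checks))  # 2
```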
possible_defaults = (
'/etc/proselintrc',
os.path.join(proselint_path, '.proselintrc'),
)
options = {}
has_overrides = False
for filename in possible_defaults:
try:
options = json.load(open(filename))
break
except IOError:
pass
try:
user_options = json.load(
open(os.path.join(_get_xdg_config_home(), 'proselint', 'config')))
has_overrides = True
except IOError:
pass
# Read user configuration from the legacy path.
if not has_overrides:
try:
user_options = json.load(
open(os.path.join(os.path.expanduser('~'), '.proselintrc')))
has_overrides = True
except IOError:
pass
if has_overrides:
if 'max_errors' in user_options:
options['max_errors'] = user_options['max_errors']
if 'checks' in user_options:
for (key, value) in user_options['checks'].items():
try:
options['checks'][key] = value
except KeyError:
pass
return options | def load_options() | Read various proselintrc files, allowing user overrides. | 2.649051 | 2.333797 | 1.135082 |
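A hedged sketch of the merge rule above: user values overwrite `max_errors` and individual `checks` entries, and the `try`/`except KeyError` only guards the case where no default file was found, so `options` has no "checks" section at all. The dictionaries below are illustrative, not real proselint defaults:

```python
defaults = {"max_errors": 1000, "checks": {"typography.symbols": True}}
user = {"max_errors": 10, "checks": {"typography.symbols": False}}

defaults["max_errors"] = user["max_errors"]
for key, value in user["checks"].items():
    defaults["checks"][key] = value  # mirrors options['checks'][key] = value

print(defaults)  # {'max_errors': 10, 'checks': {'typography.symbols': False}}
```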
out = []
for e in errors:
out.append({
"check": e[0],
"message": e[1],
"line": 1 + e[2],
"column": 1 + e[3],
"start": 1 + e[4],
"end": 1 + e[5],
"extent": e[6],
"severity": e[7],
"replacements": e[8],
})
return json.dumps(
dict(status="success", data={"errors": out}), sort_keys=True) | def errors_to_json(errors) | Convert the errors to JSON. | 2.696646 | 2.62608 | 1.026871 |
position_counter = 0
for idx_line, line in enumerate(text.splitlines(True)):
if (position_counter + len(line.rstrip())) >= position:
return (idx_line, position - position_counter)
else:
position_counter += len(line) | def line_and_column(text, position) | Return the line number and column of a position in a string. | 2.833128 | 2.704334 | 1.047625 |
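A quick standalone check of the position arithmetic, with the function repeated verbatim so the snippet runs on its own; note the returned line and column are 0-based:

```python
def line_and_column(text, position):
    position_counter = 0
    for idx_line, line in enumerate(text.splitlines(True)):
        if (position_counter + len(line.rstrip())) >= position:
            return (idx_line, position - position_counter)
        position_counter += len(line)

# Position 5 is the "e" in "def": second line, second column.
print(line_and_column("abc\ndef\n", 5))  # (1, 1)
```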
options = load_options()
if isinstance(input_file, string_types):
text = input_file
else:
text = input_file.read()
# Get the checks.
checks = get_checks(options)
# Apply all the checks.
errors = []
for check in checks:
result = check(text)
for error in result:
(start, end, check, message, replacements) = error  # note: rebinds the loop variable 'check'
(line, column) = line_and_column(text, start)
if not is_quoted(start, text):
errors += [(check, message, line, column, start, end,
end - start, "warning", replacements)]
if len(errors) > options["max_errors"]:
break
# Sort the errors by line and column number.
errors = sorted(errors[:options["max_errors"]], key=lambda e: (e[2], e[3]))
return errors | def lint(input_file, debug=False) | Run the linter on the input file. | 3.796454 | 3.703453 | 1.025112 |
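A hedged usage sketch, assuming proselint is installed; `lint` accepts a string or a file-like object and returns the sorted error tuples assembled above:

```python
from proselint.tools import lint

for error in lint("The the fox jumped."):
    # (check, message, line, column, start, end, extent, severity, replacements)
    print(error)
```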
assert_error.description = "No {} error for '{}'".format(check, text)
assert check in [error[0] for error in lint(text)] | def assert_error(text, check, n=1) | Assert that text has n errors of type check. | 8.160982 | 8.981662 | 0.908627 |
errors = []
msg = " ".join(msg.split())
for w in word_pairs:
matches = [
[m for m in re.finditer(w[0], text)],
[m for m in re.finditer(w[1], text)]
]
if len(matches[0]) > 0 and len(matches[1]) > 0:
idx_minority = len(matches[0]) > len(matches[1])  # boolean used as an index: selects the less frequent form
for m in matches[idx_minority]:
errors.append((
m.start() + offset,
m.end() + offset,
err,
msg.format(w[~idx_minority], m.group(0)),
w[~idx_minority]))
return errors | def consistency_check(text, word_pairs, err, msg, offset=0) | Build a consistency checker for the given word_pairs. | 2.626238 | 2.5762 | 1.019423 |
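A hedged usage sketch of this checker builder, again assuming proselint is installed. It flags whichever form of the pair occurs less often:

```python
from proselint.tools import consistency_check

text = "The gray cat, the gray dog, and one grey bird."
errors = consistency_check(
    text, [["gray", "grey"]],
    "example.consistency",
    "Inconsistent spelling: prefer '{}' over '{}'.")
print(errors)  # flags the single 'grey', the minority form
```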
if ignore_case:
flags = re.IGNORECASE
else:
flags = 0
msg = " ".join(msg.split())
errors = []
regex = u"[\W^]{}[\W$]"
for p in list:
for r in p[1]:
for m in re.finditer(regex.format(r), text, flags=flags):
txt = m.group(0).strip()
errors.append((
m.start() + 1 + offset,
m.end() + offset,
err,
msg.format(p[0], txt),
p[0]))
errors = truncate_to_max(errors, max_errors)
return errors | def preferred_forms_check(text, list, err, msg, ignore_case=True, offset=0,
max_errors=float("inf")) | Build a checker that suggests the preferred form. | 3.409505 | 3.228522 | 1.056058 |
flags = 0
msg = " ".join(msg.split())
if ignore_case:
flags = flags | re.IGNORECASE
if str:
flags = flags | re.UNICODE
if dotall:
flags = flags | re.DOTALL
if require_padding:
regex = u"(?:^|\W){}[\W$]"
else:
regex = u"{}"
errors = []
# If the topic of the text is in the excluded list, return immediately.
if excluded_topics:
tps = topics(text)
if any([t in excluded_topics for t in tps]):
return errors
rx = "|".join(regex.format(w) for w in list)
for m in re.finditer(rx, text, flags=flags):
txt = m.group(0).strip()
errors.append((
m.start() + 1 + offset,
m.end() + offset,
err,
msg.format(txt),
None))
errors = truncate_to_max(errors, max_errors)
return errors | def existence_check(text, list, err, msg, ignore_case=True,
str=False, max_errors=float("inf"), offset=0,
require_padding=True, dotall=False,
excluded_topics=None, join=False) | Build a checker that blacklists certain words. | 3.212716 | 3.147774 | 1.020631 |
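A hedged usage sketch, assuming proselint is installed; with the default `require_padding=True` the blacklisted pattern must be bounded by non-word characters:

```python
from proselint.tools import existence_check

errors = existence_check(
    "That is very unique.", ["very unique"],
    "example.existence", "'{}' is redundant.")
print(errors)
```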
if len(errors) > max_errors:
start1, end1, err1, msg1, replacements = errors[0]
if len(errors) == (max_errors + 1):
msg1 += " Found once elsewhere."
else:
msg1 += " Found {} times elsewhere.".format(len(errors))
errors = errors[1:max_errors]
errors = [(start1, end1, err1, msg1, replacements)] + errors
return errors | def truncate_to_max(errors, max_errors) | If max_errors was specified, truncate the list of errors.
Give the total number of times that the error was found elsewhere. | 3.632921 | 3.471447 | 1.046515 |
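A standalone demo of the truncation rule (function repeated verbatim). Note that the "elsewhere" count reports the full list length rather than the number of hidden errors, which is the behaviour of the code above:

```python
def truncate_to_max(errors, max_errors):
    if len(errors) > max_errors:
        start1, end1, err1, msg1, replacements = errors[0]
        if len(errors) == (max_errors + 1):
            msg1 += " Found once elsewhere."
        else:
            msg1 += " Found {} times elsewhere.".format(len(errors))
        errors = errors[1:max_errors]
        errors = [(start1, end1, err1, msg1, replacements)] + errors
    return errors

errs = [(i, i + 1, "err", "msg", None) for i in range(5)]
print(truncate_to_max(errs, 2))
# [(0, 1, 'err', 'msg Found 5 times elsewhere.', None), (1, 2, 'err', 'msg', None)]
```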
def matching(quotemark1, quotemark2):
straight = '\"\''
curly = '“”'
if quotemark1 in straight and quotemark2 in straight:
return True
if quotemark1 in curly and quotemark2 in curly:
return True
else:
return False
def find_ranges(text):
s = 0
q = pc = ''
start = None
ranges = []
seps = " .,:;-\r\n"
quotes = ['\"', '“', '”', "'"]
for i, c in enumerate(text + "\n"):
if s == 0 and c in quotes and pc in seps:
start = i
s = 1
q = c
elif s == 1 and matching(c, q):
s = 2
elif s == 2:
if c in seps:
ranges.append((start+1, i-1))
start = None
s = 0
else:
s = 1
pc = c
return ranges
def position_in_ranges(ranges, position):
for start, end in ranges:
if start <= position < end:
return True
return False
return position_in_ranges(find_ranges(text), position) | def is_quoted(position, text) | Determine if the position in the text falls within a quote. | 3.088852 | 3.009079 | 1.026511 |
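A hedged usage sketch, assuming proselint is installed; the state machine only opens a quote after a separator and closes the range at the next separator:

```python
from proselint.tools import is_quoted

text = 'He said "very unique" with a smile.'
print(is_quoted(text.index("very"), text))  # True: inside the quoted span
print(is_quoted(0, text))                   # False
```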
keywords = [
"50 Cent",
"rap",
"hip hop",
"Curtis James Jackson III",
"Curtis Jackson",
"Eminem",
"Dre",
"Get Rich or Die Tryin'",
"G-Unit",
"Street King Immortal",
"In da Club",
"Interscope",
]
num_keywords = sum(word in text for word in keywords)
return ("50 Cent", float(num_keywords > 2)) | def detector_50_Cent(text) | Determine whether 50 Cent is a topic. | 12.98959 | 12.185721 | 1.065968 |
detectors = [
detector_50_Cent
]
ts = []
for detector in detectors:
ts.append(detector(text))
return [t[0] for t in ts if t[1] > 0.95] | def topics(text) | Return a list of topics. | 6.962973 | 7.310993 | 0.952398 |
err = "pinker.metadiscourse"
msg = "Excessive metadiscourse."
metadiscourse = [
"The preceeding discussion",
"The rest of this article",
"This chapter discusses",
"The preceding paragraph demonstrated",
"The previous section analyzed",
]
return existence_check(text, metadiscourse, err, msg) | def check(text) | Suggest the preferred forms. | 16.347567 | 16.372135 | 0.998499 |
err = "cursing.filth"
msg = u"'{}' is profanity."  # placeholder: the original message string was stripped from this dump
list = [
"shit",
"piss",
"fuck",
"cunt",
"cocksucker",
"motherfucker",
"tits",
"fart",
"turd",
"twat",
]
return existence_check(text, list, err, msg) | def check(text) | Check the text. | 8.804914 | 8.747531 | 1.00656 |
err = "oxymorons.misc"
msg = u"'{}' is an oxymoron."
oxymorons = [
"amateur expert",
"increasingly less",
"advancing backwards?",
"alludes explicitly to",
"explicitly alludes to",
"totally obsolescent",
"completely obsolescent",
"generally always",
"usually always",
"increasingly less",
"build down",
"conspicuous absence",
"exact estimate",
"found missing",
"intense apathy",
"mandatory choice",
"nonworking mother",
"organized mess",
# "pretty ugly",
# "sure bet",
# "executive secretary",
]
return existence_check(text, oxymorons, err, msg, offset=1, join=True) | def check(text) | Check the text. | 15.266353 | 15.292411 | 0.998296 |
err = "misc.waxed"
msg = u"The modifier following 'waxed' must be an adj.: '{}' is correct"
waxes = ["wax", "waxes", "waxed", "waxing"]
modifiers = [("ebullient", "ebulliently"),
("ecstatic", "ecstatically"),
("eloquent", "eloquently"),
("enthusiastic", "enthusiastically"),
("euphoric", "euphorically"),
("indignant", "indignantly"),
("lyrical", "lyrically"),
("melancholic", "melancholically"),
("metaphorical", "metaphorically"),
("nostalgic", "nostalgically"),
("patriotic", "patriotically"),
("philosophical", "philosophically"),
("poetic", "poetically"),
("rhapsodic", "rhapsodically"),
("romantic", "romantically"),
("sentimental", "sentimentally")
]
def pairs(word):
return [[word + ' ' + pair[0], [word + ' ' + pair[1]]]
for pair in modifiers]
preferred = []
for word in waxes:
preferred += pairs(word)
return preferred_forms_check(text, preferred, err, msg) | def check(text) | Suggest the preferred forms. | 3.761364 | 3.590971 | 1.04745 |
err = "lexical_illusions.misc"
msg = u"There's a lexical illusion here: a word is repeated."
list = [
"the\sthe",
"am\sam",
"has\shas"
]
return existence_check(text, list, err, msg) | def check(text) | Check the text. | 14.467398 | 15.041242 | 0.961849 |
err = "security.password"
msg = u"Don't put passwords in plain text."
pwd_regex = r"[:]? [\S]{6,30}"
password = [
"the password is{}".format(pwd_regex),
"my password is{}".format(pwd_regex),
"the password's{}".format(pwd_regex),
"my password's{}".format(pwd_regex),
"^[pP]assword{}".format(pwd_regex),
]
return existence_check(text, password, err, msg) | def check(text) | Check the text. | 7.086167 | 7.081383 | 1.000676 |
err = "typography.symbols.ellipsis"
msg = u"'...' is an approximation, use the ellipsis symbol '…'."
regex = "\.\.\."
return existence_check(text, [regex], err, msg, max_errors=3,
require_padding=False, offset=0) | def check_ellipsis(text) | Use an ellipsis instead of three dots. | 15.979132 | 14.775466 | 1.081464 |
err = "typography.symbols.copyright"
msg = u"(c) is a goofy alphabetic approximation, use the symbol ©."
regex = "\([cC]\)"
return existence_check(
text, [regex], err, msg, max_errors=1, require_padding=False) | def check_copyright_symbol(text) | Use the copyright symbol instead of (c). | 22.502985 | 18.839739 | 1.194443 |
err = "typography.symbols.trademark"
msg = u"(TM) is a goofy alphabetic approximation, use the symbol ™."
regex = "\(TM\)"
return existence_check(
text, [regex], err, msg, max_errors=3, require_padding=False) | def check_trademark_symbol(text) | Use the trademark symbol instead of (TM). | 24.333681 | 19.14217 | 1.271208 |
err = "typography.symbols.trademark"
msg = u"(R) is a goofy alphabetic approximation, use the symbol ®."
regex = "\([rR]\)"
return existence_check(
text, [regex], err, msg, max_errors=3, require_padding=False) | def check_registered_trademark_symbol(text) | Use the registered trademark symbol instead of (R). | 28.313541 | 20.226099 | 1.399852 |
err = "typography.symbols.sentence_spacing"
msg = u"More than two spaces after the period; use 1 or 2."
regex = "\. {3}"
return existence_check(
text, [regex], err, msg, max_errors=3, require_padding=False) | def check_sentence_spacing(text) | Use no more than two spaces after a period. | 14.230841 | 10.966174 | 1.297703 |
err = "typography.symbols.multiplication_symbol"
msg = u"Use the multiplication symbol ×, not the letter x."
regex = "[0-9]+ ?x ?[0-9]+"
return existence_check(
text, [regex], err, msg, max_errors=3, require_padding=False) | def check_multiplication_symbol(text) | Use the multiplication symbol ×, not the lowercase letter x. | 13.698888 | 9.056939 | 1.51253 |
err = "typography.symbols.curly_quotes"
msg = u'Use curly quotes “”, not straight quotes "".'
list = [
[u"“ or ”", ['"']],
]
return preferred_forms_check(
text, list, err, msg, ignore_case=False, max_errors=2) | def check_curly_quotes(text) | Use curly quotes, not straight quotes. | 14.867027 | 11.745075 | 1.265809 |
err = "strunk_white.greylist"
msg = "Use of '{}'. {}"
bad_words = [
"obviously",
"utilize"
]
explanations = {
"obviously":
"This is obviously an inadvisable word to use.",
"utilize":
r"Do you know anyone who *needs* to utilize the word utilize?"
}
errors = []
for word in bad_words:
occ = [m for m in re.finditer(word, text.lower())]
for o in occ:
errors.append((
o.start(),
o.end(),
err,
msg.format(word, explanations[word]),
None))
return errors | def check(text) | Check the text. | 7.43518 | 7.468659 | 0.995517 |
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login(user, password)
g = gmail.login(user, password)
# Check for unread messages.
unread = g.inbox().mail(unread=True)
# Submit a job to lint each email sent to [email protected]. Record the
# resulting job_ids somewhere (in Redis, I suppose), keyed by a hash of the
# email.
for u in unread:
u.fetch()
signature = (u.fr.decode('utf-8') +
u.subject.decode('utf-8') +
u.body.decode('utf-8'))
hash = hashlib.sha256(signature.encode('utf-8')).hexdigest()
if user_to in u.to or user_to in u.headers.get('Cc', []):
job_id = conn.get(hash)
if not job_id:
# If the email hasn't been sent for processing, send it.
r = requests.post(api_url, data={"text": u.body})
conn.set(hash, r.json()["job_id"])
print("Email {} sent for processing.".format(hash))
else:
# Otherwise, check whether the results are ready, and if so,
# reply with them.
r = requests.get(api_url, params={"job_id": job_id})
if r.json()["status"] == "success":
reply = quoted(u.body)
errors = r.json()['data']['errors']
reply += "\r\n\r\n".join([json.dumps(e) for e in errors])
msg = MIMEMultipart()
msg["From"] = "{} <{}>".format(name, user)
msg["To"] = u.fr
msg["Subject"] = "Re: " + u.subject
if u.headers.get('Message-ID'):
msg.add_header("In-Reply-To", u.headers['Message-ID'])
msg.add_header("References", u.headers['Message-ID'])
body = reply + "\r\n\r\n--\r\n" + tagline + "\r\n" + url
msg.attach(MIMEText(body, "plain"))
text = msg.as_string()
server.sendmail(user, u.fr, text)
# Mark the email as read.
u.read()
u.archive()
print("Email {} has been replied to.".format(hash)) | def check_email() | Check the mail account and lint new mail. | 3.278801 | 3.252472 | 1.008095 |
err = "spelling.athletes"
msg = "Misspelling of athlete's name. '{}' is the preferred form."
misspellings = [
["Dwyane Wade", ["Dwayne Wade"]],
["Miikka Kiprusoff", ["Mikka Kiprusoff"]],
["Mark Buehrle", ["Mark Buerhle"]],
["Skylar Diggins", ["Skyler Diggins"]],
["Agnieszka Radwanska", ["Agnieska Radwanska"]],
["J.J. Redick", ["J.J. Reddick"]],
["Manny Pacquiao", ["Manny Packquaio"]],
["Antawn Jamison", ["Antwan Jamison"]],
["Cal Ripken", ["Cal Ripkin"]],
["Jhonny Peralta", ["Johnny Peralta"]],
["Monta Ellis", ["Monte Ellis"]],
["Alex Rodriguez", ["Alex Rodriquez"]],
["Mark Teixeira", ["Mark Texeira"]],
["Brett Favre", ["Brett Farve"]],
["Torii Hunter", ["Tori Hunter"]],
["Stephen Curry", ["Stephon Curry"]],
["Mike Krzyzewski", ["Mike Kryzewski"]],
]
return preferred_forms_check(text, misspellings, err, msg) | def check(text) | Suggest the preferred forms. | 8.690406 | 8.310693 | 1.04569 |
err = "leonard.hell"
msg = u"Never use the words 'all hell broke loose'."
regex = r"all hell broke loose"
return existence_check(
text, [regex], err, msg, max_errors=1) | def check_repeated_exclamations(text) | Check the text. | 22.855257 | 23.024647 | 0.992643 |
err = "cliches.garner"
msg = u"'{}' is cliché."
cliches = [
"a fate worse than death",
"alas and alack",
"at the end of the day",
"bald-faced lie",
"between a rock and a hard place",
"between Scylla and Charybdis",
"between the devil and the deep blue sea",
"betwixt and between",
"blissful ignorance",
"blow a fuse",
"bulk large",
"but that's another story",
"cast aspersions",
"chase a red herring",
"comparing apples and oranges",
"compleat",
"conspicuous by its absence",
"crystal clear",
"cutting edge",
"decision-making process",
"dubious distinction",
"duly authorized",
"eyes peeled",
"far be it from me",
"fast and loose",
"fills the bill",
"first and foremost",
"for free",
"get with the program",
"gilding the lily",
"have a short fuse",
"he's got his hands full",
"his own worst enemy",
"his work cut out for him",
"hither and yon",
"Hobson's choice",
"horns of a dilemma",
"if you catch my drift",
"in light of",
"in the final analysis",
"in the last analysis",
"innocent bystander",
"it's not what you know, it's who you know",
"last but not least",
"make a mockery of",
"male chauvinism",
"moment of truth",
"more in sorrow than in anger",
"more sinned against than sinning",
"my better half",
"nip in the bud",
"olden days",
"on the same page",
"presidential timber",
"pulled no punches",
"quantum jump",
"quantum leap",
"redound to one's credit",
"redound to the benefit of",
"sea change",
"shirked his duties",
"six of one, half a dozen of the other",
"stretched to the breaking point",
"than you can shake a stick at",
"the cream of the crop",
"the cream rises to the top",
"the straw that broke the camel's back",
"thick as thieves",
"thinking outside the box",
"thought leaders?",
"throw the baby out with the bathwater",
"various and sundry",
"viable alternative",
"wax eloquent",
"wax poetic",
"we've got a situation here",
"whet (?:the|your) appetite",
"wool pulled over our eyes",
"writ large",
]
return existence_check(text, cliches, err, msg, join=True) | def check_cliches_garner(text) | Check the text.
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY | 12.427691 | 12.565315 | 0.989047 |
err = "cliches.gnu_diction"
msg = u"'{}' is a cliché."
list = [
"a matter of concern",
"all things being equal",
"as a last resort",
"attached hereto",
"by no means",
"conspicuous by its absence",
"easier said than done",
"enclosed herewith",
"if and when",
"in reference to",
"in short supply",
"in the foreseeable future",
"in the long run",
"in the matter of",
"it stands to reason",
"many and diverse",
"on the right track",
"par for the course",
"please feel free to",
"pursuant to your request",
"regarding the matter of",
"slowly but surely",
"this will acknowledge",
"we are pleased to advice",
"we regret to inform you",
"we wish to state",
"you are hereby advised that",
]
return existence_check(text, list, err, msg, join=True, ignore_case=True) | def check_cliches_gnu_diction(text) | Check the text.
source: GNU diction
source_url: https://directory.fsf.org/wiki/Diction | 9.317133 | 9.650129 | 0.965493 |
err = "airlinese.misc"
msg = u"'{}' is airlinese."
airlinese = [
"enplan(?:e|ed|ing|ement)",
"deplan(?:e|ed|ing|ement)",
"taking off momentarily",
]
return existence_check(text, airlinese, err, msg) | def check(text) | Check the text. | 16.3535 | 16.553671 | 0.987908 |
err = "misc.currency"
msg = u"Incorrect use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return existence_check(text, symbols, err, msg) | def check(text) | Check the text. | 21.980408 | 23.396612 | 0.93947 |
err = "mixed_metaphors.misc.bottleneck"
msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
list = [
"biggest bottleneck",
"big bottleneck",
"large bottleneck",
"largest bottleneck",
"world-wide bottleneck",
"huge bottleneck",
"massive bottleneck",
]
return existence_check(text, list, err, msg, max_errors=1) | def check_bottleneck(text) | Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61 | 10.257853 | 8.547105 | 1.200155 |
err = "mixed_metaphors.misc.misc"
msg = u"Mixed metaphor. Try '{}'."
preferences = [
["cream rises to the top", ["cream rises to the crop"]],
["fasten your seatbelts", ["button your seatbelts"]],
["a minute to decompress", ["a minute to decompose"]],
["sharpest tool in the shed", ["sharpest marble in the (shed|box)"]],
["not rocket science", ["not rocket surgery"]],
]
return preferred_forms_check(text, preferences, err, msg) | def check_misc(text) | Avoid mixing metaphors.
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY | 17.718884 | 16.154474 | 1.096841 |
if self.repo.is_dirty():
raise DirtyGitRepositoryError(self.repo.untracked_files)
revisions = []
for commit in self.repo.iter_commits(
self.current_branch, max_count=max_revisions
):
rev = Revision(
key=commit.name_rev.split(" ")[0],
author_name=commit.author.name,
author_email=commit.author.email,
date=commit.committed_date,
message=commit.message,
)
revisions.append(rev)
return revisions | def revisions(self, path, max_revisions) | Get the list of revisions.
:param path: the path to target.
:type path: ``str``
:param max_revisions: the maximum number of revisions.
:type max_revisions: ``int``
:return: A list of revisions.
:rtype: ``list`` of :class:`Revision` | 2.583688 | 2.645851 | 0.976505 |
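A hedged sketch of the underlying GitPython calls, assuming GitPython is installed and the working directory is a git repository:

```python
from git import Repo

repo = Repo(".")
for commit in repo.iter_commits(repo.active_branch, max_count=3):
    # name_rev is "<sha> <ref>"; the first field is the revision key used above.
    print(commit.name_rev.split(" ")[0], commit.author.name, commit.committed_date)
```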
rev = revision.key
self.repo.git.checkout(rev) | def checkout(self, revision, options) | Checkout a specific revision.
:param revision: The revision identifier.
:type revision: :class:`Revision`
:param options: Any additional options.
:type options: ``dict`` | 11.129381 | 25.272753 | 0.440371 |
self.repo.git.checkout(self.current_branch)
self.repo.close() | def finish(self) | Clean up any state if processing completed/failed.
For git, will checkout HEAD on the original branch when finishing | 7.516054 | 5.109303 | 1.471053 |
logger.debug(f"Generating cache for {path}")
sha = hashlib.sha1(str(path).encode()).hexdigest()[:9]
HOME = pathlib.Path.home()
cache_path = str(HOME / ".wily" / sha)
logger.debug(f"Cache path is {cache_path}")
return cache_path | def generate_cache_path(path) | Generate a reusable path to cache results.
Will use the --path of the target and hash into
a 9-character directory within the HOME folder.
:return: The cache path
:rtype: ``str`` | 3.559858 | 3.373893 | 1.055119 |
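The scheme is deterministic, so the same target path always maps to the same cache directory. A standalone re-run of the hashing:

```python
import hashlib
import pathlib

path = "src/my_project"  # illustrative target path
sha = hashlib.sha1(str(path).encode()).hexdigest()[:9]
print(pathlib.Path.home() / ".wily" / sha)
```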
if not pathlib.Path(config_path).exists():
logger.debug(f"Could not locate {config_path}, using default config.")
return DEFAULT_CONFIG
config = configparser.ConfigParser(default_section=DEFAULT_CONFIG_SECTION)
config.read(config_path)
operators = config.get(
section=DEFAULT_CONFIG_SECTION, option="operators", fallback=DEFAULT_OPERATORS
)
archiver = config.get(
section=DEFAULT_CONFIG_SECTION, option="archiver", fallback=DEFAULT_ARCHIVER
)
path = config.get(section=DEFAULT_CONFIG_SECTION, option="path", fallback=".")
max_revisions = int(
config.get(
section=DEFAULT_CONFIG_SECTION,
option="max_revisions",
fallback=DEFAULT_MAX_REVISIONS,
)
)
return WilyConfig(
operators=operators, archiver=archiver, path=path, max_revisions=max_revisions
) | def load(config_path=DEFAULT_CONFIG_PATH) | Load config file and set values to defaults where no present.
:return: The configuration ``WilyConfig``
:rtype: :class:`wily.config.WilyConfig` | 2.166518 | 2.154255 | 1.005693 |
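A standalone demo of the `fallback=` behaviour the loader relies on; the section name "wily" is illustrative here (the real name lives in `DEFAULT_CONFIG_SECTION`):

```python
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[wily]\noperators = raw,cyclomatic\n")
print(cfg.get("wily", "operators", fallback="<defaults>"))  # raw,cyclomatic
print(cfg.get("wily", "max_revisions", fallback="50"))      # 50 (fallback used)
```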
config.targets = files
files = list(files)
state = State(config)
last_revision = state.index[state.default_archiver].revisions[0]
# Convert the list of metrics to a list of metric instances
operators = {resolve_operator(metric.split(".")[0]) for metric in metrics}
metrics = [(metric.split(".")[0], resolve_metric(metric)) for metric in metrics]
data = {}
results = []
# Build a set of operators
_operators = [operator.cls(config) for operator in operators]
cwd = os.getcwd()
os.chdir(config.path)
for operator in _operators:
logger.debug(f"Running {operator.name} operator")
data[operator.name] = operator.run(None, config)
os.chdir(cwd)
# Write a summary table.
extra = []
for operator, metric in metrics:
if detail and resolve_operator(operator).level == OperatorLevel.Object:
for file in files:
try:
extra.extend(
[
f"{file}:{k}"
for k in data[operator][file].keys()
if k != metric.name
and isinstance(data[operator][file][k], dict)
]
)
except KeyError:
logger.debug(f"File {file} not in cache")
logger.debug("Cache follows -- ")
logger.debug(data[operator])
files.extend(extra)
logger.debug(files)
for file in files:
metrics_data = []
has_changes = False
for operator, metric in metrics:
try:
current = last_revision.get(
config, state.default_archiver, operator, file, metric.name
)
except KeyError:
current = "-"
try:
new = get_metric(data, operator, file, metric.name)
except KeyError:
new = "-"
if new != current:
has_changes = True
if metric.type in (int, float) and new != "-" and current != "-":
if current > new:
metrics_data.append(
"{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
current, new, BAD_COLORS[metric.measure]
)
)
elif current < new:
metrics_data.append(
"{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
current, new, GOOD_COLORS[metric.measure]
)
)
else:
metrics_data.append("{0:n} -> {1:n}".format(current, new))
else:
if current == "-" and new == "-":
metrics_data.append("-")
else:
metrics_data.append("{0} -> {1}".format(current, new))
if has_changes or not changes_only:
results.append((file, *metrics_data))
else:
logger.debug(metrics_data)
descriptions = [metric.description for operator, metric in metrics]
headers = ("File", *descriptions)
if len(results) > 0:
print(
# But it still makes more sense to show the newest at the top, so reverse again
tabulate.tabulate(
headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE
)
) | def diff(config, files, metrics, changes_only=True, detail=True) | Show the differences in metrics for each of the files.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param files: The files to compare.
:type files: ``list`` of ``str``
:param metrics: The metrics to measure.
:type metrics: ``list`` of ``str``
:param changes_only: Only include changes files in output.
:type changes_only: ``bool``
:param detail: Show details (function-level)
:type detail: ``bool`` | 3.290435 | 3.338566 | 0.985584 |
ctx.ensure_object(dict)
ctx.obj["DEBUG"] = debug
if debug:
logger.setLevel("DEBUG")
else:
logger.setLevel("INFO")
ctx.obj["CONFIG"] = load_config(config)
if path:
logger.debug(f"Fixing path to {path}")
ctx.obj["CONFIG"].path = path
if cache:
logger.debug(f"Fixing cache to {cache}")
ctx.obj["CONFIG"].cache_path = cache
logger.debug(f"Loaded configuration from {config}") | def cli(ctx, debug, config, path, cache) | \U0001F98A Inspect and search through the complexity of your source code.
To get started, run setup:
$ wily setup
To reindex any changes in your source code:
$ wily build <src>
Then explore basic metrics with:
$ wily report <file>
You can also graph specific metrics in a browser with:
$ wily graph <file> <metric> | 2.182263 | 2.620979 | 0.832614 |
config = ctx.obj["CONFIG"]
from wily.commands.build import build
if max_revisions:
logger.debug(f"Fixing revisions to {max_revisions}")
config.max_revisions = max_revisions
if operators:
logger.debug(f"Fixing operators to {operators}")
config.operators = operators.strip().split(",")
if archiver:
logger.debug(f"Fixing archiver to {archiver}")
config.archiver = archiver
if targets:
logger.debug(f"Fixing targets to {targets}")
config.targets = targets
build(
config=config,
archiver=resolve_archiver(config.archiver),
operators=resolve_operators(config.operators),
)
logger.info(
"Completed building wily history, run `wily report <file>` or `wily index` to see more."
) | def build(ctx, max_revisions, targets, operators, archiver) | Build the wily cache. | 3.015074 | 2.787972 | 1.081458 |
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
from wily.commands.index import index
index(config=config, include_message=message) | def index(ctx, message) | Show the history archive in the .wily/ folder. | 9.646467 | 7.592724 | 1.270488 |
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
if not metrics:
metrics = get_default_metrics(config)
logger.info(f"Using default metrics {metrics}")
new_output = Path().cwd()
if output:
new_output = new_output / Path(output)
else:
new_output = new_output / "wily_report" / "index.html"
from wily.commands.report import report
logger.debug(f"Running report on {file} for metric {metrics}")
logger.debug(f"Output format is {format}")
report(
config=config,
path=file,
metrics=metrics,
n=number,
output=new_output,
include_message=message,
format=ReportFormat[format],
console_format=console_format,
) | def report(ctx, file, metrics, number, message, format, console_format, output) | Show metrics for a given file. | 3.662779 | 3.647619 | 1.004156 |
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
if not metrics:
metrics = get_default_metrics(config)
logger.info(f"Using default metrics {metrics}")
else:
metrics = metrics.split(",")
logger.info(f"Using specified metrics {metrics}")
from wily.commands.diff import diff
logger.debug(f"Running diff on {files} for metric {metrics}")
diff(
config=config, files=files, metrics=metrics, changes_only=not all, detail=detail
) | def diff(ctx, files, metrics, all, detail) | Show the differences in metrics for each file. | 3.700631 | 3.718691 | 0.995143 |
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
from wily.commands.graph import graph
logger.debug(f"Running report on {path} for metrics {metrics}")
graph(
config=config,
path=path,
metrics=metrics,
output=output,
x_axis=x_axis,
changes=changes,
) | def graph(ctx, path, metrics, output, x_axis, changes) | Graph a specific metric for a given file, if a path is given, all files within path will be graphed.
Some common examples:
Graph all .py files within src/ for the raw.loc metric
$ wily graph src/ raw.loc
Graph test.py against raw.loc and cyclomatic.complexity metrics
$ wily graph src/test.py raw.loc cyclomatic.complexity
Graph test.py against raw.loc and raw.sloc on the x-axis
$ wily graph src/test.py raw.loc --x-axis raw.sloc | 4.040491 | 4.747622 | 0.851056 |
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
if not yes:
p = input("Are you sure you want to delete wily cache? [y/N]")
if p.lower() != "y":
exit(0)
from wily.cache import clean
clean(config) | def clean(ctx, yes) | Clear the .wily/ folder. | 4.527804 | 4.001426 | 1.131548 |
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
from wily.commands.list_metrics import list_metrics
list_metrics() | def list_metrics(ctx) | List the available metrics. | 8.521544 | 8.10871 | 1.050912 |
logger.error(
"Could not locate the wily cache; the cache is required to provide insights."
)
p = input("Do you want to run setup and index your project now? [y/N]")
if p.lower() != "y":
exit(1)
else:
revisions = input("How many previous git revisions do you want to index? : ")
revisions = int(revisions)
path = input("Path to your source files; comma-separated for multiple: ")
paths = path.split(",")
context.invoke(
build,
max_revisions=revisions,
targets=paths,
operators=None,
) | def handle_no_cache(context) | Handle lack-of-cache error, prompt user for index process. | 8.133601 | 7.286697 | 1.116226 |
operator, met = resolve_metric_as_tuple(metric)
return operator.name, met.name | def metric_parts(metric) | Convert a metric name into the operator and metric names. | 16.188152 | 9.259213 | 1.748329 |
logger.debug("Running report command")
data = []
state = State(config)
abs_path = config.path / pathlib.Path(path)
if x_axis is None:
x_axis = "history"
else:
x_operator, x_key = metric_parts(x_axis)
if abs_path.is_dir():
paths = [
p.relative_to(config.path) for p in pathlib.Path(abs_path).glob("**/*.py")
]
else:
paths = [path]
operator, key = metric_parts(metrics[0])
if len(metrics) == 1: # only y-axis
z_axis = None
else:
z_axis = resolve_metric(metrics[1])
z_operator, z_key = metric_parts(metrics[1])
for path in paths:
x = []
y = []
z = []
labels = []
last_y = None
for rev in state.index[state.default_archiver].revisions:
labels.append(f"{rev.revision.author_name} <br>{rev.revision.message}")
try:
val = rev.get(config, state.default_archiver, operator, str(path), key)
if val != last_y or not changes:
y.append(val)
if z_axis:
z.append(
rev.get(
config,
state.default_archiver,
z_operator,
str(path),
z_key,
)
)
if x_axis == "history":
x.append(format_datetime(rev.revision.date))
else:
x.append(
rev.get(
config,
state.default_archiver,
x_operator,
str(path),
x_key,
)
)
last_y = val
except KeyError:
# missing data
pass
# Create traces
trace = go.Scatter(
x=x,
y=y,
mode="lines+markers+text" if text else "lines+markers",
name=f"{path}",
ids=state.index[state.default_archiver].revision_keys,
text=labels,
marker=dict(
size=0 if z_axis is None else z,
color=list(range(len(y))),
# colorscale='Viridis',
),
xcalendar="gregorian",
hoveron="points+fills",
)
data.append(trace)
if output:
filename = output
auto_open = False
else:
filename = "wily-report.html"
auto_open = True
y_metric = resolve_metric(metrics[0])
title = f"{x_axis.capitalize()} of {y_metric.description} for {path}"
plotly.offline.plot(
{
"data": data,
"layout": go.Layout(
title=title,
xaxis={"title": x_axis},
yaxis={"title": y_metric.description},
),
},
auto_open=auto_open,
filename=filename,
) | def graph(config, path, metrics, output=None, x_axis=None, changes=True, text=False) | Graph information about the cache and runtime.
:param config: The configuration.
:type config: :class:`wily.config.WilyConfig`
:param path: The path to the files.
:type path: ``list``
:param metrics: The Y and Z-axis metrics to report on.
:type metrics: ``tuple``
:param output: Save report to specified path instead of opening browser.
:type output: ``str`` | 2.780478 | 2.765265 | 1.005501 |
for name, operator in ALL_OPERATORS.items():
print(f"{name} operator:")
if len(operator.cls.metrics) > 0:
print(
tabulate.tabulate(
headers=("Name", "Description", "Type"),
tabular_data=operator.cls.metrics,
tablefmt=DEFAULT_GRID_STYLE,
)
) | def list_metrics() | List metrics available. | 4.403189 | 4.299711 | 1.024066 |
state = State(config=config)
logger.debug("Running show command")
logger.info("--------Configuration---------")
logger.info(f"Path: {config.path}")
logger.info(f"Archiver: {config.archiver}")
logger.info(f"Operators: {config.operators}")
logger.info("")
logger.info("-----------History------------")
data = []
for archiver in state.archivers:
for rev in state.index[archiver].revisions:
if include_message:
data.append(
(
format_revision(rev.revision.key),
rev.revision.author_name,
rev.revision.message[:MAX_MESSAGE_WIDTH],
format_date(rev.revision.date),
)
)
else:
data.append(
(
format_revision(rev.revision.key),
rev.revision.author_name,
format_date(rev.revision.date),
)
)
if include_message:
headers = ("Revision", "Author", "Message", "Date")
else:
headers = ("Revision", "Author", "Date")
print(
tabulate.tabulate(
headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
)
) | def index(config, include_message=False) | Show information about the cache and runtime.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param include_message: Include revision messages
:type include_message: ``bool`` | 2.750752 | 2.810338 | 0.978798 |
instance = operator.cls(config)
logger.debug(f"Running {operator.name} operator on {revision.key}")
return operator.name, instance.run(revision, config) | def run_operator(operator, revision, config) | Run an operator for the multiprocessing pool. Not called directly. | 5.95716 | 5.577712 | 1.068029 |
try:
logger.debug(f"Using {archiver.name} archiver module")
archiver = archiver.cls(config)
revisions = archiver.revisions(config.path, config.max_revisions)
except InvalidGitRepositoryError:
# TODO: This logic shouldn't really be here (SoC)
logger.info(f"Defaulting back to the filesystem archiver, not a valid git repo")
archiver = FilesystemArchiver(config)
revisions = archiver.revisions(config.path, config.max_revisions)
except Exception as e:
if hasattr(e, "message"):
logger.error(f"Failed to setup archiver: '{e.message}'")
else:
logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
exit(1)
state = State(config, archiver=archiver)
# Check for existence of cache, else provision
state.ensure_exists()
index = state.index[archiver.name]
# remove existing revisions from the list
revisions = [revision for revision in revisions if revision not in index]
logger.info(
f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'."
)
_op_desc = ",".join([operator.name for operator in operators])
logger.info(f"Running operators - {_op_desc}")
bar = Bar("Processing", max=len(revisions) * len(operators))
state.operators = operators
try:
with multiprocessing.Pool(processes=len(operators)) as pool:
for revision in revisions:
# Checkout target revision
archiver.checkout(revision, config.checkout_options)
stats = {"operator_data": {}}
# Run each operator as a separate process
data = pool.starmap(
run_operator,
[(operator, revision, config) for operator in operators],
)
# Map the data back into a dictionary
for operator_name, result in data:
# aggregate values to directories
roots = []
# find all unique directories in the results
for entry in result.keys():
parent = pathlib.Path(entry).parents[0]
if parent not in roots:
roots.append(parent)
for root in roots:
# find all matching entries recursively
aggregates = [
path
for path in result.keys()
if root in pathlib.Path(path).parents
]
result[str(root)] = {}
# aggregate values
for metric in resolve_operator(operator_name).cls.metrics:
func = metric.aggregate
values = [
result[aggregate][metric.name]
for aggregate in aggregates
if aggregate in result
and metric.name in result[aggregate]
]
if len(values) > 0:
result[str(root)][metric.name] = func(values)
stats["operator_data"][operator_name] = result
bar.next()
ir = index.add(revision, operators=operators)
ir.store(config, archiver, stats)
index.save()
bar.finish()
except Exception as e:
logger.error(f"Failed to build cache: '{e}'")
raise e
finally:
# Reset the archive after every run back to the head of the branch
archiver.finish() | def build(config, archiver, operators) | Build the history given a archiver and collection of operators.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param archiver: The archiver to use
:type archiver: :namedtuple:`wily.archivers.Archiver`
:param operators: The list of operators to execute
:type operators: `list` of :namedtuple:`wily.operators.Operator` | 3.937153 | 3.913995 | 1.005917 |
ir = IndexedRevision(
revision=revision, operators=[operator.name for operator in operators]
)
self._revisions[revision.key] = ir
return ir | def add(self, revision, operators) | Add a revision to the index.
:param revision: The revision.
:type revision: :class:`Revision` or :class:`LazyRevision` | 7.871447 | 11.073098 | 0.710862 |
data = [i.asdict() for i in self._revisions.values()]
logger.debug("Saving data")
cache.store_archiver_index(self.config, self.archiver, data) | def save(self) | Save the index data back to the wily cache. | 11.062715 | 8.084599 | 1.368369 |
if not cache.exists(self.config):
logger.debug("Wily cache not found, creating.")
cache.create(self.config)
logger.debug("Created wily cache")
else:
logger.debug(f"Cache {self.config.cache_path} exists") | def ensure_exists(self) | Ensure that cache directory exists. | 4.918933 | 4.469819 | 1.100477 |
mtime = os.path.getmtime(path)
key = hashlib.sha1(str(mtime).encode()).hexdigest()[:7]
return [
Revision(
key=key,
author_name="Local User", # Don't want to leak local data
author_email="-", # as above
date=int(mtime),
message="None",
)
] | def revisions(self, path, max_revisions) | Get the list of revisions.
:param path: the path to target.
:type path: ``str``
:param max_revisions: the maximum number of revisions.
:type max_revisions: ``int``
:return: A list of revisions.
:rtype: ``list`` of :class:`Revision` | 6.315225 | 7.034514 | 0.897749 |
doc = f.__doc__
f.__doc__ = "Version: " + __version__ + "\n\n" + doc
return f | def add_version(f) | Add the version of wily to the help heading.
:param f: function to decorate
:return: decorated function | 3.849117 | 4.331033 | 0.88873 |
if name.lower() in ALL_OPERATORS:
return ALL_OPERATORS[name.lower()]
else:
raise ValueError(f"Operator {name} not recognised.") | def resolve_operator(name) | Get the :namedtuple:`wily.operators.Operator` for a given name.
:param name: The name of the operator
:return: The operator type | 2.992232 | 3.796333 | 0.78819 |
if "." in metric:
_, metric = metric.split(".")
r = [
(operator, match) for operator, match in ALL_METRICS if match[0] == metric
]
if not r or len(r) == 0:
raise ValueError(f"Metric {metric} not recognised.")
else:
return r[0] | def resolve_metric_as_tuple(metric) | Resolve metric key to a given target.
:param metric: the metric name.
:type metric: ``str``
:rtype: :class:`Metric` | 4.246198 | 5.52992 | 0.767859 |
if ":" in path:
part, entry = path.split(":")
val = revision[operator][part][entry][key]
else:
val = revision[operator][path][key]
return val | def get_metric(revision, operator, path, key) | Get a metric from the cache.
:param revision: The revision id.
:type revision: ``str``
:param operator: The operator name.
:type operator: ``str``
:param path: The path to the file/function
:type path: ``str``
:param key: The key of the data
:type key: ``str``
:return: Data from the cache
:rtype: ``dict`` | 4.055954 | 5.51451 | 0.735506 |
logger.debug("Running halstead harvester")
results = {}
for filename, details in dict(self.harvester.results).items():
results[filename] = {}
for instance in details:
if isinstance(instance, list):
for item in instance:
function, report = item
results[filename][function] = self._report_to_dict(report)
else:
if isinstance(instance, str) and instance == "error":
logger.warning(
f"Failed to run Halstead harvester on {filename} : {details['error']}"
)
continue
results[filename] = self._report_to_dict(instance)
return results | def run(self, module, options) | Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict`` | 4.023992 | 4.162018 | 0.966837 |
exists = (
pathlib.Path(config.cache_path).exists()
and pathlib.Path(config.cache_path).is_dir()
)
if not exists:
return False
index_path = pathlib.Path(config.cache_path) / "index.json"
if index_path.exists():
with open(index_path, "r") as out:
index = json.load(out)
if index["version"] != __version__:
# TODO: Inspect the versions properly.
logger.warning(
"Wily cache is old, you may incur errors until you rebuild the cache."
)
else:
logger.warning(
"Wily cache was not versioned, you may incur errors until you rebuild the cache."
)
create_index(config)
return True | def exists(config) | Check whether the .wily/ directory exists.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:return: Whether the .wily directory exists
:rtype: ``boolean`` | 3.456569 | 3.600562 | 0.960008 |
filename = pathlib.Path(config.cache_path) / "index.json"
index = {"version": __version__}
with open(filename, "w") as out:
out.write(json.dumps(index, indent=2)) | def create_index(config) | Create the root index. | 3.408276 | 3.168769 | 1.075584 |
if exists(config):
logger.debug("Wily cache exists, skipping")
return config.cache_path
logger.debug(f"Creating wily cache {config.cache_path}")
pathlib.Path(config.cache_path).mkdir(parents=True, exist_ok=True)
create_index(config)
return config.cache_path | def create(config) | Create a wily cache.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:return: The path to the cache
:rtype: ``str`` | 3.733763 | 2.976923 | 1.254236 |
if not exists(config):
logger.debug("Wily cache does not exist, skipping")
return
shutil.rmtree(config.cache_path)
logger.debug("Deleted wily cache") | def clean(config) | Delete a wily cache.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig` | 6.457431 | 4.756239 | 1.357676 |
root = pathlib.Path(config.cache_path) / archiver.name
if not root.exists():
logger.debug("Creating wily cache")
root.mkdir()
# fix absolute path references.
if config.path != ".":
for operator, operator_data in list(stats["operator_data"].items()):
if operator_data:
new_operator_data = operator_data.copy()
for k, v in list(operator_data.items()):
new_key = os.path.relpath(str(k), str(config.path))
del new_operator_data[k]
new_operator_data[new_key] = v
del stats["operator_data"][operator]
stats["operator_data"][operator] = new_operator_data
logger.debug(f"Creating {revision.key} output")
filename = root / (revision.key + ".json")
if filename.exists():
raise RuntimeError(f"File {filename} already exists, index may be corrupt.")
with open(filename, "w") as out:
out.write(json.dumps(stats, indent=2))
return filename | def store(config, archiver, revision, stats) | Store a revision record within an archiver folder.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:param archiver: The name of the archiver type (e.g. 'git')
:type archiver: ``str``
:param revision: The revision ID
:type revision: ``str``
:param stats: The collected data
:type stats: ``dict``
:return: The absolute path to the created file
:rtype: ``str``
:rtype: `pathlib.Path` | 3.177238 | 3.116194 | 1.019589 |
root = pathlib.Path(config.cache_path) / archiver.name
if not root.exists():
root.mkdir()
logger.debug("Created archiver directory")
index = sorted(index, key=lambda k: k["date"], reverse=True)
filename = root / "index.json"
with open(filename, "w") as out:
out.write(json.dumps(index, indent=2))
logger.debug(f"Created index output")
return filename | def store_archiver_index(config, archiver, index) | Store an archiver's index record for faster search.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:param archiver: The name of the archiver type (e.g. 'git')
:type archiver: ``str``
:param index: The archiver index record
:type index: ``dict``
:rtype: `pathlib.Path` | 3.21403 | 3.426754 | 0.937922 |
root = pathlib.Path(config.cache_path)
result = []
for name in ALL_ARCHIVERS.keys():
if (root / name).exists():
result.append(name)
return result | def list_archivers(config) | List the names of archivers with data.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:return: A list of archiver names
:rtype: ``list`` of ``str`` | 4.184584 | 5.314587 | 0.787377 |
archivers = list_archivers(config)
default_metrics = []
for archiver in archivers:
index = get_archiver_index(config, archiver)
if len(index) == 0:
logger.warning("No records found in the index, no metrics available")
return []
operators = index[0]["operators"]
for operator in operators:
o = resolve_operator(operator)
if o.cls.default_metric_index is not None:
metric = o.cls.metrics[o.cls.default_metric_index]
default_metrics.append("{0}.{1}".format(o.cls.name, metric.name))
return default_metrics | def get_default_metrics(config) | Get the default metrics for a configuration.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:return: Return the list of default metrics in this index
:rtype: ``list`` of ``str`` | 3.70078 | 3.456563 | 1.070653 |
root = pathlib.Path(config.cache_path) / archiver / "index.json"
return root.exists() | def has_archiver_index(config, archiver) | Check if this archiver has an index file.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:param archiver: The name of the archiver type (e.g. 'git')
:type archiver: ``str``
:return: the exist
:rtype: ``bool`` | 5.647163 | 7.398861 | 0.763248 |
root = pathlib.Path(config.cache_path) / archiver
with (root / "index.json").open("r") as index_f:
index = json.load(index_f)
return index | def get_archiver_index(config, archiver) | Get the contents of the archiver index file.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:param archiver: The name of the archiver type (e.g. 'git')
:type archiver: ``str``
:return: The index data
:rtype: ``dict`` | 3.486579 | 4.152 | 0.839735 |
root = pathlib.Path(config.cache_path) / archiver
# TODO : string escaping!!!
with (root / f"{revision}.json").open("r") as rev_f:
index = json.load(rev_f)
return index | def get(config, archiver, revision) | Get the data for a given revision.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:param archiver: The name of the archiver type (e.g. 'git')
:type archiver: ``str``
:param revision: The revision ID
:type revision: ``str``
:return: The data record for that revision
:rtype: ``dict`` | 6.839008 | 8.101909 | 0.844123 |
c = Counter(data)
mode, freq = c.most_common(1)[0]
return mode | def mode(data) | Return the modal value of an iterable with discrete values.
If there is more than 1 modal value, arbitrarily return the first top n. | 3.956185 | 4.654023 | 0.850057 |
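`Counter.most_common(1)` returns a one-element list of `(value, count)` pairs, so ties resolve to whichever value the counter ranks first. A quick standalone check:

```python
from collections import Counter

def mode(data):
    c = Counter(data)
    value, freq = c.most_common(1)[0]
    return value

print(mode([1, 2, 2, 3]))  # 2
```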
logger.debug("Running maintainability harvester")
return dict(self.harvester.results) | def run(self, module, options) | Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict`` | 26.029112 | 32.493343 | 0.80106 |
logger.debug("Running CC harvester")
results = {}
for filename, details in dict(self.harvester.results).items():
results[filename] = {}
total = 0 # running CC total
for instance in details:
if isinstance(instance, Class):
i = self._dict_from_class(instance)
elif isinstance(instance, Function):
i = self._dict_from_function(instance)
else:
if isinstance(instance, str) and instance == "error":
logger.warning(
f"Failed to run CC harvester on {filename} : {details['error']}"
)
continue
else:
logger.warning(
f"Unexpected result from Radon : {instance} of {type(instance)}. Please report on Github."
)
continue
results[filename][i["fullname"]] = i
del i["fullname"]
total += i["complexity"]
results[filename]["complexity"] = total
return results | def run(self, module, options) | Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict`` | 4.52482 | 4.66524 | 0.969901 |
'''(INTERNAL) Split a line into a list of values'''
if not _RE_NONTRIVIAL_DATA.search(s):
# Fast path for trivial cases (unfortunately we have to handle missing
# values because of the empty string case :(.)
return [None if s in ('?', '') else s
for s in next(csv.reader([s]))]
# _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
if not any(errors):
return [_unquote(v) for v in values]
if _RE_SPARSE_LINE.match(s):
try:
return {int(k): _unquote(v)
for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
except ValueError as exc:
# an ARFF syntax error in sparse data
for match in _RE_SPARSE_KEY_VALUES.finditer(s):
if not match.group(1):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
else:
# an ARFF syntax error
for match in _RE_DENSE_VALUES.finditer(s):
if match.group(2):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error') | def _parse_values(s) | (INTERNAL) Split a line into a list of values | 5.091122 | 4.771013 | 1.067094 |
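The fast path above hands trivial lines to the `csv` module and maps `?` and the empty string to missing values; a standalone re-run of that branch:

```python
import csv

s = "sunny,?,85,,no"
values = [None if v in ('?', '') else v for v in next(csv.reader([s]))]
print(values)  # ['sunny', None, '85', None, 'no']
```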
'''Load a file-like object containing the ARFF document and convert it into
a Python object.
:param fp: a file-like object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
'''
decoder = ArffDecoder()
return decoder.decode(fp, encode_nominal=encode_nominal,
return_type=return_type) | def load(fp, encode_nominal=False, return_type=DENSE) | Load a file-like object containing the ARFF document and convert it into
a Python object.
:param fp: a file-like object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary. | 5.690259 | 1.501737 | 3.789117 |
'''Convert a string instance containing the ARFF document into a Python
object.
:param s: a string object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
'''
decoder = ArffDecoder()
return decoder.decode(s, encode_nominal=encode_nominal,
return_type=return_type) | def loads(s, encode_nominal=False, return_type=DENSE) | Convert a string instance containing the ARFF document into a Python
object.
:param s: a string object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary. | 6.123771 | 1.434709 | 4.2683 |
'''Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
'''
encoder = ArffEncoder()
generator = encoder.iter_encode(obj)
last_row = next(generator)
for row in generator:
fp.write(last_row + u'\n')
last_row = row
fp.write(last_row)
return fp | def dump(obj, fp) | Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object. | 4.691309 | 2.892247 | 1.622029 |
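A hedged usage sketch, assuming the liac-arff package (imported as `arff`) is installed:

```python
import io
import arff

obj = {
    'description': 'demo dataset',
    'relation': 'weather',
    'attributes': [('outlook', ['sunny', 'rain']), ('temperature', 'REAL')],
    'data': [['sunny', 85.0], ['rain', 70.0]],
}
buf = io.StringIO()
arff.dump(obj, buf)
print(buf.getvalue())
```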
'''(INTERNAL) Encodes a line of data.
Data instances follow the csv format, i.e, attribute values are
delimited by commas. After converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line.
'''
current_row = 0
for inst in data:
if len(inst) != len(attributes):
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, len(inst), len(attributes))
)
new_data = []
for value in inst:
if value is None or value == u'' or value != value:  # value != value is True only for NaN
s = '?'
else:
s = encode_string(unicode(value))
new_data.append(s)
current_row += 1
yield u','.join(new_data) | def encode_data(self, data, attributes) | (INTERNAL) Encodes a line of data.
Data instances follow the csv format, i.e, attribute values are
delimited by commas. After converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line. | 5.013484 | 2.404791 | 2.08479 |
'''(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
'''
_, v = s.split(' ', 1)
v = v.strip()
if not _RE_RELATION.match(v):
raise BadRelationFormat()
res = unicode(v.strip('"\''))
return res | def _decode_relation(self, s) | (INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name. | 6.340421 | 1.788747 | 3.544615 |
'''(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespace.
This method must receive a normalized string, i.e., a string with the
surrounding padding (including the "\r\n" characters) already stripped.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
'''
_, v = s.split(' ', 1)
v = v.strip()
# Verify the general structure of declaration
m = _RE_ATTRIBUTE.match(v)
if not m:
raise BadAttributeFormat()
# Extracts the raw name and type
name, type_ = m.groups()
# Extracts the final name
name = unicode(name.strip('"\''))
# Extracts the final type
if _RE_TYPE_NOMINAL.match(type_):
try:
type_ = _parse_values(type_.strip('{} '))
except Exception:
raise BadAttributeType()
if isinstance(type_, dict):
raise BadAttributeType()
else:
# If not nominal, verify the type name
type_ = unicode(type_).upper()
if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
raise BadAttributeType()
return (name, type_) | def _decode_attribute(self, s) | (INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespace.
This method must receive a normalized string, i.e., a string with the
surrounding padding (including the "\r\n" characters) already stripped.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES). | 5.174249 | 1.97145 | 2.62459 |
'''Do the work of ``decode``.'''
# Make sure this method is idempotent
self._current_line = 0
# If string, convert to a list of lines
if isinstance(s, basestring):
s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')
# Create the return object
obj = {
u'description': u'',
u'relation': u'',
u'attributes': [],
u'data': []
}
attribute_names = {}
# Create the data helper object
data = _get_data_object_for_decoding(matrix_type)
# Read all lines
STATE = _TK_DESCRIPTION
s = iter(s)
for row in s:
self._current_line += 1
# Ignore empty lines
row = row.strip(' \r\n')
if not row: continue
u_row = row.upper()
# DESCRIPTION -----------------------------------------------------
if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
obj['description'] += self._decode_comment(row) + '\n'
# -----------------------------------------------------------------
# RELATION --------------------------------------------------------
elif u_row.startswith(_TK_RELATION):
if STATE != _TK_DESCRIPTION:
raise BadLayout()
STATE = _TK_RELATION
obj['relation'] = self._decode_relation(row)
# -----------------------------------------------------------------
# ATTRIBUTE -------------------------------------------------------
elif u_row.startswith(_TK_ATTRIBUTE):
if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
raise BadLayout()
STATE = _TK_ATTRIBUTE
attr = self._decode_attribute(row)
if attr[0] in attribute_names:
raise BadAttributeName(attr[0], attribute_names[attr[0]])
else:
attribute_names[attr[0]] = self._current_line
obj['attributes'].append(attr)
if isinstance(attr[1], (list, tuple)):
if encode_nominal:
conversor = EncodedNominalConversor(attr[1])
else:
conversor = NominalConversor(attr[1])
else:
CONVERSOR_MAP = {'STRING': unicode,
'INTEGER': lambda x: int(float(x)),
'NUMERIC': float,
'REAL': float}
conversor = CONVERSOR_MAP[attr[1]]
self._conversors.append(conversor)
# -----------------------------------------------------------------
# DATA ------------------------------------------------------------
elif u_row.startswith(_TK_DATA):
if STATE != _TK_ATTRIBUTE:
raise BadLayout()
break
# -----------------------------------------------------------------
# COMMENT ---------------------------------------------------------
elif u_row.startswith(_TK_COMMENT):
pass
# -----------------------------------------------------------------
else:
# Never found @DATA
raise BadLayout()
def stream():
for row in s:
self._current_line += 1
row = row.strip()
# Ignore empty lines and comment lines.
if row and not row.startswith(_TK_COMMENT):
yield row
# Alter the data object
obj['data'] = data.decode_rows(stream(), self._conversors)
if obj['description'].endswith('\n'):
obj['description'] = obj['description'][:-1]
return obj | def _decode(self, s, encode_nominal=False, matrix_type=DENSE) | Do the work of ``decode``. | 3.028224 | 2.907727 | 1.04144 |
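The state machine above enforces the ARFF layout -- optional ``%`` description lines, then ``@RELATION``, then ``@ATTRIBUTE`` lines, then ``@DATA`` -- and raises ``BadLayout`` for anything out of order:
import arff

arff.loads('@RELATION ok\n@ATTRIBUTE x NUMERIC\n@DATA\n1.0')  # parses fine
try:
    arff.loads('@ATTRIBUTE x NUMERIC\n@RELATION late\n@DATA\n')
except arff.BadLayout:
    print('@ATTRIBUTE before @RELATION is rejected')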
'''Returns the Python representation of a given ARFF file.
When a file object is passed as an argument, this method reads lines
iteratively, avoiding loading the whole document into memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
'''
try:
return self._decode(s, encode_nominal=encode_nominal,
matrix_type=return_type)
except ArffException as e:
e.line = self._current_line
raise e | def decode(self, s, encode_nominal=False, return_type=DENSE) | Returns the Python representation of a given ARFF file.
When a file object is passed as an argument, this method reads lines
iteratively, avoiding loading the whole document into memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_. | 6.78394 | 1.508501 | 4.49714 |
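``decode`` accepts either a string or a file-like object; with the latter, the header and data rows are consumed line by line. A self-contained sketch using an in-memory file:
import io
import arff

ARFF_TEXT = '@RELATION weather\n@ATTRIBUTE play {yes, no}\n@DATA\nyes\n'
decoder = arff.ArffDecoder()
dataset = decoder.decode(io.StringIO(ARFF_TEXT), encode_nominal=True)
print(dataset['data'])  # [[0]]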
'''(INTERNAL) Encodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string.
:param name: a string.
:return: a string with the encoded relation declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
return u'%s %s'%(_TK_RELATION, name) | def _encode_relation(self, name) | (INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string.
:param name: a string.
:return: a string with the encoded relation declaration. | 7.478127 | 2.954786 | 2.530852 |
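The quoting rule above: wrap the name in double quotes as soon as it contains a space, '%', '{', '}' or ','. A standalone sketch:
def encode_relation_sketch(name):
    for char in ' %{},':
        if char in name:
            name = '"%s"' % name
            break
    return '@RELATION %s' % name

print(encode_relation_sketch('weather'))     # @RELATION weather
print(encode_relation_sketch('my dataset'))  # @RELATION "my dataset"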
'''(INTERNAL) Encodes an attribute line.
The attribute follows the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
This method must receive the name of the attribute and its type; if
the attribute type is nominal, ``type_`` must be a list of values.
:param name: a string.
:param type_: a string or a list of strings.
:return: a string with the encoded attribute declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
if isinstance(type_, (tuple, list)):
type_tmp = [u'%s' % encode_string(type_k) for type_k in type_]
type_ = u'{%s}'%(u', '.join(type_tmp))
return u'%s %s %s'%(_TK_ATTRIBUTE, name, type_) | def _encode_attribute(self, name, type_) | (INTERNAL) Encodes an attribute line.
The attribute follows the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
This method must receive the name of the attribute and its type; if
the attribute type is nominal, ``type_`` must be a list of values.
:param name: a string.
:param type_: a string or a list of strings.
:return: a string with the encoded attribute declaration. | 5.455238 | 1.916456 | 2.846524 |
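Expected encodings for the two attribute kinds, again via the internal method and purely for illustration:
import arff

encoder = arff.ArffEncoder()
print(encoder._encode_attribute('temperature', 'NUMERIC'))
# @ATTRIBUTE temperature NUMERIC
print(encoder._encode_attribute('play', ['yes', 'no']))
# @ATTRIBUTE play {yes, no}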
'''Encodes a given object to an ARFF file.
:param obj: the object containing the ARFF information.
:return: the ARFF file as a unicode string.
'''
data = [row for row in self.iter_encode(obj)]
return u'\n'.join(data) | def encode(self, obj) | Encodes a given object to an ARFF file.
:param obj: the object containing the ARFF information.
:return: the ARFF file as a unicode string. | 5.869418 | 2.963278 | 1.980718 |
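``encode`` simply joins the output of the iterative version; a self-contained sketch:
import arff

obj = {'relation': 'weather',
       'attributes': [('temperature', 'NUMERIC')],
       'data': [[71.0]]}
text = arff.ArffEncoder().encode(obj)
print(text.splitlines()[0])  # @RELATION weather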
'''The iterative version of `arff.ArffEncoder.encode`.
This iteratively encodes a given object and yields, one by one, the
lines of the ARFF file.
:param obj: the object containing the ARFF information.
:return: (yields) the ARFF file as unicode strings.
'''
# DESCRIPTION
if obj.get('description', None):
for row in obj['description'].split('\n'):
yield self._encode_comment(row)
# RELATION
if not obj.get('relation'):
raise BadObject('Relation name not found or with invalid value.')
yield self._encode_relation(obj['relation'])
yield u''
# ATTRIBUTES
if not obj.get('attributes'):
raise BadObject('Attributes not found.')
attribute_names = set()
for attr in obj['attributes']:
# Verify for bad object format
if not isinstance(attr, (tuple, list)) or \
len(attr) != 2 or \
not isinstance(attr[0], basestring):
raise BadObject('Invalid attribute declaration "%s"'%str(attr))
if isinstance(attr[1], basestring):
# Verify for invalid types
if attr[1] not in _SIMPLE_TYPES:
raise BadObject('Invalid attribute type "%s"'%str(attr))
# Verify for bad object format
elif not isinstance(attr[1], (tuple, list)):
raise BadObject('Invalid attribute type "%s"'%str(attr))
# Verify attribute name is not used twice
if attr[0] in attribute_names:
raise BadObject('Trying to use attribute name "%s" for the '
'second time.' % str(attr[0]))
else:
attribute_names.add(attr[0])
yield self._encode_attribute(attr[0], attr[1])
yield u''
attributes = obj['attributes']
# DATA
yield _TK_DATA
if 'data' in obj:
data = _get_data_object_for_encoding(obj.get('data'))
for line in data.encode_data(obj.get('data'), attributes):
yield line
yield u'' | def iter_encode(self, obj) | The iterative version of `arff.ArffEncoder.encode`.
This iteratively encodes a given object and yields, one by one, the
lines of the ARFF file.
:param obj: the object containing the ARFF information.
:return: (yields) the ARFF file as unicode strings. | 3.283198 | 2.541237 | 1.291969 |
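Streaming write with ``iter_encode``, so a large dataset never has to be materialised as one big string:
import arff

obj = {'relation': 'weather',
       'attributes': [('temperature', 'NUMERIC')],
       'data': [[71.0]]}
encoder = arff.ArffEncoder()
with open('weather.arff', 'w') as fp:
    for line in encoder.iter_encode(obj):
        fp.write(line + '\n')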
resp = ws.ws2811_init(self._leds)
if resp != 0:
str_resp = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, str_resp)) | def begin(self) | Initialize library, must be called once before other functions are
called. | 4.192369 | 4.036824 | 1.038532 |
resp = ws.ws2811_render(self._leds)
if resp != 0:
str_resp = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, str_resp)) | def show(self) | Update the display with the data from the LED buffer. | 4.125012 | 3.638657 | 1.133664 |
self.setPixelColor(n, Color(red, green, blue)) | def setPixelColorRGB(self, n, red, green, blue) | Set LED at position n to the provided red, green, and blue color.
Each color component should be a value from 0 to 255 (where 0 is the
lowest intensity and 255 is the highest intensity). | 3.767993 | 3.508358 | 1.074005 |
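The three LED methods above come from the rpi_ws281x Python binding; a minimal blink sketch, assuming an 8-LED strip on GPIO 18 (both values are illustrative):
from rpi_ws281x import PixelStrip

LED_COUNT = 8   # illustrative strip length
LED_PIN = 18    # GPIO pin with PWM support

strip = PixelStrip(LED_COUNT, LED_PIN)
strip.begin()                         # must be called before any other call
strip.setPixelColorRGB(0, 255, 0, 0)  # set the first LED to full red
strip.show()                          # push the buffer to the hardware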
Service.startService(self)
self._thread = threading.Thread(target=self._writer)
self._thread.start()
addDestination(self) | def startService(self) | Start the writer thread. | 6.065813 | 4.479077 | 1.354255 |
Service.stopService(self)
removeDestination(self)
self._reactor.callFromThread(self._reactor.stop)
return deferToThreadPool(
self._mainReactor,
self._mainReactor.getThreadPool(), self._thread.join) | def stopService(self) | Stop the writer thread, wait for it to finish. | 9.088571 | 8.339219 | 1.089859 |
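These two methods implement a Twisted service that drains log messages on a dedicated thread (eliot's threaded-writer pattern). A stdlib-only sketch of the same lifecycle, with the Twisted reactor and eliot destination machinery replaced by a queue and a sentinel:
import threading
import queue

class ThreadedWriterSketch:
    _STOP = object()  # sentinel playing the role of reactor.stop()

    def __init__(self, write):
        self._write = write
        self._queue = queue.Queue()

    def startService(self):
        self._thread = threading.Thread(target=self._writer)
        self._thread.start()

    def stopService(self):
        self._queue.put(self._STOP)
        self._thread.join()  # the real code joins via the reactor's thread pool

    def __call__(self, message):
        # Used as a destination: hand the message to the writer thread.
        self._queue.put(message)

    def _writer(self):
        while True:
            item = self._queue.get()
            if item is self._STOP:
                return
            self._write(item)

writer = ThreadedWriterSketch(print)
writer.startService()
writer({'message_type': 'example'})
writer.stopService()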
contents = self._contents.copy()
contents.update(fields)
return Message(contents, self._serializer) | def bind(self, **fields) | Return a new L{Message} with this message's contents plus the
additional given bindings. | 6.901891 | 5.236032 | 1.318153 |
if action is None:
action = current_action()
if action is None:
task_uuid = unicode(uuid4())
task_level = [1]
else:
task_uuid = action._identification[TASK_UUID_FIELD]
task_level = action._nextTaskLevel().as_list()
timestamp = self._timestamp()
new_values = {
TIMESTAMP_FIELD: timestamp,
TASK_UUID_FIELD: task_uuid,
TASK_LEVEL_FIELD: task_level
}
if "action_type" not in self._contents and (
"message_type" not in self._contents
):
new_values["message_type"] = ""
new_values.update(self._contents)
return new_values | def _freeze(self, action=None) | Freeze this message for logging, registering it with C{action}.
@param action: The L{Action} which is the context for this message. If
C{None}, the L{Action} will be deduced from the current call
stack.
@return: A L{PMap} with added C{timestamp}, C{task_uuid}, and
C{task_level} entries. | 4.465819 | 3.524295 | 1.267152 |
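What ``bind`` plus ``_freeze`` produce, with illustrative values: the message's own contents merged with the three bookkeeping fields added at logging time (field names assumed to match eliot's conventions):
frozen = {
    'timestamp': 1700000000.123,    # seconds since the epoch
    'task_uuid': '4b2a-example',    # inherited from the action, or freshly generated
    'task_level': [2, 1],           # position of the message in the action tree
    'message_type': 'myapp:event',  # fields carried by the message itself
    'key': 'value',
}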