content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def unique_fitnesses(population: list) -> int:
    """Count the distinct fitness values present in a population.

    Args:
        population (list): Individual candidate solutions; each element
            must expose a ``fitness`` attribute.

    Returns:
        int: Number of distinct fitness values.
    """
    return len({individual.fitness for individual in population})
|
c0057c4e2874ca6ea586e999922ebcc633efb225
| 668,907 |
def YCoCgtoRGB(Y, Co, Cg):
    """Convert a YCoCg colour triple to RGB.

    The YCoCg model stores luminance (Y) and two colour-difference
    channels (Co: offset orange, Cg: offset green); it was designed to
    improve image-compression effectiveness.

    :param Y: Y value (0;255)
    :param Co: Co value (0;255)
    :param Cg: Cg value (0;255)
    :return: RGB tuple (0;255)
    """
    return (Y + Co - Cg, Y + Cg, Y - Co - Cg)
|
e73b7506b97da2112bf420ce20a988e20ef4628f
| 235,344 |
import itertools
def nwise(iter, n):
    """
    Like pairwise, except returns n-tuples of adjacent items.
    s -> (s0,s1,...,sn), (s1,s2,...,s(n+1)), ...
    """
    # Fan the source out into n independent streams, then advance the
    # k-th stream by k items so the zip yields sliding windows.
    streams = list(itertools.tee(iter, n))
    for offset, stream in enumerate(streams):
        for _ in range(offset):
            next(stream, None)
    return zip(*streams)
|
30d781c3c07e68de5949d977f1e967b46096ed74
| 327,453 |
def generate_sorted_movie_vocab(movies_df, movie_counts):
    """Build the movie vocabulary, sorted by usage count (descending).

    Each entry is ``[movie_id, title, genres, count]``; movies never seen
    in ``movie_counts`` get a count of 0.
    """
    vocab = [
        [movie_id, title, genres, movie_counts.get(movie_id, 0)]
        for movie_id, title, genres in movies_df.values
    ]
    return sorted(vocab, key=lambda entry: entry[3], reverse=True)
|
17b325ad36cbbf73ef2cc4f7b1d5e9f1fbf7c4cc
| 314,757 |
def del_fake_nums(intList, step): #8
    """
    Remove the filler values inserted by ``fake_nums`` (decryption only).

    Every element whose index is a multiple of ``step + 1`` is a filler
    and is dropped.  A new list is returned; the input is not modified.
    """
    period = step + 1
    return [value for index, value in enumerate(intList) if index % period != 0]
|
a8bc781b60bfef5441bb69046f3b3db5196767a5
| 685,315 |
def rgb_to_hex(r, g, b):  # pylint: disable=invalid-name
    """
    Format numeric r, g, b channels as a hex #RRGGBBAA colour string
    (alpha is fixed at FF).

    Arguments:
        - r - red channel in (0, 255).
        - g - green channel in (0, 255).
        - b - blue channel in (0, 255).
    """
    return f"#{r:02X}{g:02X}{b:02X}FF"
|
d595eb60ad6b6ea9fd9db8afed3bd111acb6255c
| 99,845 |
def get_normal_points(cx, cy, cos_t, sin_t, length):
    """
    For a line passing through (*cx*, *cy*) with direction angle *t*
    (given as *cos_t*, *sin_t*), return the two points on its
    perpendicular at distance *length*, as (x1, y1, x2, y2).
    """
    if length == 0.:
        return cx, cy, cx, cy
    # Perpendicular direction: rotate (cos_t, sin_t) by -90 degrees.
    dx = length * sin_t
    dy = -(length * cos_t)
    return cx + dx, cy + dy, cx - dx, cy - dy
|
1f3188946d2ae9f7e50ec984b4135ac684ad7e7f
| 519,654 |
def _parse_list_file(path):
"""
Parses files with lists of items into a list of strings.
Files should contain one item per line.
"""
with open(path, 'r') as f:
items = [i for i in f.read().split('\n') if i != '']
return items
|
2c1203cd76e6b4382d8407d5d4197a2b8489f92a
| 672,804 |
def is_int(data):
    """Check if *data* is an integer.

    Note: ``bool`` is a subclass of ``int``, so ``is_int(True)`` is True.
    """
    return isinstance(data, int)
|
f9cd4f0f5b3cd35573caf3bc86fc1f92cf6fa4a7
| 471,411 |
def psd_matern_12(omega, lam, lam_t):
    """Spectral density of a Matern-1/2 process.

    Args:
        omega (tensor): Frequency.
        lam (tensor): Decay.
        lam_t (tensor): Scale.

    Returns:
        tensor: Spectral density ``2 * lam_t * lam / (lam^2 + omega^2)``.
    """
    denominator = lam ** 2 + omega ** 2
    return 2 * lam_t * lam / denominator
|
e348e116c84a43e6f70c7ff72e6f82487cf0e497
| 399,702 |
import re
def parse_AFINN(afinnfile_name):
    """
    Parse the AFINN-111 sentiment file.

    Input: afinnfile_name: the [path/] file name of AFINN-111.txt
    Output: dicts of:
        sentiment_words: {word: score}
        sentiment_phrases: {multi-word phrase: score}
    Usage: from twitter_functions import parse_AFINN
           sentiment_words, sentiment_phrases = parse_AFINN("AFINN-111.txt")
    """
    sentiment_phrases = {}
    sentiment_words = {}
    # "with" guarantees the handle is closed (the original leaked it).
    with open(afinnfile_name) as afinnfile:
        for line in afinnfile:
            key, val = line.split("\t")
            if " " in key:
                # Multi-word phrase: collapse runs of whitespace first.
                key = re.sub(r"\s{2,}", " ", key)
                sentiment_phrases[key.lower()] = int(val)
            else:
                sentiment_words[key.lower()] = int(val)
    return (sentiment_words, sentiment_phrases)
|
2111252c62457811a6b178045e536b679ae0ac1f
| 484,028 |
def filter_matching_fields(fields, other_fields):
    """Return the entries of *fields* that also appear in *other_fields*,
    compared case-insensitively (original casing is preserved)."""
    wanted = {field.lower() for field in other_fields}
    return [field for field in fields if field.lower() in wanted]
|
f8b0909e4872c0bc6b1212b8bd658debb38da922
| 646,802 |
import math
def get_points_distance(point1, point2):
    """
    Euclidean distance between two 2-D points, truncated to an int.

    :param point1: tuple with point 1
    :param point2: tuple with point 2
    :return: int distance
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return int(math.sqrt(dx ** 2 + dy ** 2))
|
40af84836715b49ba531fe5113e40230110d49b9
| 28,971 |
from functools import reduce
def solve(k, lim):
    """Find the <lim>th term of a Van Eck's sequence with initial values
    <k>.

    In a Van Eck sequence the next term is 0 when the current term has
    not been seen before, otherwise the distance back to its previous
    occurrence.
    """
    # Working copy of the seed terms.
    seq = [*k]
    # Map each seen value -> index of its most recent occurrence.
    nums = reduce(lambda a, b: a | b, [{n: i} for i,n in enumerate(seq)])
    for i in range(len(k) - 1, lim):
        # Default to i so an unseen value yields i - i == 0 below.
        res = nums.get(seq[i], i)
        seq.append(i-res)
        # Record seq[i]'s position only *after* the lookup above.
        nums[seq[i]] = i
    return seq[lim - 1]
|
e9cf26c77d113e31ba21c90d8d482dcf8f66a4de
| 540,532 |
def __find_service_account_in_message(message):
"""
The command "gcloud logging sinks create", communicates a service account Id as part of its message.
Knowing the message format, this function extracts the service account Id and returns it to the caller,
which will grant it with BQ permissions.
Sample message:
"Created [https://logging.googleapis.com/v2/projects/hipaa-sample-project/sinks/audit-logs-to-bigquery].
Please remember to grant `serviceAccount:[email protected]` the WRITER role on the dataset.
More information about sinks can be found at https://cloud.google.com/logging/docs/export/configure_export"
:param message: the message communicated by "gcloud logging sinks create" command
:return: the service account Id that requires BQ permissions
"""
service_account = [t for t in message.split() if t.startswith('`serviceAccount:')]
if service_account:
service_account = service_account[0].replace('`', '')
service_account = service_account.replace('serviceAccount:', '')
return service_account
|
e791c81d62dae49b6298874dc194eda712e99857
| 50,967 |
def _create_element(parent, selenium_webelement):
"""Create an element from a Selenium webelement.
Parameters
----------
parent : browser or element
The browser or element.
selenium_webelement : Selenium webelement
The Selenium webelement.
Returns
-------
element
The element.
"""
element_class = parent.element_class
selenium_webdriver = parent.selenium_webdriver
return element_class(selenium_webdriver, selenium_webelement)
|
a5c80dfa8be8cea2b204f6961cb9ddaf24fd1713
| 397,650 |
import re
def _remove_environment(text, environment):
"""Removes '\\begin{environment}*\\end{environment}' from 'text'."""
return re.sub(
r'\\begin\{' + environment + r'\}[\s\S]*?\\end\{' + environment + r'\}',
'', text)
|
2e77f53fa482b78252a53c5c7a9764ae6991646a
| 525,437 |
def erase(string_letters: str, string_index: int) -> str:
    """Return *string_letters* with the character at *string_index* removed.

    An index outside ``0 <= string_index < len(string_letters)`` leaves
    the string unchanged.

    >>> erase('assignment', 4)
    'assinment'
    >>> erase('assignment', 13)
    'assignment'
    """
    if string_index not in range(len(string_letters)):
        return string_letters
    return string_letters[:string_index] + string_letters[string_index + 1:]
|
030a08f4131562b7eb3d5d640a0f5172bc4a2ec2
| 407,646 |
def get_units_title(unit_type):
    """
    Return the length-unit label for a unit system: "ft" for 'english',
    metres ("m") for everything else.
    """
    return "ft" if unit_type == 'english' else "m"
|
d032f50661ad42aed97aff7f2b19ebb9d8ebc366
| 639,060 |
def validate_request(raw):
    """Return True when the request contains both 'action' and 'time' keys."""
    return 'action' in raw and 'time' in raw
|
a8d612508e8f97b15186b2265e029c67711d106a
| 590,124 |
def to_base(n, base):
    """Transform a non-negative integer to another base, where *base* is
    the digit alphabet (its length is the radix)."""
    radix = len(base)
    digits = base[n % radix]
    n //= radix
    while n:
        digits = base[n % radix] + digits
        n //= radix
    return digits
|
dfca71b16c1ed468345b096a720674c1277b0d77
| 421,020 |
def compound_inv_query(query, rel, dst):
    """
    Create a compound inverse query, similar to
    :meth:``compound_fw_query`` but only selecting the source nodes
    given a destination node.

    :param query: The SQL subquery.
    :param rel: The relation.
    :param dst: The destination node.
    :return: (sql, params) pair for a DB-API ``execute`` call; *dst* is
        bound via the ``?`` placeholder.
    """
    sql = f'SELECT src FROM {rel} WHERE src IN ({query}) AND dst = ?'
    return sql, (dst,)
|
459175805de1e3a35e3c6af5cdec6090c4df9618
| 378,335 |
def sos_gradient(params):
    """Gradient of the sum-of-squares function: 2*x per parameter value.

    *params* is expected to have a "value" column convertible via
    ``to_numpy()`` (e.g. a pandas DataFrame).
    """
    values = params["value"].to_numpy()
    return values * 2
|
7a16dde8638b397c8caabd0f359bc6ca0e477216
| 679,186 |
def coord_to_index(width, x, y):
    """Return the 1D row-major index of 2D position (x, y) on a grid of
    the given width.

    Examples:
        On the 3-wide grid
            0 1 2
            3 4 5
            6 7 8
        the element at (2, 2) is index 8:
        >>> 8 == coord_to_index(3, 2, 2)
        True
    """
    return y * width + x
|
b816f83dd21ffed44cbffbf6f26da55b702c5c62
| 339,401 |
from typing import List
def _parse_online_cpus_string(raw_string) -> List[int]:
"""
Parses string returned by /sys/devices/system/cpu/online
and returns a list of online cores
"""
parsed_cpus_info = []
for nr in raw_string.split(','):
if '-' not in nr:
parsed_cpus_info.append(int(nr))
else:
start, end = nr.split('-')
parsed_cpus_info.extend(range(int(start), int(end) + 1))
return parsed_cpus_info
|
d0932b855a6211cab6ef032d4ade8661fd23e3ee
| 631,086 |
def is_odd(number):
    """
    *Determine if the number is odd or not*

    **Key Arguments:**
        - ``number`` -- number to be tested for 'odd'ness

    Returns:
        - True when the argument is odd, False when it is even
    """
    return number % 2 != 0
|
13357aa168317785833fb2183c7361fe943fda02
| 330,686 |
def get_category_name(text: str) -> str:
    """
    Generate a category name: title-case the text, then turn hyphens
    into spaces.

    The order matters — ``title()`` runs first, so letters directly
    after a hyphen also get capitalised.

    Args:
        text {str} The text value to parse

    Returns:
        {str} The parsed text
    """
    titled = text.title()
    return titled.replace('-', ' ')
|
e9f2bd0bfe83e3594231408b61e59d5142688fd4
| 311,335 |
def format(dt):
    """Returns date in YYYYMMDD format.

    :param dt: a ``date``/``datetime``-like object exposing ``strftime``.

    Note: this shadows the built-in ``format`` within this module.
    """
    return dt.strftime('%Y%m%d')
|
06c0823dfc593be0ed5b441640c82aa1eb3edea7
| 575,522 |
def get_form_choices(registration_question):
    """Build ChoiceField choices for a RegistrationQuestion.

    Returns a list of ``(str(choice_id), choice_text)`` tuples, one per
    entry in the question's choice set, as Django ChoiceFields expect.
    """
    choices = registration_question.question_choice_set.all()
    return [(str(choice.id), choice.choice) for choice in choices]
|
486c41e70c4cfb639180b51a789e638a0ddee3f8
| 446,856 |
def fafn2fqfn(fafn):
    """Return the FASTQ file name corresponding to a FASTA file name.

    Everything from the last '.' on is replaced by ".fastq"; a name with
    no '.' simply gets ".fastq" appended.
    """
    stem, dot, _ = fafn.rpartition('.')
    return (stem if dot else fafn) + ".fastq"
|
13c18005b38b0686c3bca690f974d4547bba50ac
| 217,979 |
def is_hex_string(string):
    """Check whether *string* is a hex literal of the form "0x...."."""
    if not string.startswith("0x"):
        return False
    try:
        int(string, 16)
    except ValueError:
        return False
    return True
|
ad237a1add58e93abcb65a0048a2e28c56ac2e9d
| 557,360 |
from typing import List
def centered_average(lst: List[int]) -> float:
    """Return the centered average of *lst*: the mean after discarding
    one copy of the largest and one copy of the smallest element.

    >>> centered_average([-145, 2, 2, 145])
    2.0
    """
    trimmed_total = sum(lst) - max(lst) - min(lst)
    return trimmed_total / (len(lst) - 2)
|
9b55970e32eeeb764912945b9a0103b4f6ceae08
| 493,740 |
def sort_by_length(arr):
    """
    Return a new array with the same strings ordered from shortest to
    longest (stable for equal lengths).

    :param arr: an array of strings.
    :return: array sorted from shortest to longest.
    """
    result = list(arr)
    result.sort(key=len)
    return result
|
f25b631cc4444d0f05567ab72094618e0a1c5b8d
| 494,533 |
import six
def column_info(df):
    """Returns a list of columns and their datatypes.

    Args:
        df (`pandas.DataFrame`): The dataframe to get info for

    Returns:
        :type:`list` of :type:`dict`: A list of dicts containing column
        ids and their Dtypes
    """
    # df.dtypes is a Series indexed by column name; iterate it directly
    # instead of going through the six Python-2 compatibility shim.
    return [
        {'id': column_name, 'dtype': str(data_type)}
        for column_name, data_type in df.dtypes.items()
    ]
|
ea1652133b976d529e6440cdcb2f397c1b3109bc
| 671,936 |
import six
def quote_cmd(cmd):
    """
    Takes a shell command *cmd* given as a list and returns a single string
    representation of that command with proper quoting. To denote nested
    commands (such as shown below), *cmd* can also contain nested lists.
    Example:
    .. code-block:: python
        print(quote_cmd(["bash", "-c", "echo", "foobar"]))
        # -> "bash -c echo foobar"
        print(quote_cmd(["bash", "-c", ["echo", "foobar"]]))
        # -> "bash -c 'echo foobar'"
    """
    import shlex  # stdlib equivalent of the six.moves.shlex_quote shim

    # expand nested (sub-)commands recursively into single strings
    parts = [
        quote_cmd(part) if isinstance(part, (list, tuple)) else str(part)
        for part in cmd
    ]
    # quote all parts and join
    return " ".join(shlex.quote(part) for part in parts)
|
63ad2441376cac43aa7e4865a6832ee42b485e3d
| 296,561 |
def read_add_mos_args(add_mos_result, get_opt_args=False):
    """
    Unpack the result of ``add_mos``.

    :param add_mos_result: result from add_mos (iterable of dicts with
        'key_args' and, when requested, 'opt_args' entries)
    :param get_opt_args: when True an array of opt_args is returned too
    :return: array of key_args, plus the array of opt_args if requested
    """
    key_args = []
    opt_args = []
    for entry in add_mos_result:
        key_args.append(entry['key_args'])
        if get_opt_args:
            opt_args.append(entry['opt_args'])
    return (key_args, opt_args) if get_opt_args else key_args
|
db8926315eb4b01e5d154d0c742cc488f16f0f3b
| 128,291 |
def add_collapse_mapped_isoforms_io_arguments(arg_parser):
    """Register the I/O arguments for collapsing isoforms on *arg_parser*
    and return it."""
    arg_parser.add_argument(
        "input_isoforms", type=str,
        help="Input uncollapsed isoforms in a FASTA, FASTQ or ContigSet file.")
    arg_parser.add_argument(
        "sam_filename", type=str,
        help="Input SORTED SAM file mapping uncollapsed isoforms to reference genome using GMAP.")
    arg_parser.add_argument(
        "output_prefix", type=str,
        help="Output prefix, will write output files to "
             "<output_prefix>.fastq|.gff|.group.txt|.abundance.txt|.read_stat.txt")
    arg_parser.add_argument(
        "--collapsed_isoforms", type=str, default=None,
        help="Output collapsed isoforms to a FASTA, FASTQ, or ContigSet file.")
    return arg_parser
|
8c882bbbce8ce593c4665242af12a12011c358cf
| 539,825 |
def threshold_mean(img, thresh_multiplier=0.95, color1=255, color2=0):
    """Threshold a greyscale image around a scaled mean, in place.

    Pixels above ``mean * thresh_multiplier`` become *color1*, pixels
    below become *color2*; pixels exactly at the cutoff are untouched.
    The input array is modified and also returned.
    """
    cutoff = img.mean() * thresh_multiplier
    # The second mask is evaluated after the first assignment, matching
    # the original sequential behaviour.
    img[img > cutoff] = color1
    img[img < cutoff] = color2
    return img
|
65a4779264bcd068dd1661431f70a6d7f3402eef
| 100,575 |
import re
def __camel_to_snake(string):
    """Turn a CamelCase string to a snake_case string.

    Inserts '_' at every position preceding an upper-case letter (except
    the start), then lower-cases the whole result.
    """
    string = re.sub(r'(?<!^)(?=[A-Z])', '_', string).lower()
    return string
|
2d17191228ab5124d2370a9637f9a7356afbd9b5
| 204,200 |
def _wildcard_dih(atoms, idxs):
"""
Given atoms and idxs which define a
pattern return a tuple consisting of
a entry from atoms if the pattern is
an int and else the value in idxs.
"""
atom_key = []
for idx in idxs:
if type(idx) is int:
atom_key.append(atoms[idx])
else:
atom_key.append(idx)
return tuple(atom_key)
|
44d1849de43c6b35e13e7c8da7bba746cb4d2910
| 473,736 |
def get_key_from_dimensions(dimensions):
    """
    Get a key for DERIVED_UNI or DERIVED_ENT.

    Translate dimensionality into a hashable key for the DERIVED_UNI and
    DERIVED_ENT dictionaries: each dimension mapping becomes a tuple of
    its (key, value) pairs.

    Note: the result depends on each mapping's insertion order.
    """
    return tuple(tuple(i.items()) for i in dimensions)
|
c1eb93fa2dcd7783328581d808dde1a12e7a539f
| 406,477 |
def ach(structure, fields):
    """Get field from achievements structure.

    Recursively walks *fields* (a list of attribute names) down
    *structure* and returns the value at the end of the path, or None
    when the walk cannot complete.

    NOTE(review): *fields* is consumed via ``pop(0)`` — the caller's
    list is mutated by this call.
    """
    field = fields.pop(0)
    if structure:
        if hasattr(structure, field):
            structure = getattr(structure, field)
            # Path exhausted: this attribute is the requested value.
            if not fields:
                return structure
            return ach(structure, fields)
    return None
|
d9b90f240cd3bee890831e4af43139c94ebc00b3
| 446,956 |
def parts(sequence, n):
    """Split a sequence into consecutive parts of size n (the last part
    may be shorter)."""
    # range() is already iterable; the original's iter() wrapper was
    # redundant.
    return [sequence[i : i + n] for i in range(0, len(sequence), n)]
|
7d42f6a36d3975431aa40c2dfcb5e7e2cb036081
| 199,502 |
import re
def space_to_dash(text):
    """Replace each whitespace run with a single dash."""
    # Raw string: the original "\s+" is an invalid escape sequence that
    # raises a SyntaxWarning on Python 3.12+.
    return re.sub(r"\s+", "-", text)
|
ba1191a6f7470ceb53d14f7d5e5775fe54c031ea
| 502,573 |
def chunked(xs, chunk_size):
    """Split *xs* into consecutive ``chunk_size``-sized pieces (the last
    piece may be shorter)."""
    items = list(xs)
    pieces = []
    for start in range(0, len(items), chunk_size):
        pieces.append(items[start:start + chunk_size])
    return pieces
|
57f5379edfda7f4b7ae63412d13deb19e611cbc1
| 227,587 |
def to_pass(line):
    # The MIT License (MIT)
    #
    # Copyright (c) 2019 Michael Dawson-Haggerty
    #
    # Permission is hereby granted, free of charge, to any person
    # obtaining a copy of this software and associated documentation
    # files (the "Software"), to deal in the Software without restriction,
    # including without limitation the rights to use, copy, modify, merge,
    # publish, distribute, sublicense, and/or sell copies of the Software,
    # and to permit persons to whom the Software is furnished to do so,
    # subject to the following conditions:
    #
    # The above copyright notice and this permission notice shall be
    # included in all copies or substantial portions of the Software.
    #
    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    """
    Replace a line of code with a pass statement, with
    the correct number of leading spaces.

    Only space indentation is handled (tabs are not counted).

    Arguments
    ----------
    line : str, line of code

    Returns
    ----------
    passed : str, line of code with same leading spaces
             but code replaced with pass statement
    """
    # the number of leading spaces on the line
    spaces = len(line) - len(line.lstrip(' '))
    # replace statement with pass and correct leading spaces
    passed = (' ' * spaces) + 'pass'
    return passed
|
f7210ea18fa44d6e1e801ab7909f48e2a0e9a2cd
| 55,698 |
import json
def read_instance(filename):
    """Read an instance file from test data.

    Args:
        filename (str): Path to the JSON instance file.

    Returns:
        tuple: (network graph data, number of vehicles)
    """
    with open(filename) as handle:
        instance = json.load(handle)
    return instance["graph"], instance["numVehicles"]
|
e16a1cf9c8905031785005420ae399823ab91a64
| 545,863 |
def calculate_mag(raw_val):
    """
    Convert a raw magnetometer reading to its scaled value.

    Scales by 2 / (2**15 - 1); calibration is not taken into account.
    """
    full_scale = 2
    return raw_val * full_scale / ((2 ** 15) - 1)
|
1ca9fe469bd0921426f316356fb203868351ed0c
| 302,883 |
from re import X
def compVertical(p1, p2):
    """Compare two points by X coordinate for vertical partitioning.

    cmp-style comparator: negative/zero/positive as p1 is left of /
    level with / right of p2.
    """
    # Index 0 is the X coordinate.  The original indexed with ``X``
    # imported from ``re`` (the re.VERBOSE flag, value 64), which raises
    # IndexError for ordinary 2-D points.
    return p1[0] - p2[0]
|
3e218925c0289f0c2d011208ce0f0f2a1c46e8b4
| 342,247 |
def polynomial5(x, p0, p1, p2, p3, p4):
    """
    5th order polynomial with no constant term:
    p0*x + p1*x**2 + p2*x**3 + p3*x**4 + p4*x**5
    """
    coefficients = (p0, p1, p2, p3, p4)
    return sum(c * x ** (k + 1) for k, c in enumerate(coefficients))
|
3ef7a2e0ba57567b6a18202478c2d5befc200a67
| 221,448 |
import shutil
def check_space_requirements(new_files, changed_files, remove_files, output_path):
    """
    Given lists of new files, changed files, and files to delete, this checks that the drive these file changes
    will be made on will be able to hold all the new files.

    :param new_files: A list of new files to backup generated by mark_files().
        Each tuple's index 1 holds the file size in bytes.
    :param changed_files: A list of changed files to backup generated by mark_files().
        Index 1 is the new size, index 3 the previously backed-up size.
    :param remove_files: A list of files to delete from the backup generated by mark_files().
    :param output_path: The path where the backup will be made.
    :return: A tuple of three values. First, true if the backup will fit on the drive, false otherwise. Second, the
             remaining free space on the target drive in bytes. This will be negative if the backup won't fit.
             Third, the difference in bytes this backup will take. This will be positive if the new backup will
             be bigger than what's already backed up, or negative if it's smaller.
    """
    total, used, free = shutil.disk_usage(output_path)
    original_free = free
    # Increase the free space on the drive for every file deleted
    for file_tuple in remove_files:
        free += file_tuple[1]
    # Decrease the free space on the drive for every new file added
    for file_tuple in new_files:
        free -= file_tuple[1]
    # Increase free space when the old changed file is deleted, then decrease for the space of the new version
    for file_tuple in changed_files:
        free = free - file_tuple[1] + file_tuple[3]
        # If free space ever dips below 0 during this, return
        # (the first element is necessarily False on this early path)
        if free <= 0:
            return free > 0, free, original_free - free
    return free > 0, free, original_free - free
|
784b16aa816bee4f40780482bb529c4024f8c2ee
| 336,756 |
def get_num_labels(labels_or_logits):
    """Returns the number of labels inferred from labels_or_logits.

    Rank-0 and rank-1 tensors are treated as a single label; otherwise
    the size of dimension 1 is returned.

    NOTE(review): ``.get_shape()[1].value`` is TF1-style; under TF2's
    TensorShape, ``[1]`` is already an int — confirm the TF version.
    """
    if labels_or_logits.get_shape().ndims <= 1:
        return 1
    return labels_or_logits.get_shape()[1].value
|
21f3067250f88d1b6ed50fe043cb873b57c13491
| 508,134 |
from typing import Dict
from typing import List
def annotation_inside_slice(annotation: Dict, slice_bbox: List[int]) -> bool:
    """Check whether annotation coordinates lie inside slice coordinates.

    Args:
        annotation (dict): Single annotation entry in COCO format; its
            "bbox" is [left, top, width, height].
        slice_bbox (List[int]): Generated from `get_slice_bboxes`.
            Format for each slice bbox: [x_min, y_min, x_max, y_max].

    Returns:
        (bool): True if any annotation coordinate lies inside the slice
        (boxes that only touch at an edge do not count).
    """
    left, top, width, height = annotation["bbox"]
    x_min, y_min, x_max, y_max = slice_bbox
    return (
        left < x_max
        and top < y_max
        and left + width > x_min
        and top + height > y_min
    )
|
01168edc86c436ce878df4fc551094ab03f1b30a
| 195,446 |
def PresentValue(future_amount, interest_rate, periods):
    """Returns the present value of some future value.

    interest_rate: as a value (ie, 0.02 is the value for 2% per period)
    periods: total number of interest payments which will be made
    For more, see https://en.wikipedia.org/wiki/Present_value
    """
    discount_factor = (1.0 + interest_rate) ** periods
    return future_amount / discount_factor
|
8943c84df58194d590e2cd5c7fd2f257346c072a
| 146,820 |
from pathlib import Path
def find_files(src_path: Path, glob_pattern: str) -> list[Path]:
    """
    Return the Path objects under *src_path* matching *glob_pattern*.
    """
    return list(src_path.glob(glob_pattern))
|
32a1428a33f4a98a46c4a21a64b2653bdf673657
| 590,882 |
def calc_peak(motion):
    """Return the peak absolute value of the motion series."""
    return max(map(abs, motion))
|
2a49b2c454c204ab27097eae4617c000c6fea7df
| 372,784 |
def as_stars(value):
    """
    Convert a float rating between 0 and 5 to a CSS class naming the
    number of stars to display.

    * Rounds to the nearest integer
    * Maps a missing rating (None) to 0 stars
    * Fails quietly: anything outside 0-5 yields ''
    """
    if not value:
        value = 0.0
    stars = int(round(value))
    if stars == 1:
        return 'One'
    if stars == 2:
        return 'Two'
    if stars == 3:
        return 'Three'
    if stars == 4:
        return 'Four'
    if stars == 5:
        return 'Five'
    return ''
|
4572a7e580154d6c382e67e97bb3f47b3c8d505c
| 432,708 |
from bs4 import BeautifulSoup
def strip_html(html):
    """Returns the text only out of any potential HTML content.

    Falsy input (None, '') is returned unchanged; otherwise the markup
    is parsed with the html5lib parser and the whitespace-stripped text
    content is returned.
    """
    if not html:
        return html
    soup = BeautifulSoup(html, "html5lib")
    return soup.text.strip()
|
ca08a9e102a440a783dddfc78e2ca19c5a6ae20e
| 217,112 |
def details(client, domain_id):
    """
    Get domain details by reference.

    :param client: API client exposing a ``domain`` service.
    :param domain_id: UUID identifying the domain.
    :return: whatever ``client.domain.details`` returns for that UUID.
    """
    return client.domain.details(uuid=domain_id)
|
a41b45c7a343277fe5fa9345976b5d5d21964415
| 384,239 |
def components_contain_id(components_data, component_id):
    """
    Tests whether component_id is the id of one of the components.

    :param components_data: list of component dicts (each with an 'id')
    :param component_id: id of component
    :return: True if any component carries that id
    """
    return any(component_id == component['id'] for component in components_data)
|
2ec77f1a5502d90b899475a94924daf5e57d9c15
| 476,048 |
def _scale_to_fit(width, height, max_width, max_height):
"""scales input dimensions to fit within max dimensions"""
height_scale = height/max_height
width_scale = width/max_width
scale = max(height_scale, width_scale)
if scale > 1:
height /= scale
width /= scale
return width, height
|
6c604b291882101d00c98f9410a318f6eb32383a
| 472,688 |
def to_list(argument):
    """
    Wrap *argument* in a list unless it already is exactly a list.

    Args:
        argument (str/list): String or list of string.

    Returns:
        list: *argument* itself when it is a list, else ``[argument]``.
    """
    # Deliberate exact type check (not isinstance): tuples and list
    # subclasses get wrapped, matching the original behaviour.
    if type(argument) is list:
        return argument
    return [argument]
|
af984c6f146a6d84e7c38d09b1434aa2e5300ba3
| 265,258 |
def make_filebody(number_of_lines: int) -> str:
    """Return a string representing the contents of a file with a given
    number of lines.  Only used for testing purposes."""
    lines = (
        f'{index}/{number_of_lines}: line\n'
        for index in range(number_of_lines)
    )
    return ''.join(lines)
|
928429e745495d9cbc407e24ce84c59a45596f53
| 240,815 |
def stmt_from_rule(rule_name, model, stmts):
    """Return the source INDRA Statement corresponding to a rule in a model.

    Parameters
    ----------
    rule_name : str
        The name of a rule in the given PySB model.
    model : pysb.core.Model
        A PySB model which contains the given rule.
    stmts : list[indra.statements.Statement]
        A list of INDRA Statements from which the model was assembled.

    Returns
    -------
    stmt : indra.statements.Statement or None
        The Statement from which the given rule was obtained, or None if
        no 'from_indra_statement' annotation (or matching uuid) exists.
    """
    # First annotation tying this rule to a statement uuid, if any.
    uuid = next(
        (ann.object for ann in model.annotations
         if ann.subject == rule_name
         and ann.predicate == 'from_indra_statement'),
        None,
    )
    if not uuid:
        return None
    for stmt in stmts:
        if stmt.uuid == uuid:
            return stmt
    return None
|
1defd6b6f4f013c3ae5fc41bbda6e271fb6b4d0d
| 670,400 |
def concatenateToken(tokens):
    """
    Flatten the n-gram occurrence map into a token list, repeating each
    token according to its number of occurrences (the models work on
    plain sequences).

    :param tokens: Map of n-grams occurrences
    :type tokens: {Int:{String:Int}}
    :return: tokens with multiplicities
    :rtype: list
    """
    flattened = []
    for ngrams in tokens.values():
        for token, count in ngrams.items():
            flattened.extend([token] * count)
    return flattened
|
df3132faf095bdb187248e9f0d275b8599e861d3
| 512,836 |
from warnings import warn
def deprecated(message):
    """ Decorator for deprecating functions and methods.
    ::
        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass
    """
    from functools import wraps

    def f__(f):
        # functools.wraps replaces the original's manual copying of
        # __name__/__doc__/__dict__ and also preserves __qualname__,
        # __module__ and sets __wrapped__.
        @wraps(f)
        def f_(*args, **kwargs):
            warn(message, category=DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        return f_
    return f__
|
9ef7c6757556d7b41684e9cf908104924397c561
| 361,297 |
def file_to_dataset(file):
    """Example function to derive dataset labels from file names.

    Returns None when no known marker is present.
    """
    for marker, dataset in (("ZJet", "Z"), ("WJet", "W"), ("HToInvisible", "Hinv")):
        if marker in file:
            return dataset
    return None
|
4e759054df3889e3d6f1ca1b0d3ed080f035d233
| 32,303 |
def gettext(element):
    """Return the actual content of a leaf DOM element as a text string.

    The data of all text-node children is joined with '.' (a single
    text node is returned unchanged).
    """
    text_chunks = [
        node.data
        for node in element.childNodes
        if node.nodeType == node.TEXT_NODE
    ]
    return '.'.join(text_chunks)
|
9124ff6f478c0d297d74c985124a06f895dc2874
| 183,856 |
def split_factory2(delimiter: str = ',', cast_to: type = list):
    """
    Separates a text by a specific delimiter into separate sub-lists.
    Returns a FUNCTION that can then process the text accordingly.
    It DOES NOT ignore whitespaces.
    *-------------------------------------------------------------*
    ESCAPING ESCAPES *ONLY* A SINGLE CHARACTER AFTER ESCAPE SYMBOL.
    IF YOU HAVE TO ESCAPE MORE THAN ONE CHARACTER THEN ESCAPE EVERY
    SINGLE ONE INDIVIDUALLY.
    *-------------------------------------------------------------*
    Created because the original split factory had a huge flaw where
    the vocabulary of a splitter did not accept delimiters and newlines.

    :param str delimiter: separates individual objects
    :param type cast_to: defines the type of iterable that the item container is
        going to be
    :return function: a function that does the separation
    """
    def wrapper(text, remove_empty: bool = False)->list:
        """
        Return-function that actually processes the text.

        :param str text: text to process
        :param bool remove_empty: defines if empty items are going to be removed or not
        :return list: a list of split lines and items
        """
        def list_to_str(this: list)->str:
            """
            Concatenates all items from a list to a single string.

            NOTE(review): the loop rebinds ``res`` instead of appending,
            so only the *last* item is returned — for multi-character
            delimiters the lookahead comparison below looks wrong.
            Single-character delimiters (the default) are unaffected,
            since the lookahead slice is then empty.  TODO confirm.

            :param list this: a list of items
            :return str: concatenated string
            """
            res = ''
            for item in this:
                res = item if isinstance(item, str) else repr(item)
            return res
        def add_to_result(line: str):
            """
            Adds a line to the result (skipping empty lines when
            *remove_empty* is set).

            :param line: a line
            :return:
            """
            if remove_empty:
                if line:
                    result.append(line)
            else:
                result.append(line)
        result = []
        # Consume the text character by character from the front.
        text = list(text)
        current_item = ''
        while text:
            char = text.pop(0)
            if char == '\\':
                # Escape: keep the backslash and the next char verbatim.
                new_char = text.pop(0)
                current_item += char + new_char
            elif char == delimiter[0]:
                # Possible delimiter start: compare against the full
                # delimiter using lookahead.
                if char + list_to_str(text[:len(delimiter) - 1]) == delimiter:
                    add_to_result(current_item)
                    current_item = ''
                    del(text[:len(delimiter) - 1])
            else:
                current_item += char
        add_to_result(current_item)
        return cast_to(result)
    return wrapper
|
3e5683a751ecde8768d6b8b0ecb3ad37c0effe60
| 75,304 |
import logging
def process_covid_csv_data(covid_csv_data:list) -> tuple:
    """Returns cases in last 7 days, current hospitalisations,
    and total deaths from parsed covid csv data.

    The input is a list of raw CSV lines (header first) and is modified
    in place: each line is replaced by its comma-split field list.
    Returns (0, 0, 0) when no data is supplied.
    """
    if not covid_csv_data:
        logging.error("No csv data was inputted")
        return 0, 0, 0
    #Builds a 2D list of the csv data (NOTE: mutates the caller's list)
    for i, row in enumerate(covid_csv_data):
        covid_csv_data[i] = row.split(",")
    #Adds up cases for data entries representing the last 7 days
    # Rows 3..9 are summed — presumably rows 1-2 hold incomplete recent
    # figures; column 6 is the daily case count.  TODO confirm layout.
    cases_last_7 = 0
    for i in range(3, 10):
        cases_last_7 += int(covid_csv_data[i][6])
    # Column 5 of the newest data row: current hospitalisations.
    current_hospitalisations = int(covid_csv_data[1][5])
    total_deaths = 0
    #Finds the latest data entry with a death count (column 4)
    i = 1
    while covid_csv_data[i][4] == "":
        i += 1
    total_deaths = int(covid_csv_data[i][4])
    return cases_last_7, current_hospitalisations, total_deaths
|
a9705af0877e112f0830daeb7fad17ba7832a640
| 526,060 |
def one_text_header(response, header_name):
    """
    Retrieve one text header from the given HTTP response.

    Takes the first raw value of *header_name* and decodes it as UTF-8.

    NOTE(review): when the header is absent, ``getRawHeaders`` returns
    None (Twisted) and this raises TypeError — confirm callers guarantee
    the header is present.
    """
    return response.headers.getRawHeaders(header_name)[0].decode("utf-8")
|
afc4b3bba143a092173a935ede77ff8ecec6d7e3
| 18,303 |
def Claret_LD_law(mu, c1, c2, c3, c4):
    """
    Claret 4-parameter limb-darkening law.

    Returns I(mu) = mu * (1 - sum_k c_k * (1 - mu**(k/2))), k = 1..4.
    """
    limb = (
        1
        - c1 * (1 - mu ** 0.5)
        - c2 * (1 - mu)
        - c3 * (1 - mu ** 1.5)
        - c4 * (1 - mu ** 2)
    )
    return limb * mu
|
535136e560dba26da012b9d13d7edcc3b507ec54
| 38,933 |
def get_times(dt):
    """
    Converts seconds into hours, minutes and seconds.

    :param dt: The time in seconds
    :type dt: float
    :return: hours, minutes and leftover seconds
    :rtype: float (whole-valued), float (whole-valued), float
    """
    total_minutes, seconds = divmod(dt, 60)
    hours, minutes = divmod(total_minutes, 60)
    return hours, minutes, seconds
|
0ed7e1582e5c6366e827a434dd900abe6fadd254
| 144,810 |
def format_attachments(attachments=None):
    """ Return formatted_attachments for volume config.

    :param attachments: iterable of EC2-style attachment dicts; defaults
        to an empty list.
    :return: list of normalised attachment dicts.
    """
    # None sentinel instead of the original mutable [] default argument.
    if attachments is None:
        attachments = []
    return [
        {
            'attach_time': str(attachment.get('AttachTime')),
            'instance_id': attachment.get('InstanceId'),
            'volume_id': attachment.get('VolumeId'),
            'state': attachment.get('State'),
            'delete_on_termination': attachment.get('DeleteOnTermination'),
            'device': attachment.get('Device'),
        }
        for attachment in attachments
    ]
|
0b2b292ceb8a7273eda2ba8a2297929d8030d7bf
| 513,716 |
import torch
def complex_multiplication(a, b):
    """ Computes element-wise complex-tensor multiplication.
    Args:
      a and b: multiplication operands (tensors of complex numbers). Each
      tensor is in (batch x channel x height x width x complex_channel) format,
      where complex_channel contains the real and imaginary parts of each
      complex number in the tensor.
    Returns:
      results of a x b = (c+di)(j+hi) = (cj - dh) + (jd + ch)i.
    """
    # Bug fix: the original asserted len(a.shape) twice and never
    # validated b's rank.
    assert (len(a.shape) >= 4 and len(b.shape) >= 4)
    assert (a.shape[-3:] == b.shape[-3:])
    assert (a.shape[-1] == 2)
    # 4-D inputs lack the channel dimension; insert it so the 5-D
    # indexing below is uniform.
    if len(a.shape) == 4:
        a = torch.unsqueeze(a, dim=1)
    if len(b.shape) == 4:
        b = torch.unsqueeze(b, dim=1)
    real_a = a[:, :, :, :, 0]
    imag_a = a[:, :, :, :, 1]
    real_b = b[:, :, :, :, 0]
    imag_b = b[:, :, :, :, 1]
    # Re-stack real and imaginary parts on the trailing complex channel.
    result = torch.stack([real_a * real_b - imag_a * imag_b,
                          real_a * imag_b + imag_a * real_b], dim=-1)
    return result
|
c0a67750e7e0d0d53317f5736ede637e658e28fb
| 173,212 |
def create_annotation(args, imname, pred_mask, class_id, score, classes, is_valid = True):
    """Creates annotation object following the COCO API ground truth format"""
    # Invalid detections produce no annotation at all.
    if not is_valid:
        return None
    # NOTE: masks that threshold to all zeros are likewise expected to
    # be dropped by the caller.
    annotation = {
        'image_id': imname,
        'category_id': class_id,
        'category_name': classes[class_id],
        'segmentation': pred_mask,
        'score': score,
    }
    return annotation
|
e16f626af65a84a844df19aeff2c6710fc98d204
| 192,267 |
import struct
def binary_format(encoding_size):
    """
    Return a Struct instance with the binary format of the encodings.
    The binary format string can be understood as:
    - "!" Use network byte order (big-endian).
    - "I" store the entity ID as an unsigned int
    - "<encoding size>s" Store the n (e.g. 128) raw bytes of the bitarray
    https://docs.python.org/3/library/struct.html
    :param encoding_size: the encoding size of one filter in number of bytes, excluding the entity ID info
    :return:
        A Struct object which can read and write the binary format.
    """
    packing_format = "!I" + str(encoding_size) + "s"
    return struct.Struct(packing_format)
|
6ad6613ac1b62b69ff74a75f0181f5db9a4791fa
| 141,712 |
def mock_email_backend(mocker, backend_settings):
    """Fixture that returns a fake EmailAuth backend object"""
    fake_backend = mocker.Mock()
    fake_backend.name = "email"

    def _setting(key, default, **kwargs):
        # Look up the key in the supplied settings, falling back to the
        # caller-provided default.
        return backend_settings.get(key, default)

    fake_backend.setting.side_effect = _setting
    return fake_backend
|
adfddf86713418255690a15b6a2b28ed88585b82
| 424,624 |
def _calculate_cut(lemmawords, stems):
"""Count understemmed and overstemmed pairs for (lemma, stem) pair with common words.
:param lemmawords: Set or list of words corresponding to certain lemma.
:param stems: A dictionary where keys are stems and values are sets
or lists of words corresponding to that stem.
:type lemmawords: set(str) or list(str)
:type stems: dict(str): set(str)
:return: Amount of understemmed and overstemmed pairs contributed by words
existing in both lemmawords and stems.
:rtype: tuple(float, float)
"""
umt, wmt = 0.0, 0.0
for stem in stems:
cut = set(lemmawords) & set(stems[stem])
if cut:
cutcount = len(cut)
stemcount = len(stems[stem])
# Unachieved merge total
umt += cutcount * (len(lemmawords) - cutcount)
# Wrongly merged total
wmt += cutcount * (stemcount - cutcount)
return (umt, wmt)
|
a0d2bc7709237f425516e7930e6d34dc1cff4de5
| 213,870 |
def proba_func(threshold):
    """To associate an alpha to an empirical distribution function.

    Parameters
    ----------
    threshold : float
        The threshold of the target probability.

    Returns
    -------
    p_func : callable
        The probability function.
    """
    def p_func(x, axis=1):
        # Fraction of entries at or above the threshold along *axis*.
        exceeds = (x >= threshold)
        return exceeds.mean(axis=axis)
    return p_func
|
4e8a079d8cd8157de82afc3b2dc40227090209e6
| 249,859 |
import random
def qty_discrimination(list_of_numbers):
    """Choose which value, "a" or "b", is greater.

    Pass in a list of int values to compare.
    :param list_of_numbers: List of ints to compare,
        usually provided as range()
    :return: String of "correct" or "incorrect" to be used by score function
        Returns "exit" if user does not enter "a" or "b"
    """
    first, second = random.choices(list_of_numbers, k=2)
    # "b" wins only on a strict inequality; ties count as "a".
    correct = "b" if first < second else "a"
    allowed = ("a", "b")
    response = input(f"Which is larger?\na = {first} or b = {second}\n")
    return correct, allowed, response
|
0ea7daebf75e6ea63f8ec9cc96650d870c0f0303
| 191,349 |
import re
def is_valid_version(v):
    """ Check if a string is a valid version
    >>> is_valid_version('20180525-1202')
    True
    >>> is_valid_version('20180525')
    False
    >>> is_valid_version('201805251221')
    False
    """
    # Fix: raw string for the regex — '\A' and '\Z' are invalid escape
    # sequences in a plain string literal (DeprecationWarning, and a
    # SyntaxError in future Python). bool() replaces the if/return pair.
    return bool(re.match(r'\A\d{8}-\d{4}\Z', v))
|
7dfd30de4e4d67c81bf9cf788df26839938ef88e
| 193,570 |
def has_replication_policy(sysmeta_pyxb):
    """Check if SystemMetadata has ReplicationPolicy section.

    Args:
      sysmeta_pyxb: SystemMetadata PyXB object

    Returns: bool
      ``True`` if SystemMetadata includes the optional ReplicationPolicy section.
    """
    # Missing or falsy attribute -> False; any truthy policy -> True.
    policy = getattr(sysmeta_pyxb, "replicationPolicy", None)
    return bool(policy)
|
870e1abed4905842d13ddef6e8fd26729dc762b7
| 590,637 |
def _read_file(filename: str) -> str:
"""
读取文件的内容,并将文件中的制表符、换行符以及中文换行符替换为空字符。
:param filename:
:return:
"""
with open(filename, mode="r", encoding="UTF-8") as reader:
"""
1.不间断空格\u00A0,主要用在office中,让一个单词在结尾处不会换行显示,快捷键ctrl+shift+space ;
2.半角空格(英文符号)\u0020,代码中常用的;
3.全角空格(中文符号)\u3000,中文文章中使用;
"""
return reader.read().replace("\n", "").replace("\t", "").replace("\u3000", "")
|
bf73a087e350972d30cc38b249affed935794b4c
| 205,634 |
def _ss(data):
"""
MicroPy doesn't have statistics library, so we need this
Return sum of square deviations of sequence data.
"""
c = sum(data) / len(data)
ss = sum((x - c) ** 2 for x in data)
return ss
|
3a5e833fbbbc4a64f06fd30d3464331d308d7609
| 203,061 |
def lazy_property(func):
    """Decorator that makes a property lazy-evaluated"""
    # The cached value is stored on the instance under this name, so a
    # second access skips the computation entirely.
    cache_attr = "_lazy_" + func.__name__

    @property
    def _lazy_property(self):
        """Compute the value on first access, then serve it from cache."""
        if not hasattr(self, cache_attr):
            # First access: run the wrapped function and memoise the
            # result on the instance.
            setattr(self, cache_attr, func(self))
        return getattr(self, cache_attr)

    return _lazy_property
|
04b5262135a075441f32dff186a785574b4cad55
| 142,191 |
def rpad(ls, size, val):
    """Right-pads a list with a prescribed value to a set length."""
    # A negative count yields an empty padding list, so oversized inputs
    # are returned unchanged (as a copy).
    padding = [val] * (size - len(ls))
    return ls + padding
|
9cbc1a2e925efd90c75ead2a108ff19436441682
| 685,809 |
def fix_ncesid(ncesid, mode):
    """
    Applies standard formatting (zero padding and typecasting) to
    both schools' and districts' NCES IDs.

    Args:
        ncesid (int): Target NCES ID to fix (e.g. 100005).
        mode (str): Should be either "school" or "district".

    Returns:
        str: Standardized NCES ID (no zero padding is performed if an
            unknown mode is provided).
    """
    widths = {"school": 12, "district": 7}
    return str(ncesid).zfill(widths.get(mode, 0))
|
02d0db477039ef30beefff5470ec3781de9fc052
| 684,766 |
import calendar
def get_timestamps(series):
    """
    Create a timestamp for every created_at date and set the seconds to zero.
    """
    timestamps = []
    for item in series:
        # Zero the seconds, then convert the UTC time tuple to epoch
        # seconds and scale to milliseconds.
        snapped = item.created_at.replace(second=0)
        timestamps.append(1000 * calendar.timegm(snapped.timetuple()))
    return timestamps
|
5f7149568c8183bee85b5588c3a925adcbbaf4d5
| 267,611 |
def drop(n, xs):
    """
    drop :: Int -> [a] -> [a]

    Return the suffix of xs after the first n elements, or [] when n
    exceeds the length of xs (mirrors Haskell's ``drop``).
    """
    # Python slicing already clamps out-of-range indices, so no bounds
    # check is needed.
    suffix = xs[n:]
    return suffix
|
e9261686022f5419edade3b47e82c68bd52b5cd8
| 697,737 |
from typing import Tuple
def split_off_address(line: str) -> Tuple[str, str]:
    """Split e.g. 'beqz $r0,1f0' into 'beqz $r0,' and '1f0'."""
    pieces = line.split(",")
    if len(pieces) < 2:
        # No comma: fall back to splitting on the first whitespace run.
        pieces = line.split(None, 1)
        if len(pieces) < 2:
            # Single token: the address part is empty.
            pieces.append("")
    # Cut point measured from the end so leading whitespace of the
    # final piece stays with the prefix.
    split_at = len(line) - len(pieces[-1].strip())
    return line[:split_at], line[split_at:]
|
8c86eb4f02f0cb51e583099a1d37a86ec748cac8
| 279,979 |
def zzx_neg(f):
    """Negate a polynomial in Z[x] coefficient-wise."""
    return list(map(lambda coeff: -coeff, f))
|
c813597dc9540c8d85221352da10db894de4aa4c
| 18,877 |
def filter_packages(packages: list, key: str) -> list:
    """Return only the packages whose "category" equals *key*."""
    return list(filter(lambda pkg: pkg["category"] == key, packages))
|
46f11f5a8269eceb9665ae99bdddfef8c62295a2
| 705,075 |
import re
def removeHex(text):
    """Remove hex from a string"""
    # Interpret backslash escapes (e.g. '\\x41' -> 'A'), then strip any
    # remaining non-ASCII characters.
    decoded = text.encode().decode('unicode_escape')
    return re.sub(r'[^\x00-\x7f]', r'', decoded)
|
bc3c779143171b1f0fb121181c029fd04978fd2a
| 195,031 |
def nearest_point_on_line(point, line):
    """Return the nearest point on a line
    """
    # Project the point onto the line to get its distance along the
    # line, then interpolate back to coordinates.
    distance_along = line.project(point)
    return line.interpolate(distance_along)
|
e100855ea775c43d54bc9e4256c9757386444ee6
| 572,228 |
def group_names(groups, group_id):
    """
    Return English and Arabic names of group (either Office or Subconstituency) with matching id,
    or None if not found.
    """
    match = next((g for g in groups if g['code'] == group_id), None)
    if match is not None:
        return match['english_name'], match['arabic_name']
    # Implicit None when no group matches, as documented.
|
8f26f641c9f99a6d813e550751735eaf36f563f3
| 232,575 |
def analyze_headers(headers):
    """Gets the list of TMT channel headers."""
    # Prefer Proteome-Discoverer-style "Abundance ..." columns.
    channels = [h for h in headers if h.startswith('Abundance')]
    if not channels:
        # Fall back to raw mass-tag names (126..134 style labels).
        channels = [h for h in headers if h.startswith(('12', '13'))]
    if not channels:
        print('...WARNING: no reporter ion channels were found')
    return channels
|
71898d9e6c07b429bea38339962ff489581f1c24
| 463,506 |
def load_file(filepath):
    """Helper function to read a file and return the data."""
    # Lower-case the whole text, then split on any whitespace.
    with open(filepath, "r") as handle:
        contents = handle.read()
    return contents.lower().split()
|
878755a9897daf5ea24322f84c992fb08cb91524
| 329,366 |
def format_str_to_nba_response_header(s: str):
    """
    Format a string the way stats.nba response column headers appear.

    When parsed from json, stats.nba response columns come out in
    ANGRY_SNAKE_CASE, i.e. fully upper-cased.

    >>> format_str_to_nba_response_header('game_date')
    'GAME_DATE'
    >>> format_str_to_nba_response_header('player_id')
    'PLAYER_ID'
    >>> format_str_to_nba_response_header('GAME_ID')
    'GAME_ID'
    >>> format_str_to_nba_response_header('Season')
    'SEASON'
    """
    header = s.upper()
    return header
|
43d246668e940b208e36b631434b04e33358641e
| 455,441 |
import re
def extract_regexp_groups(regexp, string):
    """
    Return a dict of all named groups matched in *string* by the given
    regular expression, or None when the pattern does not match.
    NB: regexp must be provided with group names (?P<group>)
    """
    match = re.match(regexp, string)
    return match.groupdict() if match else None
|
aa6cb38eb3eb2e4616ecd85a6a85c2ad0c7e3c36
| 278,452 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.