content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def calculate_error_rates(point_to_weight, classifier_to_misclassified):
    """Given a dictionary mapping training points to their weights, and another
    dictionary mapping classifiers to the training points they misclassify,
    returns a dictionary mapping classifiers to their error rates."""
    # A classifier's error rate is the total weight of the points it gets wrong.
    return {
        classifier: sum(point_to_weight[point] for point in misclassified)
        for classifier, misclassified in classifier_to_misclassified.items()
    }
|
fad9b2ce0a1811536de4c085be0a56ab1eb253c6
| 159,525 |
from datetime import datetime
def make_message(name: str) -> str:
    """
    Build a greeting containing *name* and the current wall-clock time.

    :param name: str Which is the name for the message to refer to
    :returns: str The message containing the name and the HH:MM time
    """
    now = datetime.now()
    # Format spec inside the f-string is equivalent to strftime("%H:%M").
    return f"Hello {name}, the time is {now:%H:%M}, how are you today?"
|
ec59e113dc4ecce62ddb7948d99ec3a8a2f51a1a
| 214,499 |
from pathlib import Path
def cookiecutter_cache_path(template):
    """
    Determine the cookiecutter template cache directory given a template URL.

    This returns a path whether or not anything is actually cached there.

    :param template: The template to use. This can be a filesystem path or
        a URL.
    :returns: The path that cookiecutter would use for the given template name.
    """
    # Last URL/path component, ignoring any trailing slash, minus a .git suffix.
    stripped = template.rstrip('/')
    repo_name = stripped.rsplit('/', 1)[-1]
    cache_name = repo_name.rsplit('.git')[0]
    return Path.home() / '.cookiecutters' / cache_name
|
cbdc72195bf47fb1bb91368ac2bbca3890775a60
| 7,807 |
def calc_get_item(func, in_data, **kwargs):
    """Cost model for chainer's GetItem: extracting part of an array.

    Slicing is treated as zero FLOPs; memory read and write are both the
    size of the output view, since only the selected elements move.

    FLOPs: 0; mread: |y|; mwrite: |y|.
    params: `slices` - list of slices; a slice is an int or a 3-tuple.
    """
    [x] = in_data
    y = x[func.slices]
    # Normalize slice objects to plain (start, stop, step) tuples so the
    # params dict is serializable; integer indices pass through unchanged.
    norm_slices = []
    for s in func.slices:
        if type(s) is slice:
            norm_slices.append((s.start, s.stop, s.step))
        else:
            norm_slices.append(s)
    return (0, y.size, y.size, {'slices': norm_slices})
|
b83eaaab84871099f03982b37c763239b7d6ecb6
| 119,738 |
def period(freq):
    """
    Convert an array (or scalar) of frequencies to periods, element-wise.
    """
    reciprocal = 1.0 / freq
    return reciprocal
|
dcaa701006e3c517696771927e327bf5b94f6d4d
| 163,512 |
def is_prepositional_tag(nltk_pos_tag):
    """
    Return True iff the given NLTK POS tag marks a preposition ("IN").
    """
    return "IN" == nltk_pos_tag
|
746a1439613a2203e6247924fe480792e171eb7b
| 20,798 |
def rename_clusters(name_dict, df, column_name):
    """
    Rename cluster labels in one column of a dataframe.

    :param name_dict: {'original_cluster_name': 'new_cluster_name'} mapping
    :param df: input dataframe (left unmodified; a renamed copy is returned)
    :param column_name: name of the column holding the cluster labels
    :return: a copy of *df* with the replacements applied

    Note: this uses pandas ``str.replace`` (substring semantics), so a key
    that occurs inside a longer label is rewritten there as well.
    """
    renamed = df.copy()
    for old_name, new_name in name_dict.items():
        renamed[column_name] = renamed[column_name].str.replace(old_name, new_name)
    return renamed
|
327fc288bf4cab3e5de28bdef32d8dd3a02b403a
| 346,858 |
from pathlib import Path
from typing import List
def read_csv_to_list(path: Path) -> List[str]:
    """Read a CSV file and collect its non-empty comma-separated fields.

    Args:
        path (Path): CSV file path
    Returns:
        List[str]: every non-empty field, in file order
    """
    fields: List[str] = []
    with open(path) as handle:
        for raw_line in handle:
            parts = raw_line.strip().split(",")
            # Empty fields (e.g. from trailing commas) are dropped.
            fields.extend(part for part in parts if part)
    return fields
|
1bb6a4ff1dca94451970df96275d40f52672f4cf
| 385,731 |
def expand(path: list, conc_var: list) -> list:
    """
    Interleave a path of formula ids with each list of connecting variables.

    expand( path=[1, 2, 3], conc_var=[['a', 'c'], ['b', 'c']] )
    ---> [[1, 'a', 2, 'c', 3], [1, 'b', 2, 'c', 3]]
    gives the detailed path!

    Parameters
    ----------
    path : list
        The ids of formulas that form a path.
    conc_var : list
        The connected variables between two formulas; each entry is assumed
        to have exactly ``len(path) - 1`` items.

    Returns
    -------
    list
        One detailed (interleaved) path per entry of ``conc_var``.
    """
    # Direct forward interleave replaces the original's destructive
    # pop-from-copies-then-reverse construction, which was hard to follow.
    detailed_paths = []
    for variables in conc_var:
        combined = []
        for node, var in zip(path, variables):
            combined.append(node)
            combined.append(var)
        combined.append(path[-1])  # zip stops one short of the final node
        detailed_paths.append(combined)
    return detailed_paths
|
33be48c324570d50ac62db2f25004f3ab8385fef
| 83,240 |
def get_interface_state_from_api(data, index=-1, name="_"):
    """Process data from sw_interface_dump API and return the details record
    of the interface matching the given name or index.

    :param data: Output of interface dump API call.
    :param index: Index of the interface to find.
    :param name: Name of the interface to find.
    :type data: list
    :type index: int
    :type name: str
    :returns: Details record of the specified interface.
    :raises ValueError: If neither an index nor a name was provided.
    :raises RuntimeError: If the interface is not found.
    """
    if index == -1 and name == "_":
        raise ValueError("Provide either an interface index or a name.")
    for iface in data:
        details = iface["sw_interface_details"]
        if details["sw_if_index"] == int(index) or details["interface_name"] == name:
            return details
    raise RuntimeError(
        "Interface with index {index} or name {name} not found in dump.\n "
        "Dumped data:\n {data}".format(index=index, name=name, data=data))
|
acc8ff81fa177be0229aca8bd5ee71ebb36dd3a7
| 404,183 |
def preboil_grav(target_og: float,
                 preboil_vol: float = 7,
                 postboil_vol: float = 6):
    """Compute the pre-boil gravity to verify before concluding the mash.

    Args:
        target_og: Target original gravity for primary fermentation e.g. 1.040
        preboil_vol: Pre-boil volume in gal
        postboil_vol: Post-boil wort volume collected for primary fermentation
    Returns:
        Expected pre-boil specific gravity (e.g. 1.034).
    """
    # Work in "gravity points" (1.040 -> 40), dilute by the volume ratio,
    # then convert back to a specific gravity.
    gravity_points = (target_og - 1.0) * 1000
    preboil_points = (postboil_vol * gravity_points) / preboil_vol
    return preboil_points / 1000 + 1.0
|
aab73da4e2c7060851db9dad9420c08a10b25ca0
| 622,305 |
def parse_size(size):
    """
    Convert a size spec such as '800x600-fit' into ([800, 600], 'fit').

    The strings in the error messages are really for the developer so they
    don't need to be translated.
    """
    pieces = size.split('-')
    if len(pieces) != 2:
        raise AttributeError(
            'Size must be specified as 000x000-method such as 800x600-fit.')
    dimensions, method = pieces
    if method not in ('fit', 'thumb'):
        raise AttributeError(
            'The method must either be "fit" or "thumb", not "%s".' % method)
    try:
        size_ints = [int(part) for part in dimensions.split('x')]
    except ValueError:
        raise AttributeError(
            'Size must be specified as 000x000-method such as 800x600-fit.')
    if len(size_ints) != 2:
        raise AttributeError(
            'Size must be specified as 000x000-method such as 800x600-fit.')
    if min(size_ints) <= 0:
        raise AttributeError(
            'Height and width for size must both be greater than 0.')
    return size_ints, method
|
7a3ee86a48e320df70dec8f2a8fcb72bbaf377fe
| 44,062 |
def prepare_deck(deck, number_of_cards):
    """Prepare sub_deck, from deck leaving starting number_of_cards cards.

    :param deck: list of cards; not modified.
    :param number_of_cards: how many leading cards to keep.
    :raises ValueError: if number_of_cards is negative or exceeds len(deck).
    :return: a new list with the first number_of_cards cards of deck.
    """
    # Slicing replaces the original pop-until-length loop, which never
    # terminated when number_of_cards exceeded the deck size; that case is
    # now rejected explicitly instead of hanging.
    if not 0 <= number_of_cards <= len(deck):
        raise ValueError(
            "number_of_cards must be between 0 and len(deck) inclusive.")
    return deck[:number_of_cards]
|
ee6c8b3c63d1e4ee1befe6384245ef692ba351e1
| 316,628 |
def get_catalog_record_preferred_identifier(cr):
    """Get preferred identifier for a catalog record.

    Args:
        cr (dict): A catalog record.
    Returns:
        str: The preferred identifier of the dataset, or '' if not found.
    """
    research_dataset = cr.get('research_dataset', {})
    return research_dataset.get('preferred_identifier', '')
|
c3ab1fe64c07b63760829124769fb3e39519c083
| 96,615 |
import pickle
def encode_message(topic, data):
    """Encode a message for sending via 0MQ

    Given a string topic name and a pickle-able data object, encode and prep
    the data for sending via `send_multipart`

    Returns a list of the form:
        [
            Bytes object of String (UTF-8),
            Pickled data object
        ]

    If encoding fails None will be returned.
    """
    try:
        return [bytes(topic, 'utf-8'), pickle.dumps(data)]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any encoding failure still yields None per
        # the documented contract.
        return None
|
73a083cc0edc574baf9843f245b3ee3a42ab35a0
| 569,679 |
import pathlib
from typing import List
def find_all_detectors(rootdir: pathlib.Path,
                       prefix: str = 'SingleCell') -> List[str]:
    """ Find all the detectors under the root directory

    If your directories look like this:

    * ``/data/Experiments/2017-01-30/SingleCell-foo``
    * ``/data/Experiments/2017-01-30/SingleCell-bar``

    Then this will find detectors: ``['foo', 'bar']``

    :param Path rootdir:
        The experiment to load (e.g. '/data/Experiments/2017-01-30')
    :param str prefix:
        The prefix for subdirectories to look for detectors
    :returns:
        A list of detectors available for this experiment
    """
    rootdir = pathlib.Path(rootdir)
    if not rootdir.is_dir():
        raise OSError(f'Cannot find experiment: {rootdir}')
    detectors = []
    for entry in rootdir.iterdir():
        if not (entry.name.startswith(prefix) and entry.is_dir()):
            continue
        # "<prefix>-<detector>" yields the detector name; a bare prefix
        # directory contributes None.
        head, sep, detector = entry.name.partition('-')
        detectors.append(detector if sep else None)
    if not detectors:
        raise OSError(f'No {prefix} folders found under {rootdir}')
    return detectors
|
72e9deb8ae931373b80c4feb7c465ac3d10c60ed
| 168,169 |
from typing import Iterable
import functools
import operator
def product(iterable: Iterable):
    """
    Multiply all elements of *iterable* together and return the result.

    An empty iterable yields 1 (the multiplicative identity).

    ATTRIBUTION: approach from Raymond Hettinger on SO
    https://stackoverflow.com/questions/595374/whats-the-function-like-sum-but-for-multiplication-product
    """
    result = 1
    for value in iterable:
        result = operator.mul(result, value)
    return result
|
9662680401fb63c96cef271959964039d08aa20c
| 638,967 |
def remove_spawning_profile(intersection, spawning_profile):
    """
    Remove a spawning profile from an intersection.

    :param intersection: intersection to update
    :param spawning_profile: spawning profile to be removed from intersection
    :type intersection: Intersection
    :type spawning_profile: SpawningProfile
    :return: Intersection with the profile removed from its spawning profile list
    """
    # Delegate to the Intersection's own removal logic.
    updated = intersection.remove_spawning_profile(spawning_profile)
    return updated
|
60d47e6eef993cd5cdab8e19a911b12874776c1e
| 531,720 |
import io
def formatted_text_to_markdown(ft):
    """
    Simple method to convert formatted text (pairs of (format, text)) to
    markdown. Does not escape special characters.
    """
    # Each supported format maps to the marker written on both sides of
    # the text; unknown formats are an error.
    markers = {"": "", "underline": "__", "bold": "**", "italic": "*"}
    buf = io.StringIO()
    for fmt, text in ft:
        if fmt not in markers:
            raise RuntimeError(f"unknown formatting: {fmt}")
        mark = markers[fmt]
        buf.write(f"{mark}{text}{mark}")
    return buf.getvalue()
|
c6d653e6b3617eef7995860826f17fd815f29f80
| 278,330 |
import math
def transform(anchor_coords, reg_targets):
    """
    Apply the bounding-box regression transform to an anchor box.

    :param anchor_coords: anchor coordinates [x1, y1, x2, y2]
    :param reg_targets: regression parameters [tx, ty, tw, th]
    :return: coordinates of the transformed box (x1, y1, x2, y2)
    """
    ax1, ay1, ax2, ay2 = anchor_coords
    tx, ty, tw, th = reg_targets
    anchor_w = ax2 - ax1
    anchor_h = ay2 - ay1
    # tx/ty shift the anchor center in units of anchor size; tw/th scale
    # the size exponentially.
    center_x = tx * anchor_w + (ax1 + ax2) / 2
    center_y = ty * anchor_h + (ay1 + ay2) / 2
    new_w = math.exp(tw) * anchor_w
    new_h = math.exp(th) * anchor_h
    left = center_x - new_w / 2
    top = center_y - new_h / 2
    return left, top, left + new_w, top + new_h
|
cf9834ccb55abeeef403b85bcd74d8a9705072e6
| 391,949 |
def preloader(align="center"):
    """
    Return the template context for a quick preloader snippet.

    :param align: string equal to "left", "center" (default), or "right"
    :return: dict carrying the alignment for the preloader template
    """
    return dict(align=align)
|
94fe0fa80ba85155d367f4402731e189b01a84c8
| 435,132 |
def parse_labels_voc(label_file):
    """
    Definition: Parses a VOC-style label file to extract labels and bounding
        box coordinates.

    Parameters: label_file - path to the file listing labels in images
    Returns: all_labels - list of labels for objects in the image
             all_coords - list of [xmin, ymin, xmax, ymax] int coordinates
    """
    all_labels = []
    all_coords = []
    # Context manager guarantees the handle is closed even if parsing fails
    # (the original opened without closing on error paths).
    with open(label_file) as lfile:
        for line in lfile:
            fields = line.split(" ")
            all_labels.append(fields[0])
            # Fields 4-7 are xmin, ymin, xmax, ymax as floats; truncate to
            # int once (the original converted to int twice).
            all_coords.append([int(float(value)) for value in fields[4:8]])
    return all_labels, all_coords
|
a5ed15d8896a76e143cf106ca22c472131185fde
| 205,083 |
def finish(response):
    """
    Transitions the execution to completion.

    Args:
        response: The object to return to the execution's invoker.

    Returns:
        A deferred action: calling it with a state completes that state
        with the bound response.
    """
    return lambda state: state.finish(response)
|
5410fa6d5318acb238f9b91c885ec99b697997de
| 349,477 |
def get_location_string(
        feature_text: str) -> str:
    """
    Extract the location string of a GenBank-style feature block.

    Args:
        feature_text: endswith '\n'
            For example:
            '     CDS             complement(join(<360626..360849,360919..360948,\n'
            '                     361067..361220,361292..361470,361523..>361555))\n'
            '                     /gene="rIIA"\n'
    Returns:
        For the example:
        'complement(join(<360626..360849,360919..360948,361067..361220,361292..361470,361523..>361555))'
    """
    indent = ' ' * 21
    # Qualifier lines start with 21 spaces + '/'; everything before the
    # first such line is the feature key plus the (possibly wrapped) location.
    location_block = feature_text[:feature_text.find(indent + '/')]
    # Fold continuation lines back into a single line.
    single_line = location_block.replace('\n' + indent, '')
    # Drop the feature key (e.g. 'CDS'); the second token is the location.
    return single_line.strip().split()[1]
|
46b0cd0176f57e764143a9d8114250185108e9e5
| 226,739 |
def squareMean3(array, centerPixel):
    """
    Kernel neighborhood function for focal map algebra. Returns the mean of
    the 3x3 square neighborhood centred on centerPixel.

    @param array - array from which to retrieve the neighborhood kernel
    @param centerPixel - (i,j) coordinates of the kernel's center pixel
    @return - mean of the 3x3 square neighborhood around centerPixel
    """
    row0 = centerPixel[0] - 1
    col0 = centerPixel[1] - 1
    window = array[row0:row0 + 3, col0:col0 + 3]
    return window.mean()
|
e017f38483fdc72163adcd65ef9ef4900fa8b350
| 19,802 |
def _get_provisioning_state(instance):
"""Return the provisioning state from the instance result if present."""
if instance.get('provisioning_state'):
return instance.get('provisioning_state')
elif instance.get('properties'):
return instance.get('properties').get('provisioning_state')
else:
return
|
df98a713cb03afa76a0327c52949710641778c94
| 491,565 |
import re
def get_master_names(desired_master_state, name_regex):
    """Returns masters found in <desired_master_state> that match <name_regex>.

    Args:
        desired_master_state: A "desired_master_state" object, e.g. as
            returned by desired_state_parser
        name_regex: regex matched against master names (an optional
            "master." prefix on the name is tolerated)

    Returns:
        [str1, str2, ...] All matching masters found in <desired_master_state>
    """
    # Allow an optional "master." prefix on the configured names.
    pattern = re.compile(r'(master\.)?' + name_regex)
    candidate_names = desired_master_state["master_states"].keys()
    return [name for name in candidate_names if pattern.match(name)]
|
9343964103d1e93ff0d6de7d019c1fd206e84d3b
| 696,323 |
import re
def is_passport_valid(p):
    """
    Validate a passport field dict against these rules:

    byr (Birth Year) - four digits; at least 1920 and at most 2002.
    iyr (Issue Year) - four digits; at least 2010 and at most 2020.
    eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
    hgt (Height) - a number followed by either cm or in:
        If cm, the number must be at least 150 and at most 193.
        If in, the number must be at least 59 and at most 76.
    hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
    ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
    pid (Passport ID) - a nine-digit number, including leading zeroes.
    cid (Country ID) - ignored, missing or not.

    Returns a bool (the original could return a ``re.Match`` when valid).
    Missing fields or malformed numbers yield False.
    """
    try:
        yrs_valid = (
            1920 <= int(p['byr']) <= 2002 and
            2010 <= int(p['iyr']) <= 2020 and
            2020 <= int(p['eyr']) <= 2030
        )
        if p['hgt'][-2:] == 'in':
            hgt_valid = 59 <= int(p['hgt'][0:-2]) <= 76
        else:  # treat anything else as centimetres, as the data guarantees cm
            hgt_valid = 150 <= int(p['hgt'][0:-2]) <= 193
        # fullmatch avoids the '^...$' pitfall where '$' also matches just
        # before a trailing newline.
        hcl_valid = re.fullmatch('#[a-f0-9]{6}', p['hcl']) is not None
        ecl_valid = p['ecl'] in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
        pid_valid = re.fullmatch('[0-9]{9}', p['pid']) is not None
        return yrs_valid and hgt_valid and hcl_valid and ecl_valid and pid_valid
    except Exception:
        return False
|
3c2889b416db3f6344a3c5767673e5b3581dda85
| 312,223 |
import math
def slopeAngle(p1, p2):
    """Returns the minimum angle the line connecting two points makes with the x-axis.

    Args:
        p1 ([tuple]): point p1
        p2 ([tuple]): point p2

    Returns:
        [float]: minimum angle, in degrees within [0, 90], between the
            x-axis and the line segment joining p1 and p2
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    if dx == 0:
        # Vertical segment: the original mapped the ZeroDivisionError to a
        # slope of 0 and so returned 0 degrees, but a vertical line makes a
        # 90-degree angle with the x-axis. Two identical points still give 0.
        return 90.0 if dy else 0.0
    return abs(math.degrees(math.atan(dy / dx)))
|
12252bfb196aefcf50f38a7459a5e50009b26817
| 152,613 |
def get_usrid_from_league(league):
    """
    Collect the unique user ids appearing in a league.

    :param league: LeagueDto: an object contains league information
    :return: list of unique player/team ids (order unspecified)
    """
    unique_ids = {entry['playerOrTeamId'] for entry in league['entries']}
    return list(unique_ids)
|
ce91f7f1f0afcc064b8f59c90721b60be47ab4b9
| 13,016 |
def _event_QSlider(self):
"""
Return value change signal for QSlider
"""
return self.valueChanged
|
f7fce1b98d0814fa90144df8356a0ea4e76c89b3
| 151,886 |
def vertical_strip_gridmap(height, alternating=True):
    """
    Build an (x, y) -> pixel-number mapper for a grid wired as vertical strips.

    :param height: grid height in pixels
    :param alternating: Whether or not the lines in the grid run alternate
        directions in a zigzag
    :return: mapper(x, y)
    """
    def mapper(x, y):
        column_base = x * height
        if alternating and x % 2:
            # Odd columns run bottom-to-top in zigzag wiring.
            return column_base + height - 1 - y
        return column_base + y
    return mapper
|
9042071bfb440db663a3a0c123b79706cb051823
| 122,612 |
def parse_port_range(port_range_or_num):
    """Parses a port range formatted as 'N', 'N-M', or 'all', where
    N and M are integers, into a (minimum, maximum) port tuple."""
    spec = port_range_or_num
    if spec.lower() == 'all':
        return 0, 65535
    # Try the single-number form first (this also accepts negatives, so the
    # split below only sees genuine 'N-M' ranges).
    try:
        port = int(spec)
    except ValueError:
        low, high = spec.split('-', maxsplit=1)
        return int(low), int(high)
    return port, port
|
e2ed5d239e5961a8ddce1c19b8705391788315a1
| 369,178 |
from typing import List
from typing import Counter
def get_base_freq(reads: List[str]):
    """
    Returns the aggregate frequency of bases in the sequencing reads

    >>> get_base_freq(["NAACGTTA"])
    Counter({'A': 3, 'T': 2, 'N': 1, 'C': 1, 'G': 1})
    >>> get_base_freq(["AACGTTA", "CGCGTTT"])
    Counter({'T': 5, 'A': 3, 'C': 3, 'G': 3})
    """
    counts = Counter()
    for read in reads:
        counts.update(read)
    return counts
|
60b5dc85e50ad6ea6a99ea949322bd4828a12cc3
| 571,900 |
from typing import Dict
from typing import Any
def default_style() -> Dict[str, Any]:
    """Define default values of the pulse stylesheet.

    Returns a flat mapping of dotted option names to defaults, grouped as
    ``formatter.*`` (figure geometry, colors, transparency, draw layers,
    margins, label offsets, text sizes, line widths/styles, channel scaling,
    axis breaks, control flags, unicode/LaTeX symbols), ``generator.*``
    (callback lists, empty by default) and ``layout.*`` (layout callables,
    ``None`` by default).
    """
    return {
        # --- general figure geometry ---
        'formatter.general.fig_width': 13,
        'formatter.general.fig_chart_height': 1.5,
        'formatter.general.vertical_resolution': 1e-6,
        'formatter.general.max_scale': 100,
        # --- colors; waveform entries are [fill, edge] pairs per channel kind ---
        'formatter.color.waveforms': {
            'W': ['#648fff', '#002999'],
            'D': ['#648fff', '#002999'],
            'U': ['#ffb000', '#994A00'],
            'M': ['#dc267f', '#760019'],
            'A': ['#dc267f', '#760019']
        },
        'formatter.color.baseline': '#000000',
        'formatter.color.barrier': '#222222',
        'formatter.color.background': '#f2f3f4',
        'formatter.color.fig_title': '#000000',
        'formatter.color.annotate': '#222222',
        'formatter.color.frame_change': '#000000',
        'formatter.color.snapshot': '#000000',
        'formatter.color.axis_label': '#000000',
        'formatter.color.opaque_shape': ['#f2f3f4', '#000000'],
        # --- alpha (transparency) per element ---
        'formatter.alpha.fill_waveform': 0.3,
        'formatter.alpha.baseline': 1.0,
        'formatter.alpha.barrier': 0.7,
        'formatter.alpha.opaque_shape': 0.7,
        # --- z-order draw layers (higher draws on top) ---
        'formatter.layer.fill_waveform': 2,
        'formatter.layer.baseline': 1,
        'formatter.layer.barrier': 1,
        'formatter.layer.annotate': 5,
        'formatter.layer.axis_label': 5,
        'formatter.layer.frame_change': 4,
        'formatter.layer.snapshot': 3,
        'formatter.layer.fig_title': 6,
        # --- margins and label offsets (axis/figure units) ---
        'formatter.margin.top': 0.5,
        'formatter.margin.bottom': 0.5,
        'formatter.margin.left_percent': 0.05,
        'formatter.margin.right_percent': 0.05,
        'formatter.margin.between_channel': 0.2,
        'formatter.label_offset.pulse_name': 0.3,
        'formatter.label_offset.chart_info': 0.3,
        'formatter.label_offset.frame_change': 0.3,
        'formatter.label_offset.snapshot': 0.3,
        # --- text sizes (points) ---
        'formatter.text_size.axis_label': 15,
        'formatter.text_size.annotate': 12,
        'formatter.text_size.frame_change': 20,
        'formatter.text_size.snapshot': 20,
        'formatter.text_size.fig_title': 15,
        'formatter.text_size.axis_break_symbol': 15,
        # --- line widths and styles (matplotlib conventions) ---
        'formatter.line_width.fill_waveform': 0,
        'formatter.line_width.axis_break': 6,
        'formatter.line_width.baseline': 1,
        'formatter.line_width.barrier': 1,
        'formatter.line_width.opaque_shape': 1,
        'formatter.line_style.fill_waveform': '-',
        'formatter.line_style.baseline': '-',
        'formatter.line_style.barrier': ':',
        'formatter.line_style.opaque_shape': '--',
        # --- per-channel-type vertical scaling and spacing ---
        'formatter.channel_scaling.drive': 1.0,
        'formatter.channel_scaling.control': 1.0,
        'formatter.channel_scaling.measure': 1.0,
        'formatter.channel_scaling.acquire': 1.0,
        'formatter.channel_scaling.pos_spacing': 0.1,
        'formatter.channel_scaling.neg_spacing': -0.1,
        # --- opaque-shape box dimensions ---
        'formatter.box_width.opaque_shape': 150,
        'formatter.box_height.opaque_shape': 0.5,
        # --- axis-break thresholds (time units) ---
        'formatter.axis_break.length': 3000,
        'formatter.axis_break.max_length': 1000,
        # --- feature toggles ---
        'formatter.control.apply_phase_modulation': True,
        'formatter.control.show_snapshot_channel': True,
        'formatter.control.show_acquire_channel': True,
        'formatter.control.show_empty_channel': True,
        'formatter.control.auto_chart_scaling': True,
        'formatter.control.axis_break': True,
        # --- symbols used in plain-text vs LaTeX rendering ---
        'formatter.unicode_symbol.frame_change': u'\u21BA',
        'formatter.unicode_symbol.snapshot': u'\u21AF',
        'formatter.unicode_symbol.phase_parameter': u'\u03b8',
        'formatter.unicode_symbol.freq_parameter': 'f',
        'formatter.latex_symbol.frame_change': r'\circlearrowleft',
        'formatter.latex_symbol.snapshot': '',
        'formatter.latex_symbol.phase_parameter': r'\theta',
        'formatter.latex_symbol.freq_parameter': 'f',
        # --- generator callbacks (populated by callers) ---
        'generator.waveform': [],
        'generator.frame': [],
        'generator.chart': [],
        'generator.snapshot': [],
        'generator.barrier': [],
        # --- layout callables (None = use library defaults) ---
        'layout.chart_channel_map': None,
        'layout.time_axis_map': None,
        'layout.figure_title': None}
|
cfd9219608e22a5f14a0b823d095beb752e6e8ff
| 556,689 |
def get_intervention_label(method_name, base_intervention_name):
    """
    Maps raw method name to a readable name.

    Args:
        method_name (str): name of the folder in the experiments.
        base_intervention_name (str): filename in `configs/simulation/intervention/` folder.

    Returns:
        (str): a readable name for the intervention
    """
    assert type(method_name) == str, f"improper intervention type: {type(method_name)}"

    # Experimental runs named differently from the intervention config
    # filename are shown verbatim (upper-cased).
    if base_intervention_name != method_name:
        return method_name.upper()

    without_hhld_string = " wo hhld" if "wo_hhld" in method_name else ""
    base_method = method_name.replace("_wo_hhld", "")

    exact_labels = {
        "bdt1": "Binary Digital Tracing" + without_hhld_string,
        "bdt2": "Binary Digital Tracing (recursive)" + without_hhld_string,
        "post-lockdown-no-tracing": "Post Lockdown No Tracing",
        "oracle": "Oracle" + without_hhld_string,
    }
    if base_method in exact_labels:
        return exact_labels[base_method]
    if "heuristic" in base_method:
        version = method_name.replace("heuristic", "")
        return f"Heuristic{version}" + without_hhld_string
    if "transformer" in base_method:
        return "Transformer" + without_hhld_string
    raise ValueError(f"Unknown raw intervention name: {method_name}")
|
ef4c349b0f0127ecf44d8b592846dd634ad3ebb2
| 284,662 |
def fen_to_board_pieces(fen, piece, n=8):
    """
    :param str fen: The FEN string describing a position (state) in a Racing Kings Chess board.
    :param str piece: Which piece type we want to find the positions. E.g. 'K', 'k', 'B', 'b'
    :param int n: Chess game dimension (should always be 8).
    :return: A list consisting of n sublists, each of length n, that depict an n x n grid, where
        each entry is 0, except for the specified piece's positions, that have value 1.
    :rtype: list[list[int]]
    """
    # A FEN record has exactly six space-separated fields; only the first
    # (piece placement) matters here.
    placement, active, castling, en_passant, halfmove, fullmove = fen.split()
    grid = [[0] * n for _ in range(n)]
    for row, rank in enumerate(placement.split('/')):
        col = 0
        for symbol in rank:
            if symbol.isdigit():
                col += int(symbol)  # digits encode runs of empty squares
                continue
            if symbol == piece:
                grid[row][col] = 1
            col += 1
    return grid
|
2fe1126e571312a543e36fe19ae306f01e1d423f
| 122,161 |
def is_title_case(line):
    """Determine if a line is title-case, i.e. the first letter of every
    word longer than three characters is upper-case (short words such as
    articles are exempt). More readable than the equivalent all([]) form."""
    for word in line.split(u' '):
        # The original also tested len(word) > 0, which len(word) > 3 implies.
        if len(word) > 3 and word[0] != word[0].upper():
            return False
    return True
|
e769d589d0f84030768c901a5b5b2285788bdc97
| 11,718 |
def sum_digits(n: int) -> int:
    """
    Given a non-negative integer n, return the sum of its digits.

    Parameters
    ----------
    n : int
        non-negative number to sum the digits of

    Returns
    -------
    int
        sum of the number's digits
    """
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
|
ae8cd8b64d15548b2e5ede1feb40e564d01a1239
| 627,191 |
from typing import Dict
from typing import Any
def replace_module_prefix(
    state_dict: Dict[str, Any], prefix: str, replace_with: str = "", ignore_prefix: str = ""
):
    """
    Remove or rewrite key prefixes in a state_dict, as needed when loading
    models that are not VISSL trained models.

    Specify in ``prefix`` the leading text to strip/replace (first
    occurrence only). Keys starting with ``ignore_prefix`` are dropped
    entirely; an empty ``ignore_prefix`` drops nothing.
    (``ignore_prefix`` was added by DLM contributors.)
    """
    result: Dict[str, Any] = {}
    for key, val in state_dict.items():
        if ignore_prefix and key.startswith(ignore_prefix):
            continue  # skip ignored keys entirely
        if key.startswith(prefix):
            key = key.replace(prefix, replace_with, 1)
        result[key] = val
    return result
|
b8499c818053e7798e9549fbe546bab7d5fbfa84
| 709,520 |
def dict_to_str(d):
    """Pretty print contents of a dict, with fixed width as [KEY] : [VALUE]"""
    lines = [f"{key: <25}: {value}" for key, value in d.items()]
    return "\n".join(lines)
|
b60162c923a0df2ab528f97608d348eeea4438a1
| 608,205 |
import requests
def catfact(context) -> str:
    """Get a random cat fact.

    Performs a blocking HTTP GET to the public catfact.ninja API and returns
    the ``fact`` field of the JSON response. Network or decoding errors
    propagate to the caller.

    :param context: unused in the body; presumably required by the
        command-handler calling convention — TODO confirm against the caller.
    :returns: the cat fact text.
    """
    return requests.get("https://catfact.ninja/fact").json()["fact"]
|
3195dfe01a2e58f27e654e6b0723389d785ab13f
| 398,034 |
import torch
def kullback_leibner(p, q, batch=False):
    """Calculate the Kullback-Leibler divergence of two probability densities.

    Args:
        p, q: density values (must all be in range [0..1])
        batch: if True, sum over dim 0 and divide by the batch size

    Returns:
        element-wise KL terms, or their batch-averaged sum if `batch` is True
    """
    divergence = p * torch.log(p / q)
    if not batch:
        return divergence
    batch_size = divergence.shape[0]
    return torch.sum(divergence, 0) / batch_size
|
8d89ff5a01ee56d0d65e75ba8e8e0f0bdfcad4b5
| 213,884 |
import re
def _is_ignored(filename, ignored_paths):
""" Check if the file should be ignored
:type filename: str
:param filename: Name of file check
:type ignored_paths: list
:param ignored_paths:
List of regular expressions we should validate against
:returns: bool -- True if the file should be ignored
"""
for ignored_path in ignored_paths:
if re.findall(ignored_path, filename):
return True
return False
|
4e5512248627306d9bc7d04157a25fc569130f04
| 574,422 |
def numpy(tensor):
    """Convert a torch.tensor to a flat 1D numpy.ndarray (detached, on CPU)."""
    detached = tensor.cpu().detach()
    return detached.numpy().ravel()
|
cdea8e80a6129ba846d9f69dc4825bf574e688ac
| 41,383 |
import json
def get_tissue_mappings(mapping_filename):
    """Return a dictionary that maps tissue names to anatomical systems and organs"""
    with open(mapping_filename, 'r') as mappings_file:
        # Only the 'tissues' section of the mapping file is exposed.
        return json.load(mappings_file)['tissues']
|
a049481bcb618bf275d42065897bd997d2629632
| 433,023 |
def calc_damped_vs_kramer_1996(vs, xi):
    """
    Calculate the damped shear-wave velocity.

    Ref: Eq 7.9 from Kramer (1996)

    :param vs: shear-wave velocity
    :param xi: damping ratio
    :return: complex damped velocity vs * (1 + i*xi)
    """
    damping_factor = 1.0 + 1j * xi
    return vs * damping_factor
|
a67a3c33e4c6b4d900cf126ee2cd53e682dc97c3
| 259,445 |
def _get_period_from_imt(imtstr):
"""
Return a float representing the period of the SA IMT in the input.
Args:
imtstr (str): A string representing an SA IMT.
Returns:
float: The period of the SA IMT as a float.
"""
return float(imtstr.replace('SA(', '').replace(')', ''))
|
1cc4cbd1e46f04c56ac1b45155123353e4cbcbd9
| 279,544 |
def valid_interface_number(request):
    """
    Fixture that yields valid interface numbers (one per parametrized case).
    """
    number = request.param
    return number
|
9fccfb9bc07270eca6ec2ee093ed7b83cda018bf
| 251,851 |
from datetime import datetime
def parse_time(value: str) -> datetime:
    """Parse and convert a JSON (JavaScript) encoded datetime string.

    Invalid input yields the minimum datetime value (``datetime.min``)
    instead of raising.

    :param value: The string value to parse
    :type value: str
    :returns datetime: the parsed ``datetime``, or ``datetime.min`` on failure
    """
    json_time_format = "%Y-%m-%dT%H:%M:%SZ"
    try:
        parsed = datetime.strptime(value, json_time_format)
    except ValueError:
        parsed = datetime.min
    return parsed
|
1dbf8768ea67553f8ef060fc670a4428a5f17367
| 551,433 |
import json
def get_user_parameters(job):
    """
    Return the user parameters that were defined in CodePipeline,
    decoded from their JSON string form.
    """
    configuration = job["data"]["actionConfiguration"]["configuration"]
    return json.loads(configuration["UserParameters"])
|
6e74c5fb59930cf1bdd7ac0c882fe15275d70135
| 132,336 |
import random
def wait_for_small_enough_number(small_number=10, max_number=50, print_it=True):
    """
    Repeatedly generate random integers between 1 and max_number (inclusive)
    until one is less than or equal to small_number, returning how many
    integers were generated.

    :param small_number: stopping threshold; a draw <= this ends the loop
    :param max_number: upper bound (inclusive) for the random draws
    :param print_it: if True, print a banner and each generated number

    Each draw stops the loop with probability small_number / max_number
    (0.2 by default), so the expected number of draws is
    max_number / small_number (5 by default).
    """
    if print_it:
        print()
        print("----------------------------------------------------------")
        print("Demonstrating WAIT FOR A SMALL ENOUGH")
        print(" randomly generated number")
        print("----------------------------------------------------------")
        print("I will now generate random integers between")
        print("1 and {}, stopping when a generated random".format(max_number))
        print("integer is less than or equal to {}.".format(small_number))
        print()
    draws = 0
    number = small_number + 1  # sentinel forcing at least one draw
    while number > small_number:
        number = random.randrange(1, max_number + 1)
        draws += 1
        if print_it:
            print("  Randomly generated number: {}".format(number))
    if print_it:
        print("{} random integers between 1 and {} were generated".format(
            draws, max_number))
        print()
    return draws
|
41d6f74be2abe448ce39961a5fa2c4563cd73511
| 320,677 |
def get_method_identifier(qualified_name):
    """Takes com.some.thing.Class:method and returns Class_method."""
    tail = qualified_name.rsplit(".", 1)[-1]
    return tail.replace(":", "_")
|
2539a5160ff288225ab62f82596188e671e953ad
| 467,269 |
def _make_emm_plugin_finalizer(handle, allocations):
"""
Factory to make the finalizer function.
We need to bind *handle* and *allocations* into the actual finalizer, which
takes no args.
"""
def finalizer():
"""
Invoked when the MemoryPointer is freed
"""
# At exit time (particularly in the Numba test suite) allocations may
# have already been cleaned up by a call to Context.reset() for the
# context, even if there are some DeviceNDArrays and their underlying
# allocations lying around. Finalizers then get called by weakref's
# atexit finalizer, at which point allocations[handle] no longer
# exists. This is harmless, except that a traceback is printed just
# prior to exit (without abnormally terminating the program), but is
# worrying for the user. To avoid the traceback, we check if
# allocations is already empty.
#
# In the case where allocations is not empty, but handle is not in
# allocations, then something has gone wrong - so we only guard against
# allocations being completely empty, rather than handle not being in
# allocations.
if allocations:
del allocations[handle]
return finalizer
|
682378d6963bf924b77872c2ddf68105c90384b0
| 690,235 |
import re
def helper(filename):
    """
    Read *filename* and count how often each cleaned word occurs.

    :param filename: the file to be read
    :return: a dict mapping word -> quantity
    """
    from collections import Counter  # local import: keeps module-level deps unchanged

    # Punctuation characters to strip from each token (same set as before).
    punct = re.compile(
        r'(`|_|\+|-|\.|,|\?|\!|@|#|\$|%|\^|&|\*|\(|\)|;|\\|\/|\||<|>|\"|\')')
    # Use a context manager so the file handle is closed (the original
    # leaked it), and clean/lowercase each whitespace-separated token.
    with open(filename) as f:
        words = [punct.sub('', w).lower() for line in f for w in line.split()]
    # Counter counts in one O(n) pass; the original called words.count(w)
    # for every distinct word, which is O(n^2).
    return dict(Counter(words))
|
80db4bc50843bd09d3272171d3ee96e92f9cf908
| 540,282 |
import re
def FileExtension(file_name):
    """Return the file extension of file
    'file' should be a string. It can be either the full path of
    the file or just its name (or any string as long it contains
    the file extension.)
    Example #1:
        input --> 'abc.tar.gz'   return value --> 'tar.gz'
    Example #2:
        input --> 'abc.tar'      return value --> 'tar'
    Returns '' when no extension can be found.
    """
    pattern = re.compile(r"^.*?[.](?P<ext>tar[.]gz|tar[.]bz2|\w+)$",
                         re.VERBOSE | re.IGNORECASE)
    found = pattern.match(file_name)
    return found.group('ext') if found else ''
|
53cff85d1b9c8faa0bb062fa459205ee8dad0b6d
| 62,835 |
import random
def gen_fake_cpu_work(num_cpus: int = 1) -> tuple:
    """Generates fake CPU timing statistics.

    :param num_cpus: number of CPUs to simulate, defaults to 1
    :type num_cpus: int, optional
    :return: (num_cpus, wall time, total CPU time, single-CPU time, io time)
    :rtype: tuple
    """
    # Wall-clock time between 1 and 10 minutes.
    wall = float(random.randint(60, 600))
    # A random fraction of the wall time is spent on one CPU...
    one_cpu = random.random() * wall
    # ...scaled by the CPU count for the total, the remainder being I/O wait.
    total_cpu = float(one_cpu * num_cpus)
    io = wall - float(total_cpu / num_cpus)
    return num_cpus, wall, total_cpu, one_cpu, io
|
62ae485c41f4f4f9d279e729ec4c2630150956d5
| 401,775 |
def literal_unicode_representer(dumper, data):
    """
    Represent multi-line strings with the |- literal block style;
    single-line strings use the dumper's default scalar style.
    """
    # Guard clause: single-line strings need no special style.
    if '\n' not in data:
        return dumper.represent_scalar(u'tag:yaml.org,2002:str', data)
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
|
28cd9b63eb9fbbd95b95c014c53d23bca81bb255
| 430,133 |
def get_integer(request, key):
    """Returns the first value in the request args for the given key, if that
    value is an integer. Otherwise returns None.
    """
    args = request.args
    if not args or key not in args:
        return None
    first = args[key][0]
    # isdigit() rejects signs and decimals, so int() cannot fail here.
    return int(first) if first.isdigit() else None
|
961d0737911df0665b30f210907740466aedb62b
| 250,039 |
import ast
def _non_kw_only_args_of(args: ast.arguments) -> list[ast.arg]:
"""Return a list containing the pos-only args and pos-or-kwd args of `args`"""
# pos-only args don't exist on 3.7
pos_only_args: list[ast.arg] = getattr(args, "posonlyargs", [])
return pos_only_args + args.args
|
a6db7f28275bc8764b0f495f17d9c0e15a3e620a
| 537,307 |
import string
from datetime import datetime
def make_temp_dirname(name=None):
    """Return a temp directory name.

    For example, "temp_20141002_074531_my_election".
    """
    suffix = ""
    if name is not None:
        # Lowercase, turn spaces into underscores, and drop every other
        # character that is not a-z or underscore.
        allowed = string.ascii_lowercase + "_"
        cleaned = name.lower().replace(" ", "_")
        suffix = "_" + "".join(c for c in cleaned if c in allowed)
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    return "temp_%s%s" % (stamp, suffix)
|
ad62f5b5120afc71fd1605e2b477ea9b2fd31db3
| 470,841 |
def get_nice_str_list(items, *, item_glue=', ', quoter='`'):
    """
    Get a nice English phrase listing the items.

    :param sequence items: individual items to put into a phrase.
    :param str item_glue: separator placed between quoted items.
    :param str quoter: default is backtick because it is expected that the most
     common items will be names (variables).
    :return: nice phrase ('' when items is empty)
    :rtype: str
    """
    # BUG FIX: the original raised IndexError on items[-1] for an empty
    # sequence; return '' instead.
    if not items:
        return ''
    quoted = [f"{quoter}{item}{quoter}" for item in items]
    if len(quoted) == 1:
        return quoted[0]
    # e.g. "`a`, `b`, and `c`" (two items keep the glue: "`a`, and `b`").
    return item_glue.join(quoted[:-1]) + f"{item_glue}and " + quoted[-1]
|
caa4b581b1d9e5a2bacae974f28a300bd4cc5811
| 202,418 |
def generate_traffic_directions(nodes):
    """
    Return all possible traffic directions on the network topology.

    If the network is made up of only three nodes (e.g. A, B and C), the
    possible traffic directions are: AB, AC, BA, BC, CA, CB. For N nodes
    there are N*(N-1) directions.

    Arguments:
        nodes {List} -- List of topology node names.
    Returns:
        [List] -- List of possible traffic directions.
    """
    # Compare positions (not values) so duplicate node names still
    # produce a pair, exactly as the original nested index loops did.
    return [src + dst
            for i, src in enumerate(nodes)
            for j, dst in enumerate(nodes)
            if i != j]
|
c0d5a09e3a0d72039733a3e74fc1601c76fe26fa
| 225,688 |
def checksum(message):
    """
    Calculate the GDB server protocol checksum of the message.

    The GDB server protocol uses a simple modulo 256 sum of the
    character codes.
    """
    return sum(ord(ch) for ch in message) % 256
|
bfba144414f26d3b65dc0c102cb7eaa903de780a
| 9,832 |
def repeat_str(s, num):
    """
    Repeat string, with a trailing space after each non-empty copy.
    :param s: string
    :param num: repeat number
    :return: string
    """
    # In the original loop a non-empty s makes the accumulator non-empty
    # after the first append, so every copy is followed by a space; an
    # empty s (or non-positive num) never appends anything.
    if num <= 0 or not s:
        return ''
    return (s + ' ') * num
|
71c097459a299e591fe8ff8b045ca80a233bd24d
| 94,363 |
def skip(x, n):
    """
    Reduces precision of the numeric value by truncation.
    :type x: floating point value
    :param x: number to reduce precision
    :type n: int
    :param n: number of values after dot
    """
    scale = 10 ** n
    # int() truncates toward zero, so digits beyond n are dropped.
    return int(x * scale) / scale
|
65a416196e4055beb5876bcd79684f843ff76887
| 399,398 |
def xstr(this_string):
    """Return an empty string if the type is NoneType
    This avoids error when we're looking for a string throughout the script
    :param this_string: an object to be checked if it is NoneType
    """
    return '' if this_string is None else str(this_string)
|
98e88c6d081e8f36943d20e66f0c2c6d8ff6acae
| 264,931 |
def remove_keys(doc, keys):
    """Return a new document with keys in keys removed
    >>> doc = {'a':1, 'b':2}
    >>> remove_keys(doc, ['a'])
    {'b': 2}
    >>> # Show that the original document is not affected
    >>> doc['a']
    1
    """
    # Dict comprehension builds a fresh mapping; *doc* is left untouched.
    return {key: value for key, value in doc.items() if key not in keys}
|
dbee370982d6f3884dc2f4d137ba6b3e38791398
| 608,997 |
import re
def uncamel(val):
    """Return the snake case version of :attr:`str`
    >>> uncamel('deviceId')
    'device_id'
    >>> uncamel('dataCenterName')
    'data_center_name'
    """
    # Insert an underscore before each camel-case hump, then lowercase
    # and trim any underscores introduced at the edges.
    underscored = re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', val)
    return underscored.lower().strip('_')
|
27c6b5338d8da39a6b45d420749457147738e416
| 626,468 |
def score(touching_power_pellet, touching_dot):
    """Verify that Pac-Man has scored when a power pellet or dot has been eaten.

    :param touching_power_pellet: bool - does the player have an active power pellet?
    :param touching_dot: bool - is the player touching a dot?
    :return: bool - has the player scored or not?
    """
    # Short-circuit exactly like `a or b`: first truthy value wins.
    if touching_power_pellet:
        return touching_power_pellet
    return touching_dot
|
c34cd1a1c3cc5a331e39004ba34ab373281cb68b
| 183,267 |
def dataset2metadataset_key(dataset_key):
    """Return the metadataset name corresponding to a dataset name

    Args:
        dataset_key (str): Name of a dataset
    Returns:
        str: Name of corresponding metadataset name
    """
    # Only the FIRST '/' gets the marker inserted after it.
    head, sep, tail = dataset_key.partition('/')
    if not sep:
        return dataset_key
    return head + '/_num_' + tail
|
a11a78ebcbc3deab6bba3e513e63584a2368e82f
| 489,804 |
def areas(ip):
    """Returns the area per triangle of the triangulation inside
    a `LinearNDInterpolator` instance.
    Is useful when defining custom loss functions.
    Parameters
    ----------
    ip : `scipy.interpolate.LinearNDInterpolator` instance
    Returns
    -------
    numpy array
        The area per triangle in ``ip.tri``.
    """
    # Vertex coordinates of every simplex: shape (n_triangles, 3, 2)
    # for a 2-D triangulation.
    # NOTE(review): `tri.vertices` is the legacy alias of `tri.simplices`
    # in scipy's Delaunay — confirm against the scipy version in use.
    p = ip.tri.points[ip.tri.vertices]
    # Edge vectors from the last vertex to the other two vertices.
    q = p[:, :-1, :] - p[:, -1, None, :]
    # Half the absolute 2-D cross product of the two edge vectors
    # gives each triangle's area.
    areas = abs(q[:, 0, 0] * q[:, 1, 1] - q[:, 0, 1] * q[:, 1, 0]) / 2
    return areas
|
07033b45d273f23d4c68f0382a9ffa0e0c8aaab4
| 454,967 |
import encodings
def _c18n_encoding(encoding: str) -> str:
"""Canonicalize an encoding name
This performs normalization and translates aliases using python's
encoding aliases
"""
normed = encodings.normalize_encoding(encoding).lower()
return encodings.aliases.aliases.get(normed, normed)
|
8ccb52e06f361ba8492d9efa708ca7b30a7a9ae6
| 438,448 |
def _get_id_given_url(original_url: str) -> str:
""" Parse id from original URL """
return original_url.replace('https://', 'http://').replace('http://vocab.getty.edu/page/ia/', '')
|
c30f9ef1a6dde54c3a0f0e6fe6124a336b2e0e21
| 248,165 |
from typing import Any
def length(x: Any) -> int:
    """Gin-bindable wrapper around the builtin ``len``."""
    return len(x)
|
ac4abd25537d3aba3a1b5d73e9e766108465fe80
| 545,776 |
def parentheses_to_snake(x):
    """
    Convert a string formatted as
    "{a} ({b})" to "{a}_{b}"
    Args:
        x: input string
    Returns:
        Formatted string; x is returned unchanged when it contains no
        " (" separator (the original raised IndexError in that case).
    """
    parts = x.split(" (")
    # BUG FIX: guard against inputs without " (" instead of crashing.
    if len(parts) < 2:
        return x
    # [:-1] drops the closing parenthesis, as before.
    return f"{parts[0]}_{parts[1][:-1]}"
|
b82a734e409a110e7c8e776e6b110aa653d29a45
| 259,841 |
from typing import Optional
from typing import Tuple
def expandCommand(command: str, currentBranch: str, prevBranch: Optional[str]) -> Tuple[str, bool]:
    """
    Replaces
        %P with prevBranch,
        %B with currentBranch.
    If a placeholder cannot be replaced (e.g. %P with prevBranch being
    None) the command is returned unexpanded.
    Returns:
        Tuple[expandedCommand, True if succeeded in expanding]
    """
    # Guard clause: %P requested but no previous branch available.
    if '%P' in command and prevBranch is None:
        return command, False
    expanded = command
    if prevBranch is not None:
        expanded = expanded.replace('%P', prevBranch)
    return expanded.replace('%B', currentBranch), True
|
c5166fb870752703ae96bc368487c0c87f55d795
| 564,790 |
from typing import Union
from typing import Dict
from typing import List
def to_tags(values: Union[Dict, List, str], sep: str = ",") -> List[str]:
    """ Coerce the passed values into a list of colon separated key-value pairs.
    dict example:
        {"tag1": "value1", "tag2": "value2", ...}
        -> ["tag1:value1", "tag2:value2", ...]
    list example:
        ["tag1", "tag2", ...] -> ["tag1", "tag2", ...]
    str example (sep-delimited, default comma):
        "tag1:value1, tag2:value2", ..." -> ["tag1:value1", "tag2:value2", ...]
    str example (single):
        "tag1:value1" -> ["tag1:value1"]
    Any other type yields [].
    """
    if isinstance(values, dict):
        # Lowercase values and replace spaces; skip non-str/int values.
        return [
            f"{key}:{str(value).lower().replace(' ','_')}"
            for key, value in values.items()
            if isinstance(value, (str, int))
        ]
    if isinstance(values, str):
        # BUG FIX: the original tested for a hard-coded "," even when a
        # custom *sep* was passed, and did not strip the whitespace shown
        # being removed in the docstring example.
        if sep in values:
            return [part.strip() for part in values.split(sep)]
        return [values]
    if isinstance(values, list):
        return values
    return []
|
fe6e2c0c25aa1bdb0e7c24ad10df124427fd3669
| 350,321 |
def slope(x_val, y_val):
    """
    This function returns the slope of a line segment.
    Parameters
    ----------
    x_val, y_val : coordinates of the two points of the line segment
        (x_val = [x1, x2], y_val = [y1, y2]).
    Return
    ----------
    slope of line segment.
    """
    rise = y_val[1] - y_val[0]
    run = x_val[1] - x_val[0]
    return rise / run
|
85dc05238c3a8d302f5534fea6aff25831553f80
| 265,459 |
def dot(vect1, vect2):
    """ Calculate dot product of vect1 and vect2 """
    return sum(a * b for a, b in zip(vect1, vect2))
|
2a4c186838bac4392426e04a9efc4bd43e513dda
| 214,949 |
def a_au(ms, p):
    """
    ms : stellar mass [Solar]
    p : period [days]
    returns : semi-major axis [AU]
    """
    # Kepler's third law in solar units: a^3 = M * P_yr^2.
    period_years = p / 365.25
    return (ms * period_years ** 2) ** (1.0 / 3.0)
|
8d3dbde383a52be7a95d429ca7b3c2d6999daf7e
| 233,640 |
def is_viv_ord_impname(impname):
    """
    return if import name matches vivisect's ordinal naming scheme `'ord%d' % ord`
    """
    prefix = "ord"
    if not impname.startswith(prefix):
        return False
    # The remainder must parse as an integer (empty string does not).
    try:
        int(impname[len(prefix):])
    except ValueError:
        return False
    return True
|
b8c1aa6f209000e04107cdff254a8fb3322a1921
| 178,266 |
def compare_user_with_lexicon(user_tokens, lexicon_tokens):
    """Compare input tokens with the resource tokens from the 1000 most
    common words and count the matching pairs.

    Each user token scores one point per equal lexicon token, so duplicate
    lexicon entries count multiple times (same as the original pairwise
    double loop).

    Input:
        1. user_tokens (list): list of the user's tokens
        2. lexicon_tokens (list): list of the lexicon tokens
    Output:
        1. scores (int): the number of matching token pairs
    """
    from collections import Counter  # local import: keeps module deps unchanged

    # Counting the lexicon once turns the O(n*m) nested loops into O(n + m).
    lexicon_counts = Counter(lexicon_tokens)
    return sum(lexicon_counts[token] for token in user_tokens)
|
6ab582acd3a12bed4b56be4199b84d02ae27fd27
| 289,557 |
def paint(width, height, performance):
    """Calculates how much paint one needs
    for a given area
    @param width: area's width
    @param height: area's height
    @param performance: paint performance/m^2"""
    return (width * height) / performance
|
02243f92ab5b3f714bb94f489b2b8e6e49f6c4f0
| 42,925 |
def _eval_feature_fn(fn, xs, classes):
"""_eval_feature_fn(fn, xs, classes) -> dict of values
Evaluate a feature function on every instance of the training set
and class. fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values
|
11e300b2896197a16a0faa46961d348683aa143f
| 62,666 |
def disintegrate(obr):
    """Break image to RGB color representations, return list."""
    # Unpacking enforces exactly three channels, as the original did.
    red, green, blue = obr.split()
    return [red, green, blue]
|
a7ec300d7089f2bde7a09f798252cdb1ca2b3443
| 54,925 |
import sqlite3
def _sqlite_json_enabled() -> bool:
"""Return True if this Python installation supports SQLite3 JSON1."""
connection = sqlite3.connect(":memory:")
cursor = connection.cursor()
try:
cursor.execute('SELECT JSON(\'{"a": "b"}\')')
except sqlite3.OperationalError:
return False
cursor.close()
connection.close()
return True
|
960c04887acad16885f12ed18dc9d0c8f583849b
| 292,206 |
import pathlib
import json
def json_data(request, name: str):
    """Loads a json file from the request's fixtures directory."""
    fixture_path = pathlib.Path(request.fspath, "fixtures", name)
    return json.loads(fixture_path.read_text())
|
ccc7fb361a25ebf3d0f97b832f2e6ba104a30c1d
| 123,942 |
def expect_z(zp_po):
    """Compute the expectation of z — the identity under this model."""
    return zp_po
|
c85be63b0d8c0f36cabace4032aecf920fc36f34
| 597,960 |
import ast
def black_repr(text):
    """Format some Chars as Source Chars, in the Black Style of Python

    Returns None for None input. Prefers double quotes; switches to a raw
    string literal when the text contains backslashes that a raw literal
    can represent. The result always round-trips through literal_eval.
    """
    if text is None:
        return None
    # Only plain str input is supported.
    assert text == str(text), repr(text)
    # Glance over the oneline Repr from Python
    repped = repr(text)
    q1 = repped[0]
    q2 = repped[-1]
    middle_repped = repped[1:][:-1]
    # repr() of a str opens and closes with the same quote mark.
    assert q1 == q2, (q1, q2)
    assert q1 in ("'", '"'), repr(q1)
    # Choose the Start Mark and End Mark from: " ' r" r'
    # Prefer double quotes (Black style) unless the escaped body already
    # contains one; then keep whatever quote repr() chose.
    q = '"' if ('"' not in middle_repped) else q1
    pychars = q + middle_repped + q
    # Backslashes read better as a raw literal, but a raw string cannot
    # end with a backslash and must not contain its own quote mark.
    if ("\\" in text) and not text.endswith("\\"):
        if '"' not in text:
            pychars = 'r"{}"'.format(text)
        elif "'" not in text:
            pychars = "r'{}'".format(text)
    # Require correct Eval
    evalled = ast.literal_eval(pychars)
    assert evalled == text, (text, pychars, evalled)
    return pychars
|
6b5c7abbbd5664a53bfe60604287b6d5fa0e3337
| 400,918 |
def select_bin(bin_list, values):
    """ Select a bin from a list of bins based on an array of values.
    Args:
        bin_list (arr): List of Bin objects
        values (arr): Array of parameters.
            Note that the parameters need to be in the same order that the
            bins were generated from.
    Returns:
        The first bin matching the parameters, or None if no bin exists
        for those parameters.
    """
    return next((candidate for candidate in bin_list
                 if candidate.values_in_bin(values)), None)
|
5ec77d2cddcf596e786467d96ce79ed3687515fc
| 693,917 |
import re
def create_div_id(key):
    """
    Build a div id from a course name:
    1) make lower case
    2) remove white space
    3) remove non-alphanumeric characters
    """
    compacted = key.lower().replace(' ', '')
    return re.sub(r'\W+', '', compacted)
|
530afc002473a07a7e8d5bdcbc9e7f27757b3079
| 296,437 |
def calculated_stat(base_stat, level, iv, effort, nature=None):
    """Returns the calculated stat -- i.e. the value actually shown in the game
    on a Pokémon's status tab.
    """
    # Ported from C: floor division throughout.
    raw = (base_stat * 2 + iv + effort // 4) * level // 100 + 5
    # A truthy nature multiplier scales the stat, truncated to int.
    return int(raw * nature) if nature else raw
|
904213a03fea0e8d350e3a4d364632bcbda1ebb8
| 125,716 |
import math
def forward_kinematics(length1, length2, theta1, theta2):
    """
    :param length1: length of link 1
    :param length2: length of link 2
    :param theta1: absolute angle made by link 1 and positive x axis (degrees)
    :param theta2: absolute angle made by link 2 and positive x axis (degrees)
    :return: coordinates of end position of link1, link2, rounded to 2 places
    """
    # Convert once; both angles are absolute, so link 2 adds onto link 1's tip.
    angle1 = math.radians(theta1)
    angle2 = math.radians(theta2)
    x1 = length1 * math.cos(angle1)
    y1 = length1 * math.sin(angle1)
    x2 = x1 + length2 * math.cos(angle2)
    y2 = y1 + length2 * math.sin(angle2)
    return round(x1, 2), round(y1, 2), round(x2, 2), round(y2, 2)
|
7eba6ef15fc98561da4aab4143bd778dbccb6bed
| 379,869 |
def gen_header(program_name, start_address, program_length):
    """ Generate a Header Record: 'H' + name(6) + start(6 hex) + length(6 hex) """
    # Fixed column widths of the header record.
    name_width = addr_width = len_width = 6
    record = "H"
    record += program_name[:name_width].ljust(name_width).upper()
    record += hex(start_address)[2:].zfill(addr_width).upper()
    record += hex(program_length)[2:].zfill(len_width).upper()
    return record
|
0d978f46a985ff2e94b195cefdb8fe9ecd1b1cb9
| 445,640 |
def make_pair(img_paths, label_paths):
    """Pair each image path with its corresponding label path.

    Arguments:
        img_paths (list of `Path`): list of paths to images.
        label_paths (list of `Path`): list of paths to labels.
    Returns:
        pairs (list of tuple): tuples of (image path, label path), keeping
        only entries whose stems match after stripping the Cityscapes
        suffixes.
    Raises:
        ValueError: when the two lists differ in length.
    """
    if len(img_paths) != len(label_paths):
        raise ValueError("The lengths of the two lists mismatch.")
    pairs = []
    # Sorting both lists aligns matching stems positionally.
    for image, label in zip(sorted(img_paths), sorted(label_paths)):
        image_key = image.stem.replace("_leftImg8bit", "")
        label_key = label.stem.replace("_gtFine_labelIds", "")
        if image_key == label_key:
            pairs.append((image, label))
    return pairs
|
3bb3b0de71443618a9afbe19b6e13953480c6eb3
| 357,766 |
def is_authenticated(user):
    """
    Check if a user is authenticated
    In Django 1.10 User.is_authenticated was changed to a property and
    backwards compatible support for is_authenticated being callable was
    finally removed in Django 2.0. This function can be removed once support
    for Django versions earlier than 1.10 is dropped.
    """
    flag = user.is_authenticated
    return flag() if callable(flag) else flag
|
8fc279e5dc8182d634c3cc33fd451e402efe79b5
| 256,338 |
from typing import Callable
from typing import Any
import time
def _pytimed(callback: Callable[..., None], *args: Any, **kwargs: Any):
"""Call the given callback and return time in nanoseconds as result."""
start_time = time.monotonic_ns()
results = callback(*args, **kwargs)
end_time = time.monotonic_ns()
duration = (end_time - start_time)
return duration
|
1d7ece3d4da8fdb4305d96e1a7623bd2e3a9a89e
| 597,742 |
import time
def compute(n=26):
    """ Computes 2 to the power of n by unit increments and returns elapsed time"""
    start = time.time()
    total = 0
    for _ in range(2 ** n):
        total += 1
    elapsed = time.time() - start
    print(f'Result {total} in {elapsed} seconds!')
    return elapsed
|
d816c587302830f0acd20a59905c8634fcf20b49
| 706,148 |
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.
    """
    # data blob: holds a batch of N images, each with 3 channels.
    # The list is identical whether or not we are training.
    return ['data', 'labels']
|
9963dbb80c7b79860a40871260fa1459fb518f6d
| 134,383 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.