content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def stairmaster_mets(setting):
    """Estimate oxygen cost (kcal/kg*hour) for a StairMaster 4000 PT setting.

    For use in submaximal tests on the StairMaster 4000 PT step ergometer.
    Howley, Edward T., Dennis L. Colacino, and Thomas C. Swensen. "Factors
    Affecting the Oxygen Cost of Stepping on an Electronic Stepping
    Ergometer." Medicine & Science in Sports & Exercise 24.9 (1992): n. pag.
    NCBI. Web. 10 Nov. 2016.

    args:
        setting (int): the setting of the step ergometer

    Returns:
        float: VO2:subscript:`2max` in kcal/kg*hour
    """
    conversion_factor = 0.556
    cost_per_setting = 7.45
    return conversion_factor * cost_per_setting * setting
|
1d6cc9fc846773cfe82dfacb8a34fb6f46d69903
| 5,940 |
def get_profile_step_regs(step):
    """Return the register map for profile step ``step`` (1..99).

    Each step occupies 15 consecutive registers starting at 215:
    step 1 -> 215..229, step 2 -> 230..244, ..., step 99 -> 1685..1699.
    All of these registers are writable ('w' in the original notes).
    """
    assert 1 <= step <= 99
    base = 215 + 15 * (step - 1)
    field_names = [
        'PROFILE_STEP_TIME_HOURS',
        'PROFILE_STEP_TIME_MM_SS',
        'PROFILE_STEP_CHAMBER_EVENTS',
        'PROFILE_STEP_CUSTOMER_EVENTS',
        'PROFILE_STEP_GUARANTEED',
        'PROFILE_STEP_WAIT_FOR_LOOP_EVENTS',
        'PROFILE_STEP_WAIT_FOR_MONITOR_EVENTS',
        'PROFILE_STEP_WAIT_FOR_SETPOINT',
        'PROFILE_STEP_JUMP_STEP_NUMBER',
        'PROFILE_STEP_JUMP_COUNT',
        'PROFILE_STEP_TARGET_SETPOINT_FOR_LOOP_1',
        'PROFILE_STEP_TARGET_SETPOINT_FOR_LOOP_2',
        'PROFILE_STEP_TARGET_SETPOINT_FOR_LOOP_3',
        'PROFILE_STEP_TARGET_SETPOINT_FOR_LOOP_4',
        'PROFILE_STEP_TARGET_SETPOINT_FOR_LOOP_5',
    ]
    return {name: base + position for position, name in enumerate(field_names)}
|
916c92cd0ff8cd95c5427db9a928bd7296989f4c
| 352,141 |
from datetime import datetime
def check_bank_holiday(input_date):
    """
    Returns 1 if the inputted date is a bank holiday in Ireland
    or 0 otherwise.

    Args
    ---
    input_date: datetime
        A DateTime object (midnight; compared for exact equality).

    Returns
    ---
    An int which is 1 if it's a bank holiday or 0 otherwise
    """
    bank_holidays_2021 = [
        # New Year's Day
        datetime(2021, 1, 1),
        # St Patricks' Day
        datetime(2021, 3, 17),
        # Easter Monday
        datetime(2021, 4, 5),
        # May Bank Holiday
        datetime(2021, 5, 3),
        # June Bank Holiday
        datetime(2021, 6, 7),
        # August Bank Holiday
        datetime(2021, 8, 2),
        # October Bank Holiday
        datetime(2021, 10, 25),
        # Christmas Day
        datetime(2021, 12, 25),
        # St Stephen's Day
        datetime(2021, 12, 26),
    ]
    # int() so the return value matches the documented 1/0 contract
    # (the original returned a bool; bool is an int subclass, so callers
    # relying on truthiness or equality are unaffected).
    return int(input_date in bank_holidays_2021)
|
c176eef77ae94f1a3229bb04e91ea6974f09408d
| 406,789 |
def find_ss_regions(dssp_residues, loop_assignments=(' ', 'B', 'S', 'T')):
    """Separates parsed DSSP data into groups of secondary structure.

    Notes
    -----
    Example: all residues in a single helix/loop/strand will be gathered
    into a list, then the next secondary structure element will be
    gathered into a separate list, and so on. Consecutive residues whose
    type is in ``loop_assignments`` are grouped together as one loop.

    Parameters
    ----------
    dssp_residues : [tuple]
        Each internal list contains:
            [0] int Residue number
            [1] str Secondary structure type
            [2] str Chain identifier
            [3] str Residue type
            [4] float Phi torsion angle
            [5] float Psi torsion angle
            [6] int dssp solvent accessibility

    Returns
    -------
    fragments : [[list]]
        Lists grouped in continuous regions of secondary structure.
        Innermost list has the same format as above. Empty input yields
        an empty list.
    """
    if not dssp_residues:
        # Fix: without this guard, empty input returned [[]] because the
        # empty running fragment was appended unconditionally at the end.
        return []
    loops = loop_assignments
    previous_ele = None
    fragment = []
    fragments = []
    for ele in dssp_residues:
        if previous_ele is None:
            # First residue starts the first fragment.
            fragment.append(ele)
        elif ele[2] != previous_ele[2]:
            # A chain change always starts a new fragment.
            fragments.append(fragment)
            fragment = [ele]
        elif previous_ele[1] in loops:
            # In a loop region: any loop-type residue extends it.
            if ele[1] in loops:
                fragment.append(ele)
            else:
                fragments.append(fragment)
                fragment = [ele]
        else:
            # In a helix/strand: only the identical SS type extends it.
            if ele[1] == previous_ele[1]:
                fragment.append(ele)
            else:
                fragments.append(fragment)
                fragment = [ele]
        previous_ele = ele
    fragments.append(fragment)
    return fragments
|
a6f59aacdd58c6cc30532a260e03e3d24ac9bae8
| 633,336 |
def _add_indent(script, indent=2):
""" Indent list of script with specfied number of spaces """
if not isinstance(script, list):
script = [script]
indent = ' ' * indent
return [indent + s for s in script]
|
f7805c4801703ea240ce8ed43d403689c428d620
| 518,023 |
def version_array_to_string(version_array):
    """Render a version list such as [1, 2, 3] as the string "1.2.3"."""
    parts = map(str, version_array)
    return ".".join(parts)
|
bc730c9e991266fee421cd0a9a5419ff151b878a
| 274,111 |
def FirstActiveGeneral(activities, timepoints):
    """Return the time point (as a string) at which the protein is first
    active, or 'Not active' if it never leaves the "0" inactive state.

    ``activities`` holds coded activity states from ConvertWindowState and
    must be the same length as ``timepoints``. No other knowledge of the
    time points is assumed.

    Note: the original docstring claimed a 0-based index was returned, but
    the code has always returned ``str(timepoints[t])``; the documentation
    is corrected here. Materializing the inputs once also makes the
    function safe for one-shot iterators (the original called ``list()``
    on them repeatedly).
    """
    activities = list(activities)
    timepoints = list(timepoints)
    assert len(activities) == len(timepoints), \
        "Must have same length activities and time points"
    for activity, timepoint in zip(activities, timepoints):
        if activity != "0":
            return str(timepoint)
    return "Not active"
|
9fa8d00c9939e5912d1f7185a2c34325fcc8a9e2
| 557,817 |
def faction_type(faction):
    """Map a faction name to its single-character code (None if unknown)."""
    codes = {'autobots': 'A', 'decepticons': 'D'}
    return codes.get(faction)
|
0dfe72b32b05cba042da4e0c58957da7a32ed5b8
| 439,509 |
def idfs(corpus):
    """ Compute IDF
    Args:
        corpus (RDD): input corpus of (id, tokens) records
    Returns:
        RDD: a RDD of (token, IDF value)
    """
    doc_count = corpus.count()
    # Count each token at most once per document, then sum per token.
    doc_frequency = (corpus
                     .flatMap(lambda record: list(set(record[1])))
                     .map(lambda token: (token, 1))
                     .reduceByKey(lambda a, b: a + b))
    return doc_frequency.map(lambda pair: (pair[0], doc_count / float(pair[1])))
|
0da55160aebdfc3a74979619a959c694ae774c45
| 103,034 |
import re
def natural_sort(l):
    """
    Takes in a list of strings and returns the list sorted in "natural" order.
    (e.g. [test1, test10, test11, test2, test20] -> [test1, test2, test10, test11, test20])
    Source: https://stackoverflow.com/questions/4836710/is-there-a-built-in-function-for-string-natural-sort

    Parameters
    ----------
    l : list of str
        Unsorted list of strings

    Returns
    -------
    sorted_l : list of str
        Sorted list of strings
    """
    def to_chunk(text):
        # Digit runs compare numerically; everything else case-insensitively.
        return int(text) if text.isdigit() else text.lower()

    def natural_key(s):
        return [to_chunk(chunk) for chunk in re.split('([0-9]+)', s)]

    return sorted(l, key=natural_key)
|
7db22ee5f75703f52b25eecd888847eb35590e65
| 12,423 |
def west_valley(parcels):
    """
    Dummy (0/1) for parcels flagged 'is_MC' whose 'mpa' is a West Valley code.
    """
    west_valley_mpas = [
        'AV', 'BU', 'EL', 'GB', 'GL', 'GO', 'LP', 'PE', 'SU', 'TO', 'WI', 'YO'
    ]
    in_wv = parcels['mpa'].isin(west_valley_mpas)
    return (parcels['is_MC'] & in_wv).astype(int)
|
fec326f82b21acb0cab670904b76096f84445e4d
| 20,569 |
def clean_borough(row : str) -> str:
    """
    Removes the trailing space found after some borough names.

    :param: row (str): row in the Pandas series
    :rvalue: string
    :returns: the row with the known trailing space removed
    """
    known_fixes = {'Manhattan ': 'Manhattan', 'Brooklyn ': 'Brooklyn'}
    return known_fixes.get(row, row)
|
85b5a097917daad4ab56090d213a951aa1595391
| 311,952 |
from datetime import datetime
def datetime_string_to_datetime(datetime_string):
    """
    Convert an ISO 8601 time string with timezone info to a datetime object,
    preserving the original timezone. Returns None when the string does not
    match '%Y-%m-%dT%H:%M:%S%z'.
    """
    iso_format = '%Y-%m-%dT%H:%M:%S%z'
    try:
        return datetime.strptime(datetime_string, iso_format)
    except ValueError:
        return None
|
4a820a749b6a2b4bda67ce926bb1404c16e56e94
| 605,612 |
def two_bytes_to_int(hi_byte: int, low_byte: int) -> int:
    """
    Combine a high and a low byte into one 16-bit integer.

    :param hi_byte: the high byte
    :param low_byte: the low byte
    :return: converted integer that has a value between [0-65535]
    """
    # Mask both inputs to a single byte, then place the high byte in bits 8-15.
    return ((hi_byte & 0xFF) << 8) | (low_byte & 0xFF)
|
979f59ebb860c2294eaa644609345f1146e6dc0b
| 687,620 |
def is_too_similar_for_axes(word1, word2):
    """Return True when either word contains the other as a substring."""
    if word1 in word2:
        return True
    return word2 in word1
|
7156490dcaac3081fd44845ee6350906fd196f19
| 681,665 |
def getsize(datadescriptor):
    """Get the size of a data descriptor tuple.

    Returns (0, size) for a supported descriptor type, or
    (15, "Not a supported destination type.") otherwise.
    """
    # Index into the descriptor payload that holds the size, per type tag.
    size_index = {'reg': 2, 'mem': 1, 'heap': 2, 'perp': 2, 'pmem': 2}
    kind = datadescriptor[0]
    if kind not in size_index:
        return (15, "Not a supported destination type.")
    return (0, datadescriptor[1][size_index[kind]])
|
feaaa9d0698b58649a55c53ba399a46ba81520b6
| 7,375 |
def prob_of_transmission(Ph, Pm):
    """
    Total probability of transmission.

    Parameters
    ----------
    Ph: Probability for members of each household to contract from their housemates
    Pm: Probability for members of each household to contract during the movement (wandering around)

    Returns
    -------
    out: Total probability of transmission

    Notes
    -----
    From paper: "𝑝𝑖𝑑 = 1 − Π(1 − 𝑝𝑖𝑑𝑤) 𝑤∈{ℎ,𝑡,𝑓,𝑚}."
    """
    # Complement of avoiding infection through both channels.
    p_avoid_both = (1 - Ph) * (1 - Pm)
    return 1 - p_avoid_both
|
c0696b85665ef6ce65fcbdefbc577d2daf26e676
| 242,051 |
def prepare_droid_list(device):
    """
    Wrap a single device in a list; lists pass through unchanged.

    :param device: Device to check.
    :type device: str
    """
    return device if isinstance(device, list) else [device]
|
0c8d6fc9861b0cf01cc266889127cf958ea6862f
| 388,784 |
def flatten(items):
    """Flattens a list of lists by one level."""
    flat = []
    for sublist in items:
        flat.extend(sublist)
    return flat
|
3251d8a1d3540e23b5bcfa1f94f0e70590cc6511
| 527,608 |
def get_from_subtree(subtree, key):
    """
    Binary-search-tree lookup for the value stored under ``key``.

    :param subtree: the subtree to search in
    :param key: the key to search for
    :return: the value associated to the key, or None if the key is absent
    """
    node = subtree
    while node is not None:
        if key < node.key:
            node = node.left
        elif key > node.key:
            node = node.right
        else:
            # Neither smaller nor larger: found the key.
            return node.value
    return None
|
8a2b27ea64f98d5020d6141ca1a2964170fb3dea
| 119,565 |
def _get_relay_log(server):
"""Retrieve relay log and relay log position
server[in] Server instance
Returns tuple (relay log, relay log position)
"""
relay_log, relay_log_pos = '', ''
res = server.exec_query("SHOW SLAVE STATUS")
if res != [] and res is not None:
relay_log = res[0][7]
relay_log_pos = res[0][8]
return relay_log, relay_log_pos
|
0761a663d0ba0cbbd496b4cbec4f9774b0b4d8b1
| 597,922 |
import configparser
def section_has_keys(section: configparser.SectionProxy, *keys: str) -> bool:
    """Verify that an INI section has the keys.

    :param section: INI section.
    :param keys: Expected keys from the INI section.
    :return: Whether the INI section has the expected keys.

    Fix: the original compared the intersection size against ``len(keys)``,
    which wrongly returned False when the same key name was passed twice;
    a subset test expresses the intent directly and handles duplicates.
    """
    return set(keys).issubset(section.keys())
|
53965ba32a9bc7fffb3290f1c4c087f23c6ab978
| 434,225 |
def to_ascii_hex(value: int, digits: int) -> str:
    """Converts an int value to ASCII hex, as used by LifeSOS.

    Unlike regular hex, it uses the first 6 characters that follow
    numerics on the ASCII table instead of A - F. Returns '' when
    ``digits`` is less than 1.
    """
    if digits < 1:
        return ''
    chars = []
    remaining = value
    for _ in range(digits):
        # Encode the lowest nibble as the character ord('0') + nibble.
        chars.append(chr(ord('0') + (remaining % 0x10)))
        remaining //= 0x10
    return ''.join(reversed(chars))
|
7b6b762ff4e225e5a4c6bfa21ec69c430d2248a1
| 457,270 |
def birth_weight1(**kwargs):
    """
    Speciation rate is proportional to the number of areas occupied:
    n / (n + 1) for a lineage occupying n areas.
    """
    num_areas = len(kwargs["lineage"].areas)
    return num_areas / float(num_areas + 1)
|
65084a469329f03dc002be0d1887c98bccf37218
| 545,926 |
from typing import Sequence
def flatten(lst: Sequence) -> list:
    """
    Flatten one level in the given list.
    """
    flattened: list = []
    for group in lst:
        flattened.extend(group)
    return flattened
|
62a608978481609d7e7d8020f92090f67040c390
| 422,824 |
def safename(name):
    """Make name filesystem-safe by mapping '/' and ':' to '-'."""
    unsafe_to_dash = str.maketrans({u'/': u'-', u':': u'-'})
    return name.translate(unsafe_to_dash)
|
29858ecc3422c22c481b5ff3409a082308a62072
| 450,885 |
def align(value, m):
    """Round ``value`` up to the next multiple of ``m``.

    Replaces the original increment-by-one loop (O(value % m) iterations)
    with constant-time modular arithmetic; results are identical for
    positive ``m``, including negative ``value``.
    """
    # (-value % m) is the distance to the next multiple (0 when aligned).
    return value + (-value % m)
|
9171b62c71ae21b51b2f6cffe9e9e2a6d4778446
| 30,300 |
def relu_activation(x):
    """
    Rectified linear unit: pass positive values through, clamp the rest to 0.
    """
    if x > 0.0:
        return x
    return 0.0
|
9ddfc77a411a65783d9d51edeb181e12d3f3058d
| 362,852 |
def get_secret_variables(app_config: dict) -> dict:
    """Return the secret variables from configuration, each formatted as
    a ``{"secretKeyRef": {"name": ..., "key": ...}}`` mapping."""
    secret_variables = app_config.get("variables", {}).get("secret", {})
    formatted = {}
    for secret_name, secret in secret_variables.items():
        formatted[secret_name] = {
            "secretKeyRef": {
                "name": secret["name"],
                "key": secret["key"],
            },
        }
    return formatted
|
55dbe9fac1aa8e9f59b318c71c31ac9965bb159a
| 288,706 |
def keymap(fn, d):
    """ Apply function to keys of dictionary
    >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
    >>> keymap(str.lower, bills) # doctest: +SKIP
    {'alice': [20, 15, 30], 'bob': [10, 35]}
    See Also:
        valmap
    """
    return {fn(key): value for key, value in d.items()}
|
27b3a9cab7b24bff893e3f668d9fc55853eaad41
| 625,881 |
def fmeasure(precision, recall):
    """Returns the fmeasure (or F1-score).

    http://en.wikipedia.org/wiki/F1_score

    Returns 0.0 when both precision and recall are 0, where the harmonic
    mean is undefined (the original raised ZeroDivisionError there).
    """
    if precision + recall == 0:
        return 0.0
    return 2.0 * ((precision * recall) / (precision + recall))
|
c1cf8dc1835eb428b9c96d31837ccd39902e05b1
| 555,431 |
def ema(avg, sample, weight=0.1):
    """Exponential moving average; seeds with ``sample`` when ``avg`` is None."""
    if avg is None:
        return sample
    return (avg * (1.0 - weight)) + (sample * weight)
|
92d100229f66c61620970bcc2554a664868b3bb1
| 566,290 |
from datetime import datetime
def datetime_without_seconds(date: datetime) -> datetime:
    """
    Return a copy of ``date`` truncated to whole minutes
    (seconds and microseconds zeroed).
    """
    return date.replace(second=0, microsecond=0)
|
de30c7770d84751b555c78e045f37783030d8970
| 2,189 |
def _set_global(env, func_name, func):
"""Set a template environment global function."""
env.globals[func_name] = func
return env
|
5744cb51e0090da58735cf0ea225b342a008a535
| 520,911 |
def normalize_url(url: str) -> str:
    """Prefix ``url`` with the default http:// scheme when it has none.

    Fix: the original tested ``startswith("http")``, which wrongly treated
    hosts such as "httpbin.org" as already carrying a scheme. Match the
    full "http://" / "https://" prefixes (case-insensitively) instead.
    """
    if not url.lower().startswith(("http://", "https://")):
        return f"http://{url}"
    return url
|
8658eb06644a22c200c06020a2ed4dab7d35b802
| 229,532 |
from typing import List
def generate_board(
    row: int,
    column: int,
) -> List[List[str]]:
    """
    Generate a new board in a row * column manner.

    Parameters
    ----------
    row: int
        Number of rows to generate.
    column: int
        Number of columns to generate.

    Returns
    -------
    board: List[List[str]]
        2D array containing all the game detail: a column-letter header
        row, a row-number header cell per row, and blank building cells.
    """
    # Column header: blank corner cell, then 'A', 'B', ...
    header = [' '] + [chr(ord('A') + offset) for offset in range(column)]
    # Each data row: its 1-based number followed by empty cells.
    data_rows = [[str(number)] + [' ' for _ in range(column)]
                 for number in range(1, row + 1)]
    return [header] + data_rows
|
86610239ec107eb5261f10a0425773310b3fb343
| 687,336 |
import re
def guess_tag(description, tagmap):
    """
    Given a series description return a list of series tags this might be.

    By "series tag" we mean a short code like T1, DTI, etc.. that indicates
    more generally what the data is (usually the DICOM header
    SeriesDescription).

    <tagmap> is a dictionary that maps a regex to a series tag, where the regex
    matches the series description dicom header. If not specified this modules
    SERIES_TAGS_MAP is used.

    Returns None for no match, the tag itself for a unique match, and a
    list of tags when several patterns match.
    """
    # Fix: dict.iteritems() is Python 2 only; .items() works on Python 3.
    matches = list(set(
        tag for pattern, tag in tagmap.items() if re.search(pattern, description)
    ))
    if len(matches) == 0:
        return None
    if len(matches) == 1:
        return matches[0]
    return matches
|
23c7f54bf1ab540af4d7dd445a25d79d8806d23f
| 202,798 |
def get_complex_terms(ft_data, nsamples):
    """
    Split input data into real and imaginary components, keeping only
    bins 1 .. nsamples/2 - 1 of each row (bin 0 and the upper half of
    the spectrum are dropped).
    """
    half = int(nsamples / 2)
    spectrum = slice(1, half)
    return ft_data.real[:, spectrum], ft_data.imag[:, spectrum]
|
83ceeddbd4d19c0d454134fae6ba39824889c8ba
| 374,637 |
import six
def _convert_str(data, to_binary=False):
    """Helper to convert str to corresponding string or binary type.

    `data` has `str` type (in both Python 2/3); depending on the Python
    version and the boolean `to_binary` parameter, the matching text or
    binary representation is returned (unchanged when already correct).
    """
    if to_binary:
        return data.encode('utf-8') if six.PY3 else data
    return data.decode('utf-8') if six.PY2 else data
|
10d16fbe6be8148299969f7f455f3f54089cec45
| 643,949 |
def directory_get_basename(path: str) -> str:
    """Returns the last directory in a path.

    A trailing '/' is dropped first; otherwise, if the path contains a
    '.' anywhere (taken to indicate a file name), the final component is
    stripped before extracting the last directory.
    """
    trimmed = path
    if trimmed.endswith('/'):
        trimmed = trimmed[:-1]
    elif '.' in trimmed:
        trimmed = trimmed[:trimmed.rfind('/')]
    last_sep = trimmed.rfind('/')
    return trimmed[last_sep + 1:]
|
69af9336142c88cd705162b2e4522aef2ac95403
| 24,889 |
def _DecideSelfPromotion(lu, exceptions=None):
"""Decide whether I should promote myself as a master candidate.
"""
cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
# the new node will increase mc_max with one, so:
mc_should = min(mc_should + 1, cp_size)
return mc_now < mc_should
|
7bf82e0c1e343514fd77fd206edc444495c12f3e
| 366,454 |
import base64
def urlsafe_base64_encode(s):
    """
    Encode a bytestring to a base64 string for use in URLs, with trailing
    newline/padding ('=') characters stripped.
    """
    encoded = base64.urlsafe_b64encode(s)
    return encoded.rstrip(b'\n=').decode('ascii')
|
beb00646c24e11a76ad791aaa7c7fd2860e819fc
| 581,135 |
def HasRenderingStats(process):
    """ Returns True if the process contains at least one
    BenchmarkInstrumentation::*RenderingStats event with a frame.
    """
    if not process:
        return False
    stats_event_names = (
        'BenchmarkInstrumentation::DisplayRenderingStats',
        'BenchmarkInstrumentation::ImplThreadRenderingStats',
    )
    for event_name in stats_event_names:
        for event in process.IterAllSlicesOfName(event_name):
            # An event counts only when it carries exactly one frame.
            if 'data' in event.args and event.args['data']['frame_count'] == 1:
                return True
    return False
|
fcc918c351d75bbb508b0c3553529e5c15714efe
| 287,152 |
def str_from_c_buffer(buf):
    """Render a ctypes buffer as comma-separated 0x-prefixed byte values."""
    return ",".join("0x%02x" % ord(char) for char in buf)
|
fb68c880a2de4279e80c7cc5a98bc49116c185db
| 213,236 |
def mock_ansible_module(ansible_mod_cls, params, check_mode):
    """
    Prepare mocks for an AnsibleModule object: wire up params/check_mode
    and make fail_json / exit_json raise SystemExit(1) / SystemExit(0).
    """
    module = ansible_mod_cls.return_value
    module.params = params
    module.check_mode = check_mode
    module.fail_json.configure_mock(side_effect=SystemExit(1))
    module.exit_json.configure_mock(side_effect=SystemExit(0))
    return module
|
fcbc35f27bc16fe8e13c6c0f4b0d82c9d201e48f
| 465,359 |
def add_header(response):
    """Add CORS and Cache-Control headers to the response."""
    if not response.cache_control:
        # Default to a short 30s cache when the view set none.
        response.cache_control.max_age = 30
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'GET',
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
    return response
|
6e08a6d5958dcd0d46904b218b72b3519cfd283c
| 665,108 |
def convert_header_name(django_header):
    """Converts header name from django settings to real header name.

    For example:
        'HTTP_CUSTOM_CSRF' -> 'custom-csrf'
    """
    hyphenated = django_header.lower().replace("_", "-")
    # Drop the leading 'http-' prefix if present.
    return hyphenated.split("http-")[-1]
|
c7c246af1e76d5aedd07962c92b72599466db5bf
| 438,546 |
def max2(x,y):
    """
    Returns: max of x, y

    This function shows the flow of if-else with trace/watch prints.

    Parameter x: first value
    Precondition: x is a number

    Parameter y: second value
    Precondition: y is a number
    """
    # Pick the larger of x, y while printing the control-flow trace.
    print('before if')                 # trace
    if x > y:
        print('if x>y')                # trace
        larger = x
        print('z is '+str(larger))     # watch
    else:
        print('else x<=y')             # trace
        larger = y
        print('z is '+str(larger))     # watch
    print('after if')                  # trace
    return larger
|
fb9ea81c100d4705ec250cf96cea6eba90f06a80
| 606,910 |
import io
def load_supersenses(filename):
    """
    Loads a file listing all 26 WordNet noun supersenses, one per line.

    Returns:
        stoi : A mapping from supersense name (str) to integer id.
        itos : A list of supersense names (str), index is the integer id.
    """
    itos = []
    with io.open(filename, 'r') as fp:
        for line in fp:
            name = line.strip()
            # Each entry must be a single 'noun...' token.
            assert name.startswith("noun")
            assert len(name.split()) == 1
            itos.append(name)
    assert len(itos) == 26
    stoi = {name: index for index, name in enumerate(itos)}
    return stoi, itos
|
6e99fcc7f6169382394b86e51471251e0282d204
| 164,314 |
import re
def strip_finalcif_of_name(pth: str) -> str:
    """
    Strips a trailing '-finalcif' from the stem path.
    """
    suffix_pattern = re.compile('-finalcif$')
    return suffix_pattern.sub('', pth)
|
ac3fa2620496f49aaf3dcb6ada9f390898cf0679
| 166,355 |
def _alpha(rgb, a):
"""
Add alpha channel to a hex color.
:param str rgb: hex color.
:param float a: alpha channel (float between 0 and 1)
"""
a = int(a * 255)
return "{}{:02x}".format(rgb, a)
|
c4090d9fb793e3273da3557d470b262fc17f80df
| 594,580 |
import functools
def as_list(f):
"""Decorator that changes a generator into a function that returns a list."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return list(f(*args, **kwargs))
return wrapper
|
28b0de884a06f4a0933ef7afe9940e449cfb2144
| 636,884 |
def filter_none_kwargs(**kwargs):
    """
    Filters out any keyword arguments that are None.

    This is useful when specific optional keyword arguments might not be
    universally supported, so that stripping them out when they are not set
    enables more uses to succeed.

    Parameters
    ----------
    kwargs: dict
        The keyword arguments to filter

    Returns
    -------
    filtered_kwargs: dict
        The input dictionary, but with all entries having value None removed
    """
    filtered = {}
    for name, value in kwargs.items():
        if value is not None:
            filtered[name] = value
    return filtered
|
3a36a9d1fce6f8e1c6f55f8d67bd9631827aedd4
| 555,189 |
import yaml
def get_config(config_file_path):
    """Convert config yaml file to dictionary.

    Args:
        config_file_path : Path
            Path to config directory.
    Returns:
        config : dict
            Config represented as dictionary.
    """
    with open(config_file_path) as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
|
c569eed1a942071e5ae6404f53483ef90a35d1a3
| 59,078 |
def _chk_y_path(tile):
"""
Check to make sure tile is among left most possible tiles
"""
if tile[0] == 0:
return True
return False
|
cf733c778b647654652ae5c651c7586c8c3567b8
| 4,235 |
import requests
def download_file(url: str):
    """Download file from provided URL.

    Arguments:
        url {str} -- file's URL

    Raises:
        SystemExit: raised when the server does not answer with HTTP 200

    Returns:
        bytes -- content of the downloaded file
    """
    response = requests.get(url)
    if response.status_code != 200:
        message = 'Unable to download data file from URL: {}. Reason: {} {}'.format(
            url, response.status_code, response.reason)
        raise SystemExit(message)
    return response.content
|
aba46f635b111a83e65783354bdf3f7bff87cee0
| 287,948 |
def convert_to_float(frac_str):
    """
    Convert a fraction string (e.g. "1/3" or "2 1/2") to a float.

    Parameters
    ----------
    frac_str: string
        Plain number ("0.25"), fraction ("1/4"), or mixed number ("2 1/2").

    Returns
    -------
    float value corresponding to the string of fraction
    """
    try:
        return float(frac_str)
    except ValueError:
        pass
    numerator, denominator = frac_str.split('/')
    try:
        whole_part, numerator = numerator.split(' ')
        whole = float(whole_part)
    except ValueError:
        whole = 0
    frac = float(numerator) / float(denominator)
    # A negative whole part pulls the fraction further negative.
    return whole - frac if whole < 0 else whole + frac
|
4f4e2ad0e5eeee7a8cc54b8b724031a48a48d747
| 697,047 |
def get_job_attr(job_info, attr_name):
    """Return the whitespace-split value of a job attribute, or None.

    Scans ``job_info`` line by line for the first (stripped) line that
    starts with ``attr_name`` and returns its tokens after the label.
    """
    for raw_line in job_info.split("\n"):
        line = raw_line.strip()
        if line.startswith(attr_name):
            # The first token is the attribute label (e.g. 'command:').
            return line.split(" ")[1:]
    return None
|
a1f7c719f73c6c7c19338904d475ae22453ba620
| 320,976 |
import re
def filterresults(searchresults):
    """Given a list of search results, select all those likely to be for
    flags or coats of arms.

    :param searchresults: A list of Strings representing individual search results
    :type searchresults: [String]
    :returns: Results containing the substrings 'flag' or 'coat of arms'
        (case insensitive)
    :rtype: [String]"""
    pattern = re.compile('flag|coat of arms', re.IGNORECASE)
    return [result for result in searchresults if pattern.search(result)]
|
7a15f0754f0b99eb8c400a6d5ad949887f24f78e
| 338,231 |
def get_login_id(fw):
    """Get a readable login id for the current api key."""
    status = fw.get_auth_status()
    if not status.is_device:
        user = fw.get_current_user()
        return '{} {}'.format(user.firstname, user.lastname)
    device = fw.get_device(status.origin.id)
    device_type = device.get('type', 'device')
    device_name = device.get('name', status.origin.id)
    return '{} - {}'.format(device_type, device_name)
|
2ba29dd1ff7cb467d41a0b284d684a093b19b612
| 159,265 |
def extract_bc_matrices(mats):
    """Extract boundary matrices from list of ``mats``.

    Parameters
    ----------
    mats : list of list of instances of :class:`.TPMatrix` or
        :class:`.SparseMatrix`

    Returns
    -------
    list
        list of boundary matrices.

    Note
    ----
    The ``mats`` list is modified in place since boundary matrices are
    extracted.
    """
    bc_mats = []
    for inner in mats:
        # Iterate over a snapshot because we remove from ``inner``.
        for mat in list(inner):
            if mat.is_bc_matrix():
                bc_mats.append(mat)
                inner.remove(mat)
    return bc_mats
|
1eb35eebe29cf7dba47d176714eb49915d29917b
| 85,424 |
import pickle
def load(fn):
    """Load a pickled model, function, etc. from the file ``fn``.

    Fix: the original passed ``open(fn, 'rb')`` directly to pickle.load,
    leaving the file handle to be closed by the garbage collector; a
    context manager closes it deterministically.
    """
    with open(fn, 'rb') as fh:
        return pickle.load(fh)
|
6763067a1ba8b047e7688f0da3511435d76b5fc1
| 438,122 |
def bubble_sort(input_data):
    """Bubble sort with a shrinking unsorted boundary.

    The inner pass stops at the last unsorted position, since each pass
    bubbles the largest remaining element to the end, as described in
    https://www.youtube.com/watch?v=6Gv8vg0kcHc

    ``input_data`` is sorted in place (and also returned); callers should
    be aware of the mutation.
    """
    boundary = len(input_data) - 1
    swapped = True
    while swapped:
        swapped = False
        for position in range(boundary):
            neighbour = position + 1
            if input_data[position] > input_data[neighbour]:
                input_data[position], input_data[neighbour] = \
                    input_data[neighbour], input_data[position]
                swapped = True
        # Everything past ``boundary`` is already in its final position.
        boundary -= 1
    return input_data
|
75a298e645b1567c75befa25cdf703ac48cfe554
| 593,731 |
def compute_in_degrees(digraph):
    """ dict -> dict
    Compute the in-degree of every node in a digraph represented as an
    adjacency dictionary; returns {node: in-degree}.
    """
    indegrees = dict.fromkeys(digraph, 0)
    for edges in digraph.values():
        for head in edges:
            indegrees[head] += 1
    return indegrees
|
0bdb6f232407ad235dbccc46aecdbfc7de6467f8
| 217,798 |
def set_header_field(headers, name, value):
    """ Return new headers based on `headers` but with `value` set for the
    header field `name`.

    :param headers: the existing headers
    :type headers: list of tuples (name, value)
    :param name: the header field name
    :type name: string
    :param value: the value to set for the `name` header
    :type value: string
    :return: the updated headers
    :rtype: list of tuples (name, value)
    """
    merged = dict(headers)
    merged[name] = value
    return [(field, field_value) for field, field_value in merged.items()]
|
fc1562667b5b0e4b2184ba031a8d8f353c5704e7
| 575,134 |
def all_valid(formsets):
    """Returns true if every formset in formsets is valid.

    Note: every ``is_valid()`` is always called (no short-circuit),
    matching the original behavior.
    """
    results = [formset.is_valid() for formset in formsets]
    return all(results)
|
8b87fcf130707127f9b692acc3bb00fb4283eeb4
| 279,115 |
import requests
import logging
def internet_connectivity_check(url='http://www.google.com/', timeout=2):
    """
    Checks for internet connection availability based on google page.
    """
    try:
        requests.get(url, timeout=timeout)
    except requests.ConnectionError:
        logging.info("No internet connection.")
        return False
    return True
|
5322733f16929367336d6ad02ae39b029770bc74
| 168,996 |
import locale
def get_encoding(fd):
    """Guess terminal encoding: the stream's own, else the locale default."""
    if fd.encoding:
        return fd.encoding
    return locale.getpreferredencoding()
|
27648b1559e6da2aeaa549c516b969ba11ded688
| 227,081 |
def set_defaults(event):
    """Return ``event`` merged over the default values for optional arguments.

    Keys present in ``event`` win over the defaults.
    """
    defaults = {
        "content": "",
        "security_groups": [],
        "assign_public_ip": True,
        "cmd_to_run": "",
        "image": "",
        "task_role_arn": "",
        "mountpoints": {},
        "task_memory": "512",
        "task_cpu": "256",
        "token": "",
        "log_stream_prefix": "task",
        "credentials_secret_arn": "",
        "metric_namespace": "",
        "metric_dimensions": {},
        "send_error_logs_to_stepfunctions": True,
    }
    merged = dict(defaults)
    merged.update(event)
    return merged
|
edc05eb2ee05eff34bf9a64c9fbf9961ed8780fe
| 409,747 |
def get_matching_points(requested_file_names, all_file_names, object_points, image_points):
    """
    Gets the object points and image points of a requested set of files.

    :param requested_file_names: files to look through
    :param all_file_names: the list of file names
    :param object_points: object points of the images, parallel to all_file_names
    :param image_points: image points of the images, parallel to all_file_names
    :return: the requested object points and image points
    """
    wanted = set(requested_file_names)
    matched_object_points = []
    matched_image_points = []
    for index, name in enumerate(all_file_names):
        if name not in wanted:
            continue
        matched_object_points.append(object_points[index])
        matched_image_points.append(image_points[index])
    return matched_object_points, matched_image_points
|
4c32090c11ab64775154465191bc1d345dcaaaf6
| 113,916 |
def _total_seconds_backport(offset):
"""Backport of timedelta.total_seconds() from python 2.7+.
:type offset: :class:`datetime.timedelta`
:param offset: A timedelta object.
:rtype: int
:returns: The total seconds (including microseconds) in the
duration.
"""
seconds = offset.days * 24 * 60 * 60 + offset.seconds
return seconds + offset.microseconds * 1e-6
|
4b5e57e967c47837df9de9f7db8430aa012a519a
| 573,153 |
import importlib
def import_module_attribute(function_path):
"""Import and return a module attribute given a full path."""
module, attribute = function_path.rsplit(".", 1)
app_module = importlib.import_module(module)
return getattr(app_module, attribute)
|
ce2647bb193c2a6c07949073f7c0d142ee8cd1b5
| 1,936 |
def assignment_explicit_no_context(arg):
    """Expected assignment_explicit_no_context __doc__"""
    template = "assignment_explicit_no_context - Expected result: %s"
    return template % arg
|
c11ef28d9942d73bea6ed14547aa11648d37f3a9
| 654,818 |
def similarity_regularisation(shapelet_similarity):
    """The regularisation penalty for ensuring that shapelets look like the
    input data.

    Arguments:
        shapelet_similarity: A tensor of shape (..., num_shapelets), where
            ... is any number of batch dimensions, representing the
            similarity between each sample and each of the shapelets.

    Returns:
        A scalar penalty: the minimum over samples of the per-sample
        summed similarity.
    """
    per_sample_total = shapelet_similarity.sum(dim=-1)
    return per_sample_total.min()
|
07d32422ec9a78a26ff6a140a1aa981a547920d1
| 558,583 |
def get_features(encoder, x):
    """Collect the output of each encoder block after the input layer."""
    hidden = encoder.layers[0](x)  # input layer: its output is not recorded
    collected = []
    for block in encoder.layers[1:]:
        hidden = block(hidden)
        collected.append(hidden)
    return collected
|
a26856bdf445d294f0f7b2c0995c55ca6324f672
| 491,352 |
def to_latex(x, dp=1, double_backslash=True):
    """
    Convert a decimal into LaTeX scientific notation.

    Parameters
    ---------------
    x : A float, the number to convert to LaTeX notation, e.g. 0.42
    dp : An int, the number of decimal places for the mantissa
    double_backslash : A bool, whether the backslash in the LaTeX command
        is written escaped at the Python-source level

    Returns
    -----------
    A string where x is cast in LaTeX as scientific notation,
    e.g. "4.2\\times 10^{-1}" (a single literal backslash at runtime)
    """
    fmt = "%.{}e".format(dp)
    s = fmt % x
    arr = s.split('e')
    m = arr[0]
    n = str(int(arr[1]))
    # BUG FIX: the non-double_backslash branch used '\times' in a non-raw
    # string, so '\t' was a literal TAB character in the output. Both
    # branches now emit a single literal backslash before 'times'.
    if double_backslash:
        return str(m) + '\\times 10^{' + n + '}'
    else:
        return str(m) + r'\times 10^{' + n + '}'
|
abd966f3514b2adac32cbb8dd22ffd4c194c6458
| 357,042 |
def url_to_fn(url):
    """
    Convert `url` to filename used to download the datasets.

    ``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.

    Args:
        url (str): URL of the resource.

    Returns:
        str: Normalized URL.
    """
    without_scheme = url.replace("http://", "").replace("https://", "")
    without_query = without_scheme.split("?")[0]
    return without_query.replace("%", "_").replace("/", "_")
|
375215594fe7193bde35bf33112e09e380760989
| 399,400 |
import re
import keyword
def is_valid_identifier(var, allow_unicode=False):
    """Return True if *var* is a valid Python identifier.

    Parameters
    ----------
    var : string
        identifier to check
    allow_unicode : bool (default: False)
        if True, then allow Python 3 style unicode identifiers.
    """
    flags = re.UNICODE if allow_unicode else re.ASCII
    # Raw string fixes the invalid "\d"/"\W" escapes (a SyntaxWarning on
    # modern Python); bool() normalises the former Match-or-False return
    # into a real boolean, as the docstring promises.
    return bool(re.match(r"^[^\d\W]\w*\Z", var, flags)) and not keyword.iskeyword(var)
|
27e5f63de31d3f713395be5d0c9867cba0d62047
| 651,955 |
def compareEvents(test, actualEvents, expectedEvents):
    """
    Assert that two sequences of log events match, comparing only the keys
    that occur somewhere in the expected events.

    @param test: a test case doing the comparison
    @type test: L{unittest.TestCase}

    @param actualEvents: A list of log events that were emitted by a logger.
    @type actualEvents: L{list} of L{dict}

    @param expectedEvents: A list of log events that were expected by a test.
    @type expected: L{list} of L{dict}
    """
    if len(actualEvents) != len(expectedEvents):
        # Lengths differ: let the full comparison produce the failure message.
        test.assertEqual(actualEvents, expectedEvents)
    relevantKeys = set()
    for expected in expectedEvents:
        relevantKeys.update(expected)
    trimmed = [
        {key: value for key, value in event.items() if key in relevantKeys}
        for event in actualEvents
    ]
    test.assertEqual(trimmed, expectedEvents)
|
53216d1c77cf8d2e104197ee5f7fb32963505433
| 39,745 |
import re
def removeSpecialChars(stringToChange):
    """Given a string, removes all non-alphanumeric (ASCII) characters."""
    return re.sub('[^a-zA-Z0-9]', '', stringToChange)
|
f0203f169f16785d5ee6e37bf43e4a864b5489c2
| 575,333 |
def build_recurrent_dpcl(num_features, hidden_size, num_layers, bidirectional,
    dropout, embedding_size, embedding_activation, num_audio_channels=1,
    rnn_type='lstm', normalization_class='BatchNorm',
    normalization_args=None, mix_key='mix_magnitude'):
    """
    Builds a config for a deep clustering network that can be passed to
    SeparationModel. This deep clustering network uses a recurrent neural network (RNN)
    to process the input representation.

    Args:
        num_features (int): Number of features in the input spectrogram (usually means
          window length of STFT // 2 + 1.)
        hidden_size (int): Hidden size of the RNN.
        num_layers (int): Number of layers in the RNN.
        bidirectional (int): Whether the RNN is bidirectional.
        dropout (float): Amount of dropout to be used between layers of RNN.
        embedding_size (int): Embedding dimensionality of the deep clustering network.
        embedding_activation (list of str): Activation of the embedding ('sigmoid', 'softmax', etc.).
          See ``nussl.ml.networks.modules.Embedding``.
        num_audio_channels (int): Number of audio channels in input (e.g. mono or stereo).
          Defaults to 1.
        rnn_type (str, optional): RNN type, either 'lstm' or 'gru'. Defaults to 'lstm'.
        normalization_class (str, optional): Type of normalization to apply, either
          'InstanceNorm' or 'BatchNorm'. Defaults to 'BatchNorm'.
        normalization_args (dict, optional): Args to normalization class, optional.
        mix_key (str, optional): The key to look for in the input dictionary that contains
          the mixture spectrogram. Defaults to 'mix_magnitude'.

    Returns:
        dict: A recurrent deep clustering network configuration that can be passed to
          SeparationModel.
    """
    normalization_args = {} if normalization_args is None else normalization_args
    # define the building blocks
    modules = {
        mix_key: {},
        'log_spectrogram': {
            'class': 'AmplitudeToDB',
        },
        'normalization': {
            'class': normalization_class,
            'args': normalization_args,
        },
        'recurrent_stack': {
            'class': 'RecurrentStack',
            'args': {
                'num_features': num_features,
                'hidden_size': hidden_size,
                'num_layers': num_layers,
                'bidirectional': bidirectional,
                'dropout': dropout,
                'rnn_type': rnn_type
            }
        },
        'embedding': {
            'class': 'Embedding',
            'args': {
                'num_features': num_features,
                'hidden_size': hidden_size * 2 if bidirectional else hidden_size,
                'embedding_size': embedding_size,
                'activation': embedding_activation,
                'num_audio_channels': num_audio_channels
            }
        },
    }
    # define the topology
    # BUGFIX: the first connection previously hard-coded 'mix_magnitude',
    # which broke any configuration built with a non-default ``mix_key``
    # (the module dict used mix_key but the graph referenced a missing node).
    connections = [
        ['log_spectrogram', [mix_key, ]],
        ['normalization', ['log_spectrogram', ]],
        ['recurrent_stack', ['normalization', ]],
        ['embedding', ['recurrent_stack', ]],
    ]
    # define the outputs
    output = ['embedding']
    # put it together
    config = {
        'name': 'DeepClustering',
        'modules': modules,
        'connections': connections,
        'output': output
    }
    return config
|
a829d1b21e56ccf345a29363e32ada0ac1f5f26d
| 455,193 |
def get_revoke_key(revoke_statement):
    """
    Create the key from the revoke statement.

    The key will be used as the dictionary key.

    :param revoke_statement: The revoke statement
    :return: The key, "<privilege>_<table>"
    """
    tokens = revoke_statement.split()
    privilege = tokens[1]
    target = tokens[3]
    if "." in target:
        # Qualified name like "db.table": keep only the table part.
        target = target.split('.')[1]
    return privilege + "_" + target
|
0001bc2d14569bef8acbd799487450af1cf337f7
| 628,068 |
def _ParseBucketName(name):
"""Normalizes bucket name.
Normalizes bucket name. If it starts with gs://, remove it.
Api_lib's function doesn't like the gs prefix.
Args:
name: gs bucket name string.
Returns:
A name string without 'gs://' prefix.
"""
gs = 'gs://'
if name.startswith(gs):
return name[len(gs):]
return name
|
07a3486152e50cdfd7e1567d6f867dbd7edef80a
| 566,811 |
def should_cache_titles(titles):
    """Return False when titles is an empty dict, so it is never cached."""
    return not (titles == {})
|
4ec0087cfaaaf3571901aee91f62523992924457
| 539,309 |
def get_string_of_int(n: int, gender: bool = True) -> str:
    """
    Get a stringified version of an int in range(0, 1000), in Russian words.
    gender is optional and is True for masculine, False for feminine.
    Raise an OverflowError if the number is not in that range.
    """
    # 0..19 are irregular words; look them up directly.  "один"/"два"
    # ("one"/"two") carry grammatical gender, hence the conditional forms.
    if n in range(20):
        return \
            ["нуль", "один" if gender else "одна", "два" if gender else "две", "три", "четыре", "пять", "шесть", "семь",
             "восемь", "девять", "десять",
             "одиннадцать",
             "двенадцать", "тринадцать", "четырнадцать", "пятнадцать", "шестнадцать", "семнадцать", "восемнадцать",
             "девятнадцать"][n]
    # 20..99: tens word + units word; empty strings for the 0/1 tens slots
    # keep the list indexable by the tens digit.
    if n in range(20, 100):
        n0 = n % 10
        n1 = n // 10
        output=[]
        output += [["", "", "двадцать", "тридцать", "сорок", "пятьдесят", "шестьдесят", "семьдесят", "восемьдесят",
                    "девяносто"][n1]]
        output += [(["", "один" if gender else "одна", "два" if gender else "две", "три", "четыре", "пять", "шесть",
                     "семь",
                     "восемь", "девять"][n0])]
        # strip() removes stray spaces when either part is empty (e.g. 20, 30).
        return " ".join(output).strip()
    # 100..999: hundreds word, then recurse for the remaining two digits
    # (skipped entirely when they are zero, e.g. 300 -> "триста").
    if n in range(100, 1000):
        n0 = n % 10
        n1 = n // 10 % 10
        n2 = n // 100
        output = ["", "сто", "двести", "триста", "четыреста", "пятьсот", "шестьсот", "семьсот", "восемьсот",
                  "девятьсот"][n2]
        output += (" " + get_string_of_int(n1 * 10 + n0, gender)) if n1 + n0 > 0 else ""
        return "".join(output).strip()
    raise OverflowError("{} not in range(0, 1000).".format(n))
|
4f152b281fa81a3f67a90359ed7bd1678d05d34a
| 454,051 |
def getFirstTypeParent(start, node_type):
    """
    Find the nearest ancestor of the given start whose node type matches.

    Args:
        start (pm.nodetypes.Transform): Transform whose parents are searched.

        node_type (string): Type of transform to find.

    Returns:
        (pm.nodetypes.Transform or None): First parent of the given node type
        if found, else None.
    """
    return next(
        (parent for parent in start.getAllParents() if parent.nodeType() == node_type),
        None,
    )
|
2504562ff3e0332136795d20a8ff6481bd263766
| 493,318 |
import re
def _is_newstyle_arxiv_id(arxiv_number):
"""Check whether arxiv id is old or new style (or invalid)."""
if re.match(r'^[0-9]{4}\.[0-9]*$', arxiv_number):
return True
if re.match(r'^[a-z]*-[a-z]*/[0-9]*$', arxiv_number):
return False
raise ValueError('Not a proper arxiv id')
|
b63362005b5de4158de524965d8cd927be3affff
| 420,197 |
from typing import Dict
def default_credentials() -> Dict[str, str]:
    """
    Returns default credentials, shared so tests across the project can
    reuse the same fixture.
    """
    return dict(
        email="[email protected]",
        password="testme",
    )
|
5f8e8ca15d2b3ee4961bce8fe680ce165f904159
| 77,922 |
from typing import List
from typing import Any
from typing import Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
    """
    Render rows as a Unicode box-drawing table.

    :param rows: 2D list containing objects that have a single-line representation
        (via `str`). All rows must be of the same length.
    :param labels: List containing the column labels. If present, the length must
        equal to that of each row.
    :param centered: If the items should be aligned to the center, else they are
        left aligned.
    :return: A table representing the rows passed in.
    :raises ValueError: if rows (or labels) do not all share the same length.
        (The previous implementation printed the error and called exit(0),
        killing the interpreter and swallowing its own exception; raising is
        recoverable and testable.)
    """
    n_cols = len(rows[0])
    if labels and len(labels) != n_cols:
        raise ValueError("The dimensions of rows are unequal. Please recheck the input.")
    if any(len(row) != n_cols for row in rows):
        raise ValueError("The dimensions of rows are unequal. Please recheck the input.")

    # Stringify every cell once, up front.
    body = [[str(item) for item in row] for row in rows]
    header = [str(item) for item in labels] if labels else None

    # Column widths: the widest cell (or label) in each column.
    measured = body if header is None else [header] + body
    widths = [max(len(row[i]) for row in measured) for i in range(n_cols)]

    def pad(cell: str, width: str) -> str:
        # Manual centering matches the historical diff//2-left padding
        # (str.center shifts the extra space differently for odd widths).
        if not centered:
            return cell.ljust(width)
        margin = width - len(cell)
        return " " * (margin // 2) + cell + " " * (margin - margin // 2)

    def rule(left: str, mid: str, right: str) -> str:
        # One horizontal border line, e.g. "┌───┬───┐".
        return left + mid.join("─" * (w + 2) for w in widths) + right

    def line(cells: List[str]) -> str:
        # One content line, e.g. "│ a │ b │".
        return "│ " + " │ ".join(pad(c, w) for c, w in zip(cells, widths)) + " │"

    lines = [rule("┌", "┬", "┐")]
    if header is not None:
        lines.append(line(header))
        lines.append(rule("├", "┼", "┤"))
    lines.extend(line(row) for row in body)
    lines.append(rule("└", "┴", "┘"))
    # join is O(n) overall, unlike the old repeated string concatenation.
    return "\n".join(lines) + "\n"
|
0a2d5cd1bb7732bc08611a9571afe292b6988a0e
| 493,171 |
def Synchronized(method):
    """Decorator to acquire a mutex around an APIProxyStub method.

    Args:
      method: An unbound method of APIProxyStub or a subclass.

    Returns:
      The method, altered such it acquires self._mutex throughout its execution.
    """
    import functools

    # functools.wraps preserves the wrapped method's __name__/__doc__ etc.,
    # which the bare wrapper previously clobbered (breaking introspection).
    @functools.wraps(method)
    def WrappedMethod(self, *args, **kwargs):
        with self._mutex:
            return method(self, *args, **kwargs)
    return WrappedMethod
|
7e25da54ca7a8fc7b1fe66d15e6856a41648728e
| 561,171 |
def format_float(f, precision=3):
    """Render *f* as a fixed-point string with *precision* decimal places."""
    return f"{f:.{precision}f}"
|
3e39795e0c8e0de1ee41d18d9a632ca361ac8254
| 365,859 |
def first_line(text, keep_empty=False, default=None):
    """First line in 'text', if any

    Args:
        text (str | list | None): Text to examine
        keep_empty (bool): When False skip empty lines (+ strip spaces/newlines), when True don't filter (strip newlines only)
        default (str | None): Default to return if there was no first line

    Returns:
        (str | None): First line, if any
    """
    if text is None:
        return default
    lines = text.splitlines() if hasattr(text, "splitlines") else text
    for raw in lines:
        if keep_empty:
            # Unconditionally return the very first entry, newline-stripped.
            return raw.strip("\n")
        stripped = raw.strip()
        if stripped:
            return stripped
    return default
|
7090aca1d7e5e187d41ff0c39ec78a3866f4d3a1
| 633,262 |
def check_headers(request, headers):
    """check_headers ensures that all header keys are included in a request;
    returns False as soon as one is missing.

    Parameters
    ==========
    request: the request object
    headers: the headers (keys) to check for
    """
    return all(name in request.META for name in headers)
|
1040780e227ced0d46450c9b74b0953613b528e3
| 256,567 |
def integral(field, direction='xyz', improper=False):
    """Integral.

    Thin functional wrapper delegating to the ``integral`` method of the
    ``discretisedfield.Field`` object.

    For details, please refer to
    :py:func:`~discretisedfield.Field.integral`
    """
    kwargs = {'direction': direction, 'improper': improper}
    return field.integral(**kwargs)
|
694926eebb911dcc6bd3bfb23bf758b046c6241d
| 171,486 |
def to_num(text):
    """
    Convert a string to a number.

    Returns an integer if the string represents an integer, a floating
    point number if the string is a real number, or the string unchanged
    otherwise.
    """
    # Try the narrowest type first so "3" stays an int rather than 3.0.
    for converter in (int, float):
        try:
            return converter(text)
        except ValueError:
            pass
    return text
|
1862319c06de7c646a16fdf8fabdef679f81f5c7
| 570,996 |
def tf_function_nop(*input_):
    """
    No-op pass-through: returns the received positional parameters unchanged.

    :param input_: Parameter(s) to pass thru
    :return: input_ (as a tuple)
    """
    return input_
|
a8d9b11702aa90ea315bef07cf5376754239d3f7
| 507,605 |
def split_and_strip(input_string, delim=","):
"""Convert a string into a list using the given delimiter"""
if not input_string: return list()
return map(str.strip, input_string.split(delim))
|
ef567b8d07113e244e682a6357ad4eae09f2f45c
| 436,038 |
def init_split(df, featname, init_bins=100):
    """
    Compute initial split points for column ``featname`` of ``df`` and return
    the interior cut values.

    To guarantee that every observed value falls inside some interval, the
    midpoint between each pair of adjacent distinct values is used as a
    candidate cut.  Note this is NOT equal-frequency (or any other standard)
    binning: it simply derives candidate cuts and thins them down to the
    requested number of bins.  Whether intervals are treated as open or closed
    is up to the caller; this function only controls the number of cuts.
    Leftover candidates spill into the last bin (e.g. 101 candidates split
    into 100 bins leaves two samples' worth in the final bin).

    Parameters:
    ----------
    df: dataframe, the input data
    featname: str, name of the feature column
    init_bins: int, desired number of bins

    Returns:
    -------
    Ascending list of cut values, e.g. [1, 5, 9, 18]
    """
    # Distinct values of the feature, sorted ascending.
    list_unique_vals_order = sorted(list(set(df[featname])))
    # Midpoints between adjacent distinct values; the loop deliberately
    # stops at len - 1 since each midpoint needs a right neighbour.
    list_median_vals = []
    for i in range(len(list_unique_vals_order) - 1):
        list_median_vals.append((list_unique_vals_order[i] + list_unique_vals_order[i + 1]) / 2)
    # Number of candidate cut points.
    cnt_unique_vals = len(list_median_vals)
    # If there are no more candidates than init_bins, return them all.
    # Otherwise keep evenly spaced candidates from the start, so the
    # remainder accumulates in the final bin.
    if cnt_unique_vals <= init_bins:
        return list_median_vals
    else:
        # Candidates per bin; integer division on purpose.
        cnt_perbin = cnt_unique_vals // init_bins
        # Keep init_bins - 1 cut values (cuts delimit init_bins intervals).
        list_median_vals = [list_median_vals[i * cnt_perbin] for i in range(init_bins - 1)]
        return list_median_vals
|
172f48bb019fd4cf4941b2ff0fefcb24709fe7a4
| 703,375 |
def make_subselect(values, selected, display=None):
    """
    Build the inner HTML (the <option> elements) for a drop-down <select>.

    values: the list of options as seen by the app - displayed to the user if display is None
    selected: the current value - if display is present, one of display, otherwise one of choices
    display: if present, this is the list the user will see
    """
    if display is None:
        options = [
            '<option{}>{}</option>'.format(' selected ' if value == selected else '', value)
            for value in values
        ]
    else:
        assert len(display) == len(values)
        options = [
            '<option name="{}"{}>{}</option>'.format(
                value, ' selected ' if value == selected else '', label)
            for value, label in zip(values, display)
        ]
    return ''.join(options)
|
64bfa87b3402a860ed2b512161a17128d1d49ef5
| 335,651 |
def _check_and_sanitize_args(args):
"""Perform checks on parsed arguments"""
if args.method == 'pure-metric-finetuned':
assert args.src1
return args
|
2014d3ea5ecba139a4949100592eca37fb6c76cf
| 238,525 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.