content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def input_valid_number(max: int) -> int:
    """
    Prompt until the user enters an integer between 1 and ``max`` inclusive.

    Anything else is rejected and the prompt is repeated.

    :param max: upper bound (inclusive) of the accepted range.
    :return: the validated number.
    """
    while True:
        try:
            nbr = int(input(f"Enter the number of counted domains : (1-{max}): "))
        except ValueError:
            # Non-integer input: explain and re-prompt. (Message grammar fixed.)
            print(f"Sorry, I need an integer between 1 and {max}")
            continue
        if nbr < 1:
            print("Sorry, your response must not be less than 1.")
        elif nbr > max:
            print(f"Sorry, your response must not be more than {max}")
        else:
            # Valid number entered.
            return nbr
|
6a4846f2d93318577692eb861e3a568721c55102
| 235,606 |
def clean_ad_id(ad_id):
    """Drop the 6-character prefix from ``ad_id`` (when it is longer than 5
    characters) and return the remainder as an integer.

    NOTE(review): the guard checks ``len > 5`` but the slice removes 6
    characters, so a 6-character id becomes ``int('')`` and raises —
    confirm the intended prefix length.
    """
    stripped = ad_id[6:] if len(ad_id) > 5 else ad_id
    return int(stripped)
|
fa503bf70cb8a7332941f617487bc20be737294e
| 275,493 |
def try_dict(value):
    """Try converting objects first to dict, then to str so we can easily
    compare them against JSON."""
    try:
        converted = dict(value)
    except (ValueError, TypeError):
        converted = str(value)
    return converted
|
9d36b9488e46b2b2bc30b8c007fa97f59315b930
| 378,443 |
from typing import List
import re
def parse_list(arg: str) -> List[str]:
    """
    Parse a script argument containing a list of values.

    Tokens are split on every run of non-alphanumeric characters.
    """
    tokens = re.split(r'\W+', arg)
    return tokens
|
da9bc5d3beec9f88c2fcbee59f35f6d3bfb548dc
| 338,902 |
def _input_mock(promp_message: str) -> str:
    """Thin wrapper around ``input`` so it can be mocked in nose tests."""
    # https://stackoverflow.com/questions/25878616/attributeerror-none-does-not-have-the-attribute-print
    response = input(promp_message)
    return response
|
6ecffcf06984cb477c52edefec42416774ee248d
| 387,858 |
def is_cql(cql_collections):
    """
    Check whether any collection resource supports feature filtering (CQL).

    :param cql_collections: dict mapping collection names to CQL support flags
    :returns: boolean value
    """
    # any() short-circuits on the first truthy flag, like the original loop.
    return any(cql_collections.values())
|
01791854cf0fe7147b86f5d54ca1123712fa0b61
| 566,892 |
def deg_to_dms(deg):
    """Convert decimal degrees to (deg, arcmin, arcsec).

    :param deg: angle in decimal degrees (intended for non-negative values).
    :return: tuple (d, m, s) with integer degrees and arcminutes and float
        arcseconds.

    Bug fixed: the original computed ``s = deg - m//60`` which (since
    ``m < 60`` makes ``m//60 == 0``) returned the fractional *degrees*
    instead of arcseconds.
    """
    d = int(deg)
    frac_deg = deg - d
    total_min = frac_deg * 60.0
    m = int(total_min)
    # Arcseconds are the leftover minutes scaled by 60.
    s = (total_min - m) * 60.0
    return d, m, s
|
c5db284ace27822d9090e5e4ecfd99e97f497198
| 86,987 |
import sqlite3
def create_sqlite_db_connection(filename):
    """Create and return a connection to the SQLite database ``filename``."""
    print("Creating SQLite DB connection...")
    connection = sqlite3.connect(filename)
    return connection
|
d4fa82594e777b9f4d330b95ce505da26816df6f
| 154,980 |
def transpose(arr):
    """Return the transpose of ``arr`` via its ``.T`` attribute."""
    flipped = arr.T
    return flipped
|
cbfe4e932a981ed370b2de73b0188d3da25ad9ec
| 389,047 |
def catch(func, *args, verbose=False):
    """Error handling for list comprehensions. In practice, it's recommended
    to use the higher-level robust_comp() function which uses catch() under
    the hood.

    Parameters
    -----------
    func: function
    *args: any type
        Arguments to be passed to func.
    verbose: bool
        If True, print the error message should one occur.

    Returns
    --------
    any type: The function's output when it executes successfully,
    otherwise None.

    Examples
    ---------
    [catch(lambda x: 1 / x, i) for i in range(3)]
    >>> [None, 1.0, 0.5]
    # Note that the filtering method shown below also removes zeros which is
    # okay in this case.
    list(filter(None, [catch(lambda x: 1 / x, i) for i in range(3)]))
    >>> [1.0, 0.5]
    """
    try:
        return func(*args)
    except Exception as err:
        if verbose:
            print(err)
        return None
|
5eeb3e66e4cab1131bfe87ec3b3beb9db592834b
| 266,175 |
import typing
def extgcd(a: int, b: int) -> typing.Tuple[int, int, int]:
    """
    Extended Euclidean algorithm.

    Returns the greatest common divisor of ``a`` and ``b`` together with
    their Bezout coefficients. Both numbers must be positive (natural).

    Example:
        egcd(12, 8) -> (4, 1, -1), since 4 = 1 * 12 - 1 * 8
    """
    if a <= 0 or b <= 0:
        raise ValueError("Числа могут быть только натуральными")
    # NOTE: swapping a and b here would break the coefficient bookkeeping.
    prev = (a, 1, 0)
    curr = (b, 0, 1)
    while curr[0]:
        q = prev[0] // curr[0]
        prev, curr = curr, (prev[0] - q * curr[0],
                            prev[1] - q * curr[1],
                            prev[2] - q * curr[2])
    return prev
|
9fbeb2ebf9dcfbf4bcb7e396a59bb580feb61ac6
| 423,225 |
async def readyz():
    """Readiness-probe endpoint: always reports the service as ready."""
    payload = {"status": "ready"}
    return payload
|
7f089fc63ee74ce8eec872d80eeb5129fc267272
| 626,816 |
import re
def convert_to_snake_case(name: str) -> str:
    """
    Convert a camelCase or kebab-case string to snake_case.

    >>> convert_to_snake_case('SomeCamelCase')
    'some_camel_case'
    >>> convert_to_snake_case('sample-kebab-case')
    'sample_kebab_case'
    """
    # Insert an underscore before every capital (except a leading one),
    # then unify dashes and case.
    underscored = re.sub(r"(?<!^)(?=[A-Z])", "_", name)
    return underscored.replace("-", "_").lower()
|
98d5ed829eae7501d48ab83ada76ed24510fe5f3
| 403,591 |
import requests
def get_seed(pulse_url):
    """
    Fetch the seed published by a Random UChile beacon pulse.

    :param pulse_url: String URL of the pulse.
    :return: A 512-bit random string (the pulse's 'outputValue') usable as a
        seed for a pseudo-random generator.
    """
    payload = requests.get(pulse_url).json()
    return payload['pulse']['outputValue']
|
411746e0768599ea6d3e4fc23a5efc7482a18373
| 47,171 |
def eval_f_0123(f):
    """Evaluate f = f(x) at x = 0, 1, 2 and 3, returning
    [f(0), f(1), f(2), f(3)]."""
    return list(map(f, range(4)))
|
f5da053f9f284bbe2973595083a4d1ba530080f4
| 356,060 |
from itertools import cycle
def encrypt_with_xor(message, key):
    """Encrypt ``message`` using a XOR cipher with ``key``.

    Each character's code is XOR-ed with the code of the cycling key
    character. The algorithm is symmetric: applying it twice with the same
    key decrypts.

    :param message: plaintext (or ciphertext) string.
    :param key: key string, cycled over the message length.
    :return: the transformed string.
    """
    # ''.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(
        chr(ord(message_char) ^ ord(key_char))
        for message_char, key_char in zip(message, cycle(key))
    )
|
b10d12d66161b4ba9aa8fa22f22d08e23a2c7723
| 406,958 |
def _is_ascii_tipsy(f):
"""
Checks if a tipsy auxiliary array is ascii
"""
if isinstance(f, str):
with open(f, 'rb') as fstream:
return _is_ascii_tipsy(fstream)
ascii = True
try:
l = int(f.readline())
except ValueError:
ascii = False
return ascii
|
07c0108c34f53d76919389a938ae67e1bf693089
| 131,979 |
import secrets
import hashlib
def scramble(password: str):
    """Hash the given password with SHA-512 after appending a fresh random
    hex salt.

    Note: the salt is generated locally and not returned, so the result
    cannot be re-verified against the same password later.
    """
    salt = secrets.token_hex(16)
    digest = hashlib.sha512((password + salt).encode('utf-8'))
    return digest.hexdigest()
|
184163db0afadd8eaa36132b43ad653826ed710d
| 479,271 |
import random
def des_sky_brightness(bands=''):
    """
    Sample from the distribution of single-epoch sky brightness for DES.

    :param bands: comma-separated band names among 'g', 'r', 'i', 'z', 'Y'
        (e.g. ``'g,r'``). An empty string yields an empty list — the
        original raised ``KeyError('')`` for the default argument.
    :return: list with one sampled magnitude per requested band.
    """
    if not bands:
        # Fix: ''.split(',') == [''] used to hit dist[''] and crash.
        return []
    # Distributions digitized from Figure 4 in
    # https://arxiv.org/pdf/1801.03181.pdf
    dist = {'g': {'VALUES': [21.016, 21.057, 21.106, 21.179, 21.228, 21.269, 21.326,
                             21.367, 21.424, 21.465, 21.522, 21.571, 21.62, 21.677,
                             21.717, 21.774, 21.823, 21.872, 21.921, 21.97, 22.019,
                             22.068, 22.117, 22.174, 22.215, 22.272, 22.321, 22.378,
                             22.427, 22.476],
                  'WEIGHTS': [0.0, 0.0, 0.001, 0.001, 0.001, 0.001, 0.002, 0.003,
                              0.005, 0.007, 0.009, 0.012, 0.016, 0.023, 0.034, 0.048,
                              0.063, 0.073, 0.081, 0.093, 0.107, 0.099, 0.087, 0.076,
                              0.061, 0.05, 0.027, 0.013, 0.005, 0.0]},
            'r': {'VALUES': [20.16, 20.209, 20.266, 20.323, 20.372, 20.421, 20.47,
                             20.519, 20.576, 20.625, 20.674, 20.715, 20.772, 20.821,
                             20.87, 20.918, 20.976, 21.024, 21.073, 21.122, 21.171,
                             21.22, 21.269, 21.326, 21.375, 21.424, 21.473, 21.522,
                             21.571, 21.62, 21.668, 21.726],
                  'WEIGHTS': [0.0, 0.0, 0.001, 0.001, 0.002, 0.002, 0.005, 0.008,
                              0.011, 0.011, 0.012, 0.02, 0.023, 0.034, 0.043, 0.046,
                              0.056, 0.07, 0.075, 0.083, 0.093, 0.095, 0.092, 0.078,
                              0.057, 0.041, 0.024, 0.012, 0.004, 0.001, 0.0, 0.0]},
            'i': {'VALUES': [18.921, 18.978, 19.027, 19.076, 19.125, 19.174, 19.223,
                             19.272, 19.321, 19.378, 19.418, 19.476, 19.524, 19.573,
                             19.622, 19.671, 19.728, 19.777, 19.826, 19.875, 19.924,
                             19.973, 20.022, 20.071, 20.12, 20.177, 20.226, 20.274,
                             20.323, 20.372, 20.421, 20.478, 20.527, 20.576, 20.617,
                             20.674, 20.723, 20.772, 20.829],
                  'WEIGHTS': [0.0, 0.0, 0.002, 0.002, 0.001, 0.002, 0.003, 0.005,
                              0.013, 0.017, 0.018, 0.026, 0.029, 0.035, 0.036, 0.047,
                              0.053, 0.067, 0.078, 0.084, 0.073, 0.073, 0.063, 0.05,
                              0.045, 0.039, 0.031, 0.026, 0.021, 0.018, 0.014, 0.009,
                              0.009, 0.003, 0.002, 0.002, 0.001, 0.0, 0.0]},
            'z': {'VALUES': [17.715, 17.772, 17.804, 17.861, 17.918, 17.976, 18.024,
                             18.073, 18.122, 18.171, 18.228, 18.277, 18.326, 18.375,
                             18.424, 18.473, 18.522, 18.579, 18.628, 18.677, 18.726,
                             18.774, 18.823, 18.872, 18.921, 18.97, 19.019, 19.076,
                             19.125, 19.174, 19.231, 19.264, 19.329, 19.37, 19.427,
                             19.467, 19.524, 19.573, 19.63],
                  'WEIGHTS': [0.0, 0.0, 0.0, 0.001, 0.001, 0.004, 0.007, 0.008,
                              0.012, 0.014, 0.015, 0.022, 0.028, 0.028, 0.033, 0.045,
                              0.052, 0.058, 0.064, 0.073, 0.082, 0.078, 0.069, 0.059,
                              0.051, 0.044, 0.036, 0.024, 0.019, 0.018, 0.017, 0.015,
                              0.01, 0.005, 0.002, 0.002, 0.002, 0.001, 0.0]},
            'Y': {'VALUES': [17.062, 17.128, 17.177, 17.226, 17.274, 17.323, 17.372,
                             17.421, 17.47, 17.527, 17.576, 17.625, 17.674, 17.723,
                             17.772, 17.821, 17.878, 17.927, 17.976, 18.024, 18.073,
                             18.13, 18.179, 18.228, 18.277, 18.326, 18.375, 18.424,
                             18.473, 18.53, 18.579, 18.628, 18.668, 18.726, 18.774,
                             18.823, 18.88, 18.929, 18.97, 19.027, 19.076],
                  'WEIGHTS': [0.001, 0.002, 0.002, 0.003, 0.006, 0.008, 0.011, 0.015,
                              0.02, 0.027, 0.032, 0.041, 0.051, 0.051, 0.05, 0.05,
                              0.056, 0.066, 0.072, 0.068, 0.056, 0.047, 0.042, 0.033,
                              0.032, 0.029, 0.024, 0.022, 0.021, 0.02, 0.014, 0.011,
                              0.006, 0.003, 0.002, 0.001, 0.001, 0.0, 0.002, 0.001, 0.0]}
            }
    return [random.choices(dist[b]['VALUES'], dist[b]['WEIGHTS'])[0] for b in bands.split(',')]
|
0b5c1b4a084d640d441b7911dc68462d66a5cd1e
| 568,539 |
def string_length_fraction(str_list_1, str_list_2):
    """
    Fraction of total stripped-string length contributed by the first input,
    i.e. s1 / (s1 + s2).

    :param str_list_1: List of lists of strings.
    :param str_list_2: List of lists of strings.
    :return: Float (0 to 1).
    """
    def total_length(nested):
        # Sum of whitespace-stripped lengths over the nested lists.
        return sum(len(s.strip()) for inner in nested for s in inner)
    size_1 = total_length(str_list_1)
    size_2 = total_length(str_list_2)
    return size_1 / (size_1 + size_2)
|
b5cad771b411e7eeea61761442871addff71aa12
| 400,289 |
def read_cmd(argv):
    """
    Split a list of strings like ``'--flag=val'`` into a dict
    ``{'flag': 'val'}`` (leading dashes are stripped).

    A flag passed without a value maps to ``True``.
    """
    parsed = {}
    for token in argv:
        token = token.lstrip('-')
        key, sep, value = token.partition('=')
        # No '=' present -> bare flag -> True.
        parsed[key] = value if sep else True
    return parsed
|
72483c5138ec85cdd35a5f20e950da24e952f7cf
| 560,602 |
def parse_str(x):
    """
    Return the string with its first and last (delimiter) characters removed.

    ``None`` is passed through unchanged.

    Example:
        `>>> parse_str('[my string]')`
        `'my string'`
    """
    if x is None:
        return x
    return x[1:-1]
|
6f236c53ab373ec6e73613f9d15ba1a3a2dbaf09
| 564,768 |
def omega_red(s) -> str:
    """Wrap a string in ANSI escape codes for bright-red terminal output."""
    return '\033[1;91m' + s + '\033[0m'
|
3c8eb04cf738e8a1e6d2f9d590b64a5ced18be5c
| 178,441 |
import inspect
async def asyncfn_executor_wrapper(fn, *args, **kwargs):
    """
    Call ``fn`` with the given arguments, awaiting the result when ``fn``
    is a coroutine function; otherwise call it normally.

    Return:
        Function result
    """
    if inspect.iscoroutinefunction(fn):
        return await fn(*args, **kwargs)
    return fn(*args, **kwargs)
|
3627a3988e47b8478c9eae7f8658b2f6b09e9372
| 85,936 |
def shear_impedance(vs, rho):
    """
    Compute shear impedance as the product of shear velocity and density.
    """
    impedance = vs * rho
    return impedance
|
ed679478445514a8878e35bcef1d8281082a5056
| 151,329 |
def is_self(user, other):
    """
    Check whether `user` is performing an action on themselves.

    :param user User: The user to check
    :param other User: The other user to check
    """
    # A missing `other` counts as acting on oneself.
    return True if other is None else user == other
|
589f02aac605ec68b7d48bfed1acdcfe5600ea94
| 477,253 |
import torch
def one_hot_embedding(labels: torch.Tensor, num_classes: int) -> torch.Tensor:
    """Embed class labels in one-hot form.

    :param labels: Class labels. Sized [N].
    :param num_classes: Number of classes.
    :return: One-hot encoded labels, sized [N, #classes].
    """
    # Row i of the identity matrix is the one-hot vector for class i,
    # so indexing by the labels performs the embedding.
    identity = torch.eye(num_classes, device=labels.device)
    return identity[labels]
|
f9765a5d836fa62a718c4333db6f951772e73535
| 358,772 |
import string
def strip_whitespace_and_punctuation(word: str):
    """
    Strip all whitespace, ASCII punctuation and typographic quote characters
    from a word and lowercase it.

    :param word: A string containing the word to clean.
    :return: The cleaned, all-lowercase word.
    """
    # Remove every whitespace character first.
    no_space = word.translate({ord(ch): None for ch in string.whitespace})
    # Deletion set: ASCII punctuation plus curly quotes — identical to the
    # original's `string.punctuation.join("“.”’")` trick ('.' is already
    # in string.punctuation).
    deletions = string.punctuation + "“”’"
    return no_space.translate(str.maketrans('', '', deletions)).lower()
|
b8cd12e4a9500c5f8048bc828ee4d65217c060e6
| 496,106 |
def calculatechroma(color):
    """Return the chroma of ``color``: the spread between its largest and
    smallest RGB fraction components."""
    fractions = color.rgbfraction
    return max(fractions) - min(fractions)
|
e7288b3cc4aa9141c185664b1cb55abf714eef86
| 615,632 |
def nice_join(seq, sep=", "):
    """ Join together sequences of strings into English-friendly phrases
    using the conjunction ``or`` when appropriate.

    Args:
        seq (seq[str]) : a sequence of strings to nicely join
        sep (str, optional) : a sequence delimiter to use (default: ", ")

    Returns:
        a joined string

    Examples:
        >>> nice_join(["a", "b", "c"])
        'a, b or c'
    """
    strings = [str(item) for item in seq]
    if len(strings) > 1:
        return "%s or %s" % (sep.join(strings[:-1]), strings[-1])
    return sep.join(strings)
|
c6fe5afb0e495ed34468cf4919ab4fe7ddbf937b
| 517,285 |
def get_auto_start_list(cpc):
    """
    Helper function that converts the 'auto-start-list' property of a CPC
    to a list suitable for the zhmcclient.Cpc.set_auto_start_list() method.

    Returns:
      None - if the CPC is in classic mode
      list, with items that are one of:
        - tuple(partition, post_start_delay)
        - tuple(partition_list, name, description, post_start_delay)
    """
    raw_list = cpc.prop('auto-start-list', None)
    if raw_list is None:
        # Property absent -> CPC is in classic mode.
        return None
    result = []
    for item in raw_list:
        if item['type'] == 'partition':
            partition = cpc.partitions.resource_object(item['partition-uri'])
            result.append((partition, item['post-start-delay']))
        if item['type'] == 'partition-group':
            partitions = [cpc.partitions.resource_object(uri)
                          for uri in item['partition-uris']]
            result.append((partitions, item['name'], item['description'],
                           item['post-start-delay']))
    return result
|
8ceb8fb39ee93de0f1dd0d66bd5966bcaec3ba4b
| 372,650 |
def _get_qubit(store, host_id, q_id, purpose, wait=0):
"""
Gets the data qubit received from another host in the network. If qubit ID is specified,
qubit with that ID is returned, else, the last qubit received is returned.
Args:
store: The qubit storage to retrieve the qubit
host_id (str): The ID of the host that data qubit to be returned is received from.
q_id (str): The qubit ID of the data qubit to get.
purpose (str): The intended use of the qubit
Returns:
(Qubit): Qubit received from the host with *host_id* and *q_id*.
"""
return store.get_qubit_from_host(host_id, q_id, purpose, wait)
|
4237946fcd7d4d1b4978d03711feeb21fe1569a6
| 377,033 |
def get_checklist(full_name):
    """ Generate the lookup names that may contain 'full_name'.

    Notes:
        E.g. for "a.b.c" the result is
        ("a.b.c", "a.*", "a.b.*", "a.b.c.*")
        — either the full name itself may be found, or the name may be
        included via some *-import.

    Args:
        full_name: The full module name

    Returns:
        Tuple of possible "containers" (an empty list for a falsy name).
    """
    if not full_name:  # guard against nonsense
        return []
    parts = full_name.split(".")
    # Dotted prefixes: "a", "a.b", "a.b.c" -> "a.*", "a.b.*", "a.b.c.*"
    star_imports = [".".join(parts[:i + 1]) + ".*" for i in range(len(parts))]
    return tuple([full_name] + star_imports)
|
5454d8c3e7ef4170f7364901e8a23bde10d2bf0a
| 119,887 |
import re
def get_expected(file_name):
    """
    Return the expected output recorded in ``// ... OUT(...)`` comments of a
    file, one captured value per line.
    """
    pattern = re.compile(r"//.*OUT\((.*)\)")
    captured = []
    with open(file_name) as fp:
        for line in fp:
            match = pattern.search(line)
            if match:
                captured.append(match.group(1))
    return "\n".join(captured)
|
16246d084212715506979843330f1c9d534df454
| 455,929 |
def resetChapterProgress(chapterProgressDict, chapter, initRepeatLevel):
    """Reset a chapter's progress and set the initial level for the repeat
    routine.

    Args:
        chapterProgressDict (dict): Chapter progress data.
        chapter (int): Number of the chapter.
        initRepeatLevel (int): Initial level for repeat routine.

    Returns:
        dictionary: The chapter progress dictionary with the chapter reset
        and its initial repeat level set (mutated in place).
    """
    entry = chapterProgressDict[chapter]
    entry["status"] = "Not started"
    entry["progress"]["current"] = 0
    entry["correct"] = {"correct": 0, "subtotal": 0, "rate": ''}
    entry["repeatLevel"] = initRepeatLevel
    return chapterProgressDict
|
e02d6e97f556a2c080c2bc273255aacedf7bb086
| 708,893 |
async def _verify_provision_request(request, params):
"""Verifies a received 'provision' REST command.
Args:
request (aiohttp.Web.Request): The request from the client.
params (dict-like): A dictionary like object containing the REST command request parameters.
Returns:
(boolean, str): A boolean indicating if the request is valid. The other parameter is an error
message if the boolean is True, and is None otherwise.
"""
if not params: return False, "ERROR: Request parameters must not be null!"
if not isinstance(params, dict):
return False, "ERROR: Request parameters must be a JSON object!"
if "target" not in params: return False, "ERROR: Request params requires 'target' field!"
target = params["target"]
if target != "sensor" and target != "group":
return False, "ERROR: Invalid 'target' specified! Must be one of {'sensor', 'group'}."
if target == "sensor":
if "groupid" not in params: return False, "ERROR: Request params requires 'groupid' field!"
try:
groupid = int(params["groupid"])
except Exception:
return False, "ERROR: Request parameter 'groupid' must be an integer!"
if groupid <= 0: return False, "ERROR: Request parameter 'groupid' must be >= 0!"
if "alias" in params:
if not params["alias"]: return False, "ERROR: Request parameter 'alias' must contain at least one (1) character!"
return True, None
|
fb8db3b3cc8aa12232997e71d5c130402cb45525
| 682,046 |
def check_gamma(combinations):
    """Remove element combinations for a single busbar that violate the
    'One Line Constraint' (gamma constraint), which states:
    out of the elements connected to a bus-bar, at least one has to be a
    line — i.e. a bus-bar cannot have ONLY non-line elements (loads,
    generators) connected to it.

    The input is a list of element combinations, e.g.
    [('loads_id1', 'generators_id1'), ('line_ex_id1', 'generators_id1')].
    The first combination pairs two non-line elements and is removed.

    Bug fixed: the original removed items from ``combinations`` while
    iterating over it, which skips the element following each removal
    (adjacent violating pairs survived).

    :param combinations: list of 2-tuples of element id strings;
        filtered in place and also returned.
    """
    kept = [
        elem for elem in combinations
        if not ((''.join(elem[0])).find('loads_id') != -1
                and (''.join(elem[1])).find('generators_id') != -1)
    ]
    # Mutate in place, since callers may hold a reference to the list.
    combinations[:] = kept
    return combinations
|
756ca7460dcd34d38a9cb41ad16ced5f6036c297
| 394,675 |
import ast
def get_accessed(node):
    """Collect names that are read, ignoring assignment targets.

    For ``a = b + 1`` this returns ``{'b'}``: ``b`` is "being accessed"
    while ``a`` is not.
    """
    if isinstance(node, ast.Assign):
        # Only the right-hand side of an assignment counts as accessed.
        return get_accessed(node.value)
    if isinstance(node, ast.Name):
        return {node.id}
    children = node if isinstance(node, list) else ast.iter_child_nodes(node)
    accessed = set()
    for child in children:
        accessed |= get_accessed(child)
    return accessed
|
4ef89ccd5c8937ab0add5bf96957784ad034dea5
| 91,564 |
def parse_csv_header(line):
    """Parse the CSV header returned by TDS.

    Each column looks like ``name[unit="..."]`` or plain ``name``.
    Returns (names, units) where ``units`` maps a name to its unit string.
    """
    units = {}
    names = []
    for var in line.split(','):
        bracket = var.find('[')
        if bracket < 0:
            # No bracket -> plain column name, no unit.
            names.append(str(var))
            continue
        name = str(var[:bracket])
        names.append(name)
        close = var.find(']', bracket)
        unitstr = var[bracket + 1:close]
        eq = unitstr.find('=')
        if eq >= 0:
            # Skip past '=' and the opening '"'; drop the closing '"'.
            units[name] = unitstr[eq + 2:-1]
    return names, units
|
f626eb3dd46db59a953cefd16f0d50577a55e2dc
| 209,380 |
from typing import List
def order_by_name(order_list: List[dict], order_name: str) -> int:
    """Return the current index of an order by its name, or -1 if no order
    with that name exists."""
    matches = (idx for idx, entry in enumerate(order_list)
               if entry['name'] == order_name)
    return next(matches, -1)
|
5466672c63b1015476c1a04261ac88052b5b191b
| 67,425 |
def RGBToLumaCCIR601(rgb):
    """
    RGB -> Luma conversion using digital CCIR601 weights
    (gives less weight to the R and B components).

    :param: rgb - The elements of the array rgb are unsigned chars (0..255).
    :return: The luminance.
    """
    r, g, b = rgb[0], rgb[1], rgb[2]
    return 0.299 * r + 0.587 * g + 0.114 * b
|
0d7755cb3309434b35abce45c471db9d830f29ea
| 377,726 |
def memory_setting(label):
    """
    Parse a user-supplied memory setting.

    Converts strings into floats representing a number of bytes. Supports:
    - raw integers: 1, 1000, 1000000000
    - scientific notation: 1, 1e3, 1e9
    - "common" notation: 1, 1K, 1G
    Suffixes supported: K/k, M/m, G/g, T/t. Do not include a trailing B/b.

    :raises ValueError: if the label cannot be parsed.
    """
    multipliers = {
        'K': 1000.0,
        'M': 1000.0 ** 2,
        'G': 1000.0 ** 3,
        'T': 1000.0 ** 4,
    }
    try:
        return float(label)
    except ValueError:
        # Not a plain number: try <number><suffix>.
        prefix, suffix = label[:-1], label[-1:].upper()
        if suffix not in multipliers:
            raise ValueError('cannot parse memory setting "{}"'.format(label))
        try:
            return float(prefix) * multipliers[suffix]
        except ValueError:
            raise ValueError('cannot parse memory setting "{}"'.format(label))
|
07076583847b676e1993f8ff5158fe48aa3b8765
| 601,545 |
import re
def re_pick(item_list, regex_str):
    """
    Filter a list, keeping the items that match a regex.

    :param item_list: list of items
    :param regex_str: regex string
    :return: list of items in item_list that match regex_str
    """
    pattern = re.compile(regex_str)
    return [item for item in item_list if pattern.search(item)]
|
0b38e79a8623242c8f4566847925d4fcc15f9adb
| 248,405 |
from typing import Iterable
def check_descriptor_length(descriptor, n_element):
    """
    Check that the entries of a descriptor dictionary have the right length.

    Single strings are treated as one-element lists; non-iterable values
    are ignored.

    Args:
        descriptor(dict): the descriptor dictionary
        n_element: the correct length of the descriptors

    Returns:
        bool
    """
    for value in descriptor.values():
        if isinstance(value, str):
            value = [value]
        if isinstance(value, Iterable) and len(value) != n_element:
            return False
    return True
|
63dbc3425641da0185012b297271421bf2cb9903
| 435,554 |
def max_parent_pfx_len(pfx_lengths, ipv6=False):
    """Compute the largest parent prefix length given child prefix length(s).

    >>> max_parent_pfx_len([26, 27, 28, 29])
    25

    :param pfx_lengths: iterable: prefix lengths
    :param ipv6: bool: set to False for IPv4 prefix lengths, True for IPv6
    :return: int: computed largest prefix length
    :raises ValueError: for non-numeric or out-of-range lengths, or when
        the children cannot fit under any parent.
    """
    bit_size = 128 if ipv6 else 32
    try:
        lengths = [int(pl) for pl in pfx_lengths]
    except ValueError:
        # One of the inputs is not a number: re-raise with a clear message.
        raise ValueError("Only numbers are accepted.")
    # Prefix lengths only make sense between 1 < pfx_len <= bit_size.
    if any(pl < 1 or pl > bit_size for pl in lengths):
        raise ValueError(
            "Prefix length has to be between 1 < 'pfx_len' <= {}".format(bit_size)
        )
    # Each child of length L spans 2**(bit_size - L) addresses.
    address_total = sum(2 ** (bit_size - pl) for pl in lengths)
    # Subtract 1 so sums that are exact powers of two don't cost an extra bit.
    parent_len = bit_size - (address_total - 1).bit_length()
    if parent_len < 0:
        raise ValueError("Input prefix lengths too large to compute parent length.")
    return parent_len
|
9b563155632683cfeac3afe1554ff5f97d0d73cb
| 468,895 |
import logging
def prepare_alfred_formatter(exp_id: str) -> logging.Formatter:
    """Return a :class:`~logging.Formatter` using the standard alfred
    logging format, with the experiment id baked into the message.
    """
    fmt = (
        "%(asctime)s - %(name)s - %(levelname)s - "
        f"experiment id={exp_id} - %(message)s"
    )
    return logging.Formatter(fmt)
|
dabfafba0065537c9182c1c8e341400c42135246
| 85,024 |
import hashlib
import base64
def checksum(string):
    """Base85-encoded MD5 checksum of ``string``. Used by bisurl."""
    digest = hashlib.md5(string.encode("utf-8")).digest()
    return base64.b85encode(digest).decode("utf-8")
|
269f67cf906226d00c9230e33f9a4d9bc778edd2
| 458,804 |
from pathlib import Path
def get_logfile() -> str:
    """
    Locate the Genshin Impact client log file.

    :return: path of the log file as a string, or '' if not found.

    Bug fixed: ``Path('~/...')`` is never expanded by pathlib, so the
    original looked for a literal ``~`` directory and could never find
    the file; ``expanduser()`` resolves it to the real home directory.
    """
    base = Path('~/AppData/LocalLow/miHoYo').expanduser()
    for folder in ('原神', 'Genshin Impact', 'YuanShen'):
        logfile = base / folder / 'output_log.txt'
        if logfile.is_file():
            return str(logfile)
    return ''
|
747412b2634ca10e8341c49a5157deda9e18684f
| 591,416 |
def watt_to_MJ(watt_m2):
    """
    Transform watt per m² (i.e. J/m²/s) to MJ per m² per day.
    """
    seconds_per_day = 86400
    joule_m2_day = watt_m2 * seconds_per_day
    return joule_m2_day / 1000000
|
3fdd4b4707e797002f760fb82f39e2eb042337dc
| 445,642 |
def descriptor_target_split(file):
    """
    Split the input data into descriptors and target DataFrames.

    Parameters:
        file: pandas.DataFrame
            Input DataFrame containing descriptors and target data.

    Returns:
        descriptors: pandas.DataFrame
            Descriptors DataFrame (all columns except 'Target').
        target: pandas.DataFrame
            Target DataFrame (only the 'Target' column).
    """
    is_target = file.columns == 'Target'
    target = file.loc[:, is_target]
    descriptors = file.loc[:, ~is_target]
    return descriptors, target
|
95d518ee9cce190ed08015986fc5b2d86c33318a
| 644,584 |
def extract_raw_features(annotation):
    """
    Extract raw features from an annotation, dropping entries whose value
    is the string 'external'.

    :param annotation: annotation object supporting ``.select('Feature')``
        with ``Name.text`` / ``Value.text`` children.
    :return: dict mapping feature name to feature value.
    """
    raw = {feature.Name.text: feature.Value.text
           for feature in annotation.select('Feature')}
    return {name: value for name, value in raw.items() if value != 'external'}
|
cc4e52be31538a101891cfa1170c8f1f7334e61b
| 529,743 |
from typing import Any
def couple_combinaison_from(elements: list[Any]) -> list[tuple[Any, Any]]:
    """Create every ordered pair of two elements from the input list."""
    return [(left, right) for left in elements for right in elements]
|
8c299a86fe3f2faf7f27b7363f9c4c6e2188a5db
| 83,969 |
def check_structure_acyclic(structure):
    """
    Return True when the directed graph ``structure`` has no cycle.

    The graph must be a dictionary mapping vertices to iterables of
    neighbouring vertices. For example:

    >>> check_structure_acyclic({1: (2,), 2: (3,), 3: (1,)})
    False
    >>> check_structure_acyclic({1: (2,), 2: (3,), 3: (4,)})
    True
    """
    visited = set()
    on_path = set()

    def has_cycle(vertex):
        # A vertex already fully explored cannot start a new cycle.
        if vertex in visited:
            return False
        visited.add(vertex)
        on_path.add(vertex)
        for successor in structure.get(vertex, ()):
            if successor in on_path or has_cycle(successor):
                return True
        on_path.remove(vertex)
        return False

    return not any(has_cycle(v) for v in structure)
|
1bfa384f4a9bb4fdd8c3aa00eaae8dffb5e50352
| 107,510 |
def get_ua_account_summaries(management_api):
    """Get a list of UA account summaries.

    Args:
        management_api: The Analytics Management API object.

    Returns:
        A list of the GA account summaries (the 'items' field of the
        accountSummaries.list() response).
    """
    response = management_api.management().accountSummaries().list().execute()
    return response['items']
|
c32d2fd98609c89b6d874ef34f06c476b7245b3c
| 204,024 |
from typing import List
from typing import Tuple
def get_index_tuple_from_kraus_matrices_indices(
    index: int, indices: List[int]
) -> Tuple[int, int]:
    """Map a flat matrix index to (operator, matrix) coordinates.

    Parameters
    ----------
    index: int
        Index of the matrix in the flattened list of Kraus-operator matrices.
    indices: List[int]
        Number of matrices per Kraus operator.

    Returns
    -------
    Tuple[int, int]
        (i, j), meaning the j-th matrix of the i-th Kraus operator.
    """
    running_total = 0
    for op_idx, count in enumerate(indices):
        running_total += count
        if index < running_total:
            # Offset of `index` within this operator's matrices.
            return (op_idx, count + index - running_total)
    # Out-of-range indices trip the assertion, as in the original.
    assert index < running_total
    return (len(indices) - 1, running_total - index)
|
a416e6ceb4304916250222bcfc1547c401fa8b7b
| 598,209 |
def merge_field_into(s1, key, value):
    """Create a new namedtuple that is a copy of ``s1`` with ``key``
    replaced by ``value``."""
    fields = {name: getattr(s1, name) for name in s1._fields}
    fields[key] = value
    return s1.__class__(**fields)
|
b77a3e583cd888b3a7a9e2ded9690fd56e693b0f
| 611,340 |
def get_viewconf_status(files):
    """
    Determine the Higlass viewconf's status based on the files used to
    compose it.

    Args:
        files(list): A list of file dicts, each normally containing a status.

    Returns:
        A string: "released to lab", "released to project" or "released".

    Bug fixed: a file lacking a "status" key is documented as forcing
    "released to lab", but the original's first check let it through
    (None was not in the list) and the second check then crashed with
    KeyError. Missing statuses now map to "released to lab" and all
    lookups use .get().
    """
    # The viewconf is "released to lab" if any file:
    # - Lacks a status
    # - Has one of the "released to lab" statuses
    released_to_lab = [
        "uploading",
        "uploaded",
        "upload failed",
        "deleted",
        "replaced",
        "revoked",
        # "archived",
        "pre-release",
        "to be uploaded by workflow"
    ]
    if any(f.get("status") is None or f.get("status") in released_to_lab
           for f in files):
        return "released to lab"
    # If any file is in "released to project" the viewconf also has that
    # status.
    released_to_project = [
        "released to project",
        "archived to project",
    ]
    if any(f.get("status") in released_to_project for f in files):
        return "released to project"
    # All files are "released" so the viewconf is also released.
    return "released"
|
873bbbb50082236e01a96ed6207e853e20e22ef9
| 616,817 |
def add_subparser_solve(subparsers):
    """Add a 'solve' option to `subparsers`.

    Parameters
    ==========
    subparsers : argparse ArgumentParser special action object
        Special action object generated from a call to:
        `argparse.ArgumentParser.add_subparsers()`

    Returns
    =======
    subparsers : argparse ArgumentParser special action object
        Updated version of `subparsers`
    """
    parser_solve = subparsers.add_parser('solve', help='solve the model')
    # (flags, keyword arguments) for each argument, in display order:
    # verbose flag, input/output files, define/set options, span/past periods.
    argument_specs = [
        (('-v', '--verbose'),
         dict(action='store_true',
              help='print detailed solution output (not yet implemented)')),
        (('-f', '--input'),
         dict(nargs='+', metavar='INPUT', default=None, type=str,
              required=False, help='input file(s) for model data')),
        (('-o', '--output'),
         dict(nargs='+', metavar='OUTPUT', default=None, type=str,
              required=False, help='output file(s) for model results')),
        (('-D', '--define'),
         dict(action='append', nargs='+', metavar='PARAMETER', default=None,
              type=str, required=False,
              help='set (time-invariant) model parameters')),
        (('--set',),
         dict(action='append', nargs='+', metavar='EXPRESSION', default=None,
              type=str, required=False,
              help='set (time-varying) model variables prior to run')),
        (('--span',),
         dict(nargs=2, metavar='PERIOD', default=None, type=int,
              required=False,
              help='set the start and end periods of the model run')),
        (('--past',),
         dict(metavar='PERIOD', default=None, type=int, required=False,
              help='set the first historical period of the model run')),
    ]
    for flags, kwargs in argument_specs:
        parser_solve.add_argument(*flags, **kwargs)
    return subparsers
|
4fbec81e6e144b276aaf3c9a513475651a6e263b
| 515,798 |
def UnitToValue(unit):
    """Return the numeric multiplier for a unit string.

    Empty strings and the pseudo-units ECU/GCEU/HRZ map to 1; otherwise the
    first letter selects a binary scale (K/M/G). Raises for anything else.
    """
    if not unit or unit.upper() in frozenset(["ECU", "GCEU", "HRZ"]):
        return 1
    scale = {"K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}.get(unit[0].upper())
    if scale is None:
        raise Exception("Not valid unit: '%s'" % unit)
    return scale
|
2aae0f6480150ab0ca17da8dac1026a8174a1333
| 536,492 |
def calcDivisor(factors):
    """
    Reconstruct an integer from its prime factorization.

    Args:
        factors: Dictionary mapping prime factors to their exponents.

    Returns:
        The integer equal to the product of prime**exponent.

    Usage:
        >>> calcDivisor({5: 3, 7: 2})
        6125
        >>> calcDivisor({2: 3, 5: 2, 7: 1})
        1400
    """
    product = 1
    for prime, exponent in factors.items():
        product *= prime ** exponent
    return product
|
3c3d68d86ab18ad558e1963e9868efba6e5a7793
| 162,700 |
import socket
def _get_local_ip(ip: str, port: int = 80) -> str:
"""
For a given IP, return the local IP that can access it
Args:
ip (str): Remote IP
port (int, optional): Remote port. Defaults to `80`
Returns:
str: The local IP that can access the given remote IP
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((ip, port))
local_ip = s.getsockname()[0]
s.close()
return local_ip
|
7533c6d38e88983cda3547af1edb908d4a831e91
| 181,472 |
import json
def load_low_voltage_prosumers(file):
    """Return the names of all low-voltage nodes listed in a force.json file.

    :param file: path to force.json
    :return: list of names of nodes whose voltage_level is 'low voltage'
    """
    with open(file, 'r') as handle:
        graph = json.load(handle)
    return [node['name'] for node in graph['nodes']
            if node['voltage_level'] == 'low voltage']
|
249bf9a08a7c0fec71e7a6bdbd617f23c88d17c2
| 317,268 |
def validDBMLFile(s):
    """
    Return whether the given file name has a valid `.dbml` extension.
    Case-sensitive (i.e. `.DBML` is not accepted).

    Parameters:
        s (str): name of file.

    Returns:
        bool: True if s ends with '.dbml', else False.
    """
    # endswith already yields a bool; no if/else needed.
    return s.endswith('.dbml')
|
ffa0a9f61bf9545efa2a170ddb1875cad0b75892
| 117,830 |
def color_pixel(index):
    """
    Map a vertex index to a color-table slot in the range 1-255.
    GIF's global color table holds at most 256 entries, and slot 0 is
    avoided by clamping it to 1.
    """
    slot = index % 256
    return slot if slot >= 1 else 1
|
6ef1e4481e50939dc2c107cc4ee151249a6ec44c
| 196,230 |
def fuel_cost(x1, x2):
    """
    Return the fuel needed to move a crab submarine from x1 to x2
    (the triangular number of the distance: 1 + 2 + ... + d).
    """
    d = abs(x1 - x2)
    # d*(d+1) is always even, so floor division is exact; it avoids the
    # float conversion (and precision loss for large d) of the original /2.
    return d * (d + 1) // 2
|
e628b2b36d31bb66b734fa22165d65be4da23df6
| 457,531 |
def get_region_of_all_exchanges(scenario: dict) -> dict:
    """Return an {ID: region_name, ...} map for every `EnergyExchange` agent
    in the given `scenario`. If an agent has no region, the literal string
    `Region` is used for that agent.

    The original implementation fell back to "Region" for ALL agents as soon
    as any single lookup raised KeyError; the fallback is now per agent,
    matching the documented behavior.
    """
    output = {}
    for agent in scenario["Agents"]:
        if agent["Type"] != "EnergyExchange":
            continue
        # Per-agent fallback: missing "Attributes" or missing "Region"
        # yields the placeholder string only for this agent.
        output[agent["Id"]] = agent.get("Attributes", {}).get("Region", "Region")
    return output
|
475e7ef769c28fa9ad8653460ece3f0150dda244
| 74,526 |
def compose2(f, e):
    """Return the composition of two callables: x -> f(e(x))."""
    def composed(x):
        return f(e(x))
    return composed
|
9e48818b5c4150d1af138935c712e31c58a1f9c1
| 681,553 |
def enrichment_optimal(ligand_vs_kinase_data):
    """
    Calculate the optimal enrichment, i.e. the ratio of ranked kinases, in which optimally all
    active kinases have been found.

    Parameters
    ----------
    ligand_vs_kinase_data : object
        Ligand-vs-kinase data object exposing a `data` attribute (the ranks
        DataFrame, one row per kinase) and a `ligand_kinase_method` name used
        to select the `<method>.active` boolean column.
        NOTE(review): presumably the object produced by
        src.evaluation.kinase_ranks — confirm the exact type.

    Returns
    -------
    float
        Percentage of ranked kinases that will optimally contain _all_ active kinases.

    Notes
    -----
    N : Number of kinases in totoal
    N_s : Number of kinases in top x% of ranked kinases
    n : Number of active kinases in total
    n_s : Number of active kinases in top x% of ranked kinases

    For the enrichment plot, the x values are calculated as follows:
    x = N_s / N
    In the optimal case, N_s = n, hence:
    x_optimal = n / N
    """
    ranks = ligand_vs_kinase_data.data
    # total kinase count = number of rows in the ranks table
    n_kinases = ranks.shape[0]
    # active kinases flagged by the "<method>.active" column
    n_active_kinases = ranks[f"{ligand_vs_kinase_data.ligand_kinase_method}.active"].sum()
    ratio_active_kinases_identified_optimal = n_active_kinases / n_kinases * 100
    return ratio_active_kinases_identified_optimal
|
ee5c765b7efe67c6a5df2a3ee5118f79872d6770
| 241,106 |
def load_station(df, code=111123):
    """Select the rows of *df* belonging to a single station.

    Args:
        df (pd.DataFrame): DataFrame to filter
        code (int, optional): station code. Defaults to 111123.

    Returns:
        pd.DataFrame: rows whose stationCode equals *code*
    """
    station_mask = df["stationCode"] == code
    return df[station_mask]
|
e115e9d48cf9cca3429c99875b54ddc28cd7a36b
| 236,785 |
def is_unstructured_object(props):
    """Return True when the schema entry has an 'object' type but declares
    no (or empty) 'properties'."""
    is_object_type = 'object' in props['type']
    has_properties = bool(props.get('properties'))
    return is_object_type and not has_properties
|
c970c0a99fe1c872d9ce9320bd48aceb0fdb4c94
| 437,362 |
import torch
def get_cumulant(h, a, terminal_a, food_type, index):
    """
    Cumulant (pseudo-reward) for one resource.

    h: history
    a: action
    terminal_a: index of the terminal action = |A| + 1
    food_type: type of food item from {(0, 1), (1, 0), (1, 1)}
    index: index of the resource for which the cumulant value is required
    """
    # Terminal action yields no cumulant.
    if a == terminal_a:
        return torch.tensor(0.)
    # Resource already in history: penalize.
    if h[index]:
        return torch.tensor(-1.)
    # Fresh resource present in the food item: reward by its amount.
    if food_type[index]:
        return torch.tensor(food_type[index] * 1.)
    return torch.tensor(0.)
|
a03e6054bf5b2bc007743c597552ac7dbb7d2725
| 503,854 |
import itertools
def iterable_nth(iterable, n, default=None):
    """
    Return the item at position *n* of *iterable*, or *default* if the
    iterable is exhausted first.
    From: https://docs.python.org/3/library/itertools.html#recipes

    :param iterable: iterable
    :param n: item index
    :param default: default value
    :return: nth item of given iterable
    """
    tail = itertools.islice(iterable, n, None)
    return next(tail, default)
|
f4f5694175905d6c3f22ee4386f15aa592a22296
| 277,948 |
def temperature_trans(air_in, fuel_in, ex_out):
    """
    Convert three fuel-cell temperatures from degrees Celsius to Kelvin.

    :param air_in: temperature of air coming in to fuel cell (FC)
    :param fuel_in: temperature of fuel coming into FC /
        temperature of reformer
    :param ex_out: temperature of exhaust coming out of FC
    :return: [air_in, fuel_in, ex_out] in Kelvin, in that order
    """
    celsius_values = (air_in, fuel_in, ex_out)
    return [value + 273.15 for value in celsius_values]
|
8b53781636062626fa8bfd93fb08ff783809d8ca
| 650,147 |
def load_classes(classes_filepath):
    """
    Load class names from the class-information file (usually classes.csv):
    one class per line, taken from the second comma-separated column.
    """
    class_names = []
    with open(classes_filepath) as handle:
        for line in handle:
            class_names.append(line.split(",")[1].strip())
    return class_names
|
ea4e9e38f17eca93786d853ea47d1ff2a99bb479
| 385,819 |
import json
def to_json(obj: object, *args, **kwargs):
    """Serialize a (possibly nested) object to JSON.

    Objects providing a callable ``to_json`` are serialized via that hook
    (the original fetched the attribute but never called it, so a bound
    method object was handed to json.dumps); otherwise the object's
    ``__dict__`` is used when present, falling back to ``str(obj)``.
    Extra positional/keyword args are forwarded to ``json.dumps``.
    """
    def serialize(o: object):
        hook = getattr(o, 'to_json', None)
        if hook is not None:
            # Call the hook if it is a method; pass plain attributes through.
            return hook() if callable(hook) else hook
        if hasattr(o, '__dict__'):
            return o.__dict__
        return str(o)
    return json.dumps(obj, default=serialize, *args, **kwargs)
|
7798bc28196f9598645751803f81fe711836ada9
| 643,470 |
def remove_empty(data):
    """Return a new list with all empty-string items removed.

    :param data: iterable of items to filter
    :return: list of items from *data* that are not the empty string ''
    """
    # Comprehension replaces the manual append loop; only '' is dropped,
    # other falsy values (0, None, []) are kept, as before.
    return [item for item in data if item != '']
|
9ef46381bb76846c92375f47eb481a26359b1d92
| 695,827 |
def find_bias(sf_stack, opt_sf):
    """
    Difference between the mean inverted scale factors and the optimal ones
    for one 72x46 grid.

    Parameters:
        sf_stack (numpy array) : nxMx72x46 array with inverted scale factors
                                 (n=number OSSEs and M=number of months)
        opt_sf (numpy array)   : Mx72x46 optimal scale factor array

    Returns:
        Mx72x46 numpy array of E(sf) - opt_sf

    NOTE:
        - we assume that the 0th index of sf_stack is the OSSE iterations
    """
    # sanity checks: same number of months and 72 longitude cells
    assert sf_stack.shape[1] == opt_sf.shape[0]
    assert sf_stack.shape[2] == 72
    assert opt_sf.shape[1] == 72
    # average over the OSSE draws (axis 0), then compare to the optimum
    mean_over_osses = sf_stack.mean(axis=0)
    return mean_over_osses - opt_sf
|
652db10d3fb96550d77c9cb8f6538bf7b0f39cbf
| 375,519 |
def getOcclusions(df):
    """
    Finds all occlusions in the annotated dataset, provided in a pandas dataframe

    Input:
        df: Pandas DataFrame containing the dataset

    Output:
        occlusion_id_cam: Dictionary containing a dict per fish ID. The dict per fish ID contains two keys (cam1/cam2), each holding a list of tuples, where each tuple is the start and endframe of an occlusion
    """

    unique_ids = df["id"].unique()
    # map each fish ID to a running index (only the keys are used below)
    id_map = {id_n:idx for idx, id_n in enumerate(unique_ids)}
    occlusion_id_cam = {id_n: {"cam1":[], "cam2": []} for id_n in id_map.keys()}

    for id_n in unique_ids:
        id_df = df[df["id"] == id_n]

        for cam in ["cam1", "cam2"]:
            # keep only rows with an occlusion annotation for this camera
            id_df_cam = id_df.dropna(subset=["{}_occlusion".format(cam)])
            occlusions = id_df_cam["{}_occlusion".format(cam)].values
            frames = id_df_cam["{}_frame".format(cam)].unique()

            occ_tuples = []
            startFrame = -1
            # NOTE(review): iterating frames[:-1] and peeking occlusions[idx+1]
            # means an occlusion that runs through the final frame is never
            # closed/recorded -- confirm this is intended for the dataset.
            for idx, frame in enumerate(frames[:-1]):
                if occlusions[idx] == 0:
                    continue
                if startFrame == -1:
                    # first occluded frame of a new occlusion interval
                    startFrame = frame
                if occlusions[idx+1] == 0:
                    # next frame is visible again -> close the interval
                    occ_tuples.append((startFrame, frame))
                    startFrame = -1

            occlusion_id_cam[id_n][cam] = occ_tuples

    return occlusion_id_cam
|
198deb5b7910d230430d7de329137d237f43d3b3
| 256,042 |
def _parser_setup(parser_obj, value, reset_default=False):
"""Add argument to argparse object
Parameters
----------
parser_obj : object
argparse object
value : dict
argparse settings
reset_default : bool
boolean that defines if default values should be used
Returns
-------
parser_obj : object
updated argparse object
"""
if reset_default:
default = None
else:
default = value["default"]
if value["action"] is None:
parser_obj.add_argument(
*value["tag"],
help=value["help"],
default=default,
choices=value["choices"],
)
else:
parser_obj.add_argument(
*value["tag"],
help=value["help"],
default=default,
action=value["action"],
)
return parser_obj
|
58d1ccde88a6ada8b7f4a09d50d083e3e86677bd
| 33,887 |
def palindrome(value: str) -> bool:
    """
    Determine whether a word or phrase is a palindrome, ignoring whitespace
    and letter case (punctuation is NOT stripped, as in the original).

    :param value: A string
    :return: True if the cleaned string reads the same in both directions
    """
    cleaned = "".join(value.split()).lower()
    # Reversed-slice comparison replaces the original recursion, which made
    # O(n^2) string copies and could hit the recursion limit on long input.
    return cleaned == cleaned[::-1]
|
3f089c5e0330ff4b4b340c5ede07a9531805b63c
| 594,676 |
def get_marginal_rna_obs(r: int, p_max: int):
    """Get the observable for a marginalized RNA abundance: the sum of
    x_{r}_{p} over protein counts p = 0 .. p_max-1."""
    terms = [f'x_{r}_{p}' for p in range(p_max - 1)]
    terms.append(f'x_{r}_{p_max - 1}')
    return {'name': f'x_r{r}', 'formula': ' + '.join(terms)}
|
8d005c68fd2c56f2d161bea47862b4590b4505d9
| 90,823 |
def distinct_letters(X):
    """
    Diagnostic function: collect every distinct character appearing in a corpus.

    :param X: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
    :return: set of distinct characters

    >>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
    >>> sorted(dl)
    ['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w']
    """
    letters = set()
    for sentence in X:
        for word in sentence:
            letters.update(word)
    return letters
|
32cbdd7b884a3472500f11a158989c47ec3fd7fe
| 502,391 |
def cuberoot(x):
    """Return the real cube root of x.

    Negative inputs are handled explicitly: in Python 3, a plain
    ``x ** (1/3)`` on a negative float returns the complex principal root
    instead of the real one.
    """
    if x < 0:
        return -((-x) ** (1. / 3.))
    return x ** (1. / 3.)
|
64adfcc3307c42c86677c0cc405099150b92d1b6
| 634,849 |
def is_property_nullable(property):
    """
    Return True if the property is nullable, False otherwise.

    A property with no 'Nullable' entry (or an entry of None) is treated
    as nullable by default; otherwise only the literal string 'true'
    counts as nullable.
    """
    nullable_value = property.get('Nullable')
    if nullable_value is None:
        return True
    return nullable_value == 'true'
|
afb4f8e08d6341df32a24ddc8fd58625c533df16
| 298,039 |
def sf2metadata_record_to_dict(record):
    """Convert a record (row) from the sf2metadata table to a dict.

    Columns 5-18 are copied verbatim under their short key names;
    columns 19-22 are the start-index fields and are stringified.
    """
    plain_fields = ("pid", "st", "ctp", "nsl", "di", "na", "hp", "np",
                    "hc", "nc", "husl", "nusl", "nslp", "cm")
    result = {name: record[pos]
              for pos, name in enumerate(plain_fields, start=5)}
    stringified_fields = (("sampleOrLibraryStartIndex", 19),
                          ("unpooledSubmissionStartIndex", 20),
                          ("poolStartIndex", 21),
                          ("containerStartIndex", 22))
    for name, pos in stringified_fields:
        result[name] = str(record[pos])
    return result
|
6eefd3fcdc1b4db6fa89ac42e777866c9c7a2fcc
| 293,138 |
import locale
import math
def parse_unit(s, unit, ceil=True):
    """Parse a '123.1unit' string into an integer.

    The numeric part before *unit* is parsed with the current locale, then
    rounded up ('123.1unit' -> 124) when *ceil* is True, or rounded down
    ('123.9unit' -> 123) when it is False.
    """
    numeric_part = s.split(unit)[0]
    amount = locale.atof(numeric_part)
    rounding = math.ceil if ceil else math.floor
    return int(rounding(amount))
|
c67e6211756bb536ef257507392765182bd90776
| 528,542 |
def long_repeat(line):
    """
    Return the length of the longest substring consisting of a single
    repeated character.

    :param line: input string
    :return: length of the longest single-character run (0 for empty input)
    """
    # Single O(n) pass replaces the original count-list bookkeeping and
    # drops its leftover debug print of the intermediate counts.
    longest = 0
    current_run = 0
    previous = None
    for ch in line:
        # extend the current run on a repeat, otherwise start a new run
        current_run = current_run + 1 if ch == previous else 1
        previous = ch
        if current_run > longest:
            longest = current_run
    return longest
|
4f7a2dfc5ec6ed08eaa9fc114e4484324b8bdfaa
| 633,772 |
import json
def validate(resp):
    """Check health status response from application.

    Args:
        resp (string): JSON-encoded health payload; must contain the keys
            'initialized' and 'sealed'.

    Returns:
        (bool): True if application is healthy (initialized and not sealed).
    """
    print(resp)
    try:
        data = json.loads(resp)
    except (ValueError, TypeError):
        # Narrowed from the original bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; these are the errors json.loads raises.
        return False
    if 'initialized' not in data:
        return False
    if 'sealed' not in data:
        return False
    return bool(data['initialized']) and not data['sealed']
|
87ec568e908c01496f4b3c4fca1b25f518a6a2b3
| 661,373 |
def copy_traj_attributes(target, origin, start):
    """ Inserts certain attributes of origin into target

    Copies the coordinates, unit-cell lengths/angles and time stamps of
    :py:obj:`origin` into :py:obj:`target` in place, starting at frame
    index `start`. Assumes `target` already has room for
    `start + origin.n_frames` frames in each of those arrays.

    :param target: target trajectory object
    :param origin: origin trajectory object
    :param start: :py:obj:`origin` attributes will be inserted in :py:obj:`target` starting at this index

    :return: target: the md trajectory with the attributes of :py:obj:`origin` inserted
    """
    # The list of copied attributes can be extended here with time
    # Or perhaps ask the mdtraj guys to implement something similar?
    # NOTE(review): this writes to mdtraj's private _xyz/_unitcell_*/_time
    # arrays and therefore depends on mdtraj internals -- verify on upgrades.
    target._xyz[start:start+origin.n_frames] = origin._xyz
    target._unitcell_lengths[start:start+origin.n_frames] = origin._unitcell_lengths
    target._unitcell_angles[start:start+origin.n_frames] = origin._unitcell_angles
    target._time[start:start+origin.n_frames] = origin._time
    return target
|
8a321ca293347588efa3e6a45013226103cef110
| 495,516 |
def get_param_grid(algorithm):
    """
    Defines and returns a hyperparameter search grid for the designated
    model type.

    :param algorithm: one of 'logistic', 'ridge', 'svm', 'sgd', 'forest',
        'xt', 'boost', 'xgb', 'nn'
    :return: list of dicts mapping hyperparameter names to candidate values
    :raises ValueError: if the algorithm name is not recognized
    """
    if algorithm == 'logistic':
        param_grid = [{'penalty': ['l1', 'l2'], 'C': [0.1, 0.3, 1.0, 3.0]}]
    elif algorithm == 'ridge':
        param_grid = [{'alpha': [0.1, 0.3, 1.0, 3.0, 10.0]}]
    elif algorithm == 'svm':
        param_grid = [{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
                      {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}]
    elif algorithm == 'sgd':
        param_grid = [{'loss': ['hinge', 'log', 'modified_huber'], 'penalty': ['l1', 'l2'],
                       'alpha': [0.0001, 0.001, 0.01], 'iter': [100, 1000, 10000]}]
    elif algorithm == 'forest' or algorithm == 'xt':
        param_grid = [{'n_estimators': [10, 30, 100, 300], 'criterion': ['gini', 'entropy', 'mse'],
                       'max_features': ['auto', 'log2', None], 'max_depth': [3, 5, 7, 9, None],
                       'min_samples_split': [2, 10, 30, 100], 'min_samples_leaf': [1, 3, 10, 30, 100]}]
    elif algorithm == 'boost':
        param_grid = [{'learning_rate': [0.1, 0.3, 1.0], 'subsample': [1.0, 0.9, 0.7, 0.5],
                       'n_estimators': [100, 300, 1000], 'max_features': ['auto', 'log2', None],
                       'max_depth': [3, 5, 7, 9, None], 'min_samples_split': [2, 10, 30, 100],
                       'min_samples_leaf': [1, 3, 10, 30, 100]}]
    elif algorithm == 'xgb':
        param_grid = [{'max_depth': [3, 5, 7, 9, None], 'learning_rate': [0.003, 0.01, 0.03, 0.1, 0.3, 1.0],
                       'n_estimators': [100, 300, 1000, 3000, 10000], 'min_child_weight': [1, 3, 5, 7, None],
                       'subsample': [1.0, 0.9, 0.8, 0.7, 0.6, 0.5], 'colsample_bytree': [1.0, 0.9, 0.8, 0.7]}]
    elif algorithm == 'nn':
        param_grid = [{'layer_size': [64, 128, 256, 384, 512, 1024], 'n_hidden_layers': [1, 2, 3, 4, 5, 6],
                       'init_method': ['glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'],
                       'loss_function': ['mse', 'mae', 'mape', 'msle', 'squared_hinge', 'hinge',
                                         'binary_crossentropy', 'categorical_crossentropy'],
                       'input_activation': ['sigmoid', 'tanh', 'prelu', 'linear', 'softmax', 'softplus'],
                       'hidden_activation': ['sigmoid', 'tanh', 'prelu', 'linear', 'softmax', 'softplus'],
                       'output_activation': ['sigmoid', 'tanh', 'prelu', 'linear', 'softmax', 'softplus'],
                       'input_dropout': [0, 0.3, 0.5, 0.7], 'hidden_dropout': [0, 0.3, 0.5, 0.7],
                       'optimization_method': ['sgd', 'adagrad', 'adadelta', 'rmsprop', 'adam'],
                       'batch_size': [16, 32, 64, 128, 256], 'nb_epoch': [10, 30, 100, 300, 1000]}]
    else:
        # ValueError (a subclass of Exception) is the idiomatic error for a
        # bad argument value and stays compatible with existing handlers.
        raise ValueError('No params defined for ' + algorithm)

    return param_grid
|
8a951aa7a8b918424d115e094c3ca4f9d41651d7
| 418,867 |
def mysql_now(element, compiler, **kw):
    """Compiler hook rendering the portable now() construct as MySQL's
    CURRENT_TIMESTAMP() (TIMESTAMP datatype)."""
    return "CURRENT_TIMESTAMP()"
|
5b8e5fd0dbc116770ad603e5a71961ef53af1e1d
| 77,402 |
import json
def extract_json(infile):
    """Return (distance, RSSI list) parsed from a JSON measurement file.

    The file maps a single distance (as the sole top-level key) to a dict
    whose first value is the list of RSSI measurements.

    :param infile: path to the JSON file
    :return: tuple (float distance, list of RSSI values)
    """
    # Load JSON output
    with open(infile, "r") as f:
        data = json.load(f)
    # Extract data. Dict views are not subscriptable in Python 3, so the
    # original data.keys()[0] / .values()[0] raised TypeError; use iterators.
    dist = float(next(iter(data)))
    adict = next(iter(data.values()))
    rssi_list = next(iter(adict.values()))
    return (dist, rssi_list)
|
cc5f846e0d876e69c6e88d0fdd88234fd7e9b36c
| 585,914 |
def reader_c_apdu(connection, apdu):
    """
    Transmit a command APDU over the currently active reader connection.

    :param connection: connection that active now
    :param apdu: apdu string
    :return: R-APDU (whatever connection.transmit returns)
    """
    return connection.transmit(apdu)
|
ecc7be8a753796a46ef54a2604104ebfcc78afc7
| 188,784 |
def merge_two_dicts(dict1, dict2):
    """
    Merge two dictionaries into a new shallow copy; on key collisions the
    value from *dict2* wins. Neither input is modified.

    :param dict1: (dict) First of two dictionaries to merge
    :param dict2: (dict) Second dictionary
    :returns: Merged dictionary
    :rtype: dict
    """
    combined = dict1.copy()
    combined.update(dict2)
    return combined
|
bf47850c043e75f6155c19993788fa3e017894c4
| 113,984 |
import requests
def get_meetup_events(group):
    """Fetch past events (a list of event dicts) for a meetup group from
    the Meetup REST API."""
    url = f'https://api.meetup.com/{group}/events?&sign=true&photo-host=public&page=200&status=past'
    response = requests.get(url)
    return response.json()
|
6bda52c044f35b9d6d645c0ea88c76bd666c0020
| 363,222 |
def avg(lst: list):
    """
    Compute the arithmetic mean of the given list; an empty list yields 0.

    :param lst: list for which you want to compute average
    :return: average of the given list, or 0 when it is empty
    """
    return sum(lst) / len(lst) if lst else 0
|
f2485e949ed187a66a6264cdf65329efe3b0ba93
| 183,402 |
def regex_search_matches_output(get_output, search):
    """
    Apply a regex-search callable to the current output (obtained by
    calling *get_output*) and return its result.
    """
    current_output = get_output()
    return search(current_output)
|
6f054990d3454a447656567b3ceb12a38c2809a5
| 701,132 |
import re
def remove_comments(text: str) -> str:
    """
    Strip '#' comments from text. Lines that contain only a comment are
    removed entirely, including their newline; trailing comments are
    stripped up to the end of the line.

    :param text: Input text
    :return: Text without comments
    """
    comment_pattern = re.compile('(^[ \t]*#.*\n)|(#.*$)', re.MULTILINE)
    return comment_pattern.sub('', text)
|
59dd517d0b409a634ceddcc326837f01e094fbf1
| 283,650 |
def _count_hours_gt_threshold(utilization, threshold):
"""Calculate number of hours above a given utilization threshold.
:param pandas.DataFrame utilization: normalized power flow data frame as returned
by :func:`get_utilization`.
:param float threshold: utilization threshold ([0,1]).
:return: (*pandas.Series*) -- number of hours above utilization threshold.
"""
return (utilization > threshold).sum()
|
e25947fb494d1b09556e25ca9b3c837197df5170
| 499,178 |
def human_oracle(evidence, possible_answers):
    """Simple text interface that asks a human to label a candidate fact,
    re-prompting until one of *possible_answers* is entered."""
    colored_fact, colored_segment = evidence.colored_fact_and_text()
    print(u'SEGMENT: %s' % colored_segment)
    question = ' FACT: {0}? ({1}) '.format(colored_fact,
                                           u'/'.join(possible_answers))
    while True:
        answer = input(question)
        if answer in possible_answers:
            return answer
|
d9580a5877c607277c4806dbe78a0f3d02b0d961
| 222,018 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.