content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
from typing import Dict
from typing import Any
def merge_dict(dict_1: Dict[Any, Any], **kw: Any) -> Dict[Any, Any]:
    """Merge a dict with some keys and values (or another dict).

    The original dict is not modified; a new merged dict is returned,
    with keyword values overriding keys already present in ``dict_1``.
    """
    merged = dict(dict_1)
    merged.update(kw)
    return merged
|
f5d830344cf24cb3efe78348ecc01b162b7599f0
| 344,690 |
def prediction(n: float, r: float, s: float) -> int:
    """
    Returns label of the class with maximum probability
    :param n: probability of being Neutral
    :param r: probability of being Racist
    :param s: probability of being Sexism
    :return: label of the class with maximum probability
    """
    probabilities = [n, r, s]
    # list.index already yields 0/1/2, which IS the label; the former
    # `[0, 1, 2][max_index]` indexing was a redundant identity mapping.
    # Ties resolve to the first (lowest-index) class, as before.
    return probabilities.index(max(probabilities))
|
6db0364c0d192bf6f38184576ffd43b6909d8fac
| 563,628 |
def old_hindu_lunar_leap(date):
    """Return the leap field of an Old Hindu lunar
    date = [year, month, leap, day]."""
    leap_index = 2  # position of the leap flag in the date quadruple
    return date[leap_index]
|
d11c9efab99552f608fe59761a16ee3a0dbf3246
| 687,122 |
def get_parent_build(build):
    """Returns the parent build for a triggered build."""
    props = build.properties_as_dict
    parent_builder = build.builders[props['parent_buildername']]
    return parent_builder.builds[props['parent_buildnumber']]
|
ef4b1041bfd54b7aa2ac1469e6b58bfcf6acd724
| 19,643 |
def get_context_with_traceback(context):
    """
    Returns the very first context that contains traceback information
    (walking up the parent chain), or None if no ancestor has any.
    """
    current = context
    while True:
        if current.tb_list:
            return current
        if not current.parent:
            return None
        current = current.parent
|
1b3a22ca43dfc78e7c5bb12b59188b16ddf68416
| 170,086 |
def filt_all(list_, func):
    """Like filter but reverse arguments and returns list"""
    return list(filter(func, list_))
|
72010b483cab3ae95d49b55ca6a70b0838b0a34d
| 705,920 |
def interpolate_z(d_target, d_start, d_end, z_start, z_end):
    """Linearly interpolate z = f(d) between two points.
    Parameters
    ----------
    d_target : float
        z is computed at this d value.
    d_start : float
        x coordinate of the first point.
    d_end : float
        x coordinate of the last point.
    z_start : float
        y coordinate of the first point.
    z_end : float
        y coordinate of the last point.
    Returns
    -------
    float
        Interpolated value of z.
    Usage
    -----
    >>> interpolate_z(1, 0, 2, 0, 2)
    1.0
    """
    fraction = (d_target - d_start) / (d_end - d_start)
    return z_start + (z_end - z_start) * fraction
|
f9a03eb5b68ddd8a3873c29b9522f9726339e059
| 595,532 |
def FindGreetingsContainerId(service, account_id):
    """Find the greetings container ID.
    Args:
      service: the Tag Manager service object.
      account_id: the ID of the Tag Manager account from which to retrieve the
        Greetings container.
    Returns:
      The container ID string of the greetings container if it exists, or None
      if it does not.  (The previous docstring claimed a dictionary was
      returned; the code returns only the 'containerId' field.)
    """
    # Query the Tag Manager API to list all containers for the given account.
    container_wrapper = service.accounts().containers().list(
        accountId=account_id).execute()
    # Find and return the Greetings container ID if it exists.
    # .get with a default avoids a KeyError when the response has no
    # 'containers' key (e.g. an account with zero containers).
    for container in container_wrapper.get('containers', []):
        if container['name'] == 'CONTAINER NAME':
            return container['containerId']
    return None
|
06b44b76d3405394a9ac7d8cde921556a11b9a2c
| 273,032 |
def get_ens(sets):
    """
    Puts all entities appeared in interpretation sets in one set.
    :param sets: [{en1, en2, ...}, {en3, ..}, ...]
    :return: set {en1, en2, en3, ...}
    """
    return {entity for interpretation in sets for entity in interpretation}
|
7f5b59d6f5786ee9506bda72a4ee3f8cdf787c33
| 11,387 |
def sanitize_file_name(filename):
    """ Replaces unsafe symbols in filenames.

    Spaces become underscores; single and double quotes are removed.
    Args:
        filename (str): file name
    Returns:
        str: the sanitized file name
    """
    # Single C-level pass instead of three chained .replace() calls.
    return filename.translate(str.maketrans({' ': '_', "'": None, '"': None}))
|
aa8e6d4ea006c91d83a566d4d937ca4fa6257380
| 275,326 |
import pwd
def to_uid(name):
    """Return an uid, given a user name.

    If the name is an integer, make sure it's an existing uid.
    If the user name is unknown, raises a ValueError.

    :param name: user name (str) or numeric uid (int)
    :return: the numeric uid
    :raises ValueError: if the uid or user name does not exist
    :raises TypeError: if name is neither an int nor a str
    """
    if isinstance(name, int):
        try:
            pwd.getpwuid(name)
            return name
        except (KeyError, OverflowError) as exc:
            # Explicitly chain the lookup failure for clearer tracebacks.
            raise ValueError("%r isn't a valid user id" % name) from exc
    if not isinstance(name, str):
        raise TypeError(name)
    try:
        return pwd.getpwnam(name).pw_uid
    except KeyError as exc:
        raise ValueError("%r isn't a valid user name" % name) from exc
|
12681eb97bbfb78ac3aa0927904ecdf023477172
| 610,660 |
def transfer_turn(board):
    """Sets the turn to the opposite player"""
    # The two players are encoded with opposite signs, so negation flips turns.
    return board * -1
|
74fb9f483b5408894b08cb7efda843a3e3915e1e
| 61,964 |
from typing import List
from typing import Union
from typing import Dict
def _result_values_to_dict(treeview_headers: List[str],
result_values: List[Union[int, str, float]]) -> Dict[str, Union[int, str, float]]:
"""
Turn value list into a dict according to the keys based on treeview_headers
Parameters
----------
treeview_headers
headers that are used in tkinter treeview in order to make sure all needed values are present
result_values
result values that were just analysed
Returns
-------
result_dict_per_id
result values ordered to treeview_headers as key
"""
result_dict_per_id = {}
for key, value in zip(treeview_headers, result_values):
result_dict_per_id[key] = value
return result_dict_per_id
|
41e9c3dba5cdcf299a0a00b86861d2deb73ff6d5
| 600,648 |
def parse_refs_json(data):
    """ Function to parse the json response from the references collection
    in Solr. It returns the results as a list with the annotation and details.
    """
    # docs contains annotation, fileName, details, id generated by Solr
    docs = data['response']['docs']
    # Details is a single-element list; flatten it by taking its first item.
    # Iterate the docs directly instead of the range(len(...)) anti-pattern,
    # and drop the stale commented-out post-processing line.
    return [[doc.get('annotation'), doc.get('details')[0]] for doc in docs]
|
c8dadb7fa8e522788b731d535d18fb5343ddd95a
| 68,267 |
from typing import Dict
def package_extract_properties(package):
    """Extracts properties from the STIX package.

    :param package: parsed STIX package element (BeautifulSoup-style node with
        find_all / get / iteration over children).
    :return: dict with any of 'share_level', 'stix_package_title',
        'stix_package_description', 'stix_package_short_description' and
        'stix_package_information_source' that could be decoded.
    """
    result: Dict[str, str] = {}
    header = package.find_all('STIX_Header')
    if len(header) == 0:
        return result
    # share level
    mstructures = header[0].find_all('Marking_Structure')
    for ms in mstructures:
        type_ = ms.get('xsi:type')
        # Bug fix: these guards previously compared against `result` (the dict)
        # instead of None, so a missing attribute crashed on .lower() below.
        if type_ is None:
            continue
        color = ms.get('color')
        if color is None:
            continue
        type_ = type_.lower()
        if 'tlpmarkingstructuretype' not in type_:
            continue
        result['share_level'] = color.lower()  # https://www.us-cert.gov/tlp
        break
    # decode title
    title = next((c for c in header[0] if c.name == 'Title'), None)
    if title is not None:
        result['stix_package_title'] = title.text
    # decode description
    description = next((c for c in header[0] if c.name == 'Description'), None)
    if description is not None:
        result['stix_package_description'] = description.text
    # decode short description
    sdescription = next((c for c in header[0] if c.name == 'Short_Description'), None)
    if sdescription is not None:
        result['stix_package_short_description'] = sdescription.text
    # decode identity name from information_source
    information_source = next((c for c in header[0] if c.name == 'Information_Source'), None)
    if information_source is not None:
        identity = next((c for c in information_source if c.name == 'Identity'), None)
        if identity is not None:
            # Bug fix: supply a default so a missing Name no longer raises
            # StopIteration (the `is not None` check below shows the intent).
            name = next((c for c in identity if c.name == 'Name'), None)
            if name is not None:
                result['stix_package_information_source'] = name.text
    return result
|
18bb15acfe82fd87d355920444765a20ce8943a5
| 408,190 |
from datetime import datetime
def get_model_name(params):
    """
    Return the model name according to the hyperparameters and creation time.
    """
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    # Collect the fields in template order, then format them in one go.
    fields = (
        params['nn_type'],
        params['n_classes'],
        params['batch_size'],
        params['num_epochs'],
        params['learning_rate'],
        params['lr_decay'],
        params['doppler_filtering'],
        params['undersampling_filtering'],
        params['bound_undersampling_to'],
        params['crop_strategy'],
        params['batch_normalization'],
        timestamp,
    )
    return "{}_nc{}_bs{}_ep{}_lr{}_ld{}_df{}_uf{}_bt{}_cr{}_bn{}_ts{}".format(*fields)
|
985aea874332ad43ca35ffc5b493ecaefe3e3291
| 74,905 |
import json
def _read_dca_metadata_file(metadata_path):
"""Loads context aware metadata from the given path.
Args:
metadata_path (str): context aware metadata path.
Returns:
Dict[str, str]: The metadata.
Raises:
ValueError: If failed to parse metadata as JSON.
"""
with open(metadata_path) as f:
metadata = json.load(f)
return metadata
|
2db33d031242c8fbf5750a54a365336a5327f52d
| 182,768 |
def parse_directions(filename):
    """ Load the direction string from FILENAME. """
    # Rooms = '.', walls = '#', doors = '|' or '-'.
    # Current position = 'X'.
    # Rooms are size 1.
    # Movement is possible only through doors.
    # Start = '^', end = '$'.
    # Parenthesis containing a final '|', e.g. (NEWS|) mean that the
    # branch could be skipped entirely.
    with open(filename) as handle:
        # Only the first line matters; strip the trailing newline/whitespace.
        return handle.readline().rstrip()
|
376c2bd17fdd114820c685570cf4269e455aaff7
| 609,929 |
def _bound_mean_difference_ci(lower_ci, upper_ci):
"""Bound mean difference and normalized mean difference CI.
Since the plausible range of mean difference and normalized mean
difference is [-1, 1], bound the confidence interval to this range.
"""
lower_ci = lower_ci if lower_ci > -1 else -1
upper_ci = upper_ci if upper_ci < 1 else 1
return lower_ci, upper_ci
|
da0bffa36c6f61f1705416adf236a2afd4a71623
| 525,070 |
def unify_projection(dic):
    """Unifies names of projections.
    Some projections are referred using different names like
    'Universal Transverse Mercator' and 'Universe Transverse Mercator'.
    This function replaces synonyms by a unified name.
    Example of common typo in UTM replaced by correct spelling::
        >>> unify_projection({'name': ['Universe Transverse Mercator']})
        {'name': ['Universal Transverse Mercator']}
    :param dic: The dictionary containing information about projection
    :return: The dictionary with the new values if needed or a copy of old one
    """
    # the lookup variable is a list of lists, each list contains all the
    # possible names for a projection system; the first entry is canonical
    lookup = [['Universal Transverse Mercator',
               'Universe Transverse Mercator']]
    result = dict(dic)
    # Bug fix: the shallow dict copy shared the 'name' list with the caller,
    # so the in-place assignments mutated the caller's data despite the
    # docstring promising a copy.  Build a fresh list instead.
    result['name'] = [
        next((group[0] for group in lookup if name in group), name)
        for name in dic['name']
    ]
    return result
|
a3cdf4bb2a26ee0391281e8f8b0d3e7ea5c8a962
| 10,173 |
def cli_billing_list_periods(client):
    """List all available billing periods of the subscription"""
    # Materialize the client's (possibly lazy) iterable into a list.
    periods = client.list()
    return list(periods)
|
4ce7b549395e087b78d06e3fc886d1f6651457c1
| 426,199 |
def deep_dictionary_check(dict1: dict, dict2: dict) -> bool:
    """Used to check if all keys and values between two dicts are equal, and recurses if it encounters a nested dict."""
    if dict1.keys() != dict2.keys():
        return False
    return all(
        deep_dictionary_check(value, dict2[key])
        if isinstance(value, dict)
        else value == dict2[key]
        for key, value in dict1.items()
    )
|
b5011c2c79c79ecc74953e5f44db5c4a62464c07
| 7,776 |
def check_source_address_prefix(source_address_prefix: str):
    """Check if source address prefix is BatchNodeManagement or default.

    :param source_address_prefix: the prefix to check (case-insensitive)
    :return: True if the prefix is one of the two accepted values
    """
    # A single lowered membership test replaces two case-by-case comparisons.
    return source_address_prefix.lower() in ('batchnodemanagement', 'default')
|
0ecd11d401f01cc52323a02e88a52553dad339cc
| 380,518 |
def flat_key(*keys):
    """Generate one key from a sequence of identifiers"""
    return ':'.join(map(str, keys))
|
19bc120f9e6bb013a4be4186274a4dc758c76d22
| 114,054 |
def extract_person_fullname(person):
    """Extracts the person's fullname or builds it if needed.

    :param person: element with find/findall (e.g. an ElementTree Element).
    :return: set of non-empty <Text> values under <FullName>, or an empty
        list when no <FullName> child exists.  NOTE: the empty-case type
        (list vs set) is kept as-is for backward compatibility; both are falsy.
    """
    full_name_elt = person.find("FullName")
    if full_name_elt is None:
        return []
    full_name_list = set()
    for name in full_name_elt.findall("Text"):
        if name.text:
            full_name_list.add(name.text)
    return full_name_list
    # (The original had an unreachable `return []` after this point; removed.)
|
2bbdacea35d04936e97e9988550752d4a5201efa
| 174,592 |
def _collect_params(param_generator, info):
"""Collects all output of the given coroutine into a single list."""
params = []
while True:
param = param_generator.send(info)
if param is None:
return params
params.append(param)
|
30e1b6bd45eb03a45b8076379d16af5183382b2d
| 200,047 |
import pathlib
import typing
def _get_file_format_id(path: pathlib.Path, file_format: typing.Optional[str]) -> str:
"""Determine the file format for writing based on the arguments."""
formats = {
"yaml": path.name.endswith((".yml", ".yaml")),
"toml": path.name.endswith(".toml"),
"json": path.name.endswith(".json"),
}
finder = (k for k, v in formats.items() if file_format == k or v)
return next(finder, "json")
|
d2fc516ba1a1fae1c7d91e6bac351ca7bead5f04
| 695,521 |
def get_free_symbols(func, symbs_to_ignore=None):
    """
    Returns all free elements of a function, by ignoring symbs_to_ignore
    :param func: function to check (or a plain int/float, returned as-is)
    :param symbs_to_ignore: values to ignore in result
    :return: list of all free symbols
    """
    if isinstance(func, (int, float)):
        return [func]
    # Normalize the ignore argument to a list so `in` works uniformly.
    if not isinstance(symbs_to_ignore, list):
        symbs_to_ignore = [symbs_to_ignore]
    return [symbol for symbol in func.free_symbols
            if symbol not in symbs_to_ignore]
|
9887a9795c399d1695219307afa4d92e5d7fe55f
| 596,802 |
def TensorShapeProtoToList(shape):
    """Convert a TensorShape to a list.
    Args:
      shape: A TensorShapeProto.
    Returns:
      List of integers representing the dimensions of the tensor.
    """
    dims = []
    for dimension in shape.dim:
        dims.append(dimension.size)
    return dims
|
2edfcc3f1885a1562f72fdf7d81f8d3e92a39a75
| 279,968 |
def period_contribution(x):
    """
    Turns period--1, 2, 3, OT, etc--into # of seconds elapsed in game until start.
    :param x: str or int, 1, 2, 3, etc
    :return: int, number of seconds elapsed until start of specified period
    """
    try:
        period = int(x)
    except ValueError:
        # Non-numeric period: regulation OT starts at 3600s, anything else 3900s.
        return 3600 if x == 'OT' else 3900
    return 1200 * (period - 1)
|
fdb75dc92cd8b159dc6b73b26b38248e6d17f313
| 241,973 |
def optional_column(
    dictionary: dict, key: str, column_width: int = 0, alignment: str = "<"
):
    """Adds a value to a column, if the key exists in the dictionary
    and adds spaces of the appropriate width if not.
    Args:
        dictionary (dict): Dictionary with data inside
        key (str): Key of the data that is to be checked and added if present
        column_width (int): Number of spaces to be returned instead if the key is not present
        alignment (str): Specified alignment of column
    Returns:
        entry (str): Either the value of the entry to be put into the table, column_width number of spaces
    Raises:
        ValueError: if column_width is negative
    """
    # Bug fix: validate the width up front.  Previously a negative width was
    # only rejected when the key was present; with a missing key it silently
    # produced an empty string via `" " * negative`.
    if column_width < 0:
        raise ValueError("Column width for optional column must be non-negative")
    if key not in dictionary:
        return " " * column_width
    entry_string = str(dictionary[key])
    if column_width == 0:
        return entry_string
    return f"{entry_string:{alignment}{column_width}}"
|
6cfe690496ae0f35f1b62b39d4e7674f2f73a13f
| 587,195 |
def get_segment_mid_point(tigl, idx_wing, idx_segment, eta, xsi):
    """
    Return a mid point for a segment
    Args:
        :tigl: Tigl handle
        :idx_wing: Wing index
        :idx_segment: Segment index
        :eta: Relative segment coordinate
        :xsi: Relative segment coordinate
    Returns:
        :mid_point: List with mid coordinate
    """
    lower_pt = tigl.wingGetLowerPoint(idx_wing, idx_segment, eta, xsi)
    upper_pt = tigl.wingGetUpperPoint(idx_wing, idx_segment, eta, xsi)
    # Component-wise midpoint between the lower and upper surface points.
    return [(low + up) / 2.0 for low, up in zip(lower_pt, upper_pt)]
|
dd4bbf0383fff6cbb5283b15f03201aa35450025
| 447,024 |
import json
def json_object(object):
    """
    Method for formatting object request into JSON
    if not serialize the object, dont parse it
    :param object: representation of request
    :return: JSON-ify object
    """
    # NOTE: the parameter name shadows the builtin `object`; kept for
    # interface compatibility with existing keyword callers.
    if isinstance(object, (dict, list)):
        return json.dumps(object)
    return object
|
87db9e0ed4015d922622337996b68bac750b67ba
| 209,622 |
def get_pipe_dict(d, i):
    """
    given a dictionary d for a given instrument, return the dictionary for the
    ith pipeline version
    """
    version = list(d)[i]
    modes = d[version]
    # Take the first mode entry of that pipeline version.
    first_mode = next(iter(modes))
    return modes[first_mode]
|
ad2a95866ed5a95009274a41ae75af526e3c2fc3
| 621,200 |
def add(vec1, vec2):
    """Adds two vectors.
    Adds a length-n list to another length-n list.
    Args:
        vec1 (list): First vector.
        vec2 (list): Second vector.
    Returns:
        Sum of vec1 and vec2.
    """
    assert len(vec1) == len(vec2)
    return [a + b for a, b in zip(vec1, vec2)]
|
4e137cc4714d5c7f626a2ed3b2f294016404867f
| 296,974 |
def build_tac_env_variables(tournament_id: str, version_id: str, seed: int) -> str:
    """
    Return a sequence of 'VARIABLE_1=VALUE_1 VARIABLE_2=VALUE_2 ...'.
    :param tournament_id: the id of the tournament
    :param version_id: the id of the experiment
    :param seed: the seed for the random module
    :return: a string encapsulating the params
    """
    return f"DATA_OUTPUT_DIR={tournament_id} VERSION_ID={version_id} SEED={seed}"
|
6afc2615cf8ff7ff3cac4552e48dad9b1de185f6
| 240,145 |
def transform(res):
    """ Transforms dictionary to a list of lists """
    # Names and roots are visited in sorted order; iterations and peaks keep
    # their stored order.  Each peak is an (energy, wave, intensity) triple.
    return [
        [name, root, iteration, energy, wave, intensity]
        for name in sorted(res)
        for root in sorted(res[name])
        for iteration in res[name][root]["peaks"]
        for energy, wave, intensity in res[name][root]["peaks"][iteration]
    ]
|
8c45fd15f91102db8d869ca74c604151c36f526d
| 247,331 |
def has_valid_custom_headers(custom_headers: list):
    """
    Check if the list of custom header is valid
    :param custom_headers: list of CustomHeader to validate
    :type custom_headers: list
    :return the result
    :rtype bool
    """
    if custom_headers is None:
        return True
    # Only an explicit False from isvalid() marks a header invalid
    # (matching the original `valid is False` check).
    return all(header.isvalid() is not False for header in custom_headers)
|
8ff6bf5908717a723174be56ad89b7308199480f
| 613,198 |
def Force_Gravity(mass_ship, altitude):
    """Calculates the force of gravity acting on the ship at altitude in meters
    Args:
        mass_ship (float): The mass of the ship at timestep i.
        altitude (float): The altitude of the rocket above Mean Sea level
    Returns:
        force_gravity (float): Calculated force of gravity at timestep i.
    Raises:
        ValueError: if mass_ship is negative.
    """
    G = 6.674 * 10**-11
    MASS_EARTH = 5.972 * 10**24
    RADIUS_EARTH = 6.371 * 10**6  # meters
    if mass_ship < 0:
        # Bug fix: a bad argument value warrants ValueError, not NameError
        # (which Python reserves for undefined names).  Both are Exception
        # subclasses, so broad handlers keep working.
        raise ValueError("Mass error")
    # Newton's law of universal gravitation at distance (R_earth + altitude).
    # (The unused STANDARD_GRAVITY constant was removed.)
    return G * mass_ship * MASS_EARTH / ((RADIUS_EARTH + altitude)**2)
|
2398742809b51a945bd47f3645327dca77c3f970
| 391,584 |
def get_recording_region_size_in_bytes(
        n_machine_time_steps, bytes_per_timestep):
    """ Get the size of a recording region in bytes.

    :param n_machine_time_steps: number of time steps to record, or None
    :param bytes_per_timestep: payload bytes recorded per time step
    :return: total region size in bytes
    :raises ValueError: if n_machine_time_steps is None
    """
    if n_machine_time_steps is None:
        # ValueError (an Exception subclass) replaces the bare Exception so
        # callers can catch the failure precisely; broad handlers still work.
        raise ValueError(
            "Cannot record this parameter without a fixed run time")
    # Per time step: the payload plus 4 extra bytes (fixed per-step overhead).
    return n_machine_time_steps * (bytes_per_timestep + 4)
|
a5f1f7a427dd7e6657067ea2316c25ca8dbb27e3
| 453,464 |
def is_provider_redis(provider):
    """
    Check if a provider configuration is a redis configuration.

    :param provider: mapping with an optional "type" key
    :return: True iff provider["type"] equals "redis"
    """
    # The former `is not None and == "redis"` pair is redundant:
    # None == "redis" is already False.
    return provider.get("type") == "redis"
|
d63b9bc2e2ac404cc8e688eed587cfb7b2916c16
| 500,904 |
def get_comments(events, comments=None):
    """
    Pick comments and pull-request review comments out of a list of events.
    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        comments: the previous output of this function (a list of comment
            dicts), used as the starting state.
    Returns:
        comments: a list of dict(author=..., comment=..., timestamp=...),
        ordered with the earliest comment first.
    """
    # Index previous comments by id so later events can update/delete them.
    if not comments:
        comments = {}
    else:
        comments = {c['id']: c for c in comments}
    # Bug fix: a stray `comments = {}` here previously threw away the state
    # built above, silently turning the `comments` argument into a no-op.
    for event, body, _timestamp in events:
        action = body.get('action')
        if event in ('issue_comment', 'pull_request_review_comment'):
            comment_id = body['comment']['id']
            if action == 'deleted':
                comments.pop(comment_id, None)
            else:
                c = body['comment']
                comments[comment_id] = {
                    'author': c['user']['login'],
                    'comment': c['body'],
                    'timestamp': c['created_at'],
                    'id': c['id'],
                }
    return sorted(comments.values(), key=lambda c: c['timestamp'])
|
f3b412cb36463b523fc2d9c67554b856eeb1489e
| 82,043 |
def polynomial(coefficients):
    """
    Create a polynomial from the given list of coefficients.
    For example, if the coefficients are [1, 0, 3, 5], then
    the polynomial returned is
        P(t) = t^3 + 3t + 5
    If the coefficients are [3, 2, -2, 9, 4, 0, 1, 0], then
    the polynomial returned is
        P(t) = 3t^7 + 2t^6 - 2t^5 + 9t^4 + 4t^3 + t
    """
    def evaluate(t):
        # Horner's method: fold each coefficient into the running total.
        total = 0
        for coeff in coefficients:
            total = total * t + coeff
        return total
    return evaluate
|
6b17b1cd32b56c46ea6244e5666330d84fc5d310
| 631,907 |
def order_preserving_timestr_reslice(s):
    """Changes the Python format for asctime/ctime 'Sat Jun 06 16:26:11 1998' to '1998-06-06 16:26:11' so that it always increases over time"""
    month_table = "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
    # Zero-fill spaces so single-digit days ("Jun  6") slice out as "06".
    s = s.replace(" ", "0")
    # Bug fix: tuple.index is 0-based, so +1 is needed to map "Jan" to month
    # 01 (previously "Jun" produced "05" instead of "06").
    y, m, d, t = int(s[-4:]), month_table.index(s[4:7]) + 1, int(s[8:10]), s[11:19]
    return "%04d-%02d-%02d %s" % (y, m, d, t)
|
5a1bda58046fbb43f67b4e51795a41a71f47a20b
| 374,385 |
def to_bool(boolean: bool):
    """
    Checks if an argument is of type bool:
    - If argument is type bool, simply returns argument
    - If argument is type str, attempts to convert to type bool
    - Raises TypeError otherwise
    :param boolean: bool, str
        argument to be converted to type bool
    :return: bool
        original argument or converted string-to-bool argument
    """
    if isinstance(boolean, bool):
        return boolean
    if isinstance(boolean, str):
        lowered = boolean.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    raise TypeError('value {} is not a bool'.format(boolean))
|
da0848703887ae403934e2d16ddd7634d8348d57
| 641,901 |
import base64
def string_to_base64(string):
    """
    Encodes a string in base64 (> str)
    """
    # encode to ASCII bytes, base64-encode, then decode back to str
    return base64.b64encode(string.encode('ascii')).decode('ascii')
|
d918ad8120331aa62efcc548ff6345cd8c67580b
| 643,934 |
def aminoacidSMILES(amino):
    """
    Obtain the SMILES representation of a particular amino acid
    Arguments:
    amino -- One-letter code of the amino acid
    Return:
    smiles -- 1D Chemical representation of the amino acid
    """
    # Flat one-letter-code -> SMILES mapping (the nested {'SMILES': ...}
    # wrapper added no information).
    smiles_by_code = {
        'G': 'NCC(=O)O',
        'A': 'N[C@@]([H])(C)C(=O)O',
        'R': 'N[C@@]([H])(CCCNC(=N)N)C(=O)O',
        'N': 'N[C@@]([H])(CC(=O)N)C(=O)O',
        'D': 'N[C@@]([H])(CC(=O)O)C(=O)O',
        'C': 'N[C@@]([H])(CS)C(=O)O',
        'E': 'N[C@@]([H])(CCC(=O)O)C(=O)O',
        'Q': 'N[C@@]([H])(CCC(=O)N)C(=O)O',
        'H': 'N[C@@]([H])(CC1=CN=C-N1)C(=O)O',
        'I': 'N[C@@]([H])(C(CC)C)C(=O)O',
        'L': 'N[C@@]([H])(CC(C)C)C(=O)O',
        'K': 'N[C@@]([H])(CCCCN)C(=O)O',
        'M': 'N[C@@]([H])(CCSC)C(=O)O',
        'F': 'N[C@@]([H])(Cc1ccccc1)C(=O)O',
        'P': 'N1[C@@]([H])(CCC1)C(=O)O',
        'S': 'N[C@@]([H])(CO)C(=O)O',
        'T': 'N[C@@]([H])(C(O)C)C(=O)O',
        'W': 'N[C@@]([H])(CC(=CN2)C1=C2C=CC=C1)C(=O)O',
        'Y': 'N[C@@]([H])(Cc1ccc(O)cc1)C(=O)O',
        'V': 'N[C@@]([H])(C(C)C)C(=O)O',
    }
    return smiles_by_code[amino]
|
d2c4104ca2ba1a48a52f0846c976a3879654de47
| 589,692 |
def get_grid_data(df):
    """
    Prunes dataframe to rows whose longitudes are multiples of 60
    degrees and whose years are multiples of 10. This includes only
    lat/lon grid locations for which we have USNO data for all eight
    types of twilight events.
    """
    on_grid = (df['Longitude'] % 60 == 0) & (df['Year'] % 10 == 0)
    return df.loc[on_grid]
|
4788356a9c0b14759a34c436f2f8267ea07043be
| 697,449 |
def lifetime_high(m):
    """Compute the lifetime of a high mass star (M > 6.6 Msun)
    Args:
        m (array): stellar mass.
    Returns:
        array: stellar lifetimes.
    """
    # Power-law fit plus constant floor, scaled to the output units.
    return 1000. * (1.2 * m**(-1.85) + 0.003)
|
a7990108f50befd2c73ba4099c4005b827dded94
| 678,907 |
def list_duplicates(cells):
    """Find duplicates in list for detailed reporting
    """
    # Track every element seen so far; anything met a second time (or more)
    # lands in `duplicates` exactly once.
    seen = set()
    duplicates = set()
    for cell in cells:
        if cell in seen:
            duplicates.add(cell)
        else:
            seen.add(cell)
    return list(duplicates)
|
15ff3405d33d7e9b0a47021f164c5f53a316cbf6
| 629,073 |
import binascii
def hash_str(value: bytes) -> str:
    """
    Take a hash as a binary value, and return it represented as a hexadecimal
    string.
    """
    # bytes.hex() yields the same lowercase hex as binascii.hexlify + decode.
    return value.hex()
|
765531f7428db659e07e0f31a7d444b7ee6108b7
| 199,808 |
def int_to_python(self, value):
    """
    Convert an 'int' field from solr's xml format to python and return it.
    """
    converted = int(value)
    return converted
|
025d97ddbb42c43483e5947b42f3a0d513979c0b
| 401,128 |
def value_to_HSL(value):
    """ Convert a value ranging from 0 to 100 into a color
    ranging from red to green using the HSL colorspace.
    Args:
        value (int): integer betwee 0-100 to be converted.
    Returns:
        Tuple[int]: hue, saturation and lightnes corresponding
            to the converted value.
    """
    # Hue scales linearly: value 0 -> 0 (red), value 100 -> 120 (green);
    # saturation and lightness are fixed at 90 and 40.
    return (int(value * 1.2), 90, 40)
|
1cf9db94cab0bd1580fcd232c4cd09a50fd529b9
| 374,887 |
def apply_label(node):
    """Return label of apply node."""
    return type(node.op).__name__
|
bd3b20f744440771375678b25269bf24071780ac
| 592,948 |
def get_chart_error(bands, chart_filter_name):
    """
    Helper fn for insert_chart_data().
    :param bands: list of band dicts, as extracted from chart json.
    :param chart_filter_name: chart-style filter name, as "B" or "Ic" (not "I").
    :return: error if found, zero otherwise [float]
    """
    # Iterate the band dicts directly instead of indexing via range(len(...)).
    for band in bands:
        if band['band'] == chart_filter_name:
            error = band['error']
            # A matching band with a null error still counts as zero.
            return error if error is not None else 0
    return 0  # default if entry for filter not found.
|
aecff65b1655b445b5d7c69aa71dec2df8686625
| 578,802 |
def bytes_to_int(b: bytes):
    """
    Convert a big-endian byte array to an integer.

    The sign convention is sign-magnitude, NOT two's complement: when the
    highest bit of the first byte is set, that bit is cleared and the
    remaining magnitude is negated.
    :param b: the byte array
    :return: the integer
    """
    negative = b[0] >= 128
    if negative:
        # Clear the sign bit in the first byte without touching the caller's
        # buffer, then decode the remaining magnitude.
        payload = bytes([b[0] & 127]) + bytes(b[1:])
    else:
        payload = bytes(b)
    magnitude = int.from_bytes(payload, byteorder='big')
    return -magnitude if negative else magnitude
|
db9b1e205def6e3652aa6de0aca0a00c54dad4a2
| 625,370 |
def ts_truncate_time(timestamp):
    """
    Set time to zero in a timestamp.
    :param ts: Timestamp in seconds.
    :type ts: int
    :return: Timestamp in seconds, without counting time (ie: DD-MM-YYYY 00:00:00)
    :rtype: int
    """
    SECONDS_PER_DAY = 86400
    return timestamp - (timestamp % SECONDS_PER_DAY)
|
d5568c07cb13d991b5210e1652195512d9718ef1
| 77,074 |
import torch
def make_A(As, ns, device=torch.device("cpu"), dtype=torch.float32):
    """Create the 3D tensor A as needed by gel_solve, given a list of feature
    matrices.
    Arguments:
        As: list of feature matrices, one per group (size mxn_j).
        ns: LongTensor of group sizes.
        device: torch device (default cpu).
        dtype: torch dtype (default float32).
    """
    num_groups = len(ns)
    max_group_size = ns.max()
    m = As[0].shape[0]
    A = torch.zeros(num_groups, max_group_size, m, device=device, dtype=dtype)
    for group_idx, group_size in enumerate(ns):
        # Copy A_j transposed into the first group_size rows; the rest stay 0.
        A[group_idx, :group_size, :] = As[group_idx].t()
    return A
|
238fc4252bd93f64e08f519611582281660e372b
| 583,309 |
def read_var_int(handle):
    """
    Read a variable-length integer from the given file handle.

    Each byte contributes its low 7 bits, least-significant group first.
    A byte with the high bit (0x80) SET terminates the sequence; while the
    high bit is clear, reading continues and a bias of `shift` is added
    per continuation byte.

    NOTE(review): this is the inverse of the common VLQ/LEB128 convention
    (where a set high bit means "more bytes follow"), and the per-byte bias
    makes it look like a biased/bijective base-128 encoding -- confirm
    against the matching writer before changing anything here.

    :param handle: binary file-like object supporting read(1).
    :return: the decoded non-negative integer.
    """
    res = 0
    shift = 1
    while True:
        # read(1)[0] raises IndexError on EOF (empty bytes object).
        byte = handle.read(1)[0]
        res += (byte & 0x7f) * shift
        if byte & 0x80: break
        shift <<= 7
        # Bias added for every continuation byte consumed.
        res += shift
    return res
|
380a708925bf1d07fb0a81de073b559aed16eb0c
| 50,892 |
def paranteza_deschisa(paranteza):
    """
    Return the matching open bracket for a given closing bracket:
    ')' => '(', ']' => '['.  Any other input yields None.
    (Docstring translated from Romanian.)
    """
    return {')': '(', ']': '['}.get(paranteza)
|
a9f0386602ca97b8afb770c18544a80ffb41f0ed
| 546,599 |
import torch
def init_model(model_path: str):
    """
    Loads PyTorch neural net located at <model_path>, and sets the model to
    evaluation mode.
    :param model_path: Global path to PyTorch model.
    :return: Module
    """
    # SECURITY NOTE(review): torch.load unpickles arbitrary objects -- only
    # load model files from trusted sources.
    # map_location forces the weights onto the CPU regardless of the device
    # the model was saved from.
    model = torch.load(model_path, map_location=torch.device('cpu'))
    # Switch to inference behavior (e.g. disables dropout / batch-norm
    # training-mode updates, per PyTorch's Module.eval semantics).
    model.eval()
    return model
|
fc3c5d35099dd306c675b323094dc5d5660f045e
| 95,573 |
def get_top_n_kmers(kmer_count, num):
    """Get a list of top_n most frequent kmers."""
    # Sort keys by their counts, highest first; sort stability keeps ties
    # in insertion order, matching the original items()-based version.
    ranked = sorted(kmer_count, key=kmer_count.get, reverse=True)
    return ranked[:num]
|
c42c5722a5d0e578d451336896afaeb1f84a03d8
| 654,483 |
def extract_type(item):
    """Extract item possible types from jsonschema definition.
    >>> extract_type({'type': 'string'})
    ['string']
    >>> extract_type(None)
    []
    >>> extract_type({})
    []
    >>> extract_type({'type': ['string', 'null']})
    ['string', 'null']
    """
    if not item or "type" not in item:
        return []
    declared = item["type"]
    if isinstance(declared, list):
        return declared
    return [declared]
|
9d8cca84845274e66b5381c840a6bff533130179
| 421,981 |
def _is_blank(line):
"""Return true if `line` is blank."""
return len(line.split())==0
|
36ecf817278395e5006e9b1746aeaa9851401c04
| 610,851 |
async def empty_get(*args, **kwargs):
    """
    Defines a noop get method.

    Accepts and ignores any positional/keyword arguments.
    :return: a pair of empty dicts -- presumably (data, metadata) or
        (body, headers); confirm the expected shape against the callers.
    """
    return {}, {}
|
a4575d6b8d3225cb7510ae976a73ac9c54d084f0
| 387,667 |
import binascii
def sha_to_hex(sha):
    """Takes a string and returns the hex of the sha within.

    :param sha: 20-byte binary SHA-1 digest.
    :return: 40-character hex representation (bytes).
    """
    hexsha = binascii.hexlify(sha)
    # Bug fix: the assert message formatted the bytes object itself with %d,
    # which raised TypeError at failure time; report the length instead.
    assert len(hexsha) == 40, "Incorrect length of sha1 string: %d" % len(hexsha)
    return hexsha
|
6c4a5d2e16ed3acc718f36153ce71497da6914f3
| 479,378 |
def _get_binlog(server):
"""Retrieve binary log and binary log position
server[in] Server instance
Returns tuple (binary log, binary log position)
"""
binlog, binlog_pos = '', ''
res = server.exec_query("SHOW MASTER STATUS")
if res != [] and res is not None:
binlog = res[0][0]
binlog_pos = res[0][1]
return binlog, binlog_pos
|
afcd4548deebf466d867b3c1f0e8841955223086
| 481,716 |
def rem_mentions_urls(tokens):
    """
    Replaces any mentions with 'at' and any url with 'url'.
    """
    def _normalize(token):
        # @mention tokens collapse to 'at'; http(s) links collapse to 'url'.
        if token.startswith('@'):
            return 'at'
        if token.startswith('http'):
            return 'url'
        return token
    return [_normalize(token) for token in tokens]
|
5a2a989c53606f9785fa11fa1f2b354a5c8f581a
| 480,127 |
def __get_page_component_filename_from_page_data(page_data):
"""Return a generated page component filename from json page data."""
return "{}_{}.js".format(page_data["style"], str(page_data["id"]).zfill(2))
|
3b2d4cfca40f689409f520cd9d1fa3c7e524872b
| 591,896 |
import torch
def embed_passage_batch(passages, tokenizer, embedding_model, max_length, device="cuda:0"):
    """Embed a batch of text passages with a retribert-style model.

    :param passages: (N element list) batch of passage strings to embed
    :param tokenizer: retribert tokenizer (yjernite/retribert-base-uncased)
    :param embedding_model: retribert model (yjernite/retribert-base-uncased)
    :param max_length: (int) token budget per passage; inputs are padded or
        truncated to exactly this length
    :param device: device string (cpu / cuda:x) the embedding runs on
    :return: numpy array with the batch embeddings (moved back to CPU so it
        can be stored, e.g. in a np.memmap)
    """
    batch_tokens = tokenizer(
        passages,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    # Inference only: no gradients needed, so no detach() is required below.
    with torch.no_grad():
        input_ids = batch_tokens["input_ids"].to(device)
        attention_mask = batch_tokens["attention_mask"].to(device)
        batch_embeddings = embedding_model.embed_answers(input_ids, attention_mask)
    return batch_embeddings.cpu().numpy()
|
2bb6bedf553243cf4eb5279e573d23336d83b893
| 369,482 |
def separateLeafNameFromLeafAnnotation( leafName , sepSp = "_" , sepAnnot = (".","@") ):
    """Split a leaf name into its gene-name part and its annotation part.

    Takes:
        - leafName (str): leaf name, possibly carrying reconciliation
          information (example: "g_g3.T@4|3@1|g")
        - sepSp (str) [default "_"]: separator between species and gene name
        - sepAnnot (tuple) [default (".", "@")]: characters that may start
          the annotation
    Returns:
        (tuple)
            (str): gene name (with the species prefix kept)
            (str): reconciliation annotation ("" when there is none)
    """
    spName, _, rest = leafName.partition(sepSp)
    # Position of the first annotation separator; len(rest) when absent.
    cut = next((pos for pos, ch in enumerate(rest) if ch in sepAnnot), len(rest))
    return spName + sepSp + rest[:cut], rest[cut:]
|
7132af91f3493bebf9e6551d011cc8d668d200f7
| 509,683 |
def output_fn(prediction, response_content_type):
    """Wrap the model prediction in the response payload.

    `response_content_type` is accepted for interface compatibility with
    the serving framework but is not used here.
    """
    payload = {'generated_text': prediction}
    return payload
|
7be30483787747191b458f46b8d4fc215a0e7de0
| 644,263 |
from functools import reduce
def _unique_from_end(in_str):
"""Return a string with all redundant characters removed,
removing left-most redundant entries
i.e. "ijikik" -> "jik"
Parameters
----------
in_str: str
Returns
-------
str
Examples
--------
>>> _unique_from_end("ijikik")
"jik"
"""
return reduce(lambda acc, x: acc + x if x not in acc else acc, in_str[::-1], "")[
::-1
]
|
df0996f9b523473d59862d2d71f94dd0bec9504a
| 553,923 |
import math
import random
def _get_random_string(size):
"""Get random string of given size."""
if not size:
return ""
chars = "abcdefghijklmnopqrstuvwxyz"
chars *= int(math.ceil(size / len(chars)))
chars = list(chars[:size])
random.shuffle(chars)
return "".join(chars)
|
a83574e2e48bbb8146dbcb41e48b294a6e337c4b
| 518,288 |
def diff(a, b):
    """Return the set of elements of b that are not in a.

    Note: despite the original docstring claiming a list, this has always
    returned a set; the documentation is corrected rather than the behavior.
    """
    return set(b) - set(a)
|
b7e0b379bf9b05f1d246ae288f4a65f1834241f7
| 568,018 |
def add(self, ir="", ia="", ib="", ic="", name="", facta="", factb="",
        factc="", **kwargs):
    """Adds (sums) variables.

    APDL Command: ADD

    Computes IR = (FACTA x IA) + (FACTB x IB) + (FACTC x IC).

    Parameters
    ----------
    ir
        Reference number (2 to NV [NUMVAR]) assigned to the result; a
        previously defined variable with the same number is overwritten.
    ia, ib, ic
        Reference numbers of the variables to operate on.  Leave IC blank
        for two variables, IB and IC blank for one.
    name
        Thirty-two character identifying name for printout and displays.
        Embedded blanks are compressed upon output.
    facta, factb, factc
        Scaling factors (positive or negative) applied to the
        corresponding variables (default to 1.0).
    """
    # The three consecutive commas are mandatory placeholder fields of the
    # APDL ADD command.
    return self.run(
        f"ADD,{ir},{ia},{ib},{ic},{name},,,{facta},{factb},{factc}",
        **kwargs,
    )
|
f361780ff0ec64e01fdfdb4425450b68c7706672
| 133,902 |
def make_interpolator(in_min, in_max, out_min, out_max):
    """Return a function that maps [in_min, in_max] linearly onto
    [out_min, out_max]."""
    # Precompute the constant ratio between the two ranges.
    scale = float(out_max - out_min) / float(in_max - in_min)
    def interpolate(value):
        return out_min + (value - in_min) * scale
    return interpolate
|
2e8835e514992de70976691d53918324a75f8b24
| 413,092 |
import functools
def middleware(f):
    """Function decorator for making WSGI middlewares.

    `f(app, wsgi_env, start_resp)` becomes a factory that takes `app` and
    returns a WSGI-callable; the factory keeps f's metadata.
    """
    def factory(app):
        def wrapped(wsgi_env, start_resp):
            return f(app, wsgi_env, start_resp)
        return wrapped
    return functools.update_wrapper(factory, f)
|
33576f9bc905e5abc673e442198a19db4b43539f
| 668,241 |
def relroot_for_dirname(val):
    """Return the relative URL that climbs from a directory back to the
    root, e.g. "if-archive/games" maps to "../../..".
    """
    # One "/.." per path separator, on top of the base "../..".
    return '../..' + '/..' * val.count('/')
|
62ddb1573149687dd09efbd63c5dd0137a20eabb
| 281,348 |
def is_integrally_closed(x):
    """
    Return whether ``x`` is integrally closed.

    Delegates to the object's own ``is_integrally_closed`` method.

    EXAMPLES::
        sage: is_integrally_closed(QQ)
        True
        sage: K.<a> = NumberField(x^2 + 189*x + 394)
        sage: R = K.order(2*a)
        sage: is_integrally_closed(R)
        False
    """
    result = x.is_integrally_closed()
    return result
|
1c070d7e7831750bbc6ee37f9a7e03bc519f3205
| 254,740 |
import logging
def log_aucs(**aucs):
    """Log and tabulate AUCs given as nested dicts '{model: {label: auc}}'.

    Each keyword name is used as the AUC column title; output goes to the
    root logger at INFO level.
    """
    header_fmt = "{:<35} {:<20} {:<15}"
    row_fmt = "{:<35} {:<20} {:<15.10f}"
    width = 85
    separator = '-' * width
    logging.info(separator)
    for auc_name, per_model in aucs.items():
        logging.info(header_fmt.format('Model', 'Label', auc_name + ' AUC'))
        for model_name, per_label in per_model.items():
            for label_name, auc_value in per_label.items():
                logging.info(row_fmt.format(model_name, label_name, auc_value))
    logging.info(separator)
|
c53c5ac3f230570a49bef02dab79b79603750f40
| 620,365 |
def number_of_non_blank_cells(row):
    """Count cells in *row* that are not None.

    Sums over a generator instead of materializing a boolean list and
    calling .count(True); behavior is unchanged (only None is "blank").
    """
    return sum(1 for c in row if c is not None)
|
fb6ecf0a26969bcc14a3dd4c9f71914e262871ed
| 447,202 |
def replace(image, replacement_image):
    """Replace all bands of *image* with those of *replacement_image*.

    Selecting no bands first keeps *image*'s properties while dropping its
    band data, then the replacement bands are attached.
    """
    stripped = image.select([])
    return stripped.addBands(replacement_image)
|
c524bf62a1d75e09a59cebc4ee06ef7294683692
| 91,342 |
def arr_in_list(gast_list):
    """Return True if any gast node in the list has type "arr".

    Uses any() with a generator instead of a manual loop; behavior and
    short-circuiting are identical.
    """
    return any(node["type"] == "arr" for node in gast_list)
|
fdc75481bd21018fccda185ec5fb2e5e3384ccc1
| 291,481 |
from pathlib import Path
def ds_kwargs(pudl_settings_fixture, request):
    """Assemble keyword arguments for constructing a PUDL datastore."""
    gcs_cache = request.config.getoption("--gcs-cache-path")
    local_cache = Path(pudl_settings_fixture["pudl_in"]) / "data"
    return dict(
        gcs_cache_path=gcs_cache,
        local_cache_path=local_cache,
        sandbox=pudl_settings_fixture["sandbox"],
    )
|
42e602a3c3621eafc6f3d2edb582a6c4decaec19
| 90,818 |
def hash_file(f, hash_type, block_size=2**20):
    """Return the hex digest of file object *f* using *hash_type*.

    Reads in chunks of *block_size* bytes so arbitrarily large files can
    be hashed with constant memory.
    """
    digest = hash_type()
    # Loop until read() returns an empty (falsy) chunk.
    while chunk := f.read(block_size):
        digest.update(chunk)
    return digest.hexdigest()
|
5bbe73e2135fa45c90b3a237701dd44c80b785ab
| 560,133 |
def select_db(cli, dbname):
    """
    Select a database by name.
    :param cli: Client instance
    :param dbname: Database name
    :return: A database object
    """
    return cli[dbname]
|
a1149ba33128980cb52ea1e84e45a6eaeaa4aeaa
| 67,103 |
def match_edge(u_var, v_var, u_id, v_id, edge_var, u_label, v_label,
               edge_label='edge'):
    """Build a Cypher MATCH query for a single edge.

    Parameters
    ----------
    u_var, v_var
        Variable names for the source and target nodes
    u_id, v_id
        Ids of the source and target nodes
    edge_var
        Variable name bound to the matched edge
    u_label, v_label
        Labels of the source and target nodes
    edge_label
        Label of the edge to match, default is 'edge'
    """
    # Doubled braces emit literal { } around the id property map.
    return (
        f"MATCH ({u_var}:{u_label} {{id: '{u_id}'}})"
        f"-[{edge_var}:{edge_label}]->"
        f"({v_var}:{v_label} {{id: '{v_id}'}})\n"
    )
|
cc8d0ce16cea7b5cb354887be09ff54761b871fe
| 371,854 |
def _make_snippet_bidi_safe(snippet):
"""Place "directional isolate" characters around text that might be RTL.
U+2068 "FIRST STRONG ISOLATE" tells the receiving client's text renderer
to choose directionality of this segment based on the first strongly
directional character it finds *after* this mark.
U+2069 is POP DIRECTIONAL ISOLATE, which tells the receiving client's text
renderer that this segment has ended, and it should go back to using the
directionality of the parent text segment.
Marking strings from the YouTube API that might contain RTL or
bidirectional text in this way minimizes the possibility of weird text
rendering/ordering in IRC clients' output due to renderers' incorrect
guesses about the directionality or flow of weakly directional or neutral
characters like digits, punctuation, and whitespace.
Weird text wrapping in lines with long opposite-direction phrases that
cross visual line breaks may still occur, and any values that *contain*
both RTL and LTR text might still render funny in other waysβbut that's
really much farther into the weeds than we need to go. This should be
enough of a hint to clients' text rendering that the results won't
*completely* suck.
See https://github.com/sopel-irc/sopel-youtube/issues/30
"""
keys = ['title', 'channelTitle']
for key in keys:
try:
snippet[key] = "\u2068" + snippet[key] + "\u2069"
except KeyError:
# no need to safeguard something that doesn't exist
pass
return snippet
|
71941b6e3c95881ac73ab55ef18871d7a3ca561e
| 93,789 |
import json
def load_from_json(suff=""):
    """Load previously saved score data from the current directory.

    Reads 'global_scores{suff}.json' and 'local_scores{suff}.json' and
    returns the pair (global_scores, local_scores).
    """
    with open("global_scores{}.json".format(suff), "r") as fh:
        global_scores = json.load(fh)
    with open("local_scores{}.json".format(suff), "r") as fh:
        local_scores = json.load(fh)
    return global_scores, local_scores
|
a764bfa2f4b5ea8d884f1e9496e43d587a4879d2
| 162,335 |
def sub(num1, num2):
    """Return the difference num1 - num2."""
    difference = num1 - num2
    return difference
|
f5e0b2e47302c50f79f1ba8a3d3bc6dffa8054c6
| 674,732 |
def _get_chan_from_name(nm):
"""Extract channel number from filename.
Given a ABI filename, extract the channel number.
Args:
nm (str): Filename / path to ABI file
Returns:
int: channel number
"""
return int(nm.split("/")[-1].split("-")[3][3:5])
|
afff5be3786a99dc30090038d5fa3c9248db6357
| 437,124 |
def _min_dummies(dummies, sym, indices):
    """
    Return list of minima of the orbits of indices in group of dummies
    see `double_coset_can_rep` for the description of `dummies` and `sym`
    indices is the initial list of dummy indices
    Examples
    ========
    >>> from sympy.combinatorics.tensor_can import _min_dummies
    >>> _min_dummies([list(range(2, 8))], [0], list(range(10)))
    [0, 1, 2, 2, 2, 2, 2, 2, 8, 9]
    """
    num_types = len(sym)
    # m[j]: minimum dummy index of type j, or None when that type has no
    # dummies.
    m = []
    for dx in dummies:
        if dx:
            m.append(min(dx))
        else:
            m.append(None)
    # Start from a copy of the input; each dummy index is replaced by the
    # minimum of its type's orbit, non-dummy indices stay unchanged.
    res = indices[:]
    for i in range(num_types):
        # NOTE(review): this outer `i` is immediately shadowed by the inner
        # `for c, i in enumerate(indices)`, so each outer pass repeats the
        # same idempotent work over `indices`; the outer loop appears
        # redundant but is kept as-is to preserve behavior.
        for c, i in enumerate(indices):
            for j in range(num_types):
                if i in dummies[j]:
                    res[c] = m[j]
                    break
    return res
|
aa046ddfa9f26a161906e29a3681ff8358f6c8c0
| 569,765 |
def resource_name_for_resource_type(resource_type, row):
    """Return the resource name for the resource type.

    Each returned row contains all possible changed fields; this picks the
    resource name of the changed field matching `resource_type`.

    Args:
        resource_type: the string equivalent of the resource type
        row: a single row returned from the service
    Returns:
        The resource name of the field that changed, or '' for
        UNSPECIFIED/UNKNOWN types.
    """
    # Dispatch table instead of an if/elif chain.
    field_by_type = {
        'AD_GROUP': lambda r: r.change_status.ad_group.value,
        'AD_GROUP_AD': lambda r: r.change_status.ad_group_ad.value,
        'AD_GROUP_CRITERION': lambda r: r.change_status.ad_group_criterion.value,
        'CAMPAIGN': lambda r: r.change_status.campaign.value,
        'CAMPAIGN_CRITERION': lambda r: r.change_status.campaign_criterion.value,
    }
    getter = field_by_type.get(resource_type)
    return getter(row) if getter else ''
|
500bc32be1765f1e516f4f7cd386b24c3c4f373f
| 704,329 |
def clock_emoji(time):
"""
Accepts an hour (in 12 or 24 hour format) and returns the correct clock emoji.
Args:
time: 12 or 24 hour format time (:00 or :30)
Returns:
clock: corresponding clock emoji.
"""
hour_emojis = {
"0": "π",
"1": "π",
"2": "π",
"3": "π",
"4": "π",
"5": "π",
"6": "π",
"7": "π",
"8": "π",
"9": "π",
"10": "π",
"11": "π",
}
half_emojis = {
"0": "π§",
"1": "π",
"2": "π",
"3": "π",
"4": "π",
"5": "π ",
"6": "π‘",
"7": "π’",
"8": "π£",
"9": "π€",
"10": "π₯",
"11": "π¦",
}
# Split up the time to get the hours & minutes sections
time_split = time.split(":")
hour = int(time_split[0])
minutes = time_split[1].split(" ")[0]
# We need to adjust the hour if we use 24 hour-time.
hour = 12 - hour if hour > 11 else hour
clock = half_emojis[str(hour)] if int(minutes) == 30 else hour_emojis[str(hour)]
return clock
|
4ba4864d6fbdced7413e3233cba35c3a8d45a9f7
| 658,491 |
import six
import re
def listsearch(query, item):
    """Return whether `query` matches an input/output list entry.

    The lists of input and output files can consist either of file names
    (strings), or lists containing the filename and the hash of the file
    (if hashing is enabled).  The query is matched, as a regular
    expression, against both the file name and the hash.

    Parameters:
        query : str
            The search query
        item : str or list containing two strings
            A file name or a list containing a file name and hash

    Returns:
        boolean
    """
    fh = ''
    # Unpack [name, hash] pairs; plain strings carry no hash.  On
    # Python 3, six.string_types is just (str,), so the six dependency is
    # dropped in favor of a plain isinstance check.
    if not isinstance(item, str):
        fh = item[1]
        item = item[0]
    return bool(re.search(query, item) or
                re.search(query, fh))
|
143debe27f3a206021aa42272da08763a8cae425
| 33,360 |
def get_broker_leader_weights(brokers):
    """Collect the leader weight of every broker into a list."""
    weights = []
    for broker in brokers:
        weights.append(broker.leader_weight)
    return weights
|
64df9685168e6e000bdf69ae577291041160967e
| 590,028 |
def get_euler_solution(derivative, numsteps, upper, initial):
    """Explicit (forward) Euler solution of dy/dx = derivative(x, y).

    Integrates from x = 0 to x = `upper` with `numsteps` iterations,
    starting from y(0) = `initial`.  Works with numpy vectors as well as
    scalars.

    Returns:
        (inputs, outputs): the x grid and the approximated y values.
    """
    step = float(upper) / numsteps
    xs = [0]
    ys = [initial]
    # numsteps - 1 forward steps, matching range(1, numsteps).
    for _ in range(numsteps - 1):
        x_prev, y_prev = xs[-1], ys[-1]
        xs.append(x_prev + step)
        ys.append(y_prev + step * derivative(x_prev, y_prev))
    return xs, ys
|
e00c0411c854f31d71bc552f0361ead876a282f0
| 584,542 |
def return_true(*args, **kwargs):
    """Constant predicate: ignore all arguments and return True.

    Returns:
        bool -- True
    """
    return True
|
227db51b7c476d4b865be3b9ff58f05c1e0f7e09
| 380,962 |
def _recursive_dict_without_none(a_dict, exclude=None):
"""
Remove all entries with `None` value from a dict, recursively.
Also drops all entries with keys in `exclude` in the top level.
"""
if exclude is None:
exclude = []
result = {}
for (k, v) in a_dict.items():
if v is not None and k not in exclude:
if isinstance(v, dict):
v = _recursive_dict_without_none(v)
elif isinstance(v, list) and v and isinstance(v[0], dict):
v = [_recursive_dict_without_none(element) for element in v]
result[k] = v
return result
|
4cb4bb045d840eeaad786804f8826d212f64d7c9
| 617,558 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.