content | sha1 | id
---|---|---|
def replace_right(source, target, replacement, replacements=None):
    """
    String replace rightmost instance(s) of a string.
    Args:
        source (string): the source to perform the replacement on
        target (string): the string to search for
        replacement (string): the replacement string
        replacements (int or None): if an integer, the rightmost N
            occurrences are replaced; if None, only the rightmost one is
    """
    if replacements is None:
        replacements = 1  # str.rsplit() does not accept None for maxsplit
    return replacement.join(source.rsplit(target, replacements))
|
636fc9068329f6b6734a2fe1a360adf2285a47fa
| 443,957 |
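A quick illustration of the rsplit/join trick (for the function as defined above):

```python
print(replace_right("one-two-one", "one", "1"))  # 'one-two-1' (rightmost only)
print(replace_right("1-2-1-2", "2", "9", 2))     # '1-9-1-9'
```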
def is_empty_or_html(line):
"""Return True for HTML line and empty (or whitespace only) line.
line -- string
    The Rfam adaptor that retrieves records includes two HTML tags in
the record. These lines need to be ignored in addition to empty lines.
"""
if line.startswith('<pre') or line.startswith('</pre'):
return True
return (not line) or line.isspace()
|
07fd597f43f158e03ac00065640de848f2540253
| 554,075 |
import math
from typing import Dict
def is_balanced(color: Dict[str, float]) -> bool:
    """takes a dictionary whose keys are 'R', 'G', and 'B'
    and whose values are between 0 and 1 as input
    and returns True if they add up to 1.0
    >>> is_balanced({'R': 0.2, 'G': 0.6, 'B': 0.2})
    True
    >>> is_balanced({'R': 0.6, 'G': 0.6, 'B': 0.2})
    False
    >>> is_balanced({'R': 0.1, 'G': 0.6, 'B': 0.3})
    True
    """
    total = 0.0
    for value in color.values():
        total += value
    # Compare with a tolerance: float sums such as 0.7 + 0.2 + 0.1 can land
    # just below 1.0, so a strict == 1.0 check may wrongly return False.
    return math.isclose(total, 1.0)
|
350a28a27b37921966af485833a7be46622d2952
| 204,273 |
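A small check of why the tolerance matters; float addition is order-dependent and can land just below 1.0:

```python
print(0.7 + 0.2 + 0.1)                              # 0.9999999999999999
print(is_balanced({'R': 0.7, 'G': 0.2, 'B': 0.1}))  # True, thanks to math.isclose
```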
def edgepoints_from_network(network, attribute=False):
"""
Obtains a list of projected points which are midpoints of edges
Parameters:
network: a network with/without attributes
attribute: boolean
if true, one of return values includes attributes for each edge
Returns:
        id2linkpoints: a dictionary that associates a sequential id to the projected midpoint of each edge
id2attr: a dictionary that associates a sequential id to the attributes of each edge
link2id: a dictionary that associates each edge to its id
"""
link2id, id2linkpoints, id2attr = {}, {}, {}
counter = 0
for n1 in network:
for n2 in network[n1]:
if (n1,n2) not in link2id or (n2,n1) not in link2id:
link2id[(n1,n2)] = counter
link2id[(n2,n1)] = counter
if type(network[n1][n2]) != list:
half_dist = network[n1][n2]/2
else:
half_dist = network[n1][n2][0]/2
if n1[0] < n2[0] or (n1[0] == n2[0] and n1[1] < n2[1]):
id2linkpoints[counter] = (n1,n2,half_dist,half_dist)
else:
id2linkpoints[counter] = (n2,n1,half_dist,half_dist)
if attribute:
id2attr[counter] = network[n1][n2][1:]
counter += 1
return id2linkpoints, id2attr, link2id
|
2a5a40c43b803ed75d48800423123fb7a52ffa60
| 144,843 |
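A minimal sketch of the expected input, assuming nodes are (x, y) coordinate tuples (the function compares n1[0] and n1[1]) and edge values are distances; the network dict here is hypothetical:

```python
network = {
    (0, 0): {(2, 0): 4.0},
    (2, 0): {(0, 0): 4.0, (2, 2): 2.0},
    (2, 2): {(2, 0): 2.0},
}
id2linkpoints, id2attr, link2id = edgepoints_from_network(network)
# both directions of an undirected edge map to the same sequential id
print(link2id[((0, 0), (2, 0))] == link2id[((2, 0), (0, 0))])  # True
```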
import torch
def mse_loss(x, x_hat):
"""
Returns the MSE between an image and its reconstruction
INPUT:
x: Tensor (B, C, H, W) -> source image
x_hat: Tensor (B, C, H, W) -> reconstruction
OUTPUT:
        mse_loss_sum: Tensor (B,) -> sum of the squared errors for each image pair
"""
mse_loss = torch.nn.MSELoss(reduction='none')
recons_error = mse_loss(x, x_hat)
mse_loss_sum = torch.sum(recons_error, dim=(1, 2, 3))
return mse_loss_sum
|
4eec22e6ae42595ab31dc73a78a73af24bdb6acc
| 402,898 |
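A quick usage sketch with random tensors, assuming a batch of 8 RGB images:

```python
import torch
x = torch.rand(8, 3, 64, 64)      # source images
x_hat = torch.rand(8, 3, 64, 64)  # reconstructions
per_image = mse_loss(x, x_hat)    # one summed error per image
print(per_image.shape)            # torch.Size([8])
```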
def calcSurroundingIdxs(idxs, before, after, idxLimit=None):
"""
Returns (idxs - before), (idxs + after), where elements of
idxs that result in values < 0 or > idxLimit are removed; basically,
this is useful for extracting ranges around certain indices (say,
matches for a substring) that are contained fully in an array.
>>> idxs = [1, 3, 4]
>>> calcSurroundingIdxs(idxs, 0, 0)
([1, 3, 4], [1, 3, 4])
>>> calcSurroundingIdxs(idxs, 1, 0)
([0, 2, 3], [1, 3, 4])
>>> calcSurroundingIdxs(idxs, 1, 1)
([0, 2, 3], [2, 4, 5])
>>> calcSurroundingIdxs(idxs, 1, 1, 4)
([0, 2], [2, 4])
"""
if idxLimit is None:
idxs = [idx for idx in idxs if (idx - before >= 0)]
else:
idxs = [idx for idx in idxs if (idx - before >= 0) and (idx + after <= idxLimit)]
beforeIdxs = [x - before for x in idxs]
afterIdxs = [x + after for x in idxs]
return beforeIdxs, afterIdxs
|
72b064dd8f79762e18d1dd2a9d1ab84628a4e1a8
| 650,130 |
def parse(renpy_nodes, renpy_Define, py_eval_bytecode, renpy_ADVCharacter):
"""
Retrieves all declared characters in renpy_nodes
:param renpy_nodes: all nodes from renpy game
(renpy.game.script.namemap)
:param renpy_Define: the renpy Define class (renpy.ast.Define)
:param py_eval_bytecode: the renpy py_eval_bytecode
(renpy.python.py_eval_bytecode)
:param renpy_ADVCharacter: the renpy ADVCharacter class
(renpy.character.ADVCharacter)
:returns: a dict with all characters
"""
res = {}
    for _key, value in renpy_nodes.items():
# get all Define statements
if isinstance(value, renpy_Define):
char = py_eval_bytecode(value.code.bytecode)
# only if it's defining an ADVCharacter
if ( value.store == "store"
and isinstance(char, renpy_ADVCharacter)
and value.varname != "_narrator"
and value.varname != "centered"
and value.varname != "vcentered"):
color = char.who_args["color"] if "color" in char.who_args else None
res[value.varname] = {
"name": char.name,
"color": color
}
return res
|
2c97e91ee37f33310811890e85c481fa82f78539
| 270,200 |
def get_reference_keys(xml_node):
"""reference an xml_node in the catalog: ``catalog[section][line]``"""
section = xml_node.tag.split("}")[-1]
line = "Line %d" % xml_node.sourceline
return section, line
|
766e875a068c344093d9ecf11d08da28c7b497b6
| 64,906 |
def equals(version:str, releases:dict) -> list:
"""
Get a specific release
Parameters
----------
version : str
desired version
releases : dict
dictionary of all releases in one package
Returns
-------
list
desired release content
"""
    vx = version.replace("==", "").replace("(", "").replace(")", "").replace(" ", "")
    try:
        r = list(releases[vx])
    except KeyError:
        return ["Error"]
    r.append(vx)
    return r
|
2617995aa6b669140dbf18d9b2b0b52a2d176308
| 15,162 |
from pathlib import Path
def golden_snippet(filename: str) -> str:
"""Load the golden snippet with the name provided"""
snippet_path = Path(__file__).parent / "golden_snippets" / filename
return snippet_path.read_text()
|
f700e5928b48a881db299a139d5a1f616dcc65d6
| 358,608 |
def get_bind_addr(conf, default_port=None):
"""Return the host and port to bind to."""
return (conf.bind_host, conf.bind_port or default_port)
|
e7791a9eafd0b2386f92755a8a8d3dddd70d52ce
| 681,854 |
def parse_command(command):
"""
Parses a command string into a list of arguments.
"""
args = []
arg = ""
in_quote = False
for char in command:
if char == '"':
if in_quote:
args.append(arg)
arg = ""
in_quote = False
else:
in_quote = True
        elif char == " " and not in_quote:
            # skip runs of spaces (and the space right after a closing quote)
            # so no empty arguments are emitted
            if arg:
                args.append(arg)
            arg = ""
else:
arg += char
if arg:
args.append(arg)
return args
|
8a01040c6816793b9fda8731d481ae22161df017
| 293,917 |
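A quick demonstration of the quote handling (for the version above):

```python
print(parse_command('copy "my file.txt" backup'))
# ['copy', 'my file.txt', 'backup'] -- the quoted space is preserved
```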
import six
def _check_input(idadf, target, features, ignore_indexer=True):
"""
Check if the input is valid, i.e. if each column in target and features
exists in idadf.
Parameters
----------
target: str or list of str
A column or list of columns to be used as target
features: str or list of str
A column or list of columns to be used as feature
ignore_indexer: bool, default: True
If True, remove the indexer from the features set, as long as an
indexer is defined in idadf
"""
if target is not None:
if isinstance(target, six.string_types):
if target not in idadf.columns:
raise ValueError("Unknown target column %s"%target)
target = [target]
else:
if hasattr(target, '__iter__'):
target = list(target)
for x in target:
if x not in idadf.columns:
raise ValueError("Unknown target column %s"%x)
if features is not None:
if isinstance(features, six.string_types):
if features not in idadf.columns:
raise ValueError("Unknown feature column %s"%features)
features = [features]
else:
if hasattr(features, '__iter__'):
features = list(features)
for x in features:
if x not in idadf.columns:
raise ValueError("Unknown feature column %s"%x)
    if target is None:
        if features is not None and len(features) == 1:
            raise ValueError("Cannot compute correlation coefficients of only one"
                             " column (%s), need at least 2" % features[0])
    else:
        if len(target) == 1:
            features = [x for x in idadf.columns if x not in target]
        else:
            features = list(idadf.columns)
    ## Remove indexer from feature list
    # Correlation against a primary-key indexer is useless and expensive to compute
    if ignore_indexer is True:
        if idadf.indexer and features is not None and idadf.indexer in features:
            features.remove(idadf.indexer)
    # Catch the case where users ask for the correlation between the two same columns
    if target is not None and target == features:
        if len(target) == 1:
            raise ValueError("The correlation value of two same columns is always maximal")
if target is None:
if features is None:
target = list(idadf.columns)
else:
target = features
return target, features
|
1f133839bf0c10396bdcf036db30251d71c7ff3f
| 27,591 |
def status(cert):
"""Return a string with the status of the certificate."""
value = 'Valid'
if cert.is_revoked:
value = 'Revoked'
elif cert.is_expired:
value = 'Expired'
return value
|
b5a22642074de6730f293e9d5b24d630d37606cc
| 386,408 |
def fill_gaps(df, gap_filler_dict):
"""
    Given a Pandas dataframe and a dictionary containing the column names
    and the correct 'fillers' for each column, this function will fill
    each column with the correct values when empty cells are found.
    Parameters:
        df (pd.DataFrame): the variable to which the dataframe
            containing the csv data is assigned
        gap_filler_dict (dict): a dictionary with column name and value
            to fill gaps as key value pairs, e.g.
            {"Age":"All","Sex":"T"}
    Returns:
        pd.DataFrame: A dataframe with all cells full"""
    # A dict value fills column by column; pandas raises NotImplementedError
    # when a dict is combined with axis=1, so fill along the default axis.
    df = df.fillna(gap_filler_dict)
return df
|
bc7f6eaac7d9d51231027673ae8408a07bd12ddc
| 252,774 |
import functools
import time
def timeit(func):
    """
    Simple wrapper to time a function. Prints the execution time after the
    method finishes.
    """
    @functools.wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print('Execution Time ({0}): {1:.5f} seconds'.format(
func.__name__, end_time - start_time))
return result
return wrapper
|
37a657ac013739329a84b619153fdfa781181bd8
| 23,251 |
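A usage sketch; `slow_sum` is a made-up function and the printed time will vary:

```python
@timeit
def slow_sum(n):
    return sum(range(n))

slow_sum(10_000_000)  # prints something like: Execution Time (slow_sum): 0.21347 seconds
```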
def get_max_length(graphs):
""" Get the max length among sequences. """
max_length = 0
for cascade_id in graphs:
# traverse the graphs for max length sequence
for sequence in graphs[cascade_id]:
max_length = max(max_length, len(sequence[0]))
return max_length
|
2dfa82526a291c08bfef0c8e5edb06b693caa224
| 19,970 |
async def get_events(database):
"""Get events."""
events_query = 'select id, name, year from events order by year, name'
events = await database.fetch_all(events_query)
return [dict(e) for e in events]
|
d4c722ea329ee46ce0f428adc7e4f6fef3d7365f
| 56,929 |
def mock_shutil_which_None(*args, **kwargs):
"""
Mock a call to ``shutil.which()`` that returns None
"""
print("[mock] shutil.which - NotFound")
#if True: raise FileNotFoundError("Generated by MOCK")
return None
|
fcf37b48fad0ddb42f58de664f0c683c06b64857
| 403,429 |
def _dart_web_application_outputs(dump_info, deferred_lib_count):
"""Returns the expected output map for dart_web_application."""
outputs = {
"js": "%{name}.js",
"deps_file": "%{name}.js.deps",
"sourcemap": "%{name}.js.map",
}
if dump_info:
outputs["info_json"] = "%{name}.js.info.json"
for i in range(1, deferred_lib_count + 1):
outputs["part_js%s" % i] = "%%{name}.js_%s.part.js" % i
outputs["part_sourcemap%s" % i] = "%%{name}.js_%s.part.js.map" % i
return outputs
|
f7dc13b2ced07a010baa86b03304acc17817befe
| 594,051 |
from typing import List
from typing import Any
from typing import Union
def trim_results_by_limit(results: List[Any], limit: Union[int, str] = 100) -> List[Any]:
"""Trim list of results so only a limited number is returned.
Args:
results: The list of results.
limit: The upper limit of the results to return. If limit is set to -1 all results will be returned.
Return the trimmed results.
"""
if int(limit) == -1:
return results
return results[:int(limit)]
|
da6178158f7da10a68ffcc878af7851104095860
| 357,031 |
from typing import List
def split_features_text(
features_text: str) -> List[str]:
"""
Args:
features_text:
The FEATURES (i.e. annotation) section of genbank file, for example:
'FEATURES Location/Qualifiers
' source 1..168903
' /organism="Enterobacteria phage T4"
' /mol_type="genomic DNA"
' /db_xref="taxon:10665"
' CDS complement(12..2189)
' /gene="rIIA"
' /locus_tag="T4p001"
' /db_xref="GeneID:1258593"
Returns:
Break it into a list of single feature text, for example
feature_text_1:
' source 1..168903
' /organism="Enterobacteria phage T4"
' /mol_type="genomic DNA"
' /db_xref="taxon:10665"
feature_text_2:
' CDS complement(12..2189)
' /gene="rIIA"
' /locus_tag="T4p001"
' /db_xref="GeneID:1258593"
"""
list_ = []
for line in features_text.splitlines()[1:]: # first line is always 'Location/Qualifiers', so we don't need it.
if line.strip() == '': # skip empty line
continue
if line.startswith(' '*21): # not the first line of a new feature
list_[-1] += line + '\n'
else: # the first line of a new feature, because it doesn't start with 21 spaces.
list_.append(line + '\n')
return list_
|
6100bf12ada0f50e8deabda8b09790306ec68556
| 382,515 |
def quantiles(data, quantiles):
"""
    Get the requested quantiles for the given data.
    :param data: The dataframe.
    :param quantiles: List of quantiles to compute.
    :return: List of quantiles for the provided data.
"""
return [data.quantile(q=quantile) for quantile in quantiles]
|
5d1227374383cc9a528ba55c14cb41ca9efce1d4
| 511,713 |
def split(history: str) -> list:
"""
Splits history by specific keyword and removes leading '/'
:param history: String
:return: [String]
"""
return [his[1:] if his[0:1] == '/' else his for his in history.split('-')]
|
1326d066e4a98de395c9f0e4d7a3ebe893daa39f
| 374,272 |
import warnings
def pull_halo_output(h5file, clusterID, apertureID, dataset):
"""
Function to extract a dataset from a Bahamas snapshot output.
:param h5file: The h5py File object to extract the data from
:param clusterID: The number of cluster in the order imposed by FoF
:param apertureID: int(0-22) The index of the spherical aperture centred on the CoP
:param dataset: The name of the dataset to pull
:return: None if the cluster does not exist in the file, the dataset as np.ndarray if it exists
"""
if f'halo_{clusterID:05d}' not in h5file:
warnings.warn(f"[-] Cluster {clusterID} not found in snap output.")
return None
else:
return h5file[f'halo_{clusterID:05d}/aperture{apertureID:02d}/{dataset}'][...]
|
d389af763c7dc7a3c6e54a1f61f4906c7fa4dc0e
| 15,679 |
def similar_email(anon, obj, field, val):
"""
Generate a random email address using the same domain.
"""
return val if 'betterworks.com' in val else '@'.join([anon.faker.user_name(field=field), val.split('@')[-1]])
|
d98f5e9d75f05efba3fce28c2f2c246129fc72b8
| 154,899 |
def is_discrete(num_records: int, cardinality: int, p=0.15):
"""
Estimate whether a feature is discrete given the number of records
observed and the cardinality (number of unique values)
The default assumption is that features are not discrete.
Parameters
----------
num_records : int
The number of observed records
    cardinality : int
        Number of unique observed values
    p : float, optional
        Upper bound on (cardinality + 1) / num_records for the feature
        to be considered discrete (default 0.15)
Returns
-------
discrete : bool
Whether the feature is discrete
"""
if cardinality >= num_records:
return False
if num_records < 1:
return False
if cardinality < 1:
raise ValueError("Cardinality must be >= 1 for num records >= 1")
discrete = False
density = num_records/(cardinality + 1)
if 1/density <= p:
discrete = True
return discrete
|
44fe901c8301389327572b9795a9a0ea9622d7c0
| 188,522 |
def asy_number(value) -> str:
"""Format an asymptote number"""
return "%.5g" % value
|
47b6c3ec46a76a42673d12c3c8d6a7c7f465b59d
| 386,871 |
def autoSize(image, resolution=1920):
"""Determines the size of an image by setting the largest side to the size of the resolution"""
if image.width >= image.height:
size = (resolution, round((resolution / image.width) * image.height))
else:
size = (round((resolution / image.height) * image.width), resolution)
return size
|
c0559f8454946ec5b23020f656d2a70de046e415
| 624,070 |
from datetime import datetime
import hashlib
def genCode(user):
"""
Generates a unique code to be used with URLS
user : the user to generate the code for
Returns : created code
"""
secret = "{}{}".format(user,datetime.now().strftime("%Y%m%d%H%M%S%f"))
return hashlib.sha512(secret.encode('UTF-8')).hexdigest()
|
ec8d2dad193a52ffd908ffabca69cb99c3a4698e
| 504,883 |
def k_value(rela_count, answer_count, question_count):
"""
k = count(Q&A) / (count(Q) * count(A))
:param rela_count:
:param answer_count:
:param question_count:
:return:
"""
    if answer_count == 0 or question_count == 0:
        return 0
return rela_count / (answer_count * question_count)
|
c60703fc7dd51b7346b52955e9abfae373132027
| 371,526 |
def get_keys(dic, key_path=""):
"""get_keys of nested dictionary
>>> a = {"a": 1, "b": {"c": 2}}
>>> get_keys(a)
['/a', '/b/c']
"""
keys_list = []
def get_keys(dic, key_path):
if isinstance(dic, dict):
for i in dic:
get_keys(dic[i], key_path + "/" + str(i))
else:
keys_list.append(key_path)
get_keys(dic, key_path)
return keys_list
|
1dc0bbca91d51dc5074aaaa56466e267a28e1816
| 593,165 |
def capacity_rule(mod, g, p):
"""
The capacity of projects of the *gen_ret_bin* capacity type is a
pre-specified number for each of the project's operational periods
multiplied with 1 minus the binary retirement variable.
"""
return mod.gen_ret_bin_capacity_mw[g, p] \
* (1 - mod.GenRetBin_Retire[g, p])
|
ba4ccad8d620da084912a65a80793f54fb84b374
| 706,271 |
def quote_fname(fname):
"""quote file name or any string to avoid problems with file/directory names
that contain spaces or any other kind of nasty characters
"""
return '"%s"' % (
fname
.replace('\\', '\\\\')
.replace('"', '\"')
.replace('$', '\$')
.replace('`', '\`')
.replace('!', '\!')
)
|
605d9d2c8edc5dc0f71bb7f13151c1e6a6201228
| 409,379 |
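A small check of the escaping (for the version above); the file name is made up:

```python
print(quote_fname('my "file" $1.txt'))  # "my \"file\" \$1.txt"
```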
def brown(text):
""" Return this text formatted brown (maroon) """
return '\x0305%s\x03' % text
|
36eac341d092c137160803972cce9a762555058e
| 268,204 |
def ranges_overlap(range1, range2):
"""Whether two 2-element lists have an overlap.
Args:
range1, range2: Each a 2-element list with numbers.
Returns:
True if there is overlap, False otherwise.
Raises:
TypeError: An argument is not a list.
ValueError: One of the list doesn’t have exactly 2 elements.
"""
if not isinstance(range1, list) or not isinstance(range2, list):
raise TypeError('Both arguments must be a list.')
if len(range1) != 2 or len(range2) != 2:
raise ValueError('Both lists must have two elements each.')
if max(range1) < min(range2):
return False
if max(range2) < min(range1):
return False
return True
|
31a9467045c53ad394a374e89d10e87110c46f40
| 492,848 |
import requests
def get_user_friends_locations_list(
bearer_token: str, screen_name: str, friends_num: int=50
) -> list:
"""
This function gets user's friends list consisting of name-location pairs.
"""
base_url = 'https://api.twitter.com/'
search_headers = {
'Authorization': f'Bearer {bearer_token}'
}
search_params = {
'screen_name': f'{screen_name}',
'count': friends_num
}
search_url = f'{base_url}1.1/friends/list.json'
response = requests.get(
search_url, headers=search_headers, params=search_params
)
data = response.json()
return [
(user['name'], user['location'])
for user in data['users']
if len(user['location']) != 0
]
|
f91f2692fa93134bdcdd5221e4f9b4aacbcecf61
| 307,530 |
import copy
def sharded_transpose(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.Tensor.transpose`` op.
Returns a new sharded tensor with the given dimensions transposed.
    During the transpose, we keep the original sharding dim if the sharding
    dim is neither dim0 nor dim1. Otherwise, we swap the sharding
    dim with the other input of transpose.
    Args: (same as ``torch.Tensor.transpose``.)
        dim0 (Int): the first dimension to be transposed.
        dim1 (Int): the second dimension to be transposed.
    Returns:
        A (local_tensor, sharding_spec, st_size) tuple: the transposed local
        shard, the updated sharding spec, and the new global size.
"""
def _swap_meta_data(data, idx0, idx1):
"""
Swap the item at idx0 and idx1 in the data list.
"""
data[idx0], data[idx1] = data[idx1], data[idx0]
st = args[0]
dim0 = args[1]
dim1 = args[2]
sharding_spec = copy.deepcopy(st.sharding_spec())
if sharding_spec.dim == dim0:
sharding_spec.dim = dim1
elif sharding_spec.dim == dim1:
sharding_spec.dim = dim0
st_size = list(st.size())
_swap_meta_data(st_size, dim0, dim1)
local_tensor = st.local_tensor().transpose(dim0, dim1).contiguous()
return local_tensor, sharding_spec, tuple(st_size)
|
432287927eaefaea87d06e47a0fbf04c3580a951
| 331,819 |
def _get_write_mode(name: str) -> str:
"""Get the write mode to use with :func:`tarfile.open`."""
if name.endswith('.tar'):
return 'w'
if name.endswith(".tar.gz") or name.endswith(".tgz"):
return "w:gz"
if name.endswith(".tar.bz2"):
return "w:bz2"
if name.endswith(".tar.lzma"):
return "w:xz"
raise ValueError(f"{name} does not end with a valid tarfile extension.")
|
427590f8c3d4ff6a61cbf2df7c735ede5338d678
| 160,657 |
import textwrap
def construct_using_clause(metarels, join_hint, index_hint):
"""
Create a Cypher query clause that gives the planner hints to speed up the query
Parameters
----------
metarels : a metarels or MetaPath object
the metapath to create the clause for
join_hint : 'midpoint', bool, or int
whether to add a join hint to tell neo4j to traverse form both ends of
the path and join at a specific index. `'midpoint'` or `True` specifies
joining at the middle node in the path (rounded down if an even number
of nodes). `False` specifies not to add a join hint. An int specifies
the node to join on.
index_hint : bool
whether to add index hints which specifies the properties of the source
and target nodes to use for lookup. Enabling both `index_hint` and
`join_hint` can cause the query to fail.
"""
using_query = ""
# Specify index hint for node lookup
if index_hint:
using_query = (
"\n"
+ textwrap.dedent(
"""\
USING INDEX n0:{source_label}({property})
USING INDEX n{length}:{target_label}({property})
"""
)
.rstrip()
.format(
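                # NOTE: 'property' below resolves to the Python builtin, not a node
                # property name; the intended property name is not defined in this snippet.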
property=property,
source_label=metarels[0][0],
target_label=metarels[-1][1],
length=len(metarels),
)
)
# Specify join hint with node to join on
if join_hint is not False:
if join_hint is True or join_hint == "midpoint":
join_hint = len(metarels) // 2
join_hint = int(join_hint)
assert join_hint >= 0
assert join_hint <= len(metarels)
using_query += f"\nUSING JOIN ON n{join_hint}"
return using_query
|
61c4dc58782aeb1bc31affb7ec2c74361eac8089
| 27,889 |
def has_duplicates(array):
"""Use a dictionary to write a faster, simpler version of has_duplicates"""
d = dict()
for elem in array:
if elem in d:
return True
d.setdefault(elem, 1)
return False
|
213964cef26c835a20604f788559ffb8dc28713c
| 66,942 |
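A quick usage check:

```python
print(has_duplicates([1, 2, 3]))     # False
print(has_duplicates([1, 2, 2, 3]))  # True
```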
import importlib
def get_suite_amount_of_workers(workspace, project, suite):
"""Get the amount of workers defined in a suite.
Default is 1 if suite does not have workers defined"""
amount = 1
suite_module = importlib.import_module('projects.{0}.suites.{1}'.format(project, suite),
package=None)
if hasattr(suite_module, 'workers'):
amount = suite_module.workers
return amount
|
7126ba3b1cfe3ab39f100fbdc8147009f50e7440
| 67,922 |
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
@param size : (int) total size of the data
@param batch_size : (int) batch size.
@return batch_slices: (list) contains (begin_idx, end_idx) for every batch.
"""
num_batches = (size-1)//batch_size + 1
return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(num_batches)]
|
b2cfd9b3b1982d0b01d61e55286421197b5a6a2b
| 475,342 |
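A worked example of the batch slicing, using made-up sizes:

```python
print(make_batches(10, 3))  # [(0, 3), (3, 6), (6, 9), (9, 10)]
```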
def make_param_name_multiple_index(param_parts):
"""
Make the key name from param parts.
For example, ("param", "tag", "2", "1") -> ("param2", "1").
"""
return (param_parts[0] + param_parts[-2], param_parts[-1])
|
cc3cbad59bc89273bc35ba8811f1f5d202bc8c77
| 44,619 |
def to(*args, **kwargs):
"""
Returns a closure that applies x.to(*args, **kwargs) to x
"""
def apply_to(x):
return x.to(*args, **kwargs)
return apply_to
|
f9a52a0cba1513f743190330c46d881d13efa98d
| 579,213 |
def calc_hilo(min_val, max_val, df, cols_to_test):
""" Return lowest and highest values from min_val and max_val if present, or calculate from df. """
# Calculate (or blindly accept) the range of the y-axis, which must be the same for all four axes.
if (max_val is None) and (len(df.index) > 0):
highest_possible_score = max([max(df[col]) for col in cols_to_test])
else:
highest_possible_score = max_val
if (min_val is None) and (len(df.index) > 0):
lowest_possible_score = min([min(df[col]) for col in cols_to_test])
else:
lowest_possible_score = min_val
return lowest_possible_score, highest_possible_score
|
49f0bc0ed1080ed0c59828fcdf1263554f32dc5e
| 40,737 |
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int, optional): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
|
2fcc56484510e1140b8cb72835d1ea6c83736d35
| 488,608 |
import re
def split_string(instr, item, split):
"""
Split instr as a list of items separated by splits.
@param instr: string to be split
@param item: regex for item to be split out
@param split: regex for splitter
@return: list of strings
"""
if not instr:
return []
return [h.strip() for h in re.findall(
r'%s(?=%s|\s*$)' % (item, split), instr
)]
|
848de5c1ff9d953ba4881127f61fe9a133972344
| 332,161 |
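A usage sketch with hypothetical item/split regexes, here splitting a comma-separated list:

```python
print(split_string("a, b, c", r"[^,]+", r","))  # ['a', 'b', 'c']
```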
def run_test(session, m, data, batch_size, num_steps, reader):
"""Runs the model on the given data."""
costs = 0.0
iters = 0
state = session.run(m.initial_state)
for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):
cost, state = session.run([m.cost, m.final_state], {
m.input_data: x,
m.targets: y,
m.initial_state: state
})
costs += cost
iters += 1
return costs / iters
|
d430aadb12fca35cc674358c514a94da01298893
| 528,491 |
def vector_for_keys(keyed_vectors, keys: list[str]):
"""Return the sum of vectors in keyed_vectors for the given keys"""
if not keys:
raise ValueError("keys cannot be empty")
return keyed_vectors[keys].sum(axis=0)
|
8cdd638cf2b903985aac75ace124dffcbf7f249e
| 491,719 |
def ipv6_hdr_len(ipv6):
"""Calculate length of headers before IPv6 Fragment header.
Args:
ipv6 (dpkt.ip6.IP6): DPKT IPv6 packet.
Returns:
int: Length of headers before IPv6 Fragment header
:class:`dpkt.ip6.IP6FragmentHeader` (:rfc:`2460#section-4.5`).
As specified in :rfc:`2460#section-4.1`, such headers (before the IPv6 Fragment Header)
includes Hop-by-Hop Options header :class:`dpkt.ip6.IP6HopOptsHeader` (:rfc:`2460#section-4.3`),
Destination Options header :class:`dpkt.ip6.IP6DstOptHeader` (:rfc:`2460#section-4.6`) and
Routing header :class:`dpkt.ip6.IP6RoutingHeader` (:rfc:`2460#section-4.4`).
"""
hdr_len = ipv6.__hdr_len__
# IP6HopOptsHeader / IP6DstOptHeader / IP6RoutingHeader
for code in (0, 60, 43):
ext_hdr = ipv6.extension_hdrs.get(code)
if ext_hdr is not None:
hdr_len += ext_hdr.length
return hdr_len
|
746a720c0b6c5caa3749be09fb4d99566f7cdb8e
| 284,080 |
def get_user_and_conv(conversations, event):
"""gets user and conversation from a hangups event"""
conv = conversations.get(event.conversation_id)
user = conv.get_user(event.user_id)
return user, conv
|
fe27723f9047f4c595070025214173907d15a6b1
| 419,817 |
def sort_dict_desc(dict_of_dfs: dict, column_name: str) -> dict:
    """Sort a dictionary of pandas dataframes in descending order of the sum of
    the supplied column; returns a dict mapping each key to that column sum."""
    column_sums = {key: df[column_name].sum() for key, df in dict_of_dfs.items()}
    return dict(sorted(column_sums.items(), key=lambda item: item[1], reverse=True))
|
271b761e86f51ae59a19b8773f43718dfbdc80fa
| 250,497 |
def determine_color(number):
    """
    In number ranges from 1 to 10 and 19 to 28, odd numbers are red and even are black.
    In ranges from 11 to 18 and 29 to 36, odd numbers are black and even are red.
    """
    if 1 <= number <= 10 or 19 <= number <= 28:
        return "black" if number % 2 == 0 else "red"
    elif 11 <= number <= 18 or 29 <= number <= 36:
        return "red" if number % 2 == 0 else "black"
    # raising a plain string is invalid in Python 3; raise a real exception
    # for out-of-range input instead of silently coloring it
    raise ValueError("number must be between 1 and 36")
|
ab27cc7f40c497d5710645d393f909f176bbde28
| 627,860 |
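A few spot checks against the docstring's rules:

```python
print(determine_color(4))   # 'black' (1-10, even)
print(determine_color(13))  # 'black' (11-18, odd)
print(determine_color(30))  # 'red'   (29-36, even)
```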
def get_author_name(soup):
"""Get the author's name from its main page.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
string: name of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_name(soup)
        'J.K. Rowling'
"""
author_h1 = soup.find('h1', attrs={'class': 'authorName'})
return author_h1.find('span').text
|
bce8e3399ebf6d8179459bcb7f45184d7b90313d
| 506,577 |
def isHellaTemp(filename):
""" Determine whether or not the specified file is a 'hellanzb-tmp-' file """
    return filename.startswith('hellanzb-tmp-')
|
8c9276a169f72981f128e42902e19191969f14c3
| 449,700 |
import threading
def _MakeParallelBenchmark(p, work_func, *args):
  """Create and return a benchmark that runs work_func p times in parallel."""
  def Benchmark(b):  # pylint: disable=missing-docstring
    e = threading.Event()
    def Target():
      e.wait()
      for _ in range(b.N // p):  # floor division: range() requires an int
        work_func(*args)
    threads = []
    for _ in range(p):
      t = threading.Thread(target=Target)
      t.start()
      threads.append(t)
    b.ResetTimer()
    e.set()
    for t in threads:
      t.join()
  return Benchmark
|
bf8afe146351a8f91ecb855f81cecc8f00ab0e43
| 193,143 |
def _get_conditional_probs_from_survival(surv):
"""
Return conditional failure probabilities (for each time interval) from survival curve.
P(T < t+1 | T > t): probability of failure up to time t+1 conditional on individual
survival up to time t.
Args:
surv (pd.DataFrame): dataframe of survival estimates, as .predict() methods return
Returns:
        pd.DataFrame: conditional failure probability of the event
            specifically at each time bucket
"""
conditional_preds = 1 - (surv / surv.shift(1, axis=1).fillna(1))
conditional_preds = conditional_preds.fillna(0)
return conditional_preds
|
9f8067453dd9664e87a39724e2a373ed47adfcf9
| 603,722 |
def _parse_text_with_command(text: str):
""" Parse string that is passed when bot command is invoked. """
    if after_bot_name_text := text.split('</at>')[-1].strip():
return after_bot_name_text.split()
else:
return '', []
|
03559b81e696064fee90c8818cb2825a93cbe719
| 34,057 |
def percent_calc(totalX, totalY):
"""
Takes 2 integers, uses totalX to get 1% and totalY to get final percentage
Returns the final percentage as an int or 0 if not divisible e.g. if X is 0
"""
try:
percent = int((100 / totalX) * totalY)
except ZeroDivisionError:
percent = 0
return percent
|
25c44ebd7461db49433dac7dbf79f48178d31ace
| 405,119 |
import json
def parse_as_json(data):
"""Attempt to parse data as json."""
return json.loads(data)
|
3343e96a35d54f0b59ad6afefe8b40a0c1af2170
| 136,001 |
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
|
2785a102dcfbea919d02360564206584e68896ba
| 490,590 |
import uuid
def get_new_id(data_dir):
"""
    Produce a new unique ID for a receipt, among the receipts in the given
folder.
"""
# A UUID is always unique
return str(uuid.uuid4())
|
9d17aa474f04990cb06aa14f41061c45ffcd8698
| 490,007 |
def get_common_elements(element_list):
"""
:param element_list: list of list where each internal list contains values
:return: a sorted list of elements which are common in all the internal lists
"""
common_element_list = set(element_list[0])
index = 1
while index < len(element_list):
common_element_list = common_element_list.intersection(element_list[index])
index += 1
return sorted(list(common_element_list))
|
fa3233bb2945949837fd70db4d75f5803100e3ee
| 50,396 |
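A quick usage check with made-up lists:

```python
print(get_common_elements([[1, 2, 3], [2, 3, 4], [3, 2, 9]]))  # [2, 3]
```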
import ipaddress
def is_address(entry):
""" Check if entry is a valid IP address """
try:
_ = ipaddress.ip_address(entry)
except ValueError:
return False
return True
|
08e9cc24e7319d03a7a5fc9b9768db4497f76039
| 679,862 |
import json
def kernel_for(notebook):
"""Parses the kernel metadata and returns the kernel display name.
Args:
notebook (Path): The path to the notebook for which to get the kernel.
Returns:
str: The kernel display name, if it exists.
"""
with open(notebook, "r") as f:
nb = json.load(f)
md = nb.get("metadata")
if md:
ks = md.get("kernelspec")
if ks:
return ks["display_name"]
return None
|
6d53b4423dde8ee08296972fccbcc87cf8d40d6b
| 294,796 |
def abbrev_prompt(msg: str, *options: str) -> str:
"""
Prompt the user to input one of several options, which can be entered as
either a whole word or the first letter of a word. All input is handled
case-insensitively. Returns the complete word corresponding to the input,
lowercased.
For example, ``abbrev_prompt("Delete assets?", "yes", "no", "list")``
prompts the user with the message ``Delete assets? ([y]es/[n]o/[l]ist): ``
    and accepts as input ``y``, ``yes``, ``n``, ``no``, ``l``, and ``list``.
"""
options_map = {}
optstrs = []
for opt in options:
opt = opt.lower()
if opt in options_map:
raise ValueError(f"Repeated option: {opt}")
elif opt[0] in options_map:
raise ValueError(f"Repeated abbreviated option: {opt[0]}")
options_map[opt] = opt
options_map[opt[0]] = opt
optstrs.append(f"[{opt[0]}]{opt[1:]}")
msg += " (" + "/".join(optstrs) + "): "
while True:
answer = input(msg).lower()
if answer in options_map:
return options_map[answer]
|
bf3702f9254a8fe1f5879ecff987a9fb3d9ce81c
| 61,394 |
def neighbors(i, diag = False,inc_self=False):
"""
determine the neighbors, returns a set with neighboring tuples {(0,1)}
if inc_self: returns self in results
if diag: return diagonal moves as well
"""
r = [1,0,-1]
c = [1,-1,0]
if diag:
if inc_self:
return {(i[0]+dr, i[1]+dc) for dr in r for dc in c}
else:
return {(i[0]+dr, i[1]+dc) for dr in r for dc in c if not (dr == 0 and dc == 0)}
else:
res = {(i[0],i[1]+1), (i[0],i[1]-1),(i[0]+1,i[1]),(i[0]-1,i[1])}
if inc_self: res.add(i)
return res
|
24b55c3b092c5634ad6491f0111dc088e88d43fb
| 422,870 |
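A couple of spot checks on a made-up grid point:

```python
print(sorted(neighbors((1, 1))))          # [(0, 1), (1, 0), (1, 2), (2, 1)]
print(len(neighbors((1, 1), diag=True)))  # 8
```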
def secant(f, a, b, n):
"""Approximate solution of f(x)=0 on interval [a,b] by the secant method.
Parameters
----------
f : function
The function for which we are trying to approximate a solution f(x)=0.
a,b : numbers
The interval in which to search for a solution. The function returns
None if f(a)*f(b) >= 0 since a solution is not guaranteed.
n : (positive) integer
The number of iterations to implement.
Returns
-------
m_N : number
        The x intercept of the secant line on the Nth interval
m_n = a_n - f(a_n)*(b_n - a_n)/(f(b_n) - f(a_n))
The initial interval [a_0,b_0] is given by [a,b]. If f(m_n) == 0
for some intercept m_n then the function returns this solution.
        If f(a_n), f(b_n) and f(m_n) all have the same sign at any
        iteration, the secant method fails and returns None.
Examples
--------
>>> f = lambda x: x**2 - x - 1
>>> secant(f,1,2,5)
1.6180257510729614
"""
if f(a) * f(b) >= 0:
print("Secant method fails.")
return None
a_n = a
b_n = b
    for _ in range(1, n + 1):
m_n = a_n - f(a_n) * (b_n - a_n) / (f(b_n) - f(a_n))
f_m_n = f(m_n)
if f(a_n) * f_m_n < 0:
a_n = a_n
b_n = m_n
elif f(b_n) * f_m_n < 0:
a_n = m_n
b_n = b_n
elif f_m_n == 0:
# print("Found exact solution.")
return m_n
else:
# print("Secant method fails.")
return None
return a_n - f(a_n) * (b_n - a_n) / (f(b_n) - f(a_n))
|
7f23c4efb0efc38319b87f747f69b3e873d2b5a4
| 373,786 |
import base64
import hmac
import hashlib
def hmac_msg(prf_key_hash, feature):
    """
    Using hmac to produce the pseudonyms for the feature vector.
    :param prf_key_hash: hash key as bytes
    :param feature: feature name as a string (encoded using 'UTF-8')
    :return: hmac value
    """
    message = feature.encode('utf-8')
    sig = base64.b64encode(hmac.new(prf_key_hash, message, hashlib.sha256).digest())
    return sig.decode()
|
7299335c7bcb0cb4bfe0beac8bfb930ab172891c
| 351,878 |
import re
def inputValid(userInput):
    """Verifies user input as valid. Accepts strings. Returns True for valid input, False for invalid."""
    # matches digits, 'd' or 'D', digits, '+' or '-', digits (e.g. "2d6+1")
    regex = r"\d+[dD]\d+[+-]\d+"
    return bool(re.search(regex, userInput))
|
8278fc30a25c26e2e7816a4a729210d0005c4953
| 593,798 |
def pixelNeighborhood(point, image, sigma):
"""
Takes in a point, an image, and sigma
Calculates the width from sigma and creates a pixel "neighborhood"
with the point as the center
Returns the neighborhood
"""
width = int(8*sigma)//2
x,y = point
neighborhood = image[x-width:x+width+1, y-width:y+width+1]
return neighborhood
|
67e51f031e3a51dcea0156bab03805ddf27834cb
| 232,782 |
from typing import Union
from pathlib import Path
def ensure_existing_dir(path: Union[str, Path]):
"""Ensure provided path exists and is a directory.
Args:
path: path to check
Returns:
Path: Path object.
NoneType: If path does not exists or is not a directory
"""
path = Path(path)
if not path.exists():
return None
if not path.is_dir():
return None
return path
|
2e4fdbf0cce27e50839b70d78156eb1e5d0a76f0
| 97,912 |
def StartTranscoding(*, session, pipeline_id, video_path, outputs, output_key_prefix):
"""Start transcoding a video pointed to by keyname
    :param session: The session to use for credentials
:type session: boto3.session.Session
:param pipeline_id: Name of the pipeline into which this job will be pushed
:type pipeline_id: str
:param video_path: Path to the video file (relative to the input bucket)
:type video_path: str
:param outputs: The outputs to which the video file should be transcoded
:type outputs: list
:param output_key_prefix: The prefix for output files
:type output_key_prefix: str
:return: Info about the newly created job
:rtype: dict
"""
etconn = session.connect_to("elastictranscoder")
Jobs = session.get_collection("elastictranscoder", "JobCollection")
jobs = Jobs(connection=etconn)
job = jobs.create(
pipeline_id=pipeline_id,
input={"Key": video_path},
output_key_prefix="{0}/".format(output_key_prefix),
outputs=outputs)
# Return info for the job so that we can get status etc...
return job.get()
|
a0bbee55ec46d0b0ec6e0c77bf72b64cc55c6360
| 159,522 |
def get_neighbors(point):
"""Given a 2D point (represented as a Point object), returns a list of the
four points that neighbor it in the four coordinate directions. Uses the
"copy" method to avoid modifying the original point."""
neighbor_points = []
up = point.copy()
up.setY(up.getY()+1)
neighbor_points.append(up)
down = point.copy()
down.setY(down.getY()-1)
neighbor_points.append(down)
left = point.copy()
left.setX(left.getX()-1)
neighbor_points.append(left)
right = point.copy()
right.setX(right.getX()+1)
neighbor_points.append(right)
return neighbor_points
|
eac40848e3148777df55ad7dfa3b42c2f2a35eb0
| 564,827 |
def is_tepid(G):
"""
Determine if G is tepid.
A Game is tepid if it is numberish, but not a number; that is, it differs
from a number by an infinitesimal amount.
Parameters
----------
G : Game
The Game of interest.
Returns
-------
tepid : bool
Whether the Game is tepid or not.
"""
return G.is_numberish and not G.is_number
|
44c365d205a57e4acf0d802c18815fbe9ee05d0b
| 342,253 |
import copy
def dict_merge(dict1, dict2):
"""
recursive update (not in-place).
    dict2 has precedence for equal keys.
"""
dict1 = copy.deepcopy(dict1)
for key in dict2:
val = dict2[key]
if type(val) is dict:
# merge dictionaries
if key in dict1 and type(dict1[key]) is dict:
dict1[key] = dict_merge(dict1[key], val)
else:
dict1[key] = val
else:
dict1[key] = val
return dict1
|
ab43b0093cfffad60e53d9c2f61a562b011ac5e0
| 509,174 |
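A usage sketch showing the recursive merge and dict2's precedence for equal keys:

```python
a = {"x": 1, "nested": {"y": 2}}
b = {"x": 9, "nested": {"z": 3}}
print(dict_merge(a, b))  # {'x': 9, 'nested': {'y': 2, 'z': 3}}
```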
import re
def extract_variables(sFormula):
""" Extract variables in expression, e.g. {a}*x + {b} -> ['a','b']
The variables are replaced with p[0],..,p[n] in order of appearance
"""
regex = r"\{(.*?)\}"
matches = re.finditer(regex, sFormula, re.DOTALL)
formula_eval=sFormula
variables=[]
ivar=0
    for match in matches:
        var = match.group(1)  # the variable name captured between the braces
        if var not in variables:
            variables.append(var)
            formula_eval = formula_eval.replace('{' + var + '}', 'p[{:d}]'.format(ivar))
            ivar += 1
return variables, formula_eval
|
7ae5b836504876c815b15c87bad774334fd4dd80
| 17,493 |
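Running the docstring's own example through the function:

```python
variables, formula_eval = extract_variables("{a}*x + {b}")
print(variables)     # ['a', 'b']
print(formula_eval)  # 'p[0]*x + p[1]'
```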
def reverse_string(input):
"""
Return reversed input string
Examples:
reverse_string("abc") returns "cba"
Args:
input(str): string to be reversed
Returns:
        a string that is the reverse of input
"""
if len(input) == 0:
return ""
else:
first_char = input[0]
the_rest = slice(1, None)
sub_string = input[the_rest]
reversed_substring = reverse_string(sub_string)
return reversed_substring + first_char
|
6fdae5469c4c2af268b0ea8cdb5875b078d28e0a
| 418,790 |
def centerel(elsize, contsize):
"""Centers an element of the given size in the container of the given size.
Returns the coordinates of the top-left corner of the element relative to
the container."""
w, h = elsize
W, H = contsize
x = (W-w)//2
y = (H-h)//2
return (x, y)
|
72aa035924ea2fe89d76073607f3156b75885410
| 83,629 |
import string
def generate_key_table(validated_key):
"""Description: returns a 5x5 array containing characters according to the
    playfair cipher. In particular, J is replaced with I, and each of the
characters in the 5x5 array is unique.
Arguments:
validated_key (string): takes a valid key as input.
Returns:
key_square (array): 5x5 array containing characters according to the
playfair cipher.
"""
key_square = [[], [], [], [], []]
outer_index, count = 0, 0
# updating key_square with validated_key
for i in validated_key:
if count < 5:
key_square[outer_index].append(i)
else:
count = 0
outer_index += 1
key_square[outer_index].append(i)
count += 1
# filling rest of key_square
ascii_index = 0
for i in range(26):
if ((string.ascii_lowercase[ascii_index] in validated_key) or (ascii_index == 9)):
ascii_index += 1
continue
elif count < 5:
key_square[outer_index].append(string.ascii_lowercase[ascii_index])
else:
count = 0
outer_index += 1
key_square[outer_index].append(string.ascii_lowercase[ascii_index])
ascii_index += 1
count += 1
return key_square
|
41662b27c8e246dbba2bfddb07b46d8f263b853f
| 78,804 |
def get_output(i):
"""Составляет эталонный набор значений, в котором все нули, а i-й элемент — 1
:param i: индекс выхода, который будет установлен в 1
:type i: int
:return: эталонный набор значений
:rtype: list[float]
"""
result = []
for j in range(10):
result.append(0)
result[i] = 1
return result
|
18b5e6a003abb3cf6cc19f6b81fa2d3bb733237b
| 257,179 |
def percent_unique_ngrams_in_train(train_ngrams_dict, gen_ngrams_dict):
"""Compute the percent of ngrams generated by the model that are
present in the training text and are unique."""
# *Total* number of n-grams produced by the generator.
total_ngrams_produced = 0
  for _, value in gen_ngrams_dict.items():
total_ngrams_produced += value
# The unique ngrams in the training set.
unique_ngrams_in_train = 0.
  for key, _ in gen_ngrams_dict.items():
if key in train_ngrams_dict:
unique_ngrams_in_train += 1
return float(unique_ngrams_in_train) / float(total_ngrams_produced)
|
5f07e359c186d13545aebdb573c36e5d5556faea
| 592,554 |
def build_nested_schema_dict(schema_dict):
"""Accepts a dictionary in form of {SchemaRef(): schema} and returns
dictionary in form of {schema_name: {schema_version: schema}}
"""
result = {}
for schema_ref, schema in schema_dict.items():
result.setdefault(schema_ref.name, {})
result[schema_ref.name][schema_ref.version] = schema
return result
|
5e00c03e69053ed72f2ab44dae0ced3bdaa8759d
| 399,477 |
def int2ascii(i: int) -> str:
"""Convert an integer to an ASCII character.
Args:
i (int): Integer value to be converted to ASCII text.
Note:
The passed integer value must be <= 127.
Raises:
ValueError: If the passed integer is > 127.
Returns:
str: The ASCII character associated to the passed integer.
"""
if i > 127:
raise ValueError('The passed integer value must be <= 127.')
return chr(i)
|
f46ed05d425f9277ea6c97a0f8bafb070b15091c
| 28,958 |
import math
def nu_e(n_n, n_e, T_e):
"""approximate calculation of electron collision frequency from Kelly 89
Parameters
----------
n_n : (float)
neutral density cm-3
n_e : (float)
electron density cm-3
T_e : (float)
electron temperature K
"""
nu_e_n = 5.4 * 10**(-10) * n_n * T_e**(1/2)
nu_e_i = (34 + 4.18 * math.log(T_e**3 / n_e)) * n_e * T_e**(-3/2)
return nu_e_n + nu_e_i
|
3c73538dd97a4f03d0a98d8fe4427697089234e8
| 18,740 |
def get_map_annotation(conn, map_ann_id, across_groups=True):
"""Get the value of a map annotation object
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object
OMERO connection.
map_ann_id : int
ID of map annotation to get.
across_groups : bool, optional
Defines cross-group behavior of function - set to
``False`` to disable it.
Returns
-------
kv_dict : dict
The value of the specified map annotation object, as a Python dict.
Examples
--------
>>> ma_dict = get_map_annotation(conn, 62)
>>> print(ma_dict)
{'testkey': 'testvalue', 'testkey2': 'testvalue2'}
"""
if type(map_ann_id) is not int:
raise TypeError('Map annotation ID must be an integer')
return dict(conn.getObject('MapAnnotation', map_ann_id).getValue())
|
3b962fdf4e63fb2afef8ab3b1ce011b45c1626cb
| 519,967 |
def get_chromosome_object(agp):
"""Extracts centromere coordinates and chromosome length from AGP data,
and returns a chromosome object formatted in JSON"""
chr = {}
agp = agp.split('\n')
for i, line in enumerate(agp):
if len(line) == 0 or line[0] == '#':
continue
tabs = line.split("\t")
acc = tabs[0]
start = int(tabs[1])
stop = int(tabs[2])
comp_type = tabs[6]
        if 'accession' not in chr:
chr['accession'] = acc
chr['type'] = 'nuclear'
if comp_type == 'centromere':
chr['centromere'] = {
'start': start,
'length': stop - start
}
if i == len(agp) - 2:
chr['length'] = stop
return chr
|
217d27e8e85618642bcda3380480b9f2e56945fb
| 475,923 |
def pipe_soil_heat_transfer(lambda_soil, D, h0e):
"""
:param lambda_soil: Heat conductivity of a soil, W/(m*K)
:param D: Outer diameter of the pipe, m
:param h0e: Equivalent depth of the gas pipeline axis, m
:return: Pipe-soil heat transfer coefficient, W/m2*K
"""
return (lambda_soil / D) * (0.65 + (D/h0e)**2)
|
5a47568b6bf7392d27897238fe1d6fa4f212eb26
| 244,641 |
def sim_file_to_run(file):
"""Extracts run number from a simulation file path
Parameters
----------
file : str
Simulation file path.
Returns
-------
run : int
Run number for simulation file
Examples
--------
>>> file = '/data/ana/CosmicRay/IceTop_level3/sim/IC79/7241/Level3_IC79_7241_Run005347.i3.gz'
>>> sim_file_to_run(file)
5347
"""
start_idx = file.find('Run')
run = int(file[start_idx+3: start_idx+9])
return run
|
cdbd04a85da96b163da95595dc3313be2c64dec8
| 668,173 |
def _format_key(key):
"""Format table key `key` to a string."""
schema, table = key
table = table or "(FACT)"
if schema:
return "{}.{}".format(schema, table)
else:
return table
|
0f8b6b0c847c88e51ef0ecb5cb9774edd12ff8e1
| 308,421 |
def header(request, response, name, value, append=False):
"""Set a HTTP header.
Replaces any existing HTTP header of the same name unless
append is set, in which case the header is appended without
replacement.
:param name: Name of the header to set.
:param value: Value to use for the header.
:param append: True if existing headers should not be replaced
"""
if not append:
response.headers.set(name, value)
else:
response.headers.append(name, value)
return response
|
a1f730d562b6a47be5e8619c1819ceb70944e469
| 487,886 |
def take(iter, n):
"""Return ``n`` items from ``iter`` iterator. """
return [ value for _, value in zip(range(n), iter) ]
|
a806efc2324ecb081328f9389a95a84b7414ecf7
| 303,593 |
def check_water(pdb_input):
"""
Given a pdb file checks if it contains water molecules.
:param pdb_input: pdb input file
:return: True or False
"""
checker = False
with open(pdb_input) as pdb:
for line in pdb:
if "HETATM" in line:
if line.split()[3] == "HOH":
print("Your pdb file contains water molecules")
checker = True
break
return checker
|
cc09b0d9566a006bf8f289cfd02a90d52582b74c
| 435,620 |
def rotate_left(node):
"""Perform left rotation around given node."""
new_root = node.right
grandson = new_root.left
node.right = grandson
new_root.left = node
node.compute_height()
return new_root
|
08128b90bd36c303025b5656077b79a24ca561ab
| 595,966 |
import logging
import time
def driveListFilesQueryWithNextToken(service, parentID, customQuery=None, pageToken=None):
"""Internal function to search items in drive folders
Args:
service: Drive service (from getGoogleServices()['drive'])
parentID (str): Drive folder ID of parent where to search
customQuery (str): optional custom query parameters
pageToken (str): optional page token for paging results of large sets
Returns:
Tuple (items, nextPageToken) containing the items found and pageToken to retrieve
remaining data
"""
param = {}
param['q'] = "'" + parentID + "' in parents and trashed = False"
if customQuery:
param['q'] += " and " + customQuery
param['fields'] = 'nextPageToken, files(id, name)'
param['pageToken'] = pageToken
param['supportsTeamDrives'] = True
param['includeTeamDriveItems'] = True
# print(param)
retriesLeft = 5
while retriesLeft > 0:
retriesLeft -= 1
try:
results = service.files().list(**param).execute()
items = results.get('files', [])
nextPageToken = results.get('nextPageToken')
# print('Files: ', items)
return (items, nextPageToken)
except Exception as e:
logging.warning('Error listing drive. %d retries left. %s', retriesLeft, str(e))
if retriesLeft > 0:
time.sleep(5) # wait 5 seconds before retrying
logging.error('Too many list failures')
return None
|
75280c3b78ac1b93cc65ee5561bc90e3e0da114d
| 91,904 |
def top_player_ids(info, statistics, formula, numplayers):
"""
Inputs:
info - Baseball data information dictionary
statistics - List of batting statistics dictionaries
formula - function that takes an info dictionary and a
batting statistics dictionary as input and
computes a compound statistic
numplayers - Number of top players to return
Outputs:
Returns a list of tuples, player ID and compound statistic
computed by formula, of the top numplayers players sorted in
decreasing order of the computed statistic.
"""
top_players = []
top_players_list = []
for values in statistics:
key = "".join(values[info["playerid"]])
value = formula(info,values)
top_players.append((key, value))
for _ in range(numplayers):
max_val, max_player = 0, ""
for player in top_players:
if player[1] > max_val and player not in top_players_list:
max_val = player[1]
max_player = player[0]
top_players_list.append((max_player, max_val))
return top_players_list
|
4fd92de417e7e1d5e239b903a108e12d0c2a41fe
| 351,714 |
def is_subset(l1: list, l2: list) -> bool:
"""
    Test if l2 is a subset of l1, i.e. all elements of l2 are contained in l1,
    and return True if that is the case, False otherwise.
:param l1: main list
:param l2: list whose elements are to be checked (if they're in l1 or not)
:return: True if l2 is a subset of l1, False otherwise.
"""
    set1 = set(l1)
    set2 = set(l2)
    # every element of l2 must be contained in l1
    return set2.issubset(set1)
|
09831684f55af2670ac4fd7e930c574f0265483c
| 44,813 |
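A quick check of the subset direction (all elements of l2 contained in l1):

```python
print(is_subset([1, 2, 3], [2, 3]))  # True: [2, 3] is contained in [1, 2, 3]
print(is_subset([1, 2], [2, 3]))     # False
```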
def message_dict(message):
"""Convert a Message instance to a dict which can be written to JSON."""
msg_dict = dict()
msg_dict['guid'] = message.guid
msg_dict['text'] = message.text
msg_dict['handle_id'] = message.handle_id
msg_dict['date'] = message.date
msg_dict['is_from_me'] = bool(message.is_from_me)
if message.subject is not None:
msg_dict['subject'] = message.subject
if message.attachments:
msg_dict['attachments'] = message.attachments
return msg_dict
|
1496f7d9abbcd74e09391d585eb2eb330eeebddb
| 398,119 |
def _get_py_filename(module):
"""
return the full path to the .py file of a module
(not the .pyc file)
"""
return module.__file__.rstrip('c')
|
5b477abe3cc5c8c9d606bdad98c102aaa26ffe13
| 288,790 |