content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def cpc(total_cost, total_clicks):
    """Return the cost per click (CPC).

    Args:
        total_cost (float): Total marketing spend.
        total_clicks (int): Number of clicks received (must be non-zero).

    Returns:
        float: Cost of a single click.
    """
    cost_per_click = total_cost / total_clicks
    return cost_per_click
|
3d4ec2d15ac5653cefdb40ba51ad06a93e68462f
| 249,223 |
def valid_follow_type(follow_type: str):
    """Validate a follow type and return its numeric steemd state value.

    Raises AssertionError for unsupported types.
    """
    # ABW: should be extended with blacklists etc. (and those should be implemented as next 'state' values)
    supported = {'blog': 1, 'ignore': 2}
    assert follow_type in supported, "Unsupported follow type, valid types: {}".format(", ".join(supported.keys()))
    return supported[follow_type]
|
9d4cc977e91b1e4357c3f4f848b95855e75a1420
| 134,455 |
from typing import List
from typing import Dict
import itertools
def merge_memory_map_lists(
    mml1: List[Dict[str, List[float]]], mml2: List[Dict[str, List[float]]]
) -> List[Dict[str, List[float]]]:
    """
    Produce the "cartesian product" of two lists of memory maps, merging
    every map of the first list with every map of the second:

        merge_memory_map_lists([{a: 1}, {a: 2}], [{b: 3}, {b: 4}])
        -> [{a: 1, b: 3}, {a: 1, b: 4}, {a: 2, b: 3}, {a: 2, b: 4}]

    An empty list on either side returns the other list unchanged.

    :param mml1: The first memory map list.
    :param mml2: The second memory map list.
    :return: A list of the merged memory maps.
    """
    if not mml1:
        return mml2
    if not mml2:
        return mml1
    return [{**first, **second} for first in mml1 for second in mml2]
|
e9ea93810bcc13ade3a064816bdb7ad34398f0b2
| 346,810 |
def make_adder_inc(n):
    """Return an adder whose result grows by one extra unit per call.

    The k-th call (counting from 0) returns ``n + x + k``.

    >>> adder1 = make_adder_inc(5)
    >>> adder2 = make_adder_inc(6)
    >>> adder1(2)
    7
    >>> adder1(2) # 5 + 2 + 1
    8
    >>> adder1(10) # 5 + 10 + 2
    17
    >>> [adder1(x) for x in [1, 2, 3]]
    [9, 11, 13]
    >>> adder2(5)
    11
    """
    calls = 0
    def adder(x):
        nonlocal calls
        result = n + x + calls
        calls += 1
        return result
    return adder
|
58f9bd2e6f947b94454b8673cf73b3007584f583
| 495,093 |
def compute_wiener_filter(speech_psd, noise_psd):
    """
    Compute the Wiener filter gain in the frequency domain.

    Parameters
    ----------
    speech_psd : numpy array
        Speech power spectral density.
    noise_psd : float or numpy array
        Noise variance if white noise, numpy array otherwise.

    Returns
    -------
    numpy array
        Filter gain evaluated at the same frequencies as `speech_psd`.
    """
    total_psd = speech_psd + noise_psd
    return speech_psd / total_psd
|
2aa59a0f2b4d93fc22c20c4e29e0e79f35411191
| 609,297 |
def decode_rate_depth(x: int):
    """Unpack an encoded value into (rate, depth).

    The rate occupies the bits above the low byte; depth is the low byte.
    """
    rate, depth = divmod(x, 0x100)
    return rate, depth
|
2c5b59041a2867e2e5c5153696decb328d70bb80
| 209,836 |
def count_inversions(array, blank):
    """Count inversions in `array`, skipping every slot that is the blank value.

    An inversion is a pair of non-blank tiles that appear out of order.
    """
    # Dropping blanks preserves the relative order of the remaining tiles,
    # so the inversion count over the filtered list is identical.
    tiles = [tile for tile in array if tile is not blank]
    total = 0
    for position, left in enumerate(tiles):
        total += sum(1 for right in tiles[position + 1:] if left > right)
    return total
|
7f70df885e39f10ad70343c302756e25f1573305
| 84,426 |
def _get_castep_output_file(calculation):
"""Return a list of the lines in the retrieved dot castep file"""
fname = calculation.get_option('output_filename')
fcontent = calculation.outputs.retrieved.get_object_content(fname)
return fcontent.split('\n')
|
0dc78bed80dc753192d1317f3373c0a4eb5e09c2
| 538,493 |
def get_pil_mode(value, alpha=False):
    """Translate a ColorMode name into the matching PIL mode string.

    Unknown modes pass through unchanged; `alpha` appends an 'A' channel
    to grayscale and RGB modes only.
    """
    mapping = {
        'GRAYSCALE': 'L',
        'BITMAP': '1',
        'DUOTONE': 'L',
        'INDEXED': 'P',
    }
    mode = mapping.get(value, value)
    if alpha and mode in ('L', 'RGB'):
        return mode + 'A'
    return mode
|
4f66500fe15cb3e4bd865677225b428dfc2ed470
| 497,404 |
import inspect
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, relabelling data descriptors.

    Attributes that inspect reports under another kind but are in fact
    data descriptors get the kind 'data descriptor'.
    """
    return [
        (name,
         'data descriptor' if inspect.isdatadescriptor(value) else kind,
         cls,
         value)
        for name, kind, cls, value in inspect.classify_class_attrs(object)
    ]
|
0d13cd5026e770a4a79c3cbf06b403f0db08e8a9
| 392,978 |
from datetime import datetime
def check(str1, str2, format_str):
    """Return True when both strings parse (via `format_str`) to the same
    datetime, or when neither parses and they are textually identical
    (i.e. both redacted the same way)."""
    try:
        return datetime.strptime(str1, format_str) == datetime.strptime(str2, format_str)
    except ValueError:
        # At least one string is unparseable (redacted): fall back to a
        # plain text comparison and assume identical redactions are correct.
        return str1 == str2
|
0f03443136ebe1d55d360147dcf25aa40dffb167
| 691,935 |
import re
def _clean_expression(expression: str) -> str:
"""Clean the given expression.
:param expression: The expression to clean.
:return: The cleaned expression."""
expression = re.sub(r'[<>@]', '', expression)
expression = re.sub(r'&(\d+)', r'\1', expression)
return expression
|
3d0402cb9328ab06ec3473e36efa561614499254
| 492,245 |
from typing import List
def get_starttime_default(features: List[str]) -> int:
    """
    Heuristic for picking a sound attribute to use as default start time.

    :param features: list of available feature names
    :return: index of the first feature containing 'start' (case-insensitive),
        or 0 when none matches
    """
    return next(
        (index for index, name in enumerate(features) if 'start' in name.lower()),
        0,
    )
|
4089ddfb7265659e64599c2d75ea084fbe34f0eb
| 491,970 |
def calculate_position_part2(infile_path: str) -> int:
    """
    Read instructions, track aim/horizontal/vertical position and return the
    product of the final horizontal and vertical positions.

    :param infile_path: file path to input file with instructions separated by newlines
    :type infile_path: str
    :return: product of the vertical and horizontal position
    :rtype: int
    :raises ValueError: on an unrecognized instruction
    """
    horizontal = 0
    vertical = 0
    aim = 0
    with open(infile_path, 'r', encoding='ascii') as commands:
        for command in commands:
            direction, raw_units = command.split(' ')
            units = int(raw_units)
            if direction == 'down':
                aim += units
            elif direction == 'up':
                aim -= units
            elif direction == 'forward':
                # forward moves horizontally and dives by aim * units
                horizontal += units
                vertical += aim * units
            else:
                raise ValueError(f'bad command: "{command}"')
    return horizontal * vertical
|
3dfe30a9b4e2971385f610417d2e9cefa34fc1d8
| 656,130 |
def get_comparative_word_freq(freqs):
    """
    Normalize a dictionary of word frequencies so that the values express
    each word's share of the total. If the total frequency is zero, every
    word maps to zero.

    :param freqs: dictionary mapping word -> frequency
    :return: dictionary mapping word -> relative frequency

    >>> get_comparative_word_freq({'he': 1, 'she': 3})
    {'he': 0.25, 'she': 0.75}
    >>> get_comparative_word_freq({'he': 0, 'she': 0})
    {'he': 0, 'she': 0}
    """
    total = sum(freqs.values())
    relative = {}
    for word, frequency in freqs.items():
        try:
            share = frequency / total
        except ZeroDivisionError:
            share = 0
        relative[word] = share
    return relative
|
5d59d25a15119a8e5f934804f1b8ee4ab5ecdbc4
| 680,615 |
def xgcd(a, b):
    """
    Perform the Extended Euclidean algorithm and return one Bézout
    coefficient.

    Arguments:
        a (:int) - the first integer
        b (:int) - the second integer

    Returns:
        'r' such that there exists an integer s with a*s + b*r = gcd(a, b);
        equivalently, b*r is congruent to gcd(a, b) modulo a.

    Note: the returned value is the coefficient of *b* (the initial state
    r, s = 0, 1 tracks b's coefficient), not of a as previously documented.
    """
    r, s = 0, 1
    while b != 0:
        quotient, remainder = divmod(a, b)
        r, s = s, r - quotient * s
        a, b = b, remainder
    return r
|
8a1cdf00e11a7af259476b5e9884a70fc2e4c04c
| 480,092 |
import six
def ported_string(raw_data, encoding='utf-8', errors='ignore'):
    """
    Give as input raw data and output a str in Python 3
    and unicode in Python 2.

    Args:
        raw_data: Python 2 str, Python 3 bytes or str to porting
        encoding: string giving the name of an encoding
        errors: his specifies the treatment of characters
            which are invalid in the input encoding

    Returns:
        str (Python 3) or unicode (Python 2)
    """
    # Empty/None input: return an empty text object rather than failing.
    if not raw_data:
        return six.text_type()
    # Already text: nothing to decode.
    if isinstance(raw_data, six.text_type):
        return raw_data
    if six.PY2:
        try:
            return six.text_type(raw_data, encoding, errors)
        except LookupError:
            # Unknown codec name: retry with utf-8.
            return six.text_type(raw_data, "utf-8", errors)
    if six.PY3:
        try:
            # NOTE(review): the `errors` argument is deliberately not passed
            # here, so this first decode is strict; a UnicodeDecodeError then
            # triggers the utf-8 fallback below, which does honor `errors`.
            # Confirm this asymmetry with the original author.
            return six.text_type(raw_data, encoding)
        except (LookupError, UnicodeDecodeError):
            return six.text_type(raw_data, "utf-8", errors)
|
70afe0d3f0363959ba0b37f339e581403fb63798
| 456,749 |
def generic_struct(name):
    """
    Generate source code for an empty ctypes wrapper class of a C struct.

    :param name: the struct name
    :return: string
    """
    template = (
        'class {name}(ctypes.Structure):\n'
        ' """Wrapper for the {name} C struct."""\n'
        '\n'
        ' pass\n\n\n'
    )
    return template.format(name=name)
|
26c9cbf4f2ede3d18ae9ac52f95115d2149c1674
| 415,374 |
def render_literal(obj):
    """
    Render obj as a literal expression in C.

    Strings are emitted as double-quoted C string literals with backslashes
    and embedded double quotes escaped (the previous version emitted invalid
    C for strings containing either character). Non-strings fall back to
    repr().
    """
    if isinstance(obj, str):
        escaped = obj.replace('\\', '\\\\').replace('"', '\\"')
        return '"%s"' % escaped
    else:
        return repr(obj)
|
f3c3fd0b8325345ae8d421b3b8da9635f58fb876
| 349,735 |
def str2ds(strings):
    """
    Format numbers as DICOM decimal strings (DS), which are limited to a
    maximum of 16 characters.

    :param strings: iterable of numbers (despite the parameter name).
    :return: list of formatted string representations.
    """
    # '+16.8e' yields a sign, 8 fractional digits and an exponent,
    # right-aligned in a 16-character field.
    return ['{:+16.8e}'.format(value) for value in strings]
|
7e7973044ce45f9684a3640f9d2a228d3801d9cc
| 516,953 |
import random
def shuffled(li):
    """Return a new list with the items of *li* in random order.

    The input list is left untouched.
    """
    result = list(li)
    random.shuffle(result)
    return result
|
af04eaa4acd07b34b99809455aa8ff1ce514178b
| 397,057 |
def nbt_path_join(*args):
    """Join NBT path segments into one path, similar to os.path.join().

    Segments are joined with '.', except index segments starting with '['
    which are appended directly. Trailing empty segments are dropped; no
    segments at all yields '{}'.
    """
    if not args:
        return '{}'
    if len(args) == 1:
        return args[0]
    segments = list(args)
    # Drop trailing empty segments (but never the sole remaining one).
    while len(segments) > 1 and segments[-1] == '':
        segments.pop()
    path = segments[0]
    for segment in segments[1:]:
        path = f'{path}{segment}' if segment.startswith('[') else f'{path}.{segment}'
    return path
|
9f6e5ea5cfe28949a2f8d8f36dbe79a655caad84
| 402,037 |
def get_binary_indexes(coordinates):
    """Convert (x, y, z) coordinate triples to 8-bit binary string triples."""
    return [
        (format(c[0], '08b'), format(c[1], '08b'), format(c[2], '08b'))
        for c in coordinates
    ]
|
2731cfbed13556faba21e47b8567c47569324e31
| 448,990 |
def is_staff_user(request):
    """
    Args:
        request (HTTPRequest): django request object

    Returns:
        bool: True if the request carries a non-None user flagged as staff
    """
    user = request.user
    return user is not None and user.is_staff
|
d1f18ac5ef61b611128d9c7842c61a41004a72c6
| 371,051 |
def normal_percentile_to_label(percentile):
    """
    Assign a descriptive term to an MMSE percentile score.

    The bands use the original integer cut-points but are expressed as
    lower bounds so that non-integer scores in the gaps (e.g. 97.5 or
    90.5, which previously matched no branch and silently returned None)
    now receive the label of the band below the next cut-point. Integer
    inputs are classified exactly as before.
    """
    if percentile >= 98:
        return 'Exceptionally High'
    if percentile >= 91:
        return 'Above Average'
    if percentile >= 75:
        return 'High Average'
    if percentile >= 25:
        return 'Average'
    if percentile >= 9:
        return 'Low Average'
    if percentile >= 2:
        return 'Below Average'
    return 'Exceptionally Low'
|
7c2e92ee51b00308f10f14e89869cb339ee551b1
| 109,871 |
import json
def build_message(status, message=None, state=None):
    """
    Build the JSON message string broadcast to connected clients.

    :param status: HTTP status code of the message, int
    :param message: Optional free-form message for the client (string or dict).
        Either this or state must be set.
    :param state: Optional state dict to pass to the client (full state or a
        subset). Either this or message must be set.
    :return: JSON string
    :raises ValueError: when neither message nor state is provided
    """
    # Other clients only see this broadcast, so it must carry either a
    # message or a state payload describing what actually happened.
    if message is None and state is None:
        raise ValueError('Either message or state should be set')
    return json.dumps({'status': status, 'message': message, 'state': state})
|
82998943feefb8ceeadf0c03e80a49ba0a67cc0e
| 492,329 |
from typing import Dict
import json
def read_json(path: str) -> Dict[str, object]:
    """
    Load and return the JSON document stored at *path*.

    :return: Dict[str, object]
    """
    with open(path) as handle:
        return json.load(handle)
|
345083155ef9aa42157a36feebf768d8cb52a00b
| 613,292 |
def get_dns_classification(self) -> list:
    """Fetch DNS application-classification definitions from Cloud Portal.

    This endpoint returns a large payload and typically takes longer to
    respond than other calls.

    Swagger: section ``spPortal``, ``GET /spPortal/dnsClassification``.

    :return: list of dictionaries with DNS details
    """
    return self._get("/spPortal/dnsClassification")
|
7261ab56274e52e35cdb96921d9e6e160f86a1a9
| 598,282 |
def int_to_binary(integer):
    """Return the zero-padded 36-bit binary representation of *integer*."""
    return format(integer, '036b')
|
95b0f77dc3ecb59cd7c2a85d8aacacb80575ae6f
| 223,572 |
def _makeBool(value, default=True):
""" Helper to make boolean out of a .ini value """
if default is True:
if (value or '').lower() in ('off', 'false', '0'):
return False
else:
if (value or '').lower() in ('on', 'true', '1'):
return True
return default
|
c79095a8636ed3c6e36e914ce5131a1389cd91d3
| 588,108 |
def generate_query_string(query_params):
    """Build a "key=value&key=value" query string from a mapping.

    Keys and values are coerced to str; no URL escaping is performed.
    """
    return "&".join(
        "{}={}".format(key, value) for key, value in query_params.items()
    )
|
ca656c1d3c43a069b1eb9b4f4d98ba94c7c98256
| 164,095 |
def is_upper_bound(y):
    """
    Report whether the bound has been reached (y has dropped to 0 or below).

    :param y: y coordinate.
    :return: True - bound was reached, False - otherwise.
    """
    return y <= 0
|
d784c6da3c9c760158f0c582a9945ea450b06119
| 523,885 |
def es_primo(n: int) -> bool:
    """Determine whether *n* is a prime number.

    :param n: number to test.
    :return: True if prime, False otherwise.
    :rtype: bool
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need to be checked.
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
|
fff4802d6e47c379b6510f13e2ffe9a9d56429d2
| 698,494 |
from pathlib import Path
def count_standard_seeds(seeds_dir):
    """Count the regular files found recursively under *seeds_dir*."""
    return sum(1 for entry in Path(seeds_dir).glob('**/*') if entry.is_file())
|
9e704fa63cee25069ad0f880d67151f6bb12761f
| 136,709 |
def build_model_identifier(model_id, model_version):
    """Generate the long identifier for a model from its ID and version.

    Used for naming conda environments, storage directories, and similar.

    Parameters
    ----------
    model_id : int
        ID of the model
    model_version : str
        Version of the model

    Returns
    -------
    str
        Identifier of the form ModMon-model-<model_id>-version-<model_version>
    """
    return "ModMon-model-{}-version-{}".format(model_id, model_version)
|
a73ef44df3c37e9ad51311dc3de040a575e1f304
| 443,962 |
def maxOfArray(A):
    """Return the maximum of list A together with its index.

    Args:
        A (list): the input values

    Returns:
        tuple: (max of A, index i with A[i] == max, first occurrence wins),
            or (0, -1) when A is empty.
    """
    if not A:
        # Empty input: sentinel result.
        return 0, -1
    # max() with a key returns the first maximal element, matching the
    # strict '>' comparison of a manual scan.
    best_index = max(range(len(A)), key=A.__getitem__)
    return A[best_index], best_index
|
9121308b63190af7e46f33101e25e8bfcec7ac05
| 341,489 |
def make_filename_from_version(handle, version):
    """Build the "<handle>-v<version>.toml" filename."""
    return "{}-v{}.toml".format(handle, version)
|
c548e2a7c651dd25ccdf4880033932798ba3df01
| 205,238 |
def mix_targets(preds, targets, target_name1, target_name2, weight1=0.5,
                weight2=0.5, skip_missing=False, new_name=None):
    """
    Linearly combine two entries of `targets` into one, optionally with
    custom weights, storing the result under the first entry's name unless
    `new_name` is given. `weight1` and `weight2` may also be keys into
    `targets`. With a truthy `skip_missing`, a missing second target means
    the first is stored unweighted; otherwise the KeyError propagates.

    Returns 0.
    """
    first = targets[target_name1]
    try:
        second = targets[target_name2]
    except KeyError:
        if not skip_missing:
            raise
        combined = first
    else:
        # Weights given as strings are looked up in `targets`.
        w1 = targets[weight1] if isinstance(weight1, str) else weight1
        w2 = targets[weight2] if isinstance(weight2, str) else weight2
        combined = w1 * first + w2 * second
    destination = target_name1 if new_name is None else new_name
    targets[destination] = combined
    return 0
|
048341c365cc7166027e3aa46f805c14d233c227
| 665,966 |
def reg_read(spi, cs, reg, nbytes=1):
    """
    Read byte(s) from the register at `reg` over SPI. When nbytes > 1 the
    device auto-increments through consecutive registers.

    Returns an empty bytearray for nbytes < 1.
    """
    if nbytes < 1:
        return bytearray()
    # Command byte layout: bit 7 = read (~W), bit 6 = multi-byte (MB).
    mb_flag = 0 if nbytes == 1 else 1
    command = bytearray((0x80 | (mb_flag << 6) | reg,))
    cs.value(0)            # assert chip select
    spi.write(command)
    payload = spi.read(nbytes)
    cs.value(1)            # release chip select
    return payload
|
2e84f78f4e0416b0a677d57d1caa8c32fb21a2fa
| 453,340 |
def check_individuals(ped_individuals, vcf_individuals):
    """
    Check that every individual from the PED file exists in the VCF file.

    Arguments:
        ped_individuals (iterable): iterable of individual id strings
        vcf_individuals (iterable): iterable of individual id strings

    Returns:
        bool: True when all PED individuals are present in the VCF

    Raises:
        IOError: if any PED individual is missing from the VCF
    """
    # Materialize once: the old per-individual `in` tests silently exhausted
    # `vcf_individuals` when it was a generator/iterator, producing spurious
    # failures after the first membership check.
    vcf_set = set(vcf_individuals)
    for individual in ped_individuals:
        if individual not in vcf_set:
            raise IOError("Individuals in PED file must exist in VCF file")  # Raise proper exception here
    return True
|
e65b24390c8cebff7870e46790cf1c0e9b2d37c6
| 12,026 |
def lag(array, n=1):
    """Drop the first *n* elements of the sequence.

    >>> x = [19, 100, 36, 6, 100, 20, 75, 66, 98, 55]
    >>> lag(x)
    [100, 36, 6, 100, 20, 75, 66, 98, 55]
    >>> lag(x, 2)
    [36, 6, 100, 20, 75, 66, 98, 55]
    """
    offset = n
    return array[offset:]
|
6fea13e3f0d9b91c35683675c9f42ffea07e9563
| 526,566 |
def token_links_factory(token):
    """Build the links dictionary for token REST views.

    Record-related links are only included when the token resolves to a
    record; otherwise an empty mapping is returned.
    """
    record = token.get_record()
    links = {}
    if record is not None:
        base = record.canonical_url
        links['record'] = base
        links['files'] = f"{base}/files/"
        links['revoke'] = f"{base}/revoke_token/{token.id}"
        links['init_upload'] = f"{links['files']}?multipart=true"
    return links
|
da85cdc237c3fa3665d304e2df982c5baac70005
| 463,361 |
def type(record):
    """
    Lower-case the record's "type" field in place, if present.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    try:
        record["type"] = record["type"].lower()
    except KeyError:
        # No "type" field: nothing to normalize.
        pass
    return record
|
c014281e533752cc2d719166c3df8194bc906be9
| 474,403 |
def wrap_hashlib(hasher, length=None):
    """
    Wrap a hashlib constructor, returning a function that maps input bytes
    to their hex digest. For variable-length algorithms (shake_*), pass the
    desired digest length via *length*.

    >>> from hashlib import sha1
    >>> wrap_hashlib(sha1)(b'heyo')
    'f8bb1031d6d82b30817a872b8a2ec31d5380cee5'

    :param hasher: A function from :mod:`hashlib`
    :return: Function
    """
    digest_args = () if length is None else (length,)

    def _hasher(data):
        return hasher(data).hexdigest(*digest_args)

    return _hasher
|
dbd07d4151a5c5c523fe75c3f29b72abfd15c3b8
| 702,274 |
def backcomp(arglist):
    """Rewrite legacy CLI argument spellings in place and return the list.

    "attr" becomes "write-attr", "--known-endpoints" becomes "--ep-known",
    and the first "--echo" (now implicit) is removed.
    """
    replacements = {"attr": "write-attr", "--known-endpoints": "--ep-known"}
    for index, arg in enumerate(arglist):
        if arg in replacements:
            arglist[index] = replacements[arg]
    if "--echo" in arglist:
        arglist.remove("--echo")
    return arglist
|
9d3f451b1bb5449df8dce672c45d051e378f5050
| 566,278 |
def wants_plain_hotdog(ketchup, mustard, onion):
    """Return whether the customer wants a plain hot dog with no toppings."""
    return not (ketchup or mustard or onion)
|
78f7308cd63f7133e1f96202164246e0d0cd9a8a
| 84,758 |
def sort_by_relevance(repo_list):
    """
    Order repositories for display: most forks plus stars first.

    Sorts in place and returns the same list.

    :param repo_list: repolist to sort
    :return: sorted repolist
    """
    def combined_popularity(repo):
        return repo['repo_forks'] + repo['repo_stars']

    repo_list.sort(key=combined_popularity, reverse=True)
    return repo_list
|
e18708489c87c097e5c22f4f5b67a133b915fba5
| 556,003 |
import math
def round_index(index: float) -> int:
    """Round to the nearest integer, with exact .5 fractions rounded up.

    Unlike built-in round() (banker's rounding), a fractional part of
    exactly 0.5 always goes to the larger integer, giving an unambiguous
    final index value.

    >>> round_index(2.5)
    3
    >>> round_index(0.5)
    1
    >>> round_index(1.25)
    1
    """
    return math.ceil(index) if index % 1 == 0.5 else round(index)
|
97f830dba481774e894f73b3ce0c99a62e2ae843
| 177,008 |
def transpose(x, y, _):
    """Swap row and column coordinates; the third argument is ignored."""
    return (y, x)
|
46f2b58e6f15c7b337a9b6d714ff4c7cd1ba5ea3
| 98,220 |
def is_yaml_view(view):
    """Return True if the view's base scope matches YAML source."""
    return view.score_selector(0, "source.yaml") > 0
|
fe3177e95baa341103241e20bea32fb2d90e03f4
| 413,161 |
def gini(dist):
    """Return the Gini impurity of a distribution.

    A decision-tree learner using this criterion aims to minimize the Gini
    impurity inside each leaf.

    Parameters:
        dist (proba.Multinomial): iterable over classes, exposing pmf(c)

    References:
        1. `A Simple Explanation of Gini Impurity <https://victorzhou.com/blog/gini-impurity/>`_
    """
    probabilities = [dist.pmf(c) for c in dist]
    return sum(p * (1 - p) for p in probabilities)
|
dd02700ef533c72ec2352f2926857bc39f5f3d48
| 518,310 |
def _add(shape, solution):
"""
Adds all points of the shape to the solution if they are not already
contained.
Returns True if all points could be added or False otherwise
"""
if any([ point in solution for point in shape ]):
return False
for point in shape:
solution.append(point)
return True
|
2795cfb2b8c167294e9dc4c1104fe6887fa472d4
| 116,419 |
def monthly_N_fixation_point(
        precip, annual_precip, baseNdep, epnfs_2, prev_minerl_1_1):
    """Add monthly N fixation to the surface mineral N pool.

    Monthly N fixation is the annual N deposition apportioned by the
    ratio of monthly to annual precipitation, plus a precipitation-driven
    regression term.

    Parameters:
        precip (float): input, monthly precipitation
        annual_precip (float): derived, annual precipitation
        baseNdep (float): derived, annual atmospheric N deposition
        epnfs_2 (float): parameter, intercept of regression
            predicting N deposition from annual precipitation
        prev_minerl_1_1 (float): state variable, mineral N in the
            surface layer in previous month

    Returns:
        updated mineral N in the surface layer
    """
    precip_fraction = precip / annual_precip
    # Regression term caps the effective annual precipitation at 100.
    monthly_fixation = (
        baseNdep * precip_fraction +
        epnfs_2 * min(annual_precip, 100.) * precip_fraction)
    return prev_minerl_1_1 + monthly_fixation
|
51d2de4c024fbd0b41a6f75b66090aef18067dbe
| 475,171 |
def _make_argo_task(name, dependencies):
"""Generate an Argo Task spec
"""
task = {
'name': name,
'dependencies': dependencies,
'template': 'run-task',
'arguments': {
'parameters': [{
'name': 'task_name',
'value': name,
}]
}
}
return task
|
f2dbfdec4d409dddbaab1bb80082a49ee004c40f
| 498,968 |
def _detect_format_from_extension(filename):
"""
Attempts to detect file format from the filename extension.
Returns None if no format could be detected.
"""
if filename.endswith('.bz2'):
return "bz2"
elif filename.endswith('.xz'):
return "xz"
elif filename.endswith('.gz'):
return "gz"
else:
return None
|
45875b4b594f38619cdd4179ad2dd28c278e875f
| 301,022 |
def rotr(v, count, bits):
    """Rotate the low *bits* bits of v right by *count* positions.

    Note: bits of v above the rotation width are not masked off the
    right-shifted half (original behavior, preserved).
    """
    shift = count % bits
    mask = (1 << bits) - 1
    low_half = v >> shift
    high_half = (v << (bits - shift)) & mask
    return low_half | high_half
|
cc3dd1b7ec0060a8d1b8e6b4c162ad0629f12116
| 426,507 |
def _genfmt(size, endian, sign):
"""
Generates a format string that can be used with struct.pack()
and struct.unpack().
"""
if sign not in [True, False]:
raise ValueError('sign must be either True or False')
if endian == 'little':
fmt = '<'
elif endian == 'big':
fmt = '>'
else:
raise ValueError('endianness must be either "little" or "big"')
if size == 16:
fmt += 'h' if sign else 'H'
elif size == 32:
fmt += 'i' if sign else 'I'
elif size == 64:
fmt += 'q' if sign else 'Q'
else:
raise ValueError('supported sizes are 16, 32 and 64')
return fmt
|
4bd61554bb24930fd44e3e8a2f00e5aacdbe8bed
| 203,738 |
def label_breakdown(data):
    """Summarize label counts in `data` as a "neg / neu / pos" string.

    Rows are indexable records: row[3] truthy marks a valid label,
    row[1] is the item key (later rows overwrite earlier ones) and
    row[7] is the label (-1 negative, 0 neutral, anything else positive).
    """
    labels = {row[1]: row[7] for row in data if row[3]}
    negative = sum(1 for label in labels.values() if label == -1)
    neutral = sum(1 for label in labels.values() if label == 0)
    positive = len(labels) - negative - neutral
    return "{} / {} / {}".format(negative, neutral, positive)
|
07aa33f36da83eb2b21d211ffe1979fb2b5c3491
| 145,940 |
def MergeComponents(components):
    """Merge components sharing a hierarchy, keeping the most fine-grained.

    For example ['Blink', 'Blink>Editing'] reduces to ['Blink>Editing'].
    The input list is sorted in place; the merged result is returned.
    """
    if not components or len(components) == 1:
        return components
    components.sort()
    # After sorting, a component is redundant exactly when its successor
    # extends it with a '>' separator.
    merged = [
        current
        for current, successor in zip(components, components[1:])
        if not successor.startswith(current + '>')
    ]
    merged.append(components[-1])
    return merged
|
189308f4ddda3489234936286da22e4260449a10
| 579,519 |
def parse_byte_ranges(s):
    """Parse strings like "1,3-5" into the set {1, 3, 4, 5}.

    Each comma-separated term is either a single byte value or a lo-hi
    range (order-insensitive). Values must fall within 0..255.
    """
    result = set()
    for term in s.split(","):
        bounds = [int(piece) for piece in term.split("-")]
        if len(bounds) == 1:
            lo = hi = bounds[0]
        elif len(bounds) == 2:
            lo, hi = min(bounds), max(bounds)
        else:
            raise ValueError(
                f"""Couldn't parse "{term}" as byte or as a range of bytes"""
            )
        if lo < 0:
            raise ValueError(f"Value out of range: {lo}")
        if hi > 255:
            raise ValueError(f"Value out of range: {hi}")
        result.update(range(lo, hi + 1))
    return result
|
a3a75c11e52f7c439ed3ca073e8c72dc30a0d58a
| 544,161 |
def swap_keys_values(_dict: dict):
    """Swap the keys and values of a dictionary.

    Parameters
    ----------
    _dict : dict
        The dictionary to reverse. Values must be hashable; duplicate
        values keep the last key encountered.

    Returns
    -------
    dict
        The swapped/reversed dictionary.
    """
    return dict(zip(_dict.values(), _dict.keys()))
|
ff92e6eff371aca31ef8549c2293530303c99d75
| 372,777 |
from typing import List
def rindex(lst: List, value: str) -> int:
    """
    Return the index of the last occurrence of a value in a list.

    :param lst: The list to search in.
    :param value: The value to search for.
    :return: The index of the last occurrence of the value.
    :raises ValueError: when the value does not occur in the list.
    """
    for offset, element in enumerate(reversed(lst)):
        if element == value:
            return len(lst) - 1 - offset
    raise ValueError(f"Answer start token `{value}` not found in the eval template")
0bbe8a1aa2c8786b18a5c1cb97155e25ec36fa07
| 226,888 |
import networkx as nx
def mol_to_nx(mol):
    """Convert an RDKit mol into a NetworkX graph.

    Atoms become nodes keyed by atom index, carrying atomic number, formal
    charge, chirality, hybridization, explicit-H count and aromaticity;
    bonds become edges annotated with the bond type.

    Parameters
    ----------
    mol : RDKit mol
        RDKit molecule.

    Returns
    -------
    G : Networkx Graph.
        Graph object.

    References
    ----------
    `keras-molecules <https://github.com/dakoner/keras-molecules/blob/dbbb790e74e406faa70b13e8be8104d9e938eba2/convert_rdkit_to_networkx.py#L17>`__
    """
    graph = nx.Graph()
    for atom in mol.GetAtoms():
        graph.add_node(
            atom.GetIdx(),
            atomic_num=atom.GetAtomicNum(),
            formal_charge=atom.GetFormalCharge(),
            chiral_tag=atom.GetChiralTag(),
            hybridization=atom.GetHybridization(),
            num_explicit_hs=atom.GetNumExplicitHs(),
            is_aromatic=atom.GetIsAromatic(),
        )
    for bond in mol.GetBonds():
        graph.add_edge(
            bond.GetBeginAtomIdx(),
            bond.GetEndAtomIdx(),
            bond_type=bond.GetBondType(),
        )
    return graph
|
a1ddb82bdd3cc80ea61a377ec3f86397074d113e
| 538,647 |
def tag_kinds(atoms):
    """Tag atoms so that distinct kinds of the same element get distinct tags.

    Atoms of one element that differ in charge or magnetic moment (e.g. four
    different kinds of 'Fe') receive tags 1, 2, 3, ...; elements with a
    single kind are left untagged.
    """
    def kind_signature(atom):
        return f"{{'symbol': {atom.symbol}, 'charge': {atom.charge}, 'magmom': {atom.magmom}}}"

    for symbol in set(atoms.get_chemical_symbols()):
        same_element = [atom for atom in atoms if atom.symbol == symbol]
        signatures = sorted({kind_signature(atom) for atom in same_element})
        if len(signatures) == 1:
            # All atoms of this element share one kind: no tagging needed.
            continue
        for atom in same_element:
            atom.tag = signatures.index(kind_signature(atom)) + 1
    return atoms
|
fad4304b1789dc63972d6c5e9a6a5ac1ad5c1666
| 377,770 |
import importlib
def get_module(module_name):
    """Locate and import a bot module by name.

    Names prefixed with 'botnet_' are treated as external modules and
    imported as-is; anything else is resolved inside
    `botnet.modules.builtin`.
    """
    if module_name.startswith('botnet_'):
        import_name = module_name
    else:
        import_name = 'botnet.modules.builtin.%s' % module_name
    return importlib.import_module(import_name)
|
3f277799bedf479ffe89c4a65ccba5b05b23b78c
| 449,695 |
def rgba_to_int(r: int, g: int, b: int, a: int) -> int:
    """Pack RGBA byte components into one signed 32-bit big-endian integer.

    >>> print(rgba_to_int(0, 0, 0, 0))
    0
    >>> print(rgba_to_int(0, 1, 135, 4))
    100100
    """
    return int.from_bytes(bytes((r, g, b, a)), byteorder="big", signed=True)
|
8ad6eb4aefe06ba5d2c60ee1c32f8a8f390adee9
| 405,805 |
def ipopo_state_to_str(state):
    """
    Convert an iPOPO component instance state to its string representation.

    :param state: The state of an iPOPO component
    :return: A string representation of the state, or
        "Unknown state (<state>)" for unrecognized values
    """
    # Kept as a dict (not a tuple index) so that any hashable equal to an
    # int key, e.g. 1.0 or True, still resolves as in the original.
    state_names = {
        0: "INVALID",
        1: "VALID",
        2: "KILLED",
        3: "VALIDATING",
        4: "ERRONEOUS",
    }
    return state_names.get(state, "Unknown state ({0})".format(state))
|
9d53fe004176a7d541694a83721293106fc83cd3
| 473,947 |
def separateUnit(string):
    """
    Separate the number and the unit in the given string.

    e.g. '1 em' returns (1.0, 'em'), '12px' returns (12.0, 'px'),
    '5%' returns (5.0, '%'). A string with no unit returns (value, '').

    The previous version split at index i-1 of the first non-numeric
    character and kept scanning, which crashed on single-character units
    ('5%' -> float('5%')) and could pollute the numeric part; we now split
    at the FIRST non-numeric character and stop.

    :raises ValueError: when the string does not start with a number.
    """
    numeric_chars = set('.0123456789')
    for i, ch in enumerate(string):
        if ch not in numeric_chars:
            # Split here; drop any whitespace between number and unit.
            return float(string[:i]), string[i:].lstrip()
    # No unit present: the whole string is the number.
    return float(string), ''
|
ceab4bfc607c70714a5bc0459747a087eefec344
| 621,509 |
def get_imag(input, input_type="linear", channels_axis=1):
    """Return the imaginary components of a complex-valued input tensor.

    The imaginary half is assumed to occupy the second half of the feature
    (or channel) dimension.

    Arguments
    ---------
    input : torch.Tensor
        Input tensor.
    input_type : str,
        (convolution, linear) (default "linear")
    channels_axis : int.
        Default 1. Only used for the convolution path.
    """
    if input_type == "linear":
        features = input.size()[-1]
        half = features // 2
        if input.dim() == 2:
            # second half of the feature axis
            return input.narrow(1, half, half)
        elif input.dim() == 3:
            return input.narrow(2, half, half)
    else:
        channels = input.size(channels_axis)
        return input.narrow(channels_axis, channels // 2, channels // 2)
|
168f2a89dd76cd0bfaa41eae1f8b4138832ecffc
| 620,322 |
def add_vars(var_seq_1, var_seq_2):
    """
    Element-wise sum of two variable sequences (truncated to the shorter).
    """
    total = []
    for left, right in zip(var_seq_1, var_seq_2):
        total.append(left + right)
    return total
|
31118b9947441ebec8afebcca502992c27a8589f
| 295,443 |
def consume_next_text(text_file):
    """Consume the next "id text..." line from `text_file`.

    Returns (utterance id, remaining text); at EOF returns (None, '').
    """
    line = text_file.readline()
    idx = None
    if line:
        parts = line.strip().split()
        idx = parts[0]
        line = " ".join(parts[1:])
    return idx, line
|
9a0595580a9fa8d6b5f480d218e88c508e5cd197
| 681,983 |
def fix_range(a, nmin, nmax):
    """
    Linearly rescale the values of array `a` into the range [nmin, nmax].

    Parameters
    ----------
    a : ndarray
        Array of values to scale.
    nmin, nmax : float
        New minimum and maximum values for the new range.

    Returns
    -------
    b : ndarray
        Scaled array.
    """
    old_min = a.min()
    old_max = a.max()
    return (((nmax - nmin) * (a - old_min)) / (old_max - old_min)) + nmin
|
58e35fba4df24ac1d00614ebb179a2ce3f618602
| 343,015 |
import time
def _learn(ulearn, adaptive_learning_rate, increase_rate, decrease_rate, left_out, printer):
"""
Perform the learning process on ulearn with the given parameters.
Prints the preparation and learning time of the process using the printer.
:param ulearn: The learning object
:type ulearn: ULearner
:param adaptive_learning_rate: Whether to dynamically change the learning rate when results improve/worsen.
:type adaptive_learning_rate: bool
:param increase_rate: The rate with which to multiply the learning rate when an improvement was found (>= 1).
:type increase_rate: float
:param decrease_rate: The rate with which to multiply the learning rate when a worse solution was found (<= 1).
:type decrease_rate: float
:param left_out: Whether a third of the data should be left out and used for testing instead of training.
:type left_out: bool
:param printer: The printer to write logging information to.
:type printer: Printer
"""
# Prepare
starttime = time.time()
ulearn.prepare()
endtime = time.time()
printer.print("Preparation took %s seconds" % (endtime - starttime))
# Learn
starttime = time.time()
if adaptive_learning_rate and left_out:
left_out_rate = 0.3333
printer.print("Using %s of the data to validate." % left_out_rate)
actual_results = ulearn.learn_adaptive_rate_left_out(increase_rate=increase_rate, decrease_rate=decrease_rate,
left_out=left_out_rate)
elif adaptive_learning_rate and not left_out:
actual_results = ulearn.learn_adaptive_rate(increase_rate=increase_rate, decrease_rate=decrease_rate)
else:
actual_results = ulearn.learn()
endtime = time.time()
printer.print("---")
printer.print("Ran %s seconds." % (endtime - starttime))
return actual_results
|
90ed5c69c3009356cfbb95a7dd1371f66613aefa
| 129,957 |
import itertools
def partition(seq, key):
    """Group a sequence into (key_value, items) buckets by key(x).

    The input is sorted by the same key first so ``groupby`` sees each
    key exactly once; the result is a lazy generator of pairs.
    """
    ordered = sorted(seq, key=key)
    return ((k, list(grp)) for k, grp in itertools.groupby(ordered, key=key))
|
89c97578444a2d99f88e1945f6819f84cdc1a7f6
| 203,175 |
import pathlib
import base64
def read_image(filepath: pathlib.Path) -> str:
    """
    Read an image file and return its contents encoded as Base64 text.

    Args:
        filepath: Input image file path
    Returns:
        Image encoded in Base64
    """
    with open(filepath, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw).decode("utf-8")
|
4219c092188acd6953e9043bdb083f165af742fe
| 613,839 |
def insert_whitespace(seq: str) -> str:
    """
    Return the sequence of characters with a space between each char.

    >>> insert_whitespace("RKDES")
    'R K D E S'
    """
    # str.join iterates the string directly; the intermediate list() the
    # original built was unnecessary.
    return " ".join(seq)
|
f30c62bd3e88821f74a53cbf80b69c729026c21a
| 553,786 |
def get_airtime(row):
    """Extract the airtime (seconds) from a row, clamping negatives to 0."""
    seconds = float(row['airtime'])
    if seconds < 0.0:
        return 0.0
    return seconds
|
afc01eb20a1305ff7baa72e29bbacb478b0d700a
| 193,088 |
def get_description_paragraph(soup):
    """Return the text of the <p id="VAR3"> description paragraph in a class soup."""
    paragraph = soup.find("p", id="VAR3")
    return paragraph.text
|
71d7af1f5932aa3bc936fb39c93eb4ca6258ad6d
| 374,474 |
def load_file(file: str) -> list:
    """
    Load whitespace-separated frequency changes from a file as integers.

    :param file: Name of file containing the list of integers
    :return: Python list of the integers in the file
    """
    with open(file) as handle:
        return [int(token) for token in handle.read().split()]
|
79c966e30f7caf801e337c79d2b7b896cf747902
| 297,672 |
def isAscii(b):
    """
    Check whether a byte value is printable ASCII or a LF/CR control byte.
    Argument : the byte value (int)
    Returns : Boolean
    """
    if b in (0x0a, 0x0d):
        return True
    return 0x20 <= b <= 0x7e
|
7b6b84bd0ca36582b98de0dc2c4ebd1c31144f3b
| 92,847 |
def select_query(x, read_seq, read_qual):
    """Selects query.
    If multiple matches are found, select the one with the smallest levenshtein
    distance. If multiple matches having the same smallest levenshtein
    distance, select the one with lowest sequencing quality sum at the
    mismatched nucleotides. If there's still a tie, select the first one.
    Returns None (implicitly) when no distance bucket contains a match.
    Parameters
    ----------
    x : dict
        A dictionary of fuzzy searching result. Keys are levenshtein distance.
        Values are list of matched barcodes.
    read_seq : str
        A DNA string (not whole read).
    read_qual : str
        A sequencing quality string (not whole read).
    Returns
    -------
    str
        Matched reference barcode.
    int
        Levenshtein distance.
    """
    # NOTE(review): iteration relies on dict insertion order standing in for
    # ascending levenshtein distance — confirm callers build `x` that way
    # (an earlier revision sorted the keys explicitly, see below).
    # for i in sorted(x.keys()):
    for i in x:
        if x[i]:
            if len(x[i]) == 1:
                # Unique match at this distance: done.
                return x[i][0], i
            else:
                # Tie-break: for each candidate barcode `ii`, sum the Phred
                # qualities (ord - 33) at positions where it mismatches the
                # read, then keep the candidate with the lowest sum
                # (first one wins on a further tie).
                s = [sum([ord(read_qual[idx]) - 33
                          for idx, val in enumerate(read_seq)
                          if ii[idx] != val]) for ii in x[i]]
                s = [idx for idx, val in enumerate(s) if val == min(s)][0]
                return x[i][s], i
|
cbc9eaf92d9cc532cdb13824215aa698bbd6b6fe
| 404,530 |
def dimensions_match(matrix1, matrix2):
    """
    Check whether two matrices have the same order (rows x columns).

    Arguments: matrix1 {Matrix} - The first Matrix object; matrix2 {Matrix} -
    the second Matrix object.

    Returns: boolean -- True when both the row and column counts agree.
    """
    # Short-circuit: only compare columns when the row counts already match.
    if matrix1.get_row_no() != matrix2.get_row_no():
        return False
    return matrix1.get_col_no() == matrix2.get_col_no()
|
a7ce6a7894b4ec8ff6827e8b802c2227909d9c60
| 631,331 |
def matTransposed(mat):
    """Return the transpose of an n-by-n matrix (tuple of tuples).

    >>> matTransposed(((1, 2), (3, 4)))
    ((1, 3), (2, 4))"""
    dim = len(mat)
    rows = []
    for col in range(dim):
        rows.append(tuple(mat[row][col] for row in range(dim)))
    return tuple(rows)
|
e74eb18d9bccda06b160bfdd9bd8b67564c56d06
| 174,448 |
def get_user_tables(conn):
    """Return all (schemaname, relname) rows from pg_stat_user_tables."""
    sql = "select schemaname, relname from pg_stat_user_tables;"
    with conn.cursor() as cursor:
        cursor.execute(sql)
        return cursor.fetchall()
|
36a8b76d2b0ed2c4f00bbdd9d61f6ac0d2ea2e60
| 579,077 |
def multiline_string_repr(string):
    """Return a source representation of *string*, preferring a triple-quoted
    multi-line form; fall back to repr() when the string contains both
    triple-quote styles."""
    for quotes in ('"""', "'''"):
        if quotes not in string:
            return "{q}{s}\n{q}\n".format(q=quotes, s=string)
    return repr(string) + "\n"
|
6a190c49777571bb3cb774b4a065bfec8f63326e
| 158,517 |
def df_swap_cols(df, columns):
    """Swaps the column by column name or index
    columns: Can be a tuple of column indexes or names
    ex. (2, -1) or ('a', 'b')
    """
    # NOTE(review): the two branches behave differently — the int branch
    # reorders the columns and rebinds df to a new selection, while the str
    # branch swaps the column *values* in place (the labels keep their
    # positions) and mutates the caller's frame. Confirm both are intended.
    first, second = columns
    cols = list(df.columns)
    if type(first) is int:
        cols[first], cols[second] = cols[second], cols[first]
        df = df[cols]
    elif type(first) is str:
        # Tuple RHS is evaluated before either assignment, so the old
        # Series objects are captured and the swap is safe.
        df[first], df[second] = df[second], df[first]
    return df
|
d6f3c9b38f2046e9d9633374e0165a54eb528508
| 576,663 |
def get_radec_cols(cat):
    """Return names of the RA and Dec columns for this catalog.

    Parameters
    ----------
    cat : Astropy Table object
        catalog to extract RA and Dec column names from

    Returns
    -------
    racol : str
        name of the RA column
    deccol : str
        name of the Dec column

    Raises
    ------
    ValueError
        If no recognized RA or Dec column name is present.
    """
    # Set of available column names for O(1) membership tests.
    names = set(cat.colnames)
    for v in ["ra", "RA", "ALPHA_J2000", "alpha_j2000"]:
        if v in names:
            racol = v
            break
    else:
        raise ValueError("RA column not found in", " ".join(cat.colnames))
    for v in ["dec", "DEC", "Dec", "DELTA_J2000", "delta_j2000"]:
        if v in names:
            deccol = v
            break
    else:
        # BUG FIX: this branch previously said "RA column not found".
        raise ValueError("Dec column not found in", " ".join(cat.colnames))
    return racol, deccol
|
8ebdd7e89d444e91b8d2dacf294200839a72dcc9
| 481,815 |
def make_play(play, board):
    """Write the word of a play onto the board and return the board.

    play is (score, (i, j), (di, dj), word); letters are placed starting
    at column i, row j and stepping by (di, dj) per letter. Mutates board.
    """
    _score, (col, row), (dcol, drow), word = play
    for offset, letter in enumerate(word):
        board[row + offset * drow][col + offset * dcol] = letter
    return board
|
fbc36b89dfebd526e02c3d790d0ac787e6228728
| 87,532 |
import json
def load_json_rcfile(fname):
    """Load a JSON run-control file and return the parsed object."""
    with open(fname, "r", encoding='utf-8') as handle:
        return json.load(handle)
|
b698f06ed0cd2b0e0097d010a0199aa607c8e340
| 113,384 |
from pathlib import Path
def cleaning(*, path: Path, extension: str, affix: str) -> None:
    """Deletes all files in a Path with certain extension and name termination.

    Parameters:
        path: Path - directory where to delete files
        extension: str - file suffix to match, including the dot (e.g. ".txt")
        affix: str - required ending of the file stem

    Raises:
        NotADirectoryError: if *path* is not an existing directory.

    Returns:
        None
    """
    # Validate with a real exception instead of `assert`, which is silently
    # stripped when Python runs with -O.
    if not path.is_dir():
        raise NotADirectoryError(f"Path: {path} does not exist")
    for filename in path.iterdir():
        if filename.suffix == extension and filename.stem.endswith(affix):
            filename.unlink()
    return None
|
a9f305090fd6a105dc9d2cb08a98b75fdf12f35b
| 589,453 |
def get_bit(byteval, index) -> bool:
    """Return True when bit `index` (0 = least significant) of `byteval` is set."""
    return bool((byteval >> index) & 1)
|
1fe020449ae2ae2513073835db6f75b24e558fdb
| 1,692 |
def get_width_height(image):
    """Return a (width, height) tuple for a nested-sequence image.

    NOTE(review): "width" here is the outer length and "height" the length of
    the first inner row, mirroring the original convention — confirm this
    matches how callers lay out their images.
    """
    return len(image), len(image[0])
|
4b45ec92d3a4b400d3b106e63d56fcbdef297760
| 692,309 |
def _get_task_file_name(task):
"""Returns the file name of the compile task. Eg: ${issue}-${patchset}.json"""
return '%s-%s-%s.json' % (task['lunch_target'], task['issue'],
task['patchset'])
|
a7df31c6312da0fec18e76099af1020fdbcbe325
| 667,697 |
def get_base_url(parsed):
    """Rebuild a URL string from a parsed URL, keeping only scheme, netloc, and path (dropping query/fragment)."""
    return "{0}://{1}{2}".format(parsed.scheme, parsed.netloc, parsed.path)
|
21df200efbd72eab7ea844e3d8b5466170eac5c9
| 627,723 |
def get_line_ABC_inter(line1, line2):
    """Intersect two lines given in general (ABC) form.

    line1: A1*x + B1*y + C1 = 0 as [A1, B1, C1]
    line2: A2*x + B2*y + C2 = 0 as [A2, B2, C2]

    Returns (x, y) via Cramer's rule with determinant B1*A2 - A1*B2,
    or None when the lines are parallel (zero determinant).
    """
    a1, b1, c1 = line1[0], line1[1], line1[2]
    a2, b2, c2 = line2[0], line2[1], line2[2]
    det = b1 * a2 - a1 * b2
    if det == 0:
        return None
    return ((c1 * b2 - b1 * c2) / det, (a1 * c2 - a2 * c1) / det)
|
291753265f253b0dd17fefd7de815b1b55686583
| 396,358 |
import warnings
def get_chromosome_reads(bam):
    """Collect per-chromosome read counts from a BAM index.

    Uses pysam's AlignmentFile.get_index_statistics() and emits a warning
    for any chromosome that has unmapped reads recorded.

    Parameters
    ----------
    bam : dict, It is the output of the function pysam.AlignmentFile(bamfile, "rb")

    Returns
    -------
    mapped_reads : dict[str, list]
        Syntax 'I' | [mapped, unmapped, total reads]
    """
    counts = {}
    for entry in bam.get_index_statistics():
        name = entry[0]
        counts[name] = [entry[1], entry[2], entry[3]]
        if entry[2] != 0:
            warnings.warn("Unmapped reads found in chromosome " + name)
    return counts
|
a94ac731e995b4a0ca8ecc16363af3428beb0df3
| 408,914 |
def output_name(pre_filename: str):
    """
    Derive the two prediction file names from a pre-disaster image name.

    Args:
        pre_filename: test_pre_*****.png
    Returns:
        test_localization_*****_prediction.png,
        test_damage_*****_prediction.png
    """
    def _derive(kind):
        # Substitute the task name for 'pre' and append the prediction suffix.
        return pre_filename.replace('pre', kind).replace('.png', '_prediction.png')
    return _derive('localization'), _derive('damage')
|
f8c5b04bb531f663167395e1068aadc9855448f1
| 288,547 |
from typing import List
def calculate_node_correctness(pairs: List, num_correspondence: int) -> float:
    """
    Calculate node correctness given estimated correspondences.

    Args:
        pairs: a list of pairs of nodes
        num_correspondence: the real number of correspondences
    Returns:
        node_correctness: the percentage of correctly-matched nodes
    """
    matched = sum(1 for pair in pairs if pair[0] == pair[1])
    return matched / num_correspondence
|
3fba7c4e7f7c30804a539b3becd61780e1207d7b
| 432,904 |
from typing import Generator
def gen(*args: int) -> Generator[int, None, None]:
    """Yield each positional argument in order (arguments as a generator)."""
    yield from args
|
35ec3c24e6a3f7897618c8b4ddb849097f6682b4
| 130,306 |
def trace(X_ref, Y_ref):
    """
    Slope and intercept of the WFC3 G141 grism spectral trace, given the
    position (X_ref, Y_ref) of the direct image in physical pixels.

    Returns [DYDX_0, DYDX_1] where the trace is y = DYDX_0 + DYDX_1 * x.
    See also: https://ui.adsabs.harvard.edu/abs/2009wfc..rept...18K/abstract
    """
    # Beam A pixel extent (41..248) — informational, not used in the fit.
    # Intercept: constant + linear terms in X_ref and Y_ref.
    c0 = (-3.55018E-01, 3.28722E-05, -1.44571E-03)
    # Slope: full quadratic in X_ref and Y_ref.
    c1 = (1.42852E-02, -7.20713E-06, -2.42542E-06,
          1.18294E-09, 1.19634E-08, 6.17274E-10)
    intercept = c0[0] + c0[1]*X_ref + c0[2]*Y_ref
    slope = (c1[0] + c1[1]*X_ref + c1[2]*Y_ref
             + c1[3]*X_ref**2 + c1[4]*X_ref*Y_ref + c1[5]*Y_ref**2)
    return [intercept, slope]
|
da2f0f040804da451e860392456ecd31718ba1a0
| 217,880 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.