content | sha1 | id
---|---|---|
def strategy_best(cookies, cps, history, time_left, build_info):
"""
Always buy the item offering the lowest cost per cps in the time left.
"""
info_object = build_info.clone()
items_list = info_object.build_items()
item_to_buy = None
min_cost_per_cps = float("inf")
for item in items_list:
item_cost = info_object.get_cost(item)
item_cps = info_object.get_cps(item)
item_cost_per_cps = item_cost / item_cps
if (item_cost - cookies) / cps > time_left:
continue
elif item_cost_per_cps < min_cost_per_cps:
min_cost_per_cps = item_cost_per_cps
item_to_buy = item
return item_to_buy | 535c5fb89a3abaa64fa6e99d853b09eff7e3b5a1 | 296,688 |
def normalize_rbf(rbf_obj, X_scaler, Y_scaler):
"""
Generate a normalized RBF model.
Args:
rbf_obj (`Rbf` type): Original RBF model.
X_scaler (`sklearn.preprocessing` scaler type or None): Scaler for input X.
If None, then no normalization will be performed on the model input.
Y_scaler (`sklearn.preprocessing` scaler type or None): Scaler for output Y.
If None, then no normalization will be performed on the model output.
Returns:
norm_rbf_mod (callable): Normalized Rbf model.
"""
def norm_rbf_mod(X):
"""
Normalized RBF model.
Args:
X (2d array): Points to be evaluated. Each row is one point.
Returns:
Y (1d array): Function evaluations at points in `X`.
"""
        assert X.ndim == 2
if X_scaler is not None:
X = X_scaler.transform(X)
Y = rbf_obj(*X.T)
if Y_scaler is not None:
Y = Y_scaler.inverse_transform(Y.reshape((-1, 1))).flatten()
return Y
return norm_rbf_mod | a867307ab791ea2192d1c23273897f67958ae913 | 530,224 |
def calculate_length(data):
    """
    The LDAP protocol doesn't send the total length of the message in the header;
    it only sends raw ASN.1 encoded data structures, which have the length encoded.
    This function decodes the length of the ASN.1 structure and returns it as an int.
    """
if data[1] <= 127:
return data[1] + 2
else:
bcount = data[1] - 128
        #if (bcount + 2) > len(data):
        #    raise Exception('LDAP data too large! Length byte count: %s' % bcount)
return int.from_bytes(data[2:2+bcount], byteorder = 'big', signed = False) + bcount + 2 | 7fdb310c2aee5fbf9a642be13fe8021bc365ed85 | 578,959 |
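A quick sanity check with hand-built ASN.1 headers (hypothetical byte strings):
# short form: one length byte (5 content bytes + 2 header bytes)
assert calculate_length(bytes([0x30, 0x05, 0, 0, 0, 0, 0])) == 7
# long form: 0x82 means the next 2 bytes hold the length (0x0100 = 256),
# so the total is 256 content bytes + 2 length bytes + 2 header bytes
assert calculate_length(bytes([0x30, 0x82, 0x01, 0x00])) == 260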
def iid_divide(data, g):
"""divide list data among g groups each group has either int(len(data)/g)
or int(len(data)/g)+1 elements returns a list of groups."""
num_elems = len(data)
group_size = int(len(data) / g)
num_big_groups = num_elems - g * group_size
num_small_groups = g - num_big_groups
glist = []
for i in range(num_small_groups):
glist.append(data[group_size * i:group_size * (i + 1)])
bi = group_size * num_small_groups
group_size += 1
for i in range(num_big_groups):
glist.append(data[bi + group_size * i:bi + group_size * (i + 1)])
return glist | 648e5c813fb4021b7b0b544cf12a9b11a485ccb3 | 137,490 |
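A quick demonstration of the split (the smaller groups come first):
groups = iid_divide(list(range(10)), 3)
assert [len(g) for g in groups] == [3, 3, 4]
assert groups[2] == [6, 7, 8, 9]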
import requests
def make_request(method, url, session=None, args=None):
    """
    Make an HTTP request and return the response if it was successful
:param method:
:param url:
:param session:
:param args: Dict of request arguments like data, params, headers etc
:return: response, session
"""
assert method and url, "Cannot create request without method and url"
if not session:
session = requests.Session()
    response = session.request(method=method, url=url, **(args or {}))
if response.status_code == 200:
return response, session
else:
print("Got invalid response, status-{}, \nresponse-{}".format(response.status_code,
response.text))
return None, session | 57f7a3ebbd6c1a6ab31b3e496d79ba063ea37215 | 407,966 |
def empty_cells(state):
"""
Each empty cell will be added into cells' list
:param state: the state of the current board
:return: a list of empty cells
"""
cells = []
for x, row in enumerate(state):
for y, cell in enumerate(row):
if cell == 0:
cells.append([x, y])
return cells | b3e3944d9dd3c699ff2bfbffbd66407e1bda7d4a | 49,659 |
def edit_distance_with2ops(string1, string2, w_del=100, w_ins=49):
"""
Called by loanpy.helpers.Etym.rank_closest_phonotactics and \
loanpy.qfysc.Qfy.get_phonotactics_corresp. \
Takes two strings and calculates their similarity by \
only allowing two operations: insertion and deletion. \
In line with the "Threshold Principle" by Carole Paradis and \
Darlene LaCharité (1997) \
the distance is weighted in a way that two insertions are cheaper than \
one deletion: "The problem is really not very different from the dilemma \
of a landlord stuck with a limited budget for maintenance and a building \
which no longer meets municipal guidelines. Beyond a certain point, \
renovating is not viable (there are too many steps to be taken) and \
demolition is in order. Similarly, we posit that I) languages have \
    a limited budget for adapting ill-formed phonological structures, \
and that 2) the limit for the budget is universally set at two steps, \
beyond which a repair by 'demolition' may apply. In other words, we \
predict that a segment is deleted if (but only if) its rescue is too \
costly in terms of the Threshold Principle" (p.385, Preservation \
and Minimality \
in Loanword Adaptation, \
Author(s): Carole Paradis and Darlene Lacharité, \
Source: Journal of Linguistics , Sep., 1997, Vol. 33, No. 2 (Sep., 1997), \
pp. 379-430, \
Published by: Cambridge University Press, \
Stable URL: http://www.jstor.com/stable/4176422). \
The code is based on a post by ita_c on \
https://www.geeksforgeeks.org/edit-distance-and-lcs-longest-common-subsequence\
(last access: June 8th, 2022)
:param string1: The first of two strings to be compared to each other
:type string1: str
:param string2: The second of two strings to be compared to each other
:type string2: str
:param w_del: weight (cost) for deleting a phoneme. Default should \
always stay 100, since only relative costs between inserting and deleting \
count.
:type w_del: int | float, default=100
:param w_ins: weight (cost) for inserting a phoneme. Default 49 \
is in accordance with the "Threshold Principle": \
2 insertions (2*49=98) are cheaper than a deletion \
(100).
:type w_ins: int | float, default=49.
:returns: The distance between two input strings
:rtype: int | float
:Example:
>>> from loanpy.helpers import edit_distance_with2ops
>>> edit_distance_with2ops("hey","hey")
0
>>> from loanpy.helpers import edit_distance_with2ops
>>> edit_distance_with2ops("hey","he")
100
>>> from loanpy.helpers import edit_distance_with2ops
>>> edit_distance_with2ops("hey","heyy")
49
"""
m = len(string1) # Find longest common subsequence (LCS)
n = len(string2)
L = [[0 for x in range(n + 1)]
for y in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if (i == 0 or j == 0):
L[i][j] = 0
elif (string1[i - 1] == string2[j - 1]):
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j],
L[i][j - 1])
lcs = L[m][n]
    # Edit distance is (delete operations * w_del) + (insert operations * w_ins);
    # costs (=distance) are lower for insertions
    return (m - lcs) * w_del + (n - lcs) * w_ins | 1fd92f1940fa92bc36f5b74439fff97fc4eecdd5 | 502,653 |
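A worked case mixing both operations: turning "abc" into "axc" keeps the LCS "ac" (length 2), so one deletion and one insertion are charged:
assert edit_distance_with2ops("abc", "axc") == (3 - 2) * 100 + (3 - 2) * 49  # 149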
def get_testsuite_name(suite):
"""
    Returns the name to be used for the given testsuite. This is made up of
    the class name plus, if it exists, the result of the "suite_name()" method.
:param suite: Suite object whose name is needed
:type suite: ``testsuite``
:return: Name of given suite
:rtype: ``str``
"""
name = suite.__class__.__name__
if hasattr(suite, 'suite_name') and\
callable(getattr(suite, 'suite_name')) and\
suite.suite_name() is not None:
return '{} - {}'.format(name, suite.suite_name())
return name | 11ff05fc66231a71770167fd37fdd1e3db242532 | 141,131 |
from datetime import datetime
def add_current_time_to_state(state):
"""Annotate state with the latest server time"""
state['currentTime'] = datetime.now().timestamp()
return state | f61fa2d7b127fbebb22fc277e16f2f79cdd4acd3 | 11,398 |
def lookup_journal(tex):
"""
Return the name or acronym of the journal, given its tex macro.
"""
plaintext = tex.replace('\\','')
lookup = {'apj':'ApJ', 'apjs':'ApJS', 'aj':'AJ', 'mnras':'MNRAS', 'nature':'Nature', 'aap':'A&A', 'pasp':'PASP', 'jcap':'JCAP'}
    try:
        journal = lookup[plaintext]
    except KeyError:
        journal = plaintext
return journal | ebfa0acf29a0a7f922ca8a51001fe9b75be84a02 | 343,648 |
def get_coordinates(array_islands):
"""
First extract the connection (edge) that every island connects to, then
extract the coordinate of that island. Edges are returned in an array of
tuples, coordinates in a dictionary of arrays.
"""
# retrieve the connection
edges = []
for index, isle in enumerate(array_islands[1:], start=1):
edges.append((index, isle.connection))
# retrieve the coordinates
pos = dict((index, [*isle.coord]) for index, isle in enumerate(array_islands))
return edges, pos | 98a7cae04a93cb63a2038741afd67ad8f12d0cf4 | 140,551 |
def mag(x):
"""
Returns the magnitude of vector x
"""
return sum([i ** 2 for i in x]) ** 0.5 | 8fa9613d1895380cb692dc88ceb8d690289792b6 | 367,253 |
def bytesPad(text, size=8, pad=0):
"""Convert a string to bytes and add pad value if necessary to make the length up to size.
"""
text_as_bytes = text.encode("utf-8")
if len(text_as_bytes) >= size:
return text_as_bytes
else:
return text_as_bytes + bytes([pad] * (size - len(text_as_bytes))) | dcade27ff442106436c1ed7f453d1ecb51025f8e | 441,591 |
def replace_unknown_token(token, dictionary):
""" Checks whether the token is in the given dictionary.
If not it is replaced by an UNK token
Arguments:
token {string} -- the token to be checked and eventually replaced
dictionary {dict} -- the dictionary
Returns:
string -- the new token
"""
if token not in dictionary:
return "_unk_"
return token | f82868b1e78eebb47177881f3281ae0fbffa6a17 | 466,496 |
from typing import List
from typing import Optional
def diff(got_lines: List[str], expected_lines: List[str]) -> Optional[str]:
"""
Report a difference between the ``got`` and ``expected``.
Return None if no difference.
"""
if got_lines == expected_lines:
return None
result = []
result.append("Expected:")
    for i, line in enumerate(expected_lines):
        if i >= len(got_lines) or line != got_lines[i]:
            result.append("DIFF: {:2d}: {!r}".format(i, line))
        else:
            result.append("OK  : {:2d}: {!r}".format(i, line))
    result.append("Got:")
    for i, line in enumerate(got_lines):
        if i >= len(expected_lines) or line != expected_lines[i]:
            result.append("DIFF: {:2d}: {!r}".format(i, line))
        else:
            result.append("OK  : {:2d}: {!r}".format(i, line))
return "\n".join(result) | 23cf24d96c5e52473117575998bf5a26c24c9b16 | 366,823 |
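A minimal usage sketch:
assert diff(["a"], ["a"]) is None
report = diff(["a", "x"], ["a", "b"])
assert report is not None and report.startswith("Expected:")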
import re
def isStopGroup(group):
"""
Takes in the text of a group from the Content of Annual Reports section of
an official statement, and returns whether or not this is a 'stop group',
i.e. a group that doesn't actually list a requirement.
"""
if re.search("(?i)in addition to any of the information expressly required", group):
return True
if re.search("(?i)any or all of the items listed above", group):
return True
if re.search("(?i)to the extent not .{2,10} in the audited financial statements", group):
return True
return False | 61915c9bcafcb21417468d66867a590047e96f73 | 453,969 |
from typing import List
import pathlib
def _filename_matches(filepath: str, allowed_filenames: List[str]) -> bool:
"""Find if a filepath matches a list of filenames.
Args:
filepath, zipped or unzipped ex: path/results.js, path/results.json.gz
allowed_filenames: List of filenames, all unzipped
ex: [resolvers.json, blockpages.json]
Returns:
boolean, whether the filepath matches one of the list.
Zipped matches to unzipped names count.
"""
filename = pathlib.PurePosixPath(filepath).name
if '.gz' in pathlib.PurePosixPath(filename).suffixes:
filename = pathlib.PurePosixPath(filename).stem
return filename in allowed_filenames | 267ff88d8e3b2d76c869dd43a7b74205bf88bf16 | 297,649 |
def npv(Rn, i, i0, pe=0):
"""Net present value (NPV) is the difference between
the present value of cash inflows and the present value
of cash outflows over a period of time.
Args:
Rn: Expected return list
i: Discount rate
i0: Initial amount invested
pe: Profit or expense at the end of investment
Returns:
Net present value
Example:
Given the expected return list `Rn`, `i` as
discount rate, and `i0` as initial amount invested
    you can calculate NPV like this:
>>> import malee
>>> malee.npv([5000, 8000, 12000, 30000], 0.05, 40000)
7065.266015703324
"""
    npv_sum = 0
    for idx, Ri in enumerate(Rn):
        # discount each cash flow; fold any final profit/expense `pe` into the last one
        cash_flow = Ri + pe if idx == len(Rn) - 1 else Ri
        npv_sum += cash_flow / ((1 + i) ** (idx + 1))
    return npv_sum - i0 | 1edc20f1418bb8fec48639d3f3f478ce1949dbe1 | 92,594 |
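Reproducing the docstring example, plus a hypothetical terminal cash flow via `pe`:
assert round(npv([5000, 8000, 12000, 30000], 0.05, 40000), 6) == 7065.266016
# with pe, the final year's flow becomes 30000 + 5000, discounted together
assert npv([5000, 8000, 12000, 30000], 0.05, 40000, pe=5000) > 7065.27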
from pathlib import Path
def expand_paths(path_expr):
"""Given a path expression, return a list of paths."""
path = Path(path_expr).expanduser()
if path.is_file():
return [path]
return Path(path.root).glob(
str(Path("").joinpath(*path.parts[1:] if path.is_absolute() else path.parts))
) | 9851389f6000bfb1f003fba7bba883d679a61474 | 329,174 |
import ast
def chromosomesInCluster(hotspots):
"""
Determine chromosomes in a hotspots file
:param hotspots: str
Hotspots filename
:return: dict
Chromosomes and first last positions within the chromosomes in the cluster
"""
    chromosomes = dict()
    with open(hotspots, 'r') as fhandle:
        for line in fhandle:
            point = ast.literal_eval(line)
            chromosome = point['chromosome']
            position = point['position']
            if chromosome in chromosomes:
                values = chromosomes[chromosome]
                chromosomes[chromosome] = (min(values[0], position), max(values[1], position + 1))
            else:
                chromosomes[chromosome] = (position, position + 1)
    return chromosomes | 62d702fe4e1358dbc1c2fc62a8254a3579b34be8 | 625,118 |
def flatten_list_of_lists(some_list, remove_duplicates=False, sort=False):
"""
Convert a list of lists into a list of all values
:param some_list: a list such that each value is a list
:type some_list: list
:param remove_duplicates: if True, return a unique list, otherwise keep duplicated values
:type remove_duplicates: bool
:param sort: if True, sort the list
:type sort: bool
    :return: a list containing all values in the provided list of lists
"""
data = [item for sublist in some_list for item in sublist]
    if remove_duplicates:
        if sort:
            return sorted(set(data))
else:
ans = []
for value in data:
if value not in ans:
ans.append(value)
return ans
elif sort:
return sorted(data)
return data | 20fe701870fa087706afa393c0bb25b73f3f3bae | 517,624 |
def EnumValueCrossRefLabel(enum_value_name):
"""Enum value cross reference label."""
return 'envoy_api_enum_value_%s' % enum_value_name | 1cb8bb25e6f3aafe536e8bcbc2588dba66914bd5 | 184,502 |
def needs_reversal(chain):
"""
Determine if the chain needs to be reversed.
This is to set the chains such that they are in a canonical ordering
Parameters
----------
chain : tuple
A tuple of elements to treat as a chain
Returns
-------
needs_flip : bool
Whether or not the chain needs to be reversed
"""
x = len(chain)
if x == 1:
first = 0
second = 0
else:
q, r = divmod(x, 2)
first = q - 1
second = q + r
while first >= 0 and second < len(chain):
if chain[first] > chain[second]:
# Case where order reversal is needed
return True
elif chain[first] == chain[second]:
# Indeterminate case
first -= 1
second += 1
else:
# Case already in the correct order
return False
return False | b5ef8841fe01b9608b16b6823f103de56e654fa6 | 688,847 |
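The comparison walks outward from the middle, so the first decisive pair wins:
assert needs_reversal((3, 2, 1)) is True    # outer pair out of order
assert needs_reversal((1, 2, 3)) is False   # already canonical
assert needs_reversal((2, 1, 2)) is False   # fully indeterminate, kept as-is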
def _are_anagrams(word1, word2):
"""Check if word1 and word2 are anagrams."""
    return sorted(word1) == sorted(word2) | 6e9f9c11406c86a45b4b69046278a6e5c3f1d39a | 257,318 |
def get_value(header_dict, header_name, default=None, strip_value=""):
""" Return last header value, or default value (None)
If strip_value, remove strip_value (e.g., 'keep-alive') if it occurs in addition to other values
"""
if header_name in header_dict:
value = header_dict[header_name][-1]
if value and strip_value:
new_value = ", ".join(x.strip() for x in value.split(",") if x.strip().lower() != strip_value.lower())
return new_value or value
else:
return value
else:
return default | 8f78b6312ca899cbfe4aa1f90d6d43cc1669be1c | 161,295 |
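A small sketch with a hypothetical header dict (values stored as lists, most recent last):
headers = {"connection": ["close", "keep-alive, upgrade"]}
assert get_value(headers, "connection", strip_value="keep-alive") == "upgrade"
assert get_value(headers, "missing", default="-") == "-"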
def project_sorted(row, sorted_cols):
"""Extract the values in the ORDER BY columns from a row.
"""
return [ row[col] for col in sorted_cols ] | d3e86dee5a79a7224098c7046863808b999df0ec | 507,764 |
def build_header(*headers):
"""
Combine all specified headers - headers ORDER is CRUCIAL
:param headers: NSH headers
:type headers: `:class:nsh.common.*HEADER`
:return bytes
"""
composite_header = b''
for header in headers:
composite_header += header.build()
return composite_header | fafa9ea3287fa8f6b7c6fa572f93d5439aa9644d | 184,431 |
def parse_error_messages(e):
"""
Takes an error and parses its messages in human-readable form.
Where there are message dictionaries, separates out each message,
parsing the keys into sentence case with title capitalisations
:param e:
:type e: django.core.exceptions.ValidationError
:returns: A list of error messages
"""
response = []
if hasattr(e, "message_dict"):
for key, value in e.message_dict.items():
msg = f'{key.replace("_", " ").title()}:'
for v in value:
msg += f" {v}"
response.append(msg)
else:
response.append(str(e))
return response | 573bbf4a0ebf7f72edec517e561e62e390367e37 | 148,753 |
import configparser
def config_parser(config_file):
"""
Configuration file parsing.
Returns dictionary with configuration parameters:
'one_auth_file' - Opennebula sessions credential file path,
'key_file' - AES key file path,
'workers_quantity' - ZMQ workers quantity,
'server_ip' - IP address for ZMQ routing server binding,
'server_port' - Port number for ZMQ routing server binding,
'pidfile' - PID file path,
'vm_user' - VM user name,
'password_size' - Size password for VM users,
'password_complexity' - Complexity password for VM users(bool),
'loggerconf_file' - Logger configuration file path.
"""
config = configparser.ConfigParser()
config.read(config_file)
    config_dict = {'one_auth_file': config.get('auth_file','one_auth_file'),
'key_file': config.get('auth_file','key_file'),
'workers_quantity': int(config.get('zmq_workers_quantity','workers_quantity')),
'server_ip': config.get('ip_address_port','server_ip'),
'server_port': config.get('ip_address_port','server_port'),
'pidfile': config.get('pid_file','pidfile'),
'vm_user': config.get('vm_user_name','vm_user'),
'password_size': int(config.get('password_vm_users','password_size')),
'password_complexity': config.getboolean('password_vm_users','password_complexity'),
'loggerconf_file': config.get('logger_config_file','loggerconf_file')
}
    return config_dict | 8847d8344caeed24b6673dc34712430b8892d18b | 28,641 |
def camel2snake(s):
"""Convert a camelCase name to snake_case"""
o = ''
lastcap = False
for letter in s:
lc = letter.lower()
        if lc == '-':
            lc = '_'
            lastcap = True
elif not lc.isalpha():
lastcap = True
elif lc != letter:
if not lastcap:
o += '_'
lastcap = True
else:
lastcap = False
o += lc
return o | d0c97c8183649af037714c6faafcfa441c9aa308 | 309,555 |
def generate_example(tmpdir, depth=5):
"""
Generate a directory structure containing a text file at the specified
depth
"""
dir = tmpdir.mkdir('test_dir')
for i in range(depth):
dir = dir.mkdir(f'test_dir{i}')
p = dir.join('test_file.txt')
p.write('Lorem Ipsum')
return p | 3a06420a88bd82ba41faa124ec3316214ad06100 | 340,283 |
def cut_extension(filename, ext):
"""
If filename has extension ext (including the possible dot),
it will be cut off.
"""
file = filename
index = filename.rfind(ext)
if 0 <= index and len(file)-len(ext) == index:
file = file[:index]
return file | 0f04ae7260f898c2ce79c9f4a2cf79538a601a48 | 298,483 |
def gc_frac(guide_seq):
"""Compute fraction of guide that is GC.
Args:
guide_seq: string of guide sequence; must be all uppercase
Returns:
fraction of guide sequence that is G or C
"""
gc = guide_seq.count('G') + guide_seq.count('C')
return float(gc) / len(guide_seq) | 596a3ae82ade23b6da583163ff7500ca4b8ffeb1 | 457,409 |
import re
def clean_text(text):
"""
Remove code blocks, urls, and html tags.
"""
text = re.sub(r'<code[^>]*>(.+?)</code\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
text = re.sub(r'<div[^>]*>(.+?)</div\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
text = re.sub(r'<blockquote[^>]*>(.+?)</blockquote\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
text = re.sub('<.*?>', '', text)
    text = text.replace('&quot;', '"')
text = re.sub(r'http\S+', '', text)
text = re.sub(r'www.\S+', '', text)
return text | 91934ecd7e5d037be1198bc645da8e507b5955ce | 35,503 |
def _get_python_object_id(item):
"""Returns a unique id for a Python Object (used in cloning)"""
return id(item) | 896daace5e05e31287cc8f0784cc77b7aedcd808 | 490,428 |
def conflict_annotation(expt, loc):
"""
Returns conflict rate of an experiment as a string percentage. Only does
this if the experiment is EPaxos, as MPaxos has no conflict rate.
"""
if expt.is_epaxos():
conflict_rate = '{}%'.format(round(expt.conflict_rate(loc)*100, 1))
return conflict_rate
return None | 620939565f5bac3bb278c74cb085d85faba2bdbc | 636,180 |
def get_ref_alt_allele(ref, alt, pos):
"""
Description:
Helper function to index REF and ALT alleles with genomic positions.
Arguments:
ref list: REF alleles.
alt list: ALT alleles.
pos list: Genomic positions.
Returns:
ref_allele dict: REF alleles.
alt_allele dict: ALT alleles.
"""
ref_allele = dict()
alt_allele = dict()
for i in range(len(pos)):
r = ref[i]
a = alt[i]
p = pos[i]
ref_allele[p] = r
alt_allele[p] = a
return ref_allele, alt_allele | 1834ca9b16c03e82e61aece45882599712e81943 | 250,675 |
def str_to_bool(string):
"""Convert a string to a boolean value."""
if string.upper() in ["1", "ON", "TRUE", "YES"]:
return True
return False | bfd0858b1cd1bb6349abbec2dbf2c0b15b1f19d6 | 670,868 |
def get_message(tweet):
"""
Robustly get a message on a tweet.
Even if not extended mode or is a retweet (always truncated).
"""
try:
return tweet.full_text
except AttributeError:
return tweet.text | 58a6acd676f47350f51a5be81989c2500cabdb12 | 364,296 |
def output_meta(face_meta, output_path, overwrite=True):
""" Output face metadata into a .csv file
Parameters
----------
face_meta : dict of {int : dict of {str: float}}}
Mapping *<face_id>* to a :class:`dict`, which contains the metadata and face detection and
age estimation results of the output faces.
Each result is a :class:`dict` with fields frame_id, min_x, min_y, width, height, confidence and estimated_age
output_path : Path
Path to the output face metadata file
overwrite : bool
True if overwriting the existing file at :attr:`output_path`, default True
Returns
-------
success : bool
Whether the video file is generated
"""
if output_path.exists() and not overwrite:
return False
with open(str(output_path), 'w') as fout:
fout.write('FaceID,FrameID,Min_X,Min_Y,Width,Height,Confidence,EstimatedAge\n')
for face_id, info in face_meta.items():
fout.write('{},{},{},{},{},{},{},{}\n'.format(face_id, info['frame_id'],
info['min_x'], info['min_y'],
info['width'], info['height'],
info['confidence'], info['estimated_age']))
return True | d99fa81a0f1ec0b5c403630772652c27357809a7 | 387,047 |
import time
def get_ts_from_hour(time_str, f="%Y%m%d%H"):
"""
ex. 2016010212(str) -> timestamp * 1000(int)
"""
return int(time.mktime(time.strptime(time_str, f)) * 1000) | 5e34e7e2497e2714de1fb4ead6853d136921f1ca | 215,694 |
def get_absolute_name(package, relative_name):
"""Joins a package name and a relative name.
Args:
package: A dotted name, e.g. foo.bar.baz
relative_name: A dotted name with possibly some leading dots, e.g. ..x.y
Returns:
The relative name appended to the parent's package, after going up one
level for each leading dot.
e.g. foo.bar.baz + ..hello.world -> foo.hello.world
The unchanged relative_name if it does not start with a dot
or has too many leading dots.
"""
path = package.split('.') if package else []
name = relative_name.lstrip('.')
ndots = len(relative_name) - len(name)
if ndots > len(path):
return relative_name
absolute_path = path[:len(path) + 1 - ndots]
if name:
absolute_path.append(name)
return '.'.join(absolute_path) | ee10dbc354f3a4d7dde0b1e79d08268f5707b8cf | 184,082 |
def dBmtomw(value):
"""
    Converts dBm to mW
value: dBm value
returns: mW value
"""
return round(10**(value/10),3) | cb62166d91112d61c549a469dad97a1a4c91644a | 491,927 |
def all_valid(formsets):
"""Validate every formset and return True if all are valid."""
# List comprehension ensures is_valid() is called for all formsets.
return all([formset.is_valid() for formset in formsets]) | 3cffd9879143e4879794e86bbb65e49f4f2fd975 | 9,281 |
from typing import Dict
def parse_links_header(link_header: str) -> Dict[str, str]:
"""
Parses the `Link` HTTP header and returns a map of the links. Logic from
[PageLinks.java](https://github.com/eclipse/egit-github/blob/master/org.eclipse.egit.github.core/src/org/eclipse/egit/github/core/client/PageLinks.java#L43-75).
"""
links = {}
for link in link_header.split(','):
segments = link.split(';')
if len(segments) < 2:
continue
link_part = segments[0].strip()
if not link_part.startswith('<') or not link_part.endswith('>'):
continue
link_part = link_part[1:-1]
for rel in (x.strip().split('=') for x in segments[1:]):
if len(rel) < 2 or rel[0] != 'rel':
continue
rel_value = rel[1]
if rel_value.startswith('"') and rel_value.endswith('"'):
rel_value = rel_value[1:-1]
links[rel_value] = link_part
return links | 117367cdd98ccc29afb3aa5bc0ed7eaaddeb7ae0 | 281,012 |
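A sketch with a GitHub-style pagination header (hypothetical URLs):
header = ('<https://api.example.com/repos?page=3>; rel="next", '
          '<https://api.example.com/repos?page=50>; rel="last"')
assert parse_links_header(header) == {
    "next": "https://api.example.com/repos?page=3",
    "last": "https://api.example.com/repos?page=50",
}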
def normalize(ys, amp=1.0):
"""Normalizes a wave array so the maximum amplitude is +amp or -amp.
ys: wave array
amp: max amplitude (pos or neg) in result
returns: wave array
"""
high, low = abs(max(ys)), abs(min(ys))
return amp * ys / max(high, low) | c247cf22438e32b98cc883a17932aecd5ddeaca1 | 605,872 |
import re
def camel_to_underline(name):
"""
Converts CamelCase to underline. As for example CamelCase --> camel_case.
:param name: (str) Name to be converted
    :return: Converted name in lower case with underscores
:rtype: str
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | 489bf310760df7fc89c82c2f848867d95d975d5a | 353,953 |
def part2(allergens):
"""Part 2 wants the list of unsafe ingredients, sorted by their
allergen alphabetically, joined together into one string by commas.
"""
return ",".join(
ingredient for (allergen, ingredient) in sorted(allergens.items())
) | 4d1d1b32b673fabc47fc03fef6d32343f0a49532 | 642,492 |
import typing
def minimum(pixel: typing.Tuple[int, int, int]) -> int:
"""Sort on the minimum RGB value of a pixel (either the R, G or B)."""
return min(pixel[0], pixel[1], pixel[2]) | f833bc0ff9571727e3b1f0dd47acfaae4c8be8ae | 135,802 |
def top(df, value, limit, order='asc', group=None):
"""
Awesome method that achieves what NO query in any language can do: (DRUM ROLL)
Get the top or flop N results based on a column value for each specified group columns
Args:
- group: String or array of strings for the columns,
on which you want to perform the group operation
- value: String for the column name on which you will rank the results
    - order: String 'asc' or 'desc' to sort by ascending or descending order
- limit: Number to specify the N results you want to retrieve.
Use a positive number x to retrieve the first x results.
Use a negative number -x to retrieve the last x results.
"""
ascending = order != 'desc'
limit = int(limit)
filter_func = 'nlargest' if (limit > 0) ^ ascending else 'nsmallest'
def _top(df):
return getattr(df, filter_func)(abs(limit), value).sort_values(by=value,
ascending=ascending)
if group is None:
df = _top(df)
else:
df = df.groupby(group).apply(_top)
return df | 6c225bd00da1051044850cd49c646621cbb90b4f | 296,432 |
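A usage sketch on a toy DataFrame (assumes pandas is available as pd):
import pandas as pd
df = pd.DataFrame({"city": ["a", "a", "b", "b"], "pop": [1, 9, 4, 2]})
# one row per city: the largest "pop" in each group
print(top(df, value="pop", limit=1, order="desc", group="city"))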
def correlation(x, y):
"""
Fill in this function to compute the correlation between the two
input variables. Each input is either a NumPy array or a Pandas
Series.
correlation = average of (x in standard units) times (y in standard units)
Remember to pass the argument "ddof=0" to the Pandas std() function!
"""
std_x = (x - x.mean()) / x.std(ddof=0)
std_y = (y - y.mean()) / y.std(ddof=0)
return (std_x * std_y).mean() | 669f32eda3fa9508cefe0ea7e1b80c04523e6935 | 73,616 |
import torch
def squash(tensor, dim=-1):
""" Squash function as defined in [1].
Args:
tensor (Tensor): Input tensor.
dim (int, optional): Dimension on which to apply the squash function. Vector dimension. Defaults to the last.
"""
squared_norm = (tensor ** 2).sum(dim=dim, keepdim=True)
scale = squared_norm / (1. + squared_norm)
return scale * tensor / torch.sqrt(squared_norm + 1e-7) | 59a75dff6159afe7cfa8237b5ba9d05384c3e5a5 | 219,174 |
def booleanize(value: float):
""" Convert a given floating point value to its equivalent Boolean (1 if v > 0 else 0) """
return 1 if value > 0 else 0 | 2ddca922df073b4783c56eec0bfda6bfd5c3dc8c | 240,975 |
import requests
def url_download(url: str, write_location: str, filename: str) -> None:
"""Downloads a file from a URL and saves it locally.
Args:
url: A string that points to the location of a temp mapping file that needs to be processed.
write_location: A string that points to a file directory.
filename: A string containing a filepath for where to write data to.
Returns:
None.
"""
print('Downloading Data from {}'.format(url))
r = requests.get(url, allow_redirects=True, verify=False)
    with open(write_location + filename, 'wb') as outfile:
        try:
            outfile.write(r.content)
        except OSError:
            # very large payloads can fail in a single write; fall back to 1 GB blocks
            block_size = 1000000000
            for i in range(0, len(r.content), block_size):
                outfile.write(r.content[i:i + block_size])
    return None | 4501e97b0d932a5bd02825549b7b59f74afaf960 | 540,330 |
def _GetKeyKind(key):
"""Return the kind of the given key."""
return key.path().element_list()[-1].type() | c37f1d889e484390449de682e3d6c6b9d4521ce4 | 707,781 |
def af_subtraction(ch1, ch2, m, c):
"""
Subtract ch2 from ch1
ch2 is first adjusted to m * ch2 + c
    :param ch1: first channel (signal)
    :param ch2: second channel, to be scaled and subtracted
    :param m: multiplicative factor applied to ch2
    :param c: constant offset added to m * ch2
    :return: ch1 minus the adjusted ch2
"""
af = m * ch2 + c
signal = ch1 - af
return signal | ebf11ffdc9d64860179407a48d7351bacf35492d | 657,209 |
def serialize_measurement(measurement):
"""Serializes a `openff.evaluator.unit.Measurement` into a dictionary of the form
`{'value', 'error'}`.
Parameters
----------
measurement : openff.evaluator.unit.Measurement
The measurement to serialize
Returns
-------
dict of str and str
A dictionary representation of a openff.evaluator.unit.Measurement
with keys of {"value", "error"}
"""
return {"value": measurement.value, "error": measurement.error} | 69eedd9006c63f5734c762d6113495a913d5a8c4 | 2,935 |
def column_fill(keyword):
"""``column-fill`` property validation."""
return keyword in ('auto', 'balance') | 314f3a0c5451ba5f3de89c7052e816969d6318b5 | 542,430 |
from typing import List
def _replace_pw_references(alias_cmd: str, pw_args: List[str]) -> str:
"""Replace all occurrences of pw@ with the path to the pw script (argv[0]) plus all pw options"""
replacement = " ".join(pw_args) + " "
return alias_cmd.replace("pw@", replacement) | 9442bbc61389e3d0e6303668463cbf29aad52a1b | 44,000 |
def get_nested(a_dict, *args, default_value=None):
"""
Safely gets items buried in a nested dictionary
:param a_dict: nested dictionary to check
:param args: the keys to navigate down
:param default_value: if the items are not found in nested dict
:return: Item or none
>>> test_dict = {'a': {'b': {'c': 5}}}
>>> get_nested(test_dict, 'a', 'b', 'c')
5
>>> get_nested(test_dict, 'a', 'd', default_value=10)
10
>>> get_nested(test_dict, 'a', 'b', 'c', 'd')
>>> list_dict = {'a': [{}, {'b': 2}]}
>>> get_nested(list_dict, 'a', 1, 'b')
2
"""
value = a_dict
for arg in args:
if isinstance(value, dict):
if arg in value:
value = value[arg]
else:
return default_value
elif isinstance(value, list):
if isinstance(arg, int):
value = value[arg]
else:
return default_value
else:
return default_value
return value | 331cb4df7ec08f03f1392d70d317606ef089a65d | 515,612 |
def divide(numerator, denominator):
"""Divide two numbers."""
return numerator / denominator | 7199618f18186ef4108766253944a06cbb221d03 | 343,057 |
def scale(a: tuple, scalar: float) -> tuple:
"""Scales the point."""
return a[0] * scalar, a[1] * scalar | 9638b8cfbd792c2deb35da304c5c375e0402404e | 709,256 |
from typing import Dict
import json
def from_hcl(string: str) -> Dict:
"""Convert an HCL string into a Dict.
:param string: the HCL string.
:return: the dict.
"""
return json.loads(string.replace('"=', '":')) | 62fa2f5f0e9b52ec7773f6e9334c2e88308f3870 | 392,611 |
def rectarea(r):
"""Return the area of rectangle <r>"""
return (r[1][0] - r[0][0]) * (r[1][1] - r[0][1]) | 283d1152e4f0ae68a04125b098927bc25bb6acf3 | 147,164 |
def pairwise(iterable):
"""
Iter over an iterable, two items by two items
>>> list(range(10)|pairwise())
[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
"""
a = iter(iterable)
return zip(a, a) | b10528ab7315f6eb5ed78accfdf9135ae90c5b9a | 650,294 |
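The doctest above relies on a pipe-style helper; called directly the function behaves the same:
assert list(pairwise(range(10))) == [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]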
def get_orbit_direction(manifest):
"""This function returns the orbit direction from a S1 manifest file.
Args:
manifest: path to the manifest file
Returns:
"ASC" for ascending orbits, "DES" for descending
orbits. Throws an exception if manifest can not be parsed.
"""
with open(manifest, "r") as save_file:
for line in save_file:
if "<s1:pass>" in line:
if "DESCENDING" in line:
return "DES"
if "ASCENDING" in line:
return "ASC"
raise Exception("Orbit Directiction not found in "+str(manifest)) | 9ce98a3d85486a597db5b251861f194d7bdda2df | 300,744 |
def _slice(tensor, size, i):
"""Gets slice of columns of the tensor"""
return tensor[:, i*size:(i+1)*size] | e956dc9a9c38c60713cd7a5af74bb69d5916ab68 | 311,344 |
def make_message(msg_format, coll_id="testcoll", type_id="testtype", **kwargs):
"""
Combine supplied message format string with keyword parameters to build
a message text.
"""
msg_vals = dict(kwargs, coll_id=coll_id, type_id=type_id)
    msg_text = msg_format % msg_vals
return msg_text | 6f909c7d6a5b38a1f2c7b8b3668c9bdcfdc59d19 | 583,695 |
def list_subtract(a, b):
"""Return a list ``a`` without the elements of ``b``.
    If a particular value is in ``a`` twice and in ``b`` once, then that
    value will appear once in the returned list.
"""
a_only = list(a)
for x in b:
if x in a_only:
a_only.remove(x)
return a_only | d0557dbf0467e00c909d143c464f4af76eb130c6 | 675,454 |
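Each occurrence in ``b`` removes at most one matching element from ``a``:
assert list_subtract([1, 1, 2, 3], [1, 3]) == [1, 2]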
def get_period(start, end, peak, tsend):
"""Return the onset/decline period for a mhw.
For onset if event starts on 1st day of timeseries, then:
if peak on 1 st day of event, onset period -> 1 day
else -> period=peak.
In any other case period = peak + 0.5
For decline if event ends on last day of timeseries, then:
if peak on last day of event, onset period -> 1 day
else -> period=(end - start - peak).
In any other case period = (end - start -peak) + 0.5
Parameters
----------
start: pandas Series
Index of start of event along time axis
end: pandas Series
Index of end of event along time axis
peak: pandas Series
Index of peak of event respect the event itself
-> index_peak - index_start
tsend: int
Index of last element of series
Returns
-------
onset_period: pandas Series
Period of onset of MHWs
decline_period: pandas Series
Period of decline of MHWs
"""
esp = end - start - peak
x = peak.where(peak != 0, 1)
onset_period = x.where(start == 0, x + 0.5)
y = esp.where(peak != tsend, 1)
decline_period = y.where(end == tsend, y + 0.5)
return onset_period, decline_period | 30a4f14440684ecebb5a605f8ae46e0a06de9f18 | 90,922 |
import pkg_resources
def require_package(name):
""" Set a required package in the actual working set.
Parameters
----------
name: str
The name of the package.
Returns
-------
bool:
True if the package exists, False otherwise.
"""
try:
pkg_resources.working_set.require(name)
return True
except: # noqa: E722
return False | 9147081444a8c040c7cf78f3246ee86b4edf18d2 | 513,969 |
import json
def load_json_file(path):
"""
Returns a dictionary representing the content of a json file
"""
with open(path) as json_file:
return json.load(json_file) | ae1a562364156b32dbe9433c462b74dd45f3704b | 603,012 |
import yaml
def load_config(config_file):
"""Open the YAML configuration file given in parameter"""
with open(config_file, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("[Error] - Error while opening the YAML conf file: {}".format(exc)) | 12d0cdb358682d8cae27af2e345ddb9f1b6db191 | 117,149 |
def prob_by_rank(rank, n):
"""
    Transforms the rank of an item into the probability that the item is recommended and observed by the user.
    The first ranked item has a probability 1, and the last ranked item has zero.
    Simplified version of 1 - (rank-1)/(n-1)
Args:
rank: rank of an item
n: number of items to be recommended
Returns:
prob: the probability an item will be recommended
"""
#if there is only one item, probability should be one, but method below will not work...
if n == 1:
prob = 1.0
else:
prob = (n-rank)/float(n-1)
return prob | e0f61f893906292a2362914f1da13acde7b7b258 | 184,529 |
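The probability decays linearly from 1 for the top rank to 0 for the last:
assert [prob_by_rank(r, 5) for r in range(1, 6)] == [1.0, 0.75, 0.5, 0.25, 0.0]
assert prob_by_rank(1, 1) == 1.0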
import math
def dist(x1,y1,x2,y2):
""" return the distance between two points
"""
return math.sqrt((x1-x2)**2+(y1-y2)**2) | 67bdbf9c20e73520e61de262b34fbc92f5c4eedd | 223,964 |
def delete_perm_location(rec) -> tuple[str, dict]:
"""
Delete the permanentLocationId and permanentLocation from the input item record.
Args:
rec: dictionary loaded from a JSON record
Returns:
A tuple containing the old permanentLocationId (UUID) and the old permanentLocation (as dictionary).
"""
old_loc_id = rec.pop("permanentLocationId", None)
old_loc = rec.pop("permanentLocation", None)
return (old_loc_id, old_loc) | 3ebc72799ed7d64e27a9ec09a34742e45b6fa910 | 219,833 |
import random
import string
def get_rand_string(length=10) -> str:
    """Return a random string of ASCII letters and/or digits."""
    return "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) | 4e9854e436a508f8ce56b746485fbf093fa63f8b | 431,258 |
def _validate_class_data_shapes(self, data, val=True, validate_n_slices=False):
"""Standardize `sample_weight` and `class_labels` data. Same as
:meth:`_validate_data_shapes`, except skips two validations:
- `_validate_last_dims_match_outs_shape`; for `class_labels`, model
output shapes can be same as input shapes as with autoencoders,
but inputs can still have class labels, subjecting to `sample_weight`.
`sample_weight` often won't share model output shape, as with e.g.
multiclass classification, where individual classes aren't weighted.
- `_equal_shapes`; per above, `data` entries may not have equal shapes.
"""
return self._validate_data_shapes(data, val,
validate_n_slices,
validate_last_dims_match_outs_shape=False,
validate_equal_shapes=False) | c5b9b85f05d266a7c074a541e4b33e9da9e47c89 | 408,309 |
def depth_breaks(increment_mm, max_mm):
"""Return a list of tuples representing depth ranges from a given depth increament (mm) and the maximum depth required"""
dd, dmax = increment_mm, max_mm
a = [i for i in range(0, dmax, dd)]
b = [(a[n], a[n+1]) for n, i in enumerate(a) if i != max(a)]
    return b | 9c99689eafa8a013b34d272da6a1f824965332c1 | 226,403 |
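For example, with a 25 mm increment and a 100 mm maximum (the pair starting at the largest break point is dropped):
assert depth_breaks(25, 100) == [(0, 25), (25, 50), (50, 75)]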
import re
def _has_journal_arxiv_field(db_entry):
"""Check whether entry has a journal field containing arxiv info."""
if 'journal' not in db_entry:
return False
journal = db_entry['journal']
# this detects entries coming from google scholar
if journal[:5] != 'arXiv':
return False
patt = r'arXiv preprint arXiv:([0-9]{4}\.[0-9]*)$'
match = re.match(patt, journal)
if not match:
return False
return match.group(1) | 2198bab02f5be60dfedd8d3edfb50cd355fc2491 | 450,576 |
def normalize(prices):
"""Normalize pandas DataFrame by divide by first row"""
return prices / prices.iloc[0] | 29bad57437372eb72fd227620fda93f48244f741 | 116,159 |
def subtract(im1, im2):
"""Subtract two images (im1-im2). Pixel value cannot go below 0"""
im = im1 - im2
im[im<0] = 0
return im | 278793f316dfce768866887008b63a8cee966757 | 430,884 |
from typing import List
def two_sum(nums: List[int], target: int) -> bool:
"""Hashing.
    Traverse the numbers and remember the numbers we have seen. For each
    number, check whether its complement (target - num) has already been seen.
Take [10, 15, 3, 7] for example:
(1) num = 10, seen_num = () , target - num = 7
(2) num = 15, seen_num = (10) , target - num = 2
(3) num = 3 , seen_num = (10, 15) , target - num = 14
(4) num = 7 , seen_num = (10, 15, 3), target - num = 10 (Match!)
TIME : O(n)
SPACE : O(n)
Args:
nums (List[int]): list of numbers
target (int): expected sum of any two numbers
Returns:
bool: whether any two numbers from list `nums` add up to `target`.
"""
seen_num = set()
for num in nums:
if target - num in seen_num:
return True
seen_num.add(num)
return False | f778584a8da9e0cb68205a8172b945b330dafed0 | 562,541 |
import requests
def read_from_url(url):
""" GET this `url` and read the response. """
response = requests.get(url)
return response.text | 13f3fffab518fde9e4788dea7458bb2e00a6e09f | 167,853 |
def assign_linux_server_policy(api, configuration, api_version, api_exception, computer_id):
""" Assigns a Linux server policy to a computer.
:param api: The Deep Security API modules.
:param configuration: Configuration object to pass to the api client.
:param api_version: The version of the API to use.
:param api_exception: The Deep Security API exception module.
:param computer_id: The ID of the computer to assign the policy to.
:return: A ComputersApi object that contains the Linux server policy.
"""
    # Create search criteria to retrieve the Linux Server policy
search_criteria = api.SearchCriteria()
search_criteria.field_name = "name"
search_criteria.string_test = "equal"
search_criteria.string_value = "%Linux Server%"
# Create a search filter
search_filter = api.SearchFilter(None, [search_criteria])
policies_api = api.PoliciesApi(api.ApiClient(configuration))
computers_api = api.ComputersApi(api.ApiClient(configuration))
computer = api.Computer()
# Perform the search
policy_search_results = policies_api.search_policies(api_version, search_filter=search_filter)
# Assign the policy to the computer
computer.policy_id = policy_search_results.policies[0].id
return computers_api.modify_computer(computer_id, computer, api_version) | 92b00c7bd666265119651c0d05b3a318d130e316 | 665,707 |
def get_input_layer_variables(ann, variables, descriptors):
"""
Given a list of descriptor names and a tuple of
variables of the MILP_Relu model,
create and return a dictionary that for each descriptor
gives the corresponding variable
"""
# unpack the variables
x, y, z = variables
# Initialize an empty dictionary
sol = dict()
# Iterate over the input layer and the list of descriptors
# at the same time
for v, name in zip(ann.input_layer, descriptors):
sol[name] = y[v]
return sol | a9767e348adc9d396c949867cec0f58ac8893226 | 59,996 |
def get_area(ptlist):
""" Calculate the area of a polygon defined by a list of points.
The variable ptlist is a list of (x, y) point pairs. Be careful,
the implementation can give unexpected results with self-intersecting
polygons.
The output will always be non-negative.
Created: 2015 April 29, msswan
"""
I = lambda pt1, pt2: (pt2[1] + pt1[1]) * (pt2[0] - pt1[0]) / 2.0
area = I(ptlist[-1], ptlist[0])
for idx in range(0, len(ptlist)-1):
area += I(ptlist[idx], ptlist[idx+1])
return abs(area) | f33f08f26206e08fa3fdcf047038f58b3a90df58 | 31,206 |
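A unit-square check of the trapezoid formulation:
assert get_area([(0, 0), (1, 0), (1, 1), (0, 1)]) == 1.0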
import time
def calculate_time_back(age):
"""
Calculate Unix time back from age.
"""
now = int(time.time())
return now - age | 5be15176ab8b5591f52400e3108789f55a344df2 | 221,661 |
def filter_lower_case_keys(dct):
    """
    Filter a dict to include only lower case keys.
    Used to skip HTTP response fields.
    :param dct: Dict with all capabilities parsed from the SSDP discovery.
    :return: Dict with lower case keys only.
    """
    return {key: value for key, value in dct.items() if key.islower()} | f441202f6ae66ab023431c42680f349169ea0f79 | 24,627 |
import math
def _find_utm_crs(lat, lon):
"""Find the UTM CRS based on lat/lon coordinates.
Parameters
----------
lat : float
Decimal latitude.
lon : float
Decimal longitude.
Returns
-------
crs : dict
Corresponding UTM CRS.
"""
utm_zone = (math.floor((lon + 180) // 6) % 60) + 1
if lat >= 0:
pole = 600
else:
pole = 700
epsg = 32000 + pole + utm_zone
return {'init': f'epsg:{epsg}'} | 7dff30cbfb277d21728c0feadeb0018b6e793462 | 664,208 |
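For example, Paris (48.85 N, 2.35 E) falls in UTM zone 31N:
assert _find_utm_crs(48.85, 2.35) == {'init': 'epsg:32631'}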
def ddmmmyy(arrow_date):
"""Return an Arrow date as a string formatted as lower-cased `ddmmmyy`; e.g. 28feb22.
:param arrow_date: Date/time to format.
:type arrow_date: :py:class:`arrow.arrow.Arrow`
:return: Date formatted as lower-cased `ddmmmyy`.
:rtype: str
"""
return arrow_date.format("DDMMMYY").lower() | 843a3bbcee01f9333f266338f140d409b43e3be3 | 486,803 |
from pathlib import Path
import math
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:return: value to submit
"""
heading = 90
east_west_pos = 0
north_south_pos = 0
instructions = [l.strip() for l in open(file)]
for i in instructions:
action = i[0]
value = int(i[1:])
if action == 'R':
heading = (heading + value) % 360
elif action == 'L':
heading = (heading - value) % 360
if action == 'E':
east_west_pos += value
if action == 'W':
east_west_pos -= value
if action == 'N':
north_south_pos += value
if action == 'S':
north_south_pos -= value
if action == 'F':
            east_west_pos += value * math.sin(heading / 360 * 2 * math.pi)
            north_south_pos += value * math.cos(heading / 360 * 2 * math.pi)
manhattan_distance = int(abs(east_west_pos) + abs(north_south_pos))
return manhattan_distance | 3ba2c0a9fd4457ea2b49d6aca983123d8af45e04 | 25,282 |
import warnings
def ensure_unit(self, name, current_unit, unit, nwb_version):
"""A helper to ensure correct unit used.
Issues a warning with details if `current_unit` is to be ignored, and
`unit` to be used instead.
"""
if current_unit != unit:
warnings.warn(
"Unit '%s' for %s '%s' is ignored and will be set to '%s' "
"as per NWB %s."
% (current_unit, self.__class__.__name__, name, unit, nwb_version))
return unit | 68bcbaa2ec08b1be3b6da971dd9a279319791381 | 405,425 |
def __calc_year(entered_year: int, beginning_year: int) -> int:
"""
Calculates the year as a single digit (0 for first year, 2 for 3rd year)
(+1 because it's zero indexed)
"""
return entered_year - beginning_year + 1 | 9a471b5d8893d6a848320f494ff5acc51786df3f | 45,032 |
import torch
def create_optimizer(cfg, model):
"""
Creates an adam optimizer with correct weight
decay method.
"""
optimizer = torch.optim.AdamW(
model.parameters(),
eps=cfg.adam_epsilon,
lr=cfg.lr,
weight_decay=cfg.weight_decay)
return optimizer | 990109e30152c4108935a839078cc65c80db7272 | 320,811 |
def get_id_from_tensor(tensor, id_name):
"""Gets appropriate id column from tensor with ids"""
if id_name == "user":
return tensor[:, 0]
elif id_name == "item":
return tensor[:, 1]
else:
raise ValueError(f"id_name should be one of 'user', 'item', got {id_name}") | 064e658a667b0612eba3a934104b8f13b9c14196 | 455,517 |
def mock_cpu_count(*args, **kwargs):
"""
Instead of running multiprocessing.cpu_count(), we return a fixed
value during tests
"""
return 64 | eec7ae95f5f97de299ac1c79479e96bd37985326 | 281,377 |
def build_planet_position_lists(time_positions, planet_names):
"""
Reformat export for matplot lib, Returns a dict of the following format
{
'planet_1': {'x': [1,2,3],
'y': [4,5,6],
},
'planet_2': {'x': [1,2,3],
'y': [4,5,6],
},
...
}
"""
planet_dict = {}
for planet_name in planet_names:
x_positions = [time_position.positions[planet_name].x for time_position in time_positions]
y_positions = [time_position.positions[planet_name].y for time_position in time_positions]
planet_dict[planet_name] = {'x': x_positions, 'y': y_positions}
return planet_dict | bd48295f8441ed3dbfceadac8e5d14ab25cfd7f1 | 518,552 |
def cartesian2complex(real, imag):
"""
Calculate the complex number from the cartesian form: z = z' + i * z".
Args:
real (float|np.ndarray): The real part z' of the complex number.
imag (float|np.ndarray): The imaginary part z" of the complex number.
Returns:
z (complex|np.ndarray): The complex number: z = z' + i * z".
"""
return real + 1j * imag | 1fd44bc0accff8c9f26edfa84f4fcfafb2323728 | 705,591 |
def get_word_bin(string):
"""
Given a string, returns its binary representation in 0s and 1s
"""
return ''.join(format(ord(x), 'b') for x in string) | f5eaefa7b48aa21201f48927356c1b74af6aedf0 | 132,839 |