content (string, 39-9.28k chars) | sha1 (string, 40 chars) | id (int64, 8-710k) |
---|---|---|
def get_potential(q):
"""Return potential energy on scaled oscillator displacement grid q."""
return q ** 2 / 2
|
78f657d950dab0ac1821fbca5454e9b0dfb4eaab
| 204,886 |
def get_data_type(azdat):
"""
Get the radar file type (radial or raster).
Args:
azdat: Boolean.
Returns:
Radial or raster.
"""
if azdat:
return "radial"
return "raster"
|
375d250665f728bbf76b88a66f8352b623a55983
| 430,313 |
def csv2dict(filename, key_column, val_column, lower=True, header=True): #---<<<
"""
Create a dictionary from two columns in a CSV file.
filename = name of .CSV file
key_column = column # (0-based) for dictionary keys
val_column = column # (0-based) for dictionary values
lower = whether to make the keys lowercase
header = whether .CSV file has a header row as the first line
Returns the dictionary.
"""
thedict = dict()
firstline = True
    with open(filename, 'r') as csvfile:
        for line in csvfile:
            if firstline and header:
                firstline = False
                continue  # skip over the header line
            columns = line.split(',')
            key_val = columns[key_column].strip()
            val_val = columns[val_column].strip()
            if lower:
                thedict[key_val.lower()] = val_val
            else:
                thedict[key_val] = val_val
    return thedict
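# Hedged usage sketch (hypothetical file "codes.csv" containing a header row
# "Code,Description" followed by "AA,Alpha" and "BB,Beta"):
# >>> csv2dict('codes.csv', key_column=0, val_column=1)
# {'aa': 'Alpha', 'bb': 'Beta'}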
|
799c7d530c44d36cd244046cc46abb237d80f40d
| 249,457 |
def decompress_amount(x):
""" Decompresses the Satoshi amount of a UTXO stored in the LevelDB. Code is a port from the Bitcoin Core C++
source:
https://github.com/bitcoin/bitcoin/blob/v0.13.2/src/compressor.cpp#L161#L185
:param x: Compressed amount to be decompressed.
:type x: int
:return: The decompressed amount of satoshi.
:rtype: int
"""
if x == 0:
return 0
x -= 1
e = x % 10
    x //= 10  # integer division, matching the integer arithmetic of the C++ source
if e < 9:
d = (x % 9) + 1
        x //= 9
n = x * 10 + d
else:
n = x + 1
while e > 0:
n *= 10
e -= 1
return n
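# Hedged examples (values follow the Bitcoin Core compression scheme and assume
# the integer-division (//=) form above):
# >>> decompress_amount(0)
# 0
# >>> decompress_amount(4)      # 1 000 satoshi
# 1000
# >>> decompress_amount(9)      # 1 BTC = 100 000 000 satoshi
# 100000000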
|
85856d220be484b294622b99433ada47d04ec4e0
| 572,865 |
import csv
def load_angle_offsets(csv_path):
"""Load additive euler angles from CSV file.
header: joint, i, j, k
Order of x,y,z values must match order of rotation channels in BVH file. If the order is zxy then i=z, j=x, k=y.
:param csv_path: Path to CSV file containing values.
:type csv_path: str
:return: Angle offsets as dictionary.
:rtype: dict
"""
angle_offsets = dict()
try:
with open(csv_path) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
angle_offsets[row['joint']] = tuple(float(row[i]) for i in 'ijk')
except OSError as e:
print("ERROR:", e)
except ValueError as e:
print("ERROR: Reading angle values failed.", e)
return angle_offsets
|
aed976ebf2801364b9d770895e943d0864c46d5c
| 609,553 |
def _LLF(job, t):
"""
Least-Laxity-First assigns higher priority to jobs with lesser laxity (slack).
The laxity (slack) of a job of a job with deadline d at time t is equal to
deadline - t - (time required to complete the remaining portion of the job)
"""
return job.deadline - t - job.remaining_cost
|
8dd5025c5ee077352c7f0cf0e23fc76bc29950c3
| 658,039 |
def get_doi_url(doi):
"""
Given a DOI, get the URL for the DOI
"""
return "https://doi.org/%s" % doi
|
ab7bd3a4fb488f376021804783ca4ce88c938d70
| 190,081 |
def fix_sign(x, N=360 * 60 * 10):
"""
Convert negative tenths of arcminutes *x* to positive by checking
bounds and taking the modulus N (360 degrees * 60 minutes per
    degree * 10 tenths per arcminute).
"""
if x < 0:
assert x > -N
x += N
assert x < N
return x % N
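# Examples with the default N = 360 * 60 * 10 = 216000 tenths of arcminutes:
# >>> fix_sign(-10)
# 215990
# >>> fix_sign(10)
# 10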
|
77063cd2a2680937276a3ef752782bc537942827
| 182,877 |
def get_topics(lda_model):
"""get_topics. Extract list of topics with top twenty words.
Parameters
----------
lda_model : gensim.models.ldamulticore.LdaMulticore
Trained LDA model. Takes output from get_model.
"""
topics = lda_model.show_topics(num_topics = -1, num_words=20, formatted=False)
return topics
|
c7819badbbc9037be6780c93d1771812f20f79c1
| 362,161 |
from collections import Counter
def recount_answers(group):
"""Count the number of positive answers of a group.
Only the questions to which everyone in a group answered
yes to count.
"""
people = len(group)
line = ''.join([n.strip('\n') for n in group])
count = Counter(line)
number = 0
for n, item in count.items():
if item == people:
number += 1
return number
|
f5695311d36ae71d4dca2dc57a720f216e725701
| 222,737 |
import torch
def precision(cm):
"""
Precision: of all predicted positive samples, what fraction was correct?
precision = TP / (TP + FP)
Args:
cm: Binary confusion matrix like this:
| TN | FP |
| FN | TP |
Returns:
scalar precision score
"""
rv = cm.diag() / cm.sum(0)
rv[torch.isnan(rv)]=0
return rv[1]
|
ce5faf206d3ece17ee0069b1ad1f2ee47c71d712
| 80,083 |
def fa_attachment(extension):
"""
Add fontawesome icon if found. Else return normal extension as string.
:param extension: file extension
:return: matching fontawesome icon as string
"""
if extension == 'pdf':
return "<i class='fa fa-file-pdf-o fa-lg'></i>"
elif extension == 'jpg' or extension == 'png':
return "<i class='fa fa-picture-o fa-lg'></i>"
elif extension == 'doc' or extension == 'docx':
return "<i class='fa fa-file-word-o fa-lg'></i>"
elif extension == 'xls' or extension == 'xlsx':
return "<i class='fa fa-file-excel-o fa-lg'></i>"
elif extension == 'extern':
return "<i class='fa fa-external-link'></i>"
elif extension == 'zip':
return "<i class='fa fa-file-archive-o fa-lg'></i>"
else:
return extension
|
3add6bf4c177cba893a2242df352fd0ae619ee90
| 22,194 |
def listGetShiftedGeometricMean(listofnumbers, shiftby=10.0):
""" Return the shifted geometric mean of a list of numbers, where the additional shift defaults to
10.0 and can be set via shiftby
"""
geommean = 1.0
nitems = 0
for number in listofnumbers:
nitems = nitems + 1
nextnumber = number + shiftby
geommean = pow(geommean, (nitems - 1) / float(nitems)) * pow(nextnumber, 1 / float(nitems))
return geommean - shiftby
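# Worked example (illustrative values): with the default shift of 10.0 the list
# [0.0, 10.0] becomes [10.0, 20.0], whose geometric mean is sqrt(200) ~= 14.142,
# so roughly 4.142 is returned after subtracting the shift again.
# >>> round(listGetShiftedGeometricMean([0.0, 10.0]), 3)
# 4.142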
|
904bb38a199052b086b7a2c695ce675011f65019
| 17,561 |
def extract_medium(medium):
"""
Convert Medium to simplified dictionary
:param medium: ILoop medium object
:return: list of dictionaries of format
    {'id': <compound id (<database>:<id>, e.g. chebi:12345)>, 'name': <compound name>,
     'concentration': <compound concentration (float)>}
"""
if medium is None:
return []
else:
return [{
'id': 'chebi:' + str(compound['compound'].chebi_id),
'name': compound['compound'].chebi_name,
'concentration': compound['concentration']
} for compound in medium.read_contents()]
|
0b31e1807271379caa74f62a2bd7acb6fd33936a
| 293,918 |
import json
def load_dict_json(file_path: str) -> dict:
"""
Loads a JSON file as a dictionary.
# Arguments
file_path (string): Path to the JSON file.
# Returns
Dictionary with the data from the JSON file.
"""
with open(file_path, 'rb') as fp:
return json.load(fp)
|
d692e55dda5818cc9aaede507165f56057c85ba5
| 89,918 |
def dzip(items1, items2, cls=dict):
"""
Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Sequence): full sequence
items2 (Sequence): can either be a sequence of one item or a sequence of
equal length to `items1`
cls (Class): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {}
"""
try:
len(items1)
except TypeError:
items1 = list(items1)
try:
len(items2)
except TypeError:
items2 = list(items2)
if len(items1) == 0 and len(items2) == 1:
# Corner case:
# allow the first list to be empty and the second list to broadcast a
# value. This means that the equality check wont work for the case
# where items1 and items2 are supposed to correspond, but the length of
# items2 is 1.
items2 = []
if len(items2) == 1 and len(items1) > 1:
items2 = items2 * len(items1)
if len(items1) != len(items2):
raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (
len(items1), len(items2)))
return cls(zip(items1, items2))
|
c50e414c6ce06c0427750995c8bfb1bdfd18bf2b
| 476,170 |
def even_chunker(seq, n_chunks):
"""Given a sequence, returns that sequence divided into n_chunks of (roughly) the same size.
In other words, it returns a list of lists.
When len(seq) is evenly divisible by n_chunks, all chunks are the same size.
When len(seq) is not evenly divisible by n_chunks, all chunks have a length within +/- 1 of
all other chunks.
Some examples of the length of the return chunks for len(seq) == 100:
n_chunks == 3: [33, 33, 34]
n_chunks == 5: [20, 20, 20, 20, 20]
n_chunks == 6: [16, 17, 17, 16, 17, 17]
n_chunks == 7: [14, 14, 14, 15, 14, 14, 15]
n_chunks == 8: [12, 13, 12, 13, 12, 13, 12, 13]
n_chunks == 9: [11, 11, 11, 11, 11, 11, 11, 11, 12]
n_chunks == 15: [6, 7, 7, 6, 7, 7, 6, 7, 7, 6, 7, 7, 6, 7, 7]
"""
length = len(seq)
return [seq[i * length // n_chunks: (i + 1) * length // n_chunks]
for i in range(n_chunks)]
|
936c4c262644eec119eec9d73a6f61171a584408
| 105,281 |
def single_line(line, report_errors=True, joiner='+'):
"""Force a string to be a single line with no carriage returns, and report
a warning if there was more than one line."""
lines = line.strip().splitlines()
if report_errors and len(lines) > 1:
print('multiline result:', lines)
return joiner.join(lines)
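# Example: a multi-line string is collapsed into one line.
# >>> single_line("first\nsecond\n", report_errors=False)
# 'first+second'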
|
e150f0d9039f3f4bc0cb1213d5670bc7519b1bbf
| 683,771 |
import time
def date(representation='literal'):
"""Return the local date.
Parameters
----------
representation : 'literal', 'number' (default: 'literal')
Example : 'literal' --> "24 march 2020"
'number' --> "24/03/2020"
"""
localtime = time.localtime()
year = localtime[0]
month = localtime[1]
day = localtime[2]
months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october',
'november', 'december']
if representation == 'literal':
return '{0} {1} {2}'.format(day, months[month - 1], year)
elif representation == 'number':
day = '0' + str(day) if int(day) < 10 else str(day)
month = '0' + str(month) if int(month) < 10 else str(month)
return '{0}/{1}/{2}'.format(day, month, year)
|
d194ea8ee1b8dad4d79971fd1bff4f82d2cfc984
| 186,404 |
def notas(*tot, sit=False):
    """
    :param tot: Grades passed as positional arguments.
    :param sit: If True, also include the student's situation in the result:
                'Ruim' (poor) for a low average, 'Razoavél' (fair) for an
                intermediate average, or 'Ótima' (great) for a high average.
    :return: A dictionary with the number of grades, the highest and lowest
             grade, the average and, optionally, the situation.
    """
n = dict()
    total = tot
media = sum(total)/ len(total)
#preenchendo Dicionário com as informações
n['qtd_notas'] = len(total)
n['maior_nota'] = max(total)
n['menor_nota'] = min(total)
n['média'] = media
if sit == True:
if media >= 0 and media < 4:
n['situação'] = 'Ruim'
elif media >= 4 and media < 7:
n['situação'] = 'Razoavél'
else:
n['situação'] = 'Ótima'
return n
|
ab3edb6e9e201562351a3440fda5be2e08427ac0
| 540,974 |
def print_board(board):
    """
    Input: a tic-tac-toe board as a list of 10 values (index 0 is unused;
    the cells are board[1] through board[9])
    Prints the board in a 3x3 visual layout and returns None
    """
    return print("\n{} | {} | {}\n--+---+--\n{} | {} | {}\n--+---+--\n{} | {} | {}\n"
                 .format(
                     board[1], board[2], board[3],
                     board[4], board[5], board[6],
                     board[7], board[8], board[9]))
|
9968895a5c25a50644c4577c7f38654dcf5f9748
| 578,343 |
def int_to_vlq(val):
"""
Converts an integer value into a variable length quantity (list).
"""
vlq = []
for i in range(21, 0, -7):
if val >= (1 << i):
vlq.append(((val >> i) & 0x7F) | 0x80)
vlq.append(val & 0x7F)
return vlq
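# Examples matching the standard MIDI variable-length-quantity encoding:
# >>> [hex(b) for b in int_to_vlq(0x40)]
# ['0x40']
# >>> [hex(b) for b in int_to_vlq(0x80)]
# ['0x81', '0x0']
# >>> [hex(b) for b in int_to_vlq(0x0FFFFFFF)]
# ['0xff', '0xff', '0xff', '0x7f']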
|
0e8dd5affb8084109d34751c3151aeaeacac1314
| 585,564 |
import re
def _match_op(ops, regex):
"""Returns ops that match given regex along with the matched sections."""
matches = []
prog = re.compile(regex)
for op in ops:
op_matches = prog.findall(op)
if op_matches:
matches.append((op, op_matches[0]))
return matches
|
98bf8e34f5b5b02d8f90a7d40ca8b98784113b3e
| 459,852 |
import warnings
def _handle_alias(X, Z):
"""Handle Z as an alias for X, return X/Z.
Parameters
----------
X: any object
Z: any object
Returns
-------
X if Z is None, Z if X is None
Raises
------
ValueError both X and Z are not None
"""
if Z is None:
return X
elif X is None:
msg = (
"argument Z will in transformers is deprecated since version 0.10.0 "
"and will be removed in version 0.11.0"
)
warnings.warn(msg, category=DeprecationWarning)
return Z
else:
raise ValueError("X and Z are aliases, at most one of them should be passed")
|
83ba789ff108f201d765f414a161052326d02ad9
| 424,792 |
def can_contain(parent_type, child_type):
""" Returns true if parent block can contain child block.
"""
return (parent_type in ['Document', 'BlockQuote', 'ListItem'] or
(parent_type == 'List' and child_type == 'ListItem'))
|
c8cef3515b3306f779525c59486b526654649433
| 31,441 |
def rgba_from_argb_int(color):
"""
Converts ARGB int into RGBA tuple.
Returns:
        (int, int, int, float)
        Red, green and blue channels (0-255) and alpha in the range [0, 1].
"""
a = ((color >> 24) & 0xFF) / 255.
r = (color >> 16) & 0xFF
g = (color >> 8) & 0xFF
b = color & 0xFF
return r, g, b, a
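# Example: 0x80FF0000 is red with roughly 50% alpha in ARGB.
# >>> r, g, b, a = rgba_from_argb_int(0x80FF0000)
# >>> (r, g, b, round(a, 3))
# (255, 0, 0, 0.502)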
|
a92d676f89678556a3382af4642408c5cb72f06c
| 258,171 |
def _clean_header(text: str, is_unit: bool = False) -> str:
"""
Extract header text from each raw trajectory summary csv file header.
:param text: Raw trajectory summary csv column header text.
:param is_unit: If True, return text with brackets for units.
:returns: Formatted text.
"""
# Return an empty string if there is no header found
if "Unnamed" in text:
return ""
# Removes additional spaces and hashtags from text. Add brackets optionally.
clean_header = " ".join(text.replace("#", "").split())
if is_unit:
clean_header = f" ({clean_header})"
return clean_header
|
000ab01267e78d621fd8a8e6844523e7fa909ba4
| 696,156 |
def filter_valid_transit(df_in, hour_in):
"""Filters master dataframe for VEH_IDs valid for specified hour
Inputs: df_in, a pandas dataframe containing joined transit_lines and
transit_types excel worksheets.
hour_in, an emme hour code as user arg.
Returns: transit_list, a list of valid VEH_IDs per specified hour
"""
df_in = df_in[df_in[hour_in].notnull()]
transit_list = df_in['VEH_ID'].tolist()
return transit_list
|
800b30ba3784150f7a5f30c6c9436564847c57a1
| 169,444 |
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv)
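# Usage sketch (assumes numpy is available):
# >>> import numpy as np
# >>> print(bmatrix(np.array([[1, 2], [3, 4]])))
# \begin{bmatrix}
#   1 & 2\\
#   3 & 4\\
# \end{bmatrix}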
|
3be2e4c67f535996161252d4da2c2397c21855da
| 223,680 |
def get_error(intercept, slope, points):
"""get_error function computes the error for a line passing through a
given set of points (x, y)
:intercept: y-intercept of the line passing through a set of points
:slope: slope of the equation represented as m
:points: set of (x, y) coordinates
:returns: the error value computed
"""
error_value = 0
for i in range(0, len(points)):
error_value += (points[i].y - (slope * points[i].x + intercept)) ** 2
return error_value / float(len(points))
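# Usage sketch (assumes point objects exposing .x and .y, e.g. a namedtuple):
# >>> from collections import namedtuple
# >>> Point = namedtuple('Point', 'x y')
# >>> get_error(0, 1, [Point(0, 0), Point(1, 2)])   # mean squared error vs. y = x
# 0.5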
|
cd271598a01eee12b343ae422b79085cac86a702
| 96,530 |
def subtract(x, y):
    """ Returns the first vector minus the second, as a list. """
    n = len(x)
    assert len(y) == n
    return [x[i] - y[i] for i in range(n)]
|
dd66579b99d01c42b9773a116b4989910a6be3be
| 646,088 |
def blend(a, scale, b):
"""
Returns a color that's 'scale' of the way between 'a' and 'b'.
(Interpolates RGB space; very simple)
"""
ar,ag,ab = a
br,bg,bb = b
return (min(255, max(0, br- (br-ar)*(1-scale))),
min(255, max(0, bg- (bg-ag)*(1-scale))),
min(255, max(0, bb- (bb-ab)*(1-scale))))
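# Example: halfway between black and white (channels may come back as floats).
# >>> blend((0, 0, 0), 0.5, (255, 255, 255))
# (127.5, 127.5, 127.5)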
|
9ef9271f688e205f9da1105ba041fca44e497aa5
| 237,201 |
import time
def formatentry_ls(l):
"""Returns a string in a format similar to that produced by
'ls -la'. This is particularly handy to get Emacs' ange-ftp
working well with FtpServers.
    Expects a (mutable) list containing the following members:
- [0] - permissions, in 'drwxrwxrwx' format
- [1] - number of links (doesn't really matter much)
- [2] - file's owner's username (eg. 'root')
- [3] - file's group's groupname (eg. 'wheel')
- [4] - file's length in bytes
- [5] - file's mtime, in seconds since epoch
- [6] - file's name
"""
mtime = l[5]
if time.time() - mtime > (86400 * 28):
printtime = time.strftime('%b %d %Y', time.localtime(mtime))
else:
printtime = time.strftime('%b %d %H:%M', time.localtime(mtime))
l[5] = printtime
return '%s %3d %-8s %-8s %8d %-12s %s' % tuple(l)
|
cc4f83bdbefd27dfd563d1b722d24e026ecde573
| 239,876 |
import asyncio
async def check_address(host: str, port: int = 80, timeout: int = 2) -> bool:
"""
Async version of test if an address (IP) is reachable.
Parameters:
host: str
host IP address or hostname
port : int
HTTP port number
Returns
-------
awaitable bool
"""
try:
reader, writer = await asyncio.wait_for(
asyncio.open_connection(host, port),
timeout=timeout
)
writer.close()
await writer.wait_closed()
return True
    except (asyncio.TimeoutError, OSError):
return False
|
e08da38636c66a59948adc4fa08132f9f7438db9
| 686,738 |
def delay(func):
"""
When schemas are referencing to each other, this decorator will help by
marking a schema as ``delayed`` to avoid the need for calling a schema to
generate itself until it is actually needed.
For example, if a schema function references to itself in this manner::
def my_schema():
return (
('a', 'foo'),
('b', my_schema()),
)
Because ``my_schema`` is being called within itself, it will get into
a recursion problem as soon as it is executed.
To avoid this, applying the decorator will make it so that the engine will
acknowledge this is the case, and will *expand* the schema only when it is
needed. No recursion problems will happen then since we are effectively
delaying its execution.
"""
func.__delayed__ = True
return func
|
162b738c7af8a93ff9ffb87fad5e14a514d44695
| 417,727 |
def member_needs_update(before, after):
"""
See if the given member update is something
we care about.
Returns 'False' for no difference or
change we will ignore.
"""
for attr in ("nick", "avatar", "roles"):
if getattr(before, attr) != getattr(after, attr):
return True
return False
|
5803e9affa7028e36e2bd5a74049ecc3771896ca
| 626,196 |
from typing import List
from typing import Any
def __find_item_index(list_to_look_in: List[Any], item_name: str):
"""
Returns the index of the first item in the list that matches the given name.
:param list_to_look_in: list of elements that contain name attributes
:param item_name: the item name to actually search for
:return: The index of the object if any are found.
"""
for count, element in enumerate(list_to_look_in):
if element.name == item_name:
return count
|
7081e45bfe0e6151a939b1533ce684ace7791d8f
| 253,322 |
from collections import OrderedDict
def pool_usage_metrics_list_table_format(result):
"""Format pool usage-metrics list as a table."""
table_output = []
for item in result:
table_row = OrderedDict()
table_row['Pool Id'] = item['poolId']
table_row['Start Time'] = item['startTime'] if item['startTime'] else ""
table_row['End Time'] = item['endTime'] if item['endTime'] else ""
table_row['VM Size'] = item['vmSize']
table_row['Total Core Hours'] = str(item['totalCoreHours'])
table_output.append(table_row)
return table_output
|
1639a8602fba1dd12f87f1c094314bcea8c79766
| 357,521 |
def extra_hour_bits(value):
"""
practice getting extra bits out of the hours bytes
>>> extra_hour_bits(0x28)
[0, 0, 1]
>>> extra_hour_bits(0x8)
[0, 0, 0]
"""
masks = [ ( 0x80, 7), (0x40, 6), (0x20, 5), ]
nibbles = [ ]
for mask, shift in masks:
nibbles.append( ( (value & mask) >> shift ) )
return nibbles
|
635c7f8d4aa752001b75eba8518d1e3c857bc297
| 123,858 |
def CsvEscape(text):
"""Escapes data entry for consistency with CSV format.
The CSV format rules:
- Fields with embedded commas must be enclosed within double-quote
characters.
- Fields with embedded double-quote characters must be enclosed within
double-quote characters, and each of the embedded double-quote characters
must be represented by a pair of double-quote characters.
- Fields with embedded line breaks must be enclosed within double-quote
characters.
- Fields with leading or trailing spaces must be enclosed within
double-quote characters.
Args:
text: str Data entry.
Returns:
str CSV encoded data entry.
"""
if not text: return ''
if text.find('"') > -1: text = text.replace('"', '""')
if (text == '' or text.find(',') > -1 or text.find('"') > -1 or
text.find('\n') > -1 or text.find('\r') > -1 or text[0] == ' ' or
text[-1] == ' '):
text = '"%s"' % text
return text
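# Examples:
# >>> CsvEscape('plain')
# 'plain'
# >>> CsvEscape('a,b')
# '"a,b"'
# >>> CsvEscape('say "hi"')
# '"say ""hi"""'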
|
a28620e204f5433c580c00a234ea0ab5e6ac060c
| 29,705 |
from datetime import datetime
def now() -> datetime:
"""Return the datetime object of the current time.
:return: datetime object represeting current time
"""
return datetime.now().astimezone()
|
e1ca72a8cf22b1614a406a3d44148fb509d262a8
| 627,014 |
def _substitution_mask(sent1, sent2):
"""Binary mask identifying substituted part in two sentences.
Example sentence and their mask:
First sentence = "I like the cat 's color"
0 0 0 1 0 0
Second sentence = "I like the yellow dog 's color"
0 0 0 1 1 0 0
Args:
sent1: first sentence
sent2: second sentence
Returns:
mask1: mask for first sentence
mask2: mask for second sentence
"""
mask1_start, mask2_start = [], []
while sent1[0] == sent2[0]:
sent1 = sent1[1:]
sent2 = sent2[1:]
mask1_start.append(0.)
mask2_start.append(0.)
mask1_end, mask2_end = [], []
while sent1[-1] == sent2[-1]:
if (len(sent1) == 1) or (len(sent2) == 1):
break
sent1 = sent1[:-1]
sent2 = sent2[:-1]
mask1_end = [0.] + mask1_end
mask2_end = [0.] + mask2_end
assert sent1 or sent2, 'Two sentences are identical.'
return (mask1_start + [1.] * len(sent1) + mask1_end,
mask2_start + [1.] * len(sent2) + mask2_end)
|
b93de9dfb6bde55859f57e6957f31c319e9417f6
| 227,465 |
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper
|
0190c4f7aa4cde2a6df964a8f3e4408ebceedfb7
| 297,582 |
import torch
def get_prediction(model, batch, device):
"""Get predicted labels for given input batch and model."""
images = torch.tensor(batch, dtype=torch.float).to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
return predicted
|
e8bb4257dc19f26fa206e26fa844ec9717974e52
| 9,250 |
def get_group_from_table(metatable_dict_entry):
"""
Return the appropriate group title based on either the SGID table name or
the shelved category.
"""
sgid_name, _, item_category, _ = metatable_dict_entry
if item_category == 'shelved':
group = 'UGRC Shelf'
else:
table_category = sgid_name.split('.')[1].title()
group = f'Utah SGID {table_category}'
return group
|
1e356f0204aa099728ba0a5c595c25f1abd45de3
| 550,514 |
def escape(v):
"""
Escapes values so they can be used as query parameters
:param v: The raw value. May be None.
:return: The escaped value.
"""
if v is None:
return None
elif isinstance(v, bool):
return str(v).lower()
else:
return str(v)
|
304079292532ef905099f1fd047ddb95df231076
| 672,692 |
import math
def Naca_00XX(c, t, x_list):
"""
    Generates a symmetric NACA airfoil.
Inputs:
:param c: chord
:param t: max thickness as a fraction of the chord.
    :param x_list: points between 0 and c (chord length)
Returns dictionary with keys 'u'pper and 'l'ower
The Naca function can be found in: https://en.wikipedia.org/wiki/NACA_airfoil
Created on Wed Feb 03 12:50:52 2016
@author: Endryws and Pedro Leal
"""
y_upper = []
y_lower = []
for x in x_list:
        xc = x/c  # precompute x/c for speed and ease of future changes
a1 = 5*t*c
t1 = 0.2969*(math.sqrt(xc))
t2 = -0.1260*xc
t3 = -0.3516*(xc**2)
t4 = 0.2843*(xc**3)
t5 = -0.1015*(xc**4)
y = (a1*(t1+t2+t3+t4+t5))
y_upper.append(y)
        y_lower.append(y*(-1))  # mirror y to get the lower (negative) surface
y = {'u': y_upper, 'l':y_lower}
return y
|
8fe95b7b02569138cc2adabcc526490341c0a71d
| 512,427 |
def serialise_matched_reference(data, current_timestamp):
"""Serialise the data matched by the model."""
serialised_data = {
'publication_id': data['WT_Ref_Id'],
'cosine_similarity': data['Cosine_Similarity'],
'datetime_creation': current_timestamp,
'document_hash': data['Document id']
}
return serialised_data
|
36f6f22eccf0bcb06c21b68de18e7d606cb4e48b
| 283,449 |
def get_idx_using_unique_iso(mol, iso_val):
"""
This function takes a value for an isotope label and finds the atom in a
mol which has that isotope label. This assumes there is only 1 atom in a
mol with the same isotope value
Inputs:
:param rdkit.Chem.rdchem.Mol mol: a molecule whose atom's have unique
isotope labels
:param int iso_val: the isotope value to search by
Returns:
:returns: int idx: the Idx index number of the atom whose isotope label
is the same as iso_val. Returns None if iso_val not in mol.
"""
for atom in mol.GetAtoms():
if atom.GetIsotope() == iso_val:
idx = atom.GetIdx()
return idx
return None
|
7528955c114bb6c7dd086f6f86058aa7cda193dd
| 431,006 |
def default_decision_function(position, solutions):
"""The default decision function - returns the first solution."""
return solutions[0]
|
7fb8678ab1c3298fe9cf0fe72aca0ee1d8c4161b
| 198,010 |
def process_fatal_error(token_list, values):
"""Retrieve and categorize fatal error.
This function parses a line, retrieves fatal error message, categorizes it
and stores it in the main 'values' dictionary with a key 'fatal'.
Arguments:
token_list (list)[-]: A list of tokens to be processed.
values (dict)[-]: Main dictionary to store processed values.
Returns:
values (dict)[-]: Main dictionary to store processed values.
token_list (list)[-]: A list of processed tokens.
"""
values.setdefault('fatal', {})
error_msg = token_list[2:] # Removed first two elements: 'fatal', 'error.'
print("Fatal error found: {0}".format(' '.join(error_msg)))
if error_msg[:3] == ['cross-section', 'tables', 'missing']:
za_num = int(error_msg[5].split('.')[0]) # error_msg[5] = 'ZZZAAA.(id)'
values['fatal'].setdefault('xs_missing', []).append(za_num)
return values, token_list
|
b769ac051f1103e4fe95fc842b26f4efa2f04159
| 179,819 |
import base64
def is_jwt_well_formed(jwt: str):
"""Check if JWT is well formed
Args:
jwt (str): Json Web Token
Returns:
Boolean: True if JWT is well formed, otherwise False
"""
if isinstance(jwt, str):
# JWT should contain three segments, separated by two period ('.') characters.
jwt_segments = jwt.split('.')
if len(jwt_segments) == 3:
jose_header = jwt_segments[0]
# base64-encoded string length should be a multiple of 4
if len(jose_header) % 4 == 0:
try:
jh_decoded = base64.b64decode(jose_header).decode('utf-8')
if jh_decoded and jh_decoded.find('JWT') > -1:
return True
except Exception:
return False
# If tests not passed return False
return False
|
4406ea2ab186c601f0959ec68107671b1a7f1990
| 268,868 |
from pathlib import Path
def create_symlinks(paths):
"""Create symlinks with unique file names."""
container_paths = [f"{n}_{Path(path).name}" for n, path in enumerate(paths)]
for container_path, path in zip(container_paths, paths):
Path(container_path).symlink_to(path)
return container_paths
|
9189255a5642cc24c6c762b0e2d1c6a704c4a1ff
| 350,317 |
def __find_notification_channel_name_in_message(message):
"""
The command "gcloud alpha monitoring channels create", communicates a notification channel Id as part of its output.
Knowing the message format, this function extracts the channel Id and returns it to the caller.
Sample message:
"Created notification channel [projects/hipaa-sample-project/notificationChannels/1095329235450268453]."
:param message: the message communicated by "gcloud alpha monitoring channels create" command
:return: the notification channel Id to be used for defining a stack driver alert
"""
channel_name = [t for t in message.split() if t.startswith('[projects')]
    return channel_name[0].translate(str.maketrans('', '', '[].'))  # Python 3 translate: drop '[', ']' and '.'
|
d53df43da83f85ec5174ced8780015cb690be1d0
| 276,075 |
def replace_escape_codes(input_str):
"""Replace escape codes function
Parameters
----------
input_str : str
String of input
Returns
-------
str
Sting to replace escape codes to print properly
"""
return input_str.replace('"', '"').replace(''', "'").replace('&', '&')
|
7ec7beb47412d828bcee1c02edbdde15940487f0
| 467,430 |
def list_check(lst):
"""Are all items in lst a list?
>>> list_check([[1], [2, 3]])
True
>>> list_check([[1], "nope"])
False
"""
counter = []
for each in lst:
if isinstance(each, list):
counter.append(True)
else:
counter.append(False)
return all(counter)
|
e3dd483d57f153c102ecd2d2592491ec36afd077
| 575,212 |
def flatten(tuple_entry):
"""
Given a tuple of tuples and objects, flatten into one tuple of objects
@param {Tuple} tuple_entry Tuple containing possibly nested tuples
@return {Tuple} One dimensional tuple of objects
"""
if len(tuple_entry) == 0:
return tuple_entry
if isinstance(tuple_entry[0], tuple):
return flatten(tuple_entry[0]) + flatten(tuple_entry[1:])
return tuple_entry[:1] + flatten(tuple_entry[1:])
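# Example:
# >>> flatten((1, (2, (3, 4)), 5))
# (1, 2, 3, 4, 5)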
|
0cfdf8345f3304e4a60085d7767040439281c273
| 556,744 |
def stripNameSpace(objName):
"""
    Check whether the incoming name has a namespace; if so, strip it and return the name without the namespace
    :param objName: str
:return: str, name with no namespace
"""
name = objName
if ':' in name:
name = name.split(':')[-1]
return name
|
8f2e5ba1b39712ca8e0255b2cbfc72b9d64d5503
| 398,676 |
def allocated_size(allocation_unit, requested_size):
"""
Round ``requested_size`` up to the nearest ``allocation_unit``.
:param int allocation_unit: The interval in ``bytes`` to which
``requested_size`` will be rounded up.
:param int requested_size: The size in ``bytes`` that is required.
:return: The ``allocated_size`` in ``bytes``.
"""
allocation_unit = int(allocation_unit)
requested_size = int(requested_size)
previous_interval_size = (
(requested_size // allocation_unit)
* allocation_unit
)
if previous_interval_size < requested_size:
return previous_interval_size + allocation_unit
else:
return requested_size
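# Examples with a 4096-byte allocation unit:
# >>> allocated_size(4096, 10000)
# 12288
# >>> allocated_size(4096, 8192)
# 8192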
|
f25a55f3cf96194c5357e106642b89f6aefba65c
| 639,699 |
from re import sub
def replace(given_text: str, sub_string: str, replacable_str: str) -> str:
"""Replace a substring with another string from a given text.
Args:
given_text (str): the full text where to be replaced
        sub_string (str): the pattern (regular expression) to be replaced
        replacable_str (str): the replacement string
Returns:
str: stripped replaced text
"""
return sub(sub_string, replacable_str, given_text).strip()
|
e0110d446c401af47fa42033048e30ffe28d2bc9
| 439,005 |
def parse_name_scope(name):
"""
Given a tensor or op name, return the name scope of the raw name.
Args:
name: The name of a tensor or an op.
Returns:
str: The name scope
"""
i = name.rfind('/')
if i != -1:
return name[:i] if not name.startswith('^') else name[1:i]
return ''
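# Examples:
# >>> parse_name_scope('foo/bar/tensor_0')
# 'foo/bar'
# >>> parse_name_scope('^foo/bar')
# 'foo'
# >>> parse_name_scope('tensor_0')
# ''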
|
dc0de1327a56f3d8cf110d4cd8a804a282bb52e1
| 571,611 |
def count_vowels(s):
"""
    (str) -> int
return number of vowels in s.
>>> count_vowels('The quick brown fox')
5
"""
lowerS = s.lower()
vowels = ['a', 'e', 'i', 'o', 'u']
ctr = 0
for ch in lowerS:
if ch in vowels:
ctr += 1
print ('There are {} vowels in: "{}"'.format(ctr, s))
print ()
return ctr
|
a043f65cb6fba8063139c5a57aec87cbfb891409
| 158,430 |
import json
def read_json(filepath, **kwargs):
"""Load JSON file `filepath` as dictionary. `kwargs` are keyword arguments
for `json.load()`.
Args:
filepath: path-like, path to JSON file.
Returns:
Dictionary of JSON contents.
"""
with open(filepath, 'r') as fp:
return json.load(fp, **kwargs)
|
09e1b28c065eee619a242790de65f3bee4eec4af
| 562,316 |
import base64
def encode_b64(data):
"""Encode data in base64."""
return base64.b64encode(data)
|
753b01c577794e65de7c714cad772c0de4230413
| 496,387 |
import io
def data_read(filename):
"""Read string from a file with leading and trailing whitespace removed.
:param ``str`` filename:
File to read from.
:returns ``str``:
        The first line of the file, stripped.
"""
with io.open(filename) as f:
data = f.readline()
return data.strip()
|
78945d3929c293791ba43cb0863fdd6acc1a5460
| 417,243 |
import re
def is_commit(string: str) -> bool:
"""Tests if a string is a SHA1 hash."""
    return bool(re.fullmatch(r"[0-9a-f]{5,40}", string))  # anchored, so trailing non-hex characters do not match
|
538b34becb3cc1edcab90f62fbecf1a7dfcfeceb
| 340,857 |
def count_rectangles(m: int, n: int) -> int:
"""Returns the number of rectangles in an m by n rectangular grid."""
return m * (m + 1) * n * (n + 1) // 4
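# Example: a 2 x 2 grid contains 9 axis-aligned rectangles
# (4 unit squares, 4 one-by-two dominoes, 1 full square).
# >>> count_rectangles(2, 2)
# 9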
|
e868b081b006f019c3b6ae92ed0a20215c472e24
| 362,759 |
import logging
def is_matching_filename(filename, suffix_list):
"""
Return True/False if filename's suffix is in suffix list.
(Returns False if the file doesn't seem to have a suffix)
Assumption: members of suffix_list are lower case
"""
try:
_, suffix = filename.rsplit(".", 1)
except ValueError:
logging.warning("Unable to obtain suffix for '%s'" % (filename))
return False
return suffix.lower() in suffix_list
|
2bb8d0cda0210fa185bebade3e7851c07f3124f4
| 469,120 |
import re
def smart_title(value):
"""Converts a string into titlecase."""
"""Excludes [a, an, the, and, but, or, for, nor, of]"""
# strip smart single quotes
t = re.sub('\\u2018|\\u2019','\'', value.title())
# fix the plural
t = re.sub(r'([a-z])\'([A-Z])', lambda m: m.group(0).lower(), t)
# fix the letter following a number
t = re.sub(r'\d([A-Z])', lambda m: m.group(0).lower(), t)
return re.sub(r'(?i)\s(a|an|and|for|of|the)\b', lambda m: m.group(0).lower(), t)
|
e93b2ee7e0859bb0bd2df937a1f8c54f8fdedaf5
| 611,278 |
def atfile_sci(filename):
"""
Return the filename of the science image
which is assumed to be the first word
in the atfile the user gave.
"""
return filename.split()[0]
|
85d01f0da9fd065f486a0266db483e8a845db4a8
| 560,298 |
def integer(v):
""" Is arg an integer? """
return type(v) is int
|
66ec3c118ee8bf8c717e393b6774ba5e30d7b0dd
| 261,605 |
def sample_labels(model, wkrs, imgs):
"""
Generate a full labeling by workers given worker and image parameters.
Input:
- `model`: model instance to use for sampling parameters and labels.
- `wkrs`: list of worker parameters.
- `imgs`: list of image parameters.
Output:
1. list [img id, wkr id, label] as provided by `model.sample_label`.
"""
labels = [[ii, wi, model.sample_label(wkrs[wi], imgs[ii])] \
for ii in range(len(imgs)) for wi in range(len(wkrs))]
return labels
|
1abd2d0d087f7ce452db7c899f753366b148e9e6
| 17,152 |
def parse_None(el: object) -> object:
"""
Parse element or list of elements, inserting None when it finds a string 'None'
:param el: List or single element to parse
:return: List or single element parsed
"""
if isinstance(el, list):
for i in range(0, len(el)):
if el[i] == 'None':
el[i] = None
elif el == 'None':
el = None
return el
|
b2e36dcc9a9a7dc6401148553ab8035f0ea56a70
| 545,269 |
def tableToDicts(header, entries):
"""Converts a tuple of header names, and a list of entry tuples, to a list of dictionaries
"""
dicts = []
for entry in entries:
dicts.append(dict(zip(header, entry)))
return dicts
|
3b7f7939b32f65e116a57683dca84cb80328aaaa
| 103,507 |
def info(v, row, row_n, i_s, i_d, header_s, header_d, scratch, errors, accumulator):
""" Print information about a value, and return the value. Prints out these values:
- row_n: The row number
- header_d: Schema header
- type: The python type of the value
- value: The value of the row, truncated to 40 characters.
:param v: The current value of the column
:param row: A RowProxy object for the whiole row.
:param row_n: The current row number.
:param i_s: The numeric index of the source column
:param i_d: The numeric index for the destination column
:param header_s: The name of the source column
:param header_d: The name of the destination column
:param scratch: A dict that can be used for storing any values. Persists between rows.
:param errors: A dict used to store error messages. Persists for all columns in a row, but not between rows.
:param accumulator: A dict for use in accumulating values, such as computing aggregates.
:return: The final value to be supplied for the column.
"""
print("{}:{} {} {}".format(row_n, header_d, type(v), str(v)[:40]))
return v
|
6c74f34f4d33b854ca216c6ba3a4e8cc43b71e99
| 191,007 |
import ast
def format_itervars(ast_node):
"""Formats an `ast_node` of loop iteration variables as string, e.g. 'a, b'"""
# handle the case that there only is a single loop var
if isinstance(ast_node, ast.Name):
return ast_node.id
names = []
for child in ast_node.elts:
if isinstance(child, ast.Name):
names.append(child.id)
elif isinstance(child, ast.Tuple) or isinstance(child, ast.List):
# if its another tuple, like "a, (b, c)", recurse
names.append("({})".format(format_itervars(child)))
return ", ".join(names)
|
e5fa14fa230f671bba0b9683eab1023b390b02de
| 522,342 |
def draw_rect(surface, fill_color, outline_color, rect, border=1):
"""
Draw cells on surface
:param surface: surface
:param fill_color: cell color
:param outline_color: out color
:param rect: rect to draw
:param border: border width
:return: rect
"""
surface.fill(outline_color, rect)
surface.fill(fill_color, rect.inflate(-border * 2, -border * 2))
return rect
|
9da4bb071b016d7dae81668416c3a2d9a27af4eb
| 545,041 |
import textwrap
def testdir(testdir):
"""Return default testdir fixture with additional helper methods."""
def _path_has_content(path, content):
return testdir.tmpdir.join(path).read() == textwrap.dedent(content)
testdir.path_has_content = _path_has_content
def _write_path(path, content):
testdir.tmpdir.join(path).write(textwrap.dedent(content))
testdir.write_path = _write_path
return testdir
|
42797a41dbfa2c56f939d8f72b61833843773d23
| 257,445 |
def line_number_in_contents(contents, regex, default=1):
"""Find the line number where regex first occurs inside contents.
If regex has a matching group, the line number of the start of the
matching group will be returned. If the regex is not found,
'default' is returned.
"""
m = regex.search(contents)
if not m:
return default
if m.groups():
startpos = m.start(1) # start of group 1 (the paren section)
else:
startpos = m.start(0) # start of the entire regexp
return contents.count('\n', 0, startpos) + 1
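# Usage sketch (hypothetical contents):
# >>> import re
# >>> contents = "line one\nline two\ndef target():\n    pass\n"
# >>> line_number_in_contents(contents, re.compile(r'def (target)'))
# 3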
|
51ebe0a23ebc19bbb997bddf651179088d32a81f
| 104,044 |
def find_variable(frame, varname):
"""Find variable named varname in the scope of a frame.
Raise a KeyError when the varname cannot be found.
"""
try:
return frame.f_locals[varname]
except KeyError:
return frame.f_globals[varname]
|
f97bbcd5d60faf7cecce12d08f53eb144a766bd0
| 660,049 |
def _invert(x, limits):
"""inverts a value x on a scale from
limits[0] to limits[1]"""
return limits[1] - (x - limits[0])
|
116a8420548097fe80ffc0a92acdf382b937ff1f
| 310,232 |
import string
def __camelize(value):
"""Camelize a variable name.
e.g. 'dumb_clusterer' to "DumbClusterer".
>>> __camelize('dumb_clusterer')
'DumbClusterer'
"""
return "".join(string.capwords(x) for x in value.split("_"))
|
f89ffa9ff262f0fc959c37af5063c4bf37a6504a
| 280,019 |
def get_mean_rate(reviewer):
"""Function that returns the mean rate of a viewer."""
#Take the mean rate of the viewer.
mean_rates = reviewer['Review Rating'].mean()
#Return this value.
return mean_rates
|
d67bdac1958419f952c8e92e97ecd2514651cfe9
| 307,135 |
from typing import Dict
from typing import Tuple
from typing import Literal
from typing import List
def place_colorstrip(
anchor_coords: Dict[str, Tuple[float, float]],
width: float,
height: float,
spacing: float,
loc: Literal["left", "right", "up", "down", "polar"],
) -> Tuple[
Dict[str, Tuple[List[float], List[float]]], Dict[str, Tuple[float, float]]
]:
"""Compute the coordinates of the boxes that represent a colorstrip.
This function computes the x and y coordinates (or the angles and radii)
of colored boxes, which together form a colorstrip used to annotate leaves
of a tree.
Args:
anchor_coords: Dictionary of nodes-to-coordinate tuples that contain
the "anchor" point to start the colorstrip. When `loc=left`, this
is the center right of each box, when `loc=right`, this is the
center left of each box, etc.
width: Width of the box. The width is defined as the length of the
box in the same direction as the leaves.
height: Height of the box. The height is defined as the length of the
box in the direction perpendicular to the leaves.
spacing: Space between consecutive colorstrips. This value is used as a
padding before placing the box.
loc: Where to place each box relative to the anchors. Valid options are:
`left`, `right`, `up`, `down`, `polar`.
Returns:
A dictionary of node-to-coordinate tuples for each box, and a dictionary
of the next set of anchor coordinates (for placing more colorstrips)
"""
next_anchor_coords = {}
size_x, size_y = (
(width, height) if loc in ("left", "right") else (height, width)
)
coef_x, coef_y = 0, 1 # default: up / polar
if loc == "left":
coef_x, coef_y = -1, 0
elif loc == "right":
coef_x, coef_y = 1, 0
elif loc == "down":
coef_x, coef_y = 0, -1
boxes = {}
for anchor, (x, y) in anchor_coords.items():
next_anchor_coords[anchor] = (
x + coef_x * (size_x + spacing),
y + coef_y * (size_y + spacing),
)
center_x, center_y = (
x + coef_x * ((size_x / 2) + spacing),
y + coef_y * ((size_y / 2) + spacing),
)
xs = [
center_x + size_x / 2,
center_x - size_x / 2,
center_x - size_x / 2,
center_x + size_x / 2,
center_x + size_x / 2,
]
ys = [
center_y + size_y / 2,
center_y + size_y / 2,
center_y - size_y / 2,
center_y - size_y / 2,
center_y + size_y / 2,
]
boxes[anchor] = (xs, ys)
return boxes, next_anchor_coords
|
e667de3478ee0b8ae02e5ec26272e8262ae72cdf
| 545,771 |
def tonumber(v):
"""
Convert a value to int if its an int otherwise a float.
"""
try:
v = int(v)
except ValueError as e:
v = float(v)
return v
|
8b52ac3385b3ffc721af523799ef3a6da4e29060
| 16,682 |
def get_user_attrs(object):
"""
Return the attributes of an object
:param object: the object to get attributes from
:return: list of attributes
"""
return [k for k in dir(object)
if not k.startswith('__')
and not k.endswith('__')]
|
44e1a8bcfedad46f92cd9d98f39f5797f9de15d5
| 602,852 |
def _get_username(user):
"""
Return user's username. ``user`` can be standard Django User
instance, a custom user model or just an username (as string).
"""
value = None
# custom user, django 1.5+
get_username = getattr(user, 'get_username', None)
if get_username is not None:
value = get_username()
if value is None:
# standard User
username = getattr(user, 'username', None)
if username is not None:
value = username
else:
# assume user is just an username
value = user
return value
|
e0f80aee5b8f8be2c5c11ca3cbe556f9c80637e9
| 569,371 |
def inverse_monoalpha_cipher(monoalpha_cipher):
"""Given a Monoalphabetic Cipher (dictionary) return the inverse."""
inverse_monoalpha = {}
    for key, value in monoalpha_cipher.items():
inverse_monoalpha[value] = key
return inverse_monoalpha
|
dcad6fae88fb005e39313dce6a977d653e58c599
| 514,414 |
def build_geometry(self, sym=1, alpha=0, delta=0):
"""Build the geometry of the machine
Parameters
----------
self : Machine
Machine object
sym : int
Symmetry factor (1= full machine, 2= half of the machine...)
alpha : float
Angle for rotation [rad]
delta : complex
Complex value for translation
Returns
-------
surf_list : list
list of surfaces needed to draw the lamination
"""
surf_list = list()
if self.frame is not None:
surf_list.extend(self.frame.build_geometry(sym=sym, alpha=alpha, delta=delta))
if self.rotor.is_internal:
# Adding the list of surfaces of the stator
surf_list.extend(self.stator.build_geometry(sym=sym, alpha=alpha, delta=delta))
# Adding the list of surfaces of the rotor
surf_list.extend(self.rotor.build_geometry(sym=sym, alpha=alpha, delta=delta))
# Add the shaft only for Internal Rotor
if self.rotor.Rint > 0:
surf_list.extend(
self.shaft.build_geometry(sym=sym, alpha=alpha, delta=delta)
)
else:
# Adding the list of surfaces of the rotor
surf_list.extend(self.rotor.build_geometry(sym=sym, alpha=alpha, delta=delta))
# Adding the list of surfaces of the stator
surf_list.extend(self.stator.build_geometry(sym=sym, alpha=alpha, delta=delta))
return surf_list
|
670e7ae40e6f02d386cba477cd6bc6dfcf2f42e9
| 676,441 |
def status_code_to_text(status):
"""
Takes an Solarwinds Orion status code and translates it to
human text and also a colour that can be used in Slack.
"""
if status == 0:
return ("Unknown", None) # aka slack 'grey'
elif status == 1:
return ("Up", "#00ad52") # aka slack 'good'
elif status == 2:
return ("Down", "#eb0000") # aka slack 'danger'
elif status == 3:
return ("Warning", "#e89e0e") # aka slack 'warning'
elif status == 14:
return ("Critical", "#eb0000")
|
96bc37cddafa9b2cfde2d53f910c4d96fea594a7
| 422,239 |
def parse_photos(soup):
"""Parse photo URLs from soup
Arguments:
soup: the soup object to parse from
Returns:
URL list: list of image URLs
"""
imgEles = soup.select("div.imgList")[0].findChildren("img")
imgUrlList = []
for img in imgEles:
imgUrlList.append(img["src"].replace("_125x85.crop.jpg", "_765x517.water3.jpg"))
return imgUrlList
|
a7fad05f6ed00a67e7f65c6195088161005661f7
| 411,059 |
def parse_comma_sep_list(csl):
"""Parse comma separated list of integers."""
return [int(x) for x in csl if x != ""]
|
df4396fd46749b6bb7cec03c1cc98062454f05a1
| 456,816 |
def adjust_fields(prefix, task):
"""
Prepend the prefix to a task's fields
:param prefix: string prepended to task fields
:type prefix: str
    :param task: a JSON task object from task.json
:type task: dict
:return: a modified JSON task object from task.json
:rtype: dict
"""
output_task = {}
for field, content in task.items():
output_task[prefix + field] = content
return output_task.copy()
|
21fefa294ee2c10dd2388bd70be11828753df7c9
| 77,452 |
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if tgt_sizes is None:
ignored = indices[src_sizes[indices] > max_src_size]
else:
ignored = indices[
(src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)
]
if len(ignored) > 0:
if tgt_sizes is None:
indices = indices[src_sizes[indices] <= max_src_size]
else:
indices = indices[
(src_sizes[indices] <= max_src_size)
& (tgt_sizes[indices] <= max_tgt_size)
]
return indices, ignored.tolist()
|
16f82b1413a9e6cf389567c724f7267f4dc97891
| 483,797 |
def ExtractLogId(log_resource):
"""Extracts only the log id and restore original slashes.
Args:
log_resource: The full log uri e.g projects/my-projects/logs/my-log.
Returns:
A log id that can be used in other commands.
"""
log_id = log_resource.split('/logs/', 1)[1]
return log_id.replace('%2F', '/')
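# Example:
# >>> ExtractLogId('projects/my-project/logs/syslog%2Fkernel')
# 'syslog/kernel'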
|
b8a26978e7a394fd84e86182cbe365e3fde69dc7
| 443,260 |
def plus1(x):
"""
returns x + 1
"""
return x + 1
|
e27669977aceca1d2c4fc34de7ca0d822d1fda57
| 505,862 |
async def in_voice(ctx):
"""Checks that the command sender is in the same voice channel as the bot."""
bot_voice = ctx.guild.voice_client
if bot_voice is not None and bot_voice.channel is not None:
return True
else:
return False
|
8d89b0b1d4a8fa79bb591d5326f41de1d8138df9
| 288,330 |
def _get_order_and_exponentiation_step(method):
"""Return order and exponentiation step given ``method``.
Given ``method`` we return the initial order of the approximation error of the
sequence under consideration (order) as well as the step size representing the
growth of the exponent in the series expansion of the limit (exponentiation_step).
See function ``richardson_extrapolation`` for more details.
For different methods, different values of order and exponentiation step apply.
Consider the following examples, where we continue the notation from function
    ``richardson_extrapolation`` and use O() to denote the Big O Landau symbol.
Central Differences.
Derivative approximation via central difference is given by
g(h) := [f(x + h) - f(x - h)] / 2h = f'(x) + r(x, h),
where r(x, h) denotes the remainder term.
If we expand the remainder term r(x, h) we get
r(x, h) = a0*(h**2) + a1*(h**4) + a2*(h**6) + ...
with a0 = f''(x) / 2!, a1 = f'''(x) / 3! etc.
    Rearranging terms we can write L := f'(x) = g(h) - r(x, h) = g(h) + O(h**2) and
we notice that order = 2 and exponentiation_step = 2.
Forward Differences.
Derivative approximation via forward difference is given by
g(h) := [f(x + h) - f(x)] / h = f'(x) + r(x, h),
where again r(x, h) denotes the remainder term.
If we expand the remainder term r(x, h) we get
r(x, h) = a0*(h**1) + a1*(h**2) + a2*(h**3) + ...
with a0 = f''(x) / 2!, a1 = f'''(x) / 3! etc.
    Rearranging terms we can write L := f'(x) = g(h) - r(x, h) = g(h) + O(h) and
we notice that order = 1 and exponentiation_step = 1.
Backward Differences.
Analogous to forward differences.
Args:
method (str): One of ["central", "forward", "backward"], default "central".
Returns:
order (int): Initial order of the approximation error of sequence elements.
exponentiation_step (int): Step representing the growth of the exponent in the
series expansions of the limit.
Example:
>>> _get_order_and_exponentiation_step('central')
(2, 2)
"""
lookup = {
"central": (2, 2),
"forward": (1, 1),
"backward": (1, 1),
}
order, exponentiation_step = lookup[method]
return order, exponentiation_step
|
6c48956b0d2c016a2409a14f2512257d03b38571
| 441,886 |
def nested_get(dictionary: dict, keys: list):
"""Set value to dict for list of nested keys
>>> nested_get({'key': {'nested_key': 123}}, keys=['key', 'nested_key'])
123
"""
nested_dict = dictionary
for key in keys[:-1]:
nested_dict = nested_dict[key]
return nested_dict.get(keys[-1])
|
87444ae9d67c66b6eb0b4389a4a7bc51a5f05502
| 267,113 |
def lookup_movieId(movies, movieId):
"""
Convert output of recommendation to movie title
"""
# match movieId to title
movies = movies.reset_index()
boolean = movies["movieid"] == movieId
movie_title = list(movies[boolean]["title"])[0]
return movie_title
|
d7eae506b47a8d9045318e264032ee928c7b8fb2
| 292,537 |