content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
---|---|---|
def str_to_obj(string):
"""
Convert the string representation of a number to a number if required.
"""
try:
return float(string)
except ValueError:
return string | f3d4ce4c48044eff2c0dfc0fd937926c94702b4b | 542,564 |
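# Minimal usage sketch for str_to_obj: numeric strings become floats,
# everything else passes through unchanged.
assert str_to_obj("3.14") == 3.14
assert str_to_obj("abc") == "abc"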
import math
def calculateFreq( pmf , begin, end ):
"""
    Calculates the frequencies of the pmf between [begin, end)
Args:
pmf: the probability mass function
begin: the first (inclusive) element of the interval
end: the last (exclusive) element of the interval
Return:
        the probability of falling in [begin, end)
    Side effect:
        Normalizes the pmf
"""
pmf.Normalize()
    items = pmf.GetDict()
    filtered = [ prob for value, prob in items.items()
                 if begin <= value < end ]
prob_sum = math.fsum(filtered)
return prob_sum | 30b6a625e37ae1cd10065d4f66d536d64cae010d | 69,638 |
def splitLines(s):
"""Split s into a list of lines, each of which has a trailing newline
If the lines are later concatenated, the result is s, possibly
with a single appended newline.
"""
return [ l + '\n' for l in s.split('\n') ] | eb84be1b7e7c1fd0b80717236c7f211001fe268d | 355,491 |
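# Minimal usage sketch for splitLines, checking the round-trip property
# promised by the docstring:
lines = splitLines("a\nb")
assert lines == ["a\n", "b\n"]
assert "".join(lines) == "a\nb" + "\n"  # original text plus one appended newline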
def CreateAcceleratorConfigMessages(msgs, accelerator_type, accelerator_count):
"""Returns a list of accelerator config messages.
Args:
msgs: tracked GCE API messages.
accelerator_type: reference to the accelerator type.
accelerator_count: number of accelerators to attach to the VM.
Returns:
a list of accelerator config message that specifies the type and number of
accelerators to attach to an instance.
"""
accelerator_config = msgs.AcceleratorConfig(
acceleratorType=accelerator_type, acceleratorCount=accelerator_count)
return [accelerator_config] | 7aaaf7b09ac6f5904e4887895cf2fe0933cfc491 | 563,924 |
def isLeaf(tree, nid):
"""
    Test if the node identified by nid is a leaf, i.e. has no children attached to it.
Parameters
----------
tree : treelib.Tree
Tree where NID is defined.
nid : treelib.Node
NodeID to test.
Returns
-------
boolean
        True if the node is a leaf, False otherwise
"""
    return len(tree.is_branch(nid)) == 0 | 02030891cb5df92cb4581ba67509e98137e4a6b2 | 419,770 |
def _replace_oov(original_vocab, line):
"""Replace out-of-vocab words with "UNK".
This maintains compatibility with published results.
Args:
original_vocab: a set of strings (The standard vocabulary for the dataset)
line: a unicode string - a space-delimited sequence of words.
Returns:
a unicode string - a space-delimited sequence of words.
"""
return u" ".join(
[word if word in original_vocab else u"UNK" for word in line.split()]) | 2e2cb1464484806b79263a14fd32ed4d40d0c9ba | 705,768 |
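# Minimal usage sketch for _replace_oov: "sat" is out-of-vocab, so it is
# replaced with "UNK".
vocab = {u"the", u"cat"}
assert _replace_oov(vocab, u"the cat sat") == u"the cat UNK"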
import torch
def angle_dist(vec_a: torch.Tensor, vec_b: torch.Tensor) -> torch.Tensor:
"""
    Combines vectors a and b by concatenating [a * b, |a - b|]
:param vec_a: A vector
:param vec_b: Another vector with the same shape as vec_a
:return: The combined vector
"""
angle_vec = vec_a * vec_b
dist_vec = torch.abs(vec_a - vec_b)
return torch.cat((angle_vec, dist_vec), -1) | b4d7b9f530a77d37429b4b3807c10746a70290b1 | 294,447 |
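# Minimal usage sketch for angle_dist: two vectors of shape (3,) combine
# into one vector of shape (6,) holding [a * b, |a - b|].
a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])
assert angle_dist(a, b).shape == (6,)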
def get_telemetry_url(environment: str,
enterprise_id: str,
device_id: str,
category: str,
metric: str,
from_time: str,
to_time: str,
period: str,
statistic: str) -> str:
"""
    Build and return the telemetry url for the scapi endpoint
    :param environment: Esper cloud environment name (used to build the host)
    :param enterprise_id: id of the enterprise
    :param device_id: id of the device
    :param category: telemetry category of the metric
    :param metric: name of the metric to query
    :param from_time: start of the query window
    :param to_time: end of the query window
    :param period: aggregation period
    :param statistic: statistic to aggregate by
    :return: the telemetry url
"""
url = f'https://{environment}-api.esper.cloud/api/graph/{category}/{metric}/?from_time={from_time}&' \
f'to_time={to_time}&period={period}&statistic={statistic}&device_id={device_id}&enterprise_id=' \
f'{enterprise_id}'
return url | a4e7f5f06553edcfc4d4c8e09caf75446008aaee | 392,534 |
def get_beta(intrinsic_growth_rate, gamma, susceptible, relative_contact_rate):
"""
Calculates a rate of exposure given an intrinsic growth rate for COVID-19
:param intrinsic_growth_rate: Rate of spread of COVID-19 cases
:param gamma: The expected recovery rate from COVID-19 for infected individuals
:param susceptible: Current amount of individuals that are susceptible
:param relative_contact_rate: The relative contact rate amongst individuals in the population
:return: beta: The rate of exposure of individuals to persons infected with COVID-19
"""
inv_contact_rate = (
1.0 - relative_contact_rate
) # The inverse rate of contact between individuals in the population ## get_beta_icr_exp
updated_growth_rate = (
intrinsic_growth_rate + gamma
) # The intrinsic growth rate adjusted for the recovery rate from infection ## get_beta_ugr_exp
beta = updated_growth_rate / susceptible * inv_contact_rate ## get_beta_beta_exp
return beta | 8937312632c0ce473970af066628027ea67c2a7f | 295,334 |
def get_view_parent_folder(view):
"""
Given a view, return the path of the window folder it is in, otherwise
return nothing.
"""
file_name = view.file_name()
if not file_name:
return None
try:
parent_window_folders = view.window().folders()
except Exception:
parent_window_folders = []
for folder in parent_window_folders:
if folder in file_name:
return folder
return None | cfe93eb9f99c11c83e99f87e493ebd2fa2b6ad4b | 383,071 |
def _get_tensor_value(tensor):
"""Gets the value of a torch Tensor."""
return tensor.cpu().detach().numpy() | de281eabdeb1ef54c7f36979ffe6511b8998d9b3 | 430,976 |
import shutil
def check_commands(commands: list) -> list:
"""
    Checks whether all commands in the list exist on the system / environment.
    Returns a list of all missing commands.
"""
missing = []
for cmd in commands:
if shutil.which(cmd) is None:
missing.append(cmd)
return missing | 8a12880e2fded7f8f98a0458da937b59eb68482e | 579,480 |
def spline_type_to_id(spline_type):
""" spline_type_to_id(spline_type)
Method to map a spline name to an integer ID. This is used so that
set_cubic_spline_coefs() can be efficient.
The spline_type can also be a number between -1 and 1, representing
the tension for a Cardinal spline.
"""
# Handle tension given for Cardinal spline
tension = 0.0
if isinstance(spline_type, (float, int)):
if spline_type >= -1 and spline_type <= 1:
tension = float(spline_type)
spline_type = 'Cardinal'
else:
raise ValueError('Tension parameter must be between -1 and 1.')
# Get id
    if spline_type.lower() in ['c', 'card', 'cardinal', 'catmull-rom']:
return tension # For catmull-rom, we use default tension 0
elif spline_type.lower() in ['b', 'basis', 'basic']:
return 2.0
elif spline_type.lower() in ['herm', 'hermite']:
return 3.0
elif spline_type.lower() in ['lag', 'lagrange']:
return 4.0
elif spline_type.lower() in ['lanc', 'lanczos']:
return 5.0
elif spline_type.lower() in ['near', 'nearest']:
return 97.0
elif spline_type.lower() in ['lin', 'linear']:
return 98.0
elif spline_type.lower() in ['quad', 'quadratic']:
return 99.0
else:
raise ValueError('Unknown spline type: ' + str(spline_type)) | 355e091d7cdfbae4ad0531d0f2e31f2533641b3c | 106,131 |
import re
def prettify_nsdecls(xml):
"""
Wrap and indent attributes on the root element so namespace declarations
don't run off the page in the text editor and can be more easily
inspected. Sort attributes such that the default namespace, if present,
appears first in the list, followed by other namespace declarations, and
then remaining attributes, both in alphabetical order.
"""
def parse_attrs(rootline):
"""
Return 3-tuple (head, attributes, tail) looking like
        ('<p:sld', ['xmlns:p="html://..."', 'name="Office Theme"'], '>').
"""
attr_re = re.compile(r'([-a-zA-Z0-9_:.]+="[^"]*" *)')
substrs = [substr.strip() for substr in attr_re.split(rootline)
if substr]
head = substrs[0]
attrs, tail = ((substrs[1:-1], substrs[-1]) if len(substrs) > 1
else ([], ''))
return (head, attrs, tail)
def sequence_attrs(attributes):
"""
Sort attributes alphabetically within the subgroups: default
namespace declaration, other namespace declarations, other
attributes.
"""
def_nsdecls, nsdecls, attrs = [], [], []
for attr in attributes:
if attr.startswith('xmlns='):
def_nsdecls.append(attr)
elif attr.startswith('xmlns:'):
nsdecls.append(attr)
else:
attrs.append(attr)
return sorted(def_nsdecls) + sorted(nsdecls) + sorted(attrs)
def pretty_rootline(head, attrs, tail):
"""
Return string containing prettified XML root line with *head* on the
first line, *attrs* indented on following lines, and *tail* indented
on the last line.
"""
indent = 4 * ' '
newrootline = head
for attr in attrs:
newrootline += '\n%s%s' % (indent, attr)
newrootline += '\n%s%s' % (indent, tail) if tail else ''
return newrootline
lines = xml.splitlines()
rootline = lines[1]
head, attributes, tail = parse_attrs(rootline)
attributes = sequence_attrs(attributes)
lines[1] = pretty_rootline(head, attributes, tail)
return '\n'.join(lines) | ffc1755d1f0d4f6faafc9caf2a085cc307888cfb | 401,926 |
import csv
def read_file(csv_dir, header=False):
"""
Read csv file
:param csv_dir: String, directory of file
:param header: Boolean, true will read first row as header
:return: data: List of data on each row
header_name: List of header name
"""
header_name = []
data = []
with open(csv_dir) as csvFile:
readCSV = csv.reader(csvFile, delimiter=',')
for row in readCSV:
if header:
header_name.append(row)
header = False
else:
data.append(row)
if not header_name:
return data
else:
return data, header_name | d2303cf42eac21342c702e532ab70458eb7d0a94 | 318,189 |
import re
def slugify(name):
"""
    Takes an article name and returns a slug-appropriate version using hyphens
:param name: string to be converted
:return: converted string
"""
out = re.sub(r'[^\w\d\s]', '', name)
return re.sub(r'\s', '-', out) | baec337ef46bc38c46e97a1dedd0eaf9d8bd1d18 | 237,625 |
def x_to_ab(x, p, q):
"""
Given some integer x mod pq, returns the CRT representation (x mod p, x mod q)
(CRT -> Chinese Remainder Theorem).
"""
return x % p, x % q | 2d8d5c4cb60c490e04f9d3bec47652afc2ee9638 | 672,729 |
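# Minimal usage sketch for x_to_ab: 7 mod 15 has CRT representation
# (7 mod 3, 7 mod 5) = (1, 2).
assert x_to_ab(7, 3, 5) == (1, 2)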
def dict_to_entity(entity_dict: dict):
"""Convert a dict of str: str entries to a single entity bytes object.
"""
line_template = '\t"{}" "{}"'
entity_lines = b'\n'.join(line_template.format(key, value).encode('ascii') for key, value in entity_dict.items())
return b'{\n' + entity_lines + b'\n}' | 7521ee500db3636f6ab4e47c22d983bb052c3873 | 506,833 |
def generate_bond_indices(natoms):
"""
natoms: int
The number of atoms
Finds the array of bond indices of the upper triangle of an interatomic distance matrix, in column wise order
( or equivalently, lower triangle of interatomic distance matrix in row wise order):
[[0,1], [0,2], [1,2], [0,3], [1,3], [2,3], ...,[0, natom], ...,[natom-1, natom]]
"""
    # initialize j as the index of the last atom
j = natoms - 1
# now loop backward until you generate all bond indices
bond_indices = []
while j > 0:
i = j - 1
while i >= 0:
new = [i, j]
bond_indices.insert(0, new)
i -= 1
j -= 1
return bond_indices | 2484a45c653b027de126713d8b7984551b2a1cd1 | 655,961 |
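# Minimal usage sketch for generate_bond_indices with four atoms, matching
# the column-wise upper-triangle order from the docstring:
assert generate_bond_indices(4) == [[0, 1], [0, 2], [1, 2], [0, 3], [1, 3], [2, 3]]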
def get_app_icon_name(app_id: str) -> str:
"""Builds the corresponding app icon name from app id."""
return f"icon_{app_id}" | b1d2367ad499866195a00285188bf927cec68b2b | 331,604 |
def _strip_dict(d):
"""Return a new dict with falsy keys stripped out."""
return dict((k, v) for k, v in d.items() if v) | 966b617df20ce2c45bb3487915f40db92aa80a13 | 345,074 |
def dif(x, y, afrund = None):
"""
    Calculate the element-wise difference between two sequences,
    rounded to afrund decimal places (default 2)
    """
    result = []
    if afrund is None:
afrund = 2
for index, element in enumerate(x):
result.append(round((element - y[index]), afrund))
return result | f7a1ed998e66eb4f497124a3647a7ed04a82fce4 | 106,931 |
import re
def extractStringFromBrackets(value):
"""This function extracts a string in different brackets.
>>> extractStringFromBrackets('[Brussels]')
'Brussels'
>>> extractStringFromBrackets('(Ghent)')
'Ghent'
>>> extractStringFromBrackets('Gent')
'Gent'
>>> extractStringFromBrackets('')
''
    Nothing should be extracted when there is content before the brackets
>>> extractStringFromBrackets('Gent (Belgium)')
'Gent (Belgium)'
"""
found = None
if value.startswith('('):
        found = re.search(r'^\((.*)\)', value)
elif value.startswith('['):
        found = re.search(r'\[(.*)\]', value)
else:
return value
if found:
return found.group(1)
else:
return '' | 7e6209066dca00de0a25acd86ac1fcb5e97c7229 | 444,936 |
import re
def isDataURL(str):
"""Check if it is a Data URL.
Args:
        str: String to check.
Returns:
Returns True if the string is a DataURL, False if not.
"""
matches = re.match(r'^\s*data:(?:(\w+\/[\w\d\-+.]+)(?:;[\w-]+=[\w\d-]+)?)?(?:;base64)?,([\w\d!$&\',()*+;=\-._~:@\/?%\s]*)\s*$', str)
    return bool(matches) | c84b9c54f6bd5e1faced532a05c66d273ee36c42 | 160,016 |
def get_times_list(binout):
"""
Method to get a times list from binary
output.
Parameters
----------
    binout : fp.binaryfile class instance
Returns
-------
list of times
"""
return sorted([float("{0:15.6f}".format(t)) for t in
binout.recordarray["totim"]]) | bbf4e3635bdc54e1713f050021156f8e369d348c | 218,178 |
def splitRom(rom, chunk = 65536, count = 3, lanes = ('hi', 'lo')):
"""Split a ROM image into chunk*count chunks of chunk bytes.
Supports only 8-bit ROM chips.
rom: the ROM image. Shall be iterable, should be a bytearray.
chunk: the length of one ROM chip.
count: the number of banks, for sanity checking as it shall hold that
len(rom)==chunk*count*len(lanes)
lanes: the name of each ROM chip in a bank.
return: dictionary of bytearrays of chunk bytes, keyed "{lane}{bank}"
for each lane in lanes and each bank up to count.
"""
    if (l := len(rom)) != chunk * count:
        print(f"caution, {l} is not {chunk * count // 1024}kB")
return {F"{lanes[j]}{i}":rom[chunk*i:chunk*(i+1)][j::len(lanes)]
for i in range(count) for j in range(len(lanes))} | db0c4885512be43419dad63517273d34bec6fb27 | 514,958 |
def is_lista_in_listb(lista, listb):
"""Check whether elements from a list (lista) are all contained in another list (listb).
Parameters
----------
lista: List
listb: List
"""
result = all(elem in listb for elem in lista)
return result | 0c30ecffe36eb7f257877ae111dd34c4f5c19926 | 408,115 |
def cols_with_missing_values(dataframe):
""" query a dataframe and find the columns that have missing values"""
return list(dataframe.columns[dataframe.isnull().any()]) | 7c588e911c307cc727dbdde00f3d51c22e14c149 | 91,319 |
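# Minimal usage sketch for cols_with_missing_values (assumes pandas and
# numpy are available):
import numpy as np
import pandas as pd
df = pd.DataFrame({"a": [1.0, np.nan], "b": [1, 2]})
assert cols_with_missing_values(df) == ["a"]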
def change_weather(delphin_dict: dict, original_weather: str, new_weather: str) -> dict:
"""
    Changes the weather file of a weather instance. Can therefore only be used on climate conditions that are loaded
from a file.
:param delphin_dict: Delphin dict to change.
:param original_weather: Name of the original weather
:param new_weather: New weather file path
:return: Modified Delphin dict
"""
# Find original weather
climate_conditions = delphin_dict['DelphinProject']['Conditions']['ClimateConditions']['ClimateCondition']
for weather_index in range(0, len(climate_conditions)):
if climate_conditions[weather_index]['@name'] == original_weather:
climate_conditions[weather_index]['Filename'] = new_weather
return delphin_dict | 79e12327d46e564329881b43f070dc6a26458cd1 | 206,311 |
import random
def generate_word2vec_samples(text, num_skips, skip_window):
"""Generates samples that can be used to train a skip-gram word2vec model.
Code is inspired by the word2vec implementation from:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
Args:
text: The text, where samples should be generated from.
num_skips: How many samples (current word, context word) should be generated per word.
skip_window: Size of the context window (skip_window words on the left and on the right
considered as context.
Returns:
Two lists; one containing center words and one for contextual words.
"""
assert num_skips <= 2 * skip_window
center_words = []
context_words = []
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
# concatenate all sentences in the text
data = []
for sentence in text:
data.extend(sentence)
# contains contextual words for word with index curr_index
buffer = []
def enqueue_context(i):
buffer = []
start_left = max(i-skip_window, 0)
end_right = min(i+skip_window, len(data)-1)
for j in range(start_left, i):
buffer.append(data[j])
for j in range(i+1, end_right+1):
buffer.append(data[j])
return buffer
for i in range(len(data)):
buffer = enqueue_context(i)
targets_to_avoid = [] # already considered contextual words for current token
for j in range(min(num_skips, len(buffer))):
# randomly pick a context word
target = random.randint(0, len(buffer) - 1)
while target in targets_to_avoid:
target = random.randint(0, len(buffer) - 1)
targets_to_avoid.append(target)
center_words.append(data[i])
context_words.append(buffer[target])
return center_words, context_words | 40b69420c6a5d0cb57bb78102a349cf292fcc781 | 274,145 |
def canConstruct_v6(ransomNote: str, magazine: str) -> bool:
"""
This solution, I picked from the problem discussion page.
Contributed by user 'siwal'.
    I liked the intuitive and straightforward approach:
1) Take the next letter from the note.
2) If the required record is not found in a magazine, we can not compose the given note.
    2a) Stop; continuing is pointless.
3) Otherwise, cut out the letter and advance. The magazines now have one more cutout and one less letter.
4) Repeat 1 to 3 until note is complete.
5) Success!
Nice! Easy to understand!
"""
for letter in ransomNote:
if letter not in magazine:
return False
else:
magazine = magazine.replace(letter, '', 1)
return True | c6260b76eb380a8a2276bb223f3e63bc9f1c07ad | 313,413 |
def groupsFromKey(keyFile='./key.txt'):
"""
given a groups file, return a dict of groups.
Example:
### GROUP: TR
16602083
16608059
### GROUP: TU
16504000
16507011
"""
groups={}
thisGroup="?"
with open(keyFile) as f:
raw=f.read().split("\n")
for line in raw:
line=line.strip()
if len(line)<3:
continue
if "### GROUP" in line:
thisGroup=line.split(": ")[1]
groups[thisGroup]=[]
else:
groups[thisGroup]=groups[thisGroup]+[line]
return groups | f32333143a7dfb4a26054317a7a8dc6d23aa5e83 | 146,856 |
from typing import OrderedDict
def filter_log(logdict, event, invert=False):
"""
Filter a dictionary of log entries (loaded by load_log)
by the named event, inverting (logical NOT) if the optional
invert argument is True.
"""
d = {}
for id, val in logdict.items():
if val['event'] == event:
if invert:
continue
else:
d[id] = val
else:
if invert:
d[id] = val
else:
continue
return OrderedDict(d) | 95845e5bd185b7a80055ac9f8c1f9377acb89036 | 527,521 |
def get_ring(gossip):
""" Return the ring status in a structured way.
Args:
gossip: A list of gossip info for each node.
Returns:
A list of nodes represented by dictionaries.
"""
nodes = sorted(gossip, key=lambda node: node['token'])
for index, node in enumerate(nodes):
node['index'] = index
if not nodes:
raise Exception('Unable to find nodes in ring')
# Calculate skew and diff for each node in ring.
ideal_load = sum(node['load'] for node in nodes) / len(nodes)
for index, node in enumerate(nodes):
try:
node['skew'] = abs(node['load'] - ideal_load) / ideal_load
except ZeroDivisionError:
node['skew'] = 0
node['diff'] = abs(node['load'] - nodes[index - 1]['load'])
return nodes | c050da0eb91e915f371cef5c7468b3c9b6240484 | 470,000 |
def convert_component_ids(config, component_names):
"""Convert a list of component names to ids."""
component_names_lower = [name.lower() for name in component_names]
result = []
for cd in config.component_defs:
cpath = cd.path
if cpath.lower() in component_names_lower:
result.append(cd.component_id)
return result | 7d427e52bee9ba861616c1faee54970a91e30deb | 135,525 |
def propertylist_prop_names(insts, property_list):
"""
Return the originally cased property list, based on the lexical case
in the instances.
If a property name is not in any instance, it is not returned.
"""
prop_list = []
for pname in property_list:
for inst in insts:
if pname in inst:
prop_list.append(inst.properties[pname].name)
break
return prop_list | ed22ff1b53b75ada899d325d86bb4f146ee07003 | 424,179 |
def get_dashboard_link(cluster):
"""
This function returns the dashboard address.
:param cluster: Dask cluster
:type cluster: dask_jobqueue.PBSCluster
:return: Link to the dashboard
:rtype: string
"""
template = "http://{host}:{port}/status"
host = cluster.scheduler.address.split("://")[1].split(":")[0]
port = cluster.scheduler.services["dashboard"].port
return template.format(host=host, port=port) | a4344ac755508b20e95e9c892f66973d94fc5dd2 | 353,205 |
def GetNextTokenIndex(tokens, pos):
"""Get the index of the next token after 'pos.'"""
index = 0
while index < len(tokens):
if (tokens[index].lineno, tokens[index].column) >= pos:
break
index += 1
return index | 0864c3fe4a1515851f5e8299a25b2bd33be2822c | 227,417 |
def in_limit(value, limit):
"""Check if value is in limit."""
return 0 <= value < limit | 5f506a87424f410a00f11a97b9815cc69869f26d | 490,979 |
def sort_refs(reflections):
"""Sort reflections by Miller index and entering flag"""
refs_sorted = sorted(reflections.rows(), key=lambda x: x["entering"])
refs_sorted = sorted(refs_sorted, key=lambda x: x["miller_index"][2])
refs_sorted = sorted(refs_sorted, key=lambda x: x["miller_index"][1])
refs_sorted = sorted(refs_sorted, key=lambda x: x["miller_index"][0])
return refs_sorted | e3de07eeb12ebefdd0d8f87315c89b72435739ef | 481,654 |
def remove_whitespace_chars(text):
"""
Remove unnecessary (trailing, double, etc.) whitespace characters from a piece of text.
:param text: A piece of text.
:return Text without unnecessary whitespace.
"""
return " ".join(text.split()) | 40640c421bf6e776001e8cfa443dbb2f7148d6f0 | 16,580 |
def get_cylinder_radius(cell_geometry):
"""Return the radius the cylinder should have
The cylinder have the same radius as the half-sphere that make the dots
(the hidden and the shown part of the dots).
The radius is such that the spherical cap with diameter
cell_geometry.dot_diameter has a height of cell_geometry.dot_height.
"""
h = cell_geometry.dot_height
r = cell_geometry.dot_diameter / 2
return (r ** 2 + h ** 2) / 2 / h | 9d52f5c50e1e0690eab48313cf441e5e01fe2b7a | 86,559 |
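# Sanity sketch for get_cylinder_radius: the spherical-cap relation
# R = (r**2 + h**2) / (2*h) reduces to R == r for a hemisphere (h == r).
# _Geom is a hypothetical stand-in for a cell_geometry object.
class _Geom:
    dot_height = 2.0
    dot_diameter = 4.0
assert get_cylinder_radius(_Geom()) == 2.0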
def are_instances(lhs, rhs, cls) -> bool:
"""Return True if both lhs and rhs are instances of cls; False otherwise
"""
return isinstance(lhs, cls) and isinstance(rhs, cls) | d7c84982d1794cf9e7e6a8b8139c9f0844085293 | 456,219 |
def pd_columns_to_string(df):
""" Returns a single string with a list of columns, eg: 'col1', 'col2', 'col3' """
columns = "".join("'" + column + "', " for column in df.columns)
return columns[:-2] | 57b2a24d8a0f88fcd465928465f32dc358c1a020 | 59,946 |
def leap_year(year: int) -> str:
"""
    Takes a year and returns an empty string for a leap year and the string "НЕ " ("NOT ") for a non-leap year.
:param year: int
:return: str
"""
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
return ''
else:
return 'НЕ ' | 9549f863d315239369545024b1646da6b8ce6d72 | 205,299 |
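# Minimal usage sketch for leap_year (with the full Gregorian rule):
assert leap_year(2020) == ''      # leap year
assert leap_year(1900) == 'НЕ '   # century year, not a leap year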
def isfuncdesc(desc):
"""Tests if a description is a function-type description."""
return desc is not None and 'signatures' in desc | ec634abf9c801cb2af360e3757298751edf9b88c | 302,987 |
import socket
def connect(host, port):
"""Check host:port is up."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
try:
sock.connect((host, int(port)))
sock.close()
return True
except socket.error:
return False | dcd59964d3b950005cd5eb54908de9a7677f6e56 | 386,366 |
def max_subarray_recursive(nums: list[int]) -> int:
"""Returns the largest non-empty contiguous subarray sum
Args:
nums: An array of integers drawn from the range [-10^5, 10^5]
Examples:
>>> max_subarray_recursive([-2,1,-3,4,-1,2,1,-5,4])
6
>>> max_subarray_recursive([1])
1
>>> max_subarray_recursive([5,4,-1,7,8])
23
>>> max_subarray_recursive([])
0
"""
## EDGE CASE ##
if not nums:
return 0
def calculate_max_subarray_sum(index_to_include: int, max_sum=-float("inf")):
if index_to_include == 0: # base case
return nums[0], nums[0]
max_subarray_sum = nums[index_to_include] # initialize
prev_max_subarray_sum, prev_max_sum = calculate_max_subarray_sum(
index_to_include - 1, max_sum
)
if prev_max_subarray_sum > 0:
max_subarray_sum += prev_max_subarray_sum
return max_subarray_sum, max(prev_max_sum, max_subarray_sum)
# Update each idx to contain the maximum contiguous subarray sum that could
# be obtained by the inclusion of the element at that idx; in other words,
# include a previously running subarray sum if it would produce a larger
# sum, else ONLY include the element at that idx (thus starting a new
# subarray partition)
_, max_sum = calculate_max_subarray_sum(len(nums) - 1)
return max_sum | 6e374643a13dcf0c97a8352633a948c81f2df59a | 537,790 |
import heapq
def _findlimit(repo, a, b):
"""
Find the last revision that needs to be checked to ensure that a full
transitive closure for file copies can be properly calculated.
Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendant
    of the other, in which case we can return the minimum revnum of a and b.
None if no such revision exists.
"""
# basic idea:
# - mark a and b with different sides
# - if a parent's children are all on the same side, the parent is
# on that side, otherwise it is on no side
# - walk the graph in topological order with the help of a heap;
# - add unseen parents to side map
# - clear side of any parent that has children on different sides
# - track number of interesting revs that might still be on a side
# - track the lowest interesting rev seen
# - quit when interesting revs is zero
cl = repo.changelog
working = len(cl) # pseudo rev for the working directory
if a is None:
a = working
if b is None:
b = working
side = {a: -1, b: 1}
visit = [-a, -b]
heapq.heapify(visit)
interesting = len(visit)
hascommonancestor = False
limit = working
while interesting:
r = -heapq.heappop(visit)
if r == working:
parents = [cl.rev(p) for p in repo.dirstate.parents()]
else:
parents = cl.parentrevs(r)
for p in parents:
if p < 0:
continue
if p not in side:
# first time we see p; add it to visit
side[p] = side[r]
if side[p]:
interesting += 1
heapq.heappush(visit, -p)
elif side[p] and side[p] != side[r]:
# p was interesting but now we know better
side[p] = 0
interesting -= 1
hascommonancestor = True
if side[r]:
limit = r # lowest rev visited
interesting -= 1
if not hascommonancestor:
return None
# Consider the following flow (see test-commit-amend.t under issue4405):
# 1/ File 'a0' committed
# 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
# 3/ Move back to first commit
# 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
# 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
#
# During the amend in step five, we will be in this state:
#
# @ 3 temporary amend commit for a1-amend
# |
# o 2 a1-amend
# |
# | o 1 a1
# |/
# o 0 a0
#
# When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
# yet the filelog has the copy information in rev 1 and we will not look
# back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendant of b or vice versa.
return min(limit, a, b) | d8d6c0b250fff1a97e0be9c828b5daa4bbf22264 | 375,823 |
import torch
def deen_loss(energy_model, x, sigma=0.1):
"""DEEN loss from
Deep Energy Estimator Networks
The loss is computed as
x_ = x + v # noisy samples
s = -dE(x_)/dx_
loss = 1/2*||x - x_ + sigma^2*s||^2
Args:
x (torch.Tensor): input samples
        sigma (float, optional): noise scale. Defaults to 0.1.
Returns:
DEEN loss
"""
x = x.requires_grad_()
v = torch.randn_like(x) * sigma
x_ = x + v
s = sigma ** 2 * energy_model.score(x_)
loss = torch.norm(s + v, dim=-1) ** 2
loss = loss.mean() / 2.0
return loss | bf221e5e9c08b44cf184b4dff8d7dc03a486475f | 519,130 |
def update_form_from_dict(form, field_dict):
"""Update WTForm field values from Python dictionary"""
for field in form:
if field.name in field_dict.keys():
value = field_dict[field.name]
if field.type == "FieldList":
if not isinstance(value, list):
value = [value]
for index in range(len(field)):
try:
field[index].data = value[index]
field[index].raw_data = None
                    except Exception:
field[index].data = None
field[index].raw_data = None
else:
field.data = value
field.raw_data = None
return form | c823fb777f1e2db73f495089f904504ab6aa0ef2 | 370,570 |
def int_enum(cls, val):
"""Get int enum value.
Parameters
----------
cls : `type`
Int enum class.
val : `int` or `str`
Name or value.
Returns
-------
`IntEnum`
Raises
------
ValueError
"""
if isinstance(val, str):
val = val.upper()
try:
return getattr(cls, val)
except AttributeError:
raise ValueError('{0}.{1}'.format(cls, val))
return cls(val) | d2a53e12a21749589e23b6688241600baed8450e | 662,769 |
def get_plot_of_confidence_intervals_labels(time_type):
"""Get graph labels
Parameters
----------
time_type : string
A string to distinguish between times in order to adjust labels
Returns
-------
string, string, string
three strings that represent the graph's title, x-axis label and
y-axis label
"""
if time_type == "w":
title = "Mean Waiting time over number of iterations"
elif time_type == "s":
title = "Mean Service time over number of iterations"
elif time_type == "b":
title = "Mean time Blocked over number of iterations"
else:
title = " "
x_axis = "Number of trials"
y_axis = "Means of times"
return title, x_axis, y_axis | 2230bd7248907b9c9a4cf42397b323a5e68abfcf | 565,985 |
import copy
def rollelem(a, index, to_index=0):
"""Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : list
Input list.
index : int
The index of the item to roll backwards. The positions of the items
do not change relative to one another.
to_index : int, optional
The item is rolled until it lies before this position. The default,
0, results in a "complete" roll.
Returns
-------
res : list
Output list.
"""
res = copy.copy(a)
res.insert(to_index, res.pop(index))
return res | 315f879ef3ea2f57acb65597f4c6446692934032 | 90,018 |
def build_args_from_supported_features( features ):
"""Build a list of --enable or --disable features from a dictionary.
Ex: --enable-music-wave would be the result of {'music-wave': True}"""
args = []
for feature in features:
if features[feature]:
args.append( '--enable-' + feature )
else:
args.append( '--disable-' + feature )
return args | d55701b6b20cb2d5a4281dbd44fd72b66645a83c | 111,665 |
def sum_all(root):
"""Sums up all node values in the tree.
This will fail if all node values are not numerical.
Args:
root(BinaryTreeNode): Root node
Returns:
int: Sum of all node values or 0 if empty tree.
"""
if root is None:
return 0
return root.data + sum_all(root.lchild) + sum_all(root.rchild) | 3497717d614d95974874ad3a4e55108a0c4f2509 | 491,195 |
from typing import Union
def has_unique_elements(_list: Union[list, tuple]):
"""Determines whether or not the items of a list are unique.
Parameters
----------
_list : Union[list, tuple]
The list that must be checked.
Returns
-------
is_unique : bool
Returns `True` if the items of the lists are unique, else `False`.
"""
is_unique = len(set(_list)) == len(_list)
return is_unique | 3be3640acd04163f8a612d08d2bff6ec4e4f39aa | 152,759 |
def color_float_to_hex(r, g, b):
"""Convert RGB to hex."""
def clamp(x):
x = round(x*255)
return max(0, min(x, 255))
return "#{0:02x}{1:02x}{2:02x}".format(clamp(r), clamp(g), clamp(b)) | e8e82ecd5f6f418cdc0cd72f0b799d1dd0ad6b1d | 644,834 |
def trim_string(
string: str,
limit: int,
max_length: int,
trim_back: bool,
) -> str:
"""Trip a string to fit in viewport.
Args:
string: The string to trim
        limit: The available screen real estate (horizontal)
max_length: The length of the longest file name in a directory
trim_back: Whether to trim the end or the beginning of the string
Returns:
Trimmed string
"""
if len(string) > limit:
if trim_back:
return string[0 : limit - 1] + "…"
else:
return "…" + string[-limit + 1 :]
else:
return string + " " * (min(max_length, limit) - len(string)) | 23f8f80f4a2890109aab0cc1e86bdc4878759a3c | 346,861 |
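# Minimal usage sketch for trim_string: long names are cut with an
# ellipsis, short names are padded to the column width.
assert trim_string("abcdefgh", limit=5, max_length=8, trim_back=True) == "abcd…"
assert trim_string("abc", limit=5, max_length=5, trim_back=True) == "abc  "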
def filters_cleaner(filters):
"""
    Function that cleans the filters for easy_load.
Parameters
---
filters: dict with the filters
Returns
---
str with the exact query for SODA API
Examples
---
    >>> filters_cleaner(filters={'agency':['DOT'],'borough':['BRONX']})
    'agency="DOT" and borough="BRONX"'
>>>
"""
filters_ls = []
for key,value in filters.items():
filters_ls_temp = []
for i in value:
filters_ls_temp.append(str(key + '=' +'"' +i+'"'))
filters_str_temp = " or ".join(filters_ls_temp)
filters_ls.append(filters_str_temp)
filters_str = " and ".join(filters_ls)
return filters_str | 6b26a888c3cd93dfaa9710604c512368a5a1cfd3 | 440,734 |
import sympy
def antal_l_coefficient(index, game_matrix):
"""
Returns the L_index coefficient, according to Antal et al. (2009), as given by equation 1.
L_k = \frac{1}{n} \sum_{i=1}^{n} (a_{kk}+a_{ki}-a_{ik}-a_{ii})
Parameters
----------
index: int
game_matrix: sympy.Matrix
Returns
-------
out: sympy.Expr
Examples:
--------
>>>a = Symbol('a')
>>>antal_l_coefficient(0, Matrix([[a,2,3],[4,5,6],[7,8,9]]))
Out[1]: 2*(a - 10)/3
>>>antal_l_coefficient(0, Matrix([[1,2,3],[4,5,6],[7,8,9]]))
Out[1]: -6
"""
size = game_matrix.shape[0]
suma = 0
for i in range(0, size):
suma = suma + (game_matrix[index, index] + game_matrix[index, i] - game_matrix[i, index] - game_matrix[i, i])
return sympy.together(sympy.simplify(suma / size), size) | 39dd5dab87f29db5a85a55e8bbb3d0d98ff4ad3c | 670,161 |
def add_timestamp_info_to_document(document, timeseries):
"""
    Adds some meta information to documents that contain time series.
The document will get the additional attributes 'first_timestamp',
    'last_timestamp' and 'num_timestamps' (number of timestamps). This
    information can be used to calculate the timeseries' resolution, to
    check if there are any timestamps missing, or to query documents based
    on their contained timestamps.
Parameters
----------
document : dict
dict, that contains the timeseries' metadata.
timeseries : pandas.Series
The timeseries itself with the timestamps as index.
Returns
-------
document : dict
The updated timeseries metadata document.
"""
document["first_timestamp"] = timeseries.index[0]
document["last_timestamp"] = timeseries.index[-1]
document["num_timestamps"] = len(timeseries.index)
document["max_value"] = timeseries.max().item()
document["min_value"] = timeseries.min().item()
return document | a7f9e53b2d18d6359b0836105ce8d4b490fb015a | 446,433 |
def nonwhitespace(argument):
"""Return argument with all whitespace removed.
This includes removing any single spaces within the string.
"""
return "".join(argument.split()) | fd32afc0fd8ce94dc8cb5da05a998e8b404cef50 | 122,955 |
def clean_code(code):
"""Escape newlines."""
return code.replace('\n', '\\n') | 2535cb97d9c42391fb43d7b8cca4281742f39526 | 662,705 |
def clean_update_item_string(string):
"""
Removes "(" and ")" parentheses from a string
Args:
string(str): entry
Returns:
str: string with parens removed
"""
return str(string.replace("(", "").replace(")", "")) | 14634fe1232e620837855c2400b40a4dc865f237 | 668,921 |
def get_parameters(function, **kwargs):
"""
Return a list of parameters from a SSM function, like
describe_parameters or get_parameters_by_path.
"""
next_token = ""
response = function(**kwargs)
parameter_list = response["Parameters"]
while "NextToken" in response:
next_token = response["NextToken"]
kwargs["NextToken"] = next_token
response = function(**kwargs)
parameter_list.extend(response["Parameters"])
return parameter_list | ca9fe2f942399040d78e6a2ed9e49a11ceed4c6a | 634,059 |
def paint_text(text_str, color_str):
"""
Adds markup around given text.
Supports some colors by name instead of hexadecimal.
    :param text_str: (str) Text to wrap in color markup.
:param color_str: (str) Hexadecimal color.
:return: (str)
"""
return '[color={color_str}]{text_str}[/color]'.format(text_str=text_str,
color_str=color_str) | 256dde931eeec82250fc178d28d9bdc07cd651c8 | 648,893 |
def first_non_repeating_letter(string):
"""
This was created for Code Wars:
https://www.codewars.com/kata/52bc74d4ac05d0945d00054e
This function takes a string as an input.
It will return the first character the does not repeat in the string.
It is case insensitive
"""
#checks for empty string
if string == "":
return ""
else:
#creates list which will check if there is a repeated character
list = []
        #iterates through the string to count how many times each character appears
        #keeps track of occurrences by adding counts to the list
        for i in string:
            list.append(string.lower().count(i.lower()))
            if string.lower().count(i.lower()) > 1:
                pass
            else:
                #return so only the first non-repeating character is taken
                return i
        #if no character occurred only a single time, a blank string is returned
        if 1 not in list:
            return "" | 130636509c9f4324d507fe82b4ac00af741b29fe | 262,306 |
def get_residues(pdb, chain_ids=None):
"""Returns returns residues copied from a PDB.
Args:
- pdb - Bio.PDB.Structure.Structure.
- chain_ids - strip residues from these specific chain_ids only.
Returns:
- residues - a list of Bio.PDB.Residue.Residue.
"""
residues = []
for model in pdb:
for chain in model:
if chain_ids == None or chain.id in chain_ids:
residues.extend(chain.child_list)
return residues | 83c33523fed529733ed62a6d463d5c77aaa66cc6 | 133,041 |
import csv
def read_artist_id_csv(csv_file, ignore_y):
""" This function reads artist_id csv
@param ignore_y - skip artist with "y" for the "crawled" column in True
save every artist ignoring "y" if False
@return - dict {artist: artist_id} """
artist_id_dict = dict()
with open(csv_file, 'r') as fpin:
reader = csv.reader(fpin, delimiter=',')
next(reader)
for row in reader:
if len(row) > 0:
if ignore_y or row[2] != "Y":
artist_id_dict[row[0]] = row[1]
return artist_id_dict | 29e327b2fea25c49f291be52ce447945a8ecf0ec | 66,874 |
from typing import AnyStr
from typing import List
import json
def load_json_samples(path: AnyStr) -> List[str]:
"""
Loads samples from a json file
:param path: Path to the target file
:return: List of samples
"""
with open(path, "r", encoding="utf-8") as file:
samples = json.load(file)
if isinstance(samples, list):
return samples
else:
        raise RuntimeError("File's content must be list-like") | b735e7265a31f6bc6d19381bfe9d0cbe26dcf170 | 706,566 |
def assemble_coordinate(coord_x, coord_y, coord_z):
"""Assemble a coordinate Celery Script node from x, y, and z."""
return {
'kind': 'coordinate',
'args': {'x': coord_x, 'y': coord_y, 'z': coord_z}} | aa7d391db5436e76b7810b8eae336cd7962240b1 | 326,041 |
def is_pandigital(nb):
"""
    This function returns whether or not a number is
    a 1-9 pandigital number, i.e. it can be written using
    each of the digits 1 to 9 exactly once.
"""
if len(str(nb)) == 9:
for i in range(1, 10):
if str(i) not in str(nb):
return False
return True
else:
return False | f24b5ee488e8a186c56fdeb6c1842fdce835bc1b | 343,757 |
def str2bool(x):
""" Convert a string to a boolean. """
return x.lower() in ("true", "1") | b39d63e4c550b8d5f9e94dc514f4178e68a56e38 | 364,669 |
def load_class(module, class_name):
""" Loads the class from a module by the class name"""
klass = getattr(module, class_name, None)
return klass | bc445b4ea55ea3e53f4ffa0c16b26103f7ba46ad | 302,700 |
def is_checkpoint_specified(checkpoint_config):
"""Whether or not the checkpoint config specifies a checkpoint."""
return (checkpoint_config.path
or checkpoint_config.run_dir
or checkpoint_config.id) | 800486d1eb2b023eb50a9eb6c325d6884ee82072 | 169,560 |
def add_arg_group(parser, title):
"""Add the arguments for a specific group to the parser
    :param parser: the parser to configure
:param title: the group name
:return: the new parser
"""
return parser.add_argument_group(f'{title} arguments') | a6cde0d1e5682744761f0a6424ae71cbf0ace12b | 485,466 |
def line_indices(lines, tag):
"""
:param lines: Must be a list of strings representing each line
:param tag: is the tag to find in the list of lines
:return: the indices of the lines with that tag
"""
return [l for l in range(len(lines)) if tag in lines[l]] | 353c4cc85b0e911dfc8686bcac073b5a6bc82b24 | 585,623 |
def get_max_point(board, points):
"""return the point with the largest value on the board"""
best_point = (-1, -1)
best = 0
for i, j in points:
if 0 <= i < len(board) and 0 <= j < len(board[0]) and board[i][j] > best:
best_point = (i, j)
best = board[i][j]
return best_point | 670697f7032e01cf807218df699b9ad23f6ce297 | 163,252 |
def get_link_inf(f, path):
"""Given a path to a node, returns information about
the link, or None, if the path is not part of a link. Returned link_inf has keys:
link_type - type of link, either 'hard' or 'soft'
loc - location (key) of link group associated with link. i.e. in links['lg'][link_type]
is_source - For link_type "soft" returns True if is a link source (not the target
of the link-group). For link type 'hard' this is not returned.
    Note: This routine is called 'get_link_inf' (not 'get_link_info') because the returned
dictionary is different than what is stored in the node class, "link_info" dict.
"""
link_type = (
'hard' if path in f.links['path2lg']['hard']
else 'soft' if path in f.links['path2lg']['soft']
or path in f.links['lg']['soft'] # this in case path is the target
else 'ext' if path in f.links['path2lg']['ext'] else None)
if link_type is None:
# node is not in a hard, soft or ext link-group, so is not part of a link
return None
if link_type == 'soft':
# soft link. This is the target if it is the location for a soft link group
is_source = not path in f.links['lg']['soft']
loc = f.links['path2lg']['soft'][path] if is_source else path
link_inf = {'link_type': link_type, 'loc': loc, 'is_source': is_source}
else:
# must be hard or external. loc for hard is a tuple, for ext is file\npath
loc = f.links['path2lg'][link_type][path]
link_inf = {'link_type': link_type, 'loc': loc}
return link_inf | b454cd26f47635a29e8a329844a12207599e4e0c | 111,317 |
def atleast_ndim(x, ndim):
"""Reshapes a tensor so that it has at least n dimensions."""
if x is None:
return None
return x.view(list(x.shape) + [1] * (ndim - x.ndim)) | ee4e4b35806bfbfd54223962dd12dd2ad078881d | 58,063 |
def to_key(monitoring_info_proto):
"""Returns a key based on the URN and labels.
This is useful in maps to prevent reporting the same MonitoringInfo twice.
"""
key_items = list(monitoring_info_proto.labels.items())
key_items.append(monitoring_info_proto.urn)
return frozenset(key_items) | 9280797abbe3ebe88b32be59d656dfa94e6990d4 | 245,211 |
def apply_set_and_clear(val: int, set_flag: int, clear_flag: int):
"""
:param val: an input value of the flag(s)
:param set_flag: a mask of bits to set to 1
:param clear_flag: a mask of bits to set to 0
:note: set has higher priority
:return: new value of the flag
"""
return (val & ~clear_flag) | set_flag | fd166fe5fe197ca39af6ebd04e6e24b9b3f5d94a | 475,630 |
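# Minimal usage sketch for apply_set_and_clear: clear bit 0, set bit 2.
assert apply_set_and_clear(0b0011, set_flag=0b0100, clear_flag=0b0001) == 0b0110
# set has higher priority when the same bit appears in both masks:
assert apply_set_and_clear(0b0000, set_flag=0b0001, clear_flag=0b0001) == 0b0001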
from typing import Callable
import hashlib
def function_to_uid(f: Callable) -> str:
"""Returns a unique identifier for the given function."""
return hashlib.sha1(f.__name__.encode("utf-8")).hexdigest() | abdda8563ce26bf80bd8d1e97d9e68b6fde60fad | 214,630 |
import html
def chm_htmlescape(s: str, quote: bool = True) -> str:
"""
chm_htmlescape() is a wrapper of html.escape().
.hhc/.hhk files don't recognize hex escaping, we need convert
hex escaping to decimal escaping. for example: ``'`` -> ``'``
html.escape() may generates a hex escaping ``'`` for single
quote ``'``, this wrapper fixes this.
"""
s = html.escape(s, quote)
s = s.replace(''', ''') # re-escape as decimal
return s | 29af4ed5e17ae6f49eb98c71fe0550e265974cf3 | 379,932 |
def match_target_amplitude(audioSegment_sound, target_dBFS):
"""
Match the amplitude of an input signal to a target level.
Args:
audioSegment_sound (Audiosegment (pydub)): input signal.
target_dBFS (float/int): target level (in dBFS).
Returns:
matched_audio (Audiosegment (pydub)): amplitude-matched signal.
"""
dBFS_diff = target_dBFS - audioSegment_sound.dBFS
matched_audio = audioSegment_sound.apply_gain(dBFS_diff)
return matched_audio | 94012918a4f9acc41790eff5df3ae1ec73bdb67d | 180,278 |
def upper_bits(sequence,n):
"""Return only the n highest bits of each term in the sequence"""
return map(lambda x: (x >> n) % (2**n), sequence) | fed47f009ebc0958184eff333334da9a54665ba8 | 178,046 |
def is_list_with_len(value, length) -> bool:
""" Is the given value a list of the given length?
:param value: The value to check
:type value: Any
:param length: The length being checked for
:type length: Nat
:return: True if the value is a list of the length
:rtype: bool
"""
return isinstance(value, list) and len(value) == length | 9b242b76e7c244afff7f53d5290bc183b5fafe1b | 388,774 |
from typing import Union
def pretty_size(size: Union[int, float]) -> str:
"""
Converts a size in bytes to its string representation (e.g. 1024 -> 1KiB)
:param size: Size in bytes
"""
size = float(size)
power = 2 ** 10
base = 0
while size > power:
size /= power
base += 1
return "%.2f %s" % (size, {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}[base] + "B") | 08d1953814294fb62a7a8a23f2fdb376cdc7387b | 262,326 |
def is_url(checkpoint):
"""Check whether the checkpoint is a url or not.
"""
is_url = ('https://storage.googleapis.com' in checkpoint)
return is_url | 988e22920de0cd5a1bcf7543261d17fefabf6c29 | 624,129 |
def commastring2list(output_type=str):
"""Returns a lambda function which converts a comma separated string into a list of a given type
Args:
output_type (function, optional): string type conversion function. Defaults to str.
Returns:
function: lambda function
"""
return lambda input_str: list(map(output_type, input_str.split(","))) | b9d6d86c03730949e14913cbda6f8d333117d41d | 181,176 |
def split32(data):
""" Split data into pieces of 32 bytes. """
all_pieces = []
for position in range(0, len(data), 32):
piece = data[position:position + 32]
all_pieces.append(piece)
return all_pieces | cc721c44f28ea5714798fd7382554626da3abd72 | 555,144 |
def splitValues(textStr):
"""Splits a comma-separated number sequence into a list (of floats).
"""
vals = textStr.split(",")
nums = []
for v in vals:
nums.append(float(v))
return nums | 380b3d622dd31ec478b2912b60df66c0911e2003 | 187,097 |
def quadsplit(quad):
"""Split ccw quad to two triangles"""
v1, v2, v3, v4 = quad
return [
[v1, v2, v3],
[v3, v4, v1]] | 9e567beeefad574efbba0b10b7134c78456446ee | 626,306 |
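# Minimal usage sketch for quadsplit: a ccw quad becomes two ccw
# triangles sharing the v1-v3 diagonal.
assert quadsplit([0, 1, 2, 3]) == [[0, 1, 2], [2, 3, 0]]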
import torch
def polak_ribiere(g_k1, g_k):
"""
    This function computes the Polak-Ribière conjugate gradient search direction constant
INPUTS:
g_k1 < tensor > : gradient at current time step
g_k < tensor > : gradient at previous time step
OUTPUTS:
b_k1 < tensor > : conj. gradient update constant at current time step
"""
return torch.matmul(g_k1.t(), (g_k1 - g_k)) / torch.matmul(g_k.t(), g_k) | 749483a26dcada1d3668ab47c03c58f7c2534081 | 395,814 |
import torch
def stack_subsample_frames(x, stacking=1, subsampling=1):
""" Stacks frames together across feature dim, and then subsamples
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim * stacking, num_frames / subsampling
"""
seq = [x]
for n in range(1, stacking):
tmp = torch.zeros_like(x)
tmp[:, :, :-n] = x[:, :, n:]
seq.append(tmp)
x = torch.cat(seq, dim=1)[:, :, ::subsampling]
return x | 9c8096ed92a8c8030452248b1f87e1626933333b | 354,953 |
import pytz
from datetime import datetime
def unixTimeToUTC(timestamp):
"""Convert unix timestamp (seconds since Jan 1, 1970, to ISO-8601
compatible UTC time string.
"""
utc = pytz.utc
dtTime = datetime.fromtimestamp(timestamp, utc)
iso_str = dtTime.isoformat()
# isoformat returns a string like this:
# '2014-10-30T04:25:21+00:00'
# strip off the '+00:00' and replace
# with 'Z' (both are ISO-8601 compatible)
npos = iso_str.rfind('+')
iso_z = iso_str[:npos] + 'Z'
return iso_z | 9bc4d9b4f45f15edd00f32ed32e6481a89365d93 | 562,598 |
def target_subnet(model, subnet_idx):
"""
model = Keras model
subnet_idx = indexes of the subnet you want to target.
ex: subnet_idx = [1,0] -> targets model.layers[1].layers[0]
"""
subnet = model
for idx in subnet_idx:
subnet = subnet.layers[idx]
return subnet | df6092b0287e5dde53f5ad0363ae635d91582a58 | 67,845 |
def get_label_length(label):
"""Get length of cell label."""
label_length = 5
while label_length < len(label) and label[label_length].isdigit():
label_length += 1
return max(min(label_length, 9), 5) | 37b0b9f928ebf1fc9bd51fdd2e44c45305b8ba61 | 332,875 |
def _organize_requests_by_external_key(enrollment_requests):
"""
Get dict of enrollment requests by external key.
External keys associated with more than one request are split out into a set,
and their enrollment requests thrown away.
Arguments:
enrollment_requests (list[dict])
Returns:
(requests_by_key, duplicated_keys)
where requests_by_key is dict[str: dict]
and duplicated_keys is set[str].
"""
requests_by_key = {}
duplicated_keys = set()
for request in enrollment_requests:
key = request['external_user_key']
if key in duplicated_keys:
continue
if key in requests_by_key:
duplicated_keys.add(key)
del requests_by_key[key]
continue
requests_by_key[key] = request
return requests_by_key, duplicated_keys | b06ee82b317defe7f2a65df767e44fb36310b4fc | 276,116 |