content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def equalize_array(array):
    """Pad every row of a 2D list (in place) with "" so all rows share the max length."""
    if not array:
        return array
    target = max(len(row) for row in array)
    for row in array:
        # in-place extension keeps callers' row references valid
        row += [""] * (target - len(row))
    return array
|
120dd2282f5f32342d66dba2916b269ef7d38f41
| 280,986 |
def read_file(file):
    """
    Read a text file and return its lines as a list.

    Parameters
    ----------
    file : path of the file to read; each line becomes one list element.

    Returns
    -------
    list of str : the lines, stripped of surrounding whitespace (incl. newlines).
    """
    with open(file, 'r', encoding='utf-8') as reader:
        return [line.strip() for line in reader]
|
9c2dfd5ee180be6c54914b9c5165da1756562416
| 78,445 |
import re
def _db_regex_func(expr, item):
"""
REGEXP search function for SQLite.
:param expr:
:type expr: str
:param item:
:type item:
:return: True if a match is found.
:rtype: bool
"""
reg = re.compile(expr, re.I)
return reg.search(item) is not None
|
009faba2ab950ee37f3cbc10ff6519e90099d9c4
| 535,103 |
def rotate90_point(x, y, rotate90origin=(0, 0)):
    """Rotates a point 90 degrees CCW in the x-y plane.

    Args:
        x, y (float): Coordinates.
        rotate90origin (tuple): x, y origin for the 90 degree CCW rotation.
            Defaults to (0, 0); the previous default of an empty tuple raised
            IndexError whenever the argument was omitted.

    Returns:
        xrot, yrot (float): Rotated coordinates.
    """
    # Translate point to origin
    x -= rotate90origin[0]
    y -= rotate90origin[1]
    # 90 degree CCW rotation and translate back
    xrot = -y + rotate90origin[0]
    yrot = x + rotate90origin[1]
    return xrot, yrot
|
172b74f4aaa5453520ed9669d1f76d3103c51b06
| 681,201 |
from typing import Dict
from typing import Tuple
def format_dimensions(dimensions: Dict[str, int]) -> Tuple[int, int]:
    """Reduce a 'dimensions' response attribute to a ``(width, height)`` tuple (0 when missing)."""
    width = dimensions.get("width", 0)
    height = dimensions.get("height", 0)
    return width, height
|
627b1ec13887923c8ff41465e4e5652f5b4aea76
| 402,352 |
import click
def prompt_access_token(exporter):
    """Prompt the user for a provider access token.

    :param exporter: provider exporter exposing ``access_token_url()``
    :return: The new access token
    """
    prompt_lines = (
        'You must configure an access token\n'
        + 'Create one at: {0}\n'.format(exporter.access_token_url())
        + 'Access token'
    )
    return click.prompt(prompt_lines, type=str)
|
cff9e4239921b5f7360935e48f0ee6afeba450a1
| 350,052 |
def get_gcd(num1, num2):
    """
    Return the greatest common divisor of two numbers.

    :param num1: The first number.
    :param num2: The second number.
    :return: The GCD (iterative Euclidean algorithm).
    """
    while num2 != 0:
        num1, num2 = num2, num1 % num2
    return num1
|
f1fbf359af9c7ec2ab04b9cedc7719fd7923f4ea
| 358,133 |
def path_in_cc(path, cc):
    """Check that every vertex of a path lies in a given connected component.

    :param path: iterable of vertices forming the path
    :param cc: list of vertices representing a connected component in a graph
    :return: True if all path vertices are found in the connected component,
        False otherwise
    """
    return all(vertex in cc for vertex in path)
|
9d549234f2a1294380a3e416fea93cdf7944d8b2
| 26,181 |
def flatten(s, accepted_types=(list, tuple)):
    """flatten lists and tuples to a single flat sequence, dropping empty ones.

    Parameters
    ----------
    s : Sequence
        The (possibly nested) sequence to flatten.
    accepted_types : tuple
        Sequence types that get flattened (default ``(list, tuple)``).

    Return
    ------
    A flat sequence of the same type as ``s``.

    Example
    -------
    flatten(('Hello',[' ',2],[' '],(),['the',[([' '])],('World')]))
    ('Hello', ' ', 2, ' ', 'the', ' ', 'World')
    """
    # Remember the input type so the result can be converted back at the end.
    s_type = type(s)
    s = list(s)
    i = 0
    while i < len(s):
        # Repeatedly expand position i until it holds a non-sequence element.
        while isinstance(s[i], accepted_types):
            if not s[i]:
                # Empty nested sequence: remove it and step back so the
                # element that slides into position i is examined next.
                s.pop(i)
                i -= 1
                break
            else:
                # Splice the nested sequence's elements in place of s[i];
                # the loop then re-checks the first spliced element.
                s[i:i+1] = s[i]
        i += 1
    return s_type(s)
|
8a25a2f3f0b4963dcfb689026b9d5644ccf2591e
| 240,466 |
def _colnames(elements):
"""Generate column names for output text file.
Args:
elements (list): List of output element abbreviations (str).
Returns:
str: header line that describes columns.
"""
time = ['{:8}'.format('Time')]
sfr = ['{:8}'.format('SFR')]
survivors = ['{:10}'.format('Survivors')]
feh = ['{:8}'.format('[Fe/H]')]
abunds = ['{:8}'.format('[{}/Fe]'.format(item)) for item in elements]
line = time + sfr + survivors + feh + abunds
return ' '.join(line) + '\n'
|
3104562aa59872e0dfd1a229056eb6c48e5ac56a
| 333,001 |
from pathlib import Path
from typing import List
def parse_dependency(filepath: Path) -> List[str]:
    """Parse python dependencies from a file.

    The list of dependencies is used by `setup.py` files. Lines starting
    with "#" are ignored (useful for writing comments). In case the
    dependency is hosted using git, the url is parsed and modified to make
    it suitable for `setup.py` files.

    Args:
        filepath (Path): path of the requirements-style file.
    Returns:
        List[str]: List of dependencies
    """
    dep_list = []
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(...)` leaked the handle).
    with open(filepath) as fh:
        lines = fh.read().splitlines()
    for dep in lines:
        if dep.startswith("#"):
            continue
        key = "#egg="
        if key in dep:
            # "git+https://...#egg=name" -> "name @ git+https://..."
            git_link, egg_name = dep.split(key)
            dep = f"{egg_name} @ {git_link}"
        dep_list.append(dep)
    return dep_list
|
6c030a81c834f815e30ca0e586713b282f4b89f0
| 325,183 |
import io
def read_bytes(n: int, reader: io.IOBase) -> bytes:
    """
    Read exactly ``n`` bytes from ``reader``.

    Parameters:
    - `n`: The number of bytes to read;
    - `reader`: The reader (must be a binary stream);

    Returns the bytes read.

    Raises ValueError when the reader yields a non-bytes value and EOFError
    when fewer than ``n`` bytes are available.
    """
    chunk = reader.read(n)
    # A text-mode reader would hand back str; reject that before length checks.
    if not isinstance(chunk, bytes):
        raise ValueError('The reader is expected to return bytes.')
    if len(chunk) != n:
        raise EOFError(f'Unable to read {n} bytes from the stream.')
    return chunk
|
bb3d00fc7667839864f4104a94a26e682f058fdc
| 6,287 |
import pickle
def loadData(file_model):
    """
    Load object from (pk) dump file.

    :param file_model: path of the pickle dump file
    :return: reconstructed object

    Terminates the process with an error message when the file is missing
    or the pickle payload is invalid/corrupted.
    """
    try:
        # Context manager guarantees the handle is closed even when
        # pickle.load raises (the original leaked the handle in that case).
        with open(file_model, 'rb') as object_file:
            return pickle.load(object_file)
    except FileNotFoundError:
        print("FATAL ERROR: file does not exist in the given destination.")
        exit()
    except Exception:
        print("FATAL ERROR: pickle file is invalid or corrupted.")
        exit()
|
5715e6343b0ab12370ce3821089d3dc9f4b150a1
| 646,701 |
def calculate_spendings(queryResult):
    """
    calculate_spendings(queryResult): takes the query result rows from the
    display total function (strings of the form "date,category,amount") and
    turns them into a per-category total text suitable for display on the UI.
    """
    totals = {}
    for record in queryResult:
        # record layout: date,cat,money
        fields = record.split(",")
        category = fields[1]
        amount = float(fields[2])
        if category in totals:
            # keep running totals rounded to 2 decimals
            totals[category] = round(totals[category] + amount, 2)
        else:
            totals[category] = amount
    return "".join(str(cat) + " $" + str(total) + "\n"
                   for cat, total in totals.items())
|
a80fb8068ceabb690d7e9aec12b160323b2a061d
| 434,850 |
import struct
def from_big_endian(bytestring):
    """ Convert a big-endian bytestring (up to 4 bytes) to an unsigned int """
    padded = bytestring.rjust(4, b'\x00')  # left-pad to the full 32-bit width
    (value,) = struct.unpack(">L", padded)
    return value
|
02ba7eae7180b43339d0e278ce633ab414ca397b
| 467,583 |
from typing import List
from typing import Dict
def _tags_as_dict(tags: List[Dict]) -> Dict[str, str]:
"""
Convert a list of tags to a dictionary
:param tags: the list of tags
:return: the dictionary of tags
"""
return {tag["Key"]: tag.get("Value", "") for tag in tags}
|
f909f1f4fb6773cf3b1e32067d963738b6e7023d
| 39,196 |
import random
def choose_from_string(string: str, n: int):
    """Randomly draw n characters from a string without replacement.

    Returns (drawn characters as a string, remaining characters).
    """
    assert len(string) >= n
    drawn = []
    for _ in range(n):
        # pick a character, then delete its first occurrence from the pool
        picked = random.choice(string)
        string = string.replace(picked, '', 1)
        drawn.append(picked)
    return ''.join(drawn), string
|
e8605428d046456311333f73c66465319803dc13
| 627,918 |
def count_nodes(d: dict) -> int:
    """Count the number of nodes (leaves included) in a nested dictionary."""
    # every key is a node; recurse into values that are themselves dicts
    return len(d) + sum(count_nodes(v) for v in d.values() if type(v) is dict)
|
d8253cab1cbf85692be82307d85ac441ba56ec86
| 535,578 |
def deepgetattr(obj, attr, default=None):
    """
    Get a named attribute chain from an object; deepgetattr(x, 'a.b.c.d') is
    equivalent to x.a.b.c.d. When a non-None default argument is given, it is
    returned when any attribute in the chain doesn't exist; without it, an
    exception is raised when a missing attribute is encountered.
    """
    for name in attr.split("."):
        try:
            obj = getattr(obj, name)
        except AttributeError:
            # Compare with `is not None`, not truthiness: a falsy default
            # such as 0, "" or False must still be returned instead of
            # re-raising (the original `if default:` dropped those).
            if default is not None:
                return default
            raise
    return obj
|
fb3363d362f198e1ad4037f18b8c514be5742c49
| 664,938 |
def convert_x1y1x2y2_to_XcYcWH(box):
    """
    Convert a box dict {"x1","y1","x2","y2"} to {"x_centre","y_centre","width","height"}.
    Assumption 1: point 1 is the top left and point 2 is the bottom right corner.
    Centres are rounded to the nearest integer.
    """
    assert box["x1"] <= box["x2"]
    assert box["y1"] <= box["y2"]
    w = box["x2"] - box["x1"]
    h = box["y2"] - box["y1"]
    return {
        "x_centre": round(box["x1"] + w / 2),
        "y_centre": round(box["y1"] + h / 2),
        "width": w,
        "height": h,
    }
|
e7da7353b64b969b4c51dc7ece06729b05bad31e
| 12,691 |
def RGBtoHSB( nRed, nGreen, nBlue ):
    """RGB to HSB color space conversion routine.

    nRed, nGreen and nBlue are all numbers from 0 to 255.
    This routine returns three floating point numbers, nHue, nSaturation,
    nBrightness, all from 0.0 to 1.0.
    """
    nMin = min( nRed, nGreen, nBlue )
    nMax = max( nRed, nGreen, nBlue )
    if nMin == nMax:
        # Grayscale: hue/saturation are undefined -> 0.0.
        # Bug fix: brightness must be normalised to 0..1 like the color
        # branch below (it previously returned the raw 0..255 value).
        nHue = 0.0
        nSaturation = 0.0
        nBrightness = nMax / 255.0
    else:
        if nRed == nMin:
            d = nGreen - nBlue  # bug fix: was "d = nGreen = nBlue" (typo)
            h = 3.0
        elif nGreen == nMin:
            d = nBlue - nRed
            h = 5.0
        else:
            d = nRed - nGreen
            h = 1.0
        nHue = ( h - ( float( d ) / (nMax - nMin) ) ) / 6.0
        nSaturation = (nMax - nMin) / float( nMax )
        nBrightness = nMax / 255.0
    return nHue, nSaturation, nBrightness
|
5e698f58c0f01fade4bc4c4cea7b7838239c7caf
| 102,567 |
def get_nearest_point(kdtree, the_pt, dist_thresh_in_km=0.005):
    """
    Find the point nearest to "the_pt" using the KD-tree index.

    Arguments:
        kdtree {KDTree} -- The KD-tree of all the points
        the_pt {list} -- [x, y] of the query point
    Keyword Arguments:
        dist_thresh_in_km {float} -- maximum distance (in km) within which
            the nearest point must lie (default: {0.005})
    Returns:
        tuple -- (distance, point index), or (None, None) when nothing is
        within the threshold.
    """
    dist_in_km, ind = kdtree.query(the_pt, k=1)
    # reject missing results or anything farther than the threshold
    if dist_in_km is None or dist_in_km > dist_thresh_in_km:
        return None, None
    return dist_in_km, ind
|
418e7aeb37f19e6e7dd71fef160fb4ea3159baaf
| 539,399 |
def parse_port_id (parameters, port_id = "portno"):
    """
    Parse command parameters to get the socket port ID for the command.

    :param parameters: The list of parameters provided to the command.
    :param port_id: The name of the port ID parameter.

    :return The socket port ID to use for the command. The port ID provided by
        the request will be made negative if the ID is not valid (greater
        than 4). A 0 is returned when no valid port ID is provided.
    """
    try:
        port = int (parameters.get (port_id, 0))
        if (port > 4):
            port = -port
    except (AttributeError, TypeError, ValueError):
        # Narrowed from a bare `except:` — only parameter-access/conversion
        # failures mean "no valid parameter"; anything else should propagate.
        port = 0
    return port
|
f77c35ade0b80deea7b2632534f0181e300c831c
| 419,082 |
def get_number_available(product_information_table: list) -> str:
    """Return the number of books available as a digits-only string (row 6 of the table)."""
    availability_text = product_information_table[5].text
    return "".join(filter(str.isdigit, availability_text))
|
ab39cd19b36d53997e22485fbebfab63cd8202eb
| 85,358 |
from typing import List
from typing import OrderedDict
def dict_list_to_request_params(param_name: str, values: List[dict]) -> dict:
    """
    Flatten a list of dicts into Airtable-style indexed request params.

    Expected Airtable URL params look like
    ``?sort[0][field]=FieldOne&sort[0][direction]=asc``, so each dict in
    ``values`` becomes keys of the form ``<param_name>[<index>][<key>]``.
    The result is an OrderedDict sorted by key.
    """
    flattened = {
        "{param_name}[{index}][{key}]".format(param_name=param_name,
                                              index=index, key=key): value
        for index, mapping in enumerate(values)
        for key, value in mapping.items()
    }
    return OrderedDict(sorted(flattened.items()))
|
74efd49357d972c59ab0cf5ea53cb02802283358
| 378,013 |
def field(field_var):
    """Render a field based on a template (inclusion-tag helper).

    https://docs.djangoproject.com/en/1.8/howto/custom-template-tags/#inclusion-tags
    Possibly add a parameter for custom classes.

    :param field_var: the field variable
    :return: the template context
    """
    return dict(field=field_var)
|
7b48d67f83098cf8d3e4ec004bfdccc25c2565e6
| 590,120 |
import math
def bearing(obj_pos):
    """
    Return the bearing of an object in degrees:
    https://en.wikipedia.org/wiki/Bearing_(navigation). Used to determine an
    object's position in the x-direction (more to the left or right in the image).
    """
    # x forward is obj_pos[0]; depth axis is negated per the camera convention
    return math.degrees(math.atan2(obj_pos[0], -obj_pos[2]))
|
1f98504fe5f64d8fe6619eef307ba1d3a68908cd
| 442,632 |
def filtrar_entre(valores, menor, maior):
    """
    Build a list with the numbers of ``valores`` inside the half-open
    interval [menor, maior) — lower bound inclusive, upper bound exclusive.

    Parameters: a list of floats and the interval limits.
    Returns: the filtered list.
    """
    return [valor for valor in valores if menor <= valor < maior]
|
d237a40fc82b9cec4bc9052a20cc0064c5dabcf1
| 118,144 |
def parse_german_float(s):
    """
    Parse a German-formatted float string.

    German uses a dot for the thousands separator and a comma for the
    decimal mark, so dots are removed and the comma becomes a dot.
    """
    # one-pass translation: drop '.' and map ',' -> '.'
    return float(s.translate(str.maketrans({".": None, ",": "."})))
|
7092676b3356fd43a2acd3049231e08eb8898f0b
| 402,519 |
def isInList(node, alist: list):
    """
    Determine whether a node (matched by its .position) is present in a list.
    """
    return any(entry.position == node.position for entry in alist)
|
979b58647b88142d29a93a339e520416adfd78f9
| 64,498 |
import json
def read_json(file_name):
    """Read and parse a JSON file.

    :param file_name: JSON file name
    :return: the parsed JSON value
    """
    with open(file_name) as f:
        # json.load reads and parses the stream directly
        return json.load(f)
|
49384f20deaeedef58b1358de1f5924eb3ac82a8
| 193,014 |
def number_to_name(number):
    """Map an integer 0-4 to rock/spock/paper/lizard/scissor; anything else -> "Error"."""
    names = {
        0: "rock",
        1: "spock",
        2: "paper",
        3: "lizard",
        4: "scissor",
    }
    return names.get(number, "Error")
|
e0a5dd4ceb35ee0d7c2f86f95fb34f4bf1fe2da3
| 73,849 |
from typing import Tuple
from typing import List
from typing import Dict
from typing import Any
def info() -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """
    Get the input and output schemas.

    :return: OpenAPI specifications as an (input, output) pair of field lists.
    """
    input_sample = [
        {'name': "a", 'type': "string", 'required': True, 'example': "1"},
        {'name': "b", 'type': "string", 'example': "2"},
    ]
    output_sample = [
        {'name': "integer", 'type': "string", 'required': True, 'example': '42'},
    ]
    return input_sample, output_sample
|
282f60cacac69865736c58712c4bfbdb2f5e2c24
| 7,180 |
def get_groups(groups_collection):
    """Return all group documents from the collection as a list.

    :param groups_collection: Mongo collection that maintains groups
    :return: list of group documents

    NOTE(review): the original summary said "group names", but the code
    returns whole documents from ``find()`` — confirm against callers.
    """
    return list(groups_collection.find())
|
75ff224e383eaf2f4fd9e4d345231aa4a7ea587f
| 27,617 |
def rgb2hex(rgb) -> str:
    """Convert an RGB sequence to a hexadecimal color string.

    The three values of the sequence need to be in ``range(256)``.

    >>> rgb2hex([0, 255, 255])
    '#00ffff'
    """
    red, green, blue = tuple(rgb)
    return '#%02x%02x%02x' % (red, green, blue)
|
7b7240bbd260d8c0d17484f24eb96fdee4223974
| 619,258 |
def create_alias(name):
    """
    Clean an alias into an acceptable Python identifier:
    spaces and dots become underscores, everything lowercased.
    """
    return name.translate(str.maketrans(" .", "__")).lower()
|
111640d2a063e027b3c823f396cc27defcb6fd5d
| 252,624 |
def get_rare_elements_number(d, n):
    """
    Count the elements whose value is below the rarity threshold.

    :param d: dictionary to use
    :param n: the threshold for rarity
    :return: the number of rare elements, as a string
    """
    rare_count = sum(1 for value in d.values() if value < n)
    return str(rare_count)
|
367aab5fdf4efb945cccfb9dffd95573cbd72a3c
| 662,405 |
def nbsp(value):
    """
    Prevent text wrapping in the middle of a phrase by swapping every
    ordinary space for a non-breaking space (U+00A0).
    """
    return "\xa0".join(value.split(" "))
|
6ef5586da6c03c84db1acef0597ecb449d527eca
| 66,584 |
def _readlines(fname, fpointer1=open, fpointer2=open):
    """Read all lines from file.

    Tries the platform-default encoding first; on a UnicodeDecodeError it
    retries the whole read with UTF-8.

    :param fname: path of the file to read
    :param fpointer1: open-like callable used for the first attempt
    :param fpointer2: open-like callable used for the UTF-8 fallback
    :return: list of lines, newline characters preserved
    """
    # fpointer1, fpointer2 arguments to ease testing
    try: # pragma: no cover
        with fpointer1(fname, "r") as fobj:
            return fobj.readlines()
    except UnicodeDecodeError: # pragma: no cover
        with fpointer2(fname, "r", encoding="utf-8") as fobj:
            return fobj.readlines()
|
142432f9f650f4da1ac878f34b7736181c0d2424
| 471,198 |
def pybb_moderated_by(topic, user):
"""
Check if user is moderator of topic's forum.
"""
return user.is_superuser or user in topic.forum.moderators.all()
|
8dd6370f9bd0fa87c0acce1f9b6aec6a0f434888
| 555,302 |
def _is_macos(repository_ctx):
"""Determines if the host is running MacOS.
Args:
repository_ctx: A `repository_ctx` instance.
Returns:
A `bool` indicating whether the host is running MacOS.
"""
os_name = repository_ctx.os.name.lower()
return os_name.startswith("mac os")
|
35d12c32243ebe6e7253cc863280dbe53d99b0e2
| 618,969 |
def render_paper_images(paper_soup):
    """
    Apply the default visual styling to every uploaded image in the paper soup
    (clears inline styles and sets the standard image classes).
    """
    for img in paper_soup.find_all('img'):
        img['style'] = ''
        img['class'] = 'img-fluid my-3 hover-translate-y-n3 hover-shadow-lg'
    return paper_soup
|
34caf9ec1efdae0ab4f713d57fb8e52eb31a6ef9
| 504,302 |
def load_polymer(filename):
    """ Return the polymer string (first line, trailing whitespace stripped) from FILENAME.

    Returns None when the file is empty.
    """
    with open(filename) as f:
        for line in f:
            return line.rstrip()
    return None
|
f961a095137390e0a50f83121da4acb34c3e1a4d
| 587,125 |
def urlpath2(url: bytes) -> bytes:
    """ Get a url's path with the query-string (everything after '?') stripped """
    return url.partition(b'?')[0]
|
b5464b3617cbd6303f4438c92fd8f5271f6906e1
| 46,949 |
def __gen_t(dt: float, num_timesteps: int) -> list:
"""generate time vector, starting at 0
Args:
dt (float): time between saved timesteps
num_timesteps (int): number of total timesteps
Returns:
t (list): list of times
"""
t = [float(x) * dt for x in range(0, num_timesteps)]
return t
|
9ba58dfb2ec2963b48503b3f28582230b169c19f
| 150,068 |
def exif_values_to_list(value):
    """Normalise an ExifTool value to a list of strings, filtering out
    "(Binary data ..." placeholder entries.

    NOTE: a scalar that itself starts with "(Binary data " yields an
    implicit None, mirroring the original behaviour.
    """
    if isinstance(value, list):
        kept = (v for v in value if not str(v).startswith("(Binary data "))
        return [str(v) for v in kept]
    if not str(value).startswith("(Binary data "):
        return [str(value)]
|
794d0daad4cfada5a0e92062fcbd6676341e050c
| 529,035 |
def case_safe_sf_id(id_15):
    """
    Equivalent to Salesforce CASESAFEID()

    Convert a 15 char case-sensitive Id to 18 char case-insensitive Salesforce Id
    or check the long 18 char ID.
    Long 18 char Id are from SFDC API and from Apex. They are recommended by SF.
    Short 15 char Id are from SFDC formulas if omitted to use func CASESAFEID(),
    from reports or from parsed URLs in HTML.
    The long and short form are interchangable as the input to Salesforce API or
    to django-salesforce. They only need to be someway normalized if they are
    used as dictionary keys in a Python application code.

    :param id_15: 15- or 18-char Salesforce Id (falsy input returns None)
    :return: the 18-char case-insensitive Id
    :raises TypeError: when the input is not a valid Force.com Id
    """
    if not id_15:
        return None
    if len(id_15) not in (15, 18):
        # bug fix: the %r placeholder was never formatted with the value
        raise TypeError("The string %r is not a valid Force.com ID" % id_15)
    suffix = []
    # Each 5-char chunk produces one checksum char from a 5-bit uppercase mask.
    for i in range(0, 15, 5):
        weight = 1
        digit = 0
        for char in id_15[i:i + 5]:
            if char not in '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz':
                raise TypeError("The string %r is not a valid Force.com ID" % id_15)
            if char.isupper():
                digit += weight
            weight *= 2
        suffix.append(chr(ord('A') + digit) if digit < 26 else str(digit - 26))
    out = ''.join(suffix)
    if len(id_15) == 18 and out != id_15[15:]:
        raise TypeError("The string %r is not a valid Force.com ID" % id_15)
    return id_15[:15] + out
|
e70f4cfe2e03b269d8c1f31e3d903de48c63c000
| 369,080 |
def clean_up_feature_sets(*feature_sets, earliest_date: dict, last_date: dict) -> list:
    """Keep only rows whose DATE lies inside each subject's observation window
    [earliest_date[subject], last_date[subject])."""
    cleaned = []
    for frame in feature_sets:
        upper = frame.SUBJECT_ID.map(last_date)
        lower = frame.SUBJECT_ID.map(earliest_date)
        cleaned.append(frame[(frame.DATE < upper) & (frame.DATE >= lower)])
    return cleaned
|
06e4ac3713dce63a4237694fa6b6b0ed850216f6
| 48,233 |
def inverse(pattern):
    """ Return the inverse of a pattern: '0' and '1' swapped, other characters kept. """
    flip = {'0': '1', '1': '0'}
    return ''.join(flip.get(ch, ch) for ch in pattern)
|
7c7e63833cae343e5dd155f868f6b3da5c656133
| 244,187 |
def age_window_hit(by_predicted, by_truth, m=lambda x: -0.1*x+202.8):
    """
    Check whether a predicted birth year falls inside the tolerance window
    centred on the true birth year.

    :param by_predicted: the predicted birth year
    :param by_truth: the true birth year
    :param m: function giving the half-window size for a given true year
    :return: True if by_predicted lies within the m-window of by_truth
    """
    half_window = m(by_truth)
    lower = int(by_truth - half_window)
    upper = int(by_truth + half_window)
    return lower <= by_predicted <= upper
|
141ab8467fbef19fadce8cdce62657cd158ae391
| 304,942 |
def gen_v_stmt(q1n, q2n):
    """
    Build a verify statement string for the two given names.
    """
    return "verify " + "{} {}".format(q1n, q2n) + ";\n"
|
5fd8c56b97a001934601427a1e36754cb41f8830
| 547,943 |
import six
def make_bytes(dictionary):
    """Encodes all text values in the dictionary to UTF-8 bytes. Converts
    all other objects to their string form first, then encodes them.

    Returns a copy of the dictionary, doesn't touch the original.

    Note: rewritten with plain Python 3 str/bytes instead of the ``six``
    compatibility shim; behaviour on Python 3 is unchanged.
    """
    result = {}
    for key, value in dictionary.items():
        # Keep binary data as-is.
        if isinstance(value, bytes):
            result[key] = value
            continue
        # If it's not a string, convert it to one.
        if not isinstance(value, str):
            value = str(value)
        result[key] = value.encode('utf-8')
    return result
|
07dae3ce26a97203b7f1bd067f34e88b18522c06
| 98,115 |
def expand_by_device(original_parallelism, device_parallelism, data):
    """Opposite of reduce_by_device().

    Args:
        original_parallelism: a expert_utils.Parallelism object.
        device_parallelism: a expert_utils.Parallelism object.
        data: a list of tensors with length device_parallelism.n
    Returns:
        a list of Tensors with length original_parallelism.n
    """
    # map each device to its datum, then fan back out to the original devices
    mapping = {}
    for i in range(device_parallelism.n):
        mapping[device_parallelism.devices[i]] = data[i]
    return [mapping[device] for device in original_parallelism.devices]
|
6dff03ffeccbba896213f49b0848e436bddb88ae
| 553,065 |
import json
def load_dicefile(file):
    """ Load and parse the JSON dicewords file from disk.

    :param file: path of the dicewords JSON file
    :return: the parsed dicewords mapping
    """
    with open(file) as handle:
        return json.load(handle)
|
42da982410dfc1ac39a1f40766ba6f7a2824d088
| 672,333 |
def splitparticle(s):
    """Split a particle string (e.g. '1Cs133', 'Cs133', '1e') into its parts.

    Parameters
    ----------
    s : str
        Input particle string.

    Returns
    -------
    tuple of (int, str, int)
        (number of particles, element symbol, atomic mass number)

    Examples
    --------
    splitparticle('1Cs133')  ->  (1, 'Cs', 133)
    splitparticle('-e')      ->  (-1, 'e', 0)
    """
    # tail = symbol + mass number; head = leading signed count (may be empty)
    tail = s.lstrip('+-0123456789')
    head = s[:-len(tail)]
    if head == '+' or head == '':
        # an omitted count or a bare plus sign means one particle
        n = 1
    elif head == '-':
        n = -1
    else:
        n = int(head)
    El = tail.rstrip('0123456789')
    # electrons (e.g. '-e') carry no mass number; hadrons end with one
    if El == 'e' and len(El) == len(tail):
        A = 0
    else:
        A = int(tail[len(El):])
    return n, El, A
|
5bebda09b0695d6c027245d9ad44ed7bb148cca8
| 544,019 |
def gridIndicesToMatIndices(i, j, maxI, maxJ):
    """
    Convert grid coordinates to numpy array coordinates.
    The grid has (0,0) at the bottom-left corner while numpy has (0,0)
    at the top-left corner.

    :param i: x - column index
    :param j: y - row index
    :param maxI: number of columns in the grid
        (NOTE(review): unused by the computation — kept for interface
        compatibility; confirm whether callers rely on it)
    :param maxJ: number of rows in the grid
    :return: converted row and column coordinates
    """
    row = i - 1
    col = maxJ - j - 1
    return (row, col)
|
de988d3405feeda0e85f28dc0b858f7ea951d75e
| 504,324 |
import re
def normalize_ssh_url(url):
    """Convert an scp-style SSH url ("user@host:path") to an RFC1738 ssh:// url.

    normalize_ssh_url('[email protected]:/path/to/repo.git/')
        -> 'ssh://[email protected]/path/to/repo.git/'
    normalize_ssh_url('[email protected]:path/to/repo.git/')
        -> 'ssh://[email protected]/path/to/repo.git/'

    :param url: the url to normalize
    :return: the ssh:// url, or the input unchanged when it does not match
    """
    matched = re.match(r"(?P<username>\w+@|)(?P<hostname>\w+\.[^:]+):(?P<path>.+)", url)
    if not matched:
        return url
    username, hostname, path = matched.group("username", "hostname", "path")
    if not path.startswith("/"):
        # scp-style relative paths become absolute in the ssh:// form
        path = "/" + path
    return "ssh://{}{}{}".format(username, hostname, path)
|
950ee9a4eb56926ad7c7f2c0712a77af89c84639
| 222,513 |
def deals_summary_statistics(shelves):
    """
    Given a list of shelves, returns a query to return summary statistics for all deals belonging to those shelves.

    :param shelves: An array of strings representing the Bloomberg deal names of the shelves to be included
    :return: Summary statistics for all deals in the given shelves.
        The returned value is a query specification (dict), not the
        statistics themselves — presumably consumed by a Cube.js-style
        analytics API (TODO confirm the target service).
    """
    return {
        # group results per deal / asset pool
        "dimensions": ["Deal.name", "AssetPool.assetPoolId"],
        "order": [["Deal.name", "desc"]],
        # restrict to the requested shelves
        "filters": [
            {
                "member": "Shelf.bloombergName",
                "operator": "equals",
                "values": shelves
            }
        ],
        "segments": ["AssetPool.securitizedAndPricing"],
        # cutoff-date loan statistics to aggregate
        "measures": [
            "Asset.count",
            "LoanAssetCutoff.balanceAverage",
            "LoanAssetCutoff.weightedAveragePaymentToIncome",
            "LoanAssetCutoff.balanceSum",
            "LoanAssetCutoff.weightedAverageObligorCreditScore",
            "LoanAssetCutoff.percentNoCreditScore",
            "LoanAssetCutoff.weightedAverageLoanToValueAtCutoff",
            "LoanAssetCutoff.percentUsed",
            "LoanAssetCutoff.weightedAverageOriginalTermAtCutoff",
            "LoanAssetCutoff.weightedAverageRemainingTermAtCutoff",
            "LoanAssetCutoff.weightedAverageInterestRateAtCutoff"
        ]
    }
|
fec001aeafe17a34a158e3c38b57c461eb6792a2
| 543,115 |
from typing import List
def extract_fields(rec: str, pos: List[int]) -> List[str]:
    """ Extract the characters of a delimited record at the given positions,
    skipping positions beyond the record's length. Empty records yield []. """
    if not rec:
        return []
    limit = len(rec)
    return [rec[i] for i in pos if i < limit]
|
8f5e8b15da4509b2cc7d9bae0d96f548c835d38e
| 285,151 |
from pathlib import Path
def get_live_toml_file(toml_file: Path) -> Path:
    """
    Prefer the "<name>_live" variant of the TOML file when it exists on disk.

    Parameters
    ----------
    toml_file: Path
        User-provided path to the TOML file.

    Returns
    -------
    Path
        The "_live" sibling path if it exists, otherwise the original path.
    """
    candidate = Path(str(toml_file) + "_live")
    return candidate if candidate.exists() else toml_file
|
df852bfffebf9055a9e0ebc565755866e2bf1f25
| 116,538 |
def fix_imessage_date(seconds):
    """
    Convert an iMessage timestamp to unix epoch time.

    iMessage dates are not standard unix time: they count seconds from
    midnight 2001-01-01 instead of 1970-01-01, so the fixed offset between
    the two epochs is added.

    Source: http://d.hatena.ne.jp/sak_65536/20111017/1318829688
    (Thanks, Google Translate!)
    """
    apple_epoch_offset = 978307200  # seconds between 1970-01-01 and 2001-01-01
    return seconds + apple_epoch_offset
|
fefc32c1489048e9d2ca5532bf00a2867eff2d1d
| 253,332 |
def get_filepaths_from_data_dir(data_dir, file_extension="*.txt"):
    """Collect paths to the files in a data directory matching a glob pattern.

    Args:
        data_dir (PosixPath): PosixPath to the data directory.
        file_extension (str): glob pattern selecting which files to collect.

    Returns:
        list of Path: the matching entries that are regular files.
    """
    return [entry for entry in data_dir.glob(file_extension) if entry.is_file()]
|
e03d01040e8b11e1b701864004225445ef4127ee
| 319,119 |
import json
def loadjson(fp):
    """Convenience wrapper: parse the JSON file at path fp and return the data."""
    with open(fp, 'r') as fin:
        return json.load(fin)
|
4254e1d187ed72f1436152692a001c1466d35303
| 449,244 |
def calc_median(nums):
    """Calculate the median of a number list.

    Bug fixes versus the original:
    - the odd-length branch used index N//2 - 1 (e.g. median([1, 2, 3])
      returned 1 instead of 2); the middle element is at N//2;
    - the caller's list is no longer sorted in place.

    For an even count the median is the mean of the two middle values.
    """
    ordered = sorted(nums)  # sorted copy: do not mutate the caller's list
    count = len(ordered)
    middle = count // 2
    if count % 2 == 0:
        return (ordered[middle - 1] + ordered[middle]) / 2
    return ordered[middle]
|
3755d4436fe66c5e56d5297c2e2b2e59763caa90
| 89,252 |
def _check_for_item(lst, name):
"""Search list for item with given name."""
filtered = [item for item in lst if item.name == name]
if not filtered:
return None
else:
return filtered[0]
|
b820fa63e952fb9e0d5007811588e74071d18140
| 485,030 |
def unstack_and_count(da, dims):
    """ Unstacks provided xarray object and returns the total number of elements along dims

    :param da: xarray object; if its first dimension is a stacked
        (MultiIndex) dimension it is unstacked first — presumably the usual
        input shape here (TODO confirm against callers).
    :param dims: dimension name(s) to count along; when None the ones-array
        is returned without summing.
    """
    try:
        unstacked = da.unstack(da.dims[0])
    except ValueError:
        # first dimension is not stacked -- use the object as-is
        unstacked = da
    if dims is None:
        # (0*x)+1 builds a ones-array with the same shape/coords as x
        return ((0 * unstacked) + 1)
    else:
        # count elements along dims; skipna avoids counting missing entries
        return ((0 * unstacked) + 1).sum(dim=dims, skipna=True)
|
e60879b140934b67d9684ab3ff71dfe8a9fd31bb
| 578,863 |
def compile_unmatched(unmatched, applicants):
    """
    Compile the list of unmatched applicants after allocation, using the
    output unmatched IDs from allocate.

    :param unmatched: iterable of applicant ids left unmatched
    :param applicants: list of applicant dicts carrying an "id" key
    :return: the applicants whose id appears in unmatched, in input order
    """
    unmatched_ids = set(unmatched)  # O(1) membership instead of a list scan per applicant
    return [a for a in applicants if a["id"] in unmatched_ids]
|
3ead63f58e90c2b4b70c12f2f9970a539af8fd8a
| 563,017 |
def get_list_scafs(file_in):
    """
    Return the list of scaffolds to walk through. Expects one scaffold per
    line in the input file; surrounding whitespace is stripped.
    """
    with open(file_in, 'r') as infile:
        return [line.strip() for line in infile]
|
bab93ee2cd070517fe3946d481acac5ac675e525
| 500,506 |
def bit_flip(array):
    """Given a binary array, find the maximum number of zeros achievable with
    one flip of a subarray (a flip switches all 0s to 1s and 1s to 0s).

    Kadane's algorithm on a +/-1 mapping: each 1 in a flipped subarray gains
    a zero (+1), each 0 loses one (-1); the best subarray gain is added to
    the original zero count.
    """
    zeros = 0
    best_gain = 0
    running_gain = 0
    for bit in array:
        if not bit:
            bit = -1
            zeros += 1
        running_gain = max(0, running_gain + bit)
        best_gain = max(best_gain, running_gain)
    return zeros + best_gain
|
aeed191e9d30816492a6f45c889e20e1435e094d
| 197,011 |
import torch
def remove_bbx_abnormal_z(bbx_3d):
    """
    Build a keep-mask for boxes whose z extent lies in the sane range [-3, 1].

    Parameters
    ----------
    bbx_3d : torch.Tensor
        Predicted 3d bounding boxes, shape (N, 8, 3).

    Returns
    -------
    index : torch.Tensor
        Boolean keep mask of shape (N,).
    """
    z_coords = bbx_3d[:, :, 2]
    lowest = z_coords.min(dim=1)[0]
    highest = z_coords.max(dim=1)[0]
    return (lowest >= -3) & (highest <= 1)
|
b598da809600ef50c84c14b42554490c3b0131bf
| 575,526 |
def enum_name(name):
    """Shorten an enumeration name by dropping its 'GL_' prefix."""
    prefix = 'GL_'
    assert name.startswith(prefix)
    return name[len(prefix):]
|
f428d90c43ff674910e4e7195143497a6338d976
| 512,905 |
def scalar_prod(n, a):
    """
    Multiply every element of vector ``a`` by the scalar ``n``.

    Generalized from the original hard-coded 3-component version to vectors
    of any length; behaviour for 3-component inputs is unchanged.

    :param n: the scalar multiplier
    :param a: sequence of numbers
    :return: a new list with each element scaled by n
    """
    return [n * component for component in a]
|
ca4756599eeabefc4a0b7b22357bb22bf5348a28
| 475,140 |
import random
def build_batches(sentences, batch_size, num_chunks_in_batch=1):
    """
    Randomize batches sequences along sentences if dataset indexes.
    https://gist.github.com/pommedeterresautee/1a334b665710bec9bb65965f662c94c8#file-trainer-py-L181
    https://wandb.ai/pommedeterresautee/speed_training/reports/Train-HuggingFace-Models-Twice-As-Fast--VmlldzoxMDgzOTI

    Args:
        sentences (list): List of samples. NOTE: this list is CONSUMED —
            elements are deleted from it as chunks are drawn, and it is
            empty when the function returns.
        batch_size (int): Batch size.
        num_chunks_in_batch (int, optional): Training batch should consist of
            several "chunks" of samples with different sequece lengths to make
            training more random. Default is 1, which mean in batch would be
            samples with the same length.

    Returns:
        list: all samples, reordered chunk by chunk. Ordering depends on the
        global `random` state (seed it for reproducibility).
    """
    chunk_size = int(batch_size/num_chunks_in_batch)
    batch_ordered_sentences = list()
    while len(sentences) > 0:
        # draw a contiguous chunk starting at a random offset; the final
        # chunk may be shorter than chunk_size
        to_take = min(chunk_size, len(sentences))
        select = random.randint(0, len(sentences) - to_take)
        batch_ordered_sentences += sentences[select:select + to_take]
        # remove the taken chunk so it cannot be drawn again
        del sentences[select:select + to_take]
    return batch_ordered_sentences
|
66da330776532fd29807cdf00e3ec22d6ee7abf1
| 219,608 |
def find_bounds(f, y):
    """
    Find approximate bounds for binomial search.

    Doubles x from 1 until f(x) >= y (or x passes ~1e9), then returns
    (lo, hi) where lo is x/2 — or the fixed fallback -100 when no doubling
    happened — and hi is x capped near INT_MAX.

    NOTE(review): the magic constants 1000483646 and 2047483646 look like
    ad-hoc guards near 2**31-1 — confirm their intent. Also note lo = x/2
    yields a float under Python 3 true division — verify callers expect that.
    """
    x = 1
    # exponential search: double x until f(x) reaches y or x gets too large
    while((f(x) < y) and (x<1000483646) ):
        x = x * 2
    lo = -100
    if (x ==1):
        # no doubling happened: fall back to a fixed lower bound
        lo = -100
    else:
        lo = x/2
    if(x > 1000):
        # cap the upper bound near INT_MAX
        x = min(x,2047483646)
    return lo, x
|
c0cbf9f9867d028f8d83da5ae05860613946e987
| 618,870 |
import re
def is_s3_file_url(file_url):
    """
    Match urls that are S3-backed: https urls or the frappe S3 attachment
    generate_file endpoint. Returns the re.Match (truthy) or None.
    """
    pattern = r"^(https:|/api/method/frappe_s3_attachment.controller.generate_file)"
    return re.match(pattern, file_url)
|
b9dbc7eda61d53177509fc2a3e306c60c9f4bf24
| 527,064 |
def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of a.

    Equivalent to ``a.ravel().nonzero()[0]``.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    res : ndarray
        Indices of the non-zero elements of ``a.ravel()``.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])
    """
    flat = a.ravel()
    # nonzero() on a 1-D array returns a 1-tuple; unpack its single element.
    (indices,) = flat.nonzero()
    return indices
|
de4d3367251ce17188e3e07901a711ccaeb7c994
| 242,974 |
def deepcopy_nested_dict(nested_dict_to_deepcopy: dict):
    """
    Two-level copy of a nested dictionary, e.g. {k1: {...}, ..., kN: {...}}.

    Both the outer dict and every inner dict are shallow-copied, so mutating
    the inner dicts of the result does not affect the input (and vice versa).

    :param nested_dict_to_deepcopy: the nested dictionary to copy
    :return: a new dict whose values are copies of the inner dicts
    """
    copied = {}
    for key, inner in nested_dict_to_deepcopy.items():
        # Every value must itself be a plain dict (two-level structure).
        assert type(inner) is dict
        copied[key] = inner.copy()
    return copied
|
09e3c0d4eecf88613b25a7344ad3ca9f4ea8c23b
| 108,605 |
def uniq(l: list) -> list:
    """Return the elements of l with duplicates removed, keeping first-seen order."""
    seen = set()
    result = []
    for item in l:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
|
d96171b0eff66180222e1b48ee553aec4dda40da
| 575,671 |
from bs4 import BeautifulSoup
def extract_internal_links(HTML):
    """
    Looks through HTML and extracts the href target of every <a> tag.

    :param HTML: HTML document as a string
    :return: list of href values (None for <a> tags with no href attribute)
    """
    # Name the parser explicitly: relying on bs4's auto-detection emits a
    # GuessedAtParserWarning and may pick different parsers (and thus build
    # different trees) depending on what is installed on the machine.
    soup = BeautifulSoup(HTML, 'html.parser')
    return [anchor.get('href') for anchor in soup.find_all('a')]
|
32150de06eb796572c3651ca2399d74a5bcae643
| 376,910 |
def cutlabel(s, cuts):
    """Cuts a string s using a set of (n, label) cuts.

    Returns a list of (sub, label) pairs, where each cut position n starts the
    segment carrying that cut's label.
    If there was an initial part before the first cut, then it has a label of None.
    If there are no cuts, returns s as a single element, with label None.
    """
    cuts = sorted(cuts)
    # no cuts -> whole string is an element
    if not cuts:
        return [(s, None)]
    if cuts[0][0] != 0:
        cuts.insert(0, (0, None))
    # Bug fix: the closing sentinel must be appended whenever the last cut is
    # before len(s). The previous `< len(s)-1` condition silently dropped the
    # final labeled segment when a cut started at the last character.
    if cuts[-1][0] < len(s):
        cuts.append((len(s), None))
    locs, labels = zip(*cuts)
    ret = []
    for i, j, label in zip(locs, locs[1:], labels):
        ret.append((s[i:j], label))
    return ret
|
2f81a64d24dc0d31ff6671f2cc8459a38e3af51c
| 668,956 |
def formatIntervalSeconds(cSeconds):
    """ Format a seconds interval into a nice '1d 2h 3m 4s' style string.

    Zero-valued components are omitted (except when the whole interval is
    under a minute, which is rendered as e.g. '59s').
    """
    # Two simple special cases.
    if cSeconds < 60:
        return '%ss' % (cSeconds,)
    if cSeconds < 3600:
        # Bug fix: use floor division -- under Python 3 '/' is true division
        # and produced strings like '1.5m 30s'.
        cMins = cSeconds // 60
        cSecs = cSeconds % 60
        if cSecs == 0:
            return '%sm' % (cMins,)
        return '%sm %ss' % (cMins, cSecs)
    # Generic and a bit slower.
    cDays = cSeconds // 86400
    cSeconds %= 86400
    cHours = cSeconds // 3600
    cSeconds %= 3600
    cMins = cSeconds // 60
    cSecs = cSeconds % 60
    sRet = ''
    if cDays > 0:
        sRet = '%sd ' % (cDays,)
    if cHours > 0:
        sRet += '%sh ' % (cHours,)
    if cMins > 0:
        sRet += '%sm ' % (cMins,)
    if cSecs > 0:
        sRet += '%ss ' % (cSecs,)
    # At least one component is non-zero here since cSeconds >= 3600 above.
    assert len(sRet) > 0 and sRet[-1] == ' '
    return sRet[:-1]
|
eca067323d84cd3d6ea72dffe4d0658c86e896af
| 465,028 |
def time_str_to_int(s: str) -> int:
    """
    Convert a 24-hour ("13:00") or 12-hour ("1PM") time string to an integer
    hour of the day.

    Parameters
    ----------
    s : str
        24-hour or 12-hour time string

    Returns
    -------
    int
        Hour of the day between [0,24)

    Raises
    ------
    ValueError
        If the string matches none of the supported formats.
    """
    lowered = s.lower()
    if ":" in s:
        # 24-hour form: the hour is everything before the colon.
        return int(s.strip().split(":")[0])
    if "am" in lowered:
        # 12-hour form; "12am" maps to hour 0 via the modulo.
        return int(lowered.split("am")[0].strip()) % 12
    if "pm" in lowered:
        # 12-hour form; "12pm" maps to hour 12 via the modulo.
        return (int(lowered.split("pm")[0].strip()) % 12) + 12
    # Make no assumptions about what time they are using;
    # throw an error to be safe.
    raise ValueError(
        f"Given string '{s}' is not of the form ##:00 or ##AM or ##PM."
    )
|
aa9d653e2caff24cb06b8f9761f0a5436689f711
| 404,868 |
def find_diff(g, start):
    """
    g -> Graph of states of nibbles: a mapping from state to an iterable of
         successor states
    start -> Starting configuration (a state in `g`)

    Walk the graph breadth-first from `start`, one "round" per level, stopping
    when either no successors remain or the state 4095 is produced (the round
    in which 4095 appears is still completed and recorded).

    The function returns a tuple containing:
    1. the number of rounds performed
    2. a list of the set of reachable states at each round; index 0 is the
       starting set {start} and the last entry is the final round's states
    """
    vs = set([start])
    states = [vs]
    rounds = 0
    is_end = False
    while len(vs) > 0 and not is_end:
        n_vs = set()
        for v0 in vs:
            for v1 in g[v0]:
                if v1 == 4095:
                    # 4095 marks the terminal state; finish expanding this
                    # round, then stop iterating.
                    is_end = True
                n_vs.add(v1)
        vs = n_vs
        states.append(vs)
        rounds += 1
    return (rounds, states)
|
74f24be96f057e2810fed6de6bb7753be31092d5
| 691,450 |
def filter_dicom(dcmdata):
    """Return True if a DICOM dataset should be filtered out, else False.

    A dataset is filtered out when its ImageComments attribute contains the
    phrase 'reference volume' (case-insensitive).
    """
    comments = getattr(dcmdata, 'ImageComments', '')
    if comments and 'reference volume' in comments.lower():
        print("Filter out image with comment '%s'" % comments)
        return True
    return False
|
6b38e3dd12e032e9f56c07717c7296c276cbb01a
| 196,100 |
def first(iterable, default=None, key=None):
    """Return the first element of *iterable* that evaluates to ``True``,
    else ``None`` or the optional *default*. Similar to :func:`one`.

    >>> first([0, False, None, [], (), 42])
    42
    >>> first([0, False, None, [], ()]) is None
    True
    >>> first([0, False, None, [], ()], default='ohai')
    'ohai'

    The optional *key* argument is a one-argument predicate like the one
    accepted by :func:`filter`; it should be passed as a keyword. Example --
    finding the first even number in an iterable:

    >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
    4

    Contributed by Hynek Schlawack, author of `the original standalone module`_.

    .. _the original standalone module: https://github.com/hynek/first
    """
    # With no key, fall back to plain truthiness (mirrors filter(None, ...)).
    predicate = bool if key is None else key
    for element in iterable:
        if predicate(element):
            return element
    return default
|
2b389efbb7b3630f9e21613b3ba62f8ea44d8f7f
| 395,860 |
def frags_in_cutoff(fragList, cutoff, center_ip_id):
    """Return list of indices of frags within cutoff from central ion pair excluding central ion pair."""
    return [
        index
        for index, frag in enumerate(fragList)
        if index != center_ip_id and frag["dist"] < cutoff
    ]
|
40a20823eb30b3c6a2b91a5802bbe1d94e6a37db
| 118,488 |
def transcribe(seq: str) -> str:
    """
    Transcribe DNA to RNA by replacing every "T" with "U".
    """
    # str.translate performs the single-character substitution in one pass;
    # only uppercase "T" is affected, matching the original replace().
    return seq.translate(str.maketrans("T", "U"))
|
5c1af98f5ff76c9d3ac670471843dd8abe6bde4b
| 259,393 |
def pictures(listing):
    """Score a listing by its number of images: none is penalized heavily,
    one moderately, 2-3 are neutral, and 4 or more earn a bonus."""
    nimages = len(listing.images)
    if nimages >= 4:
        return 200
    if nimages >= 2:
        return 0
    if nimages == 1:
        return -200
    return -500
|
c9a9a2cf089a1f2a9b0e00c104c5ccdb46e79260
| 579,991 |
import math
def vector_angle(x, y):
    """
    A method which returns the angle of a vector.

    Parameter(s):
    -------------
    x : Integer/Float
        x-axis value of vector.

    y : Integer/Float
        y-axis value of vector.

    Returns:
    --------
    theta : Integer/Float
        Angle of vector in radians, in the range [0, 2*pi).

    See also:
    ---------
    circle_label_angle
    """
    # math.atan2 resolves all four quadrants directly, replacing the manual
    # CAST-rule adjustment, and -- unlike the previous asin(y / hypot) form --
    # does not divide by zero for the zero vector (atan2(0, 0) is 0.0).
    theta = math.atan2(y, x)
    # atan2 returns values in (-pi, pi]; shift negatives into [0, 2*pi).
    if theta < 0:
        theta += 2 * math.pi
    return theta
|
691303f63741c159e6a588294e7b41cdfb96fd83
| 449,433 |
def get_target(model, observation):
    """Get the training target from an observation, selected by the model's
    kind ('dynamics' -> next_state, 'rewards' -> reward,
    'termination' -> done)."""
    kind_to_attribute = {
        "dynamics": "next_state",
        "rewards": "reward",
        "termination": "done",
    }
    try:
        attribute = kind_to_attribute[model.model_kind]
    except KeyError:
        raise NotImplementedError
    return getattr(observation, attribute)
|
fc5f6ba3af330279172d2adb3f35f1079cd60e40
| 544,889 |
from typing import Sequence
from typing import List
def indent_iterable(elems: Sequence[str], num: int = 2) -> List[str]:
    """Prefix every string in *elems* with *num* spaces."""
    prefix = " " * num
    return [prefix + elem for elem in elems]
|
efb7ca21051aa798ec853ed2544df5ca1f3e24b0
| 557,274 |
import pickle
def load_data(file):
    """Load (preprocessed) data from file.

    The file must contain two consecutive pickle streams: the data matrix
    followed by the list of gene names.

    :param file: path to the pickle file
    :return: (X, gene_names) tuple
    """
    with open(file, 'rb') as handle:
        data = pickle.load(handle)
        gene_names = pickle.load(handle)
    return data, gene_names
|
8e1729e2cf9b76ce47ae4fcbbecda97f235cdb8e
| 657,700 |
import math
def _2d_rotate(position, angle_degrees):
"""rotate a 2d vector around an angle (in degrees)"""
radians = math.radians(angle_degrees)
# take the negative of the angle because the orientation circle works clockwise in this world
cos = math.cos(-radians)
sin = math.sin(-radians)
x, y = position
return x * cos - y * sin, - (x * sin + y * cos)
|
cd031deef2ed1be59ffa03214055ede3277d7912
| 632,154 |
import torch
def normalize_0_1(input: torch.Tensor) -> torch.Tensor:
    """
    Min-max normalize a tensor to the range [0, 1].

    The computation is not performed inplace; a new tensor is returned.
    (An earlier docstring mentioned an ``inplace`` parameter that does not
    exist in the signature.)

    NOTE(review): when the tensor is constant, ``max - min`` is 0 and the
    result of the float division is NaN -- callers should guard against
    constant inputs.

    :param input: (torch.Tensor) Input tensor
    :return: (torch.Tensor) Normalized output tensor
    """
    # Perform normalization not inplace
    return (input - input.min()) / (input.max() - input.min())
|
45c70dde803b05b1afe2ce52fdf9cdd12dbe0cfb
| 409,402 |
def volpiano_characters(*groups):
    """Returns accepted Volpiano characters.

    The characters are organized in several groups: bars, clefs, liquescents,
    naturals, notes, flats, spaces and others. Pass group names as optional
    positional arguments to return only those subsets; with no arguments,
    every group is returned in its canonical order:

    >>> volpiano_characters()
    '3456712()ABCDEFGHJKLMNOPQRSIWXYZ89abcdefghjklmnopqrsiwxyz.,-[]{¶'
    >>> volpiano_characters('naturals', 'flats')
    'IWXYZiwxyz'

    Parameters
    ----------
    *groups : [str]
        The groups to include: 'bars', 'clefs', 'liquescents', 'naturals',
        'notes', 'flats', 'spaces' or 'others'

    Returns
    -------
    str
        A string with accepted Volpiano characters
    """
    # Insertion order of this dict fixes the output order for the no-args case.
    symbols = {
        'bars': '34567',
        'clefs': '12',
        'liquescents': '()ABCDEFGHJKLMNOPQRS',
        'naturals': 'IWXYZ',
        'notes': '89abcdefghjklmnopqrs',
        'flats': 'iwxyz',
        'spaces': '.,-',
        'others': "[]{¶",
    }
    selected = groups if groups else tuple(symbols)
    return ''.join(symbols[group] for group in selected)
|
af361f302fc531828655e1274e7c0868969c2fdc
| 60,839 |
import csv
def read_csv_metrics(path):
    """ Read a metrics summary file produced by superpmi, and return the single row containing the information as a dictionary.

    Args:
        path (str) : path to .csv file

    Returns:
        A dictionary with each metric, or None when the file has no data rows.
    """
    with open(path) as csv_file:
        # Only the first data row matters; None when the file is header-only.
        return next(csv.DictReader(csv_file), None)
|
a397333fac6e591012cacc35274cfc8a3686f91e
| 170,033 |
import fnmatch
def in_list(patterns, name):
    """
    Return true if name matches at least one pattern in the list.
    """
    return any(fnmatch.fnmatch(name, pattern) for pattern in patterns)
|
a05b78c6d879580683ebf617f889290cb1fa98b0
| 179,877 |
def normalizeExpression(licsConcluded):
    """
    Combine array of license expressions into one AND'd expression,
    adding parens where needed.

    Arguments:
        - licsConcluded: array of license expressions
    Returns: string with single AND'd expression.
    """
    # return appropriate for simple cases
    if len(licsConcluded) == 0:
        return "NOASSERTION"
    if len(licsConcluded) == 1:
        return licsConcluded[0]
    # more than one, so we'll need to combine them
    # iff an expression has spaces, it needs parens
    revised = []
    for lic in licsConcluded:
        if lic in ["NONE", "NOASSERTION"]:
            continue
        if " " in lic:
            revised.append(f"({lic})")
        else:
            revised.append(lic)
    # Bug fix: when every entry was NONE/NOASSERTION, the join of the empty
    # list returned "", which is not a valid SPDX expression.
    if not revised:
        return "NOASSERTION"
    return " AND ".join(revised)
|
d65d60ed5dc24e4220b2cfac87ab9e5ce4abd794
| 360,065 |
def reverse_enumerate(l):
    """Like enumerate but in the other direction.

    Usage::

        >>> a = ['a', 'b', 'c']
        >>> it = reverse_enumerate(a)
        >>> next(it)
        (2, 'c')
        >>> next(it)
        (1, 'b')
        >>> next(it)
        (0, 'a')
        >>> next(it)
        Traceback (most recent call last):
          ...
        StopIteration
    """
    # reversed(range(n)) counts n-1 .. 0, pairing each index with the
    # correspondingly reversed element.
    return zip(reversed(range(len(l))), reversed(l))
|
14cb2d145d0f23c7bed626f6eb5b2af878acec30
| 130,073 |
def convert_to_float(lst, purpose):
    """
    Returns a list of all float-convertable values of `lst`, along with the
    length of the new list.

    Entries equal to '' or starting with 'DNF' are skipped. Entries ending in
    '+' are kept: converted to float (without the '+') when `purpose` is
    'average', or kept verbatim when `purpose` is 'single'.

    :param lst: iterable of recorded times (numbers or strings)
    :param purpose: 'average' or 'single'
    :return: (converted values, number of values kept)
    """
    float_times = []
    for t in lst:
        text = str(t)
        # Bug fix: guard empty strings up front -- previously '' fell through
        # to a str(t)[-1] check, which raised IndexError on an empty string.
        if text == '' or text.startswith('DNF'):
            continue
        if text.endswith('+'):
            if purpose == 'average':
                float_times.append(float(text[:-1]))
            elif purpose == 'single':
                float_times.append(t)
        else:
            float_times.append(float(t))
    # The length is derived instead of tracked with a separate counter.
    return float_times, len(float_times)
|
83ad621eccb38fab848533c2ea8f61846b8bad1e
| 235,253 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.