content (string, length 39 to 9.28k) | sha1 (string, length 40) | id (int64, 8 to 710k) |
---|---|---|
def _bp_static_url(app, blueprint):
""" builds the absolute url path for a blueprint's static folder """
urls = app.url_map.bind('')
u = urls.build("{}.static".format(blueprint.name), values={"filename":""})
return u
|
3058b5ca1e594d599b5472ef3610dd3925bbd9c7
| 565,813 |
def is_tag(obj):
"""Determines if `obj` is a tuple of two strings.
Examples:
>>> is_tag(('hello', 'yes'))
True
>>> is_tag(('hi', 22))
False
"""
try:
return (
isinstance(obj, tuple)
and len(obj) == 2
and all((isinstance(x, str) or x is None) for x in obj)
)
except Exception:
return False
|
fffae8647d54bc13568f8c5aa1a2db90458bc3e3
| 295,070 |
def bdev_opal_delete(client, bdev_name, password):
"""Delete opal virtual bdev from the system.
Args:
bdev_name: name of opal vbdev to delete
password: admin password of base nvme bdev
"""
params = {
'bdev_name': bdev_name,
'password': password,
}
return client.call('bdev_opal_delete', params)
|
8427a4e62230485809c06b3a142cb294752266c0
| 314,107 |
def truncate(x, d):
"""
Truncate a number to d decimal places
Args:
x: the number to truncate
d: the number of decimal places; negative to truncate to powers of ten
Returns: truncated number
"""
if d > 0:
mult = 10.0 ** d
return int(x * mult) / mult
else:
mult = 10 ** (-d)
return int(x / mult) * mult
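# A brief usage sketch (not part of the original record): truncation
# drops digits toward zero rather than rounding.
assert truncate(3.14159, 2) == 3.14
assert truncate(1234, -2) == 1200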
|
880104d3f00fd1f37c424aca0f356a0c043b210a
| 401,889 |
def quintic_ease_out(p):
"""Modeled after the quintic y = (x - 1)^5 + 1"""
f = p - 1
return (f * f * f * f * f) + 1
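# Illustrative values (assuming p in [0, 1]): the curve starts fast and
# decelerates into the endpoint.
assert quintic_ease_out(0.0) == 0.0
assert quintic_ease_out(0.5) == 0.96875
assert quintic_ease_out(1.0) == 1.0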
|
ffd09fcc45f08861688257c254cc86a53bf13041
| 693,736 |
def get_element_coordinates(feeder_mapping, element_type, name):
"""Return the coordinates of the bus to which the element is attached.
Parameters
----------
feeder_mapping : dict
dictionary created by this module at a feeder level
element_type : str
capacitors, lines, loads, etc.
name : str
Element name
Returns
-------
dict | None
None is returned if no coordinates are stored.
Example output::
{'x': '34374.509', 'y': '206624.15'}
If element_type == 'lines'
{'from': None, 'to': {'x': '34802.251', 'y': '206769.654'}}
"""
bus = feeder_mapping[element_type][name]
if element_type == "lines":
from_coords = feeder_mapping["bus_coords"].get(bus["from"])
to_coords = feeder_mapping["bus_coords"].get(bus["to"])
return {"from": from_coords, "to": to_coords}
if element_type == "transformers":
bus = bus["primary"]
return feeder_mapping["bus_coords"].get(bus)
|
92b02255694a9e87f9dfd5b2e3562ef9f1e2c362
| 180,085 |
def get_dummy_stratas(metadatas: dict) -> dict:
"""
Create a dummy, one-factor 'no_stratification' categorical
metadata variable to stratify on for each metadata table.
Parameters
----------
metadatas : dict
Key = Metadata file path.
Value = Metadata table.
Returns
-------
stratas : dict
Key = Metadata file path.
Value = List of one dummy 'no_stratification' variable to stratify on.
"""
stratas = {}
for md_fp, md in metadatas.items():
md['no_stratification'] = 'no_stratification'
stratas[md_fp] = ['no_stratification']
return stratas
|
1500cc0f3078e8796debe2876872cb2e3dc3882e
| 236,162 |
import random
def _exponential_backoff(max_tries):
"""
Returns a list of floating point numbers, each drawn uniformly from [0, 2^i - 1) for i in 0 to max_tries - 1
"""
return [random.random() * (2**i - 1) for i in range(0, max_tries)]
|
496aee1d4745d6e5452e2386f57e1a2290926721
| 97,077 |
def verify_name_in_series(df, y_name):
"""
Verify that a column name is in the dataframe. Return a list
of the x names without the y_name
"""
x_names = list(df.columns)
if y_name not in x_names:
raise KeyError(f"Name : {y_name!r} not in dataframe")
x_names.remove(y_name)
return x_names
|
0959ec36e018fd43a4693c5f6dd2e12dd5f89cc6
| 303,657 |
def clean_tag(tag):
"""
Format tag to match e621 format
"""
tag = tag.replace(' ', '_')
return tag
|
68af2880c02af6e1d60fe577fb8a1685f7cc3157
| 531,779 |
from pathlib import Path
from typing import Tuple
import re
def get_file_versions_after_bump(cwd: Path) -> Tuple[list, list]:
"""
Read all version numbers and test whether they were correctly bumped (or not bumped if tagged/not matched)
:param cwd: Current Working Dir
:return: List of all regex matches, one for whitelisted one for blacklisted
"""
valid_version_regex = r"(?<!\.)\d+(?:\.\d+){2}(?:-SNAPSHOT)?(?!\.)"
with open(f"{cwd}/bump_version_test_files/bump_test_file_whitelisting", "r") as bumped_file_whitelisted:
bumped_data = bumped_file_whitelisted.read()
bumped_versions_whitelisted = re.findall(valid_version_regex, bumped_data)
with open(f"{cwd}/bump_version_test_files/bump_test_file_blacklisting", "r") as bumped_file_blacklisted:
bumped_data = bumped_file_blacklisted.read()
bumped_versions_blacklisted = re.findall(valid_version_regex, bumped_data)
return bumped_versions_whitelisted, bumped_versions_blacklisted
|
8168533203aefe3503955391266dae23e04bc18b
| 146,188 |
import csv
def load_object_detection_results(filename, width_height=False):
"""Load object detection results.
The csv file should contain 8 columns
(frame id, xmin, ymin, xmax, ymax, t, score, object id)
or 7 columns
(frame id, xmin, ymin, xmax, ymax, t, score).
Args
filename(string): filename of object detection results in csv format.
width_height(bool): Return box format [xmin, ymin, w, h, ...] if True.
Otherwise, return [xmin, ymin, xmax, ymax, ...].
Default: False.
Return
dets(dict): a dict mapping frame id to bounding boxes.
"""
dets = {}
with open(filename, 'r') as f:
reader = csv.reader(f)
_ = next(reader)
for row in reader:
if len(row) == 8:
frame_id, xmin, ymin, xmax, ymax, t, score, obj_id = row
if int(frame_id) not in dets:
dets[int(frame_id)] = []
if xmin and ymin and xmax and ymax and t and score and obj_id:
if width_height:
dets[int(frame_id)].append([
float(xmin), float(ymin), float(xmax)-float(xmin),
float(ymax)-float(ymin), int(float(t)),
float(score), int(float(obj_id))])
else:
dets[int(frame_id)].append([
float(xmin), float(ymin), float(xmax), float(ymax),
int(float(t)), float(score), int(float(obj_id))])
elif len(row) == 7:
frame_id, xmin, ymin, xmax, ymax, t, score = row
if int(frame_id) not in dets:
dets[int(frame_id)] = []
if xmin and ymin and xmax and ymax and t and score:
if width_height:
dets[int(frame_id)].append([
float(xmin), float(ymin), float(xmax)-float(xmin),
float(ymax)-float(ymin), int(float(t)), float(score)])
else:
dets[int(frame_id)].append([
float(xmin), float(ymin), float(xmax), float(ymax),
int(float(t)), float(score)])
return dets
|
ca9f70fe0823fc86eabe0e3d86b1bd5206730146
| 449,426 |
def roc_auc_preprocess(positives, negatives, roc_auc):
"""ROC AUC analysis must be preprocessed using the number of positive and
negative instances in the entire dataset and the AUC itself.
Args:
positives (int): number of positive instances in the dataset
negatives (int): number of negative instances in the dataset
roc_auc (float): ROC AUC
returns:
(positive, negative) tuple that can be used for `prob_below` and
`credible_interval`
"""
unique_combinations = positives * negatives
# correctly ranked combinations are pairs of positives and negatives
# instances where the model scored the positive instance higher than the
# negative instance
correctly_ranked_combinations = roc_auc * unique_combinations
# the number of incorrectly ranked combinations is the number of
# combinations that aren't correctly ranked
incorrectly_ranked_combinations = (
unique_combinations - correctly_ranked_combinations
)
return correctly_ranked_combinations, incorrectly_ranked_combinations
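# A small worked example: with 10 positives, 5 negatives and AUC = 0.8,
# 40 of the 50 positive-negative pairs are ranked correctly.
assert roc_auc_preprocess(10, 5, 0.8) == (40.0, 10.0)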
|
1a6ca8b9bf1f7bef5647ded675e96cfe04b4b02e
| 225,630 |
def convert_key_to_title(snake_case_key):
"""Replace underscores with spaces and convert to title case"""
return snake_case_key.replace('_', ' ').title()
|
1cad3e8e3c5abae9127786752686faf076c3a2ad
| 316,420 |
def build_tree(elems, root, currentkey, parentkey):
""" Constructs a hierarchic tree from a flat dictionary.
https://stackoverflow.com/questions/35452512/construct-hierarchy-tree-from-python-flat-parent-children-dict-list
:param elems: flat dictionary
:param root: root node of current recursion
:param currentkey: key of current element
:param parentkey: key of the parent element
"""
elem_with_children = {}
def _build_children_sub_tree(parent):
cur_dict = {
'id': parent,
# put whatever attributes here
}
if parent in elem_with_children.keys():
cur_dict["children"] = [_build_children_sub_tree(cid) for cid in elem_with_children[parent]]
return cur_dict
for item in elems:
cid = item[currentkey]
pid = item[parentkey]
elem_with_children.setdefault(pid, []).append(cid)
res = _build_children_sub_tree(root)
return res
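# A minimal sketch with hypothetical keys 'id'/'pid': two rows hanging off a root node 0.
rows = [{'id': 1, 'pid': 0}, {'id': 2, 'pid': 1}]
assert build_tree(rows, 0, 'id', 'pid') == {'id': 0, 'children': [{'id': 1, 'children': [{'id': 2}]}]}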
|
60d21e56027a2b396d5f58e466ea58786135268d
| 665,566 |
def count_files(tree, repo):
"""
Count how many files there are in a repository.
"""
num_files = 0
trees = []
visited = set()
visited.add(tree.id)
trees.append(tree)
while trees:
current_tree = trees.pop()
for entry in current_tree:
if entry.type == "tree":
if entry.id not in visited:
trees.append(repo[entry.id])
visited.add(entry.id)
else:
num_files += 1
return num_files
|
a1f5af0c5dc6ac31051b8f1247fd568f2000fbe2
| 544,408 |
def hex(color: tuple, prefix: str='#') -> str:
"""
Convert RGB to HEX.
:param color: 3-element tuple with color RGB values
:param prefix: string prefix
:return: string with color in hex
"""
if len(color) != 3:
raise ValueError('Color should be a 3 element tuple')
if not all([0 <= v <= 255 for v in color]):
raise ValueError('RGB values have to be in range from 0 to 255')
return '{}{:02x}{:02x}{:02x}'.format(prefix, color[0], color[1], color[2])
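# Example usage (note this function shadows the builtin hex()):
assert hex((255, 0, 128)) == '#ff0080'
assert hex((255, 0, 128), prefix='0x') == '0xff0080'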
|
83ef829de8537722fd69c208044f38ec2bcdc057
| 536,369 |
from typing import Tuple
import requests
def _to_seloger_geographical_code(post_code: str) -> Tuple[str, str]:
"""
Convert French 'Code Postal' to seloger's appropriate custom geographical code.
Args:
post_code: standard French post codes.
Returns:
- 'cp' or 'ci', the type of geographical code returned
- the actual geographical code, in seloger's nomenclature.
Raises:
ValueError if we don't get a match in seloger's database of codes.
RuntimeError if we get more than two matches.
"""
post_code = str(post_code)
url = (
f"https://autocomplete.svc.groupe-seloger.com/api/v2.0/auto/complete/fra"
f"/63/10/8/SeLoger?text={post_code}"
)
response = requests.get(url)
cities = response.json()
matches = []
for city in cities:
if city.get("Type", None) == "Group":
if post_code == str(city.get("Params", {}).get("cp", "")):
matches = [("cp", post_code)]
break
if post_code in city.get("Meta", {}).get("Zips", []):
matches.append(("ci", city["Params"]["ci"]))
continue
if post_code == str(city.get("Params", {}).get("cp", "")):
matches.append(("cp", post_code))
if not matches:
msg = f"Unknown post code '{post_code}'."
raise ValueError(msg)
if len(matches) == 2:
# for department that match large cities (e.g. Paris, Lyon) we can get 2 matches
# One for the department and one for the group of arrondissements
# we arbitrarily return the department code
if set([geo_type for geo_type, geo_code in matches]) != {
"cp",
"ci",
}: # pragma: no cover
msg = (
f"Got multiple matches for post code '{post_code}'. "
"This should never happen!"
)
raise RuntimeError(msg)
matches = [match for match in matches if match[0] == "cp"]
if len(matches) > 2: # pragma: no cover
msg = (
f"Got multiple matches for post code '{post_code}'. "
"This should never happen!"
)
raise RuntimeError(msg)
return matches[0]
|
d0d7a7d15dff6fd17921a85f80b99006caff41f8
| 429,505 |
def is_dark(x, y):
"""
Determine if a given pixel coordinate is a 'dark' pixel or not.
"""
# Even coordinate sums (0,0), (0,2), (1,1), (1,3) are dark
# Odd coordinate sums (0,1), (0,3), (1,0), (1,2) are not
return (x + y) % 2 == 0
|
0bf137a98e89dc6f9b688aa8e467b709ff53991a
| 38,572 |
def _read_file(file_name) -> str:
"""
Reads file to string
"""
with open(file_name) as file:
content = file.read()
content = content.rstrip('\n')
return content
|
e8fca613fbb3b8169fdd2bac6bf0a34456a74657
| 172,743 |
def get_start(model):
"""
Get the start position of a (feature) location. For point locations
the position value is returned. In case of uncertain start end,
the minimum is returned.
"""
if model.get("location"):
model = model["location"]
if model["type"] == "range":
if model["start"].get("uncertain"):
return get_start(model["start"])
else:
return model["start"]["position"]
elif model["type"] == "point":
return model["position"]
|
0db17ea852e6478031d9c940ef43e8dd14acff73
| 416,223 |
def _ray_remote(function, params):
"""This is a ray remote function (see ray documentation). It runs the `function` on each ray worker.
:param function: function to be executed remotely.
:type function: callable
:param params: Parameters of the run.
:type params: dict
:return: ray object
"""
r = function(params)
return r
|
0aae675af23be189b8e15504ccde41e095f4b4d6
| 32,952 |
def IsThinArchive(path):
"""Returns whether the given .a is a thin archive."""
with open(path, 'rb') as f:
return f.read(8) == b'!<thin>\n'  # bytes literal: the file is opened in binary mode
|
905b1a0e999b99e387e09036aa393e5a54599664
| 397,378 |
import requests
import json
def get_coordinates_info(lon, lat):
"""
Request from geocode.arcgis API an info about (lat, lon) coordinates
:param lon: a number for Longitude
:param lat: a number for Latitude
:return: dict if found data else None
"""
path = r"https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/reverseGeocode?" \
"location={}%2C{}&langCode=en&outSR=&forStorage=false&f=pjson".format(lon, lat)
x = requests.get(path)
if x.ok:
return json.loads(x.text)
|
e421909cab65587523197e4e6cbed1afbb37576c
| 49,858 |
def fixed_crops(X, crop_shape, crop_type):
"""
Take center or corner crops of images.
Inputs:
- X: Input data, of shape (N, C, H, W)
- crop_shape: Tuple of integers (HH, WW) giving the size to which each
image will be cropped.
- crop_type: One of the following strings, giving the type of crop to
compute:
'center': Center crop
'ul': Upper left corner
'ur': Upper right corner
'bl': Bottom left corner
'br': Bottom right corner
Returns:
Array of cropped data of shape (N, C, HH, WW)
"""
N, C, H, W = X.shape
HH, WW = crop_shape
x0 = (W - WW) // 2  # integer division so the slice indices are ints
y0 = (H - HH) // 2
x1 = x0 + WW
y1 = y0 + HH
if crop_type == 'center':
return X[:, :, y0:y1, x0:x1]
elif crop_type == 'ul':
return X[:, :, :HH, :WW]
elif crop_type == 'ur':
return X[:, :, :HH, -WW:]
elif crop_type == 'bl':
return X[:, :, -HH:, :WW]
elif crop_type == 'br':
return X[:, :, -HH:, -WW:]
else:
raise ValueError('Unrecognized crop type %s' % crop_type)
|
af35228a6e61e62920a60df7b3fd5431b047ff38
| 643,351 |
def py_opp(x):
"""
Function for python unary operator ``-``.
@param x float
@return `-x`
"""
return -x
|
4d7f6260da54eaa9ea3d6e71a10cdb61d4bba8c1
| 43,396 |
def addresses_for_key(gpg_keychain, key):
"""
Takes a key and extracts its email addresses.
"""
fingerprint = key['fingerprint']
addresses = []
for keychain_key in gpg_keychain.list_keys():
if keychain_key['fingerprint'] == fingerprint:
# parse raw email addresses (without preceding name)
addresses.extend([address.split('<')[-1].strip('>') for address in keychain_key['uids'] if address])
assert len(addresses) > 0
return addresses
|
4621bbd2451318f0f34b9d6a843e228e3aed892e
| 452,935 |
import math
def stPlotFrom3Dmat(array3D, rowChoice = None, colChoice = None):
"""
This function creates a 2D space-time plot (needs to be colored later) from a 3D matrix.
Will return error if input matrix is not 3D.
Allows user to specify rowChoice or colChoice for the slicing.
If neither rowChoice or colChoice is selected, will default to middle row.
If both contain values, will default to rowChoice (essentially ignores colChoice).
"""
if array3D.ndim != 3: # If not a 3D array
print("Error: input array is not 3 dimensional")
return
if rowChoice is None and colChoice is None: # If neither is given, go with middle row
rowChoice = int(math.ceil(array3D.shape[0] / 2))
if rowChoice is None: # If only colChoice is selected (explicit None check so index 0 is valid)
return array3D[:, colChoice, :]
else:
return array3D[rowChoice, :, :]
|
df544c3f9a6d29fd398d26c82b0776bc6a7de4ae
| 231,824 |
def byte_to_int16(lsb, msb):
"""
Converts two bytes (lsb, msb) to an int16
:param lsb: Least significant byte
:param msb: Most significant byte
:return: the 16 bit integer from the two bytes
"""
return (msb << 8) | lsb
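# Example: the little-endian byte pair 0x34, 0x12 reassembles to 0x1234.
assert byte_to_int16(0x34, 0x12) == 0x1234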
|
483a7205e57d615bf0122b437a5c000d9e859620
| 632,842 |
def BitSet(x, n):
"""Return whether nth bit of x was set"""
return bool(x[0] & (1 << n))
|
d331fe0d2f6367409bf1933954a9de3fb051446f
| 673,824 |
import math
def rev_pentagonal(n):
"""Reverse pentagonal. Return 0 for n < 0 or negate result is if is not positive integer result."""
if n < 0:
return 0
delta = int(math.sqrt(1 + 24 * n))
# n is pentagonal iff delta is a perfect square with delta % 6 == 5
if delta * delta == 1 + 24 * n and delta % 6 == 5:
return (delta + 1) // 6
else:
return - (delta + 1) // 6
|
43bcbac5c1e0ac498089063f22df79840043f68a
| 99,023 |
def parse_requirements(filename):
""" load requirements from a pip requirements file """
with open(filename) as f:  # context manager so the file handle is closed
lineiter = (line.strip() for line in f)
return [
line
for line in lineiter
if line and not line.startswith("#") and not line.startswith('git+')
]
|
838b05c9bc2238f8c420ba9bd9c834ddff0dd3d4
| 648,043 |
from warnings import warn
def _format_1(raw):
"""
Format data with protocol 1.
:param raw: returned by _load_raw
:return: formatted data
"""
data, metadata = raw[0]['data'], raw[0]['metadata']
# Check for more content
if len(raw) > 1:
base_key = 'extra'
key = base_key
count = 0
while key in metadata:
key = base_key + f'_{count}'
count += 1
metadata[key] = raw[1:]
warn('File contains extra information which will be returned in '
f'metadata[{key}].')
return data, metadata
|
0939ffb4242828a7857f0f145e4adeb90ec6cff1
| 20,791 |
def _calculate_emissions(emission_probs):
"""Calculate which symbols can be emitted in each state
"""
# loop over all of the state-symbol duples, mapping states to
# lists of emitted symbols
emissions = dict()
for state, symbol in emission_probs:
try:
emissions[state].append(symbol)
except KeyError:
emissions[state] = [symbol]
return emissions
|
a0a5ef927566567ba50f6f9ef452efb480793a68
| 366,220 |
from importlib import import_module
from typing import Callable
def import_function(function_path: str) -> Callable:
"""
Import a function from a dotted path.
Example:
>>> import_function("generate_changelog.pipeline.noop_func")
<function noop_func at 0x11016d280>
Args:
function_path: A dotted path to a function
Returns:
The callable function
"""
bits = function_path.split(".")
function_name = bits[-1]
module = import_module(".".join(bits[:-1]))
return getattr(module, function_name)
|
3105abee396d73e3580d737b1149dbdd00e0751b
| 32,507 |
from pathlib import Path
def get_config_path(root: str, idiom: str) -> Path:
"""Get path to idiom config
Arguments:
root {str} -- root directory of idiom config
idiom {str} -- basename of idiom config
Returns:
Path -- pathlib.Path to the idiom config file
"""
root_path = Path(root)
file_name = '{}.json'.format(idiom)
return root_path.joinpath(file_name)
|
86d65f11fbd1dfb8aca13a98e129b085158d2aff
| 704,498 |
def read(path, strip=False):
"""Read file at ``path`` and return content. Opt., ``strip`` whitespace."""
content = ''
with open(path) as fp:
content = fp.read()
if strip:
content = content.strip()
return content
|
dddc9f7226088ca42c7371b6f0253dfec4b519bb
| 176,316 |
import re
def _get_title(title: str) -> str:
"""Return normalized title.
Age restrictions in the form of "(16+)" will be removed. Otherwise,
the title will be returned unaltered.
"""
title = re.sub(r"\([0-9]+\+\)", "", title)
return title
|
6d05176340ecf34a79422b40924e8760bc767e1e
| 475,774 |
def convert_macaddr(addr):
"""Convert mac address to unique format."""
return addr.replace(':', '').lower()
|
020d90ccfa50287ae29f0bc2bedeb1d19da62c71
| 639,127 |
def _dest_path(path):
"""Returns the path, stripped of parent directories of .dSYM."""
components = path.split("/")
res_components = []
found = False
for c in components:
# Find .dSYM directory and make it be at the root path
if c.endswith(".dSYM"):
found = True
if found:
res_components.append(c)
return "/".join(res_components)
|
869efc4265e09c182a948f6580075846a6af7c9f
| 30,016 |
from pickle import dumps
from psycopg2 import Binary
def binary_pickle(obj):
"""
Converts a Python object (e.g. a class instance) into a Binary pickle for postgres storage
"""
return Binary(dumps(obj))
|
bbc77f5b4581ae4663ce573fe3bcccdaaf8f47f8
| 147,414 |
def __split_list(items, chunk_size):
"""
Splits the provided list into chunks of a specific size.
Parameters
----------
items : list
The list to split into chunks (renamed so it no longer shadows the builtin list)
chunk_size : int
The size of a chunk
"""
return [items[offs:offs+chunk_size] for offs in range(0, len(items), chunk_size)]
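# Illustration: a list of five items in chunks of two leaves a short tail.
assert __split_list([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]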
|
548449bc82c7dc085d437042d15fe956fe96cbc3
| 524,089 |
def unmap_from_unit_interval(y, lo=0., hi=1.):
""" Linearly map value in [0, 1] to [lo_val, hi_val] """
return y * (hi - lo) + lo
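# Example: the midpoint of the unit interval maps to the midpoint of [lo, hi].
assert unmap_from_unit_interval(0.5, lo=10.0, hi=20.0) == 15.0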
|
35a6dfbad855f80fa2034eb88cb41fcedc9a00d5
| 695,691 |
import pickle
def serialize(data):
"""Pickle some Python object."""
return pickle.dumps(data, protocol=2)
|
3c5730235c61dcbd00eef854fbf83c6c3f634944
| 129,786 |
def make_jobjects(entities, transformer, *args):
"""Run a sequence of entities through a transformer function that produces
objects suitable for serialization to JSON, returning a list of objects
and a dictionary that maps each entity's key_name to its index in the list.
Item 0 of the list is always None."""
jobjects = [None]
indexes = {}
for entity in entities:
index = len(jobjects)
jobject = transformer(index, entity, *args)
if jobject is not None:
jobjects.append(jobject)
indexes[entity.key().name()] = index
return jobjects, indexes
|
6d891488dc95f6c3ed6d69ab21f743c2fa721177
| 650,103 |
import requests
def verify_access_token(access_token: str) -> bool:
"""Checks if the access token is valid by making a request."""
response = requests.get(
"https://api.spotify.com/v1/me",
headers={"Authorization": f"Bearer {access_token}"},
)
if response.status_code == 200:
return True
if response.status_code == 401:
return False
raise Exception(f"Unhandled status ({response.status_code})")
|
60917fab6d8d2634c40d26af1fff53d680fe7e7f
| 428,999 |
def filter_laps_by_driver(laps, drivers):
"""Filter lap time data to get only laps driven by the driver in `drivers`.
Parameters
----------
`laps` : dict
Timings for each driver per lap as returned by `api.get_all_laps` data key
`drivers` : list
A valid driver_id used by Ergast API
Returns
-------
dict
`laps` filtered to contain only a list of timings per lap for the specified drivers
"""
if len(drivers) == 0:
return laps
else:
result = {
'data': {},
'race': laps.get('race', ''),
'season': laps.get('season', ''),
'round': laps.get('round', '')
}
for lap, times in laps['data'].items():
result['data'][lap] = [t for t in times if t['id'] in drivers]
return result
|
5ff51ae86302e577f1a2585e633cca4b49f9e643
| 334,097 |
from pathlib import Path
def completely_pruned(wkspace):
"""Determine which systematics are completely pruned.
Parameters
----------
wkspace : str or os.PathLike
Path of TRExFitter workspace.
Returns
-------
list(str)
Names of all completely pruned systematics.
"""
pairs = []
with (Path(wkspace) / "PruningText.txt").open("r") as f:
for line in f:
if not line.startswith(" --->>"):
continue
sys, status = line.strip()[7:].split(" ", 1)  # maxsplit keeps multi-word statuses intact
if status == "is not present":
continue
pairs.append((sys, status))
unique = sorted(set([p[0] for p in pairs]))
tests = {u: 0 for u in unique}
for sys, status in pairs:
if status in ("is kept", "is shape only", "is norm only"):
tests[sys] += 1
return [k for k, v in tests.items() if v == 0]
|
8026e6ba23280c70a0da8474b92b52b502455bc6
| 693,418 |
def to_port_num(str_in):
"""
Tries to coerce provided argument `str_in` to a port number integer
value.
Args:
str_in (string): String to coerce to port number.
Returns:
(int) or (None): Integer port number if provided `str_in` is a valid
port number string, None otherwise.
"""
try:
port = int(str_in)
if port < 1 or port > 65535:
return None
except ValueError:
return None
return port
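# Examples: valid port strings coerce, out-of-range or non-numeric give None.
assert to_port_num("8080") == 8080
assert to_port_num("70000") is None
assert to_port_num("http") is None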
|
7b922b8d809b87a45623ac398773180939ac0b0f
| 664,197 |
def get_citation_links(session, profile_url):
"""
From the given url to Google scholar page, extract all absolute links to
paper citations.
Args:
session (HTMLSession)
profile_url (str): a url to a person's Google Scholar page.
Returns:
list: list of absolute URLs to paper cites.
"""
r = session.get(profile_url)
links = r.html.absolute_links
assert len(links) > 0, "No paper cites, did we hit a captcha?"
paper_links = [link for link in links if "view_citation" in link]
return paper_links
|
38d040217db7b95c35a614011bbd2286c315991a
| 639,424 |
import re
def match_mm3_bond(mm3_label):
"""Matches MM3* label for bonds."""
return re.match(r'[\sa-z]1', mm3_label)
|
5759365883106fc557839b4ef38ab1e242536381
| 430,234 |
def pyimpl_identity(x):
"""Implement `identity`."""
return x
|
4a758bca2541051cf4065da7490e34e9648be60e
| 476,971 |
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
ans = {}
for val in [*corpus]:
ans[val] = 0
n = len(corpus[page])
if n != 0:
one_from_page = damping_factor / n
for val in [*corpus[page]]:
ans[val] += one_from_page
N = len(corpus)
for val in [*corpus]:
ans[val] += (1 - damping_factor) / N
else:
N = len(corpus)
if N != 0:
for val in [*corpus]:
ans[val] += (1) / N
return ans
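# A toy corpus for illustration: two pages that link to each other.
# From page 'a', page 'b' receives the damping mass plus its share of the random jump.
toy_corpus = {'a': {'b'}, 'b': {'a'}}
dist = transition_model(toy_corpus, 'a', 0.85)
assert abs(dist['a'] - 0.075) < 1e-9 and abs(dist['b'] - 0.925) < 1e-9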
|
76786c4bfacbf7fa7d3a495073136051fb68298c
| 557,217 |
def is_2D(mrc_data):
"""
Checks if the given 3D numpy array is effectively 2D (has a singleton dimension)
Args:
mrc_data: Numpy array to check
Returns:
Whether the array is 2D
"""
return (mrc_data.shape[0] == 1 or mrc_data.shape[1] == 1 or mrc_data.shape[2] == 1)
|
8c610ec7d350b415d11de3bb17ff0073af4ab194
| 198,115 |
def net_income(ebt, tax):
"""
Computes net income.
Parameters
----------
ebt : int or float
Earnings before tax
tax : int or float
Tax expense
Returns
-------
out : int or float
Net income
"""
return ebt - tax
|
7d72f10d98d3646837ad3f5eccb6c19d2900ea38
| 35,142 |
from bs4 import BeautifulSoup
def scrape(client):
"""
Return the Beautiful Soup object
"""
# get it
html = client.get("/").data
# scrape the site with bs4
soup = BeautifulSoup(html, "html.parser")
return soup
|
8dc6b5b8271ba717a2c403c7b9a39334a45a3077
| 289,620 |
def get_highest_distances(results, num):
"""
Takes results from instance_distance_analysis.run_distance_analysis and a number of top
results to return.
Returns 3 lists.
- Documents with the largest median male instance distance
- Documents with the largest median female instance distance
- Documents with the largest difference between median male & median female instance distances \
each list contains tuples, where each tuple has a document and the median male/female/difference \
instance distance
:param results: dictionary of results from run_distance_analysis
:param num: number of top distances to return
:return: 3 lists of tuples.
"""
male_medians = []
female_medians = []
difference_medians = []
for document in list(results.keys()):
male_medians.append((results[document]['male']['median'], document))
female_medians.append((results[document]['female']['median'], document))
difference_medians.append((results[document]['difference']['median'], document))
male_top = sorted(male_medians, reverse=True)[0:num]
female_top = sorted(female_medians, reverse=True)[0:num]
diff_top = sorted(difference_medians, reverse=True)[0:num]  # largest differences, per the docstring
return male_top, female_top, diff_top
|
a2c1b09258d394673fead9e8fdc83797c788ac42
| 235,182 |
def tag_case(tag, uppercased, articles):
"""
Changes a tag to the correct title case while also removing any periods:
'U.S. bureau Of Geoinformation' -> 'US Bureau of Geoinformation'. Should
properly upper-case any words or single tags that are acronyms:
'ugrc' -> 'UGRC', 'Plss Fabric' -> 'PLSS Fabric'. Any words separated by
a hyphen will also be title-cased: 'water-related' -> 'Water-Related'.
Note: No check is done for articles at the beginning of a tag; all articles
will be lowercased.
tag: The single or multi-word tag to check
uppercased: Lower-cased list of words that should be uppercased (must be
lower-cased to facilitate checking)
articles: Lower-cased list of words that should always be lower-cased:
'in', 'of', etc
"""
new_words = []
for word in tag.split():
cleaned_word = word.replace('.', '')
#: Upper case specified words:
if cleaned_word.lower() in uppercased:
new_words.append(cleaned_word.upper())
#: Lower case articles/conjunctions
elif cleaned_word.lower() in articles:
new_words.append(cleaned_word.lower())
#: Title case everything else
else:
new_words.append(cleaned_word.title())
return ' '.join(new_words)
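# A hypothetical pair of word lists, matching the docstring's examples:
uppercased_words = ['us', 'ugrc', 'plss']
article_words = ['of', 'in', 'the']
assert tag_case('U.S. bureau Of Geoinformation', uppercased_words, article_words) == 'US Bureau of Geoinformation'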
|
2447453964d16db201fa63aa14053e5e2d25ebe7
| 686,707 |
def DictHasElems(pDict):
"""checks if a dictionary is
not empty"""
return bool(pDict)
|
ed599ac65e8ce3b27d3e3fa8e49fc39328e1bd78
| 167,095 |
import glob
import os
def load_videos(videos_dir: str):
"""
Return video file paths from provided directory
:param videos_dir:
:return:
"""
videos = []
for x in glob.glob(os.path.join(videos_dir, "*")):  # join so a trailing slash is not required
videos.append(x.replace('\\', '/'))
return videos
|
1724109bcccc4e0daaac6bdebf3689444753d5ed
| 69,178 |
def delete_comments(line):
"""Deletes comments in parentheses from a line."""
fields = line.split(')')
result = []
for f in fields:
if '(' in f:
result.append(f.split('(',1)[0])
else:
result.append(f)
return ''.join(result)
|
f20b1070ec0dbc6274407a9490ce9649789efc7a
| 116,865 |
def get_subject(proposition):
"""
Returns the subject of a given proposition
"""
return proposition[1][0]
|
479018a9302026f9d1a473969bb409f04e85e8fb
| 602,522 |
import re
def words(text, reg=re.compile('[a-z0-9]+')):
"""Return a list of the words in text, ignoring punctuation and
converting everything to lowercase (to canonicalize).
>>> words("``EGAD!'' Edgar cried.")
['egad', 'edgar', 'cried']
"""
return reg.findall(text.lower())
|
ff2ba22c24b6edab6d2beb49ba18e182cc7cd440
| 111,467 |
def process_smooth_dates_fit_inputs(x, y, dt_idx, reg_dates):
"""Sanitises the inputs to the SmoothDates fitting method"""
if hasattr(x, 'index') and hasattr(y, 'index'):
assert x.index.equals(y.index), 'If `x` and `y` have indexes then they must be the same'
if dt_idx is None:
dt_idx = x.index
x = x.values
y = y.values
assert dt_idx is not None, '`dt_idx` must either be passed directly or `x` and `y` must include indexes'
if reg_dates is None:
reg_dates = dt_idx
return x, y, dt_idx, reg_dates
|
c2c3194afdc6b8b729a331139988c972ed54ad9e
| 617,503 |
def dictionarify_recpat_data(recpat_data):
"""
Convert a list of flat dictionaries (single-record dicts) into a dictionary.
If the given data structure is already a dictionary, it is left unchanged.
"""
return {track_id[0]: patterns[0] for track_id, patterns in \
[zip(*item.items()) for item in recpat_data]} \
if not isinstance(recpat_data, dict) else recpat_data
|
d1cdab68ab7445aebe1bbcce2f220c73d6db308f
| 862 |
def CMDMSG(color):
"""
Returns a properly formatted CMDMSG message for the given color.
"""
return "CMDMSG\t{0}".format(color)
|
455be73d465d825a0a92403e7be5af4ed170de85
| 186,997 |
def convert_SE3_to_arr(SE3_dict, timestamps=None):
"""Convert SE3 dictionary to array dictionary
Args:
SE3_dict (dict): a dictionary containing SE3s
timestamps (list): a list of timestamps
Returns:
poses_dict (dict): each pose contains a [4x4] array
"""
poses_dict = {}
if timestamps is None:
key_list = sorted(list(SE3_dict.keys()))
else:
key_list = timestamps
for cnt, i in enumerate(SE3_dict):
poses_dict[key_list[cnt]] = SE3_dict[i].pose
return poses_dict
|
16a811e924aae5d03aedf944d34e0c6c5810fa6e
| 614,099 |
def is_crypto_code(dx):
"""
Crypto code is present ?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
"""
if dx.get_tainted_packages().search_methods("Ljavax/crypto/.",
".",
"."):
return True
if dx.get_tainted_packages().search_methods("Ljava/security/spec/.",
".",
"."):
return True
return False
|
fa20ae7c8fe37889c1d8b40782bf0eb00e36c141
| 486,847 |
def _is_hdf5_filepath(filepath):
"""Predicate the filepath is a h5 file."""
return (filepath.endswith('.h5') or filepath.endswith('.hdf5') or
filepath.endswith('.keras'))
|
2be4518b2a83d93642373d0ec8663cba73df69cd
| 109,965 |
def repl_preadd(name: str, old: str, new: str):
"""
if the string old is in the string name, it is removed, and the string new
is added at the beginning of the string name;
the updated string is returned
"""
if old in name:
name = name.replace(old, '')
name = new + name
return name
|
b272ef96d2f2ec3530a67f35f440fdd75d25b5d1
| 569,026 |
def server_error(exception): # pylint: disable=unused-argument
"""Internal Server Error
:return: "Server error"
"""
return "Server error"
|
f2fcd433e54c1cddef853a8588e3e92c416984b1
| 612,074 |
from datetime import datetime
def part_of_day() -> str:
"""Checks the current hour to determine the part of day.
Returns:
str:
Morning, Afternoon, Evening or Night based on time of day.
"""
am_pm = datetime.now().strftime("%p")
current_hour = int(datetime.now().strftime("%I"))
if current_hour in range(4, 12) and am_pm == 'AM':
greet = 'Morning'
elif am_pm == 'PM' and (current_hour == 12 or current_hour in range(1, 4)):
greet = 'Afternoon'
elif current_hour in range(4, 8) and am_pm == 'PM':
greet = 'Evening'
else:
greet = 'Night'
return greet
|
5736b7049924197595341a173e642b8e3ea9e856
| 34,588 |
def username(user_id, navigation):
"""Login and returns username for the given user_id """
username, password = navigation.get_credentials(user_id)
navigation.visit_page('LoginPage')
navigation.page.login(username, password)
return username
|
c772c07705d98c1db239a019dda5fc6a4254a984
| 386,910 |
def get_related_name(model, field):
""" Extract the related name for a field.
`model` is the model on which the related field exists. `field` is
the field from the original model.
"""
opts = model._meta.concrete_model._meta
all_related_objects = [r for r in opts.related_objects]
for relation in all_related_objects:
if relation.field == field:
if relation.related_name and relation.related_name[-1] == '+':
return None
elif relation.related_name:
return relation.related_name
else:
return relation.name + '_set'
return None
|
52a9a5b0651514010c1c32d3911df3663482a42b
| 571,270 |
def get_fastq_read_ids(ref_path):
"""Extracts the read ids from a fastq file."""
read_ids = set()
with open(ref_path, 'r') as ref:
for line in ref:
if line.startswith('@'): # i.e if line is header
# split the line on spaces, take the first element, remove @
read_id = line.split(' ')[0].replace('@', '')
read_ids.add(read_id)
return read_ids
|
85e4564c6df617e22dd0b76519cdbad57aefced7
| 107,492 |
def pixel_size_based_on_coordinate_transform(dataset, coord_trans, point):
"""Get width and height of cell in meters.
Calculates the pixel width and height in meters given a coordinate
transform and reference point on the dataset that's close to the
transform's projected coordinate system. This is only necessary
if dataset is not already in a meter coordinate system, for example
dataset may be in lat/long (WGS84).
Args:
dataset (gdal.Dataset): a projected GDAL dataset in the form of
lat/long decimal degrees
coord_trans (osr.CoordinateTransformation): an OSR coordinate
transformation from dataset coordinate system to meters
point (tuple): a reference point close to the coordinate transform
coordinate system. must be in the same coordinate system as
dataset.
Returns:
pixel_diff (tuple): a 2-tuple containing (pixel width in meters, pixel
height in meters)
"""
# Get the first points (x, y) from geoTransform
geo_tran = dataset.GetGeoTransform()
pixel_size_x = geo_tran[1]
pixel_size_y = geo_tran[5]
top_left_x = point[0]
top_left_y = point[1]
# Create the second point by adding the pixel width/height
new_x = top_left_x + pixel_size_x
new_y = top_left_y + pixel_size_y
# Transform two points into meters
point_1 = coord_trans.TransformPoint(top_left_x, top_left_y)
point_2 = coord_trans.TransformPoint(new_x, new_y)
# Calculate the x/y difference between two points
# taking the absolute value because the direction doesn't matter for pixel
# size in the case of most coordinate systems where y increases up and x
# increases to the right (right handed coordinate system).
pixel_diff_x = abs(point_2[0] - point_1[0])
pixel_diff_y = abs(point_2[1] - point_1[1])
return (pixel_diff_x, pixel_diff_y)
|
4632fc8703d75b3e63974efc6383578a902639b2
| 580,585 |
def compute_feature_derivative(errors, feature, coefficient, l2_penalty, feature_is_constant):
"""
Purpose: Compute derivative of a feature wrt coefficient
Input : Error between true output and predicted output values, feature,
coefficient, L2 penalty strength, if the feature is constant or not
Output : Derivative of the feature wrt coefficient
"""
if feature_is_constant:
derivative = feature.T.dot(errors)
else:
# the L2 penalty applies to non-constant features only; the intercept is not regularized
derivative = feature.T.dot(errors) - 2.0 * l2_penalty * coefficient
return derivative
|
8a014540ed071f8cbd140c8cebb6d2370cdb75a5
| 450,230 |
def standardize(arr, stats=None):
"""Standardize the input.
Parameters
----------
arr : numpy.ndarray
An array to be standardized.
stats : tuple of float, default None
Average and standard deviation used to standardize the arr.
If None, those of the arr are used.
Returns
-------
arr_std : numpy.ndarray
A standardized array
stats : tuple of float
Average and standard deviation used to standardize the input
array.
"""
if arr.ndim > 2:
raise ValueError("'arr' must be 1D or 2D.")
if stats is None:
if arr.ndim == 1:
stats = (arr.mean(), arr.std())
else:
stats = (arr.mean(axis=1), arr.std(axis=1))
if arr.ndim == 1:
arr_std = (arr - stats[0]) / stats[1]
else:
arr_std = (arr - stats[0].reshape(-1, 1)) / stats[1].reshape(-1, 1)
return arr_std, stats
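# A 1D sketch (assumes numpy is available): the returned stats let the
# same transform be reapplied to new data.
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0])
x_std, stats = standardize(x)
assert abs(x_std.mean()) < 1e-12 and abs(x_std.std() - 1.0) < 1e-12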
|
c6edd3aa132e9b08d5ca3e56c71633bff7295381
| 448,671 |
def format_mode_f1(mode):
"""Format mode for DT (F1 series).
Args:
mode: Mode (analog, input, alternate).
Returns:
DT mode definition.
"""
if mode == "analog":
return "ANALOG"
elif mode == "input":
return "GPIO_IN"
elif mode == "alternate":
return "ALTERNATE"
raise ValueError(f"Unsupported mode: {mode}")
|
2f6e4077153983c929edd6cf378c7fb25cf0c214
| 465,562 |
def load_feature(feature_file_path):
"""
Load list of features from a text file
Features are separated by newline
"""
with open(feature_file_path) as f:  # context manager so the file handle is closed
return [line.strip() for line in f]
|
b17ac771882408168309281e824cb041f6ec84b3
| 381,042 |
def get_tiles(image, tile_size):
"""Splits an image into multiple tiles of a certain size"""
tile_images = []
x = 0
y = 0
while y < image.height:
im_tile = image.crop((x, y, x+tile_size, y+tile_size))
tile_images.append(im_tile)
if x < image.width - tile_size:
x += tile_size
else:
x = 0
y += tile_size
return tile_images
|
b9dce156f1937e9a68e8ee6053b9eb05c0100670
| 137,238 |
import pyarrow
def get_hdfs_connector(host: str, port: int, user: str):
""" Initialise a connector to HDFS
Parameters
----------
host: str
IP address for the host machine
port: int
Port to access HDFS data.
user: str
Username on Hadoop.
Returns
----------
fs: pyarrow.hdfs.HadoopFileSystem
"""
return pyarrow.hdfs.connect(host=host, port=port, user=user)
|
320732bc106961e8c7bc926af7995c91d3ad6248
| 60,323 |
def remove_none_values(temp_value):
"""Remove None from dicts to ensure that nulls are ignored"""
return_value = dict()
for key, value in temp_value.items():
if value is not None:
return_value[key] = value
return return_value
|
fbf17b2e7620662c03a71e81dc1c774dd5e29cdb
| 565,261 |
def check_day(n):
"""
Given an integer between 1 and 7 inclusive,
return either string 'work!' or string 'rest!'
depending on whether the day is a workday or not
"""
if n < 1 or n > 7:
return None
if n >= 6:
return "rest!"
return "work!"
|
f7329fb411a737c2fb9799a37a0bb52e28d4db83
| 36,382 |
def get_priority(filter_item):
""" Internal worker function to return the frame-filter's priority
from a frame filter object. This is a fail free function as it is
used in sorting and filtering. If a badly implemented frame
filter does not implement the priority attribute, return zero
(otherwise sorting/filtering will fail and prevent other frame
filters from executing).
Arguments:
filter_item: An object conforming to the frame filter
interface.
Returns:
The priority of the frame filter from the "priority"
attribute, or zero.
"""
# Do not fail here, as the sort will fail. If a filter has not
# (incorrectly) set a priority, set it to zero.
return getattr(filter_item, "priority", 0)
|
4b23c4d0b247f2f91845ddd5e1911208f3916c32
| 83,615 |
def ensure_str_type(df):
"""Convert object elements in dataframe to string.
Parameters
----------
* df : pd.DataFrame
Returns
-------
* pd.DataFrame with object types converted to string
"""
is_object = df.dtypes == object
df.loc[:, is_object] = df.loc[:, is_object].astype(str)
return df
|
8501e5a7528200a6f0bd2eba463f1b6dca3d800b
| 463,852 |
def median(data_sorted):
"""
Finds the median value of a given series of values.
:param data_sorted:
The values to find the median of. Must be sorted.
"""
length = len(data_sorted)
if length % 2 == 1:
return data_sorted[((length + 1) // 2) - 1]
half = length // 2
a = data_sorted[half - 1]
b = data_sorted[half]
return (a + b) / 2
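# Examples: odd-length input picks the middle element, even-length averages the two.
assert median([1, 2, 3, 4, 5]) == 3
assert median([1, 2, 3, 4]) == 2.5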
|
9cad0c556ad9b22e173c161d58456c6dae5bf198
| 540,104 |
import requests
def epoch_stakes(self, number: int, **kwargs):
"""
Return the active stake distribution for the specified epoch.
https://docs.blockfrost.io/#tag/Cardano-Epochs/paths/~1epochs~1{number}~1stakes/get
:param number: Number of the epoch.
:type number: int
:param return_type: Optional. "object", "json" or "pandas". Default: "object".
:type return_type: str
:param gather_pages: Optional. Default: false. Will collect all pages into one return
:type gather_pages: bool
:param count: Optional. Default: 100. The number of results displayed on one page.
:type count: int
:param page: Optional. The page number for listing the results.
:type page: int
:returns: A list of objects.
:rtype [Namespace]
:raises ApiError: If API fails
:raises Exception: If the API response is somehow malformed.
"""
return requests.get(
url=f"{self.url}/epochs/{number}/stakes",
params=self.query_parameters(kwargs),
headers=self.default_headers
)
|
abc0e18a932dc237e3d24ada2eae6bfd6316df2c
| 555,150 |
def string_or_empty(string):
"""Returns a string preceded by an underscore or an empty string.
Depending on whether the string parameter is none or not.
:param string: string.
:returns: a new string.
"""
return string if string else ''
|
bd44e2631c81585e88c8e3b8a61b8fe0b4c3a202
| 304,292 |
def get_ordered_adoption_center_list(adopter, list_of_adoption_centers):
"""
The method returns a list of an organized adoption_center
such that the scores for each AdoptionCenter to the Adopter
will be ordered from highest score to lowest score.
"""
ranking = []
for ac in list_of_adoption_centers:
ranking.append([ac, adopter.get_score(ac)])
# Stable sorts: order by name first so that ties in score are broken by center name
ranking = sorted(ranking, key=lambda x: x[0].get_name())
ranking = sorted(ranking, key=lambda x: x[1], reverse=True)
return [ac[0] for ac in ranking]
|
675615a32828f0f753bc2ab4aea84c215fb4dec3
| 542,111 |
def get_notes_by_song(song, notes_map):
"""
Iterate over all labels for a given song.
Same note have different midi codes in different octaves.
For example the note C has the following codes: 12, 24, 36, 48, 60, 72,
84, 96, 108, 120
For this representation, we transform all these code only to the note C.
We do the same for all the notes.
"""
notes = []
for segment in song:
# (start,end,(instrument,note,measure,beat,note_value))
notes.append(notes_map[segment[2][1]]['note'])
return notes
|
311666e0f72fad8c8c10c7128a633c7ae18653d2
| 636,829 |
def _catc_tags(start_num, end_num):
"""
Return a list of CATC tags corresponding to the start test number and end
test number. For example, start=1 and end=3 would return:
["CATC-001", "CATC-002", "CATC-003"].
"""
return ["CATC-0{0:02d}".format(i) for i in range(start_num, end_num + 1)]
|
32a44a4a814101a72be192fb0b37c57f2b300f0f
| 387,940 |
import numbers
def contains_complex(A):
"""Return True if the array contains any (non-real) complex numbers."""
return any(isinstance(x, numbers.Complex) and not isinstance(x, numbers.Real)
for x in A.flat)
|
cf9c407b0db9c979806bc4c4c31859f1d7390ea0
| 438,419 |
def verify_notebook_name(notebook_name: str) -> bool:
"""Verification based on notebook name
:param notebook_name: Notebook name by default keeps convention:
[3 digit]-name-with-dashes-with-output.rst,
example: 001-hello-world-with-output.rst
:type notebook_name: str
:returns: Return if notebook meets requirements
:rtype: bool
"""
return notebook_name[:3].isdigit() and notebook_name[-4:] == ".rst"
|
87a839ddffc32613d74775bf532953c8263093cd
| 30,421 |
import re
def parse_one_cve_commit(commit_str):
"""
Parse a single CVE commit.
:param commit_str: the commit message
returns the commit ID and all the matched CVEs in a list.
"""
result = re.findall(r'CVE-\d{4}-\d+', commit_str)
#print(result)
result = list(set(result))
if commit_str[:7] == 'commit ': # the first commit message contains the 'commit ' leading string
commit_id = commit_str[7:47]
else: # all other commit messages starts with the 40-digit SHA1 checksum, which is the git commit ID.
commit_id = commit_str[:40]
#print(commit_id, result)
return (commit_id, result)
|
92082df8390b92a45d4cfeeb203d2043ea90387b
| 282,918 |
def count_frames_manual(video):
"""
This method comes from https://www.pyimagesearch.com/2017/01/09/
count-the-total-number-of-frames-in-a-video-with-opencv-and-python/
written by Adrian Rosebrock.
Counts frames in video by looping through each frame.
Much slower than reading the codec, but also more reliable.
"""
# initialize the total number of frames read
total = 0
# loop over the frames of the video
while True:
# grab the current frame
(grabbed, frame) = video.read()
# check if we reached end of video
if not grabbed:
break
# increment the total number of frames read
total += 1
# return the total number of frames in the video file
return total
|
89bd20ff5239219d13024d808d18ad7cc9ac07be
| 58,804 |
import csv
def read_text_data(filename):
"""
Reads a csv file to extract text.
Args:
filename: a string specifying the filename / path
Returns:
A list of text sentences
"""
data = []
with open(filename) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
data.append(row['text'])
return data
|
5d806def5bc9a7b9e870679a9742c7f724b18f94
| 370,522 |
def _prime_factors(n):
""" Returns the prime factors of n. Unmemoized version. """
assert isinstance(n, int)
result = []
p=2
while p**2 <= n:
if n % p == 0:
result.append(p)
n //= p  # floor division keeps n an integer in Python 3
else:
p += 1
result.append(int(n))
return result
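# Examples: repeated factors appear once per multiplicity.
assert _prime_factors(12) == [2, 2, 3]
assert _prime_factors(97) == [97]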
|
3812e5914b9c48125820677972870ea4811d38b1
| 239,430 |
def tag(expressions):
"""Returns a list of objects like `{'text': SENTENCE, entities: [{'entity': ENTITY_NAME, 'value': ENTITY_VALUE, 'start': INT, 'end', INT}]}`"""
array = expressions['data']
result = list(map(lambda x: {'text': x['text'], 'entities': [
ent for ent in x['entities'] if ent['entity'] != "intent"], }, array))
return result
|
4e80cfd3f68cc8c4bc532b86fa0d2a7464695773
| 304,206 |
def frame_index_to_pts(frame: int, start_pt: int, diff_per_frame: int) -> int:
"""
given a frame number and a starting pt offset, compute the expected pt for the frame.
Frame is assumed to be an index (0-based)
"""
return start_pt + frame * diff_per_frame
|
e9dc5fce62d0540fc14c57a707d3f8846fec30d0
| 533,897 |