content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k)
---|---|---|
def dedupe_list(l):
"""Remove duplicates from a list preserving the order.
We might be tempted to use the list(set(l)) idiom,
but it doesn't preserve the order, which hinders
testability."""
result = []
for el in l:
if el not in result:
result.append(el)
return result
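# A quick doctest-style sanity check (added for illustration):
# >>> dedupe_list([3, 1, 3, 2, 1])
# [3, 1, 2]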
|
272fd6201fef908dd6a36ff01ef4087bc159f7ba
| 331,369 |
def convert_to_html(model):
"""
Takes a model instance and produces an HTML table that represents the
results in a nice way for us to look at.
Parameters
----------
model : dict
Dictionary of layers and their properties as a list in the form of
[Vp, Vs, rho, thickness, top]
Returns
-------
html_str : str
Nicely formatted HTML table of the model results.
"""
layer_labels = ["Water", "Ice", "Upper_Seds.", "Middle_Seds.",
"Lower_Seds.", "Upper_Crust", "Middle_Crust",
"Lower_Crust", "Mantle"]
header = ["Type", "Vp", "Vs", "Density", "Thickness", "Top"]
layer_names = ["water", "ice", "upper_sediments",
"middle_sediments", "lower_sediments",
"upper_crust", "middle_crust", "lower_crust", "mantle"]
# Make the table with HTML
html_str = "<table width=\"350\" border=\"1\">\n"
html_str += "\t<tr>"
# Header row
for h in header:
html_str += "\t\t<td>%s</td>\n" % h
html_str += "\t</tr>\n"
# Row for each layer (if it exists)
for i, layer in enumerate(layer_names):
if layer in model:
# Layer label in first column
html_str += "\t<tr>\n"
html_str += "\t\t<td>%s</td>\n" % layer_labels[i]
# Get and write layer data
row = model[layer]
for item in row:
html_str += "\t\t<td>%s</td>\n" % item
html_str += "\t</tr>\n"
# Close table
html_str += "</table>\n"
return html_str
|
f5ecc9677f7bfc0692e7f58b3dc6ce4f1955ee44
| 653,002 |
def isSpaceFree(board, move):
"""Return True if the move is free on the provided board."""
return board[move] == ' '
|
8524c869fc8d014de229633748be0da3c9180713
| 181,042 |
def fixtureid_cpcs(fixture_value):
"""
Return a fixture ID to be used by pytest, for fixtures returning a list of
CPCs.
Parameters:
* fixture_value (list of zhmcclient.Cpc): The list of CPCs.
"""
cpc_list = fixture_value
assert isinstance(cpc_list, list)
cpcs_str = ','.join([cpc.name for cpc in cpc_list])
return "CPCs={}".format(cpcs_str)
|
864d76898452e74841497253e7799e763aac3c0b
| 122,498 |
def get_renaming(mappers, year):
    """Get original to final column namings. The `year` argument is accepted
    for interface compatibility but is not used here."""
    return {code: attr['df_name'] for code, attr in mappers.items()}
|
33197b5c748b3ecc43783d5f1f3a3b5a071d3a4e
| 705,784 |
def convert_to_lower(x):
    """Convert string to lower case
    :param x: string
    :return: string
    """
    return str(x).lower()
|
310a9603a572830c0c5b9fcee573abe88a466d7f
| 410,587 |
def calculate_max_series(series):
"""
Calculate the maximum of a series
    :param series: list of values of which we want to know the maximum.
:return max(series): highest value in the series
"""
assert type(series) is list and len(series) != 0
return max(series)
|
de8ba05f580e2de4ca6e0737e5db61352bf1e266
| 650,919 |
import re
def is_image(url: str) -> bool:
"""If a image is a valid image or not.
Examples
________
>>> url = "https://i.imgur.com/a/MzgxNjI.jpg"
>>> is_image(url)
True
>>> url = "https://www.google.com"
>>> is_image(url)
False
Parameters
----------
url : str
The url to validate
Returns
-------
bool
        True if the url points to a valid image, otherwise False
"""
# We cast it to bool just to make sure that it is True/False
return bool(re.match(r"(https?:\/\/.*\.(?:png|jpg))", url))
|
f1978247c8262ce9c5cb842d300fefb1b9067f99
| 524,454 |
import torch
def split_tensor_pos(tensor):
""" Splits tensor into positive and negative terms """
zeros_like = torch.zeros_like(tensor)
pos_tensor = torch.max(tensor, zeros_like)
neg_tensor = torch.min(tensor, zeros_like)
return pos_tensor, neg_tensor
|
e3f1b013949b38a88185aeaaec45cb60891753a2
| 640,685 |
def register_sql(spark, files, schema=None, sep=None, table_name="table", return_count=False):
"""
Register a list of files as a SQL temporary view.
parameters:
- files is overloaded: can be one file path or list of file paths.
- spark: pyspark.sql.SparkSession
- table_name: this is how we will refer to table in SQL query
Schema of files must be the same for the table
Example usage after registering table:
>> DF = spark.sql("SELECT * FROM table")
"""
tempFiles = spark.read.csv(files, schema=schema, sep=sep, header=True)
tempFiles.createOrReplaceTempView(table_name)
if return_count:
return spark.sql("SELECT * FROM {}".format(table_name)).count()
|
f7f79211c2ad0dc8591dfdca7823e595f86ef035
| 630,642 |
def curvature(x, dfunc, d2func, *args, **kwargs):
"""
Curvature of function:
    `f'' / (1 + f'^2)^(3/2)`
Parameters
----------
x : array_like
Independent variable to evalute curvature
dfunc : callable
Function giving first derivative of function f: f', to be called `dfunc(x, *args, **kwargs)`
d2func : callable
Function giving second derivative of function f: f'', to be called `d2func(x, *args, **kwargs)`
args : iterable
Arguments passed to `dfunc` and `d2func`
kwargs : dict
Keyword arguments passed to `dfunc` and `d2func`
Returns
-------
float or ndarray
Curvature of function f at `x`
"""
return ( d2func(x, *args, **kwargs) /
(1. + dfunc(x, *args, **kwargs)**2)**1.5 )
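# Worked example (hypothetical callables, added for illustration): for f(x) = x**2
# we have f'(x) = 2x and f''(x) = 2, so at x = 0 the curvature is 2 / (1 + 0)**1.5 = 2.0:
# >>> curvature(0.0, lambda x: 2.0 * x, lambda x: 2.0)
# 2.0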
|
892629c780a142a0c974fd9d859a28fe5a66a96e
| 247,509 |
def _fits_section_header(section_name):
"""
Blank fits header cards for a section header. As in drizzle, one blank line,
a line with the section name as a comment, then one more blank.
"""
return [('', '', ''), ('', '/ ' + section_name, ''), ('', '', '')]
|
4d7ce0175e40706505bed15e2fc1f5d4f2d27dac
| 87,464 |
def readme_contents_section_exists(path: str) -> bool:
"""
Given a README.md path, checks to see if there is a Contents section
"""
try:
with open(path, 'r') as f:
return '## Contents' in f.read()
except FileNotFoundError:
return False
|
7353613b5e74f3343c8214073b00919a32a692b0
| 677,558 |
def format_query(query):
"""Replaces spaces for %20"""
return "%20".join(query.split())
|
8512752574e34ae72e5bd6a6da68283a710c347d
| 345,004 |
def hdx_acap_formatter(raw):
"""Formats raw pandas.DataFrame
- Drops 'pcode', 'admin_level_name', 'alternative source' columns
- Orders Columns
Arguments:
raw (pandas.DataFrame): from hdx_acap_connector
Returns: pandas.DataFrame
"""
data = raw.copy()
data.columns = [column.lower() for column in data.columns]
data = data.drop(['pcode', 'admin_level_name', 'alternative source'], axis=1)
column_order = ['id', 'country', 'region', 'iso', 'category', 'measure',
'targeted_pop_group', 'comments', 'non_compliance', 'date_implemented',
'source', 'source_type', 'entry_date', 'link']
data = data[column_order]
return data
|
2372f7b340ed6e7f25283baeb457cabbfef930ca
| 494,018 |
def string2bool(allegedstring):
"""
Tries to return a boolean from a string input if possible,
else it just returns the original item, allegedly a string.
"""
if allegedstring.lower().startswith('t'):
return True
elif allegedstring.lower().startswith('f'):
return False
elif allegedstring == "1":
return True
elif allegedstring == "0":
return False
else:
return allegedstring
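# Example behaviour (added for illustration):
# >>> string2bool("True"), string2bool("0"), string2bool("maybe")
# (True, False, 'maybe')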
|
a5a8598f0d0c2e79a27f7ae9a4555894b2ccf9ac
| 62,765 |
import torch
def build_design_tensor(item_thetas, individual_assignment):
"""
Given a sequence of angles representing objects on S^1, and
assignments of those objects to individuals, creates a design tensor
of the appropriate dimensions.
:param torch.Tensor item_thetas: a tensor of dimension batch x n representing
the item to be used at the nth trial
:param torch.Tensor individual_assignment: a tensor of dimension batch x n x 2
with 0-1 rows indicating the individual to assign item n to
"""
# batch x n x 2
item_features = torch.stack([item_thetas.cos(), -item_thetas.sin()], dim=-1)
ind1 = individual_assignment[..., 0].unsqueeze(-1)*item_features
ind2 = individual_assignment[..., 1].unsqueeze(-1)*item_features
# batch x n x 6
return torch.cat([item_features, ind1, ind2], dim=-1)
|
16dfa55a98b04e67ba68e6b50d4e0ff53a38d5e3
| 580,403 |
def _calc_num_simulated_obs_meeting_a_condition(simulated_y, condition):
"""
    Calculates the number of simulated observations where `y == 1` and
`condition == True`.
Parameters
----------
simulated_y : 1D ndarray of ints in `{0, 1}`.
Denotes the simulated outcomes.
condition : 1D ndarray of booleans or ints in `{0, 1}`.
Denotes the conditions that need to be met in addition to `y == 1`.
Returns
-------
num : scalar.
        The number of observations with `simulated_y == 1 and condition == True`.
"""
if simulated_y.shape[0] != condition.shape[0]:
msg = "simulated_y.shape[0] MUST EQUAL condition.shape[0]"
raise ValueError(msg)
return simulated_y.T.dot(condition)
|
f64f8467a641a9e8e18748bad9f43d9f6b4995db
| 341,196 |
import pickle
def load_model(path):
"""Loads the saved BaseModel and returns the reconstructed object.
Parameters
----------
path: string
Location of the saved model.
Returns
-------
model: BaseModel
Saved BaseModel/child class instance."""
with open(path, 'rb') as f:
model = pickle.load(f)
try:
for m in model.models:
m._parameters._prefix = m._prefix
    except AttributeError:
model._parameters._prefix = model._prefix
return model
|
4ccfbefcb1813152e4397901f240be8755091fee
| 158,434 |
import hashlib
def file_hash(file_path):
""" Compute the md5 hash of a file """
digest = hashlib.md5()
with open(file_path, 'rb') as fp:
for chunk in iter(lambda: fp.read(4096), b''):
digest.update(chunk)
return digest.hexdigest()
|
490501eac96ebe14f07a36365985ca1b1bed9215
| 132,461 |
import mimetypes
def guess_mimetype(path):
"""Define a mimetypes.guess_type() wrapper that returns just the type and
also handles webp.
"""
if path.endswith('.webp'):
return 'image/webp'
return mimetypes.guess_type(path)[0]
|
a6f42de3f8f550119f20e496cd4f70eeb7a49239
| 523,284 |
from typing import Any
def success_fixture(mocker: Any) -> Any:
"""Return fixture for success method used by status endpoint."""
mocked_success = mocker.patch('medtagger.api.core.business.success')
mocked_success.return_value = {'success': True}
return mocked_success
|
d4fa3f54e4602dede36b159838c9f6feee4e4836
| 515,698 |
def ranktokelv(rankine):
""" This function converts Rankine to kelvin, with Rankine as parameter."""
kelvin = rankine / 1.8
return kelvin
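# Worked example (added for illustration): the freezing point of water is
# 491.67 degrees Rankine, and 491.67 / 1.8 = 273.15 kelvin:
# >>> round(ranktokelv(491.67), 2)
# 273.15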
|
c604ea03047afb8dfffc96ef580b1520c7ae52e1
| 511,353 |
def f(x):
""" Target function for the perceptron learning algorithm. I've
chosen the NAND gate, but any function is okay, with the caveat
that the algorithm won't terminate if ``f`` cannot be computed by
a perceptron."""
return int(not (x[0] and x[1]))
|
2713e720a09b707bab1ad7678d5d20d636926f13
| 394,895 |
def get_passed_objects(token):
""" Returns a tuple (which you can pass as args) of passed objects """
contents = token.split_contents()
return tuple(contents[1:])
|
4e609ae4f0edc01e5635ab397d4bec5a2f91e786
| 147,842 |
def select_trajectories(panel, items=None):
"""Select trajectories of certain items in a panel.
Parameters
----------
panel : pandas Panel
A panel of trajectories, with time on the major axis, items denoting
different trajectories, and features on the minor axis.
items : iterable of strings (each string an item of the `panel`) or None
The items of the panel to select. If None, then use all items in the
panel.
Returns
-------
items_to_trajectories : dict
Dictionary mapping the items to their trajectories.
"""
items_to_trajectories = {}
if items is None:
items = panel.items
for item in items:
items_to_trajectories[item] = panel.loc[item].dropna(how='all').values
return items_to_trajectories
|
5b7b9d408bd1dcd3d213282f4bb1f938a93d1ef6
| 163,280 |
def get_env(fuzzing_language, build):
"""Returns an environment for building. The environment is returned as a list
and is suitable for use as the "env" parameter in a GCB build step. The
environment variables are based on the values of |fuzzing_language| and
|build."""
env_dict = {
'FUZZING_LANGUAGE': fuzzing_language,
'FUZZING_ENGINE': build.fuzzing_engine,
'SANITIZER': build.sanitizer,
'ARCHITECTURE': build.architecture,
# Set HOME so that it doesn't point to a persisted volume (see
# https://github.com/google/oss-fuzz/issues/6035).
'HOME': '/root',
'OUT': build.out,
}
    return sorted(f'{key}={value}' for key, value in env_dict.items())
|
d7681862b118731557dc79dcf51f3ee12d387568
| 238,410 |
import logging
def MergeDictionaryValues(old_dict, new_dict):
"""Attempts to merge the given dictionaries.
Warns if a key exists with different values in both dictionaries. In this
case, the new_dict value trumps the previous value.
Args:
old_dict: Existing dictionary.
new_dict: New dictionary.
Returns:
Result of merging the two dictionaries.
"""
common_keys = set(old_dict) & set(new_dict)
if common_keys:
conflicting_keys = set(key for key in common_keys
if old_dict[key] != new_dict[key])
if conflicting_keys:
def FormatKey(key):
return ('\'{key}\' has conflicting values \'{old}\' and \'{new}\'. '
'Using \'{new}\'.').format(key=key,
old=old_dict[key],
new=new_dict[key])
for conflicting_key in conflicting_keys:
logging.warning(FormatKey(conflicting_key))
result = old_dict.copy()
result.update(new_dict)
return result
|
2836386f78363a1a0cede442ac0d6e81e3cd71e4
| 695,263 |
def chunk_data(data, size):
"""Creates a list of chunks of the specified `size`.
Args:
data (list): self-explanatory.
size (int): desired size of each chunk, the last one can be <= `size`.
Returns:
coll (list): lists of lists.
"""
coll = []
start_indx = 0
while start_indx < len(data):
        coll.append(data[start_indx:start_indx + size])
start_indx += size
return coll
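# A quick sanity check (added for illustration); the last chunk may be shorter:
# >>> chunk_data([1, 2, 3, 4, 5], 2)
# [[1, 2], [3, 4], [5]]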
|
3d7547d747c308e575eca4bb21c2a1316f8fbdc8
| 292,121 |
import random
def encourage() -> str:
"""Returns a random encouragement message"""
return random.choice([
"Please try again.",
"Give it another try.",
"Let's try it again.",
"Try it again; next time's the charm!",
"Don't give up now, try it one more time.",
"But no need to fret, try it again.",
"Try it again. I have a good feeling about this.",
"Try it again. You get better each time.",
"Try it again. Perseverence is the key to success.",
"That's okay: you learn more from mistakes than successes. Let's do it one more time."
]
)
|
55928dcad07a871b5fc214f1fa631bc359b61dcf
| 265,203 |
def generalized_fibonacci_sequence(n, p):
"""Compute the generalized Fibonacci sequence.
Args:
n: An `int`. The number of terms to compute. Must be >= 2.
p: An `int`. The number of the generalized sequence. Must be >= 1. If `p` is
1, the sequence is the standard Fibonacci sequence.
Returns:
A list of `int`s.
"""
a = [1, p]
for _ in range(n - 2):
a.append(a[-2] + a[-1])
return a
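# Example (added for illustration): with p=1 this is the standard Fibonacci
# sequence starting from [1, 1]:
# >>> generalized_fibonacci_sequence(6, 1)
# [1, 1, 2, 3, 5, 8]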
|
2b7b2c5e2f7102166deb4f81b37e950fa900a17e
| 642,605 |
def get_weighted_mean(items=[{}]):
"""
    When scores are grouped we first multiply each
    score x by its frequency f, then divide the sum of
    the products xf by the total frequency n:
weighted_mean = summation(x*f) / n
x = score
f = frequency
n = total frequency
"""
total_freq = 0
total = 0
for item in items:
total = total + (item["score"] * item["freq"])
total_freq = total_freq + item["freq"]
return total / total_freq
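# Worked example (added for illustration): (2*3 + 4*1) / (3 + 1) = 10 / 4 = 2.5:
# >>> get_weighted_mean([{"score": 2, "freq": 3}, {"score": 4, "freq": 1}])
# 2.5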
|
9796b704aab21a53f582d7f0602784cec27bd145
| 331,524 |
def is_transactional_goal(domain_goal: dict):
"""Checks if a domain goal is transactional. For MultiWOZ this is equivalent to the presence of a ``'book'``
field in the domain goal.
Parameters
----------
domain_goal
Domain goal containing constraints and information requests. See `compute_igcd_f1` for full goal structure.
"""
return "book" in domain_goal and domain_goal["book"]
|
a10f143db07e396e9e85050010ee873349110e1e
| 170,403 |
def clean(features):
"""
    Sanitize the dataset for analysis by centering each feature at mean 0 and scaling it to unit variance
"""
means = features.mean(axis=0)
sd = features.std(axis=0)
    cleaned = features.copy()  # copy so the caller's array is not mutated in place
for j in range(features.shape[1]):
cleaned.T[j] = [(i-means[j])/sd[j] for i in cleaned.T[j]]
return (cleaned,means,sd)
|
06c025af9d2ce587cad1effacf6286505f1f71c5
| 506,620 |
def unreduceConfig(cls, stream):
"""Create a `~lsst.pex.config.Config` from a stream.
Parameters
----------
cls : `lsst.pex.config.Config`-type
A `lsst.pex.config.Config` type (not an instance) that is instantiated
with configurations in the ``stream``.
stream : file-like object, `str`, or compiled string
Stream containing configuration override code.
Returns
-------
config : `lsst.pex.config.Config`
Config instance.
See also
--------
lsst.pex.config.Config.loadFromStream
"""
config = cls()
config.loadFromStream(stream)
return config
|
ae65d6930b34bb32866c23417f4e0a65d6cec06b
| 203,200 |
def remainder(num1: int, num2: int) -> int:
"""Find the remainder from num1 and num2."""
if all((num1 > 0, num2 >= 1,)):
return num1 % num2
    raise ValueError("Check number inputs.")
|
3ac3e106cfb315ac53a38b839cc28002ae97e00b
| 155,566 |
def merge(a, b):
"""
Recursively merges hash b into a so that keys from b take precedence over keys from a
See: https://github.com/ansible/ansible/blob/6787fc70a643fb6e2bdd2c6a6202072d21db72ef/lib/ansible/utils/vars.py
"""
# if a is empty or equal to b, return b
if a == {} or a == b:
return b.copy()
# if b is empty the below unfolds quickly
result = a.copy()
# next, iterate over b keys and values
    for k, v in b.items():
# if there's already such key in a
# and that key contains a MutableMapping
if k in result and isinstance(result[k], dict) and isinstance(v, dict):
# merge those dicts recursively
result[k] = merge(result[k], v)
else:
# otherwise, just copy the value from b to a
result[k] = v
return result
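# Example of the recursive merge (added for illustration); keys from b win on conflict:
# >>> merge({"a": 1, "b": {"x": 1}}, {"b": {"y": 2}, "c": 3})
# {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}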
|
478c7ef98d793526b157ec353b740a9e1d21c011
| 468,935 |
def create_client_from_parsed_globals(session, service_name, parsed_globals,
overrides=None):
"""Creates a service client, taking parsed_globals into account
Any values specified in overrides will override the returned dict. Note
that this override occurs after 'region' from parsed_globals has been
translated into 'region_name' in the resulting dict.
"""
client_args = {}
if 'region' in parsed_globals:
client_args['region_name'] = parsed_globals.region
if 'endpoint_url' in parsed_globals:
client_args['endpoint_url'] = parsed_globals.endpoint_url
if 'verify_ssl' in parsed_globals:
client_args['verify'] = parsed_globals.verify_ssl
if overrides:
client_args.update(overrides)
return session.create_client(service_name, **client_args)
|
2e3dc30f5b212ea4a812de33069025450b50ba2b
| 470,065 |
import json
def error(status, message):
"""
Return an error message
"""
return {
"statusCode": status,
"body": json.dumps({"message": message})
}
|
73b8f78f8858736998f2db24ad61dbd32764e0df
| 321,436 |
def key_strings_from_class(klass):
"""
Returns the set of visible fields from a class, presumably a BaseModel subclass.
"""
    return set(klass.__fields__.keys())
|
57b5858f9f16ff44a60e62f3f40b7de67af74107
| 256,983 |
import re
def remove_color_tags(s):
"""Clean string and remove color tags from string"""
return re.sub("<[^>]*>", "", s)
|
389072bf8a2ad7dbf28ed985e0504dbe6cda04eb
| 362,382 |
def types_args_one_port(port, formatter):
"""
Extract from a port:
1. a list of "type arg"s
2. a list of args
:param port:
    :param formatter: a string formatter with {0} and/or {1}, where {0} is the port name and {1} is an arg ID.
:return:
"""
types_args = []
args = []
for i in range(len(port.argtypes)):
arg = formatter.format(port.name, i)
args.append(arg)
types_args.append("%s %s" % (port.argtypes[i], arg))
return types_args, args
|
c10a379aab76ff2101f4353c5421ede62c1473cb
| 81,199 |
import math
def data_to_sorted_xy(data, logx):
"""
Return a list of (x, y) pairs with distinct x values and sorted by x value.
Enter: data: a list of (x, y) or [x, y] values.
logx: True to return (log10(x), y) for each entry.
Exit: data: the sorted list with unique x values.
"""
if not logx:
if (len(data) <= 1 or (
data[0][0] < data[1][0] and (len(data) <= 2 or (
data[1][0] < data[2][0] and (len(data) <= 3 or (
data[2][0] < data[3][0] and len(data) == 4)))))):
return data
return sorted(dict(data).items())
return sorted({math.log10(x): y for x, y in data}.items())
|
7e84a3f684dc9a82bf5fd48256e5f5c18a5eedb6
| 13,870 |
def sort_dict(dict_unsorted):
"""Sort dictionary by values in reverse order."""
dict_sorted = sorted(
dict_unsorted.items(), key=lambda dict_sort: dict_sort[1], reverse=True
)
return dict_sorted
|
1b9be6dcf620e9b9c0074aa5fe03091d23e1b87d
| 93,063 |
def paginated_id_to_names(slack, method, key, **kwargs):
"""Call the paginated method via slack, return a dict of id: name."""
cursor = True
mapping = {}
while cursor:
if cursor is True:
api_return = slack.api_call(method, **kwargs)
else:
api_return = slack.api_call(method, cursor=cursor, **kwargs)
if api_return["ok"]:
mapping.update({x["id"]: x["name"] for x in api_return[key]})
cursor = api_return.get("response_metadata", {}).get("next_cursor")
else:
break
return mapping
|
097fd00c4d78d12ea2d1b5067927e9d09eef2c0d
| 176,346 |
def end_chat(input_list):
""" End chat
Parameters
----------
input_list : list
List containing 'quit' to end chat.
Returns
-------
True or False : boolean
Boolean assures whether to end chat based on whether the input contains 'quit'.
"""
    return 'quit' in input_list
|
bcf96aa2d3dc43c1d8464b9775c7450ebcf7984b
| 665,045 |
def shift_row(matrix):
    """
    This function is used for AES encryption. The first row is unchanged; the bytes in the second, third and
    fourth rows are rotated one, two and three bytes to the left respectively.
    :param matrix: A 4x4 matrix composed of 4 lists. The lists only accept numeric objects.
    :return: A new matrix transformed by the ShiftRows rule. The format of this matrix is the same as the input
    matrix.
    """
    # Row i is rotated left by i positions; row 0 (i == 0) comes back unchanged.
    return [row[i:] + row[:i] for i, row in enumerate(matrix)]
|
6a5dfe1398db1582663fe26d2cb3c4d860d8dd00
| 202,800 |
def train_vae(model, input_tensor, opt, batch_size, loss_fn = None):
"""
Wrapper function to make a forward pass through a VAE model.
Assumes that the model class contains a variational loss function,
if this is not the case it can be provided using the loss_fn arg.
Params
------
model (torch.nn.Module)
VAE model.
input_tensor (torch.tensor)
Tensor of size (batch_size, n_feats).
opt (torch.optim.Optimizer)
batch_size (int)
loss_fn (default= None)
Loss function in case VAE class doesn't contain a loss function.
Returns
-------
loss (float)
Variational loss for the input batch.
"""
    input_ = input_tensor.view(batch_size, -1).float()
# Zero out grads
opt.zero_grad()
# Make forward computation
reconstructed, mu, log_var = model(input_)
    # Compute the variational loss, preferring a user-supplied loss_fn if given
    # (assumed to share model.loss's signature, as the docstring suggests)
    if loss_fn is not None:
        loss = loss_fn(reconstructed, input_, mu, log_var)
    else:
        loss = model.loss(reconstructed, input_, mu, log_var)
    # Backprop errors
loss.backward()
# Update weights
opt.step()
return loss
|
1a93bb7f2e39e34975170aa90bd035c388f6fa26
| 425,129 |
def filter_document(nodes) -> str:
""" Returns the text content of the specified nodes """
text = ""
for element in nodes:
text = text + element.text_content()
return text
|
122cd81005f9b27d7945a3d51aad474e92a2968a
| 327,309 |
import re
def regex(pattern):
"""Return compiled regular expression pattern."""
return re.compile(pattern)
|
e8fa8d3d8a8745d0dc6a2c71b4a8cba2c5af16b9
| 308,506 |
import itertools
def args_combinations(*args, **kwargs):
"""
Given a bunch of arguments that are all a set of types, generate all
possible possible combinations of argument type
args is list of type or set of types
kwargs is a dict whose values are types or set of types
"""
def asset(v):
if isinstance(v, set):
return v
else:
return {v}
keys = list(kwargs.keys())
for curr_args in itertools.product(*[asset(a) for a in args]):
for curr_kwargs in itertools.product(*[asset(kwargs[k]) for k in keys]):
yield curr_args, {k: v for (k, v) in zip(keys, curr_kwargs)}
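# Example (added for illustration): two alternatives for one positional argument,
# crossed with a single kwarg type, yield two combinations (set iteration order
# is not guaranteed):
# >>> len(list(args_combinations({int, float}, flag=bool)))
# 2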
|
8e90ce285322bd17a97e4bdb75e230f7015f4b2d
| 702,598 |
def index_range(keyword, header_list):
"""
    This function takes in a keyword and a list of column header names and creates a list of the indices of headers
    that start with the specified keyword.
:param keyword: A header or part of a header that you are interested in.
:param header_list: A list of column header names from raw data.
:return: index_list: A list of indices that correspond to where the specific keyword shows up in the raw data column
header list
"""
    index_list = list()
    # enumerate() so duplicate headers get their own positions; list.index()
    # would always return the first occurrence.
    for i, header in enumerate(header_list):
        if header.startswith(keyword):
            index_list.append(i)
    return index_list
|
b4efaab899362108406ad61b75dabcfc2bc92752
| 469,009 |
import re
def parse_changelog(filename, version):
"""Parse Debian syntax changelog files and return last date.
    Args:
    filename: filename of changelog
    version: version string whose entry date should be returned
    Returns:
    date in the format day-of-week, dd month yyyy hh:mm:ss +zzzz, or None if the
    version is not found
"""
check_line = False
with open(filename, 'r') as changelog:
for line in changelog.readlines():
if check_line:
if re.match(r'^ .*<.*@.*> [A-Z][a-z][a-z], [0-9][0-9]', line):
return re.split(r'^ .*<.*@.*>', line)[1].strip()
else:
m = re.match(r'^.* \((.*)\)', line)
if m:
if m.groups()[0] == version:
check_line = True
|
2896d0a3ce02668b9977c9a89d6fe87d32b15325
| 505,637 |
def pick(d, *args):
"""
Pick some keys on a given dictionary.
:param d: the dict
:param args: the keys to pick
"""
res = {}
getitem = d.getlist if hasattr(d, "getlist") else d.__getitem__
for key in args:
if key in d:
res[key] = getitem(key)
return res
|
db040245574ef05faa596fcf6accbd50587f834f
| 87,929 |
import logging
def create_logger(e_handler_name, t_handler_name):
"""
Wrapper to create logger for errors and training records
:param e_handler_name: filepath to logger as string
:param t_handler_name: filepath to logger as string
:return: logger object
"""
log = logging.getLogger()
log.setLevel(logging.INFO)
# Create Handlers
c_handler = logging.StreamHandler()
e_handler = logging.FileHandler(filename=e_handler_name)
t_handler = logging.FileHandler(filename=t_handler_name)
# Create Log Format(s)
f_format = logging.Formatter('%(asctime)s:%(processName)s:%(name)s:%(levelname)s:%(message)s')
# Set handler levels
c_handler.setLevel(logging.INFO)
e_handler.setLevel(logging.ERROR)
t_handler.setLevel(logging.INFO)
# Set handler formats
c_handler.setFormatter(f_format)
e_handler.setFormatter(f_format)
t_handler.setFormatter(f_format)
# Add handlers to the logger
log.addHandler(c_handler)
log.addHandler(e_handler)
log.addHandler(t_handler)
return log
|
a0720407fd410781456b531ed36de4a232ff5ae2
| 260,495 |
def get_output_labels(opts, preds):
"""Convert predictions to labels."""
processor = opts["pass_in"][0]
labels = processor.get_labels()
output_labels = []
for prediction in preds:
output_labels.append(labels[prediction])
return output_labels
|
8121107c0e894335925546d9970297f6f72a7bbf
| 100,121 |
def predict(title: str):
"""
Naive Baseline: Always predicts "fake"
"""
return {'prediction': 'fake'}
|
8cca0e47294dcb6b5c4bfb3d29c2e98a674266cc
| 482,373 |
def split_filter(string_to_split, delimiter):
"""
Create a custom Jinja filter to use it in the Jinja2 template. Same functionality as the split function in the
Python language
:param string_to_split: the string to split
:type string_to_split: string
:param delimiter: the delimiter to split the string into
:type delimiter: string
:return: the string split as a list
:rtype: list[string]
"""
if string_to_split is not None:
return string_to_split.split(delimiter)
else:
return None
|
2648854cdb8173c49748cee94ba97c7186642a71
| 492,850 |
def sample_c(_rb):
""" Force sample from ChainerRL PrioritizedReplayBuffer
"""
def sample(n):
_rb.memory.wait_priority_after_sampling = False
return _rb.sample(n)
return sample
|
d83d6d8d115acb5cee953a7a9b24ca45e4548e0e
| 158,846 |
def arg_max(list_input):
"""
    Return the index of the largest element in the list
    :param list_input: Input List
    :return: index of the largest element
"""
biggest_element = max(list_input)
idx_max = list_input.index(biggest_element)
return idx_max
|
667640534f947ee6b198a094add2a1c9fdc67c82
| 188,604 |
import re
import string
def clean_str(inp_str: str) -> str:
"""Removes non printable characters from the string
Reference:
https://stackoverflow.com/a/52540226
Arguments:
inp_str {str} -- The input string, to be cleaned.
Returns:
{str} -- The cleaned string
"""
return re.sub(f'[^{re.escape(string.printable)}]', '', inp_str)
|
36a42eacc070a32b41934b9cdf55d8d59b1b4182
| 121,690 |
import copy
def derive(coefficients):
"""
Applies one derivation of given coefficients list of polynomial function.
:param coefficients: polynomial function represented as a coefficients list
:return coefficients: the new derived polynomial function represented as a coefficients list
"""
if not coefficients:
return None
new_coefficients = copy.deepcopy(coefficients)
for index in range(len(new_coefficients)):
new_coefficients[index] = new_coefficients[index] * (len(new_coefficients) - index - 1)
new_coefficients.pop(-1)
return new_coefficients
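# Worked example (added for illustration): [3, 2, 1] encodes 3x^2 + 2x + 1,
# whose derivative 6x + 2 is encoded as [6, 2]:
# >>> derive([3, 2, 1])
# [6, 2]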
|
ee0c4e7a672d1dbd1fc85692eecce6c61fb04a6a
| 329,782 |
def get_headers(sql, prefix=None):
"""Returns a list of headers in an sql select string
Parameters
----------
sql : str
SQL to get definition from.
prefix : str (optional)
Characters to prefix to each header.
Returns
-------
list
List of headers.
Examples
--------
>>> get_headers('SELECT a, b, c FROM x')
['a', 'b', 'c']
    >>> get_headers('SELECT a, b, c FROM x', 'z')
    ['z.a', 'z.b', 'z.c']
"""
cols = []
open_parenth = False
clean_sql = ""
# Remove anything in parentheses, liable to contain commas
for i, char in enumerate(sql):
if char == "(":
open_parenth = True
elif char == ")":
open_parenth = False
if not open_parenth:
clean_sql += sql[i].replace("\t", " ")
for col in clean_sql.split("FROM")[0].replace("SELECT", "").split(","):
if " as " in col.lower():
c = col.split(" as ")[-1]
elif "." in col.lower():
c = col.split(".")[-1]
else:
c = col
cols.append(c.strip())
    if prefix is not None:
cols = [".".join([prefix, col]) for col in cols]
return cols
|
034ddcec32b6c36d5360777c75e5ae21c07a1821
| 493,664 |
def ord_mrv(csp):
"""
Return the variable with the minimum remaining value (MRV).
"""
# ANCHOR: alt version 1
# variables = csp.get_all_unasgn_vars()
# dom_sizes = [var.cur_domain_size() for var in variables]
# min_index = dom_sizes.index(min(dom_sizes))
# return variables[min_index]
# ANCHOR: even shorter version!
# variables = csp.get_all_unasgn_vars()
# func = lambda var: var.cur_domain_size()
# return min(variables, key=func)
# ANCHOR: original version.. readable
variables = csp.get_all_unasgn_vars()
min_var = variables[0]
min_dom_size = float('inf')
for var in variables:
dom_size = var.cur_domain_size() # get the variables domain size
if dom_size < min_dom_size:
min_var = var
min_dom_size = dom_size
return min_var
|
7d574fe1d389f3493c171871b53d3555c2865b42
| 323,871 |
import math
def get_points_dist(pt1, pt2):
"""Returns the distance between a pair of points.
Parameters
----------
pt1 : libpysal.cg.Point
A point.
pt2 : libpysal.cg.Point
The other point.
Returns
-------
dist : float
The distance between ``pt1`` and ``pt2``.
Examples
--------
>>> get_points_dist(Point((4, 4)), Point((4, 8)))
4.0
>>> get_points_dist(Point((0, 0)), Point((0, 0)))
0.0
"""
dist = math.hypot(pt1[0] - pt2[0], pt1[1] - pt2[1])
return dist
|
d4033334963673c2f33c2c170d8eaba4c0b6a77e
| 233,718 |
def parse_report_filter_values(request, reports):
"""Given a dictionary of GET query parameters, return a dictionary mapping
report names to a dictionary of filter values.
Report filter parameters contain a | in the name. For example, request.GET
might be
{
"crash_report|operating_system": "Linux",
"crash_report|graphics_card": "nVidia",
"apprentice_report|version": "13.0",
"start_date": "2015-01-01",
"end_date": "2015-01-31",
}
We want to return
{
"crash_report": {
"operating_system": "Linux",
"graphics_card": "nVidia",
},
"apprentice_report": {
"version": "13.0",
},
}
"""
report_name_to_filter_values = {}
# Note that if there are multiple values in the request.GET dictionary,
# as is the case for checkboxes with corresponding hidden fields, that
# items() will simply return the last value.
for report_and_parm_name, value in request.GET.items():
if "|" in report_and_parm_name:
report_name, parm_name = report_and_parm_name.split("|", 1)
report_name_to_filter_values.setdefault(
report_name, {})[parm_name] = value
# Make sure that all reports are in the result, and that each of the
# report's filters has a value.
for report in reports:
filter_values = report_name_to_filter_values.setdefault(
report.name(), {})
for filt in report.get_filters():
if filt.name not in filter_values:
filter_values[filt.name] = filt.default_value()
# Give the filter a chance to convert from the GET value into
# something that makes more sense to the report.
filter_values[filt.name] = filt.process_GET_value(
filter_values[filt.name])
return report_name_to_filter_values
|
217a7bfdeb65952637774ebefb6ae0ea7a0d991c
| 9,834 |
def exponent(attrs, inputs, proto_obj):
"""Elementwise exponent of input array."""
return 'exp', attrs, inputs
|
78c9fb18246ca70126f193000fe432a85e2da08f
| 250,890 |
def utilization(arrv):
"""Returns observed utilization of all queues in arrv."""
max_d = 0
tot_time = [0] * arrv.num_queues()
for evt in arrv:
        max_d = max(evt.d, max_d)
tot_time[evt.qid] += evt.s
return [ x / max_d for x in tot_time ]
|
18ed74b8e34f6411899f131e415c2e34de099a25
| 237,769 |
def get_indexes(table, col, v):
""" Returns indexes of values _v_ in column _col_
of _table_.
"""
li = []
start = 0
for row in table[col]:
if row == v:
index = table[col].index(row, start)
li.append(index)
start = index + 1
return li
|
fd585a908477dd3632f2dbc3f450c1601157e928
| 675,575 |
def visualThreshold(npArray, value):
"""
Thresholds all pixel values greater than 0 in numPy array and sets them to given value
Arguments:
npArray: A numpy array
value: int to set all values greater than zero to
Returns:
Numpy array
"""
npArray[npArray > 0] = value
return npArray
|
f3ba99e36f670f65f7911acf5fc30df51aeff422
| 572,797 |
from ssl import _create_unverified_context
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
def url_is_alive(url: str) -> object:
"""
Checks that a given URL is reachable
"""
try:
return urlopen(url, context=_create_unverified_context())
except HTTPError:
return False
except URLError:
return False
|
fa651fd941bc27b907491ad75ab5940331de75e9
| 123,912 |
def getTrackingScriptString(jobid=None):
"""
For injecting tracking script into a web page.
jobid -> current jobid. Adds "jobid" custom variable to events and page views on this page.
"""
customVarString = ""
if jobid is not None:
customVarString = "_gaq.push(['_setCustomVar',1,'jobid','{jobid}',3]);".format(jobid=str(jobid))
#If you look closely you'll see escaped { and }.
string = """<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-11026338-3']);
_gaq.push(['_setDomainName', 'none']);
_gaq.push(['_setAllowLinker', true]);
{customVar}
_gaq.push(['_trackPageview']);
(function() {{
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
}})();
</script>""".format(customVar=customVarString)
return string
|
fca5d972a2950b17c639bb49237db40f55b91531
| 494,225 |
def get_fits_from_sample(cells, datatype, ct, tr):
"""
Returns the fits for the given datatype, celltype and treatment.
:param cells: dict of cells
:param datatype: str, datatype
:param ct: str, celltype
:param tr: str, treatment
:return: dict of fits
"""
sample = cells[ct][tr]
fits = sample['fits']
return fits[datatype]
|
b76946f29743561ec7c4a0d4080ea62e56d9644d
| 229,580 |
def create_table_name(
table_type: str, command_id: str, context_id: str, node_id: str
) -> str:
"""
Creates and returns in lower case a table name with the format <tableType>_<commandId>_<contextId>_<nodeId>
"""
if table_type not in {"table", "view", "merge"}:
        raise TypeError(f"Table type is not acceptable: {table_type}.")
return f"{table_type}_{command_id}_{context_id}_{node_id}".lower()
|
15083f04ccab3a0d11da7c6617cc8023e66d0452
| 447,089 |
def _intersect_point2_circle(P, C):
"""
    Returns True if point P lies within circle C.
@type P : Point2 instance
@type C : Circle instance
"""
return abs(P - C.c) <= C.r
|
58522af7eed88e90c6377be5393da82d8ff99529
| 20,329 |
def getarg(args, index):
"""
Helper to retrieve value from command line args
"""
return args[index] if index < len(args) else None
|
256de43dda23c9f613c61a8dca456a3d507ca332
| 29,384 |
def security_plugin_label(context):
"""Security plug-in name adapter"""
return context.title
|
3f5e415c88774e80e323a025eb3fc4055b4e4af2
| 556,217 |
from typing import Any
def do_nothing(var: Any) -> Any:
"""
Does nothing but return what it's given. Mainly for
default callbacks.
:param var: Any
:return: Returns var that was given.
"""
return var
|
cee85cdceb60c30043f1e2d72dfe36fa351aa343
| 426,040 |
def is_valid_algorithm(algorithm: str) -> bool:
"""check if the input algorithm is valid
:param algorithm: algorithm name
:type algorithm: str
:return: True if the algorithm is valid
:rtype: bool
"""
available_algorithms = [
'Histogram Equalization',
'Contrast Stretching',
'Log Compression',
'Contrast Invert',
'No Algorithm',
]
return algorithm in available_algorithms
|
433aa8da6b0d2f7d4fc1c410754a0067f2644c78
| 176,655 |
import re
def get_qdyn_compiler(configure_log):
"""Return the name the Fortran compiler that QDYN was compiled with"""
with open(configure_log) as in_fh:
for line in in_fh:
if line.startswith("FC"):
m = re.search(r'FC\s*:\s*(.*)', line)
if m:
fc = m.group(1)
return fc
return None
|
71e0b9e448664a3a5df11587d63574cef22e0511
| 681,419 |
def get_prefix(id: str) -> str:
"""Drop the final component of a qualified name (e.g. ('x.y' -> 'x')."""
return id.rsplit('.', 1)[0]
|
ffe781d328e9d07fc7b044c9ff19de15a4799815
| 490,665 |
def apply_intervals(list_: list, intervals: list) -> list:
"""
    Select the elements of a list at the given indices.
>>> list_with_elements = ['spam', 'eggs', 'foo', 'bar']
>>> intervals = [0, 2, 3]
>>> apply_intervals(list_with_elements, intervals)
['spam', 'foo', 'bar']
"""
return [list_[interval] for interval in intervals]
|
617c6ef925605394565de2c3c07ce5d7e233f75a
| 558,974 |
def createBox(box):
    """
    create a box filter from the input list "box"
    this filter should accept a list of length len(box) and return a simple
    convolution of it.
    the meaning of this box filter is as follows:
    for each element of the input list l, multiply l[i] by box[i]
    sum the results of all of these multiplications
    return the sum
    So for a box of length 3, filter(l) should return:
    (box[0] * l[0] + box[1] * l[1] + box[2] * l[2])
    The function createBox returns the box filter itself, as well as the length
    of the filter (which can be passed as an argument to conv)
    :param box: list
    :return: function, int
    """
    def boxFilter(l):
        # Sum of the elementwise products of box and l
        multi_sum = 0
        for a in range(len(box)):
            multi_sum += box[a] * l[a]
        return multi_sum
    return boxFilter, len(box)
|
ad6fe6b3c1d2f778a5477225c609cb76da55f4e0
| 229,314 |
from typing import Sequence
def listify(seq: Sequence) -> str:
"""Convert sequence to a conventional string list
Args:
seq (Sequence): Sequence object
Returns:
str: the conventional string list
"""
fnl = ""
for option in seq:
fnl += str(option) + ", "
return fnl.strip(", ")
|
85587ceab6a732eff3a5805478a3bf42d977da25
| 340,800 |
import torch
def reduce(tensor: torch.Tensor, reduction: str) -> torch.Tensor:
"""Reduces the given tensor using a specific criterion.
Args:
tensor (torch.Tensor): input tensor
reduction (str): string with fixed values [elementwise_mean, none, sum]
Raises:
ValueError: when the reduction is not supported
Returns:
torch.Tensor: reduced tensor, or the tensor itself
"""
if reduction in ("elementwise_mean", "mean"):
return torch.mean(tensor)
if reduction == 'sum':
return torch.sum(tensor)
if reduction is None or reduction == 'none':
return tensor
raise ValueError('Reduction parameter unknown.')
|
a77edd7f9a8486a8fd604b9a35c2ecfe28d43c8c
| 702,271 |
def rectangle_crop_img(src, x_start, x_end, y_start, y_end):
"""
Getting the parts needed in an image by clipping an image with a rectangle.
Image ( 0, 0 ) points in the upper left corner of the image,
x_start and x_end determine the height of the image,
y_start and y_end determine the width of the image.
"""
return src[x_start:x_end, y_start:y_end]
|
40eb71921db0f6d7f30221c8f07309a11daa4b2a
| 89,827 |
def label(label, units):
"""
    Combines a label string and a units string together in the form 'label [units]',
    unless the units string is empty.
    """
    string = label
    if len(units) > 0:
        string = string + ' [' + units + ']'
    return string
|
4eaef8d176d7d462503509df9b081c53da741195
| 236,148 |
def open_group(group, path):
"""Creates or loads the subgroup defined by `path`."""
if path in group:
return group[path]
else:
return group.create_group(path)
|
b67b7cd5e49a6274a1dea68159f859cd7eb8c40e
| 457,162 |
def sum_series(n, n0=0, n1=1):
"""
Compute the nth value of a summation series.
:param n0=0: value of zeroth element in the series
:param n1=1: value of first element in the series
This function should generalize the fibonacci() and the lucas(),
so that this function works for any first two numbers for a sum series.
Once generalized that way, sum_series(n, 0, 1) should be equivalent to fibonacci(n).
And sum_series(n, 2, 1) should be equivalent to lucas(n).
    sum_series(n, 3, 2) should generate another series with no specific name.
    The defaults are set to 0, 1, so if you don't pass in any values, you'll
    get the fibonacci series.
"""
if n < 0:
return None
if n == 0:
return n0
elif n == 1:
return n1
else:
return sum_series(n - 1, n0, n1) + sum_series(n - 2, n0, n1)
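# Quick sanity checks (added for illustration):
# >>> sum_series(5)        # Fibonacci: 0, 1, 1, 2, 3, 5
# 5
# >>> sum_series(5, 2, 1)  # Lucas: 2, 1, 3, 4, 7, 11
# 11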
|
abd9fcf4f74500b03c0c3fdd00ca2cb57553dcb5
| 590,452 |
import re
def begins_with_pattern(string, pattern):
"""
Check if a string begins with a pattern.
Args:
string (str)
pattern (str or regular expression)
Returns:
bool
"""
    return re.match(pattern, string) is not None
|
48c8198c2cc00d57d06201e1a1058b9d9a43fbf7
| 355,526 |
def parse_args(args):
"""
This parses the arguments and returns a tuple containing:
(args, command, command_args)
For example, "--config=bar start --with=baz" would return:
(['--config=bar'], 'start', ['--with=baz'])
"""
index = None
for arg_i, arg in enumerate(args):
if not arg.startswith('-'):
index = arg_i
break
# Unable to parse any arguments
if index is None:
return (args, None, [])
return (args[:index], args[index], args[(index + 1):])
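# The docstring example as a quick check (added for illustration):
# >>> parse_args(['--config=bar', 'start', '--with=baz'])
# (['--config=bar'], 'start', ['--with=baz'])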
|
6e02216d7ff1af7677e5df956a508d7b80af43dd
| 680,619 |
def lookup(source, *keys, **kw):
"""Successively looks up each key, returning the default keyword arg if a dead end is reached."""
fallback = kw.get('default')
try:
for key in keys:
source = source[key]
return source
except (KeyError, AttributeError, TypeError):
return fallback
|
a7fefc33fb92a4b9fc9ca1231a11a6b998407681
| 503,186 |
def is_attorney(descr):
""" If a string description is a attorney description """
return any(map(lambda s: s in descr.upper(), ('ATTORNEY','ATTNY')))
|
aa45fcada75e74abb9dee686d2b662f527f0f883
| 599,176 |
def list_of_elem(elem, length):
"""return a list of given length of given elements"""
    return [elem] * length
|
d72bdab16a541714b2a0a781a3077d40e309e9f7
| 10,872 |
def calc_total_income(row):
    """ Calculate the cumulative return
    :param row: dataframe row
    :return: cumulative return
    """
    # Scenario analysis: cumulative return = current market value + amount sold - cost
    return row["share"]*row["price"]+row["sell"]-row["cost"]
|
c73d7e5b0b34ada73b0fe92701c3fd40a483eeaa
| 191,792 |
def rgbfloat2rgbint(i):
"""
Convert a rgb float value (0.0-1.0) to a rgb integer value (0-255).
Args:
    i (float): Float value from 0.0 to 1.0
Returns:
int: Integer values from 0 to 255
"""
return int(255 * i)
|
cad7b5f8ee0227a9ef02d9b1ce7703562a1632a2
| 486,149 |
def discoverSubclasses(cls):
"""Returns a list of all classes derived from the the supplied base
class.
Implementation based on ubuntu's answer to related question on
StackOverflow [1]_.
Parameters
----------
cls: type
The base to search from
Returns
-------
list
List of all classes that uses `cls` directly or indirectly.
References
----------
..[1] "How can I find all subclasses of a given class in Python?"
http://stackoverflow.com/questions/3862310/how-can-i-find-all-subclasses-of-a-given-class-in-python
"""
return cls.__subclasses__() + [g for s in cls.__subclasses__() for
g in discoverSubclasses(s)]
|
ff406074ff5269a5ac7f5fb746460b6b0a01e109
| 146,423 |
def broadcast(dct):
"""Convenience function for broadcasting a dictionary of lists.
Parameters
----------
dct : dict
Input parameter ranges. All parameter ranges must have only 1 or N values.
Returns
-------
list
List of N dictionaries; the input parameters with N values are 'zipped' together
"""
N = 1
for k, v in dct.items():
if len(v) > N:
N = len(v)
ret = []
for i in range(N):
entry = {}
for k, v in dct.items():
if len(v) != N:
entry[k] = v[0]
else:
entry[k] = v[i]
ret.append(entry)
return ret
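# Example of the 'zip with broadcasting' behaviour (added for illustration):
# >>> broadcast({"a": [1, 2, 3], "b": [10]})
# [{'a': 1, 'b': 10}, {'a': 2, 'b': 10}, {'a': 3, 'b': 10}]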
|
85d8e32b0cf784b3975bd033b0a56d9df0641de1
| 577,671 |
import random
def random_text(text_length=32):
"""Produces random characters of specified length for use in password or text"""
    possible_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!#$%()*+-/:;<=>?@[\\]^_`{|}~'
text = ''
for _ in range(text_length):
text += random.SystemRandom().choice(possible_chars)
return text
|
fa2f60c7e8c461e4af1f5ba2a8886f1ef382ca0e
| 130,662 |
def merge_similarities(oldsims, newsims, clip=None):
"""
Merge two precomputed similarity lists, truncating the result to clip most similar items.
"""
if oldsims is None:
result = newsims or []
elif newsims is None:
result = oldsims
else:
result = sorted(oldsims + newsims, key=lambda item: -item[1])
if clip is not None:
result = result[:clip]
return result
|
02f243c4a9b3f0c7032f06f62b9ae1b94c6bfa84
| 110,846 |