content | sha1 | id
---|---|---|
def check_if_only_decoys(sequences):
"""
Check if the sequences to consolidate are composed only of decoys
"""
only_decoys = True
for sequence in sequences:
if 'decoy' not in sequence.split()[2]:
only_decoys = False
break
return only_decoys | aeb65a261bcea6db80cd7a81b566463ad3d0414f | 31,934 |
import logging
def set_up_logger(name, file_path, level=logging.INFO, verbose=True):
"""
Creates basic logger object that optionally also logs to command-line.
:param name: (string) Name of the logger. Should be called with __name__.
:param file_path: (string) File location of the output handler.
:param level: (logging.LEVEL) Specifies the logging level which is output.
:param verbose: (boolean) If set to True, will also log with the appropriate level to stdout.
:return: logging.Logger object with the specified properties.
"""
# create base logger
logger = logging.getLogger(name)
logger.setLevel(level)
# format the date accordingly
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s \t: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
# add handler for write to file
fh = logging.FileHandler(file_path)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
# add handler for stream output if wanted
if verbose:
sh = logging.StreamHandler()
sh.setLevel(level)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger | 05c74e21390f704f4e89332e24696ad4f10599a1 | 466,093 |
def to_from(arr):
"""Convert two elements list into dictionary 'to-from'.
"""
try:
return {'from':arr[0], 'to':arr[1]}
except IndexError:
return None | b9c632b2a57121cb094a0cdbf443d838d93130ac | 294,829 |
def cli(ctx):
"""Get the search tools available
Output:
dictionary containing the search tools and their metadata.
For example::
{
"sequence_search_tools": {
"blat_prot": {
"name": "Blat protein",
"search_class": "org.bbop.apollo.sequence.search.blat.BlatCommandLineProteinToNucleotide",
"params": "",
"search_exe": "/usr/local/bin/blat"
},
"blat_nuc": {
"name": "Blat nucleotide",
"search_class": "org.bbop.apollo.sequence.search.blat.BlatCommandLineNucleotideToNucleotide",
"params": "",
"search_exe": "/usr/local/bin/blat"
}
}
}
"""
return ctx.gi.annotations.get_search_tools() | c2e27d34147bdd7b33c4e8e76a0cfac6d0a1a678 | 507,866 |
def rowvec(vec):
""" Convert to row vector """
return vec.reshape(1, -1) | 4f4735d479ea0565513fb14cd76a4a2e8bc54c95 | 235,067 |
def get_nonzero_LV_coverage(dict_genes, LV_matrix):
"""
    This function counts the number of LVs that each
    gene is present in (i.e. has a nonzero contribution).
    Returns a dictionary mapping each gene label ("generic", "other")
    to a pandas Series of per-gene nonzero LV counts.
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
"""
dict_nonzero_coverage = {}
for gene_label, ls_genes in dict_genes.items():
LV_series = (LV_matrix.loc[ls_genes] != 0).sum(axis=1)
dict_nonzero_coverage[gene_label] = LV_series
return dict_nonzero_coverage | 20b257f2bccb7cb23b72b8a649942998b8041624 | 433,858 |
def image_dialogs(context, request, image=None, image_name_id='', landingpage=False,
deregister_form=None, snapshot_images_registered=0):
""" Modal dialogs for Image landing and detail page."""
return dict(
image=image,
image_name_id=image_name_id,
landingpage=landingpage,
deregister_form=deregister_form,
snapshot_images_registered=snapshot_images_registered,
) | 5eae78609ca832422c2fea61ad02f2f111df33f4 | 441,837 |
def read_file(filename):
""" Given a filename, read the entire thing into a string """
with open(filename, encoding='utf-8') as file:
return file.read() | 6e5a9f900dfe0d0ce71350ae8bfd1d69b1864e19 | 481,410 |
def get_oldest(fromlist):
"""
get_oldest(fromlist) where fromlist is a list of DataObjects
Get the oldest timestamp out of all the timestamps in the DataObject list.
"""
    # seed with the first timestamp from the first DataObject in the list
    oldest_timestamp = fromlist[0].data[0][1]
for obj in fromlist:
if obj.oldest_sample < oldest_timestamp:
oldest_timestamp = obj.oldest_sample
return oldest_timestamp | 0593c1a529e6d4191661d40a88b2d9b5467ef130 | 102,818 |
from datetime import datetime
def parse_date(date_str):
"""
Converts a date-time string into a datetime object.
"""
if len(date_str) > 19:
return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%f')
    else:
return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S') | 68e2089b24e8fca06b2bedb5df6a68f7630466f4 | 374,528 |
def _propertyset_is_map(ps):
"""Helper function to determine if the given PropertySet should be handled
like a map or like a sequence"""
return len(ps.keys) > 0 or len(ps.values) == 0 | b9136076dcfaa18fdfee481973aa6ae72549febc | 297,747 |
def _dtree_filter_comp(dtree_data,
filter_key,
bin_class_type):
"""
List comprehension filter helper function to filter
the data from the `get_tree_data` function output
Parameters
----------
dtree_data : dictionary
Summary dictionary output after calling `get_tree_data` on a
scikit learn decision tree object
filter_key : str
The specific variable from the summary dictionary
i.e. `dtree_data` which we want to filter based on
leaf class_names
    bin_class_type : int
Takes a {0,1} class-value depending on the class
to be filtered
Returns
-------
tree_data : list
Return a list containing specific tree metrics
from the input fitted Classifier object
"""
# Decision Tree values to filter
dtree_values = dtree_data[filter_key]
# Filter based on the specific value of the leaf node classes
leaf_node_classes = dtree_data['all_leaf_node_classes']
# perform the filtering and return list
return [i for i, j in zip(dtree_values,
leaf_node_classes)
if bin_class_type is None or j == bin_class_type] | b73584081332dd0eabda9109561ff9bcf96060d7 | 297,668 |
def get_lang_probability(lang_prob):
"""
Takes a string with the format lang:probability and returns
a tuple (lang, probability)
Args:
lang_prob: str
"""
lang, probability = lang_prob.split(":")
try:
probability = float(probability)
except Exception as e:
print("Cound not convert probability to float")
print(e)
return (lang, probability) | 0f29f42e42832c9f8a187e3e4cbc0c6dca806b60 | 233,331 |
import itertools
def is_it_a_partition(all_nodes, partitions):
""" test if partitions is a partition of all_nodes.
Parameters
----------
* all_nodes : list of nodes
* partitions : list of list of nodes
Returns
-------
    True if 'partitions' is a partition of all_nodes, False otherwise
"""
for nodes_subset in partitions:
for node in nodes_subset:
if node not in all_nodes:
return False
for nodes_subset1, nodes_subset2 in itertools.combinations(partitions, 2):
if len(set.intersection(set(nodes_subset1), set(nodes_subset2))) > 0:
return False
s = set()
for nodes_subset in partitions:
s = set.union(s, set(nodes_subset))
if s != set(all_nodes):
return False
return True | 6969554844ac4656c11d7989fa9061fbcb871b64 | 293,037 |
def elementWise(A, B, operation):
"""
    execute an operation element-wise and return the result
A and B are lists of lists (all lists of same lengths)
operation is a function of two arguments and one return value
"""
return [[operation(x, y)
for x, y in zip(rowA, rowB)]
for rowA, rowB in zip(A, B)] | 39e78ca7730bf8367daf3a55aeb617b2c0707a44 | 9,981 |
def tname(x):
"""-> name of type of x"""
return type(x).__name__ | 79f8cbd066e921c9172e6588bbe46d1873df9f24 | 325,082 |
def modify_tuple(obj, positions, new_values):
"""
Modify the tuple object at certain positions by certain value
>>> modify_tuple((1,2,3), [0,1,2], [3,2,1])
(3, 2, 1)
"""
assert isinstance(obj, tuple)
assert len(positions) == len(new_values)
alt = list(obj)
for pos, val in zip(positions, new_values):
alt[pos] = val
return tuple(alt) | a74c59f3dc2cb7d590ef59a1f838693eae0d624f | 157,582 |
def rwh_primes1(n):
# https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
""" Returns a list of primes < n """
sieve = [True] * (n//2)
for i in range(3, int(n**0.5)+1, 2):
if sieve[i//2]:
sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)
return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]] | 67a3de7e8b25b5ce77e1c3faa136a410f86cd042 | 419,652 |
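The sieve above roughly halves memory by tracking only odd numbers: sieve[i] stands for the odd number 2*i+1, which is why squares are marked starting at i*i//2 with stride i. A quick sanity check, assuming the function is in scope:
print(rwh_primes1(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]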
def revrange(a,b):
"""
Returns the tuple (b-1, b-2, ..., a)
Note that this tuple is the reverse of tuple(range(a,b))
Parameter a: the "start" of the range
Precondition: a is an int <= b
Parameter b: the "end" of the range
Precondition: b is an int >= a
"""
assert type(a) == int and a <= b
assert type(b) == int
    return tuple(range(b - 1, a - 1, -1)) | 28c0de37d354401f45db33f83d0397b6cde8ed80 | 98,829 |
import struct
def get_slicer_info(mod_file):
"""
Open an IMOD .mod file and retrieve the Slicer information
Args:
mod_file: The .mod file path
Returns: A list of Slicer point objects with keys {"angles", "coords"}
"""
results = []
with open(mod_file, "rb") as file:
token = file.read(4)
if token != b"IMOD":
print(
"ID of .mod file is not 'IMOD'. This does not seem to be an IMOD MOD file!"
)
exit(1)
# Read past rest of ID and file header
file.read(236)
while token != b"IEOF":
file.seek(-3, 1)
token = file.read(4)
if token == b"SLAN":
# Read past SLAN object size and time
file.read(8)
angles = struct.unpack(">" + ("f" * 3), file.read(4 * 3))
xyz = struct.unpack(">" + ("f" * 3), file.read(4 * 3))
results.append({"angles": angles, "coords": xyz})
file.read(32)
# Read forward a little so next iteration of while loop starts at end of SLAN object
file.read(3)
elif token == b"OBJT":
            # Objects are 176 bytes; skip past them to make reading faster
file.read(176)
# Read forward a little so next iteration of while loop starts at end of object
file.read(3)
if len(results) == 0:
print("Reached end of MOD file without finding slicer angles!")
exit(1)
return results | 522278eceab40d7741b22de63d01022266eaeeb4 | 606,006 |
def _convert_track(track, state_types, media, attribute_mapping, localizations):
""" Converts a Tator Native track into a State spec.
"""
spec = {'type': state_types['Localization'].id,
'media_id': media.id,
'frame': int(track['frame_added'])}
spec['localization_ids'] = localizations[int(track['id'])]
spec['media_ids'] = [media.id]
for src_name in attribute_mapping:
if src_name in track:
spec[attribute_mapping[src_name]] = track[src_name]
return spec | 017d1f5620d67ddaf70b3f3cf97fd318fa8bae73 | 519,342 |
def R_from_r(r):
"""
Calculate reflected power R from the reflection amplitude r
of the Fresnel equations.
Parameters
----------
r : array like
Fresnel reflection coefficients
Returns
-------
R : array like
Reflectivity
"""
return abs(r)**2 | c3ad6ef150008248b0c0fb00f11215a354e6883e | 428,450 |
def format_(string):
"""dos2unix and add newline to end if missing."""
string = string.replace('\r\n', '\n').replace('\r', '\n')
if not string.endswith('\n'):
string = string + '\n'
return string | 1579554445aa0b13cd8f445adecf6e789cb875c4 | 527,089 |
def _urpc_test_func_5(arg_types, args):
"""!
@brief u-RPC variable signature test function.
@param arg_types Types of u-RPC arguments
@param args u-RPC arguments
@return Argument types and arguments without any change
"""
return arg_types, args | 9da9e0a149a4b46d975ea9016532b5f15c77b4cc | 135,976 |
from typing import Iterable
def all_elements_are_instances(iterable: Iterable, Class) -> bool:
"""Returns ``True`` if all elements of iterable are instances of Class.
False otherwise.
"""
    return all(isinstance(e, Class) for e in iterable) | 747e4b518789a7843fb46f37a4a473706ce8fd63 | 57,765 |
def identifier(value):
"""An identifier."""
return value | 54e93bb41f893e85715969264062d0b546c31d6d | 399,469 |
import math
def calculate_distance(p1, p2):
"""
Calculate distance between two points
param p1: tuple (x,y) point1
param p2: tuple (x,y) point2
return: distance between two points
"""
x1, y1 = p1
x2, y2 = p2
d = math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
return d | 756b609a91e17299eb879e27e83cd663800e46dd | 4,528 |
def minimum_absolute_difference(arr):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/minimum-absolute-difference-in-an-array/problem
Given an array of integers, find and print the minimum absolute difference between any two elements in the array.
Solve:
Sort the array, and then compare the different between each two adjacent values. After sorting, we know that
the minimum absolute difference has to be between two values that are stored sequentially in the list so we
simply find the smallest difference and return that
Args:
arr: Array of integers to check
Returns:
int: The minimum absolute difference between two elements of the array
"""
arr.sort()
min_diff = arr[-1] - arr[0]
for i in range(len(arr)-1):
if arr[i+1] - arr[i] < min_diff:
min_diff = arr[i+1] - arr[i]
return min_diff | 00dc3ce179282b669407ea3a94fea07538a404d9 | 696,686 |
def ufmt(base, n):
"""return a string representing a unit to a power n."""
if n == 0: return ''
if n == 1: return '-'+base
if n == -1: return '/'+base
if n > 0: return '-'+base+str(n)
if n < 0: return '/'+base+str(-n) | 90a478eb6c649d423cc254d3b96c6afadb5dd8f3 | 480,176 |
def time_overlap(ts0,ts1,valid=True):
"""Check for overlapping time coverage between series
Returns a tuple of start and end of overlapping periods. Only considers
the time stamps of the start/end, possibly ignoring NaNs at the beginning
if valid=True, does not check for actual time stamp alignment
"""
if valid:
start = max(ts0.first_valid_index(),ts1.first_valid_index())
end = min(ts0.last_valid_index(),ts1.last_valid_index())
else:
start = max(ts0.index[0],ts1.index[0])
end = min(ts0.index[-1], ts1.index[-1])
return (start,end) if end > start else None | 58fa5220cbc0c016ae258343f86b98889a7ed17f | 439,518 |
def public_key_path(config):
"""
Path where this framework saves the public test key.
"""
return "%s/%s" % (config.get_state_dir(), "id_rsa_test.pub") | 1876da6fcd398a5445023f815617465060ae2cee | 413,245 |
def tuple_as_atom(atom:tuple) -> str:
"""Return readable version of given atom.
>>> tuple_as_atom(('a', (3,)))
'a(3)'
>>> tuple_as_atom(('bcd', ('bcd',12)))
'bcd(bcd,12)'
"""
assert len(atom) == 2
return '{}({})'.format(atom[0], ','.join(map(str, atom[1]))) | 5c18f34733d839865eef35509f95c2d4d198a903 | 12,465 |
def get_num_ways(N,Coins):
"""
This fuction will return the number of ways to make the amount with those denominations.
https://www.youtube.com/watch?v=jgiZlGzXMBw
https://www.geeksforgeeks.org/understanding-the-coin-change-problem-with-dynamic-programming/
Args:
N: an amount of money
Coins: a list of coin denominations
Returns:
the number of ways to make the amount of money with coins of the available denominations.
    Example: for amount=4 (4¢) and denominations=[1,2,3] (1¢, 2¢ and 3¢),
    your program would output 4—the number of ways to make 4¢ with those denominations:
1¢, 1¢, 1¢, 1¢
1¢, 1¢, 2¢
1¢, 3¢
2¢, 2¢
To test this function:
>>> get_num_ways(4,[1,2,3])
4
"""
    # ways[j] holds the number of ways to make amount j, for j in 0..N
    ways = [0] * (N + 1)
# if the amount is zero, there is 1 way to make 0 with 0 coins!
ways[0]=1
# traverse through the coins
for i in range(len(Coins)):
# compare coin value with the amount, which is also the index of the array ways
for j in range(len(ways)):
# update the ways array if the coin value is less than the amount
if(Coins[i] <= j):
ways[j] += ways[int((j-Coins[i]))]
# return the number of ways at the Nth position
return ways[N] | bafc37b9a3e457e30b49ddecdd316310b4a10a01 | 336,682 |
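A worked trace of the bottom-up table for the doctest case, assuming get_num_ways is in scope; each pass folds one denomination into every reachable amount:
# start:        ways = [1, 0, 0, 0, 0]
# after coin 1: ways = [1, 1, 1, 1, 1]
# after coin 2: ways = [1, 1, 2, 2, 3]
# after coin 3: ways = [1, 1, 2, 3, 4]
print(get_num_ways(4, [1, 2, 3]))  # 4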
def fixture_pg_test_orm(pg_test_db):
"""
Gets the test Orm handle for Postgres.
Returns:
(PostgresOrm): The test Postgres Orm object.
"""
# This also tests init works and Postgres is properly integrated
return pg_test_db._orm | dbd9bedee752c41e840c254aee9ecf5ba5727760 | 465,478 |
import warnings
def valid_first_cell(sheet, data):
"""Return boolean if first cell in worksheet is not ``skip``."""
try:
return hasattr(data[0][0], "lower") and data[0][0].lower() != "skip"
    except Exception:
warnings.warn("Invalid first cell (A1) in worksheet {}".format(sheet))
return False | 89bdfb12f76d62f6df6d04d2856f10d09ec8d83c | 338,912 |
import torch
def dot_products(tensor: torch.Tensor) -> torch.Tensor:
"""
Computes the dot product between each pair of rows in the given Tensor.
:param tensor: the Tensor to compute the dot products between the rows of. Size [N, D].
:return: the computed dot products, where result[i, j] = tensor[i, :] * tensor[j, :]. Size [N, N].
"""
products = tensor[:, None, :] * tensor[None, :, :] # [N, N, D]
return torch.sum(products, dim=2) | 860f3051b04a3b12c2884ed9ef51c50cb390ab91 | 442,465 |
def snap_value(input, snap_value):
"""
Returns snap value given an input and a base snap value
:param input: float
:param snap_value: float
:return: float
"""
    return round(float(input) / snap_value) * snap_value | 1b2f967ecca2a151c5229cbb9eb57d6c67853925 | 22,680 |
def get_account_key_change_count(self) -> dict:
"""Get number of times account key has been changed.
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - spPortal
- GET
- /spPortal/account/key/changeCount
.. note::
If never been changed, will return an HTTP 500 with no content.
:return: Returns dictionary of account key changes \n
* keyword **count** (`int`): Integer number of times account key
has been changed
:rtype: dict
"""
return self._get("/spPortal/account/key/changeCount") | 101a88c1537ee459427bfbd5a6343489bc134f3a | 440,127 |
import aiohttp
def is_image(r: aiohttp.ClientResponse):
""" Checks whether the supplied URL is a proper image """
return r.content_type.startswith('image') | 60454772ebfd9cdae4d54e601388afefdd479b02 | 411,765 |
import random
def get_best_move(board, scores):
"""
returns the best move tuple (row, col) that has the highest score in scores
"""
max_squares = []
empty_squares = board.get_empty_squares()
# find max score
max_score = -999
for square in empty_squares:
if scores[square[0]][square[1]] >= max_score:
max_score = scores[square[0]][square[1]]
# find empty squares with max scores
for square in empty_squares:
if scores[square[0]][square[1]] == max_score:
max_squares.append((square[0], square[1]))
return random.choice(max_squares) | 35424b7cad953693bb96eac47a0b4bed4714a72f | 163,205 |
import torch
def compute_accuracy_multitask(output, target):
"""
Calculates the classification accuracy.
:param target: Tensor of correct labels of size [batch_size, numClasses]
:param output: Predicted scores (logits) by the model.
It should have the same dimensions as target
:return: accuracy: average accuracy over the samples of the current batch for each condition
"""
num_samples = target.size(0)
correct_pred = target.eq(output.round().long())
accuracy = torch.sum(correct_pred, dim=0)
return accuracy.cpu().numpy() * (100. / num_samples) | b2549c12ef2b6e63eccd1c42a344b2c9a219ba61 | 458,763 |
def flip_ctrlpts_u(ctrlpts, size_u, size_v):
""" Flips a list of 1-dimensional control points in u-row order to v-row order.
**u-row order**: each row corresponds to a list of u values (in 2-dimensions, an array of [v][u])
**v-row order**: each row corresponds to a list of v values (in 2-dimensions, an array of [u][v])
:param ctrlpts: control points in u-row order
:type ctrlpts: list, tuple
:param size_u: size in u-direction
:type size_u: int
:param size_v: size in v-direction
:type size_v: int
:return: control points in v-row order
:rtype: list
"""
new_ctrlpts = []
for i in range(0, size_u):
for j in range(0, size_v):
temp = [float(c) for c in ctrlpts[i + (j * size_u)]]
new_ctrlpts.append(temp)
return new_ctrlpts | 6c28a487b05de5490d244a04d19d25c575ebdd75 | 689,472 |
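A toy example of the reindexing above for a 2x3 grid, where each control point is written as [u, v] so the reordering is easy to see; the coordinates are illustrative only:
ctrlpts = [[0, 0], [1, 0], [0, 1], [1, 1], [0, 2], [1, 2]]  # u-row order
flipped = flip_ctrlpts_u(ctrlpts, size_u=2, size_v=3)
print(flipped)
# [[0.0, 0.0], [0.0, 1.0], [0.0, 2.0], [1.0, 0.0], [1.0, 1.0], [1.0, 2.0]]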
def parse_phonemes(phonemes):
"""Parse mimic phoneme string into a list of phone, duration pairs.
Arguments
phonemes (bytes): phoneme output from mimic
Returns:
(list) list of phoneme duration pairs
"""
phon_str = phonemes.decode()
pairs = phon_str.split(' ')
return [pair.split(':') for pair in pairs if ':' in pair] | 0d78d31fe369e193b18478119707e8b9de6159fd | 56,151 |
from typing import Mapping
from typing import Any
def _get_headers_from_http_request_headers(headers: "Mapping[str, Any]", key: str):
"""Return headers that matches this key.
Must comply to opentelemetry.context.propagation.httptextformat.Getter:
Getter = typing.Callable[[_T, str], typing.List[str]]
"""
return [headers.get(key, "")] | cbb49a1fc062bff1685615f4cc481bc35729490c | 291,919 |
def binarize_ic50(ic50, ic50_threshold):
"""
Binarize ic50 based on a threshold
"""
if ic50 <= ic50_threshold:
return 1
return 0 | d1512f790dfad4fb3f85f4757184ceb7d21fc56a | 23,725 |
import pathlib
import csv
from typing import OrderedDict
def read_csv_file_with_header_to_hashed_odict_of_odicts(path_csv_file: pathlib.Path,
hash_by_fieldname: str,
encoding: str = "ISO-8859-1",
delimiter: str = ";",
quotechar: str = '"',
quoting: int = csv.QUOTE_MINIMAL) -> 'OrderedDict[str, OrderedDict[str, str]]':
"""
reads the csv file into an ordered dict of ordered dicts
returns: {'indexfield':{fieldname1:value, fieldname2:value}, 'indexfield2':{fieldname1:value, fieldname2:value}}
>>> # setup
>>> test_directory = pathlib.Path(__file__).absolute().parent.parent / 'tests'
>>> testfile1 = test_directory / '2018-04-26_alle_Navision_Artikel.csv'
>>> testfile2 = test_directory / '0001_aktive_preis_qty.csv'
>>> csv_file_broken_less_fields_than_header = test_directory / 'csv_file_broken_less_fields_than_header.csv'
>>> r_csv = read_csv_file_with_header_to_hashed_odict_of_odicts
>>> # Test Fieldname for hashing not existent in the header
>>> r_csv(path_csv_file=testfile1, hash_by_fieldname='not_existing') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Field "not_existing" is not available, or the csv file does not have header information
>>> # Test OK, Fieldname for hashing is unique
>>> r_csv(path_csv_file=testfile1, hash_by_fieldname='Nr.') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
OrderedDict([('HUB025', OrderedDict([('HTMLpublish', 'Nein'), ('Artikel Nicht Verfügbar', 'Nein'), ('Sperre Angebot', 'Nein'), ...
>>> # Test Fieldname for hashing is not unique
>>> r_csv(path_csv_file=testfile2, hash_by_fieldname='CustomLabel') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Index is not unique, field: "CustomLabel", value: "HUB179"
>>> # Test Number of Fields is smaller than the header
>>> r_csv(path_csv_file=csv_file_broken_less_fields_than_header, hash_by_fieldname='a') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Row has length 3 instead of 4 : "['1', '2', '3']"
"""
with open(str(path_csv_file), 'r', encoding=encoding) as f_csv_file:
is_first_row = True
fieldnames = []
index_of_hash_field = 0
        number_of_fields = 0
dict_result = OrderedDict()
my_csv_reader = csv.reader(f_csv_file, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
for row in my_csv_reader:
if is_first_row:
is_first_row = False
fieldnames = row
if hash_by_fieldname not in fieldnames:
raise ValueError('Field "{}" is not available, or the csv file does not have header information'.format(hash_by_fieldname))
index_of_hash_field = fieldnames.index(hash_by_fieldname)
                number_of_fields = len(fieldnames)
continue
            if len(row) != number_of_fields:
                raise ValueError('Row has length {} instead of {} : "{}"'.format(len(row), number_of_fields, row))
dict_row = OrderedDict()
for index, value in enumerate(row):
dict_row[fieldnames[index]] = value
index_value = row[index_of_hash_field]
if index_value not in dict_result:
dict_result[index_value] = dict_row
else:
raise ValueError('Index is not unique, field: "{}", value: "{}"'.format(hash_by_fieldname, index_value))
return dict_result | 153b80c49fc2b07de3886d70365dbdd92f252de6 | 294,104 |
def info2lists(info, in_place=False):
"""
Return info with:
1) `packages` dict replaced by a 'packages' list with indexes removed
2) `releases` dict replaced by a 'releases' list with indexes removed
info2list(info2dicts(info)) == info
"""
if 'packages' not in info and 'releases' not in info:
return info
if in_place:
info_lists = info
else:
info_lists = info.copy()
packages = info.get('packages')
if packages:
info_lists['packages'] = list(packages.values())
releases = info.get('releases')
if releases:
info_lists['releases'] = list(releases.values())
return info_lists | 313fda757d386332e16a0a91bb4408fe3cb8c070 | 706,560 |
from functools import reduce
from operator import add
def sum(xs):
"""
A "sum" implementation that can take advantage of operator overloading
"""
return reduce(add, xs) | 5e436016b96e7a858b7be4fb0ee481d66db17b1c | 544,910 |
def remaining_balance(loan: float, interest_rate: float, years: int, payments: int) -> float:
"""
Calculate the remaining loan balance
:param loan: initial loan amount
:param interest_rate: interest rate
:param years: loan term in years
:param payments: total number of payments made
    :return: remaining balance
"""
r = interest_rate / 1200 # monthly interest rate
m = r + 1
n = years * 12
remaining = loan * (((m ** n) - (m ** payments)) / ((m ** n) - 1))
return remaining | a02501e5b859cbd7c5efe6393dca5003197c30b0 | 77,088 |
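A usage sketch with illustrative numbers (a 30-year loan at 4.5% after 5 years of monthly payments); the exact figure depends on floating-point rounding:
balance = remaining_balance(200000.0, 4.5, 30, 60)
print(round(balance, 2))  # roughly 182,000 of the 200,000 principal remains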
def format_document(document):
"""Formats the given document
Args:
document (obj): The json object containing the document metadata.
Returns:
obj: the formatted document object.
"""
languages = [lang.strip().capitalize() for lang in document["_source"]["languages"]] if document["_source"]["languages"] else None
return {
"score": document["_score"],
"document_id": document["_source"]["document_id"],
"title": document["_source"]["title"],
"abstract": document["_source"]["abstract"],
"link": document["_source"]["link"],
"date": document["_source"]["date"],
"celex": document["_source"]["celex"],
"keywords": document["_source"]["keywords"],
"source": document["_source"]["source"],
"informea": document["_source"]["informea"],
"languages": languages,
"subjects": document["_source"]["subjects"],
"areas": document["_source"]["areas"]
} | d208c161903ea249c6b781f00f9d4cf0e6c0d62d | 340,898 |
def convert_enum_list_to_delimited_string(enumlist, delimiter=','):
"""
Converts a list of enums into a delimited string using the enum values
E.g., [PlayerActionEnum.FLYBALL, PlayerActionEnum.HOMERUN, PlayerActionEnum.WALK] = 'FLYBALL,HOMERUN,WALK'
:param enumlist:
:param delimiter:
:return:
"""
return delimiter.join([listitem.value for listitem in enumlist]) | 5fb4064b27d8050c11de89e4fca4c9d849d1c1be | 381,512 |
import itertools
import fnmatch
def ignore_rules_for_url(spider, url):
"""
Returns a list of ignore rules from the given spider,
that are relevant to the given URL.
"""
ignore_rules = getattr(spider, "pa11y_ignore_rules", {}) or {}
return itertools.chain.from_iterable(
rule_list
for url_glob, rule_list
in ignore_rules.items()
if fnmatch.fnmatch(url, url_glob)
) | 1f225b175cf412df063e161ef5f4c824a93e65fc | 646,736 |
import re
def unsub_emails(unsub_list, email_list):
"""
    Takes the list of Plex user email addresses and filters out the members of the
    unsubscribe list.
"""
    excludes = re.split(r",\s*", unsub_list)
    email_list = list(set(email_list) - set(excludes))
return email_list | 5acdc6472fad2b75244234c646612d2b0f38c727 | 663,793 |
import torch
def validate(metric,
net,
val_data,
use_cuda):
"""
Core validation/testing routine.
Parameters:
----------
metric : EvalMetric
Metric object instance.
net : Module
Model.
val_data : DataLoader
Data loader.
use_cuda : bool
Whether to use CUDA.
Returns
-------
EvalMetric
Metric object instance.
"""
net.eval()
metric.reset()
with torch.no_grad():
for data, target in val_data:
if use_cuda:
target = target.cuda(non_blocking=True)
output = net(data)
metric.update(target, output)
return metric | e9612e861cc82c03eb3d0121fa9915b20bdd90de | 464,930 |
def o_to_matsubara_idx_f(o):
"""
Convert index in "o" convension to fermionic Matsubara index
Parameters
----------
    o : int
        2*n + 1, an odd index in the "o" convention
    Returns
    -------
    n : int
        The corresponding fermionic Matsubara index
"""
assert o%2 == 1
return int((o-1)/2) | b1626d94fb9ad00316338242f704e72d6dd62e0e | 154,836 |
import itertools
def combinations_of_all_lengths(it):
"""
Return an iterable with all possible combinations of items from ``it``:
>>> for comb in combinations_of_all_lengths('ABC'):
... print("".join(comb))
A
B
C
AB
AC
BC
ABC
"""
return itertools.chain(
*(itertools.combinations(it, num+1) for num in range(len(it)))
) | e2700c4ea6f35c9aa39b6b0e6674ccfeb7394eed | 265,065 |
def wait_for_event(bidi_session, event_loop):
"""Wait until the BiDi session emits an event and resolve the event data."""
def wait_for_event(event_name: str):
future = event_loop.create_future()
async def on_event(method, data):
remove_listener()
future.set_result(data)
remove_listener = bidi_session.add_event_listener(event_name, on_event)
return future
return wait_for_event | 45fd51e0271b2ed8af010f18c3835e57a3979669 | 94,954 |
import yaml
def configMap(name, **kwargs):
"""
>>> import nuvolaris.kube as kube, nuvolaris.testutil as tu
>>> tu.grep(kube.configMap("hello", value="world"), "kind:|name:|value:", sort=True)
kind: ConfigMap
name: hello
value: world
>>> tu.grep(kube.configMap("hello", **{"file.js":"function", "file.py": "def"}), "file.", sort=True)
file.js: function
file.py: def
"""
out = yaml.safe_load("""apiVersion: v1
kind: ConfigMap
metadata:
name: %s
data: {}
"""% name)
for key, value in kwargs.items():
out['data'][key] = value
return yaml.dump(out) | fdae8ac67a068946cf320aeb49b3753e6c26487e | 57,665 |
def strategy_cheap(cookies, cps, history, time_left, build_info):
"""
Always buy the cheapest item you can afford in the time left.
"""
items = build_info.build_items()
costs = {}
for item in items:
cost = build_info.get_cost(item)
costs[cost] = item
cheapest = min(costs)
if cheapest > cps * time_left:
return None
return costs[cheapest] | 725512e43f8aa0c8222ffd0e5021226ae131adf4 | 593,966 |
def get_author_book_ratings(book_tr):
"""Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: ratings ``<span>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... ratings_span = get_author_book_ratings(book_tr)
... print(ratings_span.contents[-1])
4.55 avg rating — 2,414 ratings
3.77 avg rating — 1,689 ratings
4.28 avg rating — 892 ratings
4.54 avg rating — 490 ratings
...
"""
return book_tr.find('span', attrs={'class': 'minirating'}) | 8bfbfafb0f42db0a7e49f03a4316ef7a0bacc7dc | 248,648 |
def get_2comp(val_int, val_size=16):
"""Get the 2's complement of Python int val_int
:param val_int: int value to apply 2's complement
:type val_int: int
:param val_size: bit size of int value (word = 16, long = 32) (optional)
:type val_size: int
:returns: 2's complement result
:rtype: int
"""
    # test MSBit (1 for negative)
    if val_int & (1 << (val_size - 1)):
        # do complement
        val_int -= 1 << val_size
return val_int | ca558b938d5ea9027adcdf4379dd403f1eb3965d | 557,657 |
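A few spot checks of the two's-complement helper above, assuming it is in scope:
print(get_2comp(0x7FFF))          # 32767  (MSB clear, unchanged)
print(get_2comp(0x8000))          # -32768 (16-bit word)
print(get_2comp(0xFFFF))          # -1
print(get_2comp(0xFFFFFFFF, 32))  # -1 as a 32-bit long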
def get_header_version(args, dat_root, parser):
"""Return the version from the <dat_root> header version, or the -header_version argument"""
if not args.header_version:
header_version = dat_root.find('header').find('version').text
if not header_version:
print('Version not found in ClrMame Pro XML dat. Please specify with -header_version')
print('')
parser.print_help()
parser.exit()
raise SystemExit()
else:
header_version = args.header_version
return header_version | 571806b8b143d80dd8fa6116cbe5fef8187a781e | 274,569 |
import configparser
def retrieve_config(arg_list, default_config):
""" Retrieve a dictionary from a config file """
if len(arg_list) > 1:
config_file = arg_list[-1]
else:
config_file = default_config
# Convert config file
config = configparser.ConfigParser()
config.read(config_file)
return {s: dict(config.items(s)) for s in config.sections()} | 21c805754af372026c9fdc6e9ad8c7b34ae386a5 | 355,248 |
def BrokenLinksChecks(input_api, output_api): # pragma: no cover
"""Complains if there are broken committed symlinks."""
stdout = input_api.subprocess.check_output(['git', 'ls-files'])
files = stdout.splitlines()
output = []
infra_root = input_api.PresubmitLocalPath()
for filename in files:
fullname = input_api.os_path.join(infra_root, filename)
if (input_api.os_path.lexists(fullname)
and not input_api.os_path.exists(fullname)):
output.append(output_api.PresubmitError('Broken symbolic link: %s'
% filename))
return output | 2818f5a9952b47a7aaf5a177394297b84511831a | 292,678 |
def return_polynomial_factory(net_principal, returns, return_days):
"""Factory for a callable with point evaluation of the return polynomial.
The return polynomial for a loan with net principal :math:`s_\circ`,
returns :math:`r_1,r_2,\ldots,r_k` to be paid :math:`n_1,n_2,\ldots,n_k`
days after the loan is granted, respectively, is given by
.. math::
        f(c) = s_\circ (1 + c)^{n_k} - r_1 (1 + c)^{n_k-n_1} - \cdots
- r_{k-1} (1 + c)^{n_k-n_{k-1}} - r_k.
    This function builds and returns a callable implementing such a polynomial.
Parameters
----------
net_principal : float, required
The net principal of a grossed up loan.
returns : list of floats, required
Due payments that completely pay off the grossed up principal when
respectively applied for the given return days.
return_days : list of ints, required
List with the number of days since the taxable event (which is usually
a "loan granted" event) happened.
Returns
-------
Callable
Python callable implementing the return polynomial for the given
parameters
"""
coefficients_vec = [net_principal] + [-1 * r for r in returns]
def return_polynomial(irr_):
powers_vec = [
(1 + irr_) ** (return_days[-1] - n) for n in [0] + return_days
]
return sum(
coef * power
for coef, power in zip(coefficients_vec, powers_vec)
)
return return_polynomial | d1eca5e2e6c4ce5e4460510900cc5819bf268f54 | 246,352 |
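A usage sketch: for a principal of 1000 repaid by two returns of 600 at days 30 and 60, the polynomial's sign change brackets the daily internal rate of return; the numbers are illustrative only:
f = return_polynomial_factory(1000.0, [600.0, 600.0], [30, 60])
print(f(0.0))   # -200.0: a zero rate undershoots the returns
print(f(0.01))  # positive, so the daily IRR lies between 0 and 1%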
def compute_cell_next_state(current, neighbours):
"""Return the next state of the cell on position (i, j) in alive_matrix.
Parameters
----------
current: int
The state of the cell, 1 or 0 (live or dead)
neighbours: array
The number of alive cells around the cell.
Returns
-------
new_state: int
The new state of the cell, 1 or 0 (live or dead)
"""
new_state = 0
if current > 0:
if neighbours == 2 or neighbours == 3:
new_state = 1
elif neighbours == 3:
new_state = 1
return new_state | 39fb17954e65d7467dd32d26fd539fd394331fcb | 507,908 |
def bytes2str(bstr):
"""Convert a bytes into string."""
if type(bstr) is bytes:
return bstr.decode('utf-8')
elif type(bstr) is str:
return bstr
else:
raise TypeError(
bstr, ' should be a bytes or str but got ', type(bstr), ' instead') | 7bb33c3e73907e35a258e7af3921461e1657d4f7 | 506,044 |
def get_profiles(argv):
"""Method gets module profiles
Args:
argv (list): input arguments
Returns:
list: profiles
"""
profiles = ['full']
for arg in argv:
if ('--profile' in arg):
profiles = arg.split('=')[1].split(',')
argv.remove(arg)
break
return profiles | b4f4dbaa4c68d1c0ba9b1f57f5e2471fae4e2445 | 134,426 |
def __top_frond_right(dfs_data):
"""Returns the frond at the top of the RF stack."""
return dfs_data['RF'][-1] | faf59ef6af0ea2bd4045fa7de46b67e4ff3e9410 | 42,971 |
def ses(path, b):
"""
Returns an edit script for a given match grid path.
The edit script transforms sequence A of the match grid
    into sequence B via deletions ("D", index) and insertions
("I", A index, B value).
"""
patch = []
for i in range(len(path) - 1):
x, y = path[i]
nx, ny = path[i + 1]
dx, dy = nx - x, ny - y
if dx == 1 and dy == 1:
pass #match
elif dx == 1:
patch.append(("D", x))
else: #dy == 1:
patch.append(("I", x, b[y]))
return patch | 47a62e75bb003478ca231c73b2fd0e0370a0aaf8 | 275,344 |
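A tiny example: the path below walks a match grid for A="AB" versus B="CB" (a deletion, an insertion, then a match); in practice the path would come from an LCS/Myers-style traceback, so this hand-built one is illustrative only:
path = [(0, 0), (1, 0), (1, 1), (2, 2)]
print(ses(path, "CB"))  # [('D', 0), ('I', 1, 'C')]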
import re
def IsGCESlave(slavename):
"""Returns (bool): Whether |slavename| is hosted on GCE.
Args:
slavename: The hostname of the slave.
"""
# The "-c2" suffix indicates that a builder is in GCE (as opposed to
# in the Chrome Golo, which has a -m2 suffix).
return bool(re.search(r'-c\d+$', slavename)) | 613569120fd4390608ba6173ef3686bb1ef1b01f | 638,400 |
def TTD_TTDS_rule(M, i, j, w, t):
"""
Coordinate TourTypeDayShift and TourTypeDay variables for each day of each week
:param M: Model
:param i: window
:param j: day
:param w: week
:param t: tour type
:return: Constraint rule
"""
return sum(M.TourTypeDayShift[i, t, k, j, w]
for k in M.tt_length_x[t] if (i, t, k, j) in M.okTourTypeDayShift) == M.TourTypeDay[i, t, j, w] | a7e03543d2ca4e782ca4b12e136eeb65c6968efb | 569,959 |
def name_matches(name,pattern):
"""Simple wildcard matching of project and sample names
Matching options are:
- exact match of a single name e.g. pattern 'PJB' matches 'PJB'
- match start of a name using trailing '*' e.g. pattern 'PJ*' matches
'PJB','PJBriggs' etc
- match using multiple patterns by separating with comma e.g. pattern
'PJB,IJD' matches 'PJB' or 'IJD'. Subpatterns can include trailing
'*' character to match more names.
Arguments
name: text to match against pattern
pattern: simple 'glob'-like pattern to match against
Returns
True if name matches pattern; False otherwise.
"""
    for subpattern in pattern.split(','):
        if not subpattern.endswith('*'):
            # Exact match required
            if name == subpattern:
                return True
        elif name.startswith(subpattern.rstrip('*')):
            return True
    return False | eb8ceead45cc0766af0aec92ca02b37f387c3311 | 26,177 |
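A few spot checks of the matching rules above; the final fall-through return matters for multi-pattern inputs like 'PJ*,IJ*':
print(name_matches('PJBriggs', 'PJ*'))      # True: wildcard prefix
print(name_matches('IJD', 'PJB,IJD'))       # True: second exact subpattern
print(name_matches('IJDavies', 'PJ*,IJ*'))  # True: second wildcard subpattern
print(name_matches('KLM', 'PJ*,IJ*'))       # False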
from typing import List
from typing import Any
def n_chunks(items: List[Any], n_groups: int) -> List[List[Any]]:
"""Returns a list of `n_groups` slices of the original list, guaranteed to
contain all of the original items.
"""
return [items[i::n_groups] for i in range(n_groups)] | 16d0336394b6bff220e1063da0221dae1072e39c | 410,206 |
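Note the chunks are strided rather than contiguous, so sizes differ by at most one; a quick demo assuming the function is in scope:
print(n_chunks([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 4, 7], [2, 5], [3, 6]]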
def translate_attribute(metasra_attribute: dict) -> dict:
"""Translate a MetaSRA attribute that looks like this:
{
"property_id": "EFO:0000246",
"unit_id": "missing",
"value": 31.0
}
into our representation documented here:
https://github.com/AlexsLemonade/refinebio/issues/2127#issuecomment-591651893
"""
attribute = {"value": metasra_attribute["value"]}
if metasra_attribute["unit_id"] != "missing":
attribute["unit"] = metasra_attribute["unit_id"]
return {metasra_attribute["property_id"]: attribute} | 4471581dac43916991f9dfcae1df7b1113bc062f | 255,788 |
def tweets_to_text_strings(tweets):
"""Converts list of tweets to list of tweet text strings."""
return [tweet.GetText() for tweet in tweets] | 5f277ad0b95a0bbef9e370d2e54e9ed146c263b5 | 438,603 |
from typing import OrderedDict
def vanilla_sgd(params, grads, learning_rate):
"""
Update rules for vanilla SGD. Based on the update functions from
Lasagne (https://github.com/Lasagne/Lasagne)
The update is computed as
param := param - learning_rate * gradient
Parameters
----------
params : list of shared varaibles that will be updated
grads : list of symbolic expressions that produce the gradients
learning_rate : step size
Returns
-------
A dictionary mapping each parameter in params to their update expression
"""
updates = OrderedDict()
for param, grad in zip(params, grads):
updates[param] = param - learning_rate * grad
return updates | b1622602dccc10673b54788d09bac554cb5dec3e | 516,526 |
def is_eligible_file( filename ):
""" Based on the file name, decide whether the file is likely to contain image data """
eligible = False
    if filename.endswith(('.png', '.jpg')):
        eligible = True
return eligible | 75988428ce9078f8de1f95c97dba4f2e77bdbe3b | 22,412 |
def encrypt(plaintext, rails):
"""
Rail fence cipher. Encrypts plaintext by given number of rails.
:param plaintext: plaintext to encrypt.
:param rails: number of rails to use to encrypt.
:returns: encrypted plaintext (ciphertext).
See https://en.wikipedia.org/wiki/Rail_fence_cipher
Example:
>>> encrypt("DANGEROUS", 2)
'DNEOS AGRU'
>>> encrypt("DANGEROUS", 3)
'DGO AEU NRS'
"""
data = [""] * rails
for index, character in enumerate(plaintext):
data[index % rails] += character
return " ".join(data) | 57b4cf3b71d8ef33f041326d90032beda4da48a9 | 109,339 |
import re
def alphanum_sort(l):
"""Sort the given list in ascending order the way that humans expect.
    i.e. for a list l containing `["5", "24", "10"]`, the function sorts it as
`["5", "10", "24"]` instead of `["10", "24", "5"]` as would Python List
sort() method.
"""
# key to use for the sort
def convert(text):
return int(text) if text.isdigit() else text
def alphanum_key(key):
return [convert(c) for c in re.split("([0-9]+)", key)]
l.sort(key=alphanum_key) | 15b0c5dc44d841538896b4ec841c33ce54d7fdcb | 638,188 |
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl | 6878d8f6d613aa5e7267930edfe0896d54af89b6 | 599,251 |
def parse_genome_size(txt_file):
"""Pull out the genome size used for analysis."""
with open(txt_file, 'rt') as txt_fh:
return txt_fh.readline().rstrip() | f386898478393596e28e6e6d47a7da372a8f6b98 | 678,979 |
def build_request(nQs, category, difficulty, token, q_type = 'any'):
"""[summary]
Arguments:
nQs {int} -- number of questions
category {int} -- integer corresponding to category
difficulty {str} -- "easy", "medium", or "hard"
Keyword Arguments:
q_type {str} -- ["multiple", "boolean", or "any"] (default: {'any'})
Returns:
[str] -- output to send to requests
"""
if q_type == "any":
s = f'https://opentdb.com/api.php?amount={nQs}&category={category}&difficulty={difficulty}&encode=base64&token={token}'
else:
s = f'https://opentdb.com/api.php?amount={nQs}&category={category}&difficulty={difficulty}&type={q_type}&encode=base64&token={token}'
return s | 84d442ddf5e146e91c03121c5443d655e26a4f93 | 158,666 |
def clean_data(data):
"""Clean up unwanted markup in data"""
data = data.strip()
data = data.replace('\n', ' ')
return data | 17b3861d99cffcd1db771da4ea5a91aaace1015e | 272,690 |
import torch
def extract_ampl_phase(fft_im):
"""
Extracts amplitude and phase from the image
Args:
fft_im (Tensor): size should be bx3xhxwx2
Returns:
tuple: (amplitude, phase)
"""
# fft_im: size should be bx3xhxwx2
fft_amp = fft_im[:, :, :, :, 0]**2 + fft_im[:, :, :, :, 1]**2
fft_amp = torch.sqrt(fft_amp)
fft_pha = torch.atan2(fft_im[:, :, :, :, 1], fft_im[:, :, :, :, 0])
return fft_amp, fft_pha | 59d14c950530228262a712427496be9d99840a1c | 630,278 |
import calendar
def GetFirstSundayOfMonth(year, month):
"""Returns the first sunday of the given month of the given year.
>>> GetFirstSundayOfMonth(2016, 2)
7
>>> GetFirstSundayOfMonth(2016, 3)
6
>>> GetFirstSundayOfMonth(2000, 1)
2
"""
weeks = calendar.Calendar().monthdays2calendar(year, month)
# Return the first day in the first week that is a Sunday.
return [date_day[0] for date_day in weeks[0] if date_day[1] == 6][0] | d2bca510d03840969a6513686e2e69607c254d8d | 513,667 |
def distinct(not_distinct_list: list):
"""
Returns a list with no duplicate elements
"""
return list(set(not_distinct_list)) | 87dfa260b7882e28ada5803db30f12275968f44d | 656,840 |
import numbers
import ast
def is_numeric(obj):
"""
Check if the given object represents a number
:param obj: input object
:return: True if obj is a number, else False
"""
if isinstance(obj, numbers.Number):
return True
elif isinstance(obj, str):
try:
nodes = list(ast.walk(ast.parse(obj)))[1:]
except SyntaxError:
return False
if not isinstance(nodes[0], ast.Expr):
return False
if not isinstance(nodes[-1], ast.Num):
return False
nodes = nodes[1:-1]
for i in range(len(nodes)):
if i % 2 == 0:
if not isinstance(nodes[i], ast.UnaryOp):
return False
else:
if not isinstance(nodes[i], (ast.USub, ast.UAdd)):
return False
return True
else:
return False | fa3e43c3eb750ee0c958fd8c654c3d38c03c682e | 234,216 |
def split_list(xs):
"""Split a list into two lists of equal length."""
midpoint, remainder = divmod(len(xs), 2)
if remainder:
xs = xs[:-1]
return xs[:midpoint], xs[midpoint:] | 2209d8e01b620341195c1221141ab6c6c98250f6 | 134,201 |
from datetime import datetime
def timestamp(time_only=False):
"""
Get the current time.
:param time_only: bool; exclude year, month, and date or not
:return: str; the current time
"""
if time_only:
formatter = '%H:%M:%S'
else:
formatter = '%Y-%m-%d %H:%M:%S'
return datetime.now().strftime(formatter) | ae30e08e5efd0d939c66b642383dd208caf5fa65 | 565,581 |
import yaml
def load_config() -> dict:
"""
Loads the config.yml file to memory and returns it as dictionary.
:return: Dictionary containing the config.
"""
with open('config.yml', 'r') as ymlfile:
return yaml.load(ymlfile, Loader=yaml.FullLoader) | 6e05aa4eb6a7d9862814f595ecdc89ffab145ee5 | 707,782 |
def parse_parameters(config):
""" Compile the microbenchmark parameters used when running the test """
return {
'threads': config.num_threads,
'min_runtime': config.min_time
} | 22f65d22616013ccf650befda416e55d3064eff6 | 609,952 |
def _get_kernel(kernel):
"""Return the requested kernel function. """
return globals()['kernel_' + kernel] | 3a931d97149ab8a693c07fdc51a1fecc6d903f45 | 149,235 |
def get_normalised_context(manifest):
"""
normalises context as a list
:param: manifest - IIIF Manifest
:return: normalised @context always return an `list`
"""
context = manifest['@context']
return [context] if type(context) == str else context | 7374f3e56f79de776efab11fd8ef1bddfb2a2840 | 430,698 |
from typing import Set
def load_vocab(file: str) -> Set[str]:
"""
Loads the vocabulary into a set.
:param file: Path to vocabulary file
:return: set of tokens in the vocabulary
"""
vocab = set()
with open(file, encoding="utf-8") as f_in:
for token in f_in:
token = token.strip()
if token == "":
continue
vocab.add(token)
return vocab | 1b15631985eb308f1a392f72aa8c7c22cfbc22e6 | 434,861 |
import torch
def set_model_for_retrain(model_path, retrain_fraction, map_location, reset=True):
"""Set model for transfer learning.
The first layers (defined by 1-retrain_fraction) are frozen (i.e. requires_grad=False).
The weights of the last layers (defined by retrain_fraction) are reset unless reset option is False.
Args:
model_path (str): Pretrained model path.
        retrain_fraction (float): Fraction of the model that will be retrained, between 0 and 1. If set to 0.3,
            then the last 30% of the model will be re-initialised and retrained.
map_location (str): Device.
reset (bool): if the un-frozen weight should be reset or kept as loaded.
Returns:
torch.Module: Model ready for retrain.
"""
# Load pretrained model
model = torch.load(model_path, map_location=map_location)
# Get number of layers with learnt parameters
layer_names = [name for name, layer in model.named_modules() if hasattr(layer, 'reset_parameters')]
n_layers = len(layer_names)
# Compute the number of these layers we want to freeze
n_freeze = int(round(n_layers * (1 - retrain_fraction)))
# Last frozen layer
last_frozen_layer = layer_names[n_freeze]
# Set freeze first layers
for name, layer in model.named_parameters():
if not name.startswith(last_frozen_layer):
layer.requires_grad = False
else:
break
# Reset weights of the last layers
if reset:
for name, layer in model.named_modules():
if name in layer_names[n_freeze:]:
layer.reset_parameters()
return model | d8aed311ca01518a7817cbdc26a559fffbf0f1a2 | 413,333 |
def ffs(n):
"""find first set bit in a 32-bit number"""
r = 0
while r < 32:
if (1<<r)&n:
return r
r = r+1
return -1 | f401f708058508401b56d013898bced14428ad3f | 109,518 |
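A couple of spot checks, plus a branch-free idiom for nonzero n that isolates the lowest set bit (an alternative, not the dataset's code):
print(ffs(0b10100))  # 2: bit 2 is the lowest set bit
print(ffs(0))        # -1: no bit set
n = 0b10100
print((n & -n).bit_length() - 1)  # 2, same answer for nonzero n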
def read_loss_data(loss_file):
"""Read loss classification file."""
proj_to_status = {}
f = open(loss_file, "r")
for line in f:
line_data = line.rstrip().split("\t")
entry_type = line_data[0]
if entry_type != "PROJECTION":
continue
proj = line_data[1]
status = line_data[2]
proj_to_status[proj] = status
f.close()
return proj_to_status | f03a15537c4fb351a929cc95be9da44860700310 | 561,481 |
def from_str(s):
""" generate genotype from string
e.g. "Genotype(
normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)],
[('sep_conv_3x3', 1), ('dil_conv_3x3', 2)],
[('sep_conv_3x3', 1), ('sep_conv_3x3', 2)],
[('sep_conv_3x3', 1), ('dil_conv_3x3', 4)]],
normal_concat=range(2, 6),
reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)],
[('max_pool_3x3', 0), ('skip_connect', 2)],
[('max_pool_3x3', 0), ('skip_connect', 2)],
[('max_pool_3x3', 0), ('skip_connect', 2)]],
reduce_concat=range(2, 6))"
"""
genotype = eval(s)
return genotype | 4c6878e8ade2f9641849eda62f77fbd3006965a9 | 463,532 |
def _check_rect(n_x, n_y, dx, dy, symbol, M):
"""
Check that for a rectangle defined by two of its sides, every element \
within it is the same.
.. note:: This method is called once the main script has reached the \
limits of a rectangle.
:param n_x: Starting position of the rectangle (top left corner abscissa).
    :param n_y: Starting position of the rectangle (top left corner ordinate).
:param dx: Width of the rectangle.
:param dy: Height of the rectangle.
:param symbol: Symbol which should be in the rectangle.
:param M: Input matrix, as a list of strings.
:returns: Boolean indicated whether the rectangle is correct or not.
"""
for x in range(dx):
for y in range(dy):
if M[n_y + y][n_x + x] != symbol:
return False
return True | d2819d29659dc103391f8af3e44e89af2bdbe738 | 421,998 |