Dataset schema:
  content: string (lengths 39 to 9.28k)
  sha1: string (length 40)
  id: int64 (8 to 710k)
def prepare_function_parameters(input_parameters, training_parameters):
    """Prepare function parameters using input and training parameters."""
    function_parameters = input_parameters.copy()
    function_parameters.update(training_parameters)
    return function_parameters
747a9fd878e7729353876819d0d2c871b76a54b0
443,354
def fix_sequence_length(sequence, length):
    """Pad or truncate `sequence` so that its length matches `length`.

    Args:
        sequence (str): the input sequence
        length (int): expected length

    Returns:
        str: string of length `length`
    """
    # check if the sequence is smaller than the expected length
    if len(sequence) < length:
        # pad the sequence with 'N's
        sequence += 'N' * (length - len(sequence))
    # check if the sequence is larger than the expected length
    elif len(sequence) > length:
        # truncate to the expected length
        sequence = sequence[:length]
    return sequence
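A quick usage sketch (the 'N' padding suggests DNA-style sequences, but any string works):

print(fix_sequence_length("ACGT", 6))      # 'ACGTNN' (padded)
print(fix_sequence_length("ACGTACGT", 6))  # 'ACGTAC' (truncated)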
300566ba387ac33709ed2e9eef28f1ab65ed5f60
576,054
def trajectory_importance_max_min(states_importance):
    """Compute the importance of the trajectory according to the max-min
    approach: delta(max state, min state).
    """
    # renamed from max/min to avoid shadowing the built-ins
    max_val, min_val = float("-inf"), float("inf")
    for state_importance in states_importance:
        if state_importance < min_val:
            min_val = state_importance
        if state_importance > max_val:
            max_val = state_importance
    return max_val - min_val
c9185e46455224056d9dbdd4704f7cb1aa6cddac
247,170
def _get_nullable_handle(handle_wrapper):
    """Return the handle of `handle_wrapper`, or None."""
    if handle_wrapper is None:
        return None
    else:
        return handle_wrapper._handle
1e55b535b38b0ae56a011fb7cd3620a3296b609a
428,436
def color(priority, text):
    """Color the text according to the priority of the task.

    :param priority: priority of the task
    :param text: string that needs to be colored
    :return: colored text
    """
    if priority == 1:  # red
        return f'\033[31m{text}\033[0m'
    elif priority == 2:  # yellow
        return f'\033[33m{text}\033[0m'
    elif priority == 3:  # blue
        return f'\033[34m{text}\033[0m'
    # priorities outside 1-3 fall through uncolored (the original returned None here)
    return text
9609c951127453a72d7c5c6e1642800658f8e0d5
353,194
import torch


def one_hot(y, num_dim=10):
    """One-hot encoding, similar to `torch.eye(num_dim).index_select(dim=0, index=y)`.

    :param y: N-dim tensor
    :param num_dim: do one-hot labeling from `0` to `num_dim - 1`
    :return: shape = (batch_size, num_dim)
    """
    one_hot_y = torch.zeros(y.size(0), num_dim)
    if y.is_cuda:
        one_hot_y = one_hot_y.cuda()
    return one_hot_y.scatter_(1, y.view(-1, 1), 1.)
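A minimal usage sketch, assuming PyTorch is installed; the labels must be an integer tensor:

y = torch.tensor([1, 0, 3])
print(one_hot(y, num_dim=4))
# tensor([[0., 1., 0., 0.],
#         [1., 0., 0., 0.],
#         [0., 0., 0., 1.]])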
694bfea18ecbb5c5737e0d38c0aa0f5f52a82a55
1,007
def hook(t):
    """Build a download callback that advances the progress bar `t`."""
    def inner(bytes_amount):
        t.update(bytes_amount)  # update progress bar
    return inner
d8228b9dec203aaa32d268dea8feef52e8db6137
3,102
import base64


def get_hash(crc):
    """Gets the base64-encoded hash from a CRC32C object.

    Args:
        crc (google_crc32c.Checksum|predefined.Crc): CRC32C object from the
            google-crc32c or crcmod package.

    Returns:
        A string representing the base64-encoded CRC32C hash.
    """
    return base64.b64encode(crc.digest()).decode('ascii')
70a358132c9d11fa6aecd3fa84bc755f8c247697
565,679
def split_arguments(args):
    """Return the 2-tuple (args[:-1], args[-1]) if args[-1] exists and is a
    dict; otherwise return (args, {}).
    """
    if args and isinstance(args[-1], dict):
        return args[:-1], args[-1]
    else:
        return args, {}
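A short sketch of the intended call pattern (a trailing options dict, as in many RPC-style APIs):

positional, options = split_arguments((1, 2, {'timeout': 5}))
assert positional == (1, 2) and options == {'timeout': 5}
positional, options = split_arguments((1, 2))
assert positional == (1, 2) and options == {}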
e5dcc6228cf2d92f701bb8c2e89dc2fdac97e993
276,649
def compute_bytes_per_voxel(element_type):
    """Return the number of bytes required to store one voxel for the given
    metaIO ElementType.
    """
    switcher = {
        'MET_CHAR': 1,
        'MET_UCHAR': 1,
        'MET_SHORT': 2,
        'MET_USHORT': 2,
        'MET_INT': 4,
        'MET_UINT': 4,
        'MET_LONG': 4,
        'MET_ULONG': 4,
        'MET_LONG_LONG': 8,
        'MET_ULONG_LONG': 8,
        'MET_FLOAT': 4,
        'MET_DOUBLE': 8,
    }
    return switcher.get(element_type, 2)
b2bd1ff15d1bfef979e8f5c2152a5776b4754220
332,270
def _swiftmodule_for_cpu(swiftmodule_files, cpu):
    """Select the cpu-specific swiftmodule."""
    # The paths will be of the following format:
    #   ABC.framework/Modules/ABC.swiftmodule/<arch>.swiftmodule
    # where <arch> is a common arch like x86_64, arm64, etc.
    named_files = {f.basename: f for f in swiftmodule_files}
    module = named_files.get("{}.swiftmodule".format(cpu))
    if not module and cpu == "armv7":
        module = named_files.get("arm.swiftmodule")
    return module
59e978f22f4b1959ef32b0f2d68b0d92ec7fabe0
38,046
def calculate_width(size: tuple[int, int], pixel_height: int) -> int:
    """Calculate the width corresponding to a given height, preserving the
    aspect ratio.
    """
    original_width, original_height = size
    height_ratio = pixel_height / original_height
    return round(original_width * height_ratio)
077de208c5a471a550bf2955fc978e3fdf77d83a
389,379
import pandas as pd


def writePattern(df, pat):
    """Utility function to append a pattern to a dataframe.

    Parameters
    ----------
    df : pandas dataframe
        input dataframe to be updated
    pat : src.Patterns.pattern
        pattern to be added

    Returns
    -------
    pandas dataframe
        updated dataframe
    """
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # equivalent replacement for appending a single row from a dict.
    df = pd.concat([df, pd.DataFrame([pat.getDictForm()])], ignore_index=True)
    return df
0e65b2a8024109e1d72782293764b663f70ccaf2
305,597
def smooth_freqs(freqs):
    """Smooth a freqs vector, guaranteeing that it sums to 1.

    :param freqs: vector of frequencies
    :return: vector of frequencies guaranteed to sum to 1
    """
    s = sum(freqs)
    return [f / s for f in freqs]
89238cdf5d43d72010e05512b0d80fb648666aec
169,496
def curb_gpred_spans(dmrs_xml, max_tokens=3):
    """Remove general predicate node token alignments if a general predicate
    node spans more than max_tokens. This prevents general predicate nodes
    from dominating rule extraction.

    :param dmrs_xml: Input DMRS XML
    :param max_tokens: Maximum number of allowed tokens before the entire
        general predicate node span is removed
    :return: Modified DMRS
    """
    for entity in dmrs_xml:
        if entity.tag != 'node':
            continue

        # Determine if the node is a general predicate
        gpred_node = False
        for node_info in entity:
            if node_info.tag == 'gpred':
                gpred_node = True
                break
        if not gpred_node:
            continue

        # Remove the alignment if the number of tokens exceeds the specified limit
        tokalign = entity.attrib.get('tokalign')
        gpred_token_num = len(tokalign.split(' '))
        if gpred_token_num > max_tokens:
            entity.attrib['tokalign'] = ''

    return dmrs_xml
45d80423a0604ca503e8f2ae730b9b5ca0c3e1e1
107,538
def get_structure(dataset_or_iterator):
    """Returns the type specification of an element of a `Dataset` or `Iterator`.

    Args:
        dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

    Returns:
        A nested structure of `tf.TypeSpec` objects matching the structure of
        an element of `dataset_or_iterator` and specifying the type of
        individual components.

    Raises:
        TypeError: If `dataset_or_iterator` is not a `Dataset` or `Iterator`
            object.
    """
    try:
        return dataset_or_iterator.element_spec  # pylint: disable=protected-access
    except AttributeError:
        raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
                        "object, but got %s." % type(dataset_or_iterator))
e1def0868e3f656b258c3639543ac01e83d96d14
161,943
# collections.OrderedDict is the runtime class; typing.OrderedDict is an
# annotation-only alias and its instantiation is deprecated.
from collections import OrderedDict


def process_odin_args(args):
    """Find the arguments needed in the ODIN stage of the flow."""
    odin_args = OrderedDict()
    odin_args["adder_type"] = args.adder_type
    if args.adder_cin_global:
        odin_args["adder_cin_global"] = True
    if args.disable_odin_xml:
        odin_args["disable_odin_xml"] = True
    if args.use_odin_simulation:
        odin_args["use_odin_simulation"] = True
    return odin_args
92a50c84be6d16f966323d02a9db8d5f32a04c2e
48,483
def className(obj):
    """Return the name of an object's class as a string."""
    return obj.__class__.__name__
557d77c70387524e83c4956ce859f90116251136
393,662
def boost_nph(group, nph):
    """Return a fraction of boost based on the total number of high-binding
    peptides per IAR.

    :param pandas.core.frame.DataFrame group: The IAR under review.
    :param float nph: The total boost amount allotted to this criterion.
    :returns: The percent of boost provided by this criterion.
    :rtype: float

    >>> df = pandas.DataFrame({'binding_score': [0.1, 0.4, 1.5, 0.04, 0.44, 1.75, 1.0]})
    >>> boost_nph(df, 1)
    1.0
    >>> df = pandas.DataFrame({'binding_score': [0.1, 0.4, 1.5, 1.75, 1.0]})
    >>> boost_nph(df, 1)
    0.9
    >>> df = pandas.DataFrame({'binding_score': [0.1, 0.4, 1.5, 1.75]})
    >>> boost_nph(df, 1)
    0.7
    >>> df = pandas.DataFrame({'binding_score': [0.1, 1.5]})
    >>> boost_nph(df, 1)
    0.4
    >>> df = pandas.DataFrame({'binding_score': [1.5]})
    >>> boost_nph(df, 1)
    0.0
    """
    n = len(group[group['binding_score'] <= 1.0])
    return round(nph * ((n >= 1) * 0.4 + (n >= 2) * 0.3 +
                        (n >= 3) * 0.2 + (n >= 4) * 0.1), 2)
383cff2d6c4f9bff5aadd02982368a3d24f9b167
528,388
def set_offset(chan_obj):
    """Return a tuple of offset value and calibrate value.

    Arguments:
        chan_obj (dict): Dictionary containing channel information.
    """
    physical_range = chan_obj['physical_max'] - chan_obj['physical_min']
    digital_range = chan_obj['digital_max'] - chan_obj['digital_min']
    calibrate = physical_range / digital_range
    calibrated_dig_min = calibrate * chan_obj['digital_min']
    offset = chan_obj['physical_min'] - calibrated_dig_min
    return (offset, calibrate)
06ab3aeafcb1ba26799d3ac2696b62909928310d
80,751
from math import floor, log10


def findSizeInt(number):
    """
    #EN-US:
    → Calculates the number of digits in a number.
    :param number: the number to be calculated.
    :return: the number of digits of the number entered.

    #PT-BR:
    → Calcula a quantidade de dígitos em um número.
    :param number: o número a ser calculado.
    :return: a quantidade de dígitos do número informado.
    """
    number = abs(int(number))
    return 1 if number == 0 else floor(log10(number)) + 1
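A couple of illustrative calls; note the abs()/int() coercion up front:

print(findSizeInt(0))      # 1 (special-cased, since log10(0) is undefined)
print(findSizeInt(-4096))  # 4
print(findSizeInt(7.9))    # 1 (truncated to 7 before counting)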
8b174183520337f31f17bfb4163d5ed5ff90e896
14,360
def turbulent_kinetic_energy(field):
    """Calculate turbulent kinetic energy.

    Parameters
    ----------
    field : Field2D

    Returns
    -------
    Field2D

    Author(s)
    ---------
    Jia Cheng Hu
    """
    return 0.5 * (field.rms()**2).fsum(0)
53c7e6eb34051f3df6e71efefb89776492fdd785
359,170
def play_sound(register, cur_instruction, name):
    """Play sound/send instruction."""
    register['sound'].append(register[name])
    register['counter'] += 1
    return cur_instruction
6a1f664e40d998c36cefea1987281dfb72cd7f1f
454,347
import json


def read_coco(cocofile):
    """Read a COCO annotation file (JSON format) into dictionaries."""
    # Read file
    with open(cocofile, 'r') as fptr:
        cocojson = json.load(fptr)

    imgid_to_annot = {}
    # Create dict of image id to filename and annots for all images
    imgid_to_filename = {img['id']: (img['file_name'], img['width'], img['height'])
                         for img in cocojson['images']}
    catid_to_category = {(cat['id'] - 1): cat['name']
                         for cat in cocojson['categories']}
    for annot in cocojson['annotations']:
        image_id = annot['image_id']
        if image_id in imgid_to_annot:
            info = imgid_to_annot[image_id]
            info.append((annot['bbox'], annot['category_id'] - 1))
            imgid_to_annot[image_id] = info
        else:
            imgid_to_annot[image_id] = [(annot['bbox'], annot['category_id'] - 1)]
    return imgid_to_filename, imgid_to_annot, catid_to_category
acd156c4ed45a5b24783ab0cb7c9280e74032668
381,104
def bookkeep_product(mol):
    """Bookkeep reaction-related information of atoms/bonds in products.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance for products.

    Returns
    -------
    info : dict
        Reaction-related information of atoms/bonds in products
    """
    info = {'atoms': set()}
    for atom in mol.GetAtoms():
        info['atoms'].add(atom.GetAtomMapNum() - 1)
    return info
bf2d774fd42c98468bfb3af1abe1e2adc15ec8e2
531,534
import torch


def _convert_boxes_to_roi_format(boxes):
    """Convert rois into the torchvision format.

    :param boxes: The roi boxes as a native tensor[B, K, 4].
    :return: The roi boxes in the format that roi pooling and roi align in
        torchvision require. Native tensor[B*K, 5].
    """
    concat_boxes = boxes.view((-1, 4))
    # Prepend each box with the index of the batch element it came from.
    ids = torch.full_like(boxes[:, :, :1], 0)
    for i in range(boxes.shape[0]):
        ids[i, :, :] = i
    ids = ids.view((-1, 1))
    rois = torch.cat([ids, concat_boxes], dim=1)
    return rois
72ce30f5d7a92b3a09eb692c88f610da26b1edeb
70,941
def compare_distributions(actual_distribution, expected_distribution, threshold):
    """Compare whether two distributions are similar.

    Args:
        actual_distribution: A list of floats, contains the actual distribution.
        expected_distribution: A list of floats, contains the expected distribution.
        threshold: Number within [0, 100], the threshold percentage by which the
            actual distribution can differ from the expected distribution.

    Returns:
        The similarity between the distributions as a boolean. Returns true if
        the actual distribution lies within the threshold of the expected
        distribution, false otherwise.

    Raises:
        ValueError: if threshold is not within [0, 100].
        Exception: containing detailed error messages.
    """
    if len(expected_distribution) != len(actual_distribution):
        raise Exception(
            'Error: expected and actual distributions have different size (%d vs %d)'
            % (len(expected_distribution), len(actual_distribution)))
    if threshold < 0 or threshold > 100:
        raise ValueError('Value error: Threshold should be between 0 and 100')
    threshold_fraction = threshold / 100.0
    for expected, actual in zip(expected_distribution, actual_distribution):
        if actual < (expected * (1 - threshold_fraction)):
            raise Exception("actual(%f) < expected(%f-%d%%)" %
                            (actual, expected, threshold))
        if actual > (expected * (1 + threshold_fraction)):
            raise Exception("actual(%f) > expected(%f+%d%%)" %
                            (actual, expected, threshold))
    return True
198f3d54cee98b5d69ada2d12f9b7f09a31a87b3
436,378
import re


def pathspec(expression):
    """Normalize a path separated by backward or forward slashes so that it is
    separated by forward slashes only.
    """
    return '/'.join(re.split(r'[\\/]', expression))
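For example:

print(pathspec(r'C:\Users\me\project/src'))  # 'C:/Users/me/project/src'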
c5a028eb8b6371a8fa85928844c1e463fa44ab69
185,780
def no_forcing(grid):
    """Zero-valued forcing field for unforced simulations."""
    del grid  # unused

    def forcing(v):
        return tuple(0 * u.array for u in v)

    return forcing
5ffe81104de215f80c30c1edc896933ae980917e
476,640
import six


def _compare_match(dict1, dict2):
    """Compare two dictionaries and return True if the values of the keys they
    share match, False otherwise.
    """
    for karg, warg in six.iteritems(dict1):
        if karg in dict2 and dict2[karg] != warg:
            return False
    return True
c2da782d6dc6d9a00b49dd093fc2b2acb76200bf
155,609
def strip_output(nb):
    """Strip the outputs from a notebook object."""
    nb.metadata.pop('signature', None)
    for cell in nb.worksheets[0].cells:
        if 'outputs' in cell:
            cell['outputs'] = []
        if 'prompt_number' in cell:
            cell['prompt_number'] = None
    return nb
94f168f9ee04c18076eb154d2da704e53cb883ff
311,254
def remove_new_lines_in_paragraph(article):
    """When we publish articles to dev.to, sometimes the paragraphs don't look
    very good, so we remove all new lines from paragraphs before we publish
    them. This means we don't have to keep very long lines in the document,
    making it easier to edit.

    Some elements, like code blocks or frontmatter, should keep their new
    lines. So the logic is simple: remove new lines from all elements except
    specific ones like code blocks. Of course code blocks can span multiple
    lines, so when we see a code block ``` we skip lines until we see the end
    of that code block ```. The same logic applies to all the elements we
    want to skip.

    Args:
        article (str): The article we want to publish.

    Returns:
        str: The article with new lines removed from paragraphs.
    """
    skip_chars = ["```", "---", "-", "*", "![", ":::"]
    endswith_char = ""
    article_lines = article.split("\n\n")

    for index, line in enumerate(article_lines):
        line_startswith_skip_char = [char for char in skip_chars if line.startswith(char)]
        if line_startswith_skip_char or endswith_char:
            if line_startswith_skip_char:
                endswith_char = line_startswith_skip_char[0]
            if line.endswith(endswith_char):
                endswith_char = ""
            continue

        article_lines[index] = line.replace("\n", " ")

    return "\n\n".join(article_lines)
6f2123bebe966b4e3b03a0c4473628985b883f3f
645,770
import re


def parse_arches_from_config_in(fname):
    """Given a path to an arch/Config.in.* file, parse it to get the list of
    BR2_ARCH values for this architecture.
    """
    arches = set()
    with open(fname, "r") as f:
        parsing_arches = False
        for line in f:
            line = line.strip()
            if line == "config BR2_ARCH":
                parsing_arches = True
                continue
            if parsing_arches:
                m = re.match(r"^\s*default \"([^\"]*)\".*", line)
                if m:
                    arches.add(m.group(1))
                else:
                    parsing_arches = False
    return arches
af1d8de9a5a210f4e009b942cf069f1c6d6c4e47
648,849
def GROUP(e):
    """Put the argument in a regex group.

    :param:
     - `e`: a string regular expression
    :return: grouped regular expression (string)
    """
    return "({e})".format(e=e)
93a57b6004b59f1f52ba8bdfda8e5727f9653bea
523,049
# collections.OrderedDict is the runtime class; typing.OrderedDict is an
# annotation-only alias.
from collections import OrderedDict


def invert(mapper):
    """Invert a dict of lists, preserving the order."""
    inverse = OrderedDict()
    for k, v in mapper.items():
        for i in v:
            inverse[i] = k
    return inverse
0cd15a56762b36a774cb91b711f5d893da62de1f
664,585
def parse_dataset_sid_pid2eval_sid_pid(dataset_sid_pid2eval_sid_pid):
    """Parsing priority: sid_pid is mapped to
      1. dataset_sid_pid2eval_sid_pid[sid_pid] if it exists, else
      2. the same sid_pid.

    Returns:
        sid_pid2eval_id: a dense mapping having keys for all possible sid_pid s
        (0 to 99_99) using the provided sparse dataset_sid_pid2eval_sid_pid
    """
    dsp2spe_new = dict()
    for k in range(10000):
        sid_pid_new = dataset_sid_pid2eval_sid_pid.get(k, k)
        dsp2spe_new[k] = sid_pid_new if sid_pid_new != 'IGNORED' else k
    assert all(v in list(range(10000)) for v in dsp2spe_new.values()), dsp2spe_new.values()
    return dsp2spe_new
dfbf98f1f295aa2cecbf6213ea189bc7f609c567
371,950
def decipher(message: str, cipher_map: dict) -> str:
    """Decipher a message given a cipher map.

    :param message: Message to decipher
    :param cipher_map: Dictionary mapping to use
    :return: Deciphered string

    >>> cipher_map = create_cipher_map('Goodbye!!')
    >>> decipher(encipher('Hello World!!', cipher_map), cipher_map)
    'HELLO WORLD!!'
    """
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
0953dcb3119c8158f829b45184af85e1a5964716
335,057
import itertools
from typing import Iterable, List, Tuple


def sorted_chain(*ranges: Iterable[Tuple[int, int]]) -> List[Tuple[int, int]]:
    """Chain & sort ranges."""
    return sorted(itertools.chain(*ranges))
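A small sketch of how the variadic signature is used:

print(sorted_chain([(5, 7), (0, 3)], [(2, 4)]))
# [(0, 3), (2, 4), (5, 7)]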
6724c401a451b7d64aafb594af481e914de61277
263,237
def gen_orthoplot(f, gs):
    """Create axes on a subgrid to fit three orthogonal projections."""
    axs = []
    size_xy = 5
    size_z = 1
    size_t = size_xy + size_z
    gs_sub = gs.subgridspec(size_t, size_t)
    # central: yx-image
    axs.append(f.add_subplot(gs_sub[:size_xy, :size_xy]))
    # middle-bottom: zx-image
    axs.append(f.add_subplot(gs_sub[size_xy:, :size_xy], sharex=axs[0]))
    # right-middle: zy-image
    axs.append(f.add_subplot(gs_sub[:size_xy, size_xy:], sharey=axs[0]))
    return axs
92d90a6e472349d539a30165e6cbfce2e5b53e4f
360,963
from functools import reduce
from typing import Any, Callable


def pipe(v: Any, *fns: Callable):
    """Thread a value through one or more functions, applied left to right.

    >>> def double(n):
    ...     print(f'Doubling {n}...')
    ...     return n * 2
    >>> def inc(n):
    ...     print(f'Incrementing {n}...')
    ...     return n + 1
    >>> pipe(3, inc, double)
    Incrementing 3...
    Doubling 4...
    8
    """
    return reduce(lambda x, f: f(x), fns, v)
94d0d3dad502494b444a0aabfa31c1c1ad471db4
298,047
def parseCustomHeaders(custom: str) -> list:
    """Parse a string of semicolon-separated custom headers into a list."""
    if ";" in custom:
        if custom.endswith(';'):
            custom = custom[:-1]
        return custom.split(';')
    else:
        return [custom]
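A quick sketch, including the trailing-semicolon case the function guards against:

print(parseCustomHeaders('X-A: 1;X-B: 2;'))  # ['X-A: 1', 'X-B: 2']
print(parseCustomHeaders('X-A: 1'))          # ['X-A: 1']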
1cfdf1a9b8e0c70ca60b1938dd4a6f2b5a8a17f2
349,462
def CODE(string):
    """Return the numeric Unicode code point of the first character in the
    string provided. Same as `ord(string[0])`.

    >>> CODE("A")
    65
    >>> CODE("!")
    33
    >>> CODE("!A")
    33
    """
    return ord(string[0])
0f680fe1e45156c00d0a5839e24f1619a456773f
700,680
import json
from typing import Any


def save_object_as_json(obj: Any, filepath: str) -> None:
    """Save a Python object as a JSON file."""
    with open(file=filepath, mode='w') as fp:
        json.dump(obj=obj, fp=fp, indent=4)
    return None
3f38a33dd7cbf6e9d25308ede4ed48e4ccd9c28d
291,851
import re


def tag_to_label(tag: str) -> str:
    """Return the spaced and lowercased label for the given camelCase tag."""
    return re.sub(r'(?<!^)(?=[A-Z])', ' ', tag).lower()
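For instance:

print(tag_to_label('camelCaseTag'))  # 'camel case tag'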
262d72d343c99f509e91da07fbd307ac6c30836b
612,313
def builder_support(builder):
    """Return True when the builder is supported.

    Supported builders output in html format, but exclude `PickleHTMLBuilder`
    and `JSONHTMLBuilder`, which run into issues when serializing blog objects.
    """
    if hasattr(builder, "builder"):
        builder = builder.builder
    not_supported = {"json", "pickle"}
    return builder.format == "html" and builder.name not in not_supported
3e3244bfefa0cb81f52fc3aadb12b052865a5b90
570,231
def is_abba(abba_str):
    """Return True if a 4-character string consists of a pair of two different
    characters followed by the reverse of that pair.
    """
    if len(abba_str) != 4:
        raise ValueError("is_abba expects a string of exactly 4 characters")
    return (abba_str[0] == abba_str[3] and
            abba_str[1] == abba_str[2] and
            abba_str[0] != abba_str[1])
78484e6cc5174aaba30e98c2bd843d57c81ab9f0
538,664
import random


def integers_equal(n, min_, max_):
    """Return a sequence of N equal integers between min_ and max_ (inclusive)."""
    eq = random.randint(min_, max_)
    return [eq for _ in range(n)]
4ab257ed7c434bf4574afac4b0fc3f0e543da57c
144,205
import math


def geometric_mean(values):
    """Evaluate the geometric mean for a list of numeric values.

    @param values: List with values.
    @return: Single value representing the geometric mean for the list values.
    @see: http://en.wikipedia.org/wiki/Geometric_mean
    """
    try:
        # coerce to float; the original coerced to int, which silently
        # truncated fractional values
        values = [float(value) for value in values]
    except ValueError:
        return None
    n = len(values)
    if n == 0:
        return None
    return math.exp(sum([math.log(x) for x in values]) / n)
34f0c5906b0425fbd915f3c7218f033d54d04f08
322,577
def ossec_level_to_log_level(message: dict) -> str:
    """Convert an OSSEC_Level to a log level.

    OSSEC level is an integer in the range 0 to 15 inclusive:
    0-3 = Low severity, 4-7 = Medium severity, 8-11 = High severity,
    12-15 = Critical severity.
    """
    level = message.get("OSSEC_Level", 0)
    if level >= 8:
        return "error"
    elif level >= 4:
        return "warning"
    else:
        return "info"
085230cf38f45647feec7a0e47c0e1d0d34b5408
148,722
def remove_remote(ssh, path):
    """Remove a remote file or directory.

    Arguments:
        ssh: Established SSH connection instance.
        path (str): Absolute path to the file or directory.

    Returns:
        Boolean indicating result.
    """
    # Note: the exit status is never inspected, so this always reports success.
    stdin, stdout, stderr = ssh.exec_command('rm -rf %s' % path)
    return True
5b7f02df2dc1147771712e468559f5741d674b6e
525,547
def diff(list_1, list_2):
    """Get the set difference of two lists (elements of list_1 not in list_2).

    :param list_1: list
    :param list_2: list
    :return: list
    """
    return list(set(list_1) - set(list_2))
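A short sketch; note that going through set() drops duplicates and ordering:

print(sorted(diff([1, 2, 2, 3], [3, 4])))  # [1, 2]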
46671b83b604947a10467a6376ee2ea1025b41cb
399,942
def character_ngrams(s, ngram_range, lhs='<', rhs='>'):
    """Extract character n-grams from string *s* for the lengths in *ngram_range*.

    Args:
        s: the string whence n-grams are extracted
        ngram_range: tuple with two elements: the min and max ngram lengths
        lhs: left-padding character (to show token boundaries)
        rhs: right-padding character (to show token boundaries)

    Returns:
        list of n-grams in *s*
    """
    ngrams = []
    for word in s.split():
        word = lhs + word + rhs
        rangemax = len(word) - (ngram_range[1] - 1)
        if rangemax < 1:
            rangemax = 1
        for i in range(rangemax):
            for j in range(ngram_range[0], ngram_range[1] + 1):
                ngrams.append(tuple(word[i:i + j]))
    return ngrams
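A worked example with bigrams and trigrams; the < and > padding marks word boundaries:

print(character_ngrams('hi', (2, 3)))
# [('<', 'h'), ('<', 'h', 'i'), ('h', 'i'), ('h', 'i', '>')]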
2728a9d787594c77b78206278afa717ddd5c5e07
140,036
import itertools
from typing import Iterable, Tuple


def find_two_numbers_that_add_to(numbers: Iterable[int],
                                 desired_outcome: int) -> Tuple[int, int]:
    """Find two numbers in a list (or other iterable) of numbers that add up
    to the desired outcome, and return the two numbers.
    """
    # Coming up with an algorithm that returns all possible combinations of 2
    # in a list of numbers is fun, but not necessary, as this is already
    # provided by the itertools module of the standard library.
    for left, right in itertools.combinations(numbers, 2):
        if left + right == desired_outcome:
            return left, right
    raise ValueError(f"No numbers add up to {desired_outcome} in this list.")
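A usage sketch (the sample numbers are illustrative only):

print(find_two_numbers_that_add_to([1721, 979, 366, 299], 2020))  # (1721, 299)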
6f7e0feea89c21909d9596cc7fb3e3afadd468f6
146,122
def GetComplementaryColor(hexStr):
    """Return the complementary RGB color.

    Example Usage:
    >>> GetComplementaryColor('#FFFFFF')
    '#000000'
    """
    if hexStr[0] == '#':
        hexStr = hexStr[1:]
    rgb = (hexStr[0:2], hexStr[2:4], hexStr[4:6])
    compColor = '#'
    for a in rgb:
        compColor += '%02x' % (255 - int(a, 16))
    # print('complementaryColor = ', compColor)
    if hexStr.isupper():  # retain case
        return compColor.upper()
    return compColor
5c42a8ccfc48f57d4f8ba2728b83ad0972dbd644
620,214
def flatten(l):
    """Flatten a nested list to a one-dimensional list."""
    r = []
    for item in l:
        if isinstance(item, (list, tuple)):
            r.extend(flatten(item))
        else:
            r.append(item)
    return r
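For example, mixed lists and tuples flatten alike:

print(flatten([1, [2, (3, 4)], [[5]]]))  # [1, 2, 3, 4, 5]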
4355cce3c291356c45758f29939dad364edcfe55
190,760
import random


def random_anagrammable(Nsamples, wbags, Nanagrams=1, Lmax=None):
    """Randomly sample words from a dictionary which also have an anagram in
    the dictionary.

    Parameters
    ----------
    Nsamples : int
        Number of words to sample
    wbags : mapping
        Structure mapping `wordbag(word)` to a set of all anagrams of that word.
    Nanagrams : int, optional
        Number of anagrams a word must have to be suitable. Defaults to 1.
    Lmax : int, optional
        Maximum length of word to sample.
    """
    # NOTE: Nanagrams is accepted but the filter below only requires at least
    # one anagram (len(ws) > 1).
    candidates = [w for ws in wbags.values() for w in ws
                  if (Lmax is None or len(w) <= Lmax) and len(ws) > 1]
    return random.sample(candidates, Nsamples)
54d14f3af3a4d20cbc01d473ba18b89a07de5415
471,132
def elev_gain_loss(elev_data, smooth_time=25):
    """Calculate elevation gain and loss over the course of the activity.

    The elevation profile is smoothed to reduce the impact of short-term
    elevation noise. Gain and loss are defined as the cumulative sum of the
    positive and negative 1st discrete differences, respectively.

    :param pd.Series elev_data: series of elevation data, indexed with timestamp
    :param int smooth_time: number of seconds to smooth input data before
        calculating changes
    :return: (elevation gain, elevation loss)
    :rtype: tuple
    """
    # smooth input data by n seconds
    elev_data = elev_data.rolling('{}s'.format(str(smooth_time))).mean()
    # gain and loss calculation
    gain = sum([i for i in elev_data.diff() if i > 0])
    loss = sum([i for i in elev_data.diff() if i < 0])
    return gain, loss
df73dad3a80c4396b9268b39b58499a200d7b738
306,186
def num_owned_indices_from_cyclic(dd):
    """Given a dimension dictionary `dd` with dist_type 'c', return the number
    of indices owned.
    """
    block_size = dd.get('block_size', 1)
    global_nblocks, partial = divmod(dd['size'], block_size)
    local_nblocks = ((global_nblocks - 1 - dd['proc_grid_rank'])
                     // dd['proc_grid_size']) + 1
    local_partial = partial if dd['proc_grid_rank'] == 0 else 0
    # use the defaulted block_size here; the original indexed dd['block_size']
    # directly, which fails when the key is absent
    local_size = local_nblocks * block_size + local_partial
    return local_size
653137633fad20db171cb2566273dbf4f9b2077a
236,678
import torch


def load_model_from_file(path, model):
    """Load a (trained) model from file.

    Parameters
    ----------
    path : str
        File where the model to be loaded is saved.
    model : torch.nn.Module
        Model instance whose weights will be loaded.

    Returns
    -------
    Pytorch object
        Pytorch object as defined in the file.
    """
    model.load_state_dict(torch.load(path))
    model.eval()
    return model
7b767d536efe587c0b58afdba51a8e3c597e7e98
450,372
def default(df):
    """By default, do nothing.

    :param df: input dataframe
    :return: the dataframe, unchanged
    """
    return df
d64c9bca054f08e969e0167fb074554257e03ae5
165,187
def _IsGritInputFile(input_file):
    """Returns True iff this is a GRIT input file."""
    return input_file.endswith('.grd')
9a45554185c4b6958001500518c9d18d6d563c63
446,194
def get_model_dir(experiment_params, flags_obj):
    """Gets the model dir from Flags."""
    del experiment_params  # unused
    return flags_obj.model_dir
d3a3254939a9f1973d369235964a2e1935d8ddd0
304,524
from typing import List, Sequence


def make_golden(sequence: Sequence) -> List:
    """Given a sequence of elements, separate it into two new sequences at the
    index corresponding to the golden ratio of the sequence length.
    """
    golden_index = round(len(sequence) * 0.618)
    return [sequence[0:golden_index], sequence[golden_index:]]
34b63a8a88aa2a0154b75070c8a6eaed7d3be259
294,374
from typing import Optional, Type


def is_none_type(type_: Optional[Type]) -> bool:
    """Is the given type NoneType?"""
    return type_ is type(None)
adacf3da1037bfb10d44570fbd3e9ebd07c85b45
378,288
def h(level: int, text: str) -> str:
    """Wrap text in an HTML `h` tag."""
    return f"<h{level}>{text}</h{level}>"
b7dac43c54ed2080687b89a72f268e5a2d580d24
181,188
def link(url, linkText='{url}'):
    """Return a link HTML string.

    The string is an <a> tag that links to the given url. If linkText is not
    provided, the link text will be the url.
    """
    template = '<a href={url}>' + linkText + '</a>'
    return template.format(url=url)
a53c4cd468de23cfc25572093ca8787feb4f12a4
66,595
def get_class_name(o, lower=False):
    """Return the class name of an object o."""
    if not isinstance(o, type):
        o = o.__class__
    if lower:
        return o.__name__.lower()
    else:
        return o.__name__
15ef10f6612e34e4c81bae707044daca15b5f81f
637,216
import re


def parse_tags(app_tags):
    """Parse the given tags value, standardize, and remove duplicates."""
    app_tags = [_f for _f in re.split("[,]+ *", app_tags) if _f]
    tags = []
    for tag in app_tags:
        tag = tag.replace('"', '')
        tag = tag.replace("'", '')
        tag = re.sub(r"\s+", '-', tag)
        tag = tag.lower()
        tags.append(tag)
    tags = list(set(tags))
    return tags
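A quick sketch; sorted() is only for a stable display, since set() does not preserve order:

print(sorted(parse_tags('Machine Learning, "NLP", nlp')))
# ['machine-learning', 'nlp']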
8d974b640bc4e6f1fc0035b2aa1433b04b068aca
167,911
def _return_ordered_config(unordered_config: dict, template_config: dict) -> dict:
    """Order the keys in unordered_config according to the order in
    template_config.

    Note this function assumes all keys in both dictionaries are identical,
    including all nested dictionaries.
    """
    # create a dictionary for the ordered config
    ordered_config = {}
    # loop through keys in the order of the template config
    for key in template_config.keys():
        # get the value from the unordered config
        val = unordered_config[key]
        # if the value is a dictionary, it needs to be ordered too
        if isinstance(val, dict):
            ordered_config[key] = _return_ordered_config(val, template_config[key])
        # otherwise just save the value
        else:
            ordered_config[key] = val
    return ordered_config
f74f62973511bf26b188c77e6ee6d31367c8fcf6
647,410
def count_char(char, text):
    """Count the number of occurrences of char in text."""
    return text.count(char)
dad3ad1fb81387efadb5e7e0fdde8c7d67e4be05
477,661
def normalize_content(content: str) -> str:
    """Remove leading and trailing whitespace from the `content` string.

    :param content: string to be normalized.
    :returns: normalized string.
    """
    return content.strip(' \n\t')
b5fd7c6fb28bd9ff90ecfcb7784d3152660643b7
241,361
def get_solution(self, j_t0=0):
    """Return the solution corresponding to a time step.

    Parameters
    ----------
    self : MeshSolution
        a MeshSolution object
    j_t0 : int
        a time step

    Returns
    -------
    solution : Solution
        a Solution object
    """
    return self.solution[j_t0]
cc0c14b73727a6fdbdeae8b69ced1218b2b64d9b
257,455
def _get_stat_var_mcf(sv_id: str, sv_pv: dict) -> str:
    """Generate an MCF node string for a StatVar.

    Args:
        sv_id: Node Id string for the StatVar
        sv_pv: dictionary of all property:values for the StatVar

    Returns:
        a string with the StatVar node in MCF format, with each property on a
        new line and properties sorted in alphabetical order.
    """
    stat_var = []
    stat_var.append(f'Node: {sv_id}')
    for p in sorted(sv_pv.keys()):
        stat_var.append('{}: {}'.format(p, sv_pv[p]))
    return '\n'.join(stat_var)
c4eb06bbe60a858f647818b64b4721e188ad9886
267,975
def last_updated_cell(i):
    """Make and return the last_updated cell reference in column E."""
    return "E{}".format(str(i))
53e61c345746184200819ab41a7bae73718657c9
206,409
import time


def watch_condition(cond, target=None, timeout=None, interval=0.1):
    """Watch a given condition (a callable) until it returns the target value,
    and return that value. Stop watching on timeout; in that case return None.
    The condition is tested approximately every `interval` seconds.
    """
    start = time.time()
    while True:
        ret = cond()
        if ret == target:
            return ret
        if timeout and time.time() > start + timeout:
            return None
        time.sleep(interval)
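A self-contained sketch polling a counter closure until it reaches the target:

counter = {'n': 0}
def tick():
    counter['n'] += 1
    return counter['n']
print(watch_condition(tick, target=3, timeout=5))  # 3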
8915cb03e35615403f77d8f5682e7b0225cfb57d
363,554
from typing import Any, Dict, List


def _create_group_codes_names(
    contact_models: Dict[str, Any], assort_bys: Dict[str, List[str]]
) -> Dict[str, str]:
    """Create a name for each contact model's group codes.

    The group codes are either found in the initial states or are a
    factorization of one or multiple variables in the initial states.

    ``"is_factorized"`` can be set in contact models to indicate that the
    assortative variable is already factorized, which saves memory.
    """
    group_codes_names = {}
    for name, model in contact_models.items():
        is_factorized = model.get("is_factorized", False)
        n_assort_bys = len(assort_bys[name])

        if is_factorized and n_assort_bys != 1:
            raise ValueError(
                f"'is_factorized' is 'True' for contact model {name}, but there is not "
                f"one assortative variable, but {n_assort_bys}."
            )
        elif is_factorized:
            group_codes_names[name] = assort_bys[name][0]
        else:
            group_codes_names[name] = f"group_codes_{name}"

    return group_codes_names
0fe1419ae5e1b3a6e3b6b60880e8e2db641d6297
300,488
import torch


def bhwc_to_bchw(input: torch.Tensor) -> torch.Tensor:
    """Permute a tensor to the shape [batch size, channels, height, width].

    :param input: (torch.Tensor) Input tensor of the shape [batch size, height, width, channels]
    :return: (torch.Tensor) Output tensor of the shape [batch size, channels, height, width]
    """
    return input.permute(0, 3, 1, 2)
b4ce50136977edd4f2f5156298c84f75fa7780a8
138,184
def mmd2_u_stat_variance(K, inds=(0, 1)):
    """Estimate MMD variance with the estimator from
    https://arxiv.org/abs/1906.02104.

    K should be a LazyKernel; we'll compare the parts in inds, default (0, 1)
    to use K.XX, K.XY, K.YY.
    """
    i, j = inds
    m = K.n(i)
    assert K.n(j) == m

    XX = K.matrix(i, i)
    XY = K.matrix(i, j)
    YY = K.matrix(j, j)

    # falling factorials and other recurring terms
    mm = m * m
    mmm = mm * m
    m1 = m - 1
    m1_m1 = m1 * m1
    m1_m1_m1 = m1_m1 * m1
    m2 = m - 2
    mdown2 = m * m1
    mdown3 = mdown2 * m2
    mdown4 = mdown3 * (m - 3)
    twom3 = 2 * m - 3

    return (
        (4 / mdown4) * (XX.offdiag_sums_sq_sum() + YY.offdiag_sums_sq_sum())
        + (4 * (mm - m - 1) / (mmm * m1_m1))
        * (XY.row_sums_sq_sum() + XY.col_sums_sq_sum())
        - (8 / (mm * (mm - 3 * m + 2)))
        * (XX.offdiag_sums() @ XY.col_sums() + YY.offdiag_sums() @ XY.row_sums())
        + 8 / (mm * mdown3) * ((XX.offdiag_sum() + YY.offdiag_sum()) * XY.sum())
        - (2 * twom3 / (mdown2 * mdown4)) * (XX.offdiag_sum() + YY.offdiag_sum())
        - (4 * twom3 / (mmm * m1_m1_m1)) * XY.sum() ** 2
        - (2 / (m * (mmm - 6 * mm + 11 * m - 6)))
        * (XX.offdiag_sq_sum() + YY.offdiag_sq_sum())
        + (4 * m2 / (mm * m1_m1_m1)) * XY.sq_sum()
    )
6fad7095651f8f66d991af8b33597e64464b7d70
139,537
def to_sequence(index, text):
    """Return a list of integer indices of lemmas in `text` according to the
    word2idx vocab in `index`.

    :param index: word2idx vocab.
    :param text: list of tokens / lemmas / words to be indexed
    :returns: list of indices
    """
    indexes = [index[word] for word in text if word in index]
    return indexes
974c62ee767e2e48c903aabda9b1f4894071cb3a
249,858
def alpha_from_k_half_mode(khalf, beta, gamma):
    """Calculate alpha, given the half-mode wavenumber."""
    return 1. / khalf * (2.**(1. / gamma) - 1.)**(1. / beta)
4eee2694ca05b5a6ebe424791e465e6f954a9f25
86,226
import json
import pathlib


def check_configuration(configuration_file: str) -> dict:
    """Load JSON into a dictionary from a given valid file path string;
    a FileNotFoundError exception is raised for an invalid path.

    :param configuration_file: string of path to configuration
    :return: dict
    """
    path_object = pathlib.Path(configuration_file)
    with path_object.open() as json_data_file:
        config = json.load(json_data_file)
    return config
4a380855799045bdfeea921d55b72e5b16e97eb0
529,456
def unique_list(a_list, unique_func=None, replace=False):
    """Unique a list-like object.

    - a_list: list-like object
    - unique_func: the filter function returning a hashable sign for uniqueness
    - replace: a following item replaces a preceding one with the same sign

    Return the unique sub-collection of a_list.

    Example:
        data = [(1, 2), (2, 1), (2, 3), (1, 2)]
        unique_func = lambda x: tuple(sorted(x))
        unique(data) -> [(1, 2), (2, 1), (2, 3)]
        unique(data, unique_func) -> [(1, 2), (2, 3)]
        unique(data, unique_func, replace=True) -> [(2, 1), (2, 3)]
    """
    unique_func = unique_func or (lambda x: x)
    result = {}
    for item in a_list:
        hashable_sign = unique_func(item)
        if hashable_sign not in result or replace:
            result[hashable_sign] = item
    return list(result.values())
8d7957a8dffc18b82e8a45129ba3634c28dd0d52
708,206
def sliding_box(start, stop, size, step=1):
    """Find sliding boxes of a region.

    Parameters
    ----------
    start : int
        0-based start coordinate of the region.
    stop : int
        0-based stop coordinate of the region, i.e. stop is not part of the
        region anymore.
    size : int
        The size of the box regions.
    step : int, optional
        The step size of the sliding box.

    Returns
    -------
    regions : list
        List of tuples (start, stop) with the extracted regions.
    """
    # we need to truncate if stop - start is not a multiple of step; the
    # original tested `(stop - start) / step != 0`, which is true for any
    # non-empty region, so the modulo is used here as the overhead logic expects
    if (stop - start) % step != 0:
        overhead = (stop - start) % step
        if overhead % 2 == 0:
            start += overhead // 2
            stop -= overhead // 2
        else:
            start += overhead // 2
            stop -= overhead // 2 + 1
    regions = []
    for i in range(0, (stop - start), step):
        regions.append((i + start, i + start + size))
    return regions
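A quick sketch; note that boxes are anchored at the step positions, so the final boxes can extend past `stop`:

print(sliding_box(0, 10, size=4, step=2))
# [(0, 4), (2, 6), (4, 8), (6, 10), (8, 12)]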
7bb0ce44fe0d97e36b38abdc7342df0f5c47fd91
569,028
import re


def dont_document_data(config, fullname):
    """Check whether the given object should be documented.

    Parameters
    ----------
    config : sphinx.Options
        The configuration
    fullname : str
        The name of the object

    Returns
    -------
    bool
        Whether the data of `fullname` should be excluded or not
    """
    if config.document_data is True:
        document_data = [re.compile('.*')]
    else:
        document_data = config.document_data
    if config.not_document_data is True:
        not_document_data = [re.compile('.*')]
    else:
        not_document_data = config.not_document_data
    return (
        # data should not be documented
        any(re.match(p, fullname) for p in not_document_data)
        # or data is not included in what should be documented
        or not any(re.match(p, fullname) for p in document_data))
dfe56e6cad36ecb978d2973efa0a313e4b75587e
634,446
def get_resource_name(prefix, project_name):
    """Get a name that can be used for GCE resources."""
    # https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers
    max_name_length = 58
    project_name = project_name.lower().replace('_', '-')
    name = prefix + '-' + project_name
    return name[:max_name_length]
7779f71e00063b32566f05d4cb0d8daef81043c0
700,829
def _pretty_multiplier(x: float) -> str:
    """Make a prettier version of a multiplier value.

    Args:
        x: Value for a multiplicative factor (e.g., b = x * a)

    Returns:
        A humanized version of it
    """
    if x > 100:
        return f'{x:.0f}x'
    elif x > 2:
        return f'{x:.1f}x'
    return f'{(x - 1) * 100:.1f}%'
98c2f96ac429ac0b17ed929fe39d5caf548c36e2
247,020
import copy


def _fold_stats(result, stats, ktk_cube_dataset_id):
    """Add stats together.

    Parameters
    ----------
    result: Dict[str, Dict[str, int]]
        Result dictionary, may be empty or a result of a previous call to
        :meth:`_fold_stats`.
    stats: Dict[str, int]
        Statistics for a single dataset.
    ktk_cube_dataset_id: str
        Ktk_cube dataset ID for the given ``stats`` object.

    Returns
    -------
    result: Dict[str, Dict[str, int]]
        Result dictionary with ``stats`` added.
    """
    result = copy.deepcopy(result)
    if ktk_cube_dataset_id in result:
        ref = result[ktk_cube_dataset_id]
        for k, v in stats.items():
            ref[k] += v
    else:
        result[ktk_cube_dataset_id] = stats
    return result
72dee1f34ca9357974b858a221b23b9d6ff116dc
154,799
def getOperator(instr):
    """Get the operator from an existing instruction."""
    # integer division is required here; `/` would produce a float in Python 3
    # (change for a binary machine, but beware of breaking number output)
    operator = (instr // 100) * 100
    return operator
5d8163a2cf3f497dbae84565cbe6af913f65085c
231,021
def xpath(elt, xp, ns, default=None):
    """Run an xpath on an element and return the first result. If no results
    were returned then return the default value.
    """
    res = elt.xpath(xp, namespaces=ns)
    if len(res) == 0:
        return default
    else:
        return res[0]
e8e873db535597edc82a3af439db2ddb7a0c1c1b
96,675
def get_clean_block(block):
    """Clean up unicode characters and common OCR errors in blocks."""
    # the first two keys are the fi/fl ligature characters, which the
    # extraction had normalized into plain 'fi'/'fl'
    replacements = {'ﬁ': 'fi',
                    'ﬂ': 'fl',
                    '—': '-',
                    "’": "'",
                    "‘": "'",
                    '”': '"',
                    '“': '"',
                    'km3': 'km2',
                    'kmz': 'km2',
                    '\\N': 'W',
                    'VV': 'W',
                    'lVI': 'M',
                    '\x0c': ''}
    for replacement in replacements:
        if replacement in block:
            block = block.replace(replacement, replacements[replacement])
    return block
7a769ebeb1992070286e1d57bb93421c68c79afc
662,796
def match_edit(json, body):
    """Check that the json edit response matches the request body."""
    # Check the benchmark matches the request
    if 'benchmark_id' in body:
        assert json['benchmark']['id'] == str(body['benchmark_id'])

    # Check the site matches the request
    if 'site_id' in body:
        assert json['site']['id'] == str(body['site_id'])

    # Check the flavor matches the request
    if 'flavor_id' in body:
        assert json['flavor']['id'] == str(body['flavor_id'])

    # Check the tags match the request
    if 'tags_ids' in body:
        json_tags = set(t['id'] for t in json['tags'])
        body_tags = set(str(id) for id in body['tags_ids'])
        assert json_tags == body_tags

    return True
9c5bfceb25c742371db1edc014355a3cb7eecf23
523,775
def camel_case_to_snake_case(camel_case_string):
    """Convert a camelCase string to a snake_case string.

    Args:
        camel_case_string (str): A string using lowerCamelCaseConvention.

    Returns:
        str: A string using snake_case_convention.
    """
    # https://stackoverflow.com/a/44969381
    return ''.join(['_' + c.lower() if c.isupper() else c
                    for c in camel_case_string]).lstrip('_')
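For instance:

print(camel_case_to_snake_case('lowerCamelCase'))  # 'lower_camel_case'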
fc05ba07de498864088211fc447bc24d33b470b3
502,837
from typing import Dict, Union


def _format_peak_comment(mz: Union[int, float], peak_comments: Dict):
    """Format the peak comment for a given mz: return the quoted comment, or
    an empty string if no peak comment is present.
    """
    if peak_comments is None:
        return ""
    peak_comment = peak_comments.get(mz, None)
    if peak_comment is None:
        return ""
    return f"\t\"{peak_comment}\""
dd303655f0b163af27ab043ce69efdefa53d28b6
622,794
def detect_edge_features(features, Dx, Dy, wx, wy=None):
    """Detect edge features in a feature set.

    Parameters
    ----------
    features : `pd.DataFrame`
        Feature set returned from `trackpy.locate`
    Dx, Dy : scalar
        Dimensions of stack
    wx, wy : scalar
        Dimensions of bounding boxes

    Returns
    -------
    edges : array-like
        Indices of edge features (to be discarded)
    """
    # Set wy if not provided (assumes a square box)
    wy = wx if wy is None else wy

    # Create a bounding box for each bead
    df_bboxes = features.loc[:, ['x', 'y']]
    df_bboxes['x_min'] = features['x'] - wx / 2
    df_bboxes['y_min'] = features['y'] - wy / 2
    df_bboxes['x_max'] = features['x'] + wx / 2
    df_bboxes['y_max'] = features['y'] + wy / 2

    # Check boundaries
    edges = features.loc[(df_bboxes['x_min'] < 0) |
                         (df_bboxes['y_min'] < 0) |
                         (df_bboxes['x_max'] > Dx) |
                         (df_bboxes['y_max'] > Dy)].index.values
    return edges
ded29e4fe047a114e833e522a596cc56d576c007
552,870
def TransformNextMaintenance(r, undefined=''):
    """Return the timestamps of the next scheduled maintenance.

    All timestamps are assumed to be ISO strings in the same timezone.

    Args:
        r: JSON-serializable object.
        undefined: Returns this value if the resource cannot be formatted.

    Returns:
        The timestamps of the next scheduled maintenance or undefined.
    """
    if not r:
        return undefined
    next_event = min(r, key=lambda x: x.get('beginTime', None))
    if next_event is None:
        return undefined
    begin_time = next_event.get('beginTime', None)
    if begin_time is None:
        return undefined
    end_time = next_event.get('endTime', None)
    if end_time is None:
        return undefined
    return '{0}--{1}'.format(begin_time, end_time)
7d6bd3bc2437c0f0eaf006674c61a42f11c191a6
377,083
def add_negated_bounded_span(works, start, length):
    """Filter an isolated sub-sequence of variables assigned to True.

    Extract the span of Boolean variables [start, start + length), negate
    them, and if there are variables to the left/right of this span, surround
    the span by them in non-negated form.

    Args:
        works: a list of variables to extract the span from.
        start: the start of the span.
        length: the length of the span.

    Returns:
        a list of variables whose conjunction will be false if the sub-list is
        assigned to True, and correctly bounded by variables assigned to
        False, or by the start or end of works.
    """
    sequence = []
    if start > 0:
        sequence.append(works[start - 1])
    for i in range(length):
        sequence.append(works[start + i].Not())
    if start + length < len(works):
        sequence.append(works[start + length])
    return sequence
2ffcf0e008cebca2b59d89fbc5b3b517aee5df1c
336,204
def define_orderers(orderer_names, orderer_hosts, domain=None):
    """Define orderers as connection objects.

    Args:
        orderer_names (Iterable): List of orderer names.
        orderer_hosts (Iterable): List of orderer hosts.
        domain (str): Domain used. Defaults to None.

    Returns:
        dict: A dictionary of Orderer Connections
    """
    orderer_connections = {}
    for name, host in zip(orderer_names, orderer_hosts):
        if domain:
            key = "{name}.{domain}".format(name=name, domain=domain)
        else:
            key = name
        orderer_connections[key] = {"url": ("grpc://" + host + ":7050")}
    return orderer_connections
32ea2ed7b89afdf64356674e9a1fa0aea85792e3
56,096
def mk_item_id(item_id: str, collection_id: str):
    """Make the Elasticsearch document _id value from the Item id and collection."""
    return f"{item_id}|{collection_id}"
9707c13e9a1e606c4f495004a4561dafe4d08957
451,626
from typing import List


def recortar_nombres(nombres: List[str]) -> List[str]:
    """Trim the names in the list to the length of the last string.

    :param nombres: List of strings.
    :type nombres: List[str]
    :return: List of trimmed strings.
    :rtype: List[str]
    """
    n = len(nombres[-1])
    return [nombre[:n] for nombre in nombres]
00c356d6bf07bdf76054e8ee9cb01800508f2529
457,111
import json

import requests


def request_new_tokens(refresh_token, client_id, client_secret, write_out=None):
    """Contact api.amazon.com/auth/o2/token to retrieve new access and refresh
    tokens.

    :param refresh_token: valid refresh_token
    :param client_id: client_id
    :param client_secret: client_secret
    :param write_out: callable taking a dict argument matching the 'tokens.txt' schema
    :return: access_token, refresh_token
    """
    s = requests.session()
    params_dict = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
        'client_id': client_id,
        'client_secret': client_secret
    }
    res = s.post('https://api.amazon.com/auth/o2/token', data=params_dict,
                 headers={'Content-Type': 'application/x-www-form-urlencoded'})
    if res.status_code == 200:
        payload = json.loads(res.content.decode())
        if callable(write_out):
            write_out(payload)
        return payload.get('access_token'), payload.get('refresh_token')
    else:
        raise Exception("Failed to request new tokens: {} {}".format(
            res.status_code, res.content.decode()))
1ce8bfb274d02b6b8cfdf71aea11b4d59a7fdef5
244,929