Dataset columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import requests


def tmdb_data_for_id(tmdb_id: int, tmdb_api_token: str) -> dict:
    """
    Get additional information for a movie for which you already have the ID

    Args:
        tmdb_id (int): the ID for a movie on The Movie Database
        tmdb_api_token (str): your tmdb v3 api token

    Returns:
        dict
    """
    url = f"https://api.themoviedb.org/3/movie/{tmdb_id}"
    params = {'language': 'en-US', 'api_key': tmdb_api_token}
    return requests.get(url, params=params).json()
dbac37d7969e85138de18a24f1caf963cc7d44da
695,069
def pipe2glue(pcomments, pformat, rec):
    """
    Convert a NMRPipe table to a nmrglue table

    Parameters
    ----------
    pcomments : list
        List of NMRPipe comment lines.
    pformat : list
        List of NMRPipe table column format strings.
    rec : recarray
        Records array with named fields.

    Returns
    -------
    comments : list
        List of comments
    rec : recarray
        Records array with named fields.

    """
    # add a "#" to the list of comments and we are done
    comments = ["# " + c for c in pcomments]
    return comments, rec
4f89bcc734669e0147c4a9e4b511d8135cd8e19e
381,549
def _extract_month_from_filename(fname):
    """Extract month number from precipitation file name"""
    return str(fname[7:].split('.tif')[0])
b01b5c6e537bc0de431bc854ae878ccfe62e71d8
678,171
def _commonPrefix(L):
    """Takes a list of lists, and returns their longest common prefix."""
    assert L
    if len(L) == 1:
        return L[0]
    # Only scan up to the shortest list; the original scanned to the longest,
    # which truncated the prefix early and hit `assert False` on equal lists.
    shortest = min(map(len, L))
    for n in range(1, shortest + 1):
        prefix = L[0][:n]
        for item in L[1:]:
            if prefix != item[:n]:
                return prefix[0:-1]
    return L[0][:shortest]
9ee00995d531880ed8cf8ad22f95209a11124426
541,027
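A quick usage sketch for `_commonPrefix` above; the inputs are my own illustrative examples, not part of the dataset entry.

# Illustrative sanity checks for _commonPrefix
assert _commonPrefix([[1, 2, 3], [1, 2, 4]]) == [1, 2]
assert _commonPrefix([[1, 2], [1, 2, 3]]) == [1, 2]  # one list is a prefix of the other
assert _commonPrefix([[5]]) == [5]                   # a single list returns itself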
def count_n(input_sequence):
    """
    Simply counts the occurrence of 'n' and 'N' in a sequence
    """
    lower = input_sequence.count("n")
    upper = input_sequence.count("N")
    return upper + lower
4678ac4cad2bdf8857dd2291771c82b96e3f51c7
207,741
import torch


def neg_loss(pred, gt):
    """Modified focal loss. Exactly the same as CornerNet.
    Runs faster and costs a little bit more memory.

    Args:
        pred (torch.Tensor): predicted center heatmaps, should have shape [batch, c, h, w]
        gt (torch.Tensor): ground truth center heatmaps, should have shape [batch, c, h, w]

    Returns:
        torch.Tensor with focal loss value.
    """
    pred = pred.unsqueeze(1).float()
    gt = gt.unsqueeze(1).float()

    positive_inds = gt.eq(1).float()
    negative_inds = gt.lt(1).float()
    negative_weights = torch.pow(1 - gt, 4)
    loss = 0

    positive_loss = torch.log(pred + 1e-12) * torch.pow(1 - pred, 3) * positive_inds
    negative_loss = (
        torch.log(1 - pred + 1e-12) * torch.pow(pred, 3) * negative_weights * negative_inds
    )

    num_pos = positive_inds.float().sum()
    positive_loss = positive_loss.sum()
    negative_loss = negative_loss.sum()

    if num_pos == 0:
        loss = loss - negative_loss
    else:
        loss = loss - (positive_loss + negative_loss) / num_pos
    return loss
1e4cf438a4e30b404b88a9a0d8ddf3954580ee49
399,267
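A minimal usage sketch for `neg_loss` above, assuming PyTorch is installed; shapes and values are illustrative only.

import torch

# Illustrative only: batch of 2, 1 class channel, 4x4 heatmaps
pred = torch.rand(2, 1, 4, 4).clamp(1e-4, 1 - 1e-4)  # predicted heatmap in (0, 1)
gt = torch.zeros(2, 1, 4, 4)
gt[0, 0, 1, 1] = 1.0  # one ground-truth center peak
loss = neg_loss(pred, gt)
print(loss.item())  # a positive scalar focal-loss value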
import struct


def UnpackAttributeRecordPartialHeader(Buffer):
    """Unpack the first 16 bytes of the attribute record header, return a tuple:
    (type_code, record_length, form_code, name_length, name_offset, flags, instance)."""
    return struct.unpack('<LLBBHHH', Buffer)
27e86c82872727bfcc6d4fa3e8c1146bc1686673
187,720
def preprocess_keylist(keylist, **options):
    """Convert a list of keys to a comma-separated string."""
    if isinstance(keylist, list):
        return ", ".join([str(key) for key in keylist])
    return keylist
ce31366f08de1f164c9deaabbad47310bf211caa
364,858
def _pipe_separated_uppercase(val):
    """
    Returns ``None`` if *val* is ``'*'``.  Otherwise, split *val* on the
    ``'|'`` character and return a list of the items, transformed to all caps.

    >>> _pipe_separated_uppercase("a|b|c")
    ['A', 'B', 'C']
    >>> _pipe_separated_uppercase("*") is None
    True
    """
    # Return a list (not a set) so the order matches the docstring and doctest.
    return None if val == '*' else [s.upper() for s in val.split('|')]
9a1ac36edb4cde29f917b5700cd2bd42a88532fc
251,647
def get_overlaps(first_intervals, second_intervals):
    """
    >>> get_overlaps([(1, 2), (3, 4), (8, 9)], [(1, 4), (7, 8.5)])
    [(1, 2), (3, 4), (8, 8.5)]
    >>> get_overlaps([(1, 4), (7, 8.5)], [(1, 2), (3, 4), (8, 9)])
    [(1, 2), (3, 4), (8, 8.5)]
    >>> get_overlaps([(1, 8), (9, 10)], [(2, 3), (5, 6), (7, 9.5)])
    [(2, 3), (5, 6), (7, 8), (9, 9.5)]
    >>> get_overlaps([(2, 3), (5, 6), (7, 9.5)], [(1, 8), (9, 10)])
    [(2, 3), (5, 6), (7, 8), (9, 9.5)]
    >>> get_overlaps([(1, 10)], [(0, 5)])
    [(1, 5)]
    >>> get_overlaps([(0, 5)], [(1, 10)])
    [(1, 5)]
    >>> get_overlaps([(1, 6), (7, 9)], [(5.5, 7.5)])
    [(5.5, 6), (7, 7.5)]
    >>> get_overlaps([(5.5, 7.5)], [(1, 6), (7, 9)])
    [(5.5, 6), (7, 7.5)]
    """
    overlaps = []
    for first_interval in first_intervals:
        # Find the index of the first interval in the second list starting after
        # this interval ends.  We do not need to search beyond this interval.
        end_index = None
        for index, second_interval in enumerate(second_intervals):
            if second_interval[0] >= first_interval[1]:
                end_index = index
                break
        # Go through all of these intervals and compute the overlaps.
        for second_interval in second_intervals[:end_index]:
            if second_interval[1] > first_interval[0]:
                overlap = (max(first_interval[0], second_interval[0]),
                           min(first_interval[1], second_interval[1]))
                overlaps.append(overlap)
    return overlaps
72b7310c30b77bf9465b3e7ef682f81aba0a28ff
108,232
def rank_points_to_avg_rank(sum_points: float, current_week: int) -> float:
    """Average rank (1 highest, 10 lowest) based on total rank points at current week.
    Assumes 10 players, 1st = 1, 10th = 0.1.

    Args:
        sum_points: float, sum of rank points through current week
        current_week: int, current week number

    Returns:
        _: float, average rank
    """
    return (1 - (sum_points / current_week)) * 10 + 1
370b3883abb717c7a436f28d62c9d70aece7f7e1
179,144
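A quick sanity check of the formula above under its stated assumptions (10 players, a 1st-place finish worth 1 point per week, 10th worth 0.1); the test values are mine.

# A player who finished 1st every week: sum_points == current_week
assert rank_points_to_avg_rank(10.0, 10) == 1.0
# A player who finished 10th every week: sum_points == 0.1 * current_week
assert abs(rank_points_to_avg_rank(1.0, 10) - 10.0) < 1e-9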
def unwrap(func):
    """
    Returns the object wrapped by decorators.
    """
    def _is_wrapped(f):
        return hasattr(f, '__wrapped__')

    unwrapped_f = func
    while _is_wrapped(unwrapped_f):
        unwrapped_f = unwrapped_f.__wrapped__
    return unwrapped_f
17aa0c8cc91578fd1187784ad0396ed91c5ec9b8
4,189
def get_target_value_list(data_set):
    """Get the list that contains the value of quality."""
    target_value_list = []
    for line in data_set:
        target_value_list.append(line[-1])
    return target_value_list
1f254e67fa1b6d763913f490442f1197ae040897
311,124
def format_prefix(meta):
    """Format log metadata as a prefix to be prepended to log lines."""
    ts = meta.time.strftime('%H:%M:%S.%f')[:-3]
    if meta.comm and meta.pid:
        return "%s %s[%d]: " % (ts, meta.comm, meta.pid)
    else:
        return ts + ": "
bb9707637ab911dfc962218f9f7828adcb9db053
504,292
def max_yngve_depth(yngve_tree_root):
    """Returns the max depth of the yngve tree of the sentence

    Args:
        yngve_tree_root (obj): The root node

    Returns:
        int: The max depth of the yngve tree
    """
    return max([leaf.score for leaf in yngve_tree_root.leaves])
09b31e14ac66f07dd21b041740301a1e8355e71f
97,342
def length_of_last_word(words):
    """
    Returns the length of the last word (a string of lower or uppercase characters)
    from a string composed of space and alphabetic characters.

    Parameters:
    words(str)

    Returns:
    int
    """
    def traverse(index, progress):
        """
        Returns the index of a delimiter (either a space or the end/beginning
        of the string).

        Parameters:
        start_index(int): the index to start iterating from
        progress(int): a positive or negative 1, to indicate what direction to iterate in

        Returns:
        int: the modified value of start_index
        """
        # determine the stop condition
        if progress < 0:
            # iterating backwards
            STOP = STOP_INDEX_NEGATIVE
        else:  # progress > 0
            # iterating forwards
            STOP = -1
        # iterate
        while index != STOP:  # stops for words with no space
            # stop for words with a space
            if words[index].isalpha() and words[index + progress] == " ":
                break
            else:
                index += progress
        return index

    # useful constants for iteration
    WORDS_LENGTH = len(words)
    STOP_INDEX_NEGATIVE = -WORDS_LENGTH

    # solve for all cases with no space
    if " " not in words:
        return len(words)

    # find the index of the starting delimiter of the last word
    start = -1
    start = traverse(start, -1)
    # find the index of the ending delimiter of the last word
    end = start
    end = traverse(end, 1)

    # index the last word
    start += WORDS_LENGTH
    end += WORDS_LENGTH
    last_word = words[start:end + 1]

    # return the length of the substring between the delimiters
    return len(last_word)
d39069c026ff7e91239e4e65126be13186e6eaab
504,524
from functools import reduce


def _get_fragments_coord(frags):
    """Return the letter coordinate of the given list of fragments (PRIVATE).

    This function takes a list of three-letter amino acid sequences and
    returns a list of coordinates for each fragment had all the input
    sequences been flattened.

    This is an internal private function and is meant for parsing Exonerate's
    three-letter amino acid output.

    >>> from Bio.SearchIO.ExonerateIO._base import _get_fragments_coord
    >>> _get_fragments_coord(['Thr', 'Ser', 'Ala'])
    [0, 3, 6]
    >>> _get_fragments_coord(['Thr', 'SerAlaPro', 'GlyLeu'])
    [0, 3, 12]
    >>> _get_fragments_coord(['Thr', 'SerAlaPro', 'GlyLeu', 'Cys'])
    [0, 3, 12, 18]

    """
    if not frags:
        return []
    # first fragment always starts from position 0
    init = [0]
    return reduce(lambda acc, frag: acc + [acc[-1] + len(frag)], frags[:-1], init)
8948c16e68048e9b3b4a4fe7455cff816d1cb736
609,066
def _compute_checksum(packet):
    """Computes the checksum byte of a packet. Packet must not contain a checksum already.

    Args:
        packet (list): List of bytes for a packet. packet must not contain a
            checksum as the last element

    Returns:
        The computed checksum byte.
    """
    # checksum is the sum of the bytes from device id to the end of the data,
    # mod (%) 256 and bit negated (~) (1's complement),
    # and (&) with 0xFF to make sure it is a byte.
    return ~(sum(packet[2:]) % 0x100) & 0xFF
260d268938236169dc6d3448670ac972995d2d41
399,865
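A usage sketch for `_compute_checksum` above with a made-up packet; the byte layout (two header bytes, then device id, command, data) is my illustrative assumption.

# Illustrative packet: [header0, header1, device_id, cmd, data]
packet = [0xFF, 0xFF, 0x01, 0x02, 0x03]
checksum = _compute_checksum(packet)
# The checksum is defined so that (sum of bytes from device id on + checksum) wraps to 0xFF
assert (sum(packet[2:]) + checksum) & 0xFF == 0xFF
packet.append(checksum)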
def lemmatize_sentence(sentence: dict, terms: dict):
    """
    Lemmatize naf sentence

    Args:
        sentence: dict of sentence (naf)
        terms: list of terms dict (naf)

    Returns:
        list of lemmas (str), one per term in the sentence
    """
    return [terms[term["id"]]["lemma"] for term in sentence["terms"]]
f78cef0fd39b65be351b2dba954551e2ff489d8b
151,653
def warp_required(xlooks, ylooks, crop):
    """
    Check if a crop or multi-look operation is required.

    :param int xlooks: Resampling/multi-looking in x dir
    :param int ylooks: Resampling/multi-looking in y dir
    :param int crop: Interferogram crop option

    :return: True if params show rasters need to be cropped and/or resized
    :rtype: bool
    """
    if xlooks > 1 or ylooks > 1:
        return True
    if crop is None:
        return False
    return True
627c0365c341784b4ae5b385a921e3906ebcb867
471,220
def gm_estimator(gm_list):
    """
    GM-PHD State Estimator

    Args:
        gm_list (:obj:`list`): List of Gaussian components representing posterior
            probability hypothesis density of multiple targets.

    Returns:
        (:obj:`list`): List of estimated target states.
    """
    targets = []
    for gm in gm_list:
        if gm.weight > 0.5:
            targets.append(gm.mean)
    return targets
be77fa30375b8fdd7c44c612d37a826fc7d0f485
278,504
def get_extended_attention_mask(attention_mask):
    """
    Makes broadcastable attention and causal masks so that future and masked
    tokens are ignored.

    Arguments:
        attention_mask (`torch.Tensor`):
            Mask with ones indicating tokens to attend to, zeros for tokens to ignore.

    Returns:
        `torch.Tensor` The extended attention mask, with the same dtype as
        `attention_mask.dtype`.
    """
    # We can provide a self-attention mask of dimensions
    # [batch_size, from_seq_length, to_seq_length] ourselves, in which case we
    # just need to make it broadcastable to all heads.
    if attention_mask.dim() == 3:
        extended_attention_mask = attention_mask[:, None, :, :]
    elif attention_mask.dim() == 2:
        extended_attention_mask = attention_mask[:, None, None, :]
    else:
        raise ValueError(
            f"Wrong shape for attention_mask (shape {attention_mask.shape})"
        )

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
    return extended_attention_mask
a7ded6363ab5f28377c7cd54f86a9b6bb6101b07
86,366
def get_tid_timestamp_from_event(event):
    """
    Return the thread ID & timestamp (in sec) from an event
    """
    # line 1 of each event: the program name; thread id (tid); time (in sec);
    # cycles taken for the trace (not sure)
    # Example:
    #   demo_sift1M_rea  3100  945.398942:  1 cycles:
    fields_with_spaces = event[0].replace("\t", "").replace("\n", "").replace(":", "").split(" ")
    fields = []
    for f in fields_with_spaces:
        if f != '':
            fields.append(f)
    tid = int(fields[1])
    timestamp = float(fields[2])
    return tid, timestamp
8a44d22fa15c8f7b39b060cb32451c4cd5ee0fee
464,276
def title(soup):
    """ returns the title of a listing """
    data_list = [element.text for element in soup.find_all("div", "title")]
    return data_list[0].strip()
b61fc19f3e4da84b439cf36db1c8c35b6157bd00
453,836
def _merge_params(cli, config):
    """Merge CLI params with configuration file params.

    Note that the configuration params will overwrite the CLI params."""
    # update CLI params with configuration; overwrites
    params = dict(list(cli.items()) + list(config.items()))
    return params
aa7a70909c63931758f06e178398b3cfb2a6e497
110,776
def format_percent(n):
    """Formats n as a percentage"""
    return '{:.1%}'.format(n)
e3e47722374e90be0249236b71c40658afb85f86
150,485
import datetime


def convertto_iso_format(dt_obj: datetime.datetime):
    """Takes a given datetime object and returns the timestamp in ISO format
    as a string.

    Examples:
        >>> now = get_epoch_time()
        >>> now
        1636559940.508071

        >>> now_as_dt_obj = convertfrom_epoch_time( now )
        >>> now_as_dt_obj
        datetime.datetime(2021, 11, 10, 7, 59, 0, 508071)

        >>> convertto_human_readable_time( now_as_dt_obj )
        '2021-11-10 07:59:00.508071'

        >>> convertto_iso_format( now_as_dt_obj )
        '2021-11-10T07:59:00.508071'

    Args:
        dt_obj (datetime.datetime): Reference an existing datetime object.

    Returns:
        str: Returns a string.
    """
    iso_format = dt_obj.isoformat()
    return iso_format
3034ad46411438efb2abb3848cfaa12e18983aca
663,708
def _is_int_in_range(value, start, end):
    """Try to convert value to int and check if it lies within
    range 'start' to 'end'.

    :param value: value to verify
    :param start: start number of range
    :param end: end number of range
    :returns: bool
    """
    try:
        val = int(value)
    except (ValueError, TypeError):
        return False
    return start <= val <= end
54ed477b4d6f603a48a1104d60c00433b1cc47db
21,267
def create_board_aggregate(board):
    """ Creates a more convenient board aggregate wrapper around a Board model.

    Parameters:
    board (board.models.Board): The base Board model which will be wrapped.

    Returns:
    A dict with the following shape:
        {
            id: [boardId],
            name: [boardName],
            sections: [
                {
                    id: [firstSectionId],
                    name: [firstSectionName],
                    tasks: [
                        {
                            id: [firstTaskId],
                            text: [firstTaskText]
                        }
                    ]
                }
            ]
        }
    """
    data = {
        'id': board.id,
        'name': board.name,
    }

    # Build sectionList
    sectionList = []
    for section in board.section_set.all():
        # Build taskList
        taskList = []
        for task in section.task_set.all():
            # Each task is a dict { id: int, text: str }
            taskList.append({
                'id': task.id,
                'text': task.text,
            })
        # Build current section entry and add it to sectionList
        sectionEntry = {
            'id': section.id,
            'name': section.name,
            'tasks': taskList,
        }
        sectionList.append(sectionEntry)
    data['sections'] = sectionList

    return data
483bfdd9da8f4a4c0a0c53a2cb7193a0cfb15076
519,547
def get_all_names(actions):
    """ Returns all action names present in the given actions dictionary."""
    assert isinstance(actions, dict)
    names = set(actions.keys())
    return names
1656ecd25f17ca1abcdf685b9fbac3413bd2e8b5
158,979
def split_antonyms(text):
    """
    >>> split_antonyms(' antialkoholista, absztinens <em>val</em>, bornemissza <em>reg</em>')
    ['antialkoholista', 'absztinens <em>val</em>', 'bornemissza <em>reg</em>']
    >>> split_antonyms('marad &lt;vhol&gt;, kitart &lt;vki mellett, vmi mellett&gt;, ragaszkodik &lt;vkihez&gt;')
    ['marad &lt;vhol&gt;', 'kitart &lt;vki mellett, vmi mellett&gt;', 'ragaszkodik &lt;vkihez&gt;']
    >>> split_antonyms(' [vallalkozas, uzlet]: nyereseges, hasznos')
    ['nyereseges', 'hasznos']
    """
    antonyms = []
    if ':' in text:
        text = text[text.index(':') + 1:]
    in_meta = False
    last_i = 0
    for i, ch in enumerate(text):
        if text[i:].startswith(('&lt;', '[', '(')):
            in_meta = True
        elif text[i:].startswith(('&gt;', ']', ')')):
            in_meta = False
        if ch == ',' and not in_meta:
            antonym = text[last_i:i].strip()
            antonyms.append(antonym)
            last_i = i + 1
    antonym = text[last_i:].strip()
    antonyms.append(antonym)
    return antonyms
bd6222d62d7e4ca66ee59c8968f0efdd8dbb0b8b
414,462
import textwrap


def wrap_text(text_string):
    """Split a text string into multiple lines to improve legibility
    """
    # First, check to see if the text was already formatted. We do
    # this by trying to split the text string into multiple lines
    # based on newlines contained in the string.
    lines = text_string.splitlines()
    if len(lines) > 1:
        # Already formatted
        return lines
    # Not already formatted. Wrap it ourselves.
    return textwrap.wrap(text_string)
a2ed14a294e6e17b17b31e51ee6b42a217f6235c
74,455
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
                           window_size_ms, window_stride_ms,
                           dct_coefficient_count):
    """Calculates common settings needed for all models.

    Args:
        label_count: How many classes are to be recognized.
        sample_rate: Number of audio samples per second (the two defaults plus
            the parameters specified at training time).
        clip_duration_ms: Length of each audio clip to be analyzed.
        window_size_ms: Duration of frequency analysis window.
        window_stride_ms: How far to move in time between frequency windows.
        dct_coefficient_count: Number of frequency bins to use for analysis.

    Returns:
        Dictionary containing common settings.
    """
    # number of audio sample points per clip, e.g. 16k rate * 1 s = 16000
    desired_samples = int(sample_rate * clip_duration_ms / 1000)
    # samples per frame and per frame shift; here both come out to 640
    window_size_samples = int(sample_rate * window_size_ms / 1000)
    window_stride_samples = int(sample_rate * window_stride_ms / 1000)
    # subtract one frame: DNN (16000-640)/640+1=25, CNN (16000-640)/320+1=49
    length_minus_window = desired_samples - window_size_samples
    if length_minus_window < 0:
        spectrogram_length = 0
    else:
        spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
    fingerprint_size = dct_coefficient_count * spectrogram_length
    return {
        'desired_samples': desired_samples,
        'window_size_samples': window_size_samples,
        'window_stride_samples': window_stride_samples,
        'spectrogram_length': spectrogram_length,
        'dct_coefficient_count': dct_coefficient_count,
        'fingerprint_size': fingerprint_size,
        'label_count': label_count,
        'sample_rate': sample_rate,
    }
2ef36b4861c634b43f985b6fdd96dd2fdf47c983
231,918
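A worked example for `prepare_model_settings` above, using the 16 kHz / 1 s / 40 ms numbers from the inline comments; the label and coefficient counts are my illustrative choices.

settings = prepare_model_settings(
    label_count=12, sample_rate=16000, clip_duration_ms=1000,
    window_size_ms=40, window_stride_ms=40, dct_coefficient_count=40)
assert settings['desired_samples'] == 16000
assert settings['window_size_samples'] == 640
assert settings['spectrogram_length'] == 25      # 1 + (16000 - 640) // 640
assert settings['fingerprint_size'] == 40 * 25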
def ensure_list(config):
    """
    ensure_list

    Ensure that config is a list of one-valued dictionaries. This is called
    when the order of elements is important when loading the config file.
    (The yaml elements MUST have hyphens '-' in front of them).

    Returns config if no exception was raised. This is to keep the same
    format as ensure_dictionary, and allows possible config file repairs in
    the future without breaking the API.
    """
    if not isinstance(config, list):
        raise TypeError("config is not a list. Did you forget some '-' " +
                        "in your configuration file ?\n" + str(config))
    for element in config:
        if isinstance(element, str):
            continue
        if not isinstance(element, dict):
            raise ValueError("Parsing error in the configuration file.\n" +
                             str(element))
        if len(element) != 1:
            raise ValueError("Parsing error in the configuration file.\n" +
                             str(element))
    return config
56397e3eb6ab98d40392a668112febc77f11d9cc
684,087
def filter_data(dataset, remove_no_school, remove_outliers, begin_date):
    """
    Filter lines out of the dataset:
    - remove_no_school: bool, if True, days considered non-working are removed
    - remove_outliers: bool, if True, days considered outliers are removed
      from the training set only

    Note that outliers here are:
    - statistical outliers
    - strikes
    - days with no expected guests
    - days with no effective guests
    """
    # remove weekends and holidays
    if remove_no_school:
        mask = (dataset['working'] != 0)
        dataset = dataset.loc[mask]

    # remove outliers
    if remove_outliers:
        mask = \
            (dataset['prevision'] != 0) & \
            (dataset['reel'] != 0) & \
            (dataset['date_str'] <= begin_date) & \
            (dataset['greve'] != 1) & \
            (dataset['upper_outlier'] != 1) & \
            (dataset['lower_outlier'] != 1)
        # | (dataset['date_str'] >= begin_date)
        dataset = dataset.loc[mask]
    return dataset
e66221d957dba3bb456447b336927f0755cfb30a
167,682
def bisect_env_args(patchset):
    """Generates arguments for bisect-env to apply the patchset"""
    args = []
    for patch in patchset:
        args.append(f"--try-pick={patch}")
    return args
3a79fa261cd6027bcc5cc62dc1b9d1da13f915f3
85,281
import importlib


def import_from_string(val, setting_name):
    """
    Attempt to import a class from a string representation.
    """
    try:
        # Nod to tastypie's use of importlib.
        parts = val.split(".")
        module_path, class_name = ".".join(parts[:-1]), parts[-1]
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError) as e:
        msg = "Could not import '%s' for Graphene setting '%s'. %s: %s." % (
            val,
            setting_name,
            e.__class__.__name__,
            e,
        )
        raise ImportError(msg)
4e70397cc5ef73988dc8a6afc69401431462efba
454,868
from typing import List
from typing import Set


def scan_polarimeter_names(group_names: List[str]) -> Set[str]:
    """Scan a list of group names and return the set of polarimeters in it.

    Example::

        >>> scan_polarimeter_names(["BOARD_G", "COMMANDS", "LOG", "POL_G0", "POL_G6"])
        {'G0', 'G6'}
    """
    result = set()  # type: Set[str]
    for curname in group_names:
        if (len(curname) == 6) and (curname[0:4] == "POL_"):
            result.add(curname[4:6].upper())
    return result
d4ae998041401beb40df7746ec0afc2b98c9d6b1
691,084
def convert_plotly(value: float) -> float:
    """
    Convert the given element to the proper value used in plotly polar plots.
    Works with _Variant.polar_x, _PolarLocus.theta, _PolarLocus.width, using
    the magic number 57.1 (close to 180/pi ~ 57.3, i.e. roughly a
    radians-to-degrees conversion).

    Args:
        value: input value to convert

    Returns:
        adjusted value
    """
    return value * 57.1
11f6ee850a242db890a180d969009069c23d6ed7
185,573
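A small check of the 57.1 constant in `convert_plotly` above against an exact radians-to-degrees conversion; reading 57.1 as an approximation of 180/pi is my interpretation, not the original author's statement.

import math

theta = 1.0                      # one radian, illustrative
exact = math.degrees(theta)      # 57.29577951308232
approx = convert_plotly(theta)   # 57.1
print(exact - approx)            # ~0.196 degrees of drift per radian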
def signUp(command):
    """
    Check if command is to sign up (u | signup).
    """
    return command.lower() == 'u' or command.lower() == 'signup'
1dd2a6fd7fa999ed3aa6679a3616962d538712e6
561,999
def add_access_control_headers(response):
    """Adds the required access control headers"""
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Set-Cookie'
    response.headers['Access-Control-Allow-Methods'] = 'POST,GET,PUT,DELETE'
    response.headers['Cache-Control'] = 'No-Cache'
    return response
c4108e3e34b349e9211b946e3501a5c5f9a4c79e
605,946
import networkx


def is_connected(domain):
    """ Test if a :py:class:`~fieldkit.mesh.Domain` is connected.

    Returns
    -------
    bool
        `True` if the `domain` is a single connected component,
        and `False` otherwise.
    """
    return networkx.is_connected(domain.graph)
c43d38a4bb6fd13ed09eeb6df68238da802c8888
446,788
def is_upper_snake(text):
    """
    Check if a string is in an UPPER_SNAKE_CASE format

    :param text: String to check
    :return: Whether string is in upper snake format
    """
    if " " in text:
        return False
    return "_" in text and text.isupper()
8376a485949361a737ac3cca0a7630606fb805ab
544,266
def status_sps(status):
    """
    This method will return valid, invalid or undefined for a given result
    of models.PackageMember.sps_validation_status().

    status: Tuple(None, {})
    status: Tuple(True, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
    status: Tuple(False, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
    """
    if status[0] is True:
        return 'valid'
    if status[0] is False:
        return 'invalid'
    return 'undefined'
d6e53286fbc13da06e94a68bb048f25df8a902b1
257,047
def mapValue(value, in_min, in_max, out_min, out_max):
    """
    Returns a new value mapped in a desired range.

    Parameters:
        value: value to be mapped
        in_min - in_max: limits of the range where the value is
        out_min - out_max: limits of the range where the value will be mapped
    """
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
61804f0f6499a155a779d4255e048500a406207e
138,322
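A worked example for `mapValue` above: linearly rescaling a reading from one range to another (the ADC-style numbers are my own illustration).

# Map 512 from the 0-1023 range onto 0-100
print(mapValue(512, 0, 1023, 0, 100))   # 50.048875855327466
# Endpoints map to endpoints
assert mapValue(0, 0, 1023, 0, 100) == 0
assert mapValue(1023, 0, 1023, 0, 100) == 100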
def unify_int_list(intlist):
    """
    Remove duplicates and sort list of integers

    :param intlist: list of integers
    :return: sorted unique list
    """
    return sorted(set(intlist))
6411649d54c954d26e7a63ed226a98e38de4a2f2
400,716
import torch


def azimuthal_average(image, center=None):
    # modified to tensor inputs from
    # https://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
    """
    Calculate the azimuthally averaged radial profile.
    Requires low frequencies to be at the center of the image.

    Args:
        image: Batch of 2D images, NxHxW
        center: The [x,y] pixel coordinates used as the center. The default is
            None, which then uses the center of the image (including
            fractional pixels).

    Returns:
        Azimuthal average over the image around the center
    """
    # Check input shapes
    assert center is None or (len(center) == 2), f'Center has to be None or len(center)=2 ' \
                                                 f'(but it is len(center)={len(center)}.'

    # Calculate the indices from the image
    H, W = image.shape[-2:]
    h, w = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))

    if center is None:
        center = torch.tensor([(w.max() - w.min()) / 2.0, (h.max() - h.min()) / 2.0])

    # Compute radius for each pixel wrt center
    r = torch.stack([w - center[0], h - center[1]]).norm(2, 0)

    # Get sorted radii
    r_sorted, ind = r.flatten().sort()
    i_sorted = image.flatten(-2, -1)[..., ind]

    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.long()  # attribute to the smaller integer

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # assumes all radii represented; bin change between subsequent radii
    rind = torch.where(deltar)[0]    # location of changed radius

    # compute number of elements in each bin
    nind = rind + 1  # number of elements = idx + 1
    nind = torch.cat([torch.tensor([0]), nind, torch.tensor([H * W])])  # add borders
    nr = nind[1:] - nind[:-1]  # counter for bins belonging to each radius

    # Cumulative sum to figure out sums for each radius bin
    if H % 2 == 0:
        raise NotImplementedError('Not sure if implementation correct, please check')
        rind = torch.cat([torch.tensor([0]), rind, torch.tensor([H * W - 1])])  # add borders
    else:
        rind = torch.cat([rind, torch.tensor([H * W - 1])])  # add borders

    csim = i_sorted.cumsum(-1, dtype=torch.float64)  # integrate over all values with smaller radius
    tbin = csim[..., rind[1:]] - csim[..., rind[:-1]]
    # add mean
    tbin = torch.cat([csim[:, 0:1], tbin], 1)

    radial_prof = tbin / nr.to(tbin.device)  # normalize by counted bins

    return radial_prof
a40754537a58f07cf99f6b207be8a7f95d258c9f
669,904
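A usage sketch for `azimuthal_average` above, assuming PyTorch; I use an odd image size since the function raises NotImplementedError for even heights, and the batch of random "spectra" is purely illustrative.

import torch

images = torch.rand(3, 65, 65)        # batch of 3 random 65x65 images
profile = azimuthal_average(images)
print(profile.shape)                  # (3, number_of_radial_bins)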
def get_data_table_list(root_el):
    """Return list of dataTable elements in EML doc"""
    if not root_el:
        return []
    return root_el.xpath('.//dataset/dataTable')
0885729f1bd80467591090f8a1f40996b3fa405d
390,598
def check_parse_errors(options, args):
    """Do validations on command line options, returning error messages, if any."""
    if not options.language:
        return "language parameter not informed."
    elif not args:
        return "base path not informed."
    else:
        return None
cefa9608bc37b551d8ca3f93a196ff072f67b451
44,873
def positive_places(f, xs):
    """Takes as arguments some function f and a list of numbers xs and returns
    a list of those-and-only-those elements x of xs for which f(x) is strictly
    greater than zero.
    """
    ans = []
    for x in xs:
        if f(x) > 0:
            ans.append(x)
    return ans
25d6e9d678ea14b7ec2ccec808606b9c6e0b780d
598,073
def repos_split(repos_relpath):
    """Split a repos path into its directory and basename parts."""
    idx = repos_relpath.rfind('/')
    if idx == -1:
        return '', repos_relpath
    return repos_relpath[:idx], repos_relpath[idx + 1:]
3bd1d76f75664ac28d03277214b5cd9f2bdcaf05
32,234
def check_uniqueness_in_rows(board: list):
    """
    Check buildings of unique height in each row.

    Return True if buildings in a row have unique heights, False otherwise.
    """
    for row in board:
        row = row[1:-1].replace("*", "")
        for num in row:
            if row.count(num) > 1:
                return False
    return True
c30ea2f1de9c2afe4018913cbec96fe72923e7a3
649,810
def numeric_scraper(text: str) -> list:
    """Returns a list of all numbers in a given string
    ex "a11b2c333" -> [11, 2, 333]"""
    num_lst = []
    cur_num = ""
    in_number = False
    for char in text:
        if in_number:
            # currently inside a run of digits
            if char.isdigit():
                cur_num += char
            else:
                num_lst.append(int(cur_num))
                cur_num = ""
                in_number = False
        else:
            # looking for the next run of digits
            if char.isdigit():
                cur_num += char
                in_number = True
    # flush the final run of digits, if any
    if in_number:
        num_lst.append(int(cur_num))
    return num_lst
77ab521962e945289d88583cca626b201ae7d221
138,414
def _print_parameter_name_and_type(p_name, p_type):
    """
    Print parameter name and type following Numpy docstring standard.
    If `p_name` is an empty string, then return an empty string.

    Parameters
    ----------
    p_name: str
        Parameter name
    p_type: str
        Parameter type
    """
    # An empty name yields an empty string, per the docstring contract.
    if not p_name:
        return ""
    s = f"{p_name}"
    if p_type:
        s += f" : {p_type}"
    return f"{s}\n"
87498fa026a713f8ee7d062c33d3ed0bf950bab8
609,997
def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):
    """
    Generic chaining function for second derivative

    .. math::
        \\frac{d^{2}(f \\circ g)}{dx^{2}} = \\frac{d^{2}f}{dg^{2}}\\left(\\frac{dg}{dx}\\right)^{2} + \\frac{df}{dg}\\frac{d^{2}g}{dx^{2}}
    """
    return d2f_dg2 * (dg_dx ** 2) + df_dg * d2g_dx2
9d24771fa5f3a7632051ebc5ad33732eacbaef35
381,889
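A worked check of `chain_2` above, using f(g) = g^2 composed with g(x) = x^3 (my example): f(g(x)) = x^6, whose second derivative is 30x^4.

x = 2.0
g, dg_dx, d2g_dx2 = x**3, 3 * x**2, 6 * x   # g(x) = x^3 and its derivatives
df_dg, d2f_dg2 = 2 * g, 2.0                 # f(g) = g^2 and its derivatives
assert chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2) == 30 * x**4  # (x^6)'' = 30x^4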
import re


def microlisp_tokenize(txt):
    """ Split code text to tokens: `(', `)', non-space sequences """
    tokens = re.split(r'(\s+|\(|\))', txt)
    return [t for t in tokens if len(t) and not t.isspace()]
cc03e78ae20e34318267c1502063a13ff5ab13fc
180,714
def transpose(x):
    """
    Return the transpose of ``x``.

    EXAMPLES::

        sage: M = MatrixSpace(QQ,3,3)
        sage: A = M([1,2,3,4,5,6,7,8,9])
        sage: transpose(A)
        [1 4 7]
        [2 5 8]
        [3 6 9]
    """
    return x.transpose()
61b1e03bad41cd0811fa3fa44b7c217bb5555d04
539,552
def calculate_hamming_distance(input_bytes_1, input_bytes_2):
    """Finds and returns the Hamming distance (number of differing bits)
    between two byte-strings
    """
    hamming_distance = 0
    for b1, b2 in zip(input_bytes_1, input_bytes_2):
        difference = b1 ^ b2
        # Count the number of differences ('1's) and add to the hamming distance
        hamming_distance += sum([1 for bit in bin(difference) if bit == '1'])
    return hamming_distance
b7856a36d09ef8c1d8497aebb32995c323cf7d6a
377,631
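A quick check of `calculate_hamming_distance` above, using the well-known test pair from the Cryptopals challenges.

# Known test vector: these two 14-byte strings differ in 37 bits
assert calculate_hamming_distance(b"this is a test", b"wokka wokka!!!") == 37
# Identical inputs differ in zero bits
assert calculate_hamming_distance(b"abc", b"abc") == 0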
def _get_mins_and_secs_str_from_secs(delta):
    """ Returns minutes and seconds from a seconds number """
    # Use floor division for the minutes; round() here could overshoot and
    # produce negative seconds (e.g. delta=50 gave "1 minute and -10 seconds").
    mins = int(delta // 60)
    secs = round(delta - mins * 60)
    if secs == 60:
        mins += 1
        secs -= 60
    time_text = (f"{mins} minute" if mins > 0 else "") + ("s" if mins > 1 else "") + \
                (" and " if mins > 0 and secs > 0 else "") + \
                (f"{secs} second" if secs > 0 else "") + ("s" if secs > 1 else "")
    return time_text
7593e168e6de90eb00bcc8844ae66e268c531a7b
657,113
def multiples(arg):
    """
    Returns a list of numbers in the range [0,100] that are multiples of arg.

    Parameters
    ----------
    arg : int
        the multiplier

    Returns
    -------
    list(int)
        The multiples of arg between 0 and 100, inclusive.

    Examples
    --------
    >>> multiples(4)
    [0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100]
    """
    return [n for n in range(0, 101) if n % arg == 0]
3a126093077aeb1b06b49f8d5784336b61a8cc68
177,686
import time


def timetrace(message, idstring, tracemessage="TEST_MESSAGE", final=False):
    """
    Trace a message with time stamps.

    Args:
        message (str): The actual message coming through
        idstring (str): An identifier string specifying where this trace is happening.
        tracemessage (str): The start of the message to tag. This message will
            get attached time stamp.
        final (bool): This is the final leg in the path - include total time in message
    """
    if message.startswith(tracemessage):
        # the message is of the form "TEST_MESSAGE tlast t0",
        # where t0 is the initial starting time and tlast is the time
        # saved at the last stop.
        try:
            prefix, tlast, t0 = message.split(None, 2)
            tlast, t0 = float(tlast), float(t0)
        except (IndexError, ValueError):
            t0 = time.time()
            tlast = t0
            t1 = t0
        else:
            t1 = time.time()
        # print to log (important!)
        print("** timetrace (%s): dT=%fs, total=%fs." % (idstring, t1 - tlast, t1 - t0))
        if final:
            message = " **** %s (total %f) **** " % (tracemessage, t1 - t0)
        else:
            message = "%s %f %f" % (tracemessage, t1, t0)
    return message
98882610b4a184a6918c0be7302bc9117a6f153a
665,082
def flatten(x):
    """Flatten a list of arbitrary depth.

    Returns a list with no sub-lists or sub-tuples. If the input is not
    a list or a tuple, it will be returned as a one-element list.
    """
    if not isinstance(x, (list, tuple)):
        return [x]
    else:
        if len(x) == 0:
            return []
        else:
            return flatten(x[0]) + flatten(x[1:])
461e674f5b17d2825827f550b90f0fcef90f81aa
213,210
def precip_to_energy(prec):
    """Convert precip to W/m2

    Parameters
    ----------
    prec : (mm/day)
    """
    density_water = 1000
    Lv = 2.51e6
    coef = density_water / 1000 / 86400 * Lv
    return coef * prec
d264eb741df893b70ec48826fae13787c1394221
574,579
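A worked conversion for `precip_to_energy` above: 1 mm/day of precipitation corresponds to roughly 29 W/m2 of latent heat flux, given the latent heat of vaporization (2.51e6 J/kg) hard-coded in the function.

energy = precip_to_energy(1.0)     # 1 mm/day
print(energy)                      # ~29.05 W/m2 (2.51e6 / 86400)
assert abs(energy - 2.51e6 / 86400) < 1e-9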
import torch


def quaternion2rotationPT(q):
    """ Convert unit quaternion to rotation matrix

    Args:
        q (torch.tensor): unit quaternion (N,4)

    Returns:
        torch.tensor: rotation matrix (N,3,3)
    """
    r11 = (q[:, 0]**2 + q[:, 1]**2 - q[:, 2]**2 - q[:, 3]**2).unsqueeze(0).T
    r12 = (2.0 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3])).unsqueeze(0).T
    r13 = (2.0 * (q[:, 1] * q[:, 3] + q[:, 0] * q[:, 2])).unsqueeze(0).T

    r21 = (2.0 * (q[:, 1] * q[:, 2] + q[:, 0] * q[:, 3])).unsqueeze(0).T
    r22 = (q[:, 0]**2 + q[:, 2]**2 - q[:, 1]**2 - q[:, 3]**2).unsqueeze(0).T
    r23 = (2.0 * (q[:, 2] * q[:, 3] - q[:, 0] * q[:, 1])).unsqueeze(0).T

    r31 = (2.0 * (q[:, 1] * q[:, 3] - q[:, 0] * q[:, 2])).unsqueeze(0).T
    r32 = (2.0 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1])).unsqueeze(0).T
    r33 = (q[:, 0]**2 + q[:, 3]**2 - q[:, 1]**2 - q[:, 2]**2).unsqueeze(0).T

    r = torch.cat((r11, r12, r13,
                   r21, r22, r23,
                   r31, r32, r33), 1)
    r = torch.reshape(r, (q.shape[0], 3, 3))
    return r
feeed764ee179b31674790f9d2afc7b606a02aef
1,538
def get_name_from_filename(filename):
    """Gets the partition and name from a filename"""
    partition = filename.split('_', 1)[0]
    name = filename.split('_', 1)[1][:-4]
    return partition, name
606cfcc998c4a8405c9ea84b95b2c63f683dd114
7,459
def isAllRests(notes):
    """ Check if a given list of notes has only rests within it """
    for note in notes:
        if note[0] != 'r':
            return False
    return True
4228614d16afa99782a32b9b70a1aaa3035af603
149,054
def gen_chunks_values_constraint(chunks, variable_name):
    """
    Generation of a SPARQL VALUES clause to restrict gene/protein/etc. names.
    Produces something like
    VALUES ?controlledName {"hsa-miR-6079"^^xsd:string "hsa-miR-4452"^^xsd:string "hsa-miR-6512-5p"^^xsd:string "RBPJ"^^xsd:string "NICD"^^xsd:string}
    """
    filter_clause = ''
    if len(chunks) > 0:
        filter_clause = 'VALUES ' + variable_name + ' { \n'
        for g in chunks:
            filter_clause += '"' + g.replace('"', '').replace("'", "").replace('\\\\', '\\') + '"^^xsd:string '
        k = filter_clause.rfind(" ")
        filter_clause = filter_clause[:k]
        filter_clause += ' } .'
    return filter_clause
fe95f29ed3cf8361849c82fb48204de67f811f43
323,677
def binarySearch(numList, left, right, target):
    """
    Binary search for the range of numbers found in the exponential search algorithm

    :param left: the first number in the range of numbers
    :param right: the last number in the range of numbers
    :param numList: a list of numbers sorted in ascending order
    :param target: a number to be searched in the list of numbers
    :return: index of target, or -1 if target is not found
    """
    if left > right:
        return -1
    mid = (left + right) // 2
    if target == numList[mid]:
        return mid
    elif target < numList[mid]:
        return binarySearch(numList, left, mid - 1, target)
    else:
        return binarySearch(numList, mid + 1, right, target)
7052600df2b075519da975d2496f40a86b8c1ded
444,317
def ConcatenateResults(p):
    """Concatenate string results into a single string. Result is string."""
    return p >> (lambda l: ''.join(l) if l else None)
a9c290a0c5b84abd5796d4133214b1b5f88e0285
409,419
def parse_j2000(name):
    """Takes the J2000 name stored in the results and returns it in a format
    astropy can understand."""
    return ' '.join([name[1:3], name[3:5], name[5:7], name[7:10], name[10:12], name[12:]])
617b4c95b42475096c736afd0fedce7e6f191e36
594,927
def search_non_residue(p):
    """Find a non-residue of p between 2 and p

    Args:
        p: a prime number

    Returns:
        an integer that is not a quadratic residue of p,
        or -1 if no such number exists
    """
    for z in range(2, p):
        # Euler's criterion: z is a non-residue iff z^((p-1)/2) == -1 (mod p)
        if pow(z, (p - 1) // 2, p) == p - 1:
            return z
    return -1
d74762a11f7557089f58be6b41840aa60074c00d
675,112
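A small worked example for `search_non_residue` above: for p = 7 the quadratic residues are {1, 2, 4}, so the smallest non-residue at or above 2 is 3.

assert search_non_residue(7) == 3    # 3^3 mod 7 == 6 == p - 1, so 3 is a non-residue
assert search_non_residue(11) == 2   # 2^5 mod 11 == 10, so 2 is a non-residue mod 11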
def trycatch(*args, **kwargs):
    """Wraps a function in a try/catch block. Can be used as a function
    decorator or as a function that accepts another function.

    **Params**:
      - func (func) - Function to call. Only available when used as a function.
      - oncatch (func) [kwargs] - Function to call if an exception is caught.
      - rethrow (bool) [kwargs] - If true, exception will be re-thrown.

    **Examples**: ::

        trycatch(myfunc)(myarg1, myarg2, kwarg=mykwarg)
        trycatch(myfunc, oncatch=mycatchfunc)(myarg1, myarg2, kwarg=mykwarg)
        trycatch(myfunc, rethrow=True)(myarg1, myarg2, kwarg=mykwarg)
    """
    rethrow = kwargs.get('rethrow', False)
    oncatch = kwargs.get('oncatch', None)

    def decor(func):
        def wrapper(*fargs, **fkrgs):
            try:
                return func(*fargs, **fkrgs)
            except:
                cresult = None
                if oncatch is not None:
                    cresult = oncatch()
                if rethrow:
                    raise
                return cresult
        return wrapper

    if len(args) > 0 and callable(args[0]):
        func = args[0]
        return decor(func)
    return decor
c7b5ff53ef60fd7a9a97df3111e23fd8d8624f62
441,898
def is_more_vacancies_to_scrape(vacancies_raw):
    """Check value of key 'more' in dictionary with vacancy attributes.

    Key 'more' is True when it's possible to make another request and fetch
    vacancies, and False when it's not possible."""
    return vacancies_raw.json()['more']
c789f225d4627b31ffd78a33bb275dc23f7d58a0
438,744
import colorsys


def hex_to_rgb_hls(hex_str):
    """ Convert hex string to an rgb list in 255 scale, and an hls list in
    float scale.
    """
    if len(hex_str) > 6:
        hex_str = hex_str[1:]

    # Convert to rgb
    rgb = []
    for i in range(0, 6, 2):
        rgb.append(int(hex_str[i:i + 2], 16))

    # Convert to hls
    hls = list(colorsys.rgb_to_hls(*[i / 255 for i in rgb]))
    return rgb, hls
9e0e102a813bf318de93baf31d76cee25dada586
364,870
def derivatives(seq):
    """Returns the derivative of this sequence by differencing.
    The output has the same length (`0` is added to the end)."""
    ret = [b - a for a, b in zip(seq, seq[1:])]
    ret.append(0)
    return ret
736984dbd34c5ba8e206b97f35f01e06f4c68992
515,165
import json


def get_syn_setup_params(
    syn_extra_params_path,
    cpre_cpost_path,
    fit_params_path,
    gid,
    invivo,
):
    """Load json files and return syn_setup_params dict.

    Args:
        syn_extra_params_path (str): path to the glusynapses related extra parameters file
        cpre_cpost_path (str): path to the c_pre and c_post related file
            c_pre (resp. c_post) is the calcium amplitude during isolated
            presynaptic (resp. postsynaptic) activation
        fit_params_path (str): path to the file containing the glusynapse fitted parameters
            The fitted parameters are time constant of calcium integrator,
            depression rate, potentiation rate, and factors used in
            plasticity threshold computation.
        gid (int): ID of the postsynaptic cell
        invivo (bool): whether to run the simulation in 'in vivo' conditions

    Returns:
        dict: glusynapse setup related parameters
    """
    with open(syn_extra_params_path, "r", encoding="utf-8") as f:
        syn_extra_params = json.load(f)

    with open(cpre_cpost_path, "r", encoding="utf-8") as f:
        cpre_cpost = json.load(f)

    with open(fit_params_path, "r", encoding="utf-8") as f:
        fit_params = json.load(f)

    return {
        "syn_extra_params": syn_extra_params,
        "c_pre": cpre_cpost["c_pre"],
        "c_post": cpre_cpost["c_post"],
        "fit_params": fit_params,
        "postgid": gid,
        "invivo": invivo,
    }
41bc06cfb420024dcd7c606a094204591b17b635
188,812
def true_state_value(probability):
    """
    compute the true value of the first state according to dynamics

    :param probability: the probability of right action
    :return: the true value

    V(S1) = p*1*(-1+V(S2)) + (1-p)*1*(-1+V(S1))
    V(S2) = p*1*(-1+V(S1)) + (1-p)*1*(-1+V(S3))
    V(S3) = p*1*(-1+0)     + (1-p)*1*(-1+V(S2))
    p is the probability of right action
    ===> V(S1) = 2*(p-2) / (p*(1-p))
    """
    return 2 * (probability - 2) / (probability * (1 - probability))
277d75aaad0045d5cac0cab7cef0d6167f7b72f4
202,277
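A worked evaluation of `true_state_value` above at p = 0.5 (my check, not from the source): the closed form gives 2*(0.5-2)/(0.5*0.5) = -12.

v = true_state_value(0.5)
assert abs(v - (-12.0)) < 1e-9   # 2 * (0.5 - 2) / (0.5 * 0.5) == -12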
def buildRowBorder(start, middle, end, spacer, dateWidth, editorWidths, editorIds):
    """
    Create a row border line.

    :param start: The character to use at the start
    :param middle: The character to use for each middle column
    :param end: The character to use at the end
    :param spacer: The character to use for each cell ceiling/floor
    :param dateWidth: The width of the date column
    :param editorWidths: The width of each editor's column
    :param editorIds: The ids of each editor
    :return:
    """
    line = start + spacer * (dateWidth + 2)
    for editor in editorIds:
        line += middle + spacer * (editorWidths[editor] + 2)
    return line + end
2c5229748a9b57aa3c79e29af0c54d05962b2b19
296,916
def splitOneListIntoTwo(inputList):
    """ Function that takes a list, each of whose entries is a list containing
    two entries, and assembles two separate lists, one for each of these entries """
    list1 = []
    list2 = []
    for entry in inputList:
        list1.append(entry[0])
        list2.append(entry[1])
    return list1, list2
ee1d139bfb42bc77d8760e5f9f33cbde279c1669
129,010
def fixture_labels_file_load() -> str:
    """Return a filepath to an existing labels file."""
    return "tests/labels.toml"
b5d4a36bee10363047a640ed83b7bdfbf9d84d20
490,292
def get_last_activation_layer(model):
    """Return the name of the last activation layer"""
    layer_names = []
    for layer in model.layers:
        layer_names.append(layer.name)
    layer_names = [name for name in layer_names if 'activation' in name]
    return layer_names[-1]
de38b75a1bcfb55b771d85f60a8d78ba31783283
324,903
from typing import List
import statistics


def znormalizeData(valList: List[float]) -> List[float]:
    """
    Given a list of floats, return the z-normalized values of the floats

    The formula is: z(v) = (v - mean) / stdDev
    In effect, most values end up in roughly the range [-4, 4] (the output is
    not strictly bounded). It can be used, for example, to compare the pitch
    values of different speakers who naturally have different pitch ranges.
    """
    valList = valList[:]
    meanVal = statistics.mean(valList)
    stdDevVal = statistics.stdev(valList)
    return [(val - meanVal) / stdDevVal for val in valList]
27731f5dfee3132ba2e4e490303655c1ebbf91f5
435,817
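A short usage sketch of `znormalizeData` above: the output has mean ~0 and sample standard deviation 1 (the input values are my own illustration).

import statistics

z = znormalizeData([100.0, 110.0, 120.0, 130.0, 140.0])
assert abs(statistics.mean(z)) < 1e-9          # centered at zero
assert abs(statistics.stdev(z) - 1.0) < 1e-9   # unit sample standard deviation
print(z)  # [-1.264..., -0.632..., 0.0, 0.632..., 1.264...]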
def convert_token(token: str) -> tuple[int, int]:
    """Converts string token into 2d grid offset

    :param token: token in string format
    :return: 2d grid offset in integer format
    """
    direction, length = token[:1], int(token[1:])
    assert direction in ['R', 'L', 'U', 'D']
    if direction in ['L', 'D']:
        length *= -1
    offset = (length, 0) if direction in ['R', 'L'] else (0, length)
    return offset
f1cffcff7c3822d963b3d1962df2104873814b33
548,785
import torch


def aggregate_accuracy(test_logits_sample, test_labels):
    """
    Compute classification accuracy.
    """
    return torch.mean(torch.eq(test_labels, torch.argmax(test_logits_sample, dim=-1)).float())
eff4f6b6613f053bed8d790502e967cb74d58bab
618,285
def ensure_collection(value, collection_type):
    """Ensures that `value` is a `collection_type` collection.

    :param value: A string or a collection
    :param collection_type: A collection type, e.g. `set` or `list`
    :return: a `collection_type` object containing the value
    """
    return collection_type((value,)) if value and isinstance(value, str) else collection_type(value)
9afa29fd0672d7ab2e65484fdd0c811781fb9f82
644,759
import math


def engFormat(f):
    """Format a number in engineering format, where the exponent is a
    multiple of 3"""
    if f == 0.0:
        value = 0.0
        exponent = 0
    else:
        exponent = math.log10(-f if f < 0 else f)
        if exponent < 0:
            exponent = -int(math.ceil(-exponent))
        else:
            exponent = int(math.floor(exponent))
        # step the exponent down until it is a multiple of 3
        for i in range(3):
            if (exponent % 3) == 0:
                break
            exponent = exponent - 1
        value = f * 10 ** -exponent
    # Choose a format to maintain the number of useful digits we print.
    if abs(value) < 10:
        fmt = "%6.3f%s"
    elif abs(value) < 100:
        fmt = "%6.2f%s"
    else:
        fmt = "%6.1f%s"
    return fmt % (value, ("" if exponent == 0 else "e%d" % exponent))
b9fed4fdaf2d21b87e211a0972d69ad4e8a86e80
204,531
def window_maker(list_name, filled_list, window_size, slide_size):
    """Make a bed file of sliding windows."""
    for scaffold, start, end in filled_list:
        width = window_size
        step = slide_size
        if width <= end:
            list_name.append((scaffold, start, width))
        else:
            list_name.append((scaffold, start, end))
        while width <= end:
            start += step
            width += step
            if width >= end:
                list_name.append((scaffold, start, end))
            else:
                list_name.append((scaffold, start, width))
    return list_name
02f5f293b2ba49efdd7c31a955776a6df5e19f42
74,110
from typing import Literal
from typing import Optional
from typing import Union

import requests
from bs4 import BeautifulSoup


def alliance_bank_withdraw(
    email: str,
    password: str,
    alliance_id: int,
    receiver: str,
    receiver_type: Literal["alliance", "nation"],
    note: Optional[str] = None,
    **resources: Union[int, float, str],
) -> bool:
    """Send money from an alliance bank.

    Parameters
    ----------
    email : str
        The email of the account to use.
    password : str
        The password of the account to use.
    alliance_id : int
        The alliance ID to send from.
    receiver : str
        The receiver of the withdrawal, must be a nation or alliance name.
    receiver_type : Literal["alliance", "nation"]
        The type of receiver, either "alliance" or "nation".
    note : Optional[str], optional
        The note to send with the withdrawal, by default no note is sent.
    **resources : Union[int, float, str]
        The resources to send, specified as kwargs. (i.e. money=100)

    Returns
    -------
    bool
        Whether or not the withdrawal was successful.
    """
    with requests.Session() as session:
        transaction_data = {f"with{key}": value for key, value in resources.items()}
        transaction_data["withtype"] = receiver_type.capitalize()
        if note is not None:
            transaction_data["withnote"] = note
        transaction_data["withrecipient"] = receiver
        transaction_data["withsubmit"] = "Withdraw"
        login_data = {
            "email": email,
            "password": password,
            "loginform": "Login",
        }
        response = session.request(
            "POST", "https://politicsandwar.com/login/", data=login_data
        )
        if "login failure" in response.text.lower():
            return False
        response = session.request(
            "POST",
            f"https://politicsandwar.com/alliance/id={alliance_id}&display=bank",
            data=transaction_data,
        )
        content = response.text
        if "Something went wrong" in content:
            transaction_data["token"] = BeautifulSoup(content, "html.parser").find("input", {"name": "token"}).attrs["value"]  # type: ignore
            response = session.request(
                "POST",
                f"https://politicsandwar.com/alliance/id={alliance_id}&display=bank",
                data=transaction_data,
            )
            content = response.text
        return "successfully transferred" in content
ac3e11859143d63515b63c854630798e5aea4020
247,537
from typing import List
from typing import Dict
from typing import Any


def _max_pred(prediction_scores: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Utility function to find the maximum predicted label for a single prediction

    Args:
        prediction_scores (List[Dict[str, Any]]): A list of predictions with
            keys 'label' and 'score'

    Returns:
        Dict[str, Any]: The 'label' and 'score' dict with the highest score value
    """
    return max(prediction_scores, key=lambda d: d["score"])
1d85c5191c09e14829e8c7bb06430c58fe980760
231,532
def readRelations(pathToFile):
    """
    Reads in a .txt file that represents the relationships between the task ids.
    They are delimited by '->'.
    Returns a list of tuples that represent directed edges of a graph.
    """
    with open(pathToFile, "r") as file:
        content = file.read()
    out = content.splitlines()
    out = [i.split('->', 1) for i in out]
    out = list(map(tuple, out))
    return out
8e098b8bc35738cab0778c9ad10d24813c37237a
202,593
def build_sample_map(flowcell):
    """Build sample map ``dict`` for the given flowcell."""
    result = {}
    rows = [(lane, lib["name"]) for lib in flowcell["libraries"] for lane in lib["lanes"]]
    i = 1
    for _, name in sorted(set(rows)):
        if name not in result:
            result[name] = "S{}".format(i)
            i += 1
    return result
faf43ca65146093462ae26a9c18ebb238e23a7ff
696,276
import re


def _SplitFreqRange(freq_range):
    """Splits a `freq_range` str in a list of numerical (fmin, fmax) tuples."""
    try:
        fmin, fmax = re.split(',|-', freq_range.strip())
        return [(float(fmin), float(fmax))]
    except AttributeError:
        # freq_range is not a string; treat it as an iterable of range strings
        freq_ranges = []
        for one_range in freq_range:
            fmin, fmax = re.split(',|-', one_range.strip())
            freq_ranges.append((float(fmin), float(fmax)))
        return freq_ranges
db3c7fc2d2a3576ab07b5acdbae9308408a04575
40,349
def normalize_basename(s, force_lowercase=True, maxlen=255):
    """Replaces some characters from s with a translation table:

    trans_table = {" ": "_",
                   "/": "_slash_",
                   "\\": "_backslash_",
                   "?": "_question_",
                   "%": "_percent_",
                   "*": "_asterisk_",
                   ":": "_colon_",
                   "|": "_bar_",
                   '"': "_quote_",
                   "<": "_lt_",
                   ">": "_gt_",
                   "&": "_amp_"}

    then if the generated name is longer than maxlen, the name is truncated
    to maxlen and the hash of the name modulo 0xffffffff is appended.
    """
    # replace all whitespace by _
    l = s.lower() if force_lowercase else s
    trans_table = {" ": "_", "/": "_slash_", "\\": "_backslash_",
                   "?": "_question_", "%": "_percent_", "*": "_asterisk_",
                   ":": "_colon_", "|": "_bar_", '"': "_quote_",
                   "<": "_lt_", ">": "_gt_", "&": "_amp_"}
    n = "".join([trans_table.get(x, x) for x in l])
    if len(n) > maxlen - 8:
        h = format(hash(n) & 0xffffffff, "08x")
        n = n[:maxlen - 8] + "_" + h
    return n
8b6c6fee3a55b3d704294d8bdaa7f72101ac477b
684,780
def column_names_window(columns: list, window: int) -> list:
    """
    Parameters
    ----------
    columns : list
        List of column names
    window : int
        Window size

    Returns
    -------
    Column names with the format: w_{step}_{feature_name}
    """
    new_columns = []
    for w in range(1, window + 1):
        for c in columns:
            new_columns.append(f"w_{w}_{c}")
    return new_columns
b5f5d6f8fcf340fbe6d1e640590ac5538258bf55
401,359
import math


def RoundDistance(distance):
    """Round distances above 10 (mi/km) up to the next integer."""
    return math.ceil(distance) if distance > 10 else distance
58e733c5ddac2089aa80a5bcf79fed1e0b2f6c55
137,952
def rmse(y_hat, y):
    """
    Function to calculate the root-mean-squared-error (rmse)

    Inputs:
    > y_hat: pd.Series of predictions
    > y: pd.Series of ground truth

    Output:
    > Returns the rmse as float
    """
    assert y_hat.size == y.size
    return round((sum(i * i for i in y_hat - y) / len(y)) ** 0.5, 4)
f03e0b93135b19dc111473cd0838b8fe293c5302
473,783
import torch


def pad_tensor(vec, pad, dim):
    """
    Pads a tensor with zeros according to arguments given

    Args:
        vec (Tensor): Tensor to pad
        pad (int): The total tensor size with pad
        dim (int): Dimension to pad

    Returns:
        padded_tensor (Tensor): A new tensor padded to 'pad' in dimension 'dim'
    """
    pad_size = list(vec.shape)
    pad_size[dim] = pad - vec.size(dim)
    padded_tensor = torch.cat([vec, torch.zeros(*pad_size)], dim=dim)
    return padded_tensor
872888db539676c82d4ac7f61832c1675fb06978
561,327
from datetime import datetime


def fromTS(timestamp):
    """ Returns human readable date-time format from timestamp """
    d = datetime.fromtimestamp(timestamp)
    return d.strftime('%I:%M:%S %p on %d, %b %Y')
c7cdcb42843c62bca1884fda76a55c9f72f2e53a
524,665
def rgb2hex(rgb):
    """Converts an RGB 3-tuple to a hexadecimal color string.

    EXAMPLE
    -------
    >>> rgb2hex((0,0,255))
    '#0000FF'
    """
    return ('#%02x%02x%02x' % tuple(rgb)).upper()
4c3323e34fcd2c1b4402ebe5f433c5fd9320cce9
708,098
def apply_rules_block(start, end, state):
    """
    Use a separate Ray task for computing a block update, rather than a method
    that can only run in the ``RayConwaysRulesBlocks`` actor's worker.
    Otherwise, this implementation follows ``RayConwaysRules.apply_rules``.

    Args:
        start: Starting row index in ``State.grid`` for this block, inclusive.
        end: Ending row index in ``State.grid`` for this block, exclusive.
        state: The current ``State`` object.
    """
    x = state.x_dim
    y = state.y_dim
    g = state.grid
    block = g[start:end].copy()
    for n in range(end - start):
        i = n + start
        for j in range(y):
            im1 = i - 1 if i > 0 else x - 1
            ip1 = i + 1 if i < x - 1 else 0
            jm1 = j - 1 if j > 0 else y - 1
            jp1 = j + 1 if j < y - 1 else 0
            num_live_neighbors = (
                g[im1][jm1] + g[im1][j] + g[im1][jp1] +
                g[i][jm1] + g[i][jp1] +
                g[ip1][jm1] + g[ip1][j] + g[ip1][jp1])
            cell = block[n][j]  # default value is no change in state
            if cell == 1:
                if num_live_neighbors < 2 or num_live_neighbors > 3:
                    block[n][j] = 0
            elif num_live_neighbors == 3:
                block[n][j] = 1
    return block
8620e60c537890f37137c5bbcf85c194068fb327
542,363