Columns: content (string, length 39 to 9.28k) | sha1 (string, length 40) | id (int64, 8 to 710k)
def create_pair(urls):
    """Parses a comma-separated URL pair string into a (source, neighbor) tuple."""
    tokens = urls.split(",")
    source_URL = tokens[0]
    neighbor_URL = tokens[1]
    return (source_URL, neighbor_URL)
6ac18e8f543cd8783eb0efef7f7c9089742bdd82
173,185
def points_with_surrounding_gaps(points):
    """
    This function makes sure that any gaps in the sequence provided have
    stopper points at their beginning and end so a graph will be drawn with
    correct 0 ranges. This is more efficient than filling in all points up
    to the maximum value. For example:

        input:  [1,2,3,10,11,13]
        output: [1,2,3,4,9,10,11,12,13]
    """
    points_with_gaps = []
    last_point = -1
    for point in points:
        if last_point + 1 == point:  # part of a consecutive run, no stopper needed
            pass
        elif last_point + 2 == point:  # single-point gap, one stopper fills it
            points_with_gaps.append(last_point + 1)
        else:  # wider gap, add stoppers at both ends
            points_with_gaps.append(last_point + 1)
            points_with_gaps.append(point - 1)
        points_with_gaps.append(point)
        last_point = point
    return points_with_gaps
39f6d94435464ba1d662efa874019e2331437b0d
543,557
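A quick trace of points_with_surrounding_gaps above (illustrative addition, not a dataset record). Note that because tracking starts at last_point = -1, a stopper at 0 is also emitted for inputs that do not start at 0:

result = points_with_surrounding_gaps([1, 2, 3, 10, 11, 13])
# result == [0, 1, 2, 3, 4, 9, 10, 11, 12, 13]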
import warnings


def puppy_vid_inspected_trajectory(grp, step_width, loc_marker, epoch_idx, obs_offset):
    """
    .. deprecated:: 1.0
        Use :py:class:`PuppyActionVideo` instead
    """
    warnings.warn('deprecated, use PuppyActionVideo instead')
    loc_x = grp['puppyGPS_x'][obs_offset + step_width * epoch_idx + step_width - 1]
    loc_y = grp['puppyGPS_y'][obs_offset + step_width * epoch_idx + step_width - 1]
    loc_marker.set_data([loc_x], [loc_y])
    return loc_marker
582083bc76950437b9ebdd1258393c3c6f0311c8
650,348
def slice_doubles(doubles):
    """Get the heads and relations from a matrix of doubles."""
    return (
        doubles[:, 0:1],  # heads
        doubles[:, 1:2],  # relations
    )
35ffcc48382f78c9c547eb49fcde19413d2a9f5b
624,380
def get_reference_output_files(reference_files_dict: dict, file_type: str) -> list:
    """
    Returns list of files matching a file_type from reference files

    Args:
        reference_files_dict: A validated dict model from reference
        file_type: a file type string, e.g. vcf, fasta

    Returns:
        ref_vcf_list: list of file_type files that are found in reference_files_dict
    """
    ref_vcf_list = []
    for reference_key, reference_item in reference_files_dict.items():
        if reference_item['file_type'] == file_type:
            ref_vcf_list.append(reference_item['output_file'])
    return ref_vcf_list
7050c39a8116f8874dbc09bcf7ff2908dcd13ff8
11,886
def total_error_to_per_piece_error(error_rate: float, pieces: int) -> float:
    """Convert from total error rate to per-round error rate."""
    if error_rate > 0.5:
        return 1 - total_error_to_per_piece_error(1 - error_rate, pieces)
    assert 0 <= error_rate <= 0.5

    randomize_rate = 2 * error_rate
    round_randomize_rate = 1 - (1 - randomize_rate)**(1 / pieces)
    round_error_rate = round_randomize_rate / 2
    return round_error_rate
69c820d5cb248bc54c443b61d650ce8644a530ba
361,909
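A quick numerical check for total_error_to_per_piece_error above (illustrative addition, not a dataset record): composing the per-piece rate back over all pieces should recover the total rate.

def per_piece_to_total(round_error_rate: float, pieces: int) -> float:
    # Invert the conversion: compose the per-round randomization over all pieces.
    round_randomize_rate = 2 * round_error_rate
    randomize_rate = 1 - (1 - round_randomize_rate)**pieces
    return randomize_rate / 2

p = total_error_to_per_piece_error(0.3, pieces=5)
assert abs(per_piece_to_total(p, pieces=5) - 0.3) < 1e-12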
from typing import Iterable
import re


def clear_stop_words(
    text: str, stop_list: Iterable[str], replace_char: str = " "
) -> str:
    """Replace stop words with the specified replace character."""
    pattern = "|".join(stop_list)
    compiled_pattern = re.compile(r"\b(" + pattern + r")\b")
    clear_text = re.sub(compiled_pattern, replace_char, text)
    return clear_text
8eb1e9508d7a9506a33fc355c52c10692110d503
410,665
def epsilon_mean(eps, limit=0.999):
    """Compute mean of the ellipticity distribution.

    Args:
        eps: A numpy array of real or complex ellipticity (epsilon) estimates.
        limit: The truncation limit, a positive number.

    Returns:
        The mean of the eps samples, subject to the requirement |eps| < limit.
    """
    mask = (abs(eps) < limit)
    return eps[mask].mean()
fdb020ba48e479ae592bbe82b498d5751bce91f0
389,011
def calc_quadrant_from_decimal_number(value: int) -> str:
    """Return a quadrant (4-ary) from a given decimal number.

    Parameters
    ----------
    value: int
        a decimal number

    Returns
    -------
    str
        a quadrant
    """
    base = 4
    q = ""
    tmp = int(value)
    while tmp >= base:
        q = str(tmp % base) + q
        tmp = int(tmp / base)
    q = str(tmp % base) + q
    return q
858bf354251e8ffa62c544965d6652da74b81e16
596,531
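Usage sketch for calc_quadrant_from_decimal_number above (illustrative addition, not a dataset record): the result is simply the base-4 representation of the input.

assert calc_quadrant_from_decimal_number(27) == "123"  # 1*16 + 2*4 + 3 == 27
assert calc_quadrant_from_decimal_number(4) == "10"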
def spell_correct(args, gb, gb_and_pwl, word, bigrams):
    """
    Function to spell-check a word and correct it if possible.

    input:
        args (argparse object): input arguments
        gb: british dictionary for spell checking
        gb_and_pwl: words from british dictionary and input personal word list
        word (str): word to spell-check
        bigrams (dict): corpus bigrams read from args.corpus_bigrams

    returns spell-checked (and corrected, if necessary) word
    """
    # If the line is a valid word, continue
    if word == "" or word[0].isupper() or gb.check(word):
        return word
    else:
        # Suggest corrections for sub_line
        suggestions = gb_and_pwl.suggest(word)
        # See if any of them are reasonable
        options = []
        for opt in suggestions:
            l = opt.split()
            if len(l) == 2 and "".join(l) == word:
                options.append(opt)
                break
        # Find the most probable option
        best = (word, 0)
        for opt in options:
            try:
                # Check if option is a bigram that appears in corpus
                if bigrams[opt] > best[1]:
                    best = (opt, bigrams[opt])
            except KeyError:
                continue
        return best[0]
1c003bc777409db598993f8dc1e609f31dae4f38
493,408
def generatePolicy(effect, principalId, resource):
    """Generate a policy based on input."""
    authResponse = {}
    authResponse['principalId'] = principalId
    statementOne = {
        'Action': 'execute-api:Invoke',
        'Effect': effect,
        'Resource': resource,
    }
    policyDocument = {
        'Version': '2012-10-17',
        'Statement': [statementOne],
    }
    authResponse['policyDocument'] = policyDocument
    authResponse['context'] = {'policyGenerator': 'authorizer.authorize'}
    return authResponse
86d3b3472711d4bbc8f7e132abbc3d8b5d140425
358,218
def bool_to_integer_string(boolean):
    """Returns '0' for False & '1' for True

    :param `boolean`: Value to convert
    :returns: Conversion result
    :rtype: str
    """
    return "{}".format(int(boolean))
16b09cd3b7765b87817f33ae2a1d714c70d0b964
485,749
def is_valid_pre_6_2_version(xml):
    """Returns whether the given XML object corresponds to an XML output file
    of Quantum ESPRESSO pw.x pre v6.2

    :param xml: a parsed XML output file
    :return: boolean, True when the XML was produced by Quantum ESPRESSO with
        the old XML format
    """
    element_header = xml.find('HEADER')
    if element_header is None:
        return False

    element_format = element_header.find('FORMAT')
    if element_format is None:
        return False

    try:
        name = element_format.attrib['NAME']
    except KeyError:
        return False

    if name != 'QEXML':
        return False

    return True
80bda73addc68a88b2a1dc5828c0553cbaf7e6f2
709,974
def parse_metadata_line(line):
    """Parse a single metadata line and return the name, value"""
    # The metadata format is a 5 column format:
    name, comment, attach_to_series, type_as_str, value = line.strip("\n").split("\t")
    # Since, as yet, ixdat doesn't support per-series metadata, we prefix the
    # per-series metadata item names with the name of the series, to avoid
    # name clashes while still preserving the data
    if attach_to_series:
        full_name = f"{attach_to_series}_{name}"
    else:
        full_name = name

    # Type convert the metadata (the specification for version 1 also has a
    # color type, but as of yet it is not used)
    if type_as_str == "string":
        return full_name, value
    elif type_as_str == "int":
        return full_name, int(value)
    elif type_as_str == "double":
        return full_name, float(value)
    elif type_as_str == "bool":
        return full_name, value == "true"
    else:
        raise TypeError(f"Unknown metadata type {type_as_str} for {name}")
2a7de2c37a5735a49024c1d0421f62336de4ca66
507,254
def add_temporality(df):
    """
    Adds previous five-game statistics to each sample as numerical data.

    Parameters
    ----------
    df: pandas dataframe
        Containing basic stats from past games (pts, reb, ast, date, etc)

    Returns
    ----------
    df: pandas dataframe
        The updated df, previous stats included.
    """
    df_ = df.drop([0, 1, 2, 3, 4])
    pts = df['pts'].values
    reb = df['reb'].values
    ast = df['ast'].values
    pts_, reb_, ast_ = [], [], []
    for i in range(1, 6):
        pts_.append([pts[j - i] for j in range(5, len(pts))])
        reb_.append([reb[j - i] for j in range(5, len(reb))])
        ast_.append([ast[j - i] for j in range(5, len(ast))])
    # Attach the lagged columns: pts-1 .. pts-5, reb-1 .. reb-5, ast-1 .. ast-5
    for i in range(5):
        df_[f'pts-{i + 1}'] = pts_[i]
        df_[f'reb-{i + 1}'] = reb_[i]
        df_[f'ast-{i + 1}'] = ast_[i]
    return df_
48339c75738834fc2d90ddfd1b49ccc1a15e19b4
311,706
def splitDict(data):
    """
    Split a dictionary with lists as the data, into smaller dictionaries

    :param data: A dictionary with lists as the values
    :return: A tuple of dictionaries each containing the data separately,
        with the same dictionary keys
    """
    # find the maximum number of items in the dictionary
    maxitems = max([len(values) for values in data.values()])
    output = [dict() for _ in range(maxitems)]
    for key, values in data.items():
        for i, val in enumerate(values):
            output[i][key] = val
    return tuple(output)
aad4a54bca6f6007d4ee70652cac5b7ba47cf112
231,079
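Usage sketch for splitDict above (illustrative addition, not a dataset record):

records = splitDict({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})
# records == ({'x': 1, 'y': 'a'}, {'x': 2, 'y': 'b'}, {'x': 3, 'y': 'c'})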
import random


def choose_random_edge(g):
    """Chooses a random edge, as defined by a pair of nodes."""
    randKey1 = random.choice(list(g.keys()))
    randKey2 = random.choice(g[randKey1])
    return randKey1, randKey2
1389e38665963aaf3e42abaa5c8b998fc0bad866
409,622
def sub(x, y):
    """subtracts y from x"""
    return x - y
e27fbd2b19d24f85d3d7cfd32011cae0a77e30fe
383,979
def var_is_false(var):
    """
    Returns True if var = False, else False.
    Remember here that 1 is an almost-True value but in this case should
    return False.
    :param var: any variable.
    :return: boolean
    """
    return not var and isinstance(var, bool)
c21435b2d6d4b3a984a4e0f39ef0cffb2158b914
493,292
def bit_count(num: int):
    """Counts the number of bits with value 1."""
    try:
        return num.bit_count()  # Python 3.10+ (~6 times faster)
    except AttributeError:
        return bin(num).count("1")
bf0a6e969cf4cd867afcccfec76f9db60ffb3778
361,467
def list_product_initial(initial, lists):
    """Return a list of lists, with an initial sequence from the first
    argument (a list of lists) followed by each sequence of one element
    from each successive element of the second argument."""
    if not lists:
        return initial
    return list_product_initial([a + [b] for a in initial for b in lists[0]],
                                lists[1:])
34bee5ea3b1a62b9ace5c47303e2efe03b44ddd0
187,329
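Usage sketch for list_product_initial above (illustrative addition, not a dataset record): seeded with the empty prefix, it builds the cartesian product of the given lists.

combos = list_product_initial([[]], [[1, 2], ['a', 'b']])
# combos == [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]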
import random


def trim_to_length_random(source, target, length):
    """Trim data to a max of length. Data is shuffled in place if over limit.

    Args:
        source (collections.deque): Source data
        target (collections.deque): Target data
        length (int): Trim to length

    Returns:
        (collections.deque, collections.deque): Trimmed data
    """
    if length is None:
        return (source, target)
    else:
        # Randomly select data to use if over length
        if length < len(source):
            zipped_data = list(zip(source, target))
            random.shuffle(zipped_data)
            source = [x[0] for x in zipped_data]
            target = [x[1] for x in zipped_data]
            source = source[:length]
            target = target[:length]
        return (source, target)
61aa7a7c4a27db354a6d9d02793aa83140b6d373
145,548
def decorate(message: str, symbol: str = '+', line_width: int = 79) -> str:
    """Print the text framed by symbols.

    :param message: the original message
    :param symbol: which symbol to draw the lines with
    :param line_width: how wide to fill with symbols
    :return: the formatted text message as several lines
    """
    separator = '\n' + symbol * line_width + '\n'
    result = separator + message.strip() + separator
    return result
2b533902b621b4063f7019315f07bb0a962566ea
606,360
def formula_has_multi_any_bfunc(formula, bfunc_set):
    """Returns true if the total times any of the provided basis functions
    appear in the formula is more than once per normalization.

    :formula: str
    :bfunc_set: list of str
    :returns: bool
    """
    equivalents = formula.count("+") + 1
    instances = 0
    for b in bfunc_set:
        instances += formula.count(b)
    return instances / equivalents > 1
aef4fd2cabf41d1eaa3c1004a7d0f0ae9a054047
81,221
def calc_overlap(row):
    """
    Calculates the overlap between prediction and ground truth and overlap
    percentages used for determining true positives.
    """
    set_pred = set(row.predictionstring_pred.split(' '))
    set_gt = set(row.predictionstring_gt.split(' '))
    # Length of each and intersection
    len_gt = len(set_gt)
    len_pred = len(set_pred)
    inter = len(set_gt.intersection(set_pred))
    overlap_1 = inter / len_gt
    overlap_2 = inter / len_pred
    return [overlap_1, overlap_2]
98e65250f82ab13b23de049fd80a59dea30ccce2
705,225
def calculate_de(frequency_dicts, metric_fn):
    """
    Calculates the expected disagreement by chance

    :param frequency_dicts: The output of data_transforms.calculate_frequency_dicts, e.g.:
        {
            unit_freqs: {1: 2, ...},
            class_freqs: {3: 4, ...},
            total: 7
        }
    :param metric_fn: metric function such as nominal_metric
    :return: De, a float
    """
    De = 0
    class_freqs = frequency_dicts["class_freqs"]
    class_names = list(class_freqs.keys())
    for c in class_names:
        for k in class_names:
            De += class_freqs[c] * class_freqs[k] * metric_fn(c, k)
    return De
6a8bbbd4f1487b9e1594abaf03be42d42a2293ee
428,191
import sqlite3
from typing import Dict


def get_types(conn: sqlite3.Connection) -> Dict[str, str]:
    """Get types for each column in the database.

    :param conn: Connection to the database
    :type conn: sqlite3.Connection
    :return: A dictionary mapping names of columns to SQL names of their types
    :rtype: Dict[str, str]
    """
    types = {}
    cur = conn.cursor()
    cur.execute("PRAGMA table_info(data)")
    data = cur.fetchall()
    for row in data:
        # row == (cid, name, type, notnull, dflt_value, pk)
        types[row[1]] = row[2]
    return types
ef53ab921586b09c9f48a73e175bbcc6d0e2cd8b
523,005
import torch


def compute_ap(recall, precision):
    """Compute the average precision, given the recall and precision curves.

    Code originally from https://github.com/rbgirshick/py-faster-rcnn.

    # Arguments
        recall: The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # correct AP calculation
    # first append sentinel values at the end
    mrec = torch.cat((torch.zeros((1,), device=recall.device, dtype=recall.dtype),
                      recall,
                      torch.ones((1,), device=recall.device, dtype=recall.dtype)))
    mpre = torch.cat((torch.zeros((1,), device=precision.device, dtype=precision.dtype),
                      precision,
                      torch.zeros((1,), device=precision.device, dtype=precision.dtype)))

    # compute the precision envelope
    for i in range(len(mpre) - 1, 0, -1):
        mpre[i - 1] = torch.max(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = torch.nonzero(mrec[1:] != mrec[:-1])

    # and sum (\Delta recall) * prec
    ap = torch.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
3cc1b80c1d8a610d7b3b3fc8d3f6f6e8b167bfab
220,659
import re


def extract_namenode(path):
    """Extract namenode from URI

    Args:
        path(str): HDFS path or AFS path

    Returns:
        str: namenode of path

    >>> extract_namenode("hdfs://host:port/tmp")
    'hdfs://host:port'
    >>> extract_namenode("hdfs://fs/tmp")
    'hdfs://fs'
    >>> extract_namenode("hdfs:///tmp")
    'hdfs://'
    """
    match = re.match(r'((hdfs|afs)://[^/]*)/(.*)', path, re.M | re.I)
    return match.group(1) if match else None
8c0d52644047cea64f09fb31315c4177508d765d
333,625
import string


def digits(token):
    """
    Whether a given string token contains digits or not.

    :param token: input token
    :type token: str
    :return: description of the content
    :rtype: str
    """
    if token.isdigit():
        return 'all_digits'
    elif set(token) & set(string.digits):
        return 'some_digits'
    else:
        return 'no_digits'
2546aeeddce9d8c3d014c809e383c277409448f6
544,656
def multi_to_one_dim(in_shape, in_index):
    """Convert an index from a multi-dimension into the corresponding index
    in flat array"""
    out_index = 0
    for dim, index in zip(in_shape, in_index):
        out_index = dim * out_index + index
    return out_index
b1a38f72225b354a4c8a0ca93f016ac77752bc98
667,125
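Usage sketch for multi_to_one_dim above (illustrative addition, not a dataset record): it computes the row-major (C-order) flat index, with the last axis varying fastest.

# For a 3x4 array, element (1, 2) lands at flat index 1*4 + 2 = 6.
assert multi_to_one_dim((3, 4), (1, 2)) == 6
assert multi_to_one_dim((2, 3, 4), (1, 2, 3)) == 23  # last element of a 2x3x4 block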
def find_end_paren(function_code: str, start: int):
    """
    Find the end location given a starting parenthesis location
    :param function_code:
    :param start:
    :return:
    """
    parentheses = []
    for i, character in enumerate(function_code[start:]):
        if character == "(":
            parentheses.append(character)
        elif character == ")":
            parentheses.pop()
            if not parentheses:
                return i + start
7644fa8f9763f98178df0da2d75825929b88a169
625,122
import torch


def add_e7(t):
    """
    Function to add a very small value to each element, to avoid inf errors
    when taking the logarithm.
    """
    return t + torch.ones_like(t) * 1e-7
6f419ba06a3a5c6ea904fbd4ed337f1dee8e9c1c
472,050
def horizontal_overlaps(rect, others, sorted=False):
    """Get rects that overlap horizontally with the given rect."""
    overlaps = []
    for other in others:
        # Note: can optimise to prevent going through the rest of the
        # array when we hit a non match
        if rect.overlaps_y(other):
            overlaps.append(other)
    return overlaps
856499aeb2507c5425de63fe95a129058193438f
636,607
def channel_bytes_to_str(id_bytes):
    """
    Args:
        id_bytes: bytes representation of channel id

    Returns:
        string representation of channel id
    """
    assert type(id_bytes) in [str, bytes]
    if isinstance(id_bytes, str):
        return id_bytes
    return bytes.hex(id_bytes)
c8870ff03c418e1d8280b04407feb9bdf6ca4589
397,963
from typing import Mapping


def delimited_file_to_dict(path: str, delimiter: str = ',') -> Mapping[str, str]:
    """
    Returns a dictionary populated by lines from a file where the first
    delimited element of each line is the key and the second delimited
    element is the value.

    :param str path: path to file
    :param str delimiter: delimiter separating key and value
    :return: dict containing key -> value
    :rtype: Mapping[str,str]
    :raise: UnsupportedOperation if there's an error in reading the file
    """
    values: Mapping[str, str] = {}
    # 'a+' creates the file if it does not exist; seek back to read from the start
    with open(path, 'a+') as f:
        f.seek(0)
        for line in f:
            line = line.rstrip()
            key, value = line.split(delimiter)
            values[key] = value
    return values
7687dfc0e59462455f8b90877d5232a342a6c789
649,127
def CheckGeophysicalModelsValid(rootGroup, verbose=False):
    """
    **CheckGeophysicalModelsValid** - Checks for valid geophysical model
    group data given a netCDF root node

    Parameters
    ----------
    rootGroup: netCDF4.Group
        The root group node of a Loop Project File
    verbose: bool
        A flag to indicate a higher level of console logging (more if True)

    Returns
    -------
    bool
        True if valid geophysical model data in project file, False otherwise.
    """
    valid = True
    if "GeophysicalModels" in rootGroup.groups:
        if verbose:
            print("  Geophysical Models Group Present")
        gmGroup = rootGroup.groups.get("GeophysicalModels")
        # if verbose: print(gmGroup)
    else:
        if verbose:
            print("No Geophysical Models Group Present")
    return valid
110ff208cf36a3caa4b2fcadcf999b092e4a03a4
508,147
def read_in_akas(entitysymbols):
    """Read in alias to QID mappings and generate a QID to list of alternate
    names.

    Args:
        entitysymbols: entity symbols

    Returns:
        dictionary of QID to list of aliases
    """
    qid2aliases = {}
    for al in entitysymbols.get_all_aliases():
        for qid in entitysymbols.get_qid_cands(al):
            if qid not in qid2aliases:
                qid2aliases[qid] = set()
            qid2aliases[qid].add(al)
    # Turn the sets into lists for dumping
    for qid in qid2aliases:
        qid2aliases[qid] = list(qid2aliases[qid])
    return qid2aliases
3cd98365e3c75ff38449623806e60b31c7d88265
234,865
def phony(params: dict) -> str:
    """
    Build phony rules according to 42 rules
    """
    phony = "all re clean fclean norm bonus"
    if params["library_libft"]:
        phony += " libft"
    if params["library_mlx"] and params["compile_mlx"]:
        phony += " minilibx"
    return phony
0db9f695caa3801467f7d6d3efecc070e0bda6ec
681,074
def alter_board(board, player, cell):
    """Alter board string for player input"""
    board = list(board)
    # enter player letter in supplied cell (cells are numbered from 1)
    board[cell - 1] = player
    return ''.join(board)
895b7e592f3ba530e4c8d554a5c07f5823b1b4b1
684,099
def heading(title, fgcol, bgcol, extras='', add='&nbsp;<br>'):
    """Format a page heading."""
    return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>%s
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, add, fgcol, title, fgcol, extras or '&nbsp;')
ff4b11710812aaab28e96e02f8f7777471a226a0
634,107
def _validate_tasks_var_format(value: str) -> str:
    """Validate task variables

    Arguments:
        value {str} -- A '.' separated string to be checked for task variable
            formatting

    Returns:
        str -- A string with validation error messages
    """
    add_info = ''
    parts = value.split('.')
    if len(parts) != 5:
        add_info = 'Valid tasks variables are ' \
            '"tasks.<TASKNAME>.outputs.parameters.<NAME>" and ' \
            '"tasks.<TASKNAME>.outputs.artifacts.<NAME>".'
        # Return early: the unpacking below requires exactly five parts
        return add_info

    # check for other parts
    _, _, attr, prop, _ = parts
    if attr != 'outputs':
        add_info = 'Tasks variable can only access previous tasks "outputs".'
    elif prop not in ('parameters', 'artifacts'):
        add_info = 'Task outputs variables must be "parameters" or "artifacts".'
    return add_info
1763d59ebf28b89edde146ab9632ed2ec441af93
430,455
def make_dict_with_words(file_name):
    """Write a function that reads the words in words.txt and stores them as
    keys in a dictionary. It doesn't matter what the values are."""
    words2dict = dict()
    with open(file_name) as fin:
        for line in fin:
            key = line.strip()
            if key not in words2dict:
                words2dict[key] = key
    return words2dict
15b02ade7c5a7123849e63e1eee028fc1cbe2b97
570,886
def challenge_response_accepted(challenge, response):
    """Simple check if a valid response for the challenge was provided."""
    try:
        response = int(response)
    except ValueError:
        return False
    expected = int((challenge / 2.0) + (challenge / 3.0) - (challenge / 4.0))
    return expected == response
0b98db197bc6b8860150004ac88b55aef7aa333c
550,083
def find_duplicates(df, columns=None, take_last=False):
    """Locates duplicate values in a dataframe

    Args:
        df (`pandas.DataFrame`): The DataFrame to find duplicates in
        columns (`list`, optional): Specific columns to find duplicates in
        take_last (bool, optional): Should the last duplicate not be marked
            as a duplicate? Defaults to `False`

    Returns:
        `pandas.DataFrame`: A frame containing duplicates
    """
    # The original `cols`/`take_last` keywords were removed from pandas;
    # `subset`/`keep` are the modern equivalents.
    mask = df.duplicated(subset=columns, keep='last' if take_last else 'first')
    return df.loc[mask]
52bfeae514e221ffae654c9c837010d63764867a
388,393
def flip(pattern):
    """Flip pattern horizontally."""
    return [row[::-1] for row in pattern]
9298be5925899a15b9d90d98138dd1104dfba638
353,896
def format_byte_size(byte_size: int, units: str = "SI"):
    """
    Format a number representing a total number of bytes into a convenient unit.

    Parameters
    ----------
    byte_size : int
        Total number of bytes to format.
    units : str, optional
        Convention for orders of magnitude to apply. May be either SI (orders
        of 1000) or binary (in memory, orders of 1024). The default is SI.
    """
    num = byte_size
    prefixes = ["", "K", "M", "G", "T", "P", "E", "Z"]
    if units == "SI":
        order = 1000.0
        suffix = "B"
    elif units == "binary":
        order = 1024.0
        suffix = "iB"
    else:
        raise ValueError("'units' argument must be either 'SI' (for orders "
                         "of 1000) or 'binary' (for orders of 1024).")
    for prefix in prefixes:
        if abs(num) < order:
            return f"{num:3.2f}{prefix}{suffix}"
        num /= order
    return f"{num:.2f}Y{suffix}"
071c1eb5571948e3e008cdab1fd865c9e38ccb2d
317,878
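Usage sketch for format_byte_size above (illustrative addition, not a dataset record):

print(format_byte_size(1_500_000))            # 1.50MB
print(format_byte_size(1_500_000, "binary"))  # 1.43MiB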
def trailing_zero(x: int) -> int:
    """Given a positive integer x, computes the number of trailing zeros of x"""
    cnt: int = 0
    while x and not x & 1:
        cnt += 1
        x >>= 1
    return cnt
081ff5c36fecdd63d1dd7512330c781a480c8392
247,570
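Usage sketch for trailing_zero above (illustrative addition, not a dataset record):

assert trailing_zero(40) == 3  # 40 == 0b101000, three trailing zero bits
assert trailing_zero(1) == 0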
def dwn_f_hat(f_hat, nj1):
    """
    Downsample f_hat vector to level j - 1 with length nj1.
    [PyTorch-enabled Function]

    :param f_hat: a matrix of size (d, nj), d-dim feature, nj > nj1,
        torch tensor format
    :param nj1: length nj1, must be integer
    :return: a vector whose components are those of f_hat from 1 to nj1,
        torch tensor format
    """
    f_hat_dwn = f_hat[:, :nj1]
    return f_hat_dwn
d875c609be75965a503721b2e154311fb4646a59
355,104
def lossAttack(model, images, labels, Loss):
    """
    Computes the loss value for a batch of samples.

    model: instance of a nn.Module subclass
    images: pytorch tensor with dimensions [batch,channels,width,height]
    labels: pytorch tensor of shape [batch] containing the integer labels of
        the samples
    Loss: callable, loss function

    outputs -> pytorch tensor of dimensions [batch] containing the negative
        loss values
    """
    loss = Loss(model(images).detach(), labels)
    return -loss
d6f99e3a9b9b44b966a9ced4991d4d045304400e
612,242
import requests


def request(url, to_json=False):
    """Sends a request to an url and makes sure it worked"""
    response = requests.get(url)
    if not response.ok:
        raise ValueError(
            f"Failed to get a good response when retrieving from {url}. "
            f"Response: {response.status_code}"
        )
    if not to_json:
        return response.content.decode("utf-8")
    else:
        return response.json()
4e672095f7257c1deb86c60697cf97c00daef8fd
59,401
def checkMovingWindow(alg, parameters, context, outputTxt=False):
    """Verify if we have the right parameters"""
    configTxt = alg.parameterAsString(parameters, 'config_txt', context)
    config = alg.parameterAsString(parameters, 'config', context)
    if configTxt and config:
        return False, alg.tr("You need to set either inline configuration or a configuration file!")

    # Verify that configuration is in moving window
    movingWindow = False
    if configTxt and 'MOVINGWINDOW' in configTxt:
        movingWindow = True

    # Read config file:
    if config:
        with open(config) as f:
            for line in f:
                if 'MOVINGWINDOW' in line:
                    movingWindow = True

    if not movingWindow and not outputTxt:
        return False, alg.tr('Your configuration needs to be a "moving window" configuration!')

    if movingWindow and outputTxt:
        return False, alg.tr('Your configuration needs to be a non "moving window" configuration!')

    return True, None
20b3fa5130b43c8335ce6ed05c3153be0eaeb040
443,661
def _update_post_node(node, options, arguments):
    """Extract metadata from options and populate a post node."""
    node["date"] = arguments[0] if arguments else None
    node["tags"] = options.get("tags", [])
    node["author"] = options.get("author", [])
    node["category"] = options.get("category", [])
    node["location"] = options.get("location", [])
    node["language"] = options.get("language", [])
    node["redirect"] = options.get("redirect", [])
    node["title"] = options.get("title", None)
    node["image"] = options.get("image", None)
    node["excerpt"] = options.get("excerpt", None)
    node["exclude"] = "exclude" in options
    node["nocomments"] = "nocomments" in options
    return node
9110c1f46046783e603a96006cbefaa1cabd1d0e
305,709
import random


def evolve(pop, mut_rate, mu, lambda_):
    """
    Evolve the population *pop* using the (mu + lambda) evolutionary strategy

    :param pop: a list of individuals, whose size is mu + lambda.
        The first mu ones are previous parents.
    :param mut_rate: mutation rate
    :return: a new generation of individuals of the same size
    """
    pop = sorted(pop, key=lambda ind: ind.fitness)  # stable sorting
    parents = pop[-mu:]
    # generate lambda new children via mutation
    offspring = []
    for _ in range(lambda_):
        parent = random.choice(parents)
        offspring.append(parent.mutate(mut_rate))
    return parents + offspring
e2510d0ce92d0c5703b9166778f48581db4aca2f
15,007
import time


def _get_time_diff_to_now(ts):
    """Calculate time difference from `ts` to now in human readable format"""
    secs = abs(int(time.time() - ts))
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    time_ago = ""
    if hours:
        time_ago += "%dh" % hours
    if mins:
        time_ago += "%dm" % mins
    if secs:
        time_ago += "%ds" % secs
    if time_ago:
        time_ago += " ago"
    else:
        time_ago = "just now"
    return time_ago
091f420971f23432c3ebaa1bef93b60ff2953c88
682,083
import click


def handle_stderr(stderr_pipe):
    """
    Takes stderr from the command's output and displays it AFTER the stdout
    is printed by run_command().
    """
    stderr_output = stderr_pipe.read()
    if len(stderr_output) > 0:
        click.secho("\n__ Error Output {0}".format('_' * 62),
                    fg='white', bold=True)
        click.echo(stderr_output)
    return True
666dcfcfbc5f8630df311d7783b9a273ac8cf611
300,305
import math
import torch


def reshape_data(data, flatten=None, out_shape=None):
    """
    Helper function to reshape input data for processing and return data shape
    Inputs:
        data: [tensor] data of shape:
            n is num_examples, i is num_rows, j is num_cols, k is num_channels,
            l is num_elements = i*j*k
            if out_shape is not specified, it is assumed that i == j
            (l) - single data point of shape l, assumes 1 color channel
            (n, l) - n data points, each of shape l (flattened)
            (i, j, k) - single datapoint of shape (i, j, k)
            (n, i, j, k) - n data points, each of shape (i, j, k)
        flatten: [bool or None] specify the shape of the output
            If out_shape is not None, this arg has no effect
            If None, do not reshape data, but add num_examples dimension if necessary
            If True, return ravelled data of shape (num_examples, num_elements)
            If False, return unravelled data of shape (num_examples, sqrt(l), sqrt(l), 1)
                where l is the number of elements (dimensionality) of the datapoints
            If data is flat and flatten==True, or not flat and flatten==False,
                then the None condition will apply
        out_shape: [list or tuple] containing the desired output shape
            This will overwrite flatten, and return the input reshaped according
            to out_shape
    Outputs:
        tuple containing:
        data: [tensor] data with new shape
            (num_examples, num_rows, num_cols, num_channels) if flatten==False
            (num_examples, num_elements) if flatten==True
        orig_shape: [tuple of int32] original shape of the input data
        num_examples: [int32] number of data examples or None if out_shape is specified
        num_rows: [int32] number of data rows or None if out_shape is specified
        num_cols: [int32] number of data cols or None if out_shape is specified
        num_channels: [int32] number of data channels or None if out_shape is specified
    """
    orig_shape = data.shape
    orig_ndim = data.dim()
    if out_shape is None:
        if orig_ndim == 1:  # single datapoint
            num_examples = 1
            num_channels = 1
            num_elements = orig_shape[0]
            if flatten is None:
                num_rows = num_elements
                num_cols = 1
                # add num_examples=1 dimension
                data = torch.reshape(data, [num_examples] + list(orig_shape))
            elif flatten == True:
                num_rows = num_elements
                num_cols = 1
                data = torch.reshape(data, (num_examples, num_rows * num_cols * num_channels))
            else:  # flatten == False
                # math.sqrt is used here; the integer num_elements is not a tensor
                sqrt_num_elements = math.sqrt(num_elements)
                assert math.floor(sqrt_num_elements) == math.ceil(sqrt_num_elements), (
                    "Data length must have an even square root. Note that num_channels"
                    + " is assumed to be 1. data length = " + str(num_elements)
                    + " and data_shape = " + str(orig_shape))
                num_rows = int(sqrt_num_elements)
                num_cols = num_rows
                data = torch.reshape(data, (num_examples, num_rows, num_cols, num_channels))
        elif orig_ndim == 2:  # already flattened
            (num_examples, num_elements) = data.shape
            if flatten is None or flatten == True:  # don't reshape data
                num_rows = num_elements
                num_cols = 1
                num_channels = 1
            elif flatten == False:
                sqrt_num_elements = math.sqrt(num_elements)
                assert math.floor(sqrt_num_elements) == math.ceil(sqrt_num_elements), (
                    "Data length must have an even square root when not specifying out_shape.")
                num_rows = int(sqrt_num_elements)
                num_cols = num_rows
                num_channels = 1
                data = torch.reshape(data, (num_examples, num_rows, num_cols, num_channels))
            else:
                assert False, ("flatten argument must be True, False, or None")
        elif orig_ndim == 3:  # single data point
            num_examples = 1
            num_rows, num_cols, num_channels = data.shape
            if flatten == True:
                data = torch.reshape(data, (num_examples, num_rows * num_cols * num_channels))
            elif flatten is None or flatten == False:  # already not flat
                data = data[None, ...]
            else:
                assert False, ("flatten argument must be True, False, or None")
        elif orig_ndim == 4:  # not flat
            num_examples, num_rows, num_cols, num_channels = data.shape
            if flatten == True:
                data = torch.reshape(data, (num_examples, num_rows * num_cols * num_channels))
        else:
            assert False, ("Data must have 1, 2, 3, or 4 dimensions.")
    else:
        num_examples = None
        num_rows = None
        num_cols = None
        num_channels = None
        data = torch.reshape(data, out_shape)
    return (data, orig_shape, num_examples, num_rows, num_cols, num_channels)
72acd69b59d1f0c3c9d707dfe069a2c517e8f8ef
149,873
from typing import Any
from typing import Iterable


def is_iterable(obj: Any) -> bool:
    """Return if ``obj`` is iterable or not."""
    return isinstance(obj, Iterable)
6822a4cbdcecbbbafff5eca709c9c6c906152c69
363,972
def _get_element(lists, indices):
    """Gets element from nested lists of arbitrary depth."""
    result = lists
    for i in indices:
        result = result[i]
    return result
549922f3dcd7b3ace7978e1034607f5fd91a3d93
231,911
def align_down(alignment, x):
    """Rounds x down to nearest multiple of the alignment."""
    a = alignment
    return (x // a) * a
7441aa9fc7be9720ad6f518f62ca7400bb800013
579,857
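Usage sketch for align_down above (illustrative addition, not a dataset record); rounding up falls out of the same helper by biasing x first.

assert align_down(8, 37) == 32
# Align up by adding (alignment - 1) before rounding down:
assert align_down(8, 37 + 7) == 40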
def add_reprompt(response):
    """Adds a response message to tell the user to ask their question again."""
    response['response']['reprompt'] = {
        "outputSpeech": {
            "type": "PlainText",
            "text": "Please ask your crypto price question"
        }
    }
    return response
64fa9f057e84d7332a7c3273a6d1940bcbe03d36
636,991
def hex_str_to_int(input_str):
    """
    Converts a string with hex bytes to a numeric value

    Arguments:
        input_str - A string representing the bytes to convert.
            Example: 41414141

    Return:
        the numeric value
    """
    try:
        val_to_return = int(input_str, 16)
    except Exception as e:
        val_to_return = 0
        print(e)
    return val_to_return
838c65d4488de54ef282e6a4d0088c57b68bb8d8
295,336
def hat(segment, position):
    r"""
    This function returns 0 when ``position`` is the start or end of
    ``segment`` and 1 when ``position`` is in the middle of the segment.

           /\
        __/  \__
    """
    h = abs((segment.midpoint - position) / segment.length)
    return max(0, 1 - h)
b76aebf4ce6200e65de67d373222931bad5ce075
311,403
def pop_dunder_recursive(dictionary):
    """Recursively removes all dunder keys from a nested dictionary."""
    keys = [key for key in dictionary.keys()]
    for key in keys:
        if key.startswith("__") and key.endswith("__"):
            dictionary.pop(key)

    for key, value in dictionary.items():
        # Check subdicts for dunder keys
        if isinstance(value, dict):
            pop_dunder_recursive(value)
        # If we have a non-dict iterable which contains a dict,
        # remove dunder keys from that too
        elif isinstance(value, (tuple, list)):
            for element in value:
                if isinstance(element, dict):
                    pop_dunder_recursive(element)
    return dictionary
fe86c9a686e46adb212aa03fbf5a7d95b59602be
257,253
import configparser


def get_config(config_file):
    """Read configuration file, and returns a configparser object"""
    conf = configparser.ConfigParser()
    conf.read(config_file)
    return conf
7d12eb9e1ba5e2ccbfe47518b9c995c4fe09af97
231,472
def parse_int(row, css, invalid_value):
    """Parses an int from the given row using the css selector.
    Returns invalid_value if there is no value."""
    result = row.css(css).get()
    if result:
        return int(result)
    return invalid_value
85029b47fc9e64c9ccda261587d176e9aae8da1b
271,352
import re


def check_legitimate_ver(version):
    """
    This function checks if the version is legitimate: only digits and dots.
    :param version: str
    :return: boolean
    """
    # bool() so the documented boolean is returned rather than a Match object
    return bool(re.match("^[0-9.]+$", version))
550e65b8deeb4c49562ace1cbf14b89c2d0818e5
664,723
def offsets_for_times(consumer, partitions, timestamp):
    """Augment KafkaConsumer.offsets_for_times to not return None

    Parameters
    ----------
    consumer : kafka.KafkaConsumer
        This consumer must only be used for collecting metadata, and not
        consuming. API's will be used that invalidate consuming.
    partitions : list of kafka.TopicPartition
    timestamp : number
        Timestamp, in seconds since unix epoch, to return offsets for.

    Returns
    -------
    dict from kafka.TopicPartition to integer offset
    """
    # Kafka uses millisecond timestamps
    timestamp_ms = int(timestamp * 1000)
    response = consumer.offsets_for_times({p: timestamp_ms for p in partitions})
    offsets = {}
    for tp, offset_and_timestamp in response.items():
        if offset_and_timestamp is None:
            # No messages exist after timestamp. Fetch latest offset.
            consumer.assign([tp])
            consumer.seek_to_end(tp)
            offsets[tp] = consumer.position(tp)
        else:
            offsets[tp] = offset_and_timestamp.offset
    return offsets
53464fea2fa0090d3a1d972cea5bcb32a5919503
255,629
import inspect
import time
import logging


def WaitFor(condition, timeout):
    """Waits for up to |timeout| secs for the function |condition| to return True.

    Polling frequency is (elapsed_time / 10), with a min of .1s and max of 5s.

    Returns:
        Result of |condition| function (if present).
    """
    min_poll_interval = 0.1
    max_poll_interval = 5
    output_interval = 300

    def GetConditionString():
        if condition.__name__ == '<lambda>':
            try:
                return inspect.getsource(condition).strip()
            except IOError:
                pass
        return condition.__name__

    start_time = time.time()
    last_output_time = start_time
    while True:
        res = condition()
        if res:
            return res
        now = time.time()
        elapsed_time = now - start_time
        last_output_elapsed_time = now - last_output_time
        if elapsed_time > timeout:
            raise Exception('Timed out while waiting %ds for %s.' %
                            (timeout, GetConditionString()))
        if last_output_elapsed_time > output_interval:
            logging.info('Continuing to wait %ds for %s. Elapsed: %ds.',
                         timeout, GetConditionString(), elapsed_time)
            last_output_time = time.time()
        poll_interval = min(max(elapsed_time / 10., min_poll_interval),
                            max_poll_interval)
        time.sleep(poll_interval)
14364b00cbba06eda778c3cc7adede6106298b4b
260,387
def get_vertices_from_edge_list(graph, edge_list):
    """Transforms a list of edges into a list of the nodes those edges connect.

    Returns a list of nodes, or an empty list if given an empty list.
    """
    node_set = set()
    for edge_id in edge_list:
        edge = graph.get_edge(edge_id)
        a, b = edge['vertices']
        node_set.add(a)
        node_set.add(b)
    return list(node_set)
f052615bd666343c9032be56ae8b9bde585eb798
126,679
import struct


def _read_ulong(f):
    """Read a 32-bit unsigned long."""
    return struct.unpack('>I', f.read(4))[0]
3871185bcd6b53bc2dcfdb302eb9a76f260e9dc4
582,601
def format_rst_file(title: str, action: str, parameters: list,
                    action_parameter='', title_size='=') -> list:
    """
    Creates the text for the generated rst doc file.

    :param title: The title text for the doc.
    :param action: The action to be called i.e. '.. toctree::'.
    :param parameters: The passed in values to the action i.e. list of file
        names of other rst files to index.
    :param action_parameter: The action's parameter i.e. ':maxdepth: 1'.
    :param title_size: The text size of the title.
    :return: The generated text for the rst doc file, split into a list of lines.
    """
    if action_parameter != '':
        action_parameter = f' {action_parameter}\n'
    lines = [title, title_size * len(title), '', action, action_parameter]
    for parameter in parameters:
        lines.append(f' {parameter}')
    lines = [line + '\n' for line in lines]
    return lines
8ba2965764972eb200f48e5afafc5a638468ec8b
402,329
def addRoundKey(state, roundKey):
    """Adds (XORs) the round key to the state."""
    for i in range(16):
        state[i] ^= roundKey[i]
    return state
b2ad011511cf131f8c1b72ff504b66e9958080e4
56,144
from typing import List


def get_name_parts(name: str) -> List[str]:
    """
    Get name parts from name of argument for constructing internal arg name
    or flag identity.

    Args:
        name (str): String of name for argument (ex: "My Argument").

    Returns:
        List[str]
    """
    return (
        name.lower()
        .replace("--", " ")
        .strip()
        .replace("-", " ")
        .replace("_", " ")
        .split(" ")
    )
de95ab1fbc99ea0dfc924405b5c21a7f0f56451d
390,199
from typing import Tuple


def encrypt(m: int, PU: Tuple[int, int]) -> int:
    """Encryption

    Param:
        m: int -- value to encrypt
        PU: tuple of ints -- key

    Returns ciphertext
    """
    e, n = PU
    cipher = pow(m, e, n)  # modular exponentiation; equivalent to (m**e) % n
    return cipher
d854cf64db8c36d2cd2637a25205971186c5cc06
370,004
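A worked example for encrypt above (illustrative addition, not a dataset record), using the classic textbook RSA toy key p=61, q=53, so n=3233, e=17, d=2753:

public_key = (17, 3233)
ciphertext = encrypt(65, public_key)      # 2790
assert pow(ciphertext, 2753, 3233) == 65  # decrypting with d recovers m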
def get_default_sender(site):
    """get default sender (name, address) tuple"""
    sender = (site.mail_from_name, site.mail_from_addr)
    return sender
4f1d6f5ab57e6f211c7df2f2d1f2881560ebc1e2
390,723
import torch


def hard_dice(input_, target, threshold=0.5, reduction='mean', epsilon=1e-8):
    """
    Hard dice score coefficient after thresholding.

    Arguments:
        input_ (torch tensor): raw probability outputs
        target (torch tensor): ground truth
        threshold (float): threshold value, default: 0.5
        reduction (string): one of 'none', 'mean' or 'sum'
        epsilon (float): epsilon for numerical stability, default: 1e-8

    Returns:
        dice (torch tensor): hard dice score coefficient
    """
    if not input_.shape == target.shape:
        raise ValueError
    # if not (input_.max() <= 1.0 and input_.min() >= 0.0):
    #     raise ValueError
    if not ((target.max() == 1.0 and target.min() == 0.0 and (target.unique().numel() == 2))
            or (target.max() == 0.0 and target.min() == 0.0 and (target.unique().numel() == 1))):
        raise ValueError

    input_threshed = input_.clone()
    input_threshed[input_ < threshold] = 0.0
    input_threshed[input_ >= threshold] = 1.0

    intersection = torch.sum(input_threshed * target, dim=-1)
    input_norm = torch.sum(input_threshed, dim=-1)
    target_norm = torch.sum(target, dim=-1)
    dice = torch.div(2.0 * intersection + epsilon, input_norm + target_norm + epsilon)

    if reduction == 'none':
        pass
    elif reduction == 'mean':
        dice = torch.mean(dice)
    elif reduction == 'sum':
        dice = torch.sum(dice)
    else:
        raise NotImplementedError
    return dice
4a617852a5f96a895d56e1f8149e5f64c05bd0c3
124,026
def add_to_dict(param_dict):
    """
    Aggregates extra variables to dictionary

    Parameters
    ----------
    param_dict: python dictionary
        dictionary with input parameters and values

    Returns
    ----------
    param_dict: python dictionary
        dictionary with old and new values added
    """
    # This is where you define `extra` parameters for adding to `param_dict`.
    return param_dict
ff77474305182be35c84a4c9ddd2d6ab3ddf1ecb
142,965
import torch


def indexes_to_one_hot(indexes, n_dims=None):
    """Converts a vector of indexes to a batch of one-hot vectors."""
    # SRC: https://discuss.pytorch.org/t/fastest-way-of-converting-a-real-number-to-a-one-hot-vector-representing-a-bin/21578/2
    indexes = indexes.type(torch.int64).view(-1, 1)
    n_dims = n_dims if n_dims is not None else int(torch.max(indexes)) + 1
    one_hots = torch.zeros(indexes.size(0), n_dims).scatter_(1, indexes, 1)
    one_hots = one_hots.view(*indexes.shape[:-1], -1)
    return one_hots
6f848e19fdc63c1ed7e7f7e9672a4b8ef1d5c9ba
578,387
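Usage sketch for indexes_to_one_hot above (illustrative addition, not a dataset record):

import torch

one_hots = indexes_to_one_hot(torch.tensor([0, 2, 1]))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])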
def enhex(x):
    """enhex(x) -> str

    Hex-encodes a string.

    Example:

        >>> enhex("test")
        '74657374'
    """
    # str.encode('hex') was Python 2 only; bytes.hex() is the Python 3 equivalent
    return x.encode('utf-8').hex()
12e05592ac58709b40521ed8fc55a120e5f2d1db
495,494
def is_top_down(lines):
    """
    Return `True` if dates in the given lines go in an ascending order, or
    `False` if they go in a descending order. If no order can be determined,
    return `None`. The given `lines` must be a list of lines, ie.
    :class:`~taxi.timesheet.lines.TextLine`,
    :class:`taxi.timesheet.lines.Entry` or
    :class:`~taxi.timesheet.lines.DateLine`.
    """
    date_lines = [
        line for line in lines
        if hasattr(line, 'is_date_line') and line.is_date_line
    ]

    if len(date_lines) < 2 or date_lines[0].date == date_lines[1].date:
        return None
    else:
        return date_lines[1].date > date_lines[0].date
7251ad73478cd85e0faaa229e84060bbc9c649e8
631,966
def atoms_to_xyz_file(atoms, filename, title_line=''):
    """
    Print a standard .xyz file from a set of atoms

    :param atoms: (list(Atom))
    :param filename: (str)
    :param title_line: (str)
    """
    with open(filename, 'w') as xyz_file:
        print(len(atoms), title_line, sep='\n', file=xyz_file)
        for atom in atoms:
            x, y, z = atom.coord
            print(f'{atom.label:<3} {x:^10.5f} {y:^10.5f} {z:^10.5f}',
                  file=xyz_file)
    return None
177b3184f1879c7216323443d88437656712a933
240,747
def get_orientation(strategy, **kwargs):
    """
    Determine a PV system's surface tilt and surface azimuth using a named
    strategy.

    Parameters
    ----------
    strategy: str
        The orientation strategy. Allowed strategies include 'flat',
        'south_at_latitude_tilt'.
    **kwargs:
        Strategy-dependent keyword arguments. See code for details.

    Returns
    -------
    surface_tilt, surface_azimuth
    """
    if strategy == 'south_at_latitude_tilt':
        surface_azimuth = 180
        surface_tilt = kwargs['latitude']
    elif strategy == 'flat':
        surface_azimuth = 180
        surface_tilt = 0
    else:
        raise ValueError('invalid orientation strategy. strategy must be '
                         'one of south_at_latitude_tilt, flat.')
    return surface_tilt, surface_azimuth
f1d27d67175bd2caa3bb95e8d31668ef07a4c8e2
678,678
def gridIndexToSingleGridIndex(ix, iy, iz, nx, ny, nz):
    """
    Convert a grid index (3 indices) into a single grid index:

    :param ix, iy, iz: (int) grid index in x-, y-, z-axis direction
    :param nx, ny, nz: (int) number of grid cells in each direction
    :return: i: (int) single grid index

    Note: ix, iy, iz can be ndarray of same shape, then i in output is
        ndarray of that shape
    """
    return ix + nx * (iy + ny * iz)
72a4cca6a62878a0622b3a42d5b4314758c75092
231,178
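Usage sketch for gridIndexToSingleGridIndex above (illustrative addition, not a dataset record): unlike the row-major multi_to_one_dim record earlier, here the x index varies fastest.

# On a 4 x 3 x 5 grid, cell (ix=1, iy=2, iz=0) maps to 1 + 4*(2 + 3*0) = 9.
assert gridIndexToSingleGridIndex(1, 2, 0, 4, 3, 5) == 9
assert gridIndexToSingleGridIndex(3, 2, 4, 4, 3, 5) == 59  # last cell of 60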
def arrays_shape(*arrays):
    """Returns the shape of the first array that is not None.

    Parameters
    ----------
    arrays : ndarray
        Arrays.

    Returns
    -------
    tuple of int
        Shape.
    """
    for array in arrays:
        if array is not None:
            shape = array.shape
            return shape
e9e6a4876b938934c843386dffc58f0eccfb20a3
7,932
import torch


def cos_objective_func(source, targets):
    """Cosine loss function.

    Args:
        source: Torch tensors.
        targets: Torch tensors.

    Returns:
        Mean cosine value of each source wrt target.
    """
    # m here for matrix.
    object_m = torch.mm(source, torch.t(targets))
    source_norm = torch.norm(source, 2, 1, keepdim=True)
    targets_norm = torch.norm(targets, 2, 1, keepdim=True)
    norm_m = torch.mm(source_norm, torch.t(targets_norm))
    return (object_m / norm_m).mean()
70c5a0eef850802880d0d372fcec2d1a6f4b581b
247,766
def get_updates(_queue):
    """
    Get all available updates from a queue.Queue() instance and return them
    as a list
    """
    _list = []
    while not _queue.empty():
        _list.append(_queue.get())
    return _list
25bc5c5e3a112cf7e87f1c6421474d99b3b7f891
569,485
from typing import Awaitable
import asyncio


async def wait_one(*tasks: Awaitable):
    """
    Either return result of the first completed task or raise its exception.

    wait_one will return as soon as any of tasks completes:

    >>> async def run():
    >>>     task_1 = ...
    >>>     task_2 = ...
    >>>     await wait_one(task_1, task_2)
    >>>     assert task_1.done() or task_2.done()
    """
    return await next(asyncio.as_completed(tasks))
157a9fa3af2e5eca091e01846d263092aabf68eb
525,997
def get_h_cost_dijkstra(node_a, node_b):
    """
    Return the heuristic_cost that it takes to get from node_a to node_b.
    Heuristic: Diagonal movement cost = 14, vertical/horizontal movement
    cost = 10. For Dijkstra the heuristic weight D is 0, so the cost is
    always 0.
    """
    global HEURISTIC_COST_DIAGONAL
    global HEURISTIC_COST_VERTICAL_HORIZONTAL
    dis_x = abs(node_a.position[0] - node_b.position[0])
    dis_y = abs(node_a.position[1] - node_b.position[1])
    D = 0
    return D * (dis_x + dis_y)
ad5979bc1454bd35ffe0e415c2d4e0106f9f9cca
549,911
def class_fullname(obj):
    """Returns the full class name of an object"""
    return obj.__module__ + "." + obj.__class__.__name__
a7b5915e15122664943a181a48d3f52dff232c88
700,132
def read_file(filename):
    """Shortcut for reading a file."""
    with open(filename, 'rb') as fd:
        return fd.read()
276fee9344a7aeb3e5595413e2a9c336085180a4
236,923
def ci_report(ci):
    """return text of a report for confidence intervals"""
    maxlen = max([len(i) for i in ci])
    buff = []
    add = buff.append
    convp = lambda x: ("%.2f" % (x[0] * 100)) + '%'
    conv = lambda x: "%.5f" % x[1]
    title_shown = False
    for name, row in list(ci.items()):
        if not title_shown:
            add("".join([''.rjust(maxlen)] +
                        [i.rjust(10) for i in map(convp, row)]))
            title_shown = True
        add("".join([name.rjust(maxlen)] +
                    [i.rjust(10) for i in map(conv, row)]))
    return '\n'.join(buff)
490be9912fc370334bc8f1468171026f3e9a4aac
536,851
def mk_closest(D, n):
    """Compute a sorted list of the distances for each of the nodes.

    For each node, the entry is in the form [(d1,i1), (d2,i2), ...]
    where each tuple is a pair (distance, node).
    """
    C = []
    for i in range(n):
        dlist = [(D[i, j], j) for j in range(n) if j != i]
        dlist.sort()
        C.append(dlist)
    return C
fb327dc5eb93d4db8b19eb06c369d5bea7c3b275
573,584
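Usage sketch for mk_closest above (illustrative addition, not a dataset record); D can be any mapping indexed by (i, j) pairs, such as a dict:

D = {(i, j): abs(i - j) for i in range(3) for j in range(3)}
C = mk_closest(D, 3)
# C[0] == [(1, 1), (2, 2)]  -- node 0's neighbours, nearest first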
import math


def soundpressure_to_soundlevel(Pa, p0=0.00002):
    """
    Convert sound pressure in Pascal to sound level in dB (dBSPL).

    Lp(dBSPL) = 20 * log10(p/p0)

    p0: threshold of hearing, 0.00002 Pa (20 uPa)
    """
    return 20 * math.log10(Pa / p0)
1f7ab63f356db9f45002f3fc090c9cbf9ea5ba04
135,000
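A quick sanity check for soundpressure_to_soundlevel above (illustrative addition, not a dataset record): a 1 Pa tone sits at roughly 94 dB SPL, the usual calibrator level.

level = soundpressure_to_soundlevel(1.0)
assert abs(level - 93.98) < 0.01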
def get_project_data(repo_dicts):
    """Return data needed for each project in visualization."""
    repo_links, stars, labels = [], [], []
    for repo_dict in repo_dicts:
        repo_name = repo_dict['name']
        repo_url = repo_dict['html_url']
        repo_link = f"<a href='{repo_url}'>{repo_name}</a>"
        repo_links.append(repo_link)
        stars.append(repo_dict['stargazers_count'])
        owner = repo_dict['owner']['login']
        description = repo_dict['description']
        label = f"{owner}<br />{description}"
        labels.append(label)
    return repo_links, stars, labels
304709af51ed1058922d937259b14da71b7367cd
473,452
def module_of(object):
    """Returns the name of the module defining `object`, if possible.

    `module_of` works for classes, functions, and class proxies.
    """
    try:
        object = object.__dict__["Essence"]
    except (AttributeError, KeyError, TypeError):
        pass
    result = getattr(object, "__module__", None)
    if not result:
        globals = getattr(object, "func_globals", None)
        if globals:
            result = globals.get("__name__")
    return result
0adc87b309d466ba1f5f1f78ad50379397528fb2
686,869
def jsonname(val):
    """Convert the string in val to a valid json field name."""
    return val.replace('.', '_')
f6fac35580cc6384ce4a16060fd917fcef4010a3
419,080
import pickle


def read_model(file_name):
    """Loads the specified pickle file.

    Parameters
    ----------
    file_name : str
        The location of the pickle file.

    Returns
    -------
    dict
        The dictionary inside the pickle.
    """
    with open(file_name, "rb") as model_file:
        return pickle.load(model_file)
96e42e94886107f5581f042f7524effd8b3a97dd
615,421
def parseBNDF(lines, i):
    """This function parses the smokeview file to find bndf files

    Parameters
    ----------
    lines : list
        List of strings corresponding to lines from a smokeview file
    i : int
        Index of bndf line

    Returns
    -------
    float
        Mesh number for bndf file
    string
        Boundary file name
    string
        String variable number
    float
        Variable number
    """
    (_, mesh, vNum) = lines[i - 1].split()
    bndfName = lines[i].split(' ')[1].replace('\n', '')
    vID = ' '.join(lines[i + 1].split(' ')[1:]).replace('\n', '')
    (mesh, vNum) = (float(mesh), float(vNum))
    return mesh, bndfName, vID, vNum
aedc8032b4d8f180163b162acb18cb7f08711568
323,090
def getOverlapSetSim(concepts_1: set, concepts_2: set):
    """Returns Overlap Set Similarity for the given concept sets"""
    intersection = len(concepts_1.intersection(concepts_2))
    return intersection / min(len(concepts_1), len(concepts_2))
80cb52ad6335f6ec9988fdd97315d9de717d0414
414,898