Columns: content (string, length 39 to 9.28k) · sha1 (string, length 40) · id (int64, 8 to 710k)
def __checkPassed(a, b, c, verbose):  # used in __convert_dims
    """
    Notes:
        internal logic to determine if correct information was passed in __convert_dims
        when finding cellsize -> arrdim or arrdim -> cellsize conversion
    Inputs:
        a: cells
        b: arrdims
        c: lnglat_deltas
        verbose: whether to print status message
    Outputs:
        bool: return a boolean specifying if check was passed or not.
    """
    # c must be a tuple or list of length 2
    if (c is None) or (type(c) not in (tuple, list)) or (len(c) != 2):
        if verbose:
            print(f'latlong: {c} must be a tuple or list of len 2')
        return False
    if (a is None) or (b is not None):
        if verbose:
            print('error: parameter mismatch, can only pass: cells or arrdims not both')
        return False
    if (type(a) not in (tuple, list)) or (len(a) != 2):
        if verbose:
            print(f'error: parameter {a} is not a tuple or list of length 2')
        return False
    return True
95bf3196146a193a740a47293c7030ef7e4bf3d8
442,322
def mel_sampling( audio, frame_duration_ms = 1200, overlap_ms = 200, sample_rate = 16000 ): """ Generates audio frames from audio. This is for melspectrogram generative model. Takes the desired frame duration in milliseconds, the audio, and the sample rate. Parameters ---------- audio: np.array frame_duration_ms: int, optional (default=1200) overlap_ms: int, optional (default=200) sample_rate: int, optional (default=16000) Returns ------- result: List[np.array] """ n = int(sample_rate * (frame_duration_ms / 1000.0)) n_overlap = int(sample_rate * (overlap_ms / 1000.0)) offset = 0 results = [] while offset + n <= len(audio): results.append(audio[offset : offset + n]) offset += n - n_overlap if offset < len(audio): results.append(audio[offset:]) return results
72cb22ba976d6f91a9b69953d8bedcd33c4ab294
671,329
def altCase(text: str):
    """
    Returns an Alternate Casing of the `text`.
    """
    return "".join(
        [
            words.upper() if index % 2 else words.lower()
            for index, words in enumerate(text)
        ]
    )
b8d2d0c4d98192c321da9111419fbb4262cf2f52
268,463
def flush_queue(queue):
    """Get all data from the queue.

    Notes: this is just a multiprocessing support function.

    Args:
        queue (Queue): the multiprocessing queue

    Returns:
        list: the result data pushed in the queue
    """
    data = []
    while not queue.empty():
        data.append(queue.get())
    return data
0e51e8d5f0d93e8422add9c5f74fbf0f4e240618
555,740
from pathlib import Path


def check_and_get_subdirectory(base_dir: Path, subdir_name: str) -> Path:
    """
    Checks existence of base_dir, NON-existence of base_dir/subdir,
    and returns the Path(base_dir / subdir).

    Raises:
        FileNotFoundError if the base_dir does not exist
        OSError if the base_dir and subdir both exist.

    Returns:
        path to a non-existent subdirectory subdir in the base directory base_dir.
    """
    if not base_dir.is_dir():
        raise FileNotFoundError("Directory {} not found".format(base_dir))

    subdir = base_dir / subdir_name
    if subdir.is_dir():
        raise OSError(
            f"Directory {subdir} exists and shouldn't be overwritten."
        )

    return subdir
1df2d2f03b4933a25f1bba7e1cb62f6ce998ccb3
637,809
from typing import List def split_by_hash_sign(path: str) -> List[str]: """ Checks if the path contains maximum one hash. :param path: path to check :return: path split into array on the hash """ if "#" in path: split_path = path.split("#") if len(split_path) > 2: raise Exception("There should be maximum one '#' in the path {}".format(path)) return split_path return [path]
52456b40a3f880be1d9f2441a4a989d8b5a00868
405,114
def testme(si):
    """Reverse a string, filtering out the vowel characters (and 'y')"""
    so = ''.join([c for c in reversed(si) if c.lower() not in 'aeiouy'])
    return so, len(so)
ea90c3e95139b51f5854a32c3602607e26fd3a6a
677,075
def sentencify(words): """ The function should: 1. Capitalise the first letter of the first word. 2. Add a period (.) to the end of the sentence. 3. Join the words into a complete string, with spaces. 4. Do no other manipulation on the words. :param words: :return: """ return words[0][0].upper() + ' '.join(words)[1:] + '.'
f9e80dfedd9d2257c92a5ab17115e5cf956adaac
248,386
def _paginate(paginated_fn, max_results_per_page, max_results): """ Intended to be a general use pagination utility. :param paginated_fn: :type paginated_fn: This function is expected to take in the number of results to retrieve per page and a pagination token, and return a PagedList object :param max_results_per_page: :type max_results_per_page: The maximum number of results to retrieve per page :param max_results: :type max_results: The maximum number of results to retrieve overall :return: Returns a list of entities, as determined by the paginated_fn parameter, with no more entities than specified by max_results :rtype: list[object] """ all_results = [] next_page_token = None while len(all_results) < max_results: num_to_get = max_results - len(all_results) if num_to_get < max_results_per_page: page_results = paginated_fn(num_to_get, next_page_token) else: page_results = paginated_fn(max_results_per_page, next_page_token) all_results.extend(page_results) if hasattr(page_results, "token") and page_results.token: next_page_token = page_results.token else: break return all_results
de93c2f6c16350b9948cf68c8d53fcf9518fa4f8
140,348
def _possibly_sorted(x):
    """Bypass the sorted() call in reprlib._possibly_sorted

    This is mostly for ensuring reproducibility in tests.
    """
    return list(x)
b7dd511299567631900260a1130ad8e9034cdf4d
114,670
def train_test_split(test_ratio, *args):
    """
    Ordinary train/test split.

    :param test_ratio: float, split value e.g. 0.8 means 80% train and 20% test data
    :param args: tuple of arrays to be split
    :return: list of split arrays
    """
    split = int(len(args[0]) * test_ratio)
    return [(elem[:split], elem[split:]) for elem in args]
a8941968eb801ef18b312b4d93acba8dff0e9cde
504,558
def __is_reflection_coord(coord):
    """Reflection coordinates have a 2 followed by 0 or more trailing 1s."""
    i = len(coord) - 1
    while coord[i] == 1:
        i -= 1
    return coord[i] == 2
f9b26fcf469831b42d95f3979240bdf77e851c40
587,394
def text_concat(format_str, source_str, concat_with, order_by=None): """ Generates the SQL for text concatenation. Parameters ---------- format_str : str Format of the concatenated output. source_str : str fields and FROM part of statement to provide values, along with WHERE to join to aggregated. concat_with : str String to concatenate with order_by : str Column to order by """ if order_by is not None: order_by = f" ORDER BY x.{order_by} ASC" else: order_by = "" sql = f""" STUFF((SELECT '{concat_with}'+CAST({format_str} AS VARCHAR(MAX)) FROM (SELECT DISTINCT {source_str}) x {order_by} FOR XML PATH(''), TYPE).value('.','VARCHAR(MAX)'), 1, {len(concat_with)}, '') """ return sql
6f5e0b3047db84527ad03a897c9f48d76b73ac94
332,824
def segment_times(timeseries, max_gap):
    """
    Returns a list of lists, where each inner list is a separate segment of
    contiguous data with no gaps greater than max_gap.
    """
    time_segments = []
    is_contiguous = False
    arr_n = -1
    for i, t in enumerate(timeseries):
        if not is_contiguous:
            time_segments.append([t])
            arr_n += 1
        else:
            time_segments[arr_n].append(t)
        if i + 1 < len(timeseries):
            is_contiguous = (timeseries[i + 1] - t) < max_gap
    return time_segments
1751be2f415297e07eb8952709388f5033ba7da4
585,723
def _get_name_from_email(email):
    """
    Given an email return a capitalized name.

    Ex. an address like john.smith@example.com would return John Smith.
    """
    name = email.rsplit('@', 1)[0]
    name = ' '.join([n_part.capitalize() for n_part in name.split('.')])
    return name
0644b6dbb69b89624b5f60b7f13f35efe5ce2ff6
314,362
import logging


def INFO(target):
    """A decorator to set the .loglevel attribute to logging.INFO.
    Can apply to either a TestCase or an individual test method."""
    target.loglevel = logging.INFO
    return target
8bf2061b248a457131486687a32440ce68d173b5
661,055
def calc_z_shifts(zphase, zsubstrate, margin): """Calculate shifts from z = 0 for each phase.""" shifts = {} shifts['sub_bottom'] = margin sub_bottom_upper_margin = shifts['sub_bottom'] + zsubstrate shifts['phase'] = sub_bottom_upper_margin + margin phase_upper_margin = shifts['phase'] + zphase shifts['sub_top'] = phase_upper_margin + margin sub_top_upper_margin = shifts['sub_top'] + zsubstrate shifts['box_z'] = sub_top_upper_margin + margin return shifts
6798fc6b4fff552103fe9d7abdfd7e1ef12181d1
584,574
def model_with_weights(model, weights, skip_mismatch): """ Load weights for model. Args model : The model to load weights for. weights : The weights to load. skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model. """ if weights is not None: # model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch) model.load_weights(weights, by_name=True) return model
8d37f3b884d9041a65d4d168c255d1dde203e562
417,396
def divide_cases(ncases, nprocs): """ Divide up load cases among available procs. Parameters ---------- ncases : int Number of load cases. nprocs : int Number of processors. Returns ------- list of list of int Integer case numbers for each proc. """ data = [] for j in range(nprocs): data.append([]) wrap = 0 for j in range(ncases): idx = j - wrap if idx >= nprocs: idx = 0 wrap = j data[idx].append(j) return data
b13db144b5e6ca4a0dc31750c2507c1a250408e0
443,580
def find_first(value, vector):
    """Find the index of the first occurrence of value in vector."""
    for i, v in enumerate(vector):
        if value == v:
            return i
    return -1
d366d991083710c27ec667d69d532d4281246b69
141,666
import requests def extra_metadata_helper(resource_id, headers): """ Build extra metadata dict to help with other integrations. Parameters ---------- resource_id: str The OSF resource ID headers: dict OSF Authorization header Returns ------- Extra metadata dictionary """ # Get project information base_url = "https://api.osf.io/v2/nodes/{}/".format(resource_id) project_info = requests.get(base_url, headers=headers).json() # Build creators list citation_data = requests.get("{}citation/".format(base_url), headers=headers).json() creators = [{ "first_name": author['given'], "last_name": author['family'], "ORCID": None} for author in citation_data['data']['attributes']['author']] # Get license if it exists license = None if 'license' in project_info['data']['relationships'].keys(): license_data = requests.get(project_info['data']['relationships']['license']['links']['related']['href'], headers=headers).json() if license_data['data']['attributes']: license = license_data['data']['attributes']['name'] # See if there's an identifier for this project identifier_data = requests.get("{}identifiers/".format(base_url), headers=headers).json() identifiers = [{ "type": identifier['attributes']['category'], "identifier": identifier['attributes']['value']} for identifier in identifier_data['data']] extra_metadata = { "title": project_info['data']['attributes']['title'], "creators": creators, "publication_date": project_info['data']['attributes']['date_created'], "description": project_info['data']['attributes']['description'], "keywords": project_info['data']['attributes']['tags'], "license": license, "related_identifiers": identifiers, "references": None, "notes": None } return extra_metadata
19038f10fdd11602de7a892fba0a2801ce01fa46
93,066
def startswith(string, substring):
    """ Return True if string starts with substring, otherwise False. """
    return string.startswith(substring)
4b1edeb846f0736b7890e3ddd9daba3ca95d9143
288,553
def cli_cosmosdb_gremlin_database_throughput_migrate(client,
                                                     resource_group_name,
                                                     account_name,
                                                     database_name,
                                                     throughput_type):
    """Migrate an Azure Cosmos DB Gremlin database throughput"""
    if throughput_type == "autoscale":
        return client.migrate_gremlin_database_to_autoscale(resource_group_name, account_name, database_name)
    return client.migrate_gremlin_database_to_manual_throughput(resource_group_name, account_name, database_name)
2b768c0e443c4ce4324e2a3bfc2f685693a08d99
681,922
def relFreqProfile(freqprof):
    """Generates a relative frequency profile from an absolute frequency profile."""
    tokenCount = sum(freqprof.values())
    if tokenCount > 0:
        return dict(((i[0], i[1] / tokenCount) for i in freqprof.items()))
76a2d81212894674e5456750f5d588b1b7bb60a6
197,178
def has_bio_error(prev_label, curr_label): """ Check whether a BIO error occurred by comparing the current label with the previous label. Examples for incorrect BIO tags: 1. I- follows I- of another type 1 Hello B-ABC 2 World I-ABC 3 ! I-XYZ 2. I- after O 1 Hello O 2 World I-ABC 3 ! I-ABC Args: prev_label (Label): Previous label curr_label (Label): Current label Returns: bool: Whether or not a BIO error occurred """ i_follows_o = prev_label.is_o and curr_label.is_i i_follows_other_component = all([ curr_label.is_i, any([ # The types differ prev_label.type != curr_label.type, # OR: the types are equal, but the relation type differs all([ prev_label.type == curr_label.type, prev_label.rel_type is not None, curr_label.rel_type is not None, prev_label.rel_type != curr_label.rel_type ]) ]) ]) return i_follows_o or i_follows_other_component
cc831f71c5d20448b6d822127e01e124c5cc2020
149,904
def Crosses(ps, thresh):
    """Tests whether a sequence of p-values ever drops below thresh."""
    if thresh is None:
        return False

    for p in ps:
        if p <= thresh:
            return True
    return False
f269f1093df0537fc79bab85631050a94b1316fb
314,877
def linspace(start, stop, num):
    """Return a list of floats linearly spaced from start to stop (inclusive).
    List has num elements."""
    return [i * (stop - start) / (num - 1) + start for i in range(num)]
4a8c90845531cf999bbcee5867d1558dad204516
321,104
def pow(x: int, n: int) -> int:
    """pow(x, n) -> x ^ n"""
    return x ** n
51248c191b424ee1d0e0a1a68ef365e6fe097847
475,423
def generate_distracting_answer_by_top(question_type, top_answers_split):
    """
    Use top 3 answers of specific type to replace

    Args:
        question_type: string
        top_answers_split: dict, keys are question_type, values are list of (top_answers, frequency)
    """
    return [_[0] for _ in top_answers_split[question_type][:3]]
eca140e540a6e6824a766307e2479c6e7e094fc5
351,112
def next_cell(board, current): """ Get next cell in board after current :param board: matrix :param current: tuple (matrix index) :return: tuple(matrix index) or None if not exists """ for row in range(current[0], len(board)): for col in range(current[1], len(board)): if board[row][col] == 0: return (row, col) for row in range(len(board)): for col in range(len(board)): if board[row][col] == 0: return (row, col) return None
515286c4a65782dde38af3c04e6db905c892517d
135,969
def _list_str(string, separator=','):
    """Convert comma separated string to list."""
    # split on the separator and strip surrounding whitespace from each item
    return [item.strip(' \n\t') for item in string.split(separator)]
dd272909c58ededde01ad6ebb01101d58788c42f
583,407
def parse_input(line):
    """Parse length, width, and height from line"""
    # Split the line by character 'x'
    # Parse them into ints
    return tuple(int(value) for value in line.split('x'))
6c62ff676cf958f6bde8d29f5ebe4b4487ff171b
266,832
def labeled_point_to_row_col_period(labeled_point): """Helper function to reconstruct period, row, and column of labeled point Used in .map call for predictions Args: labeled_point (LabeledPoint): cell with label and features Returns: tuple (int, int, str): row, col, period """ features = labeled_point.features row, col = features[0], features[1] month, day, hour = features[2], features[3], features[4] period = '2013{:02d}{:02d}{:02d}'.format(int(month), int(day), int(hour)) return row, col, period
e18bd88a8d4a341bec000e0fa250fd7ade69b32b
81,872
def remove_class_init_kwargs(cls, kwargs: dict) -> dict:
    """Remove kwargs used to initialize the given class."""
    params = list(cls.__init__.__code__.co_varnames)
    params.remove("self")
    for param in params:
        kwargs.pop(param, None)
    return kwargs
fdc0e1826971ff02448af2b111dd8a7419f00ce2
528,776
def _nltk_to_iso369_3(name):
    """
    Map a language name to an ISO 639-3 code.

    See https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for an overview.
    """
    name = name.lower()
    return {'danish': 'dan',
            'dutch': 'nld',
            'english': 'eng',
            'french': 'fra',
            'finnish': 'fin',
            'german': 'deu',
            'hungarian': 'hun',
            'italian': 'ita',
            'norwegian': 'nor',
            'portuguese': 'por',
            'russian': 'rus',
            'spanish': 'spa',
            'swedish': 'swe',
            'turkish': 'tur'}.get(name, None)
db29333075ed4dffff9a900c50b638ef25f9dcc7
514,236
def _parse_snapshots(data, filesystem): """ Parse the output of a ``zfs list`` command (like the one defined by ``_list_snapshots_command`` into a ``list`` of ``bytes`` (the snapshot names only). :param bytes data: The output to parse. :param Filesystem filesystem: The filesystem from which to extract snapshots. If the output includes snapshots for other filesystems (eg siblings or children) they are excluded from the result. :return list: A ``list`` of ``bytes`` corresponding to the names of the snapshots in the output. The order of the list is the same as the order of the snapshots in the data being parsed. """ result = [] for line in data.splitlines(): dataset, snapshot = line.split(b'@', 1) if dataset == filesystem.name: result.append(snapshot) return result
9e7ee50892ca446f57e2d52c29f5813bded72186
527,405
def map_fields(raw: str, field_map):
    """Map raw field values to column name and perform type cast on value"""
    fields = raw.split(',')
    return {key: field_map[key](fields[i]) for i, key in enumerate(field_map.keys())}
cbeb5b5abf2e8cf4b2ed8945b5282a69f552ecde
89,992
import requests def get_census_variable_descriptions(dataset, year, variables): """ Download descriptions of census variables from the API """ url_template = "https://api.census.gov/data/{year}/{dataset}/profile/variables/{variable}.json" variable_descriptions = {} for variable in variables: url = url_template.format(year=year, dataset=dataset, variable=variable) response = requests.get(url) data = response.json() variable_descriptions[variable] = { "concept": data["concept"], "label": data["label"], } return variable_descriptions
e87bd8890a46907f03182d0633e1508c562d8f82
609,004
import six def get_process_signature(process, input_parameters): """ Generate the process signature. Parameters ---------- process: Process a capsul process object input_parameters: dict the process input_parameters. Returns ------- signature: string the process signature. """ kwargs = ["{0}={1}".format(name, value) for name, value in six.iteritems(input_parameters)] return "{0}({1})".format(process.id, ", ".join(kwargs))
0004870baaf3a00c35eae815dfcbf4c6965cec51
67,174
def get_message(service, message, userid="me"):
    """Get a message."""
    msg = service.users().messages().get(
        userId=userid, id=message['id']
    ).execute()
    return msg
3f2bed26aeecbde6ddc917c7201a5b1f952839c4
493,849
def get_obj_attr(obj, attr):
    """Works like getattr() but supports django's double underscore object
    dereference notation.

    Example usage:

    .. code-block:: python

        >>> get_obj_attr(book, 'writer__age')
        42
        >>> get_obj_attr(book, 'publisher__address')
        <Address object at 105a79ac8>

    :param obj: Object to start the dereference from
    :param attr: String name of attribute to return
    :returns: Dereferenced object
    :raises: AttributeError if the attribute in question does not exist
    """
    # handle '__' referencing like in QuerySets
    fields = attr.split('__')
    field_obj = getattr(obj, fields[0])
    for field in fields[1:]:
        # keep going down the reference tree
        field_obj = getattr(field_obj, field)
    return field_obj
dc059569acce1d8e33d3e001ef7c908ce43ae4cb
500,553
import torch def get_uniq_topk(rank, history=None): """Based on rank and history, select the top ranked that is not in history Args: rank: (N, seq_len) torch.LongTensor, from torch.sort(x)[1] history: either None, or a torch.LongTensor of size (N, history_len); history_len <= seq_len Returns: res: torch.LongTensor of size (N,1) history: torch.LongTensor of size (N, history_len); if initially history=None, then history_len = 1; otherwise the new history will have res appended as the last column """ res = [] if history is None: res = rank[:, :1] history = res else: for r, h in zip(rank, history): for i in r: if i in h: continue else: res.append(i) break res = torch.stack(res, dim=0).unsqueeze(-1) history = torch.cat([history, res], dim=-1) # in fact, dim=1 return res, history
028d013ffeb870b80955da0449fcec85d1525b00
536,374
def _strftime(d):
    """ Format a date the way Atom likes it (RFC3339?) """
    return d.strftime('%Y-%m-%dT%H:%M:%SZ%z')
1eebf1bff9c68ba4649f1377f16b4b9feb737f01
10,608
from typing import List
from typing import Tuple
from typing import Any


def find_groupped_index(groupping_list: List[Tuple[Any, List[str]]], key_to_check: Any) -> int:
    """Finds index of key in given list of tuples where first element of tuple is key."""
    for i, (key, _) in enumerate(groupping_list):
        if key == key_to_check:
            return i
    return -1
e092228def86029d77f263dd07c5b811bd771687
473,520
def filter_matches(matches, threshold=0.75):
    """Returns a filtered copy of matches that satisfy the ratio test with the given threshold

    Arguments:
        matches {list(tuple(cv2.DMatch))} -- List of tuples of cv2.DMatch objects

    Keyword Arguments:
        threshold {float} -- Filter Threshold (default: {0.75})

    Returns:
        list(cv2.DMatch) -- List of cv2.DMatch objects that satisfy ratio test
    """
    filtered = []
    for m, n in matches:
        if m.distance < threshold * n.distance:
            filtered.append(m)
    return filtered
c2cbec1da42d96575eb422bfdda6a1351e24508b
6,553
def is_blank(text):
    """
    Returns ``True`` if string contains only whitespace characters or is empty.
    Otherwise ``False`` is returned.
    """
    return not text or text.isspace()
3c5f4cf864b4115cfe2dd7cf67165fe5a130ad02
328,628
from typing import List from typing import Dict from typing import Any def consolidate_fastapi_response(all_exceptions: List[Dict[str, Any]]) -> str: """ Consolidate the default fastAPI response so it can be returned as a string. Default schema of fastAPI exception response is: { 'loc': ('body', '<outer_scope1>', '<outer_scope2>', '<inner_param>'), 'msg': '<the_error_message>', 'type': '<expected_type>' } In order to create a meaningful V1-style response, we consolidate the above response into a string of shape: '<outer_scope1>.<outer_scope2>.<inner_param>: <the_error_message>' """ # Pick just the error message while discarding v2 response items def error_to_str(error: dict) -> str: err_node = ".".join(str(loc) for loc in error['loc'] if loc != 'body') res = ": ".join([err_node, error["msg"]]) return res all_errs = ". ".join(error_to_str(exc) for exc in all_exceptions) return all_errs
895883ea80834fe8ffc98ede84f601c5a56c247b
540,938
from typing import List from typing import Tuple import random import math def split_train_dev( game_files: List[str], train_ratio: float = 0.9, rnd_seed: int = 42 ) -> Tuple[List[str], List[str]]: """ Split train/dev sets from given game files sort - shuffle w/ Random(42) - split Args: game_files: game files train_ratio: the percentage of training files rnd_seed: for randomly shuffle files, default = 42 Returns: train_games, dev_games Exception: empty game_files """ # have to sort first, otherwise after shuffling the result is different # on different platforms, e.g. Linux VS MacOS. game_files = sorted(game_files) random.Random(rnd_seed).shuffle(game_files) n_files = len(game_files) if n_files == 0: raise ValueError("no game files found!") n_train = int(math.ceil(n_files * train_ratio)) n_dev = n_files * (1 - train_ratio) n_dev = int(math.floor(n_dev)) if n_dev > 1 else 1 train_games = game_files[:n_train] dev_games = game_files[-n_dev:] return train_games, dev_games
baaf4dd4785cb6324ce3bcdbabe6624aebe7ef82
155,238
def num_args(*args):
    """Return the number of arguments called."""
    return len(args)
12835555487347c64f487fe65970d76a82949a8c
327,389
import string


def strip_punctuation(text):
    """
    Input: text (string)
    Returns: Text string without punctuation
    """
    return text.rstrip().translate(str.maketrans('', '', string.punctuation))
60467cb4accfb5f260932e1c9250f796df416c9b
441,195
def build_hsts_header(config):
    """Returns HSTS Header value."""
    value = 'max-age={0}'.format(config.max_age)
    if config.include_subdomains:
        value += '; includeSubDomains'
    if config.preload:
        value += '; preload'
    return value
9f94d87b1949f5c9e2f898466a8f5191f2327357
17,128
def elapsed_time(word_time):
    """An accessor function for the elapsed time of a word_time."""
    return word_time[1]
c9a1a15e52846c5f3266c9d627554d97c72be220
312,997
from typing import Iterable def product(iterable: Iterable[int], start: int = 1) -> int: """ >>> product([2]) 2 >>> product([2, 3]) 6 >>> product([]) 1 >>> product([2], start=10) 20 """ for v in iterable: start *= v return start
9265fa57cd809982195d1688264b62dbffc4a0b5
194,282
def insertion_sort(array: list) -> list:
    """
    This function sorts an array of numbers using the insertion sort algorithm

    Parameters
    ----------
    array : list
        List containing numbers

    Returns
    -------
    list
        The sorted list
    """
    for i in range(1, len(array)):
        iteration = array[i]
        index = i
        # check index > 0 before indexing array[index - 1] so the comparison
        # never wraps around to the last element
        while index > 0 and iteration < array[index - 1]:
            array[index] = array[index - 1]
            index -= 1
        array[index] = iteration
    return array
6a93e2457d530b7185de7ac62c2c23d0cc503f69
346,644
from pathlib import Path def get_model_name(model_name_or_path): """Return a short version of the model's name or path""" path = Path(model_name_or_path) if path.exists(): if "checkpoint-" in path.name: model_name_or_path = f"{path.parent.name}/{path.name}" else: model_name_or_path = str(path.name) return model_name_or_path
c4a29af7d3123e3ab0f6c7db6686a5cf958cd41a
549,014
def get_counts_permutation_fdr(value, random, observed, n, alpha):
    """
    Calculates local FDR values (q-values) by computing the fraction of accepted hits from the permuted data
    over accepted hits from the measured data normalized by the total number of permutations.

    :param float value: computed p-value on measured data for a feature.
    :param ndarray random: p-values computed on the permuted data.
    :param observed: pandas Series with p-values calculated on the originally measured data.
    :param int n: number of permutations to be applied.
    :param float alpha: error rate. Values below alpha are considered significant.
    :return: Tuple with q-value and boolean for H0 rejected.

    Example::

        result = get_counts_permutation_fdr(value, random, observed, n=250, alpha=0.05)
    """
    a = random[random <= value].shape[0] + 0.0000000000001  # Offset in case of a = 0.0
    b = (observed <= value).sum()
    qvalue = (a / b / float(n))
    return (qvalue, qvalue <= alpha)
0af134f0d37f69d099bad01751730a42ef2a9dc0
392,717
def Compute_structures(V, F):
    """
    For the linear normal kernel

    Parameters
    ----------
    @param : V : torch tensor
        n-points x d-dimension points.
    @param : F : torch Long tensor
        m-connections x 2-dim tensor containing pair of connected points' indices.

    Returns
    -------
    @output : centers : torch tensor
        npoints-1 x d-dimension points, the centers of each discretization segment in the tree.
    @output : lengths : float
        npoints-1 x 1-dimension tensor, the length of each discretization segment in the tree.
    @output : normalized_seg : torch tensor
        npoints-1 x d-dimension normalized vectors of the discretization segments in the tree.
    """
    V0, V1 = V.index_select(0, F[:, 0]), V.index_select(0, F[:, 1])
    u = (V1 - V0)
    lengths = (u**2).sum(1)[:, None].sqrt()
    normalized_tgt_ok = u / (lengths.view(-1, 1))
    centers = (V0 + V1) / 2.
    return centers, lengths, normalized_tgt_ok
5cd1af5dc51332ad92904a037c86047e93548b42
242,238
def convert_timedelta_to_mins(timedelta):
    """Calculates and returns the minutes from the inputted timedelta

    Args:
        timedelta (datetime.timedelta): the timedelta to calculate the minutes from

    Returns:
        int: the minutes calculated from the inputted timedelta
    """
    return int(round(timedelta.total_seconds() / 60))
d6b45c34b02499589cf92fc11f895f479b4b579c
409,991
def section(title):
    """center in a string 60 wide padded with ="""
    return "\n{:=^60}\n.".format(title)
f8904ee5429d5ffe0e9373f61f0db98a261359c4
48,590
def ProjectToProjectResourceName(project):
    """Turns a project id into a project resource name."""
    return 'projects/{0}'.format(project)
957a0bc46442068a52bdbe1ca7bfee25e9715597
276,753
def y_eq_func(ylag,pilag,v,s,slag,alpha,h,b,phi,gamma): """ equilibrium value for output Args: ylag (float): lagged output pilag (float): lagged inflation v (float): demand disturbance s (float): supply disturbance slag (float): lagged supply disturbance alpha (float): sensitivity of demand to real interest rate h (float): coefficient on inflation in Taylor rule b (float): coefficient on output in Taylor rule phi (float): degree of stickiness in inflation expectations gamma (float): effect of output on inflation in SRAS Returns: (float): equilibrium value for output """ return 1/(alpha*b+alpha*gamma*h+1)*(-pilag*alpha*h+alpha*gamma*h*phi*ylag+alpha*h*phi*slag-alpha*h*s+v)
74f1cf166b9806b0c0f7cb742a37329dfe21cf4c
332,607
def multicommand_get_command_short_help(self, ctx, cmd_name): """Returns the short help of a subcommand It allows MultiCommand subclasses to implement more efficient ways to provide the subcommand short help, for example by leveraging some caching. Parameters ---------- ctx : click.core.Context The current context cmd_name : The sub command name Returns ------- str The sub command short help """ return self.get_command(ctx, cmd_name).short_help
0255615202c52f9a6ca82b12d273a97efbbee42d
648,530
import json def find_arg(i): """ Find the arg (e.g. '422') if i is one of the analyzed args, otherwise return '' """ path = '/var/inphosemantics/data/20131214/Washburn/vsm-data/' if i == 'new' or '(' in i: return '' i = int(i) with open(path + 'arg_indices.json', 'r') as jsonf: indices = json.load(jsonf) for k in indices: if i in indices[k]: return str(k) return ''
6357bd5c999fee3b4a7f391272361f681d94a9e3
161,236
def construct_url(ip_address: str) -> str:
    """Construct the URL with a given IP address."""
    if "http://" not in ip_address and "https://" not in ip_address:
        ip_address = "{}{}".format("http://", ip_address)
    if ip_address[-1] == "/":
        ip_address = ip_address[:-1]
    return ip_address
dbd4754140e85a09809abf1a76c9d3eedd45230f
460,046
def sig_indicator(pvalue): """Return a significance indicator string for the result of a t-test. Parameters ---------- pvalue: float the p-value Returns ------- str `***` if p<0.001, `**` if p<0.01, `*` if p<0.05, `ns` otherwise. """ return ( '***' if pvalue < 0.001 else '**' if pvalue < 0.01 else '*' if pvalue < 0.05 else 'ns' )
51798d74afc6284c633d65a5ca89a9a421cab76b
638,162
import csv def read_cities(file_name): """ Read in the cities from the given `file_name`, and return them as a list of four-tuples: [(state, city, latitude, longitude), ...] Use this as your initial `road_map`, that is, the cycle Alabama -> Alaska -> Arizona -> ... -> Wyoming -> Alabama. """ roadmap = [] for l in csv.reader(open(file_name, 'r'), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True): if l[5] == str("Lat"): continue roadmap.append((l[2], l[1], float(l[5]), float(l[6]), int(l[0]), None)) print(roadmap) return roadmap
1fe3f57848e0c543414b8a89595a7040186eb4c7
414,737
def _Backward2a_T_Ph(P, h): """Backward equation for region 2a, T=f(P,h) Parameters ---------- P : float Pressure [MPa] h : float Specific enthalpy [kJ/kg] Returns ------- T : float Temperature [K] References ---------- IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam August 2007, http://www.iapws.org/relguide/IF97-Rev.html, Eq 22 Examples -------- >>> _Backward2a_T_Ph(0.001,3000) 534.433241 >>> _Backward2a_T_Ph(3,4000) 1010.77577 """ I = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7] J = [0, 1, 2, 3, 7, 20, 0, 1, 2, 3, 7, 9, 11, 18, 44, 0, 2, 7, 36, 38, 40, 42, 44, 24, 44, 12, 32, 44, 32, 36, 42, 34, 44, 28] n = [0.10898952318288e4, 0.84951654495535e3, -0.10781748091826e3, 0.33153654801263e2, -0.74232016790248e1, 0.11765048724356e2, 0.18445749355790e1, -0.41792700549624e1, 0.62478196935812e1, -0.17344563108114e2, -0.20058176862096e3, 0.27196065473796e3, -0.45511318285818e3, 0.30919688604755e4, 0.25226640357872e6, -0.61707422868339e-2, -0.31078046629583, 0.11670873077107e2, 0.12812798404046e9, -0.98554909623276e9, 0.28224546973002e10, -0.35948971410703e10, 0.17227349913197e10, -0.13551334240775e5, 0.12848734664650e8, 0.13865724283226e1, 0.23598832556514e6, -0.13105236545054e8, 0.73999835474766e4, -0.55196697030060e6, 0.37154085996233e7, 0.19127729239660e5, -0.41535164835634e6, -0.62459855192507e2] Pr = P/1 nu = h/2000 T = 0 for i, j, ni in zip(I, J, n): T += ni * Pr**i * (nu-2.1)**j return T
f425d6921b2f0e1fe05fe8442c4988db18c12a4d
176,019
def linefunc(X1, X2, Y1, Y2):
    """Construct a 1D linear function (y=a*x+b), based on two points."""
    a = (Y2 - Y1) / (X2 - X1)
    b = Y1 - a * X1
    return lambda X: a * X + b
e946aac2a9b42f7fa7daf7dec095745c7b2ed3f6
496,236
def ensure_fd(fd):
    """Ensure an argument is a file descriptor."""
    if not isinstance(fd, int):
        if not hasattr(fd, "fileno"):
            raise TypeError("Arguments must be file descriptors, or implement fileno()")
        return fd.fileno()
    return fd
eabbb5cd6462ef36953190690a464e43e392ace4
500,787
def store_true(arg, _, args, acc):
    """ lambda that stores a True boolean value on the parser result """
    acc[arg] = True
    return args, acc
84f6cb94905a03b1952ce62fc8e6bea354866016
649,339
def is_link(url, processed):
    """
    Determine whether or not a link should be crawled
    A url should not be crawled if it
        - Is a file
        - Has already been crawled

    Args:
        url: str Url to be processed
        processed: list[str] List of urls that have already been crawled

    Returns:
        bool If `url` should be crawled
    """
    if url not in processed:
        if url.startswith('#') or url.startswith('javascript:'):
            return False

        # str.endswith expects a tuple of suffixes, not a list
        is_file = url.endswith(('pdf', 'jpg', 'jpeg', 'png', 'docx', 'csv', 'xls'))
        if is_file:
            return False

        return True
    return False
310f8ade0d22a988b193dbeb93b083e64ae7d28e
480,569
def mutate_with_removal(solution): """Try to mutate the passed solution in-place by removing an item""" # removal is only possible if there are items placed in the container if solution.placed_items: # select an item randomly item_index = solution.get_random_placed_item_index() # remove the item return solution.remove_item(item_index) return False
b948500395ee817492fe57b2c6ffd030c1d50967
428,853
def plzz(self, rotvel="", deltarotvel="", **kwargs): """Plots the interference diagram from a cyclic modal analysis. APDL Command: PLZZ Parameters ---------- rotvel Rotational speed in revolutions per minute (RPM) used to define the speed line. If blank, use the rotational speed (from OMEGA) specified in the prestressing step of the linear perturbation analysis. If explicitly input as 0, or if the linear perturbation was not used, no speed lines are plotted. deltarotvel Adds speed lines about the RotVel speed line corresponding to RotVel ± DeltaRotVel. Only plotted if RotVel is known. Notes ----- PLZZ plots the cyclic modal frequencies as points on a frequency vs. harmonic index (nodal diameter) graph. If rotational speed (RotVel) is provided, the speed line is also plotted, leading to the interference diagram (also known as the SAFE or ZZENF diagram). If DeltaRotVel is also provided, two additional speed lines are plotted, enveloping the safe speed line itself. For more information, see Postprocessing a Modal Cyclic Symmetry Analysis in the Cyclic Symmetry Analysis Guide. """ command = f"PLZZ,{rotvel},{deltarotvel}" return self.run(command, **kwargs)
8bdd7745f9c9c5cab8ef3825bef3be6296bf8faa
578,305
def split(sentence): """Splits a sentence into words. Args: sentence (str): A sentence to split. Returns: list of str: A list of words. Punctuations are removed. """ return sentence.lower().replace('.', '').replace('?', '').split()
9e8cfa2e640ef9818351e8a249e55ddccc7e1057
435,701
def _pow(a, b):
    """C-like power, a**b"""
    return a**b
02101594d22a3366912b468de96bfe0bdab44102
640,379
def get_q(kernel_size: int, dilation: int):
    """Get apparent kernel size

    Args:
        kernel_size: kernel size
        dilation: dilate ratio of kernel
    """
    return (kernel_size - 1) * dilation + 1 if kernel_size % 2 == 1 else kernel_size * dilation
eba28e9c7f8696ca737368deeefb6056951b9d43
212,295
import ast def get_input(arg, valid_keys): """Convert the input to a dict and perform basic validation""" json_string = arg.replace("\\n", "\n") try: input_dict = ast.literal_eval(json_string) if not all(k in input_dict for k in valid_keys): return None except Exception: return None return input_dict
55f702725186cb74767546c8a3c4b068f0c03f2b
44,157
def diff_mic(pos1, pos2, cell):
    """Minimum image convention relative vector (orthorhombic cell only)"""
    diff = pos2 - pos1
    for i in range(3):
        diff[i] -= round(diff[i] / cell[i]) * cell[i]
    return diff
ea63817262321024bff29439354efeaaf8c58bb4
645,630
def unescape_bytes(bytestr):
    """
    Returns a variant of the given bytestring that has C escapes
    replaced with their ASCII values.

    >>> unescape_bytes(b'\\0')
    b'\x00'
    """
    # encode back with latin-1 so the result is bytes, matching the documented return type
    return bytestr.decode('unicode_escape').encode('latin-1')
a628b9433c409d5180a793c266ba48774239ee41
620,859
def factorial(number: int) -> int: """ >>> factorial(5) 120 >>> factorial(0) 1 >>> import random >>> import math >>> numbers = list(range(0, 50)) >>> for num in numbers: ... assert factorial(num) == math.factorial(num) >>> factorial(-1) Traceback (most recent call last): ... ValueError: factorial() not defined for negative values """ if number < 0: raise ValueError("factorial() not defined for negative values") fact = 1 for i in range(1, number + 1): fact *= i return fact
a63bb3c547411e265184eda954c4616d29a759e5
609,147
def bits_to_str(bits: list) -> str:
    """ convert bits to string

    Args:
        bits (list): bits as list

    Returns:
        str: bit list string
    """
    return "".join(str(int(x)) for x in bits)
4a48f019131463c21df1cbc5a167d08e38dc18cd
417,340
def get_tr1(name):
    """In libstdc++ the tr1 namespace needs special care.

    Return either an empty string or tr1::, useful for appending
    to search patterns.

    Args:
        name (str): the name of the declaration

    Returns:
        str: an empty string or "tr1::"
    """
    tr1 = ""
    if "tr1" in name:
        tr1 = "tr1::"
    return tr1
c44d1f2830fb0f0ee6fcfeda331a46868676363c
116,181
import math


def calc_I_circle(R: float) -> float:
    """Calculate the second moment of area of a circle centered on an axis."""
    return math.pi * R**4 / 4
770b4efcd16655f5893a693a78d76b1e5b64e561
287,525
import time def wait_for(condition, max_tries=60): """Wait for a condition to be true up to a maximum number of tries """ cond = False while not cond and max_tries > 1: try: cond = condition() except Exception: pass time.sleep(1) max_tries -= 1 return condition()
eb8f77c6cc26324222f6ccbc973046c14bb93a56
625,210
def get_uniprot_to_dcid(file_path): """ Args: file_path for the 'uniprot_to_dcid.txt'. Returns: A dict mapping UniProt entry to protein DCID in Data Commons. example: {'O43657': 'TSN6_HUMAN'} """ with open(file_path, 'r') as file: lines = file.read().split('\n') uniprot_to_dcid = {} #lines[0] is the column names and lines[-1] is ''. for line in lines[1:-1]: line_split = line.split('\t') uniprot = line_split[0] dcid = line_split[2] # multiple uniprot entry maps to one entry name if ',' in uniprot: uniprots = uniprot.split(',') for uni in uniprots: uniprot_to_dcid[uni] = dcid else: uniprot_to_dcid[uniprot] = dcid return uniprot_to_dcid
5b00c83fb4430d28055d5b7c5b7b8de3acb3c6fe
363,277
def within_range(year_start, year_end, year_x): """ Check if year_x is within the time span defined by [year_start, year_end] Parameters ----------- year_start : int year_end : int year_x : int Return ------- Boolean """ return ((year_x >= year_start) and (year_end >= year_x))
eec98ed9369c1169606493d5e9be8f34611dd8ad
481,474
def ReadFile(path, encoding='utf-8'): """Reads data from a file. Args: path: The path of the file to read. encoding: Same param as open(). Set to None for binary mode. """ with open(path, mode='r' if encoding else 'rb', encoding=encoding) as f: return f.read()
b0a0b5482db050b506c66292cc147bf7ba160f35
218,745
def create_unique_ID(class_no, sentence_no):
    """
    Creates a unique ID for our dataset in the format of
    'c' + class_no + '-s' + sentence_no.
    Eg - c0-s0, c1-s2 etc.
    """
    unique_id = 'c' + str(class_no) + '-s' + str(sentence_no)
    return unique_id
378c4cc2c24c15e5c5a217519b8bda975088f155
305,166
def parse_keypair_lines(content, delim='|', kv_sep='='): """ Parses a set of entities, where each entity is a set of key-value pairs contained all on one line. Each entity is parsed into a dictionary and added to the list returned from this function. """ r = [] if content: for row in [line for line in content if line]: item_dict = {} for item in row.split(delim): key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep)] item_dict[key] = value r.append(item_dict) return r
fc720f66e84c4173cc78e61ef5be31ae447e0cd8
391,957
from datetime import datetime


def set_current_time() -> str:
    """Return the current time as string with the following format
    'YYYYMMDD_HHMMSS'
    """
    return datetime.now().strftime("%Y%m%d_%H%M%S")
6761fd286126efbdc5b525e4ce0820feae5fa48f
201,161
import requests def get_versions(manifest_url: str, types=['snapshot', 'release', 'old_beta', 'old_alpha']): """Get manifest of mojang.""" response = requests.request( method='GET', url=manifest_url, ) response_json = response.json() versions = [] for version in response_json['versions']: if version['type'] in types: versions.append(version) return versions
257272f009f76d0b57739e32d6d744ec65b14efd
490,903
def coroutine(f):
    """Turn a generator function into a coroutine by calling .next() once."""
    def started(*args, **kwargs):
        cr = f(*args, **kwargs)
        cr.next()
        return cr
    return started
d2f63295a6a75eb70b6d99784acd6582f37b9c45
250,226
import configparser def read_config(file): """ This loads the INI file into an easy to use ConfigParser() object. This configuration structure can then be made an input to the processing functions. Parameters ---------- file: str The name of the INI file to load Returns ------- config: ConfigParser() object The ConfigParser object containing the structure of the INI file """ my_parser = configparser.ConfigParser() my_parser.read(file) return my_parser
4ba8f27bc975bda0788f83fd781b94d8b32aac64
418,211
def scroll_with_mousewheel(widget, target=None, modifier='Shift', apply_to_children=False): """scroll a widget with mouse wheel Args: widget: tkinter widget target: scrollable tkinter widget, in case you need "widget" to catch mousewheel event and make another widget to scroll, useful for child widget in a scrollable frame modifier (str): Modifier to use with mousewheel to scroll horizontally, default is shift key apply_to_children (bool): bind all children Examples: scroll_with_mousewheel(my_text_widget, target='my_scrollable_frame') to make a scrollable canvas: for w in my_canvas: scroll_with_mousewheel(w, target=my_canvas) """ def _scroll_with_mousewheel(widget): target_widget = target if target else widget def scroll_vertically(event): # scroll vertically ---------------------------------- if event.num == 4 or event.delta > 0: target_widget.yview_scroll(-1, "unit") elif event.num == 5 or event.delta < 0: target_widget.yview_scroll(1, "unit") return 'break' # bind events for vertical scroll ---------------------------------------------- if hasattr(target_widget, 'yview_scroll'): # linux widget.bind("<Button-4>", scroll_vertically, add='+') widget.bind("<Button-5>", scroll_vertically, add='+') # windows and mac widget.bind("<MouseWheel>", scroll_vertically, add='+') # scroll horizontally --------------------------------------- def scroll_horizontally(event): # scroll horizontally if event.num == 4 or event.delta > 0: target_widget.xview_scroll(-10, "unit") elif event.num == 5 or event.delta < 0: target_widget.xview_scroll(10, "unit") return 'break' # bind events for horizontal scroll ---------------------------------------------- if hasattr(target_widget, 'xview_scroll'): # linux widget.bind(f"<{modifier}-Button-4>", scroll_horizontally, add='+') widget.bind(f"<{modifier}-Button-5>", scroll_horizontally, add='+') # windows and mac widget.bind(f"<{modifier}-MouseWheel>", scroll_horizontally, add='+') _scroll_with_mousewheel(widget) def handle_children(w): for child in w.winfo_children(): _scroll_with_mousewheel(child) # recursive call if child.winfo_children(): handle_children(child) if apply_to_children: handle_children(widget)
d993586b7703b286f9a17406a1858c8e7f4c8507
622,746
def format_choice(choice):
    """
    Default format for choice

    :param choice: The choice
    :return: The formatted choice
    """
    return choice
7986143bc19da03d66a58fb21d477b7c895ab9ee
414,973
def is_supported_compression(file_extension: str) -> bool: """Determine if the given file extension indicates a compression format that pandas can handle automatically. Args: file_extension (str): the file extension to test Returns: bool: True if the extension indicates a compression format that pandas handles automatically and False otherwise Notes: Pandas can handle on the fly decompression from the following extensions: ‘.bz2’, ‘.gz’, ‘.zip’, or ‘.xz’ (otherwise no decompression). If using ‘.zip’, the ZIP file must contain exactly one data file to be read in. """ return file_extension.lower() in [".bz2", ".gz", ".xz", ".zip"]
9a964b6cb126483a519746a3fd9cd1ce84f2d386
418,425
def get_db(entity):
    """
    Returns the #pony.orm.Database backing the #Entity instance *entity*.
    """
    return entity._database_
012f05c638b41ed21cf6d717fc3b55926264efb9
667,238
import torch def one_hot_embedding(labels, num_classes=2): """Embedding labels to one-hot form. Args: labels: (LongTensor) class labels, sized [B,N]. num_classes: (int) number of classes. Returns: (tensor) encoded labels, sized [N,#classes]. """ B, N = labels.size() # labels = labels.view(-1, 1) # [B,N]->[B*N,1] labels = labels.view(int(B * N), 1) y = torch.FloatTensor(labels.size()[0], num_classes) # [B*N, D] y.zero_() y.scatter_(1, labels, 1) return y
b4ec618441685ec0b30fb42eb363ea3db378e334
267,297
def get_method(ioctl_code):
    """Returns the correct method type name for a 32 bit IOCTL code"""
    method_names = [
        'METHOD_BUFFERED',
        'METHOD_IN_DIRECT',
        'METHOD_OUT_DIRECT',
        'METHOD_NEITHER',
    ]
    method = ioctl_code & 3
    return method_names[method], method
81a044837ba1aa46ddc824ff8c0dccd3e8c2eb21
366,035
from typing import List from typing import Dict from typing import Any def validate_cell_header( headers: List[str], cell: Dict[str, Any] ) -> List[str]: """Check that header of a cell meets project specifications.""" content = [line.rstrip('\n') for line in cell['source']] curr_header = content[0] msg = f"Cell header must be h2 (i.e. start with ##), found: {curr_header}" if not curr_header.startswith('## '): raise ValueError(msg) msg = f"Each header must appear only once, '{curr_header}' is duplicated" if curr_header in headers: raise ValueError(msg) headers.append(curr_header) return headers
c18c385792a0f04661ca3961e31d7bb7ce8e5801
29,372