content: string (lengths 39 to 9.28k)
sha1: string (length 40)
id: int64 (values 8 to 710k)
def make_roi_header(**param):
    """
    Format header data to be written when saving ROI data.

    Args:
        param (dict): integration parameters; the integration method is
            inferred from the parameter key containing "pos".

    Returns:
        header (string): header data.
    """
    hdr_list = ['== Integration ROI ==']
    method = [i for i in param.keys() if "pos" in i][0].split('_pos')[0]
    hdr_list.append('Integration method: {}'.format(method))
    for k, v in param.items():
        hdr_list.append('{}: {}'.format(k, v))
    header = "\n".join(['# ' + i for i in hdr_list])
    return header
ba3ad2218cab4144e790b539fc7208bf065ce53c
43,727
def remove_stopwords(sentence: list, stopwords): """Function removes stopwords from sentence. Args: sentence (list): Sentence in english language. stopwords (list): List of stopwords Returns: str: Sentence without stopwords. """ ret_sen = ' '.join([word for word in sentence if word not in stopwords]) return ret_sen
024350cfd63071ff86e53c3c248dd7192c2097f5
212,506
def positive_index(index, size): """ Return a positive index from any index. If the index is positive, it is returned. If negative, size+index will be returned. Parameters ---------- index : int The input index. size : int The size of the indexed dimension. Returns ------- int A positive index corresponding to the input index. Raises ------ ValueError If the given index is not valid for the given size. """ if not -size <= index <= size: raise ValueError( "Invalid index {} for size {}".format(index, size)) if index < 0: index = size + index return index
d2c21e8e819926fc1904a2176a633007b3ef7a36
88,926
def get_all_logits(predictions, features):
    """After checking assertions against predictions and features length, return start & end logits.

    Args:
        predictions ([Tuple[ndarray, ndarray]]): start & end logit predictions.
        features ([Dataset]): tokenized & split datasets.

    Returns:
        Tuple([array]): start & end logits
    """
    assert (
        len(predictions) == 2
    ), "`predictions` should be a tuple with two elements (start_logits, end_logits)."
    all_start_logits, all_end_logits = predictions
    assert len(predictions[0]) == len(
        features
    ), f"Got {len(predictions[0])} predictions and {len(features)} features."
    return all_start_logits, all_end_logits
92477be3dcf16d8a98203346413aea13bdb6f12f
177,498
import json def load_config(config_files): """ loads json configuration files the latter configs overwrite the previous configs """ config = dict() for f in config_files: with open(f, 'rt') as cfg: config.update(json.load(cfg)) return config
4a61ca063bf8147a0f2576cddc8bf438b33f8792
38,192
import json


def load_data(filename):
    """Load Atom Snippets data from json

    :filename: Atom Snippets json file
    :returns: json atom data
    """
    with open(filename) as snippets_file:
        atom_snippets_fixture = json.load(snippets_file)
    return atom_snippets_fixture
9bb6b31c028379bc2a45f90f0f32241eff5950a7
383,306
def get_batch(chunk_agg: list, # chunk aggregate from get_chunks function batch_size: int, # how many samples to load i: int, # current batch index return_malicious: bool = False, # whether to return the malicious label for the data points or not return_counts: bool = False, # whether to return the counts for the data points or not return_tags: bool = False): # whether to return the tags for the data points or not """ Get a batch of data from a chunk aggregate. Args: chunk_agg: Chunk aggregate from get_chunks function batch_size: How many samples to load i: Current batch index return_malicious: Whether to return the malicious label for the data points or not (default: False) return_counts: Whether to return the counts for the data points or not (default: False) return_tags: Whether to return the tags for the data points or not (default: False) Returns: Current batch of sha (optional), features and labels. """ # get current batch of data using i and batch size batch = [t[i:i + batch_size] for t in chunk_agg] # pop the last element of the current batch (y -> labels) batch_y = batch.pop() # initialize labels dict labels = {} if return_malicious: # get malware label for this sample through the index labels['malware'] = batch_y[:, 0] if return_counts: # get count for this sample through the index labels['count'] = batch_y[:, 1] if return_tags: # get tags list for this sample through the index labels['tags'] = batch_y[:, 2:] # return current batch unpacked (contains S (optionally) and X) and labels dict return *batch, labels
a6f112d490c4a9e6235a99359937dc159494f464
236,234
def load_graph(location): """ Format the graph from the provided txt file. Returns a dictionary: {node: [edges]} """ with open(location, 'r') as f: graph = {} for line in f: line = line.split('\t') graph[line[0]] = line[1:-1] return graph
90da4d827cc45b51e0607f746187005cc37d8aae
424,224
import torch def near(a,b): """Test if two tensors are nearly identical""" return torch.allclose(a,b, rtol=1e-03, atol=1e-05)
b3b1e27b97ff7e877cb59a9b0572d086e544a983
551,398
def pop(string, encoding=None): """pop(string,encoding=None) -> (object, remain) This function parses a tnetstring into a python object. It returns a tuple giving the parsed object and a string containing any unparsed data from the end of the string. """ # Parse out data length, type and remaining string. try: (dlen, rest) = string.split(":", 1) dlen = int(dlen) except ValueError: raise ValueError("not a tnetstring: missing or invalid length prefix") try: (data, type, remain) = (rest[:dlen], rest[dlen], rest[dlen + 1:]) except IndexError: # This fires if len(rest) < dlen, meaning we don't need # to further validate that data is the right length. raise ValueError("not a tnetstring: invalid length prefix") # Parse the data based on the type tag. if type == ",": if encoding is not None: return (data.decode(encoding), remain) return (data, remain) if type == "#": try: return (int(data), remain) except ValueError: raise ValueError("not a tnetstring: invalid integer literal") if type == "^": try: return (float(data), remain) except ValueError: raise ValueError("not a tnetstring: invalid float literal") if type == "!": if data == "true": return (True, remain) elif data == "false": return (False, remain) else: raise ValueError("not a tnetstring: invalid boolean literal") if type == "~": if data: raise ValueError("not a tnetstring: invalid null literal") return (None, remain) if type == "]": l = [] while data: (item, data) = pop(data, encoding) l.append(item) return (l, remain) if type == "}": d = {} while data: (key, data) = pop(data, encoding) (val, data) = pop(data, encoding) d[key] = val return (d, remain) raise ValueError("unknown type tag")
17697986010cecd7f490556f3867764645bafa0c
280,177
def create_grid(start, stop, frequency): """ create_grid - returns an evenly grid in a list, with a number of points per year equal to the frequency. For example, frequency = 2 means semiannual. For example, create an annual grid from years 0 -> 5 (inclusive) >>> create_grid(0., 5., 1) [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] Create a semi-annual grid with frequency = 2 >>> create_grid(0., 2., 2) [0.0, 0.5, 1.0, 1.5, 2.0] Negative time is allowed. >>> create_grid(-1., 0, 4) [-1.0, -0.75, -0.5, -0.25, 0.0] Note that dates are aligned to the start date, and if the end point is not an even number of time steps from the start, the interval terminates at the last equally-spaced point before the stop point. >>> create_grid(10., 11.1, 1) [10.0, 11.0] >>> create_grid(10., 11.6, 2) [10.0, 10.5, 11.0, 11.5] Raises a ValueError if the stop point is before the start point. >>> create_grid(1., 0., 1) Traceback (most recent call last): ... ValueError: Stop point must be after start NOTE: There are similar functions in NumPy (arange, ...), but I do not want to introduce a major dependence on non-standard libraries here. :param start: float :param stop: float :param frequency: int :return: list """ if stop < start: raise ValueError('Stop point must be after start') frequency = float(frequency) start = float(start) interval = int(frequency * (stop - start)) # Generate an integer grid out = range(0, interval + 1) # return to original time interval as floats return [start + float(x) / frequency for x in out]
c645b92738c86c36f3e1095927c75a6e11c7aa1e
240,581
def fmtExp(num): """Formats a floating-point number as x.xe-x""" retStr = "%.1e" % (num,) if retStr[-2] == '0': retStr = retStr[0:-2] + retStr[-1] return retStr
b6cbc6cf824450c4e9a7f55f4b5080b81811e84f
138,806
def field_validator(field): """Decorate an entity method to make it a validator of the given `field`. Parameters ---------- field : Any The field to validate. Returns ------- Callable The decorated method. """ return field.validator
8720f0fc967fac2a06221d83e6461d285d0115dc
494,609
import uuid def make_uuid(value): """Converts a value into a python uuid object.""" if isinstance(value, uuid.UUID): return value return uuid.UUID(value)
b65b5739151d84bedd39bc994441d1daa33d1b51
46
def _do_not_recurse(value): """Function symbol used for wrapping an unpickled object (which should not be recursively expanded). This is recognized and respected by the instantiation parser. Implementationally, no-op (returns the value passed in as an argument). Parameters ---------- value : object The value to be returned. Returns ------- value : object The same object passed in as an argument. Notes ----- Taken (with minor changes) from `Pylearn2`_. .. _Pylearn2: \ http://github.com/lisa-lab/pylearn2/blob/master/pylearn2/config/yaml_parse.py """ return value
7a68483470ae742df1ae1d41a1017f66a8cd161c
567,176
def postprocess_keywords(field, keywords, **options): """Split a string of keywords into a list.""" if not keywords: return [] return [keyword.strip() for keyword in keywords.split(';') if keyword.strip()]
f026d0f1cb90e388e5a0b2e23af01ef11685256d
424,080
def filter_json(json, *keys): """Returns the given JSON but with only the given keys.""" return {key: json[key] for key in keys if key in json}
452ce26104d1b9395112a4f2fb96656eef74ca37
462,275
def extract_variable_name(attribute_name: str): """ Function used to transform attribute names into cheetah variables attribute names look like standard shell arguments (--attr-a), so they need to be converted to look more like python variables (attr_a) Parameters ---------- attribute_name : str name of the attribute Returns ------- transformed name """ return attribute_name.lstrip("-").replace("-", "_")
14724cf730c8be1f8e79fb3bacd41fcf9b78f657
204,039
def cropping_center(img, crop_shape, batch=False): """ Crop an array at the centre Args: img: input array crop_shape: new spatial dimensions (h,w) """ orig_shape = img.shape if not batch: h_0 = int((orig_shape[0] - crop_shape[0]) * 0.5) w_0 = int((orig_shape[1] - crop_shape[1]) * 0.5) img = img[h_0:h_0 + crop_shape[0], w_0:w_0 + crop_shape[1]] else: h_0 = int((orig_shape[1] - crop_shape[0]) * 0.5) w_0 = int((orig_shape[2] - crop_shape[1]) * 0.5) img = img[:, h_0:h_0 + crop_shape[0], w_0:w_0 + crop_shape[1]] return img
3a0b41f20d8044804ab1f5b471d18f98c4cc97c5
357,937
import ipaddress def get_hosts(network): """get_hosts() will return all the hosts within a provided network, range""" network = ipaddress.IPv4Network(network, strict=False) hosts_obj = network.hosts() hosts = [] for i in hosts_obj: hosts.append(str(i)) return hosts
097fa3abbf1cda1c3c0ddc0c2fec4a06d1d44fa9
709,522
def calculate_check_digit(nhs_number: str) -> int: """Given a nine-digit NHS Number calculate the tenth check digit.""" if not nhs_number.isdigit(): raise ValueError("nhs_number must comprise only digits") if len(nhs_number) != 9: raise ValueError("Expecting nine digits") # https://www.datadictionary.nhs.uk/data_dictionary/attributes/n/nhs/nhs_number_de.asp?shownav=1 digits_weighted = [int(v) for v in list(nhs_number)] for i in range(9): digits_weighted[i] = digits_weighted[i] * (10 - i) check_digit = 11 - (sum(digits_weighted) % 11) if check_digit == 11: check_digit = 0 if check_digit == 10: raise ValueError("Number is invalid") return check_digit
8327899f44b6fe6420ad826a69bed6deb612ed74
199,423
def get_insert_loc(segments, segment): """ Uses a binary search to find the correct location to insert a new segment. Parameters ---------- segments : list of list of int The indices of the points belonging to the segments/lines. segment : list of int The indices of the points belonging to the segment/line. Returns ------- : int The index where the segment should be inserted. """ if len(segments) == 0: return 0 if segment[0] > segments[-1][0]: return len(segments) lo = 0 hi = len(segments) while lo < hi: mid = (lo + hi) // 2 if segment[0] < segments[mid][0]: hi = mid else: lo = mid + 1 return lo
4a46c85e93d0a52a2f4a328ec62ef5af93d6b082
349,364
def get_output(response, more_out=False): """ Returns final output to display :param response: WoppResponse object to get the info :param more_out: more or less information? :return: dict containing output information """ out_dict = { 'name': response.name, 'current_version': response.latest_version, 'summary': response.summary, 'homepage': response.homepage, 'package_url': response.package_url, 'author': response.author, } if more_out: out_dict.update({ 'author_email': response.author_email, 'releases': ', '.join(response.releases) if response.releases else None, 'project_urls': response.project_urls, 'requires_python': response.requires_python, 'license': response.license, 'current_release_url': response.latest_release_url, 'current_package_info': response.latest_pkg_urls, 'dependencies': ', '.join(response.dependencies) if response.dependencies else None, }) else: out_dict.update({ 'latest_releases': ', '.join(response.latest_releases) if response.latest_releases else None, }) return out_dict
4e90c3124c78a5499a851a03786a36dfdf363ff5
128,857
def censys_ipv4_meta_extraction(raw: dict) -> dict: """ Extracts metadata from Censys IPv4 dicts :param raw: Censys IPv4 dict :return: Metadata part of common format dict """ _as = raw.get("autonomous_system", None) or dict() return { "ip": raw["ip"], # Program should fail if IP is not given "as": { "number": _as.get("asn", None), "name": _as.get("name", None), "location": _as.get("country_code", None), "prefix": _as.get("routed_prefix", None), }, }
1760282c9dee363968973e54c9b76410865e1c93
53,906
import hashlib def calculate_file_hash(filename): """ Calculate MD5 check-sum for given file Args: filename: Location of file Returns: str: MD5 check-sum """ with open(filename, 'rb') as file: return hashlib.md5(file.read()).hexdigest()
b82850c863e133ccfccc2d41da27031ee674ec2d
517,651
def define_plane(p, t): """Calculates the offset (d) parameter of a plane given a point on the directrix (p) and the tangent vector (t)""" return -1.0 * ((t[0] * p[0]) + (t[1] * p[1]) + (t[2] * p[2]))
be3c75b5d84b3acba29b0137b867a73005c61a0a
462,113
def is_trailer_present(pdf, obj_end_index): """Checks if trailer is present in PDF file.""" pdf.seek(0) pdf = pdf.read() if b'trailer' in pdf[obj_end_index::]: return True return False
8fb80c6e72d711021bbfa60d482b0899432041c2
260,119
def d2s_rfc3339(d): """ Date to string in the format expected by Atom feeds. """ return d.isoformat('T') + 'Z'
cff067ca754518d50a44b4a10a7fcf494acfbca8
355,435
import hashlib def md5sum(fname): """ Computes md5 sum of a file contents. :param fname: Path to file. :returns: md5 sum of the file. """ md5 = hashlib.md5() with open(fname, 'rb') as f: for chunk in iter(lambda: f.read(128*md5.block_size), b''): md5.update(chunk) return md5.hexdigest()
f79f439de8c3cf479cdb9005d28ef76f2058057c
238,216
def debom(string): """Strip BOM from strings.""" return string.encode("utf-8").decode("utf-8-sig")
2c5bcd7353cbbd3db42a6f2b801ecab7eaca5b4f
487,747
import csv def loadData(file_path='data/crane_data.csv'): """ load data, from crane_data.csv as default """ data = [] with open(file_path, 'r') as f: reader = csv.reader(f) next(reader) for line in reader: buffer = [] for item in line: buffer.append(int(item)) data.append(buffer) return data
e4cf4506f379aa19f12bcbefda8d49d39e68caf4
424,369
def is_main_google_link(soup_tag): """ This function checks if the given BeautifulSoup4 tag is of a main link in Google page """ return 'data-ved' in soup_tag.attrs
28a4c61adc44be6932c0062c97e0c2dbcfb2b72f
180,122
def return_filename_filter_string(settings): """Returns a list of dicts with, a key/value for the search string. This string is used to filter the log files based on the command line parameters. """ searchstrings = [] rw = settings['rw'] iodepths = settings['iodepth'] numjobs = settings['numjobs'] benchtypes = settings['type'] for benchtype in benchtypes: for iodepth in iodepths: for numjob in numjobs: searchstring = f"{rw}-iodepth-{iodepth}-numjobs-{numjob}_{benchtype}" attributes = {'rw': rw, 'iodepth': iodepth, 'numjobs': numjob, 'type': benchtype, 'searchstring': searchstring} searchstrings.append(attributes) return searchstrings
a4f978bc389f6fac1d078f47c8b94f530394dd61
342,207
def checksave(save_all, pix_list, save_ratio, save_count_annotated, save_count_blank):
    """
    Checks whether or not an image chip should be saved

    :param save_all: (bool) saves all chips if true
    :param pix_list: list of pixel values in image mask
    :param save_ratio: ratio of annotated chips to unannotated chips
    :param save_count_annotated: total annotated chips saved
    :param save_count_blank: total blank chips saved
    :return: bool
    """
    if save_all is True:
        save = True
    elif save_count_blank == 0 or save_count_annotated / float(save_count_blank) > save_ratio:
        # guard against division by zero when no blank chips have been saved yet
        save = True
    elif len([x for x in pix_list if x > 0]) > 0:
        save = True
    else:
        save = False
    return save
766144b91ec7c8e9cc7126127f07a5f9cd942640
586,239
import re def quotemeta(string): """Implementation of perl quotemeta - all chars not matching /[A-Za-z_0-9]/ will be preceded by a backslash""" return re.sub(r'([^A-Za-z_0-9])', r'\\\g<1>', string, count=0)
24585a5927e7b1ec6e676e7b27d529ba5616a3eb
451,583
def is_pattern_error(exception: TypeError) -> bool: """Detect whether the input exception was caused by invalid type passed to `re.search`.""" # This is intentionally simplistic and do not involve any traceback analysis return str(exception) == "expected string or bytes-like object"
623246404bbd54bc82ff5759bc73be815d613731
1,479
def EQ(a, b): """ >>> EQ(1, 2) False >>> EQ(1, 1) True >>> EQ("abc", "abc") True >>> EQ("abc", "def") False >>> EQ({"a":"b"}, {"a":"b"}) True >>> EQ({"a":"b"}, {"a":"c"}) False """ return a == b
d21735cc061d29d018969fbff7bf1a52218daaa4
222,950
def _format_credits(cs: str) -> str: """ Format credits data into a search link, if possible """ # no data if not cs.strip(): return "" # multiple artists, cant search elif "|" in cs: return cs else: search_link: str = ( r"https://www.nhk.or.jp/minna/search/?keyword={}&opt=all".format(cs) ) return f'=HYPERLINK("{search_link}", "{cs}")'
ccd2f4488b88e9f61af81f162dab11802946419e
585,939
def separator(simbol, count):
    """
    Builds a separator out of any symbol repeated any number of times

    :param simbol: separator symbol
    :param count: number of repetitions
    :return: separator string
    usage examples below
    """
    s = simbol * count
    return s
bdf9b7ac533eaec26552a3ed6238f239caac9d21
640,582
import yaml import logging def _read_instructions() -> dict: """Read the database migration instructions. These are contained in the migrate.yaml file of the database project. Parameters ---------- None Returns ------- migrate : dict Database metadata and migration instructions Notes ----- This function assumes that the current working directory is the root directory of the database project. If no migrate.yaml file is found, we throw an error. """ try: stream = open('/deployment/migrate.yaml', 'r') migrate = yaml.safe_load(stream) except FileNotFoundError: logging.error('Database project must have migrate.yaml to be deployed.') raise FileNotFoundError return migrate
61b723bb0e3659f64976d45dbc77ddf774686a43
562,237
from datetime import datetime def print_time(t): """ prints time t in "11:23 AM" format :type t: float time in minutes :rtype : str time in specified format """ dt = datetime(2014, 8, 1, int(t/60), int(t%60), 0) return dt.strftime("%I:%M %p")
c47aad5c85c56c382ad7c6050a6b022d8933cc47
94,947
def newline_to_br(base): """Replace newline with `<br />`""" return base.replace('\n', '<br />')
fe807e08a875358f8f8da0c422f1b43cd19f7e03
668,541
def get_table_name(element): """ The function will return a fully qualified table name according to the logic applied to the processed element. In this case, input data already provides the table name directly but it can be extended to more complex use cases. """ return 'PROJECT_ID:DATASET.' + element['type']
4ab7f0c1bd401524f9426a9740dc2e990407ab54
97,235
def suit(card): """Returns the suit of a card""" return card[-1]
cb270350f7882b7ba0e43e26e590b6ec596543b5
496,542
def is_valid_seq(seq, max_len=2000): """ True if seq is valid for the babbler, False otherwise. """ l = len(seq) valid_aas = "MRHKDESTNQCUGPAVIFYWLO" if (l < max_len) and set(seq) <= set(valid_aas): return True else: return False
257fa70384ce5ae90373b0d7cd5532e8258bf9dc
389,665
def update_removal_plan(orig_dict, updating_dict): """ Helper to update dictionaries of sets of objects to remove/redact. """ for key in updating_dict.keys(): if key in orig_dict: orig_dict[key].update(updating_dict[key]) else: orig_dict[key] = updating_dict[key] return orig_dict
16aedb2d2fc3d57214a4cc7f1c94c61c947bc87b
515,312
import math def convtransp_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1): """ Utility function for computing output of transposed convolutions takes a tuple of (h,w) and returns a tuple of (h,w) """ if type(h_w) is not tuple: h_w = (h_w, h_w) if type(kernel_size) is not tuple: kernel_size = (kernel_size, kernel_size) if type(stride) is not tuple: stride = (stride, stride) if type(pad) is not tuple: pad = (pad, pad) if type(dilation) is not tuple: dilation = (dilation, dilation) h = math.floor((h_w[0] + 2 * pad[0] - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] + 1) w = math.floor((h_w[1] + 2 * pad[1] - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] + 1) return h, w
441576a77f639f22f71a5a54ff4aeb2bd584d7df
639,996
def is_naive(value): """Return whether the given datetime object is timezone naive :return: boolean, True if naive, False otherwise """ return value.utcoffset() is None
6ed565b5b267eaddf5dc27ebec229e1156b80830
552,281
def process_inputs(data): """ Process user inputs and apply default values if parameters not specified. Parameters ---------- data : dict User input containing model parameters Returns ------- out : dict Model data in format that allows construction of the Pyomo model """ # Extract symbols for assets assets = list(data['initial_weights'].keys()) # Number of intervals over multi-period optimisation horizon periods = [len(v.keys()) for k, v in data['estimated_returns'].items()][0] # Estimated return for each interval estimated_returns = { (i, int(k)): v for i, j in data['estimated_returns'].items() for k, v in j.items() } # Extract model parameters parameters = data.get('parameters', {}) data = { 'S_ASSETS': assets, 'S_PERIODS': range(1, periods + 1), 'S_TIME_INDEX': range(1, periods + 2), 'P_RETURN': estimated_returns, 'P_INITIAL_WEIGHT': data['initial_weights'], 'P_MIN_WEIGHT': parameters.get('min_weight', -1), 'P_MAX_WEIGHT': parameters.get('max_weight', 1), 'P_MIN_CASH_BALANCE': parameters.get('min_cash_balance', 0), 'P_MAX_LEVERAGE': parameters.get('max_leverage', 1), 'P_MAX_TRADE_SIZE': parameters.get('max_trade_size', 1), 'P_TRADE_AVERSION': parameters.get('trade_aversion', 1), 'P_TRANSACTION_COST': parameters.get('transaction_cost', 0.001), } return data
8ebd4d54d24c51a97ca9fa1a269623846e085ecd
73,288
import re def re_match_dob(user_form, re_input): """ Outputs a tuple containing .group(1), group(3), and group(5) strings from a regular expression match. """ re_match = re.compile(re_input) re_match = re_match.search(user_form) if re_match: return (re_match.group(1).strip(), re_match.group(3).strip(), re_match.group(5).strip())
fcc482cf397a783b0c243716ce35a3eda543530e
558,779
def costfunc(circuit_evals, proportions): """Takes the expectation values of the circuit evaluations and adds them according to the proportion Arguments: circuit_evals :[Float]: the expectation values proportions :[Float]: the mixing coefficients """ loss = 0 for circ_eval, prop in zip(circuit_evals,proportions): loss += circ_eval * prop return loss
a17397e3788784a83b9576756ba201250d50443a
142,286
import select def _fileno_can_read(fileno): """Check if a file descriptor is readable.""" return len(select.select([fileno], [], [], 0)[0]) > 0
7628e6bfa4cb93fabb950612d6233b54452ece66
581,694
def unpack_puzzle_input(dir_file: str) -> list: """ Args: dir_file (str): location of .txt file to pull data from Returns: list of numeric values saved in .txt file """ with open(dir_file, "r") as file: content = file.read() data = content.split("\n") return list(map(int, data))
f1ee3bf954f500a75801cf499d274f22c078cf5f
492,141
def make_sample_id(gloss_id, lang1, lang2, index1, index2): """ Sample IDs should uniquely identify a feature row. Sample sample ID: 98/English,German/1,1 """ assert lang1 < lang2 s = str(gloss_id) + '/' s += lang1 +','+ lang2 + '/' s += str(index1) +','+ str(index2) return s
4a935605788c4561aab06fe8bd2e88675d81c7a0
593,856
def sort_file(f):
    """Create sorted list of names from file contents."""
    with open(f, 'r') as handle:
        return sorted(handle.read().replace('\"', '').split(','))
ebab364fd2fda7bfb77c3279c890d669653de8f1
474,141
def full_ipv6(ip6): """Convert an abbreviated ipv6 address into full address.""" return ip6.replace('::', '0'.join([':'] * (9 - ip6.count(':'))))
326043107b5607f2c847ebfa4ced6e1a62afee1d
526,892
def get_border(char, length): """Get a border consisting of a character repeated multiple times. :param char: The character to make up the border. :param length: The length of the border. :return: A string consisting of the character repeated for the given length. """ border = '' for i in range(length): border += char return border
9cd73504dc450e1e31c75b398240a27184a130e4
695,996
def get_accounts_for_institution(accounts, institution):
    """Provides the accounts available from the given institution.

    Args:
        accounts: array of accounts to filter by institution
        institution: institution to filter the accounts by

    Returns:
        array of 0..* accounts provided by given institution
    """
    institution_accounts = []
    for account in accounts:
        if account.institution == institution:
            institution_accounts.append(account)
    return institution_accounts
07ecba61c8e42f3635122c995b356045a479f6c0
279,137
import torch


def _multiclass_accuracy(prediction, ground_truth):
    """
    Computes metrics for multiclass classification

    Arguments:
    prediction, torch tensor of int (num_samples) - model predictions
    ground_truth, torch tensor of int (num_samples) - true labels

    Returns:
    number of accurate predictions and the total number of samples,
    from which the accuracy ratio can be computed
    """
    return torch.sum(prediction == ground_truth), len(prediction)
d8efb8b1adba5e8379313fbb8b1150d97ff23892
617,093
def key_value(tree): """Returns the key value of given tree""" return tree[1]
753bb9502bb87b07adbf56b2ad415886d084c85d
585,685
def IS(l,u,y,q): """ calculation of the Interval Score l: left interval limit u: right interval limit y: argument of IS q: quantile level corresponding to limits Output IS value in y for given l,u,q """ if y<l: return 2*(l-y) + q*(u-l) elif y>u: return 2*(y-u) + q*(u-l) else: return q*(u-l)
f7cdda3d5de1a61ec2341faf0bd788dd4f9e0356
121,545
def remove_duplicates(data_frame): """ Remove duplicate rows from dataframe :param data_frame: pandas dataframe :return: pandas dataframe without duplicate rows """ num_dups = sum(data_frame.duplicated()) if num_dups == 0: return data_frame else: df = data_frame.drop_duplicates(keep="first") assert sum(df.duplicated()) == 0, "Still duplicates present" return df
acffd8cc57fe739ac4733da9831ff0692033d568
459,447
def create_lecture_collection_slug(video_attributes): """ Create a name for a collection based on some attributes of an uploaded video filename Args: video_attributes (ParsedVideoAttributes): Named tuple of lecture video info """ return ( video_attributes.prefix if not video_attributes.session else "{}-{}".format(video_attributes.prefix, video_attributes.session) )
2637fc24c23a027c9749c2ea35c7b157c9888555
608,551
def SubtractTop(xs, **unused_kwargs): """Subtracts the first tensor from the second.""" return xs[1] - xs[0]
9a2701a0a2ca923e24cf62164a99da3b0fcede9d
515,403
def pages_sorted_by_page_no(book_pages, reverse=False): """Return a list of book_page Row instances sorted by page_no. Args: list of BookPage instances Returns: list of BookPage instances, sorted """ return sorted( book_pages, key=lambda k: k.page_no, reverse=reverse, )
4c92a5ec46d308ab9f368a263895b874ca62a5b7
504,249
def _check_code(response, expected_code): """Check the response code from a response object return True/False """ return response.status_code == expected_code
a45fc66a7f2d651a79273605be9ac457207bb5ac
138,443
def halfstring(inputstr):
    """Return the first half of a string."""
    strlen = len(inputstr)
    return inputstr[:strlen // 2]
43e6113ebed1e0545d296e4789ee3a274e2754ab
88,910
def reduce_length_for_tweet(Hash, Message, Reduced=False): """ Recursively remove a word from the message until it is small enough to tweet Reduced is a boolean testing if the string has been shortened. If it has, an ellipsis (…) will be placed at the end of the tweet. """ if len(Hash) + len(Message) > 133: # get rid of a word Message = ' '.join(Message.split(' ')[:-1]) Reduced = True return reduce_length_for_tweet(Hash, Message, Reduced) return Hash, Message, Reduced
75f9eb8dfd7a0dad2183e21d6fb9f648f0a0714f
375,500
import string import random def generate(size=12, chars=string.ascii_letters + string.digits, special=False): """ Generate password :param size: Size is number of chars :param chars: Charset :param special: Boolean, us special characters or not :return: Random string """ if special: chars += string.punctuation return ''.join(random.SystemRandom().choice(chars) for _ in range(size))
e953f533dc09dd2e69d705b5ca3d37785402c1b4
367,620
def is_tachycardic(heart_rate, patient_age): # test """Checks to see if heart rate is tachycardic considering age Args: heart_rate (int): heart rate of specified patient patient_age (int): age of specified patient Returns: str: tachycardic or not tachycardic """ if 1 <= patient_age <= 2: threshold = 151 elif 3 <= patient_age <= 4: threshold = 137 elif 5 <= patient_age <= 7: threshold = 133 elif 8 <= patient_age <= 11: threshold = 130 elif 12 <= patient_age <= 15: threshold = 119 else: threshold = 100 if heart_rate > threshold: return "tachycardic" else: return "not tachycardic"
2cbf3602e34809d01ffd2aa3329e3ab6e79840ee
546,638
import copy def convertUnits(ww_params, pixel_size): """ Convert to the units currently used by the analysis pipeline. This also adds additional zeros as needed to match the length expected by the analysis pipeline. """ ww_params = copy.copy(ww_params) ww_params[0] = ww_params[0] * pixel_size ww_params[1] = ww_params[1] * 1.0e+3 ww_params[2] = ww_params[2] * 1.0e+3 for i in range(len(ww_params), 7): ww_params.append(0.0) return ww_params
cb32938ccacee6b76dbacbb1c4bd76e4f13b1cef
674,241
import random def createLoc(qspace): """Given a qspace, return a list of randomized numbers (len=dim) within the qspace""" loc = [] for tup in qspace.getSize(): a1 = tup[0] a2 = tup[1] loc.append(random.uniform(a1,a2)) return loc
bf1376a46443937a72f56b2814f9b5f0953eedd7
397,205
def find_most_extreme_value(values): """ Find the most extreme (signed) value and index of the value. Parameters ---------- values: float array An array of signed values. Returns ------- The value and index of the most extreme value. """ max_value = values.max() min_value = values.min() max_value_index = values.argmin() if (max_value < -min_value) \ else values.argmax() max_value = min_value if (max_value < -min_value) else max_value return max_value, max_value_index
023bde31e8e8b3457223fa6d9a221f3efef465ff
148,272
def average_rating(rating_list): """Helper method that calculates the average rating of a book.""" if not rating_list: return 0 return round(sum(rating_list) / len(rating_list))
e0fa64abb12e51793d6f148a9be81ca543a03025
576,097
from typing import Optional from typing import Union def prepare_timeout(timeout: Optional[int]) -> Optional[float]: """Request timeout is in milliseconds, but `requests` uses seconds""" output: Optional[Union[int, float]] = timeout if timeout is not None: output = timeout / 1000 return output
64b31825a072fbab8c7112c008d444e1c4abbf41
578,783
def is_tile(*tiles): """Return a predicate function for `find()` that will find all coordinates containing one of `tiles`.""" tiles = set(tiles) def predicate(tile_map, coord): tile = tile_map[coord] return (tile in tiles) return predicate
a8372a2ee8fa3c3b23dc246365330ea32adbdea8
580,297
def askyesno(question, default=True): """Ask a yes/no question and return True or False. The default answer is yes if default is True and no if default is False. """ if default: # yes by default question += ' [Y/n] ' else: # no by default question += ' [y/N] ' while True: result = input(question).upper().strip() if result == 'Y': return True if result == 'N': return False if not result: return default print("Please type y, n or nothing at all.")
10635ea8eb9ff735921b0aeb98a3b0083703cf07
667,353
def forceTwoTuple(x): """ If `x` is a tuple or a list, return `x`, otherwise return `(x, x)`. """ if isinstance(x, list): x = tuple(x) if not isinstance(x, tuple): x = (x, x) return x
fbd9233f16a35847886902102fdaf3c182f07ea6
309,378
def __charge_to_sdf(charge): """Translate RDkit charge to the SDF language. Args: charge (int): Numerical atom charge. Returns: str: Str representation of a charge in the sdf language """ if charge == -3: return "7" elif charge == -2: return "6" elif charge == -1: return "5" elif charge == 0: return "0" elif charge == 1: return "+1" elif charge == 2: return "+2" elif charge == 3: return "+4" else: return "0"
1bfda86ee023e8c11991eaae2969b87a349b7f7e
705,833
import torch
from typing import Optional
from typing import Tuple


def generate_image_coords(
    image_shape: torch.Size,
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Generate image horizontal, vertical coordinates

    Args:
        image_shape (torch.Size): shape of image. Expected 2/3/4D or higher dimensions,
            where the last two dimensions are (h, w).
        dtype (torch.dtype, optional): data type, defaults to torch.float32. Defaults to None.
        device (torch.device, optional): torch device. Defaults to None.

    Returns:
        torch.Tensor: horizontal coordinates in `image_shape`.
        torch.Tensor: vertical coordinates in `image_shape`.
    """
    dtype = dtype or torch.float32
    ndims = len(image_shape)
    if ndims < 2:
        raise ValueError("rank of `image_shape` must be at least 2D, "
                         f"got {ndims}")
    h = image_shape[-2]
    w = image_shape[-1]
    # Generate x, y coordinates
    x = torch.arange(w, dtype=dtype, device=device)
    y = torch.arange(h, dtype=dtype, device=device)
    # Expand dims to match depth map
    x = x.view((1,) * (ndims - 2) + (1, -1))  # (..., 1, w)
    y = y.view((1,) * (ndims - 2) + (-1, 1))  # (..., h, 1)
    x = torch.broadcast_to(x, image_shape)  # (..., h, w)
    y = torch.broadcast_to(y, image_shape)
    return x, y
e293676a80a2ecd08112bbc27f98967ca1990eec
332,846
def csv_to_list(csv_string): """ Converts a string with comma-separated integer values to a Python list of integers. Receives -------- csv_string : string Comma-separated integer values. Returns ------- integer_list : list List of integer values. """ string_list = csv_string.split(',') integer_list = list(map(int, string_list)) return integer_list
f8aa609006245da13e14912354ff594651960829
363,049
def clean_list(data_list: list) -> list: """Returns a list with any none values removed Args: data_list (list): The list to be cleaned Returns (list): The list cleaned of None values. """ return list(filter(None, data_list))
ec329fcaf25ef8f84479519ac7b79c0f51e8e625
75,326
def unquote_option_value(value): """Remove quotes from a string.""" if len(value) > 1 and value[0] in ('"', "'") and value[0] == value[-1]: return value[1:-1] return value
089e8ca9006004703e649684af6f0bd1a1cc74ca
671,908
def convert_top(t): """Converts time of possession string to seconds Args: t (str): e.g. '1:30' Returns: int: e.g. 90 """ try: m, s = [int(c) for c in t.split(':')] return m * 60 + s except (AttributeError, ValueError): return 0
a3364949f6317c585892a6026be226b371e579dd
498,230
def number_of_stutters(s): """ What comes in: -- a string s What goes out: Returns the number of times a letter is repeated twice-in-a-row in the given string s. Side effects: None. Examples: -- number_of_stutters('xhhbrrs') returns 2 -- number_of_stutters('xxxx') returns 3 -- number_of_stutters('xaxaxa') returns 0 -- number_of_stutters('xxx yyy xxxx') returns 7 -- number_of_stutters('xxxyyyxxxx') returns 7 -- number_of_stutters('') returns 0 Type hints: :type s: str """ # ------------------------------------------------------------------------- # DONE: 4. Implement and test this function. # The testing code is already written for you (above). # ------------------------------------------------------------------------- count = 0 for k in range( 1, len(s)): if s[k] == s[k - 1]: count = count + 1 return count
cf10213a2e229c4c4ad9ecda40a56fb669873b72
498,524
def run_script(script, localvars=None): """ Execute a given project script in the current context. Parameters ---------- script : script instance The script to execute. localvars : dict, optional If provided it will be used for evaluating the script. In general, it can be `localvrs`=``locals()``. Returns ------- out Output of the script if any """ return script.execute(localvars)
431ed3c34f3f26c9afa90c792241cba2ea511b57
242,459
def add_quincena_column(dfx, date_col_name): """ Adding column with a distinction between the first and second half of the month (quincena) :param dfx: (dataframe) df without the quincena column added :param date_col_name: (string) name of the column that will be used as reference the create the new quincena column :return dfx: (dataframe) df with a column indicating the half of the month with the following format: %y-%m-qx (where 'x' stands for 1 or 2, depending on the month's half) """ ## Creating support column to identify the half of the month dfx.insert( dfx.columns.to_list().index(date_col_name) + 1, 'support_col_month_half', dfx[date_col_name].apply(lambda x: '1' if int(x.strftime('%d')) <= 15 else '2') ) ## Creating quincena column dfx.insert( dfx.columns.to_list().index(date_col_name) + 1, 'quincena', dfx[date_col_name].dt.strftime(date_format='%y-%m-q') + dfx['support_col_month_half'] ) ## Dropping support column dfx.drop(['support_col_month_half'], axis=1, inplace=True) return dfx
388ff0d6ca7c73c9b61f8b0c115777ab809d480e
410,046
def _calculate_meta(meta, bases): """Calculate the most derived metaclass.""" winner = meta for base in bases: base_meta = type(base) if issubclass(winner, base_meta): continue if issubclass(base_meta, winner): winner = base_meta continue raise TypeError( 'metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases' ) return winner
905be74a62bf946d3bd5cdfc1bed12c1dce34210
562,345
def multiple_split(source_string, separators, split_by = '\n'): """ This function allows the user to split a string by using different separators. Note: This version is faster than using the (s)re.split method (I tested it with timeit). Parameters: * source_string: string to be splitted * separators: string containing the characters used to split the source string. * split_by: all the ocurrences of the separators will be replaced by this character, then the split will be done. It defaults to '|' (pipe) """ translate_to = split_by * len(separators) translation = str.maketrans(separators, translate_to) return source_string.translate(translation).split(split_by)
0310d60a225fe156f86d6c0b4ce02781773750de
22,526
import csv def convert(filepath, formatter): """Convert CSV file to a list of strings by applying the provided formatter to each line. """ output = [] with open(filepath, "r", encoding="utf-8") as file: reader = csv.reader(file) for row in reader: output.append(formatter(row)) return output
da9b9d0b69bcb79fc25cfd80df856ef0b3d3d4ff
457,067
def find(string, letter, start = 0): """Find function using a string and a search letter. [Optionally] can specify start index as well.""" index = start while index < len(string): if string[index] == letter: return index index = index + 1 return -1
007e3afdabe8267ad375960b3dfcbf4f4bbfaaae
409,780
def convert_to_numeric(score): """ Convert the score to a float. """ converted_score = float(score) return converted_score
a5949a4c826e4b115b7bd37c253f09a2456fd6a3
507,102
from typing import Tuple def check_version(available: str, needed: Tuple[int, int]) -> bool: """Check that an available version matches the needed version.""" major, minor = available.split(".") if int(major) < needed[0]: return False return int(minor) >= needed[1]
63e1608f92cac9fef2d8429eb505a20a64382b74
615,684
def nub(x):
    """Deletes all duplicates from a list"""
    new = []
    for item in x:
        if item not in new:
            new.append(item)
    return new
ed92909bbc3efdb3dd4dbb0da7c24e3db4343f6e
558,518
def flip(board): """Returns horizontal mirror image of board with inverted colors.""" flipped_board = dict() for square, piece in board.items(): flipped_board[(7 - square[0], square[1])] = piece.swapcase() return flipped_board
62e3bbbe33abdd2e2d4e1ce6eae9f992b0fd275a
696,890
def solve_substring_left_to_right(text): """ Solve a flat/small equation that has no nested parentheses Read from left to right, regardless of the operation :param str text: A flat equation to solve :return: The result of the given equation :rtype: int """ text = text.replace("(", "") text = text.replace(")", "") inputs = text.split(" ") total = 0 next_operation = total.__radd__ for input in inputs: if input == "+": next_operation = total.__radd__ elif input == "*": next_operation = total.__rmul__ else: value = int(input) total = next_operation(value) return total
7ac57effd54bebdaa1a5479a2d33881a0b640b73
105,785
def gen_caption_from_classes(class_names, templates): """ Given a list of class names, return a list of template augmented captions, and the class_idx of these captions. captions: A list of strings describing each class labels_list: A list of ints representing the class index """ captions = [] labels_list = [] for i, class_name in enumerate(class_names): if type(class_name) == str: class_name = [class_name] for c in class_name: for template in templates: caption = template.format(c) captions.append(caption) labels_list.append(i) return captions, labels_list
e5654da09f37c06524b6bde1ad9d8808ea094245
510,974
def already_all_imported(group, imported_filepaths): """Test is all the filepaths in groups are already imported in imported_filepaths""" nimported = sum(fp in imported_filepaths for fp in group) return nimported == len(group)
c6bbd66795ba527dec81a45b959a4278543f92e1
571,272
def dumb_factor(x, primeset): """ If x can be factored over the primeset, return the set of pairs (p_i, a_i) such that x is the product of p_i to the power of a_i. If not, return [] """ factors = [] for p in primeset: exponent = 0 while x % p == 0: exponent = exponent + 1 x = x//p if exponent > 0: factors.append((p,exponent)) return factors if x == 1 else []
d01a1faa90a62521b0e9881e2a62791b69482087
78,249
from typing import Sequence from typing import Type from typing import Union def unique_fast( seq: Sequence, *, ret_type: Type[Union[list, tuple]] = list ) -> Sequence: """Fastest order-preserving method for (hashable) uniques in Python >= 3.6. Notes ----- Values of seq must be hashable! See Also -------- `Uniquify List in Python 3.6 <https://www.peterbe.com/plog/fastest-way-to-uniquify-a-list-in-python-3.6>`_ """ return ret_type(dict.fromkeys(seq))
c41c6b298e52bd3069414206cf9ada4766ea8f4d
7,955