Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def conditional_decorator(decorator, condition): """Checks the condition and if True applies specified decorator.""" def gen_decorator(f): if not condition: return f return decorator(f) return gen_decorator
4ffdf93c40ae3c23475ad447de0d124794ccce81
380,276
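Usage sketch for the conditional_decorator entry above (illustrative only; the trace decorator, DEBUG flag and add function below are hypothetical, not part of the entry):

import functools

DEBUG = True

def trace(f):
    # hypothetical decorator that logs each call
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        print("calling", f.__name__)
        return f(*args, **kwargs)
    return wrapper

@conditional_decorator(trace, DEBUG)
def add(a, b):
    return a + b

add(1, 2)  # logs "calling add" only when DEBUG is True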
import decimal def serialize_decimal(amount: decimal.Decimal) -> str: """Serialize a decimal value.""" return str(amount)
b54e9cc056a09e2e67ee61c03355f0b8b27dd0bd
257,393
import re def aide_memoire(s): """Replaces all of: 'aid memoire' 'aide memoir' 'aide memoire' 'aide -memoire' 'aide-memoir' 'aide-memoire' with 'aide memoire'. """ pattern = re.compile('([Aa]ide{0,1} {0,1}( |-)[Mm]emoire{0,1})') return re.sub(pattern, 'aide memoire', s)
874d4ac20ca20792484cf28f9e6135d740037828
440,509
import re def display_to_origin(display): """ from the display value, an stencila article version, trim it to be the SWH origin value e.g. for display value https://elife.stencila.io/article-30274/v99/ return https://elife.stencila.io/article-30274/ """ if not display: return None match_pattern = re.compile(r"^(https://elife.stencila.io/.*?/).*$") return match_pattern.sub(r"\1", display)
1ff22328590dd2926a83406f82a2fd920b7e2f90
23,246
import collections def sort_dict(source_dict): """Recursive alphabetical sort a nested dict. Sorts a direct dict child, as well list of dicts. Args: source_dict: Nested dict Returns: OrderedDict: Sorted alphabetically by key name. """ sorted_dict = collections.OrderedDict() # Alphabetical sort on key. for key, val in sorted(source_dict.items(), key=lambda t: t[0]): if isinstance(val, dict): sorted_dict[key] = sort_dict(val) # Create new OrderedDict elif isinstance(val, list): # Sort any child dicts, otherwise skip. lst = [] for item in val: if isinstance(item, dict): item = sort_dict(item) lst.append(item) sorted_dict[key] = lst else: sorted_dict[key] = val return sorted_dict
06ad26f3553bd1f9ee3605c5452174315dd3aba4
464,506
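A minimal usage sketch for the sort_dict entry above (the sample nested dict is made up for illustration):

data = {"b": 2, "a": {"d": 4, "c": 3}, "e": [{"g": 7, "f": 6}]}
sort_dict(data)
# OrderedDict([('a', OrderedDict([('c', 3), ('d', 4)])),
#              ('b', 2),
#              ('e', [OrderedDict([('f', 6), ('g', 7)])])])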
import textwrap def unindent(text: str) -> str: """Remove indentation from text""" return textwrap.dedent(text).strip()
035c46a9921a328902f84d71316707f854bf5201
688,881
def add_tracks_to_playlist(spotify_obj, spotify_username, playlist_id, tracks_ids, tracks_per_requests=100): """Method that adds tracks with given Spotify tracks IDs to Spotify user's playlist with a given playlist ID. Spotipy.Spotify object is used to add the tracks. Maximum tracks per request in Spotify API is 100 and the same number is set in the method by default. Can be changed to a number below 100.""" results = [] for tracks_chunk in [tracks_ids[i:i + tracks_per_requests] for i in range(0, len(tracks_ids), tracks_per_requests)]: results.append(spotify_obj.user_playlist_add_tracks(spotify_username, playlist_id, tracks_chunk)) return results
08383b4a8d74989afecec217159078951eb199c5
243,025
import io def build_wsgi_environ(scope, body): """ Builds a scope and request body into a WSGI environ object. This code snippet is taken from https://github.com/django/asgiref/blob /36c3e8dc70bf38fe2db87ac20b514f21aaf5ea9d/asgiref/wsgi.py#L52 WSGI specification can be found at https://www.python.org/dev/peps/pep-0333/ This function helps translate ASGI scope and body into a flask request. """ environ = { "REQUEST_METHOD": scope["method"], "SCRIPT_NAME": scope.get("root_path", ""), "PATH_INFO": scope["path"], "QUERY_STRING": scope["query_string"].decode("ascii"), "SERVER_PROTOCOL": "HTTP/{}".format(scope["http_version"]), "wsgi.version": (1, 0), "wsgi.url_scheme": scope.get("scheme", "http"), "wsgi.input": body, "wsgi.errors": io.BytesIO(), "wsgi.multithread": True, "wsgi.multiprocess": True, "wsgi.run_once": False, } # Get server name and port - required in WSGI, not in ASGI environ["SERVER_NAME"] = scope["server"][0] environ["SERVER_PORT"] = str(scope["server"][1]) environ["REMOTE_ADDR"] = scope["client"][0] # Transforms headers into environ entries. for name, value in scope.get("headers", []): # name, values are both bytes, we need to decode them to string name = name.decode("latin1") value = value.decode("latin1") # Handle name correction to conform to WSGI spec # https://www.python.org/dev/peps/pep-0333/#environ-variables if name == "content-length": corrected_name = "CONTENT_LENGTH" elif name == "content-type": corrected_name = "CONTENT_TYPE" else: corrected_name = "HTTP_%s" % name.upper().replace("-", "_") # If the header value repeated, # we will just concatenate it to the field. if corrected_name in environ: value = environ[corrected_name] + "," + value environ[corrected_name] = value return environ
57c80dcdc63904ee2323378d20d3e9da7c45fa6e
571,020
def _get_parameters_proto(host_calls_dictionary): """Get the FormalParameterProtos for the first host call in the dictionary.""" return host_calls_dictionary['host_calls'][0].parameters
7d62ee04bc52fe29bd36a14366c96e8ce5542c46
688,192
def convert_message(content, id): """ Strip off the useless sf message header crap """ if content[:14] == 'Logged In: YES': return '\n'.join(content.splitlines()[3:]).strip() return content
f1e56ae59a93cd5e1d7fbd6ec37a24b90040d119
555,446
def CheckUnique(num): """ Check whether all digits of a number are unique. Args: num(int) : number Returns: bool : True if no digit repeats, otherwise False """ # Start traversing the digits repeated = [0,0,0,0,0,0,0,0,0,0] # if a digit occurs more than 1 time then break while (num): if repeated[num % 10] == 1: break repeated[num % 10] = 1 num = (int)(num / 10) # num will be 0 only when the loop above doesn't break, thus it's unique if num == 0: return True return False
98f47f36e7d4f8d1b693d4bdfcebe7f1c8d98683
150,712
def check_output_format(expected_formats): """ Decorator for stream outputs that checks the format of the outputs after modifiers have been applied :param expected_formats: The expected output formats :type expected_formats: tuple, set :return: the decorator """ def output_format_decorator(func): def func_wrapper(*args, **kwargs): self = args[0] if self.output_format not in expected_formats: raise ValueError("expected output format in {}, got {}".format(expected_formats, self.output_format)) return func(*args, **kwargs) return func_wrapper return output_format_decorator
8cfcca474d3d008835f9cd722cfc9f567c7a53da
685,779
from textwrap import dedent def docstring(**kwargs): """ Replace values in the decorated function docstring. """ def decorator(fn): fn.__doc__ = dedent(fn.__doc__).format(name=fn.__name__, module=fn.__module__, **kwargs) return fn return decorator
3e8ae541d6ae33bcafb1562dd0ec63de3d3f703b
427,526
def convert_cidr_to_netmask(netmask_cidr: str) -> str: """ This function will convert a CIDR prefix length into a standard dotted-decimal netmask (e.g. 255.0.0.0) :param netmask_cidr: prefix length of the netmask in CIDR notation, e.g. "24" :return str: IP address netmask in dotted-decimal format, e.g. 255.255.255.0 """ bits = 0 for i in range(32 - int(netmask_cidr), 32): bits |= (1 << i) return ( "%d.%d.%d.%d" % ( (bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8, (bits & 0xff) ) )
91808f853508d56a6e1c592cd2d98abb8e9c466d
336,869
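Usage sketch for the convert_cidr_to_netmask entry above; note that the function expects the bare prefix length (e.g. "24"), since it calls int() on its argument:

convert_cidr_to_netmask("24")  # '255.255.255.0'
convert_cidr_to_netmask("16")  # '255.255.0.0'
convert_cidr_to_netmask("8")   # '255.0.0.0'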
def unflatten(d): """ This unflattens a flattened dictionary whose keys use dotted paths. Args: d (dict): The flattened dictionary to unflatten. Returns: items (dict): The unflattened (nested) dictionary. """ items = {} for k, v in d.items(): keys = k.split('.') sub_items = items for ki in keys[:-1]: try: sub_items = sub_items[ki] except KeyError: sub_items[ki] = {} sub_items = sub_items[ki] sub_items[keys[-1]] = v return items
6e7ece99ece7c7faa9ed0e317499df5d2db098d2
465,578
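Usage sketch for the unflatten entry above (the keys are hypothetical):

flat = {"a.b": 1, "a.c": 2, "d": 3}
unflatten(flat)  # {'a': {'b': 1, 'c': 2}, 'd': 3}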
def loadEntireTxtFile(filename): """ Load entire file (text mode) and return its content. """ rf = open(filename, "r") try: result = rf.read() return result finally: rf.close()
0f9f7688172c3542a78aaf1b145bef8314479285
346,539
def show_factor(factor): """ Convert a factor back into a string. >>> show_factor(parse_factor('c1ds[p1m]')) 'c1ds[p1m]' """ return "{1}[{2}]".format(*factor)
f4b67ff0197d8e5d0074911ad89f9cf8a9880bae
393,623
def get_vpcs(cfmclient, uuid=None): """ Get a list of VPCs currently defined in Composable Fabric. :param cfmclient: Connected CFM API client :param uuid: specific VPC UUID to retrieve :return: list of VPC dictionary objects :rtype: list """ path = 'v1/vpcs' if uuid: path += '/{}'.format(uuid) return cfmclient.get(path).json().get('result')
a9c9d1909332cc0fbe149755f18e416411ad08d3
261,158
def iround(x): """Round floating point number and cast to int.""" return int(round(x))
0cc45c946c8325a2e17306f79bbb759b8844a64d
536,013
import torch import math def importance_sampling_cross_validation(logp): """Compute the importance-sampling cross validation (ISCV) estimate. The ISCV estimates the holdout log-likelihood from just an approximation to the posterior predictive log-likelihoods on the training data. ### References: [1]: Alan E. Gelfand, Dipak K. Dey, Hong Chang. Model determination using predictive distributions with implementation via sampling-based methods. Technical report No. 462, Department of Statistics, Stanford university, 1992. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.860.3702&rep=rep1&type=pdf [2]: Aki Vehtari, Andrew Gelman, Jonah Gabry. Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. arXiv:1507.04544 https://arxiv.org/pdf/1507.04544.pdf [3]: Sumio Watanabe. Mathematical Theory of Bayesian Statistics. CRC Press. 2018 https://www.crcpress.com/Mathematical-Theory-of-Bayesian-Statistics/Watanabe/p/book/9781482238068 Args: logp: Tensor, shape (B,M,...), containing log p(y_i | x_i, theta_j) for i=1,..,B instances and j=1,...,M models. Returns: iscv_logp: Tensor, (...), the ISCV estimate of the holdout log-likelihood. iscv_logp_sem: Tensor, (...), the standard error of the mean of `iscv_logp`. """ logse = torch.logsumexp(-logp, 1) iscv_logp = logse.mean(0) iscv_logp_var = logse.std(0) m = int(logp.shape[1]) iscv_logp -= math.log(m) iscv_logp = -iscv_logp iscv_logp_sem = (iscv_logp_var / float(m)).sqrt() return iscv_logp, iscv_logp_sem
9bef3b3c3775e359d52a321a8e72b69d38f0fcb7
22,217
from typing import Optional from datetime import datetime import re def to_datetime(timestamp: Optional[str]) -> Optional[datetime]: """ Convert string to datetime of formats:: '2019-05-24 09:26:22' 'Friday 24th May 2019 10:01:44' Args: timestamp (str): String timestamp Returns: datetime if input is a valid datetime string """ if timestamp is None: return None try: # '2019-05-24 09:26:22' return datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S") except ValueError: pass try: # 'Friday 1st May 2019 10:01:44'. Used in verified_at for subscribers sub_timestamp = re.sub(r"\b([0123]?[0-9])(st|th|nd|rd)\b", r"\1", timestamp) return datetime.strptime(sub_timestamp, "%A %d %B %Y %H:%M:%S") except ValueError: pass raise ValueError("datetime string '{}' not supported".format(timestamp))
1eac7674da1ee00cff6bf74308ab77a630d217db
265,250
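Usage sketch for the to_datetime entry above, showing both supported formats and the None passthrough:

to_datetime("2019-05-24 09:26:22")            # datetime(2019, 5, 24, 9, 26, 22)
to_datetime("Friday 24th May 2019 10:01:44")  # datetime(2019, 5, 24, 10, 1, 44)
to_datetime(None)                             # None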
def calculate_mae_and_mape(predictions): """ Calculate the mean absolute error (MAE) and the median absolute percentage error (MAPE). """ predicted = predictions["predicted"] actual = predictions["actual"] absolute_errors = (predicted - actual).abs() mean_absolute_error = absolute_errors.mean() # The percentage error is undefined when the actual measurement is 0. percentage_error = absolute_errors / actual percentage_error = percentage_error[actual != 0] # Take the absolute value after dividing to have all positive metrics mean_absolute_percentage_error = 100 * percentage_error.abs().median() return mean_absolute_error, mean_absolute_percentage_error
749be4270ab78b94df6177cf679d35d0748cb063
183,124
def compute_factorial(n: int) -> int: """ Compute n-factorial. :param n: Number to compute factorial for :return: n-factorial """ if (not isinstance(n, int)) or (n < 0): raise ValueError("compute_factorial() only accepts non-negative integer values.") factorial = 1 for i in range(1, n + 1): factorial *= i return factorial
75061c245376f09ec01e6bcf018d04e938f419c1
18,632
def split_once_or_fail_with(strink, pattern, error_message): """ split selected string (string is a builtin, hence strink) using the given pattern; raise RuntimeError with provided error message if that fails :param strink: str, string to split :param pattern: str, pattern to split on :param error_message: str, error message to raise in case the split fails :return: (str, str) """ try: k, v = strink.split(pattern, 1) except ValueError: raise RuntimeError(error_message) # intentionally turn the ValueError into a RuntimeError carrying the provided, clearer message return k, v
f36f9e17b79741b067718e55157d9f78ee1af13d
258,511
def truncate_str(tr_str, num_chars, terminator=None): """ Truncates tr_str at num_chars and appends a terminator message. If num_chars == 0, return the original string. """ default_terminator = "<TRUNCATED>" if terminator is None: terminator = default_terminator if num_chars == 0 or len(tr_str) <= num_chars: return tr_str return "%s%s" % (tr_str[:num_chars], terminator)
09d235cb745dfee687325ac70f02effee7d0570c
358,743
import hashlib def truncate_value(value, max_length=63, from_left=True): """ Truncate a value (typically a column name) to a certain number of characters, using a hash to ensure uniqueness. """ hash_length = 8 truncated_length = max_length - hash_length - 1 value = value.encode('unicode-escape') if from_left: truncated_value = value[-truncated_length:] else: truncated_value = value[:truncated_length] if len(value) > max_length: short_hash = hashlib.sha1(value).hexdigest()[:hash_length] return '{}_{}'.format(truncated_value.decode('utf-8'), short_hash) return value.decode('utf-8')
124f90f1da40abaffdd8f3c17ca7692df2c16213
584,645
def split_dataset(data, frac_train=0.7): """ Splits data into training and test sets according to 'frac_train' Parameters ---------- data : numpy.ndarray data array to be split frac_train : float, from 0 to 1 fraction of samples in training set Returns ------- training_set : numpy.ndarray array with training observations test_set : numpy.ndarray array with test observations """ n_train = int(frac_train * data.shape[0]) return data[:n_train], data[n_train:]
f933cb28bc43f2d8489e8a2e1e6ad94ea4ce2d24
172,005
from typing import Any def is_hashable(obj: Any) -> bool: """Test if object is hashable via duck typing. NB: not using ``collections.Hashable`` as some objects (e.g. pandas.Series) have a ``__hash__`` method to throw a more specific exception. """ try: hash(obj) return True except Exception: return False
14481da808a8cfb3969f47481579f938c7e2f1bb
183,775
def _triplet_with_context(seq: str, i: int): """Return the ith triplet in a sequence with the previous and following nucleotide Args: seq (str): Sequence i (int): 0-based index of the target triplet. Returns: str: The target triplet with its flanking nucleotides. """ start = i * 3 # clamp the lower bound at 0 so the first triplet does not wrap around to the end of the sequence return seq[max(start - 1, 0):start + 4]
4f878f16d0d64d1eba7177c853510391196f2f06
282,967
def prime(num: int) -> bool: """Return True if the number is prime, i.e. it has exactly two divisors (1 and itself).""" divisors = [i for i in range(1, num + 1) if num % i == 0] return len(divisors) == 2
976b84a5946679dba8f248426842ecc1e81b576b
607,106
def sequence_recovery(native_struct,designed_struct): """calculate percent sequence recovery between a native and designed struct""" native_residues = native_struct.get_residues() designed_residues = designed_struct.get_residues() total = 0.0; recovered = 0.0; for native,designed in zip(native_residues,designed_residues): if native.get_resname() == designed.get_resname(): recovered += 1 total += 1 #print recovered, total return recovered/total
2b6541dd25406a1569e510f1ea59ca6bd6b14e25
595,268
import random def cxTwoPoints(ind1, ind2): """Execute a two points crossover on the input individuals. The two individuals are modified in place and both keep their original length. :param ind1: The first individual participating in the crossover. :param ind2: The second individual participating in the crossover. :returns: A tuple of two individuals. This function use the :func:`~random.randint` function from the python base :mod:`random` module. """ size = min(len(ind1), len(ind2)) cxpoint1 = random.randint(1, size) cxpoint2 = random.randint(1, size - 1) if cxpoint2 >= cxpoint1: cxpoint2 += 1 else: # Swap the two cx points cxpoint1, cxpoint2 = cxpoint2, cxpoint1 ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] \ = ind2[cxpoint1:cxpoint2], ind1[cxpoint1:cxpoint2] return ind1, ind2
3491ac0010d76f52aa97e163de7766ce1c9e993c
146,521
import torch def l2_loss(t): """Return the l2 loss.""" return 0.5 * torch.sum(t ** 2)
1cea9b6b6e8bb30698dd44675e219a3bfe05287a
132,274
def ComponentStaticLibrary(env, lib_name, *args, **kwargs): """Pseudo-builder for static library. Args: env: Environment in which we were called. lib_name: Static library name. args: Positional arguments. kwargs: Keyword arguments. Returns: Output node list from env.ComponentLibrary(). """ return env.ComponentLibrary(lib_name, *args, **kwargs)
c5a8fc1d559c6b24d02b44d3bbfe3bdb2d360cb0
247,023
import socket def Bind(port, socket_type, socket_proto): """Try to bind to a socket of the specified type, protocol, and port. This is primarily a helper function for PickUnusedPort, used to see if a particular port number is available. Args: port: The port number to bind to, or 0 to have the OS pick a free port. socket_type: The type of the socket (ex: socket.SOCK_STREAM). socket_proto: The protocol of the socket (ex: socket.IPPROTO_TCP). Returns: The port number on success or None on failure. """ s = socket.socket(socket.AF_INET, socket_type, socket_proto) try: try: s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(('', port)) return s.getsockname()[1] except socket.error: return None finally: s.close()
2c5682b331e6bf547d1d3a8dafc52b76369aee0c
91,031
from bs4 import BeautifulSoup def get_sorted_id_from_soup(soup:BeautifulSoup): """ Collects unique and duplicated IDs from a markup language document. Args: soup (BeautifulSoup): markup language as BeautifulSoup object Returns: dict: {'unique': [...], 'duplicate': [...]} lists of IDs Modules: bs4 (BeautifulSoup) """ unique, dupe = [], [] for tag in soup.find_all(attrs={'id':True}): tag_id = tag.get('id') if tag_id not in unique: unique.append(tag_id) else: dupe.append(tag_id) return {'unique': unique, 'duplicate': dupe}
f6aace3f7431dbcf40b01c20b14de200d7aed680
293,831
def accuracy(tp: float, fn: float, tn: float, fp: float) -> float: """ Calculate accuracy. :param tp: True positive count. :param fn: False negative count. :param tn: True negative count. :param fp: False positive count. :return: Accuracy. """ return 0.0 if tp + tn + fp + fn == 0 else (tp + tn) / (tp + tn + fp + fn)
32b31e875e68090c6a5632cdc162febda974af45
453,069
import json def read_jsonFile(filePath): """ Reads any json file and returns the result """ data = None with open(filePath, 'r') as file: data = json.loads(file.read()) return data
d611c8ac1c6be02b341cb5d0553988a18635d739
524,126
import json def loadJsonArgs(fn): """ Load the .json file containing input values Args: fn: file name Returns: args: argument dictionary """ with open(fn) as data_file: data = json.load(data_file) args = {} args['patient_id'] = data['Patient ID'] args['start_phase'] = data['Start Phase'] args['total_phase'] = data['Total Phase'] args['im_name'] = data["Image Name"] args['model_output']=data["Output Surface Model Name"] args['seg_name'] = data["Segmentation Name"] args['im_top_dir'] = data["Image Top Dir"] args['seg_folder_name'] = data["Segmentation Folder Name"] args['im_folder_name'] = data["Image Folder Name"] args['out_dir'] = data["Output Dir Name"] args['num_interpolation']=data["Number of Interpolations"] args['num_cycle'] = data["Number of Cardiac Cycles"] args['duration'] = data["Cycle Duration (s)"] args['edge_size'] = data["Mesh Size"] args['mask_folder_name'] = data["Mask Folder Name"] return args
51e8a0fdaf53836cf831701ff6921479a8d8e03f
20,030
from typing import List from typing import Dict def remove_audio_before_db(examples: List[Dict]) -> List[Dict]: """Remove (potentially heavy) 'audio' key from examples Parameters ---------- examples : list of dict Examples. Returns ------- examples : list of dict Examples with 'audio' key removed. """ for eg in examples: if "audio" in eg: del eg["audio"] return examples
5e0b761ffc899f5510dd0549e88ed0b6ffd9eda6
276,259
def isCompatible(c1, c2, IUPAC_compatibles): """ Checks compatibility between character c1 and c2 """ if IUPAC_compatibles[c1 + "_" + c2] == 1: return True else: return False
f8c21c1cc1b438754cd9eaa40939a59d77100536
576,187
def create_plot_data(f, xmin, xmax, n): """ Computes and returns values of y = f(x). f -- function of x xmin -- minimum value of x xmax -- maximum value of x n -- number of values of x returns values of x and y """ xs = [] ys = [] for i in range(n): xi = xmin + float(i) * (xmax - xmin) / (n - 1) yi = f(xi) xs.append(xi) ys.append(yi) return (xs, ys)
73e5245d38454f31ffc78c9bab3723f74d7db331
186,700
import math def absolute_value(vector): """Returns the absolute value (Euclidean norm) of a vector: ``abs(a) = sqrt(a1**2 + a2**2 + a3**2)`` :param vector: the vector whose absolute value will be calculated :type vector: vector.abstractvector.DocumentVector :returns: float """ square_values = [v ** 2 for v in vector.values] return math.sqrt(sum(square_values))
ad29087601f706113dcadb1dd77e780c22045a80
582,866
from typing import Union from typing import Any from typing import Sequence def supports_iteration(value: Union[Any, Sequence[Any]]) -> bool: """Returns ``True`` if the ``value`` supports iterations.""" try: for _ in value: return True except TypeError: pass return False
b74b0ffc85fdfdabfdd1fb5f352c23632966eb97
31,650
def make_sql(table_name, max_rows=None, for_eval=False): """Creates the sql command for pulling data from BigQuery. Args: table_name: BigQuery table name max_rows: if set, limits the number of rows pulled from BigQuery for_eval: True if this is for evaluation, false otherwise Returns: sql command as string """ if for_eval: # 1/3 of the dataset used for eval where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) = 0' else: # 2/3 of the dataset used for training where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) > 0' limit_clause = '' if max_rows: limit_clause = 'LIMIT {max_rows}'.format(max_rows=max_rows) return """ SELECT CAST(pickup_community_area AS string) AS pickup_community_area, CAST(dropoff_community_area AS string) AS dropoff_community_area, CAST(pickup_census_tract AS string) AS pickup_census_tract, CAST(dropoff_census_tract AS string) AS dropoff_census_tract, fare, EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month, EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour, EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day, UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp, pickup_latitude, pickup_longitude, dropoff_latitude, dropoff_longitude, trip_miles, payment_type, company, trip_seconds, tips FROM `{table_name}` {where_clause} {limit_clause} """.format(table_name=table_name, where_clause=where_clause, limit_clause=limit_clause)
2c333d1b4aa49fd63613840756028be1eabea227
422,200
def calculate_size(transformed_dataset): """Helper function to calculate the total size of a dataset Args: transformed_dataset (dict): a ``TransformedDataset`` instance, which maps (str) table name to {'size': (float) size of the table in byte, 'row_size': (float) the size of a row in byte, 'entries': (set) the column names, 'chosen': (set) the rows selected} Returns: float: the dataset size in byte """ size = 0 for table in transformed_dataset.values(): size += table['row_size'] * len(table['chosen']) return size
5cf82114c6849ab6555a4a1fc3f6599e37ed4fae
124,841
def list_extremum(data: list, type=0) -> tuple: """ Find the extremum of a list and return its index; if the extremum occurs more than once, return every matching index. Args: data: the input list type: 1 finds the maximum, 0 finds the minimum Returns: the extremum and the list of its indices """ if type == 1: # maximum ex = max(data) elif type == 0: # minimum ex = min(data) else: raise Exception("The value of 'type' should only be 0 or 1.") # collect all matching indices idx = [id for id, item in enumerate(data) if item == ex] return ex, idx
adf513af5221491b51ae7a105f5feaa96617126a
671,255
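Usage sketch for the list_extremum entry above (sample data is made up); a repeated extremum yields several indices:

list_extremum([3, 1, 4, 1, 5], type=0)  # (1, [1, 3])
list_extremum([3, 1, 4, 1, 5], type=1)  # (5, [4])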
def mk_endpoint_config(prj, endpoint_name, model_name): """ Return endpoint config creation parameters. """ return { "EndpointConfigName": prj.full_job_name(endpoint_name) + "-config", "ProductionVariants": [ { "VariantName": prj.full_job_name(model_name) + "-variant-1", "ModelName": prj.full_job_name(model_name), "InitialInstanceCount": 1, "InstanceType": prj.deploy.instance_type, "InitialVariantWeight": 1.0, } ], "Tags": prj.tags(), }
e556a144abd825dcaa842d00dbc985798c487d00
615,594
from io import StringIO def build_from_imports_py(imports_info): """ Build a string containing a series of `from X import Y` lines Parameters ---------- imports_info : list of str or (str, str or list of str) List of import info. If an element is a pair, the first entry is the package to be imported from and the second entry is either a string of the single name to be imported or a list of names to be imported. If an element is a string, insert the string directly Returns ------- str String containing a series of imports """ buffer = StringIO() for import_info in imports_info: if isinstance(import_info, tuple): from_pkg, class_name = import_info if isinstance(class_name, str): class_name_str = class_name else: class_name_str = "(" + ", ".join(class_name) + ")" buffer.write( f"""\ from {from_pkg} import {class_name_str}\n""" ) elif isinstance(import_info, str): buffer.write(import_info) return buffer.getvalue()
4f15258af0c8040572f5b117f683b8481a970e2b
438,595
def argmin(x): """ Returns the index of the smallest element of the iterable `x`. If two or more elements equal the minimum value, the index of the first such element is returned. >>> argmin([1, 3, 2, 0]) 3 >>> argmin(abs(x) for x in range(-3, 4)) 3 """ argmin_ = None min_ = None for (nItem, item) in enumerate(x): if (argmin_ is None) or (item < min_): argmin_ = nItem min_ = item return argmin_
8d6778182bf3c18ffa6ef72093bf19a818d74911
4,051
def distance_squared(x1, y1, x2, y2): """ Helper function to return the square of the distance between points (x1,y1) and (x2,y2). :Parameters: x1: float X coordinate of first point y1: float Y coordinate of first point x2: float X coordinate of second point y2: float Y coordinate of second point :Returns: distsq: float The square of the distance, (x1-x2)**2 + (y1-y2)**2 """ xdiff = float(x1) - float(x2) ydiff = float(y1) - float(y2) distsq = (xdiff * xdiff) + (ydiff * ydiff) return distsq
66fead189b9b1984adf5d4dc24dc57c27cf6a2d9
593,587
def get_offset_scale_attr(scale_attr=""): """ utility node for getting the offset scale attribute name. :param scale_attr: <str> scale name attribute. :return: <str> offset scale attribute. <bool> False for failure. """ if 'scaleX' in scale_attr: return 'offset_scaleX' if 'scaleY' in scale_attr: return 'offset_scaleY' if 'scaleZ' in scale_attr: return 'offset_scaleZ' return False
b1ac88d8e00818ae0d9ae5454533458e25211f1b
564,046
def MakeGuestAccelerators(messages, accelerator_configs): """Constructs the repeated accelerator message objects.""" if accelerator_configs is None: return [] accelerators = [] for a in accelerator_configs: m = messages.AcceleratorConfig( acceleratorCount=a['count'], acceleratorType=a['type']) accelerators.append(m) return accelerators
015c3576aa1c490b7b4e0be104ae7def0c303ad9
137,864
import re def _clean_requirements_entries(requirements_list: list) -> list: """ Remove version information from a list with requirement strings. """ _entry_regex = r'^(.*?)\s*[;><=].*$' for index, entry in enumerate(requirements_list): requirements_list[index] = re.sub(_entry_regex, r'\1', entry) return requirements_list
d826ddb6c7a1d49a7c5fe577aa649d4210edad54
353,158
def list_to_unicode_str(lst): """ Convert list of large integers to a unicode string. """ return "".join(map(chr, lst))
1fc3dde75806c2cafe0c6965b15b9b19e53d05a4
103,926
def worst(category): """ Category status is the lowest status of all modules of the same type as the category (only plots or rated modules, depending) with a non-negative status value (no error, data acquisition succeeded). 'unrated' modules are always per definition excluded. If there is no correct module with non-negative status, the category status is set to -1 (no information). """ status = 1.0 for mod in category.module_list: if mod.dataset is None: continue if status > mod.dataset['status'] >= 0 and mod.type == category.type: status = mod.dataset['status'] return status
876a758707be691305446fc0a106ca4c279ef13b
214,552
from typing import Dict from typing import Any def merge_parameters( higher_priority_params: Dict[str, Any], lower_priority_params: Dict[str, Any] ) -> Dict[str, Any]: """Merge the contents of two dicts, keeping values from higher_priority_params if there are conflicts.""" return {**lower_priority_params, **higher_priority_params}
31fefe13b36e4e06bd6c733ac1447f938169c8ed
384,125
def rightangletriangles(i): """Generate all possible right angle triangles with integer edges for the given integer perimeter i. Returns a list of tuples: [(longest, secondlong, shortest), ..] """ lmin=int(i/(1+2**0.5)) lmax=int(i/2) lt=[] for l in range(lmin, lmax): for s in range( int((i-l)/(2**0.5)) ,l): t=i-l-s if l*l==s*s+t*t: lt.append((l,s,t)) return lt
558040636b93b35178c787a51d7184cfc4f04fb9
357,188
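Usage sketch for the rightangletriangles entry above; perimeter 12 gives the classic 3-4-5 triangle, reported as (longest, secondlong, shortest):

rightangletriangles(12)  # [(5, 4, 3)]
rightangletriangles(30)  # [(13, 12, 5)]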
def inject_text(htmltext, intext): """ Insert text from a file into an html string. Arguments: htmltext -- string where text will be added. intext -- input file Returns: text with intext added between <p></p> characters """ parts = htmltext.split('<p></p>') with open(intext, 'r') as infile: adddata = infile.read() return "%s<p>%s</p>%s" % (parts[0], adddata, parts[1])
0eb9cfbd29af998af0ad76c3341e8c07368b982e
141,283
def nan_check(test_dataframe): """ :param test_dataframe: pandas data frame :return: bool, whether there is any missing value in the data frame. """ return test_dataframe.isnull().values.any()
c3eaf976d4d10001800f1e0946da4ae36f6887a7
73,289
from typing import Tuple def getSqlQuery(db_table_name: str='movie') -> Tuple[str, str]: """Get the SQL insert query by table name; returns the query string and the table name. """ query = '' if db_table_name == 'movie': query = f''' INSERT INTO {db_table_name} ( rg_id, scraped_timestamp, title, year, overview, rating, imdb_score, reelgood_rating_score, url_offset_value ) VALUES''' + '(%s, NOW(), %s, %s, %s, %s, %s, %s, %s);' elif db_table_name == 'availability': query = f''' INSERT INTO {db_table_name} ( scraped_timestamp, rg_id, source_name, source_movie_id, source_web_link ) VALUES''' + '(NOW(), %s, %s, %s, %s);' return query, db_table_name
658ffd8de7060e291700fc29af886013270ed17a
595,114
def search_viewtransforms(config, aces_id): """ Search the config for the supplied ACES ID, return the view transform name. """ for vt in config.getViewTransforms(): desc = vt.getDescription() if aces_id in desc: return vt.getName() return None
c49df124060819b023f8cdcdd299d7c66b950a23
139,410
def are_seqs_equal(seq_1,seq_2): """ seq_1: list of (onset,pitch) tuples seq_2: list of (onset,pitch) tuples Returns True if the two lists are equal. """ if len(seq_1)!=len(seq_2): return False for i in range(len(seq_1)): if seq_1[i][0]!=seq_2[i][0] or seq_1[i][1]!=seq_2[i][1]: return False return True
a181800d829f72c76b88a3ce903d281da998e088
166,763
def get_state_xy(idx, num_cols): """Given state index this method returns its equivalent coordinate (x,y). Args: idx: index uniquely identifying a state num_cols: number of colums Returns: values x, y describing the state's location in the grid """ y = int(idx % num_cols) x = int((idx - y) / num_cols) return x, y
28595312d5a8c9a6a52248e5e6c2149df5a18d90
199,104
def _parse_response(response, decode_json): """ Helper function that extracts the status code and parses the response text. """ ret = {'status': response.status_code} if decode_json: try: ret['response'] = response.json() except ValueError: ret['response'] = response.text else: ret['response'] = response.text return ret
c534fb5e891e6d85471577b1482c39c0441a6840
399,719
def encodeWord(word: str, isNormalVersionForLogicallyThinkingHuman: bool) -> str: """Encode single word into its numerical representation in given system (e.g. "koko" -> "3131")""" map1 = {1:['w', 'e', 'r', 'u', 'i', 'o', 'a', 's', 'z', 'x', 'c', 'v', 'n', 'm', 'ę', 'ó', 'ą', 'ś', 'ń', 'ć', 'ż', 'ź'], 2: ['p', 'y', 'j', 'g', 'q'], 3: ['t', 'l', 'b', 'd', 'h', 'k', 'ł'], 4: ["f"]} map2 = {1:["a", "c", "e", "m", "n", "o", "r", "s", "u", "w", "z", "x", "v"], 2: ["ą", "ę", "g", "j", "p", "y", "q"], 3: ["b", "ć", "d", "h", "k", "l", "ł", "ń", "ó", "ś", "t", "ź", "ż", "i"], 4: ["f"]} selectedVersionMap = map2 if isNormalVersionForLogicallyThinkingHuman else map1 numbered = '' for char in word: for key in selectedVersionMap.keys(): if char in selectedVersionMap[key]: numbered = numbered + str(key) break return numbered
02da1f692bbfc7d50383d190a905d4a4d1595fef
631,735
def header_column_rename(header_list: list, column_name: str, column_index: int): """Rename a column in a header list. Args: header_list (list, required): list of header names. column_name (str, required): New name for the column column_index (int, required): index of the column Returns: list: header list with the column renamed """ header_list[column_index] = column_name return header_list
029691ff8a48636154aff592de25d3ac61797481
157,145
def list_of_dict_to_dict(list_of_dict, key): """ Converts a list of dicts to a dict of dicts based on the key provided. Also removes the key from the nested dicts. converts [{key: v1, k2:v12, k3:v13}, {key:v2, k2: v22, k3:v23}, ... ] to {v1: {k2: v12, k3:v13}, v2:{k2:v22, k3:v23}, ...} Args: list_of_dict: eg: [{k1: v1, k2:v12, k3:v13}, {k1:v2, k2: v22, k3:v23}, ... ] Returns: dict_of_dict: eg: {v1: {k2: v12, k3:v13}, v2:{k2:v22, k3:v23}, ...} """ dict_of_dict = {} for item in list_of_dict: # item will be the nested dict value = item[key] # This will be the "key" in dict_of_dict item.pop(key) # removes key from the nested dict dict_of_dict[value] = item # adds item to the new dict return dict_of_dict
e88fc4e6bec2752d7797feae1da32010088de412
254,592
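Usage sketch for the list_of_dict_to_dict entry above (note that the input dicts are mutated: the key is popped from each of them):

rows = [{"name": "a", "x": 1}, {"name": "b", "x": 2}]
list_of_dict_to_dict(rows, "name")  # {'a': {'x': 1}, 'b': {'x': 2}}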
def findMin(heap): """ Returns the minimum element in the heap. """ if len(heap) == 0: raise IndexError("findmin on empty heap") return heap[0]
858390e1e2d893910206ae4f0045406f737ea6c3
269,105
def add_custom_variables(reco_event,truth_event): """ Example of adding custom variables to the csv - this is a dummy and returns the value of 1.0 and the eventNumber twice for every event. The objects passed to this function are the two root2csv TTrees. This function MUST return an array """ return [1.0,reco_event.eventNumber,truth_event.eventNumber]
cebce4865314703c41e2cd346d693c518e29781e
64,409
def factorial(x: int)-> int: """ This function returns the `Factorial` value of a number which is specified in the function by user. This function takes exactly one argument. Args: `x: int` : x should be an `integer number` for calculating the factorial value of x. """ fact = 1 for i in range(1, x+1): fact*=i return fact
43c9debabdee8ec0a85a1d66fd21c498761a043f
281,844
def p_wrap(content): """wrap string in a <p> tag""" return "<p>%s</p>" % content
7f164739df4916693bb006e97c16d5eb126f4650
417,332
def gN(node): """ return True if node is gizmo """ return 'gizmo_file' in node.knobs()
446edcc46e775413b32ddecf844d994c2a64a2f0
236,675
def is_valid_xml_char(char): """Check if a character is valid based on the XML specification.""" codepoint = ord(char) return (0x20 <= codepoint <= 0xD7FF or codepoint in (0x9, 0xA, 0xD) or 0xE000 <= codepoint <= 0xFFFD or 0x10000 <= codepoint <= 0x10FFFF)
8bd5edf78246d9ba2ef4f50a48780c6a210ffdb0
652,528
def scan_for_bombs(board, n_bomb, row, column): """ Finds and potentially marks bombs and neighbors eligible to be opened. Returns a dict of squares to open and squares whose neighbors should be inspected. """ bombs = 0 # Bombs discovered unopened_neighbors = [] # Unopened neighbors val = int(board[row][column]) # Value of this square (number of neighboring bombs) # Bombs are discovered and unopened neighbors are found for i in range(max(0, row - 1), min(len(board), row + 2)): for j in range(max(0, column - 1), min(len(board[0]), column + 2)): if board[i][j] == "x": bombs += 1 elif board[i][j] == "?": unopened_neighbors.append((i, j)) # Updates the number of bombs found for this square n_bomb[row][column] = bombs # Squares to open and bombs to mark are found. # "update" are squares that shall notify their neighbors to re-inspect. updates = {"update": []} if bombs == val: # Neighbors can be opened if the required number of bombs are discovered already updates["open"] = unopened_neighbors elif bombs + len(unopened_neighbors) == val: # Bombs can be marked and their neighbors updated for r, c in unopened_neighbors: board[r][c] = "x" updates["update"] = unopened_neighbors return updates
e09477e5a5e33a90b101851583f94db5237e4eb4
440,045
def calibration(i): """This function gives wavelength as a function of spectrum array index.""" a,b = 3.04956866e-01, 3.82033291e+02 return a*i + b
e96b5c353d8745eb865e3a0c8fd08df34e1da134
355,650
def echo_handler(completed_proc): """Immediately return ``completed_proc``.""" return completed_proc
53f3ef51bf349ac5146014ef25b88326d5bc010e
705,302
def valueFunction1(v,alpha,_lambda,beta): """ The value function used in prospect theory :param v: Value :param alpha: :param _lambda: :return: """ if v>=0: return v**alpha else: return -_lambda*(-v)**beta
01d325d68dc0654748db0457065013ba4796f777
280,661
from typing import Iterable def aslist(item): """ Wraps a single value in a list, or just returns the list """ if isinstance(item, list): value = item elif isinstance(item, str): value = [item] elif isinstance(item, Iterable): value = list(item) else: value = [item] return value
08c97d20fb9c5644f694c570ef89b63751402e46
100,371
def get_max_words_with_ngrams(max_words, word_ngrams): """ Calculate the length of the longest possible sentence :param max_words: int, the length of the longest sentence :param word_ngrams: int :return: int, the length of the longest sentence with word n-grams """ max_words_with_ng = 1 for ng in range(word_ngrams): max_words_with_ng += max_words - ng return max_words_with_ng
60cc2fc0e044dae570092e59c661a1b71ef1b617
505,452
import keyword def make_name(name: str) -> str: """Generates a valid Python name from a system model name. Args: name (str): The system model name. Returns: str: The generated name. """ for c in '-#*%.:': name = name.replace(c, '_') if keyword.iskeyword(name): return name + '_' if name == 'id': return 'id_' if name == 'type': return 'type_' return name
7171a54956815a0f0c61852d1ec84464226f3dd7
226,104
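Usage sketch for the make_name entry above, showing the character replacement and the keyword/reserved-name suffixing:

make_name("my-model.v2")  # 'my_model_v2'
make_name("class")        # 'class_' (Python keyword)
make_name("id")           # 'id_'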
def role_to_tag(role: str) -> str: """Return the isRole object bool tag for the given object role.""" return 'is' + role.title().replace(' ', '')
38dcc8348c2ebaef17e57bba2c335c6809e85b6d
348,446
def read_id_from_file(path): """Reading the id in first line in a file """ with open(path) as id_file: return id_file.readline().strip()
19058ec4367bdba683e6e5bff109714c4e81be5f
59,479
def relative_percent_difference(x,y): """ Calculates the relative percent difference of 2 numbers |x-y|/avg(x,y) * 100 """ average = abs(x + y) / 2 rpd = abs(x - y) / average * 100 return rpd
1573d89ad38672db2a8e3665c307f98e5a9583db
692,206
def calculate_standard_deviation(pessimistic, optimistic): """ Calculate the standard deviation of a task. """ return round((pessimistic - optimistic) / 6, 1)
4f8aa9c2d118fc278d77fd378c8d727a1ccd5038
602,678
def get_line_from_two_points(p1, p2): """ Returns a function which takes an x-coordinate and returns the corresponding y-coordinate on the line defined by the points p1, p2 """ slope = p2[1] - p1[1] slope /= p2[0] - p1[0] return lambda x: (slope * (x - p1[0])) + p1[1]
81acd90af283b3b09e57a3e8d10a28df44a88af3
86,950
from typing import List from typing import Tuple def _max_len(choices: List[Tuple[str, str]]) -> int: """Return the maximum length of the first subitems of each item.""" return max(len(item[0]) for item in choices)
17cc12f2691a7b5f638017d6ae078e072ec28780
388,376
import hashlib def make_hash(to_hash: str) -> str: """Return a hash of to_hash.""" hash_obj = hashlib.md5() hash_obj.update(to_hash.encode("utf-8")) hash_code = str(hash_obj.hexdigest()) return hash_code
d00796e5d3248b3c9bf9d89ec3ec850ee25ea0c3
514,916
def is_hangul_char(character): """Test if a single character is in the U+AC00 to U+D7A3 code block, excluding unassigned codes. """ return 0xAC00 <= ord(character) <= 0xD7A3
aef2a700c1a1ffa0940e6d263773d62c2df41464
469,844
def _get_tolerance_line(line): """get a data item for a tolerance line with format (each line only one item): i: type=rel, 1e-3 """ assert line, 'Empty line!' line = line.strip().replace(' ','') stmp = line.split(':') key = stmp[0] _type, _val = stmp[1].split(',') _type = _type.split('=')[-1] tol={key:{'type':_type, 'val':float(_val)}} return tol
30a0b8e604f66d49305b91983e1c82112d6caf0b
472,773
def lisää_arvaus_listalle(arvaukset, pakka, arvaus): """Appends the guess and the state of the deck to the list of guesses. Parameters ---------- arvaukset : arr_like List of guesses pakka : arr_like State of the deck before the guess arvaus : int Number of the guessed card Returns ------- arr_like Updated list of guesses """ arvaukset.append((pakka, arvaus)) return arvaukset
42aefd1656eb1ce2c568471e8a560e794ff9959e
273,628
def full_overlap(aIntervalA, aIntervalB): """ Returns True if interval A falls completely within interval B, otherwise returns False""" # Check that both inputs are 3-column intervals if not len(aIntervalA) == len(aIntervalB) == 3: raise Exception("Regions could not be overlapped") if aIntervalA[0] == aIntervalB[0] and aIntervalA[1] >= aIntervalB[1] and aIntervalA[2] <= aIntervalB[2]: return True return False
4c7199d26e0ae5248e632cb52a596d4573e4c589
114,697
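Usage sketch for the full_overlap entry above, with intervals given as (chromosome, start, end); the coordinates are made up:

full_overlap(("chr1", 5, 10), ("chr1", 1, 20))  # True  (A lies inside B)
full_overlap(("chr1", 0, 30), ("chr1", 1, 20))  # False (A starts before B)
full_overlap(("chr2", 5, 10), ("chr1", 1, 20))  # False (different chromosomes)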
def find_fusion_energy_per_reaction(reactants: str) -> float: """Finds the average fusion energy produced per fusion reaction in joules from the fuel type. Args: reactants: the isotopes that are combined in the fusion event. Options are "DD" or "DT" Returns: The average energy of a fusion reaction in Joules """ if reactants == "DT": fusion_energy_of_neutron_ev = 14.06 * 1e6 fusion_energy_of_alpha_ev = 3.52 * 1e6 fusion_energy_per_reaction_ev = ( fusion_energy_of_neutron_ev + fusion_energy_of_alpha_ev ) elif reactants == "DD": fusion_energy_of_triton_ev = 1.01 * 1e6 fusion_energy_of_proton_ev = 3.02 * 1e6 fusion_energy_of_he3_ev = 0.82 * 1e6 fusion_energy_of_neutron_ev = 2.45 * 1e6 fusion_energy_per_reaction_ev = ( 0.5 * (fusion_energy_of_triton_ev + fusion_energy_of_proton_ev) ) + (0.5 * (fusion_energy_of_he3_ev + fusion_energy_of_neutron_ev)) else: raise ValueError("Only fuel types of DD and DT are currently supported") fusion_energy_per_reaction_j = fusion_energy_per_reaction_ev * 1.602176487e-19 return fusion_energy_per_reaction_j
8121802f096a027c885d7569149dafb4a6a70548
629,890
def word_count(data): """ Given a list of strings, count how many times each string occurs in the list. Parameters ---- data: list[str], the list of strings to count Returns ---- re: dict, result hash table whose keys are the strings and whose values are the corresponding counts """ re = {} for i in data: re[i] = re.get(i, 0) + 1 return re
40edfdcf65e5312ee302b341ba61921fd9f5a46e
634,777
import pathlib def rename_item(path, new_path): """Rename item, returns pathlib.Path.""" path = pathlib.Path(path) return path.rename(new_path)
f2440348a4002ec92f11d0948131906ae6857103
371,402
import re def get_indent(str_): """ Find length of initial whitespace chars in `str_` """ # type: (str) -> int match = re.search(r'[^\s]|$', str_) if match: return match.start() else: return 0
a9de80043341b062326bfa58322c37100c91aa06
689,921
def get_test_submission_variant(case_obj): """Returns a test clinvar variant submission object""" variant_subm_obj = { '_id' : "{}_a99ab86f2cb3bc18b993d740303ba27f".format(case_obj['_id']), 'csv_type' : "variant", 'case_id' : case_obj['_id'], 'category' : "snv", 'local_id' : "a99ab86f2cb3bc18b993d740303ba27f", 'linking_id' : "a99ab86f2cb3bc18b993d740303ba27f", 'chromosome' : "5", 'start' : "7666888", 'stop' : "7666888", 'ref' : "A", 'alt' : "T", 'clinsig' : "Pathogenic" } return variant_subm_obj
b26d77c223e7a86711118f4ab2e77cb7e4fd96b7
583,898
def read_iupred(file): """ Read disorder predictions produced by IUPRED2 from the .dis file Inputs: - file: Filename of the .dis file output by IUPRED2 Returns: - prot: Protein sequence (string) - dis: List of predicted disorder (values between 0 and 1) """ f = open(file, 'r') dis = [] prot = '' for line in f: if '#' in line: continue tokens = line.split() assert len(tokens) == 3 prot += tokens[1] dis.append(float(tokens[2])) f.close() return prot, dis
4e843b060e52ba3dc3072d5df89deab6d9847b42
206,079
def get_name(fname): """Get the elements of the file name we need Params: fname -- String: The file name c9_c8_c176_IC12_s4_l_t Returns: The image's component number, scan number, and hemisphere 12, 4, L """ if fname.endswith('.nii.gz'): fname = fname.replace('.nii.gz', '') name_stuff = {} tmp = fname.split('_') # tmp is just a placeholder elems = tmp[-4:-1] # The elements of the file name in a list name_stuff['IC'] = elems[0][2:] # 12 name_stuff['Scan'] = elems[1][1:] # 4 name_stuff['Hemi'] = elems[2].upper() return name_stuff
86928f8ee4677dc63aad08b92e5ecb84563614d7
277,918
def receiver(signal, **kwargs): """Decorator for registering a signal.""" def decorator(func): signal.connect(func, **kwargs) return func return decorator
8494911c7b96a8cfff8e55ef7949c6c39165a888
247,315