Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def _psd_from_mt(x_mt, weights):
    """Compute PSD from tapered spectra.

    Parameters
    ----------
    x_mt : array
        Tapered spectra
    weights : array
        Weights used to combine the tapered spectra

    Returns
    -------
    psd : array
        The computed PSD
    """
    psd = weights * x_mt
    psd *= psd.conj()
    psd = psd.real.sum(axis=-2)
    psd *= 2 / (weights * weights.conj()).real.sum(axis=-2)
    return psd
32df8892a6af1c3e50cd53faa8a2d9231845ad64
132,402
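A minimal usage sketch of the function above, assuming NumPy arrays with the taper axis second-to-last; the shapes and weights here are made up for illustration:

import numpy as np

# Hypothetical shapes: 5 tapers, 64 frequency bins.
rng = np.random.default_rng(0)
x_mt = rng.standard_normal((5, 64)) + 1j * rng.standard_normal((5, 64))
weights = np.ones((5, 1)) / np.sqrt(5)  # uniform taper weights

psd = _psd_from_mt(x_mt, weights)
print(psd.shape)  # (64,) -- one real PSD value per frequency bin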
from math import log10, ceil


def find_scale(pagewidth, pageheight):
    """Find the power of 10 (10, 100, 1000...) that will reduce the scale
    below the PDF specification limit of 14400 PDF units (=200 inches)"""
    major = max(pagewidth, pageheight)
    oversized = major / 14400.0
    return 10 ** ceil(log10(oversized))
8ca190dc679da134829268a65a673b8ddccfa0fc
475,847
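A quick check of the scale computation (page sizes chosen for illustration): a 30,000-unit page needs one factor of 10, a 150,000-unit page needs 100.

print(find_scale(20000, 30000))    # 10  (30000 / 14400 is about 2.1)
print(find_scale(150000, 100000))  # 100 (150000 / 14400 is about 10.4)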
def strip_prefices(columns, prefices):
    """Filters leaderboard columns to get the system column names.

    Args:
        columns(iterable): Iterable of leaderboard column names.
        prefices(list): List of prefices to strip. You can choose one of
            ['channel_', 'parameter_', 'property_']

    Returns:
        list: A list of clean column names.
    """
    new_columns = []
    for col in columns:
        for prefix in prefices:
            if col.startswith(prefix):
                col = col.replace(prefix, '')
                new_columns.append(col)
    return new_columns
387007d3d8ccd160ff5ac001739612632e1e55c9
536,840
def pvc_is_root(pvc):
    """Return True if a persistent volume claim has a root_key."""
    return pvc.spec.selector \
        and pvc.spec.selector.match_labels \
        and pvc.spec.selector.match_labels.get('root_key', None)
935c6a93c9698fc7af008b6459ad557aa73789d7
540,704
def sgd(l_rate, parameters, grads):
    """
    Stochastic Gradient Descent.

    Parameters
    ----------
    :type l_rate: theano.tensor.scalar
    :param l_rate: Initial learning rate

    :type parameters: theano.shared
    :params parameters: Model parameters to update

    :type grads: Theano variable
    :params grads: Gradients of cost w.r.t to parameters
    """
    updates = []
    for param, grad in zip(parameters, grads):
        updates.append((param, param - l_rate * grad))
    return updates
aaa1d11788669801b4edd89aca29b38258043ff1
695,861
def head(sequence):
    """
    Returns first item from `sequence` or :const:`None` if sequence is empty.

    >>> head([1])
    1
    >>> head([2, 3])
    2
    >>> head([(1, 2), (3, 4)])
    (1, 2)
    >>> head([])
    """
    try:
        return next(iter(sequence))
    except StopIteration:
        return None
e2deacc47efbe72410f6b9992ef1bf98098dc786
563,627
def get_label_name(node, dialogue_list):
    """
    Takes a node and returns the name for its label
    """
    for dialogue in dialogue_list:
        if dialogue["Id"] == node["Parent"]:
            return "{}_{}".format(dialogue["DisplayName"], node["Id"])
    # No parent dialogue found for this node.
    return None
0971de9fac5861785edc97625ae9d331449403a3
643,117
def get_original_language(element):
    """
    Returns the original language for a document.
    """
    speaker_language = element.getparent().getparent().getparent().get('LANGUAGE')
    return speaker_language or '?'
f960b6f1fee47b61fa34c2858d46e42914247c80
247,536
def http_header(value):
    """Converts Django HTTP headers to standard format
    e.g.
        HTTP_ACCEPT -> Accept
        HTTP_CACHE_CONTROL -> Cache-Control
    """
    parts = value.split('_')
    header_parts = [part.title() for part in parts[1:]]
    formatted = '-'.join(header_parts)
    return formatted
1059f96ccfcb3dff022e48c31c57c816f97dd294
546,106
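Two sample conversions of the header formatter above, matching its docstring:

print(http_header('HTTP_ACCEPT'))         # Accept
print(http_header('HTTP_CACHE_CONTROL'))  # Cache-Control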
def kadane(list_obj=None):
    """
    Find maximum sum of a subarray

    :param list list_obj: list of objs
    :return: maximum sum of subarray
    :rtype: int

    DOCTESTS
    --------
    Test 1 (list of ints):
    >>> print(kadane([-1, 2, 3, -4, 5, -6]))
    6

    Test 2 (list of ints):
    >>> print(kadane([-1, 2, 3, -6, 5, -6]))
    5

    Test 3 (list of ints):
    >>> print(kadane([3, 2, 3, -7, 5, -6]))
    8

    Test 4 (invalid argument type):
    >>> print(kadane())
    Traceback (most recent call last):
        ...
    TypeError: input must be of type list

    Test 5 (empty list):
    >>> print(kadane([]))
    Traceback (most recent call last):
        ...
    ValueError: list must not be empty
    """
    if type(list_obj) is not list:
        raise TypeError("input must be of type list")
    if not list_obj:
        raise ValueError("list must not be empty")
    max_sum, cur_max = list_obj[0], 0
    for val in list_obj:
        cur_max = max(val, val + cur_max)
        max_sum = max(max_sum, cur_max)
    return max_sum
3f6ace41e1a24df77e8e269c3e2e4386acbb07b3
520,673
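A short usage run of kadane: the running maximum restarts whenever carrying the previous prefix would only hurt the sum.

values = [3, 2, 3, -7, 5, -6]
# Best subarray is [3, 2, 3] with sum 8; extending past the -7 never pays off.
print(kadane(values))  # 8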
def _get_filepath_components(filepath):
    """
    Splits filepath to return (path, filename)
    """
    components = filepath.rsplit("/", 1)
    if len(components) == 1:
        return (None, components[0])
    return (components[0], components[1])
9a5cf6c8667acee8695804f74f91157ee42ea605
620,456
def team_id(conn, team):
    """Returns the id of the team name, or -1 if no team exists"""
    c = conn.cursor()
    c.execute('SELECT id FROM teams WHERE name LIKE ?', (team,))
    team_id = c.fetchone()
    return team_id[0] if team_id else -1
699bac77dcf1bfc2a279b37d428892d6bb5a75ee
454,277
def check_all_rows(A):
    """
    Check if all rows in 2-dimensional matrix don't have more than one queen
    """
    for row_inx in range(len(A)):
        # compute sum of row row_inx
        if sum(A[row_inx]) > 1:
            return False
    return True
e39f4ca3e401c02b13c5b55ed4389a7e6deceb40
707,954
def policy_rollout(agent, env, num_traj, horizon):
    """Rollout an agent to collect trajectories.

    Args:
        agent: an agent to rollout.
        env: an environment to perform rollouts.
        num_traj: the number of trajectories to collect.
        horizon: the maximal number of steps for each trajectory.

    Returns:
        states, actions, rewards and observations from rollout trajectories.
    """
    traj_states = []
    traj_actions = []
    traj_rewards = []
    traj_observations = []
    for _ in range(num_traj):
        time_step = env.reset()
        states = []
        rewards = []
        actions = []
        observations = []
        for _ in range(horizon):
            # MuJoCo specific operations.
            states.append(env._gym_env.get_state())  # pylint: disable=protected-access
            observations.append(time_step)
            action = agent.action(time_step.observation)
            actions.append(action)
            time_step = env.step(action)
            rewards.append(float(time_step.reward))
            if time_step.is_last():
                break
        traj_states.append(states)
        traj_actions.append(actions)
        traj_rewards.append(rewards)
        traj_observations.append(observations)
    return traj_states, traj_actions, traj_rewards, traj_observations
9acdb3767f92626715fb9fbd084e5d42513bc394
691,947
def _sign(num):
    """
    Returns the sign of the input

    Args:
        num - float
    Returns:
        String 'positive' or 'negative'
    """
    if num < 0:
        return 'negative'
    else:
        return 'positive'
9a61ee68f122f9762637508e2f4a8006549b8ef7
245,633
import re


def indent_bibtex(string):
    """
    Takes a oneline bibtex string and adds newlines and indents after each item
    """
    result = re.sub(r'}}$', '}\n}\n', string.strip())
    result = re.sub(r', (?=[a-zA-Z]+={.*?})', r',\n ', result)
    return result
afa2baeb925dc3ed386723b260207d46b00dd059
210,652
def gasPpmvDryToMoist(x, xh2o):
    """
    Take ppmv dry air to ppmv moist air.
    """
    xd = x / (1 + (1e-6 * xh2o))
    return xd
af55f9a211bd4758189866cb955940f411baf14d
397,382
def _no_Ns(nt_seq):
    """
    Returns True if a sequence does not have any N's
    """
    return 'N' not in nt_seq
51941c795d2585ab89ef61526c8d8610a8220f65
133,193
from typing import Union
from pathlib import Path


def file_variations(
    filename: Union[str, Path],  # The original filename to use as a base.
    extensions: list,
) -> list:  # list of Paths
    """Create a variation of file names.

    Generate a list of variations on a filename by replacing the extension
    with the provided list.

    Adapted from T. Olsen's `file_variations` of the pysis module for using
    pathlib.
    """
    return [Path(filename).with_suffix(extension) for extension in extensions]
b8f1953b7b59791ac1c2650aa959fef39ce53f1e
660,569
def iso_string_to_sql_date_mysql(x: str) -> str:
    """
    Provides MySQL SQL to convert an ISO-8601-format string (with punctuation)
    to a ``DATE``, just by taking the date fields (without any timezone
    conversion). The argument ``x`` is the SQL expression to be converted
    (such as a column name).
    """
    return f"STR_TO_DATE(LEFT({x}, 10), '%Y-%m-%d')"
6798578e8633e819e7e55622e53b2cd2be583fc5
695,270
import requests


def ECG_trace_list(MRI):
    """Get a list of patient's ECG images (except the latest one)

    Target a patient in the database by the MRI, send a 'GET' request to the
    server to fetch all of that patient's ECG image timestamps (except the
    latest one) and save them to a string.

    Args:
        int/str: An integer medical record number, or a numeric string that
            indicates a medical record number

    Returns:
        List: A list of the timestamps corresponding to patient's every ECG
            image
    """
    r = requests.get("http://127.0.0.1:5000/api/patient_ECG_trace/" + str(MRI))
    trace_list_str = r.text[1:-2]
    trace_list = trace_list_str.split(",")
    return trace_list
b1f1adc5e7f6dc74cb3070e06253b12ab4dad91c
511,230
def contains(begin, end, node):
    """Check node is contained between begin and end in a ring."""
    if not end:
        return False
    if not begin:
        return False
    if begin > end:
        # circle around: [begin, 1024[ or [0, end]
        return (node > begin and node < 1024) or (node >= 0 and node <= end)
    else:
        # simplest case
        return node <= end and node > begin
45b1ea48eb4ff4498b504e5f5457ff6115f4583f
453,677
def const(x):
    """Return a function that takes any arguments and returns the specified value.

    Parameters
    ----------
    x
        Value to return.

    Returns
    -------
    `function`
    """
    return lambda *args, **kwargs: x
858f2189863c0a201467bedcbe50cd37e0b5c908
233,589
def join(items):
    """Join items with commas, 'and'"""
    n = len(items)
    if n == 0:
        return ''
    if n == 1:
        return items[0]
    if n == 2:
        return ' and '.join(items)
    return ', '.join(items[:-1] + ['and ' + items[-1]])
a142acba218b1db0417603466368560e396de157
428,560
def parse_report_for_epoch_metrics(report_dict, metrics=["train_mIoU"]):
    """
    Parse report for given metric information over epochs and add it to a list

    Arguments:
        report_dict: dictionary with raw report data
        metrics: list of metrics to search over epochs

    Returns:
        result: a dictionary of the form {metric: {"epoch": [], "value": []}}
    """
    result = {metric: {"epoch": [], "value": []} for metric in metrics}
    for epoch in report_dict["epoch_metrics"]:
        epoch_num = int(epoch)
        for metric in metrics:
            value = float(report_dict["epoch_metrics"][epoch][metric])
            result[metric]["epoch"].append(epoch_num)
            result[metric]["value"].append(value)
    return result
a97747d986e1ccd79ab44a92e01987192f2afdb5
128,926
def rotate(password, rotate_type, *params):
    """Rotate password

    - rotate left/right X steps - means that the whole string should be
      rotated; for example, one right rotation would turn abcd into dabc.
    - rotate based on position of letter X - means that the whole string
      should be rotated to the right based on the index of letter X (counting
      from 0) as determined before this instruction does any rotations. Once
      the index is determined, rotate the string to the right one time, plus
      a number of times equal to that index, plus one additional time if the
      index was at least 4.
    """
    if isinstance(params[0], int):
        steps = params[0]
    else:
        index = password.index(params[1])
        steps = index + 1
        if index >= 4:
            steps += 1
    if rotate_type != 'left':
        steps = -steps
    steps = steps % len(password)
    return password[steps:] + password[:steps]
5b8248d56b1143a2d1ac41e890645649c6a434f5
648,664
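A usage sketch of rotate; the 'based' marker passed as the third argument is an assumption inferred from the isinstance check (any non-int placeholder works, since the letter is read from params[1]):

print(rotate('abcd', 'right', 1))              # dabc
print(rotate('abcd', 'left', 1))               # bcda
print(rotate('abdec', 'right', 'based', 'b'))  # ecabd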
import string


def is_punctuation(token):
    """
    Determine whether a token is punctuation.
    """
    return token in string.punctuation + '’‘“”``'
1101def8e723a2ddd550b76189008ef1646ba4e8
298,070
def create_sorted_stats_dfs(df):
    """
    Takes a dataframe with phrase as index, percentage docs of all years,
    3 Mann-Kendall statistic-related columns, and 3 Theil-Sen slope-related
    columns, produces 5 dataframes:
    (i) phrases with increasing trend (in descending order of Mann-Kendall Z),
    (ii) phrases with decreasing trend (in ascending order of Mann-Kendall Z,
         i.e. high negative values first),
    (iii) phrases which show no trend according to Mann-Kendall (p-value>0.05),
    (iv) phrases with positive Theil-Sen slope (in descending order),
    (v) phrases with negative Theil-Sen slope (in ascending order).
    """
    increasing_mk = df[df.trend_type_mk == 'increasing']
    increasing_mk = increasing_mk.sort_values(by='mannkendall_z', ascending=False)
    decreasing_mk = df[df.trend_type_mk == 'decreasing']
    # Ascending order for negative: we want high negative values to be first.
    decreasing_mk = decreasing_mk.sort_values(by='mannkendall_z')
    notrend_mk = df[df.trend_type_mk == 'no trend']
    # Ascending order by pvalue: values closer to 0.05 first
    notrend_mk = notrend_mk.sort_values(by='mann_kendall_pvalue')
    positive_theilsen = df[df.theilsen_slope > 0]
    positive_theilsen = positive_theilsen.sort_values(by='theilsen_slope', ascending=False)
    negative_theilsen = df[df.theilsen_slope < 0]
    negative_theilsen = negative_theilsen.sort_values(by='theilsen_slope')
    return increasing_mk, decreasing_mk, notrend_mk, positive_theilsen, negative_theilsen
a5ce6ee72666c7ab90764ecf6dcd8462ce8ab4ac
419,860
import random


def change_coords_a_bit(coords, max_delta=0.05):
    """
    Used for fluctuating the given coordinates by max_delta.
    The goal is to avoid overlapping pins on the map.
    """
    new_x = coords[0] + random.random() * max_delta * random.choice([-1, 1])
    new_y = coords[1] + random.random() * max_delta * random.choice([-1, 1])
    return new_x, new_y
af6a5643551e8f0ac5443b5cd441ddf2b465c163
306,941
def lines(a, b):
    """Return lines in both a and b"""
    lines = []
    b_lines = b.split("\n")
    # Split string a into lines and check whether each line appears in b
    for line in a.split("\n"):
        if line in b_lines:
            # Skip empty lines
            if line != "":
                lines.append(line)
    return lines
6c417fe3c6bd0888f4006c147e27f13c6b2ac440
387,726
def flat_correct(ccd, flat, min_value=None, norm_value=None):
    """Correct the image for flat fielding.

    The flat field image is normalized by its mean or a user-supplied value
    before flat correcting.

    Parameters
    ----------
    ccd : `~astropy.nddata.CCDData`
        Data to be transformed.

    flat : `~astropy.nddata.CCDData`
        Flatfield to apply to the data.

    min_value : float or None, optional
        Minimum value for flat field. The value can either be None and no
        minimum value is applied to the flat or specified by a float which
        will replace all values in the flat by the min_value.
        Default is ``None``.

    norm_value : float or None, optional
        If not ``None``, normalize flat field by this argument rather than the
        mean of the image. This allows fixing several different flat fields to
        have the same scale. If this value is negative or 0, a ``ValueError``
        is raised. Default is ``None``.

    {log}

    Returns
    -------
    ccd : `~astropy.nddata.CCDData`
        CCDData object with flat corrected.
    """
    # Use the min_value to replace any values in the flat
    use_flat = flat
    if min_value is not None:
        flat_min = flat.copy()
        flat_min.data[flat_min.data < min_value] = min_value
        use_flat = flat_min

    # If a norm_value was input and is positive, use it to scale the flat
    if norm_value is not None and norm_value > 0:
        flat_mean_val = norm_value
    elif norm_value is not None:
        # norm_value was set to a bad value
        raise ValueError('norm_value must be greater than zero.')
    else:
        # norm_value was not set, use mean of the image.
        flat_mean_val = use_flat.data.mean()

    # Normalize the flat.
    flat_mean = flat_mean_val * use_flat.unit
    flat_normed = use_flat.divide(flat_mean)

    # divide through the flat
    flat_corrected = ccd.divide(flat_normed)
    flat_corrected.meta = ccd.meta.copy()
    return flat_corrected
abcd91b54f0664ae05b294fcc64b42f04dfdccef
656,662
def lowest_index(arr, target):
    """
    Finds the lowest index of target in arr.

    If target in arr, returns the lowest index i such that arr[i] == target,
    else returns the index i where it should be inserted while keeping arr
    sorted.

    Args:
        arr     array to search target in
        target  value

    Returns:
        index, where 0 <= index <= len(arr)

    Preconditions:
        arr == sorted(arr)
        < is supported between target and elements of arr
    """
    # initialize search range
    start, end = 0, len(arr)
    # maintain solution in range [start, end]
    while start < end:
        mid = (start + end) // 2
        if arr[mid] < target:
            start = mid + 1
        else:
            end = mid
    return end
f7e7e394d5e17b7638152f354e4d2166b3e1bad3
240,589
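The semantics match the standard library's bisect_left; a quick cross-check:

from bisect import bisect_left

data = [1, 3, 3, 5, 9]
for t in (3, 4, 10):
    assert lowest_index(data, t) == bisect_left(data, t)
print(lowest_index(data, 3))  # 1, index of the first occurrence
print(lowest_index(data, 4))  # 3, insertion point between 3 and 5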
def helper_float(val):
    """
    Helper function for use with `dict_reader_as_geojson`.

    Returns `None` if the input value is an empty string or `None`, otherwise
    the input value is cast to a float and returned.
    """
    if val is None or val == '':
        return None
    else:
        return float(val)
891f31a3d0ab44f39bb0e61a7d581592dc143cb4
292,320
def build_path(pattern, keys):
    """Replace placeholders in `pattern` to build path to be scraped.

    `pattern` will be a string like "measure/%(measure)s/ccg/%(entity_code)s/".
    """
    substitutions = {
        "practice_code": "L83100",
        "ccg_code": "15N",
        "stp_code": "E54000037",
        "regional_team_code": "Y58",
        "bnf_code": "0205051R0BBAIAN",
        "measure": "ace",
    }
    path = pattern
    for key in keys:
        subst_key = None
        if key in ["code", "entity_code"]:
            for token in ["practice", "ccg", "stp", "regional-team"]:
                if token in pattern:
                    subst_key = "{}_code".format(token).replace("-", "_")
            if subst_key is None:
                subst_key = "bnf_code"
        else:
            subst_key = key
        path = path.replace("%({})s".format(key), substitutions[subst_key])
    assert "%" not in path, "Could not interpolate " + pattern
    return path
64e72a758ba4cb9c7f850cbc44e031e2bf2d9125
75,189
def _get_basic_re_expr(re_expr: str) -> str:
    """
    Converts a regular expression to a basic regular expression.

    Parameters
    ----------
    re_expr : str
        Regular expression.

    Returns
    -------
    str
        Basic regular expression.
    """
    return re_expr
14378b9003e32b341163cd53bb5887b6ea7fd44c
351,644
def from_table_to_obj_list(table, obj_type, separator="|"):
    """
    Map a given tabular output into a python object.

    The python object you want to map the table into needs to define a
    MAPPINGS dictionary which declares how to map each row element into the
    object itself. Each entry of the MAPPINGS dictionary is composed as
    follows:
    - key: name of the table column (specified in the header)
    - value: a dict containing:
        - field: name of the object attribute you want to map the value to
        - transformation: a function that will be called on the value before
          assigning this to the object attribute.
    Default values can be defined in the class __init__ definition.

    :param table: string containing the table to parse
    :param obj_type: type of the object you want to map the table into
    :param separator: separator for the row items
    :return: a list of obj_type instances containing the parsed data
    """
    lines = table.splitlines()
    results = []
    if len(lines) > 1:
        mappings = obj_type.MAPPINGS
        columns = lines[0].split(separator)
        rows = lines[1:]
        for row in rows:
            obj = obj_type()
            for item, column in zip(row.split(separator), columns):
                mapping = mappings.get(column)
                if mapping:
                    transformation_func = mapping.get("transformation")
                    value = item if transformation_func is None else transformation_func(item)
                    setattr(obj, mapping["field"], value)
            results.append(obj)
    return results
3d4ef6620bdbb0a71b0b5f84f16f8653b64ecf32
349,689
def drop_dark_data(df):
    """Return dataframe in which the index does not contain the string 'dark'"""
    s = df.index.str.contains("dark")
    return df[~s].copy()
02f21d7122537fe92041d6ca55313b6a18207d5c
233,712
def _sanitize_log_msg(record):
    """
    Sanitize log message to make sure we can print out properly formatted
    JSON string

    :param record: log object
    :return: sanitized log object
    """
    return record.getMessage().replace('\n', '_').replace('\r', '_').replace('\t', '_')
480dde7e008e0b1744f7f409699bc3245102445b
181,381
from typing import Dict


def rename_attributes(
    obj: Dict, old_to_new_attributes: Dict[str, str], in_place: bool = False
) -> Dict:
    """Rename a set of attributes in the given dict object.

    Second parameter is a dict that maps old to new attribute names.
    Default is to return a copy, but can also pass in_place=True."""
    if not in_place:
        obj = dict(obj)
    for old_name, new_name in old_to_new_attributes.items():
        if old_name in obj:
            obj[new_name] = obj.pop(old_name)
    return obj
6360b5d5c23205951dfd3da90c3a56e26bcbe321
370,938
import time


def timestr_unix(time_str):
    """
    Convert a formatted timestamp to unix time.

    Args:
        time_str (str): formatted time string.

    Returns:
        int: unix time.
    """
    time_format_with_hill = '%Y-%m-%dT%H:%M:%S.%f%z'
    time_str = time_str[:26] + time_str[-6:]
    time_format = time.strptime(time_str, time_format_with_hill)
    return int(time.mktime(time_format))
fcffd97576e579702634ca80f9f45b9990b47b82
121,647
import csv


def get_X_y(data_file):
    """Read the log file and turn it into X/y pairs. Add an offset to left
    images, remove from right images."""
    X, y = [], []
    steering_offset = 0.4
    with open(data_file) as fin:
        for _, left_img, right_img, steering_angle, _, _, speed in csv.reader(fin):
            if float(speed) < 20:
                continue  # throw away low-speed samples
            X += [left_img.strip(), right_img.strip()]
            y += [float(steering_angle) + steering_offset,
                  float(steering_angle) - steering_offset]
    return X, y
82329371eaa6100d99aed599707bf28cb83f025e
72,062
def order_to_degree(order):
    """Compute the degree given the order of a B-spline."""
    return int(order) - 1
f2078b7fd7c19202d3bc7826807cbed53cd2a1d7
643,369
def get_character_node(graph, character, only_one=False):
    """Return nodes for the given character

    :param graph: The graph
    :type graph: nx.Graph
    :param character: The character
    :type character: str
    :param only_one: boolean in order to return one node or all found nodes,
        defaults to False
    :type only_one: bool, optional
    :return: List of nodes if only_one is set to False, else tuple
    :rtype: list or tuple
    """
    found = []
    for node, data in graph.nodes(data=True):
        if data["character"] != character:
            continue
        found.append(node)
    if only_one:
        return found[0] if found else found
    return found
fd0c325e7e2b8e0fc0b0ef1055d7453776c649c1
367,827
import re


def extract_libraries(files):
    """Extracts a list of imports that were used in the files

    Parameters
    ----------
    files : []string
        Full paths to files that need to be analysed

    Returns
    -------
    dict
        imports that were used in the provided files, mapped against the
        language
    """
    res = []

    # regex to find imports like import Stuff
    regex_import = re.compile(
        r"import ((?!func|var|let|typealias|protocol|enum|class|struct)\s?[a-zA-Z0-9.]+)",
        re.IGNORECASE)

    # regex to find imports like import kind module.symbol
    # here kind can be func, var, let, typealias, protocol, enum, class, struct
    # this regex will return a list of (kind, module.symbol)
    regex_declarations = re.compile(
        r"import (func|var|let|typealias|protocol|enum|class|struct) ([a-zA-Z0-9.]+)",
        re.IGNORECASE)

    for f in files:
        with open(file=f, mode='r', errors='ignore') as fr:
            contents = ' '.join(fr.readlines())
            matches = regex_import.findall(contents)
            matches.extend([d[1] for d in regex_declarations.findall(contents)])
            if matches:
                res.extend(matches)

    # remove duplicates
    res = list(set(res))

    return {"Swift": res}
9ea66db3dbe422eb73e20fced595dee1d83a76f5
560,585
def queue_requeue(queue, index=-1):
    """Returns the element at index after moving it to the beginning of the queue.

    >>> x = [1, 2, 3, 4]
    >>> queue_requeue(x)
    4
    >>> x
    [4, 1, 2, 3]
    """
    x = queue.pop(index)
    queue.insert(0, x)
    return x
8136eec4058cfdffd2f57b71e5954da8dd9878ca
545,277
def getCol(table, index):
    """
    Gets a col by an index
    Ignores "NA"
    """
    col = []
    for row in table:
        if row[index] == 'NA':
            continue
        col.append(row[index])
    return col
885a2aebc2bafc2e7d0f3825ee5bc69aa328827a
477,831
import struct


def int_from_bytes(val):
    """Converts val to an unsigned int assuming a BE byte order"""
    try:
        return int.from_bytes(val, 'big')
    except AttributeError:
        # Older Python, so pad it and unpack
        return struct.unpack('>L', b'\x00' * (4 - len(val)) + val)[0]
bcaddc3952e5f116b2d44e15784a46e5b441b92e
177,887
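A quick check of the big-endian interpretation; note that the struct fallback path assumes val is at most 4 bytes long.

print(int_from_bytes(b'\x01\x00'))      # 256
print(int_from_bytes(b'\x00\x00\x01'))  # 1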
import io
import re
import math


def mem_per_core(units='kB'):
    """returns memory per core in kB (default) or another desired unit"""
    with io.open('/proc/meminfo') as f:
        lines = [x for x in f.readlines()
                 if re.search('MemTotal', x) is not None]
    mem = float(lines[0].split()[1])  # memory in kB
    with io.open('/proc/cpuinfo') as f:
        lines = [x for x in f.readlines()
                 if re.search('processor', x) is not None]
    ncores = len(lines)
    mpc = mem / ncores
    if units.lower() == 'mb':
        mpc *= 1e6 / 1e9  # kB to MB
    return int(math.floor(mpc))
538ce73ac1dd3e30f10082dcbdde89f67fc5410d
110,890
def django_url_preprocessor(url, root_url):
    r"""
    Convert url from the simplified string version for app developers to
    Django regular expression.

    e.g.:
        '/example/resource/{variable_name}/'
        r'^/example/resource/(?P<variable_name>[0-9A-Za-z-]+)/$'
    """
    # Default Django expression that will be matched
    DEFAULT_EXPRESSION = '[0-9A-Za-z-]+'

    # Split the url into parts
    url_parts = url.split('/')
    django_url_parts = []

    # Remove the root of the url if it is present
    if root_url in url_parts:
        index = url_parts.index(root_url)
        url_parts.pop(index)

    # Look for variables
    for part in url_parts:
        # Process variables
        if '{' in part or '}' in part:
            variable_name = part.replace('{', '').replace('}', '')
            part = '(?P<{0}>{1})'.format(variable_name, DEFAULT_EXPRESSION)

        # Collect processed parts
        django_url_parts.append(part)

    # Join the processed parts again
    django_url_joined = '/'.join(django_url_parts)

    # Final django-formatted url
    if django_url_joined != '':
        django_url = r'^{0}/$'.format(django_url_joined)
    else:
        # Handle empty string case
        django_url = r'^$'

    return django_url
7586010739649ba10ab7eb71eed68bd6885880f9
375,988
import types
import functools


def compose(*funcs):
    """
    Functional composition of a non-empty list; [f, g, h] will be f(g(h(x)))

    :Example:

    >>> f = lambda x: x * x
    >>> g = lambda x: x + 1
    >>> h = lambda x: x * 2
    >>> funcs = [f, g, h]
    >>> fgh = compose(*funcs)
    >>> fgh(3)
    49
    >>> compose(f, g, h)(3)
    49
    """
    if not funcs:
        raise ValueError("Compose only supports non-empty lists")
    for func in funcs:
        if not isinstance(func, (types.BuiltinMethodType, functools.partial,
                                 types.MethodType, types.BuiltinFunctionType,
                                 types.FunctionType)):
            raise TypeError("Only Function types are supported")

    def compose_two(f, g):
        def c(x):
            return f(g(x))
        return c

    return functools.reduce(compose_two, funcs)
4e1b66de31fa35a7a76312f6df57c3b50c1a24e1
488,011
def get_table3_coeff(A_A):
    """Get the Table 3 coefficients used to calculate the primary energy
    consumption of other appliances.

    Args:
        A_A (float): total floor area (m2)

    Returns:
        tuple: coefficients a_SV, b_SV
    """
    # Coefficients used to calculate the primary energy consumption of other appliances
    table_3 = [
        (33, 38, 33),
        (129, -21, 579)
    ]
    if A_A < 30:
        index = 0
    elif A_A < 120:
        index = 1
    else:
        index = 2
    a_SV = table_3[0][index]
    b_SV = table_3[1][index]
    return a_SV, b_SV
736f4d5579449ba84193d8a997e9fe2800b007b6
107,280
def is_quoted(value):
    """
    Return a single or double quote, if a string is wrapped in extra quotes.
    Otherwise return an empty string.
    """
    ret = ""

    if (
        isinstance(value, str)
        and value  # guard against IndexError on the empty string
        and value[0] == value[-1]
        and value.startswith(("'", '"'))
    ):
        ret = value[0]

    return ret
302ec7aa13a2e8c35a5aca5247b64e1eb4cbb5b7
667,722
def normalize_events_list(old_list):
    """Internally the `event_type` key is prefixed with underscore but the
    API returns an object without that prefix"""
    new_list = []
    for _event in old_list:
        new_event = dict(_event)
        new_event['event_type'] = new_event.pop('_event_type')
        new_list.append(new_event)
    return new_list
ab0810763f10bb5e37fbff39043c29e685c123b2
140,667
from pathlib import Path


def _make_path(base: Path, segment: str) -> Path:
    """
    Returns the segment relative to the given base, if it's a relative path.
    Absolute paths are returned as is.
    """
    original_path = Path(segment)
    if original_path.is_absolute():
        return original_path
    return base.joinpath(original_path)
fd009c2224b3f61759af57a7705579904dae48ba
90,886
def split_str_zmat(zmat_str: str) -> tuple:
    """
    Split a string zmat into its coordinates and variables sections.

    Args:
        zmat_str (str): The zmat.

    Returns:
        tuple[str: The coords section,
              str: The variables section if it exists, else None,
              ]
    """
    coords, variables = list(), list()
    flag = False
    if 'variables' in zmat_str.lower():
        for line in zmat_str.splitlines():
            if 'variables' in line.lower():
                flag = True
                continue
            elif flag and line:
                variables.append(line)
            elif line:
                coords.append(line)
    else:
        splits = zmat_str.splitlines()
        if len(splits[0].split()) == len(splits[1].split()) and \
                (len(splits[0].split()) == 2
                 or (len(splits[0].split()) == 1 and len(splits[1]) != 1)):
            # this string starts with the variables section
            for line in splits:
                if flag and line:
                    coords.append(line)
                if not flag and len(line.split()) == len(splits[0].split()) and line:
                    variables.append(line)
                else:
                    flag = True
        elif len(splits[-1].split()) == len(splits[-2].split()) and len(splits[-1].split()) in [1, 2]:
            # this string starts with the coordinates section
            for line in splits:
                if flag and len(line.split()) == len(splits[-1].split()) and line:
                    variables.append(line)
                if not flag and line:
                    coords.append(line)
                else:
                    flag = True
    coords = '\n'.join(coords) if len(coords) else zmat_str
    variables = '\n'.join(variables) if len(variables) else None
    return coords, variables
672ad8fa382374fd843009efb475383cf2c015d0
510,669
def _normalize_last_4(account_number: str):
    """Convert authorize.net account number to Saleor "last_4" format.

    Example: XXXX1111 > 1111
    """
    return account_number.strip("X")
9691885f67373f21490e3f2ec88e3ad8ca3e81b4
423,834
import random


def roulette_selection(population, minimize, num_selected_ind):
    """
    This function returns a list with the ids of the individuals selected by
    the roulette wheel selection. Note that the normalized fitness of the
    population must be calculated before calling this function (call the
    method Gavl._Population__calculate_normalized_fitness).

    :param population: (list of Individuals) This is a list of individuals
        (see class Individual).
    :param minimize: (int) Int that represents if the goal is minimizing the
        fitness (minimize = 1) or maximizing it (minimize = 0).
    :param num_selected_ind: (int) number of individuals to be selected.
    :return:
        * :list_selected_individuals: (list of str) List with the ids of the
          selected individuals. Note that there can be repeated individuals.
    """
    if minimize:
        list_ids_normalizedfitness = [(ind._id, ind.inverse_normalized_fitness_value) for ind in population]
    else:
        list_ids_normalizedfitness = [(ind._id, ind.normalized_fitness_value) for ind in population]
    list_selected_individuals = []  # List that will contain the ids of the selected individuals.
    sum_fit = sum(map(lambda x: x[1], list_ids_normalizedfitness))  # Sum of the (inverse) normalized fitness
    for _ in range(num_selected_ind):
        population_cumulative_fitness = 0  # Population cumulative fitness
        selected_cumulative_fitness = random.random() * sum_fit  # Cumulative fitness of the selected individual
        for ind in list_ids_normalizedfitness:
            population_cumulative_fitness += ind[1]
            if selected_cumulative_fitness <= population_cumulative_fitness:
                list_selected_individuals.append(ind[0])  # Add the id of the individual to the selection
                break
    return list_selected_individuals
1e4121cb67fc39f5c4e2d197dccf30b9e36f5a80
468,601
def RR_calc(classes, TOP):
    """
    Calculate Global performance index (RR).

    :param classes: confusion matrix classes
    :type classes: list
    :param TOP: number of positives in predict vector per class
    :type TOP: dict
    :return: RR as float
    """
    try:
        class_number = len(classes)
        result = sum(list(TOP.values()))
        return result / class_number
    except Exception:
        return "None"
814a11c339b25dc687d537efd3244ddad9c0f8fd
701,408
def find_averages_of_subarrays_pythonic(arr: list[int], K: int) -> list[float]:
    """
    Args:
        arr: input array
        K: subarray (window) size

    Returns:
        average of all K-length subarrays

    Examples:
        >>> find_averages_of_subarrays_pythonic([1, 3, 2, 6, -1, 4, 1, 8, 2], 5)
        [2.2, 2.8, 2.4, 3.6, 2.8]
        >>> find_averages_of_subarrays_pythonic([], 5)
        []
        >>> find_averages_of_subarrays_pythonic([1], 0)
        []
    """
    ## EDGE CASES ##
    default_ret_val = []
    if not arr:
        return default_ret_val
    if K <= 0:
        return default_ret_val

    ## INITIALIZE VARS ##
    window_start, window_end = 0, K - 1
    window_sum = float(sum(arr[window_start:window_end]))
    res = []

    ## SLIDING ##
    for window_end in range(window_end, len(arr)):
        # EXPANSION: take the new element in at the right edge
        window_sum += arr[window_end]
        # WINDOW MATCH: a full K-length window, record its average
        res.append(window_sum / K)
        # CONTRACTION: drop the element leaving at the left edge
        window_sum -= arr[window_start]
        window_start += 1

    return res
76bb94daf1aa50fe8560bb8a2b36f78a772e221a
331,260
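Cross-checking the O(n) sliding window against a naive O(n*K) recomputation; both should agree exactly:

arr, K = [1, 3, 2, 6, -1, 4, 1, 8, 2], 5
naive = [sum(arr[i:i + K]) / K for i in range(len(arr) - K + 1)]
assert find_averages_of_subarrays_pythonic(arr, K) == naive
print(naive)  # [2.2, 2.8, 2.4, 3.6, 2.8]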
def convert_null_geometry_to_none(frame):
    """Utility method. Convert any geometry in the geoDataFrame which is
    "null" (`None` or empty) to `None`. The returned geoDataFrame is suitable
    for saving.
    """
    def null_to_none(x):
        if x is None or x.is_empty:
            return None
        return x

    newgeo = frame.geometry.map(null_to_none)
    return frame.set_geometry(newgeo)
ad5bff0e79d9f74c6a153561a6215b40cd3c1866
208,335
def socket_mem_from_vars(ansible_vars):
    """
    Return list of DPDK socket memory values defined in Ansible variables
    """
    numa_nodes = ansible_vars["numa_nodes"]
    socket_mem = []
    for node in sorted(numa_nodes.keys()):
        socket_mem.append(numa_nodes[node]["dpdk_socket_mem"])
    return socket_mem
0342204c94adbc3ba392e9c69aa22eb678537bb9
508,194
def nested_lookup(n, idexs):
    """Function to fetch a nested sublist given its nested indices.

    Parameters
    ----------
    n: list
        the main list in which to look for the sublist
    idexs: list
        the indices of the sublist

    Returns
    -------
    list: sublist with given indices
    """
    if len(idexs) == 1:
        return n[idexs[0]]
    return nested_lookup(n[idexs[0]], idexs[1:])
ba4927e749be979fa297384fa9345db78243fa93
671,552
def is_model_on_gpu(model):
    """
    Function to check whether given model is created on GPU or CPU.
    Assumption: model is on a single device.

    :return: True if the model is on GPU, False if on CPU
    """
    return next(model.parameters()).is_cuda
64a4fcabbde843b6b26e6f3a73c51e580304f1a4
39,455
def getWindowGeometry(topLevelWindow):
    """
    Returns a tuple of (width, height, distanceFromScreenLeft, distanceFromScreenTop)
    """
    try:
        # Tk geometry strings look like "WxH+left+top".
        dimensions, leftDistance, topDistance = topLevelWindow.geometry().split('+')
        width, height = dimensions.split('x')
        geometry = (int(width), int(height), int(leftDistance), int(topDistance))
    except ValueError:
        raise ValueError("Failed to parse window geometry string: " + topLevelWindow.geometry())

    return geometry
3b3b818ae281410fa36f45d4844685d4298ae15a
533,735
def _window_has_intervening_extrema(window, contour, mode):
    """
    Steps 8/9. If there exists a sequence of equal maxima or minima, check if
    the sequence contains an intervening opposite extrema, i.e. if a sequence
    of two equal maxima contains a minima between them.

    >>> maxima_group = [
    ...     (2, [2, {1}]),
    ...     (4, [2, {1}])
    ... ]
    >>> contour = [[1, {1, -1}], [0, {-1}], [2, {1}], [0, {-1}], [2, {1}], [1, {1, -1}]]
    >>> _window_has_intervening_extrema(maxima_group, contour=contour, mode="max")
    True
    """
    contour_index_range = [window[0][0], window[-1][0]]
    # Two trivial cases
    if len([x for x in window if 1 in x[1][1]]) == 1:  # Single extrema.
        return True
    elif window[0][0] + 1 == window[-1][0]:  # Two contiguous extrema.
        if mode == "max":
            return -1 in contour[contour_index_range[1]][1]
        if mode == "min":
            return 1 in contour[contour_index_range[1]][1]

    # IMPORTANT NOTE: The conditions for intervening extrema are unclear from
    # Schultz's paper. For example, are beginning/ending contour elements
    # allowed to contain the opposite extrema value? I assume not, so I
    # restrict the range.
    # Include start: intervening_range = contour[window[0][0]:window[-1][0]+1]
    intervening_range = contour[contour_index_range[0] + 1:contour_index_range[-1] - 1 + 1]
    if mode == "max":  # Looking for min.
        return any(-1 in x[1] for x in intervening_range)
    if mode == "min":  # Looking for max.
        return any(1 in x[1] for x in intervening_range)
f2ce05aa779679415d1542c4a190fae6a49ff65b
640,254
def skip_testing_during_training(task):
    """Filter to determine if we should be running test-time evaluation.

    In cloth and bag tasks, we need `--disp` (at least with PyBullet 2.8.4),
    and that causes problems if instantiating multiple `Environment`s, as in
    standard testing. Furthermore, all 'Deformable Ravens' tasks have finer
    grained evaluation criteria, and it is easier to have a dedicated script,
    `load.py`, which can process more information. We filter for all these,
    while being careful to avoid filtering 'cable'.

    Args:
        task: String representing the task name from the argument parser.

    Returns:
        bool indicating test-time evaluation.
    """
    return ('cable-' in task) or ('cloth' in task) or ('bag' in task)
74ba66d8652883da7a66c9c239b45d62bc466d50
307,646
def format_semicolon_list(semi_list):
    """Formats a string representing a semicolon-separated list into MCF
    property values list format. This is used to format 'PMIDs' in
    relationships.tsv.

    Args:
        semi_list: a string representing a semicolon-separated list

    Returns:
        A string that is an mcf property text values list, enclosed by double
        quotes and comma separated.
    """
    if not semi_list:
        return ''
    formatted_str = ''
    for prop_value in semi_list.split(';'):
        formatted_str += '"' + prop_value + '",'
    return formatted_str.strip(',')
5a01034407f7b4bd7c6ec89a124a2189492590dc
283,581
import string


def get_sentiment(text, word_map):
    """
    Identifies the overall sentiment of the text by taking the average of
    each word.

    Note: Words not found in the word_map dict are given zero value.
    """
    # remove all punctuation
    text = text.translate(str.maketrans("", "", string.punctuation))

    # split into tokens
    text = text.split()

    total_score, length = 0, 0

    # get score for each word, put zero if not found
    scores = (word_map.get(token.lower(), 0) for token in text)

    # find average score
    for score in scores:
        total_score += score
        length += 1

    return total_score / length
ee9e57c999539c0126e5c0d38711a617e82dab10
5,286
from typing import List


def is_valid_sample(nn_intervals: List[float], outlier_count: int,
                    removing_rule: float = 0.04) -> bool:
    """
    Test if the sample meets the conditions to be used for analysis.

    Parameters
    ----------
    nn_intervals : list
        list of Normal to Normal Interval
    outlier_count : int
        count of outliers or ectopic beats removed from the interval
    removing_rule : float
        rule to follow to determine whether the sample is valid or not

    Returns
    ----------
    bool
        True if sample is valid, False if not
    """
    result = True
    if outlier_count / len(nn_intervals) > removing_rule:
        print("Too many outliers for analysis! You should discard the sample.")
        result = False
    if len(nn_intervals) < 240:
        print("Not enough heart beats for Nyquist criteria!")
        result = False
    return result
a2caffb114715da6f51b760c48282e713abbea67
582,490
import yaml


def update_config(config, updates):
    """Modifies the YAML configurations, given a list of YAML updates."""
    if isinstance(updates, str):
        updates = [updates]
    for update in updates:
        edits = yaml.safe_load(update)
        for k, v in edits.items():
            node = config
            for ki in k.split('.')[:-1]:
                if ki in node:
                    node = node[ki]
                else:
                    node[ki] = dict()
                    node = node[ki]
            ki = k.split('.')[-1]
            node[ki] = v
    return config
f9c66068226fc44d8fe8f35bb1f21a6d8648b3fb
75,891
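A small usage sketch, assuming PyYAML is installed; dotted keys address nested mappings, and missing intermediate nodes are created on the fly:

cfg = {'model': {'depth': 18}}
cfg = update_config(cfg, ['model.depth: 50', 'train.lr: 0.1'])
print(cfg)  # {'model': {'depth': 50}, 'train': {'lr': 0.1}}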
import re


def IsHtml(data):
    """Return True if data is HTML, False otherwise.

    Args:
        data: str Data to check.

    Returns:
        bool True if data is HTML, False otherwise.
    """
    # Remove banners and XML header. Convert to lower case for easy search.
    data = ''.join(data.split('\n')).lower()
    pattern = re.compile('<html>.*?<body.*?>.*?</body>.*?</html>')
    if pattern.findall(data):
        return True
    else:
        return False
c968db2e65b1a09dbdb5d820cf03b49809c320e1
136,515
def parse_ped(pedfile):
    """Parse the PED file and store it in a dictionary of {lineid: [PED_data]}"""
    ped_data = {}
    with open(pedfile, 'r') as f:
        for line in f:
            tmp = line.strip().split()
            lid = tmp[1]
            # add the line to the ped_data
            ped_data[lid] = tmp
    return ped_data
0563a53b24009d5f1616323364b39bb45d43848d
126,605
def filter_values(item):
    """
    Returns last element of the tuple or ``item`` itself.

    :param object item: It can be tuple, list or just an object.

    >>> filter_values(1)
    1
    >>> filter_values((1, 2))
    2
    """
    if isinstance(item, tuple):
        return item[-1]
    return item
c38dd2351cab8dfdf63626eb511fc7e3c46599be
343,750
def merge_p(bi, p, bigrams):
    """
    Calculates the merge probability by combining the probs of the bigram of
    words and the prob of merge.

    Arguments
        bi : bigram
        p  : p(MG->mg) from grammar (should be 1)

    Returns
        combined probability of merge op and bigram
    """
    (w1, w2) = bi
    return bigrams[w1][w2] * p
58831a487e0bb441fa3f19e0deead0a1f7632df8
681,085
import json


def write_json(filename, data):
    """Saves data to a json file."""
    with open(filename, 'w', encoding='utf-8') as file:
        json.dump(data, file, sort_keys=False, indent=4, ensure_ascii=False)
    return True
6111762b2aa9b93791cf7593f9eaaa0f87c5f83a
535,282
import re


def get_dihedrals(qm_scan_file):
    """
    Returns dihedrals from the torsiondrive scan file.

    Parameters
    ----------
    qm_scan_file : str
        Output scan file containing torsiondrive scans.

    Returns
    -------
    dihedrals : list
        List of all the dihedral values from the qm scan file.
    """
    with open(qm_scan_file, "r") as f:
        lines = f.readlines()
    energy_dihedral_lines = []
    for i in range(len(lines)):
        if "Dihedral" in lines[i]:
            energy_dihedral_lines.append(lines[i])
    dihedrals = []
    for i in energy_dihedral_lines:
        energy_dihedral = i
        energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
        dihedral = float(energy_dihedral[0])
        dihedrals.append(dihedral)
    return dihedrals
12f78bfc762d28ff9992f8300e62d658090a1834
576,416
def target_function(
    foo: str, bar: int, /, baz: bool, spam: str = "foobar", *, egg: bytes = b""
) -> dict[str, str]:
    """
    This function is a test fixture.

    **Args**

    * foo (`str`): Lorem ipsum.
    * bar (`int`): *italic*
    * baz (`bool`): **emphasize**
    * spam (`str`): Multiline description
      should be indented.
    * egg (`bytes`): `backtick`

    **Returns**

    * `dict[str, str]`: Return type.
    """
    return {"return": "dict"}
b9899e9c622a61fcb820d9bbc7665e9e855fa78a
297,092
def get_frame_index(d):
    """Get frame index from the whole dictionary

    E.g., '{'index': 'data/Deploy/KLAC/KLAC0570/KLAC0570_12.jpg',
           'prediction': ..., 'label': ...}'
    ==> (int) 12
    """
    return int(d['index'].split('_')[-1][:-4])
4560b4255374784834c5c16c8b365d1b5b1d0fcd
676,490
def getDictFromTuple(values: tuple, keys: list, includeNone: bool = True):
    """returns a dict based on the tuple values and assigns the values to the
    keys provided

    for instance, values=(1, "bill", 5) and keys=["id", "name", "age"]
    returns {"id": 1, "name": "bill", "age": 5}
    """
    _obj = {}
    for _i in range(len(values)):
        if includeNone or (values[_i] is not None):
            _obj[keys[_i]] = values[_i]
    return _obj
b4a182ee561d2640004aa57b6c75f669af9261b3
692,801
def take_transcript_id_without_version(full_id):
    """Returns transcript id without the version and everything after the
    version-separating dot.

    Example:

    Input: ESNT_0001.4
    Output: ESNT_0001

    Input: ESNT_0002.2.some_annotation
    Output: ESNT_0002
    """
    return full_id.split('.')[0]
e88e5f20b67b672dfc2aed5b3c12302336ea8cd2
546,428
def GetIdentifier(default):
    """Return the identifier for the keychain."""
    return default
62eda10986da424d39235051917d53715cdccb38
595,935
def doble_queso(pizza):
    """
    (list of str) -> list of str

    Adds cheese ('queso') at the beginning and end of the pizza if it is not
    already there.

    >>> doble_queso(['queso', "jamon"])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(["jamon", 'queso'])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(["jamon"])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(['queso', "jamon", 'queso'])
    ['queso', 'jamon', 'queso']

    :param pizza: list of str, the pizza to top
    :return: the pizza with double cheese
    """
    nueva_pizza = pizza.copy()
    if not ('queso' == nueva_pizza[0]):
        nueva_pizza.insert(0, 'queso')
    if not ('queso' == nueva_pizza[-1]):
        nueva_pizza.append('queso')
    return nueva_pizza
a8ea9d6b63989e616f00fd9684053fcdffdb1d9d
32,939
import json


def parse_log(log_file):
    """Parse a newline-separated logfile into result objects."""
    with open(log_file, 'r') as fp:
        results = [json.loads(line.strip()) for line in fp]
    return results
f265736383940e1bac6f3fa06d29b19d9920da84
209,002
def num_disambiguated_iupac_strings(seq, ambiguous_letters, disambiguation):
    """
    seq is Bio.Seq.Seq or str
    ambiguous_letters is a string containing ambiguous IUPAC codes
    disambiguation is a dict with keys from ambiguous_letters and values the
    strings containing the unambiguous versions (e.g.,
    Bio.Data.IUPACData.ambiguous_dna_values)
    """
    n = 1
    for letter in str(seq):
        if letter in ambiguous_letters:
            n *= len(disambiguation[letter])
    return n
9a3a989506d67e9a5bf07803bc69c79979b8fd16
612,837
def num2alphabet(number):
    """This function converts a number into its corresponding alphabetic
    letter. Meaning 1 gives an A while 2 gives a B. Since I use it to access
    the excel columns I convert the numbers to uppercase chars. When a number
    is higher than 26 the remainder gets appended such that 27 gives AA."""
    # Translate number to letter
    if number <= 26:
        # If number is lower or equal to 26 translate to alphabetic letter
        return chr(number + 64)
    # If number is higher than 26, peel off base-26 digits from the right
    # (1-based, so subtract 1 before each divmod). Example: 27 -> AA, 53 -> BA.
    letter_code = ""
    while number > 0:
        number, r = divmod(number - 1, 26)
        letter_code = chr(r + 65) + letter_code
    return letter_code
b4ece3202382330c937506a4a2c773746265e1a2
609,443
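A sanity check of the column naming across the multiple-of-26 boundaries:

for n in (1, 26, 27, 28, 52, 53):
    print(n, num2alphabet(n))
# 1 A, 26 Z, 27 AA, 28 AB, 52 AZ, 53 BA -- matches spreadsheet column order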
def get_vm_name(name):
    """Map node name to VM name by stripping DNS suffix"""
    return name.split('.', 1)[0]
ca65b7ad4aad9b210cb1cc98adee86480feb0067
592,191
def binary_search(lo, hi, f):
    """Binary searches in [lo, hi) to find n such that f(n) == f(lo) but
    f(n + 1) != f(lo). It is implicitly assumed and will not be checked that
    f(hi) != f(lo).
    """
    reference = f(lo)
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        if f(mid) == reference:
            lo = mid
        else:
            hi = mid
    return lo
bc474bb415b9f5506acc5fa9ecfe3a44b8c25bbd
524,930
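A usage example of the boundary search: find the largest n in [1, 100) whose square stays within 50 (the predicate flips from True to False at n = 8):

print(binary_search(1, 100, lambda n: n * n <= 50))  # 7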
def sparse_onenorm(A):
    """
    Computes the 1-norm of the scipy sparse matrix `A`.

    Parameters
    ----------
    A : scipy sparse matrix
        The matrix or vector to take the norm of.

    Returns
    -------
    float
    """
    return max(abs(A).sum(axis=0).flat)
2ab1d109a29390a991df8c4cb8e158566b060e4d
543,483
def if_not_read_only(f):
    """Raise TypeError if the decorated function is called while read only is True."""
    def wrapper(*args):
        if args[0]._read_only:
            raise TypeError("This dictionary is read only!")
        return f(*args)
    return wrapper
e82c744ddfee1a377454f02e43722a2687c20e8c
154,225
def _popup_footer(view, details):
    """
    Generate a footer for the package popup that indicates how the package is
    installed.
    """
    return """
        {shipped} <span class="status">Ships with Sublime</span>
        &nbsp; &nbsp;
        {installed} <span class="status">In Installed Packages Folder</span>
        &nbsp; &nbsp;
        {unpacked} <span class="status">In Packages Folder</span>
        """.format(
            shipped="\u2611" if details["is_shipped"] else "\u2610",
            installed="\u2611" if details["is_installed"] else "\u2610",
            unpacked="\u2611" if details["is_unpacked"] else "\u2610")
8f159f75990e87c8d431bb2dd1d01e648079ac96
702,393
import re


def extract_jobs_flags(mflags):
    """Extracts make job flags from a list of other make flags, i.e. -j8 -l8

    :param mflags: string of space separated make arguments
    :type mflags: str
    :returns: list of make jobs flags
    :rtype: list
    """
    if not mflags:
        return []
    # Each line matches a flag type, i.e. -j, -l, --jobs, --load-average.
    # (?:^|\s) and (?=$|\s) make sure that the flag is surrounded by whitespace.
    # (?:...) is just a group that will not be captured; this is necessary
    # because the whole flag should be captured.
    # The upper two expressions are simple: they just match the flag, optional
    # whitespace and an optional number.
    # The bottom two expressions are more complicated because the long flag
    # may be followed by '=' and a number, whitespace and a number, or nothing.
    regex = r'(?:^|\s)(-j\s*\d*)(?=$|\s)|' + \
            r'(?:^|\s)(-l\s*\d*\.?\d*)(?=$|\s)|' + \
            r'(?:^|\s)(--jobs(?:(?:=|\s+)\d+)?)(?=$|\s)|' + \
            r'(?:^|\s)(--load-average(?:(?:=|\s+)\d*\.?\d+)?)(?=$|\s)'
    filtered_flags = []
    for match in re.findall(regex, mflags):
        filtered_flags.extend([m.strip() for m in match if m])
    return filtered_flags or None
2c259c53a03c7f601d81650ff994590381437611
32,414
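A few sample invocations; note that a non-empty input with no job flags returns None rather than an empty list:

print(extract_jobs_flags('-j8 -l8'))         # ['-j8', '-l8']
print(extract_jobs_flags('--jobs=4 -Wall'))  # ['--jobs=4']
print(extract_jobs_flags('-Wall -O2'))       # None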
def sort_set_by_list(s, l, keep_duplicates=True):
    """
    Convert the set `s` into a list ordered by a list `l`.

    Elements in `s` which are not in `l` are omitted. If
    ``keep_duplicates==True``, keep duplicate occurrences in `l` in the
    result; otherwise, only keep the first occurrence.
    """
    if keep_duplicates:
        return [e for e in l if e in s]
    else:
        res = []
        s = s.copy()
        for e in l:
            if e in s:
                res.append(e)
                s.remove(e)
        return res
0b43367f178e6f69b40c62bc67af760c82ef9206
678,981
from pathlib import Path


def get_resource_path(resource_name):
    """Return path to module resource dir.

    :return: Path to module resource dir.
    :rtype: Path()
    """
    bundle_dir = Path(__file__).parent
    return Path.cwd() / bundle_dir / 'resources' / resource_name
87260ab42d86ca3b69c4b1a5dcabb64951c7ab29
240,729
async def _reset_notif_task_store(**kwargs):
    """Reset notification_task_store to default values (triggers notifications)."""
    client = kwargs["client"]
    client.ds.set_notification_task_vars(False, {})
    return "Reset notification_task_store"
6e64236b22e9766c26ac693278cdd466cdd3c4d3
466,100
def fetch_samples(prj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively selected
    against. That is, it makes no sense and is not allowed to specify both
    selector_include and selector_exclude protocols. On the other hand, if
    neither is provided, all of the Project's Samples are returned. If
    selector_include is specified, Samples without a protocol will be
    excluded, but if selector_exclude is specified, protocol-less Samples
    will be included.

    :param Project prj: the Project with Samples to fetch
    :param str selector_attribute: name of attribute on which to base the fetch
    :param Iterable[str] | str selector_include: protocol(s) of interest;
        if specified, a Sample must match one of these to be retained
    :param Iterable[str] | str selector_exclude: protocol(s) to exclude
    :return list[Sample]: Collection of this Project's samples with protocol
        that either matches one of those in selector_include, or either lacks
        a protocol or does not match one of those in selector_exclude
    :raise TypeError: if both selector_include and selector_exclude protocols
        are specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2;
        also possible if name of attribute for selection isn't a string
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(prj.samples)

    if not isinstance(selector_attribute, str):
        raise TypeError(
            "Name for attribute on which to base selection isn't string: "
            "{} ({})".format(selector_attribute, type(selector_attribute)))

    # At least one of the samples has to have the specified attribute
    if prj.samples and not any(
            [hasattr(s, selector_attribute) for s in prj.samples]):
        raise AttributeError(
            "The Project samples do not have the attribute '{attr}'"
            .format(attr=selector_attribute))

    # Intersection between selector_include and selector_exclude is
    # nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError(
            "Specify only selector_include or selector_exclude parameter, "
            "not both.")

    # Ensure that we're working with sets.
    def make_set(items):
        if isinstance(items, str):
            items = [items]
        return items

    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such an exception to
    # arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) \
                or getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) \
                and getattr(s, selector_attribute) in make_set(selector_include)

    return list(filter(keep, prj.samples))
03b1e006cdf4dc83075930b110bf2dac0d84d695
529,548
def set_size(width, fraction=1):
    """
    Set aesthetic figure dimensions to avoid scaling in latex.

    :param float width: Width in pts
    :param float fraction: Fraction of the width which you wish the figure to occupy

    :return fig_dim: Dimensions of figure in inches
    :rtype: Tuple[float, float]
    """
    fig_width_pt = width * fraction
    inches_per_pt = 1 / 72.27
    golden_ratio = (5 ** 0.5 - 1) / 2
    fig_width_in = fig_width_pt * inches_per_pt
    fig_height_in = fig_width_in * golden_ratio
    return (fig_width_in, fig_height_in)
9853916c7d9d12dcc1e149988bf118c85058b462
552,423
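A worked example, assuming the common 345 pt \textwidth of a one-column LaTeX article: 345 pt / 72.27 is about 4.77 in wide, times the golden ratio gives about 2.95 in tall.

w, h = set_size(345)
print(round(w, 2), round(h, 2))  # 4.77 2.95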
import re


def fix_links(chunk, tag2file):
    """Find and fix the destinations of hyperlinks using HTML or markdown syntax

    Fix any link in a string text so that they can target a different html
    document. First use regex on a HTML text to find any HTML or markdown
    hyperlinks (e.g. <a href="#sec1"> or [sec1](#sec1)). Then use a
    dictionary to prepend the filename to the value of a link's href
    attribute (e.g. <a href="02_jupyterbook.html#sec1">).

    :param str chunk: text string
    :param dict tag2file: dictionary mapping a tag to a file basename
        e.g. tag2file['sec1']='02_jupyterbook'
    :return: chunk with fixed links
    :rtype: str
    """
    chunk_out = chunk
    # html links
    pattern_tag = r'[\w _\-:]'
    pattern = r'<' + pattern_tag + '+ href=[\\\]{0,2}["\']#(' + pattern_tag + '+)[\\\]{0,2}["\'][^>]*>'
    for m in re.finditer(pattern, chunk):
        match = m.group()
        tag = m.group(1)
        fixed_tag = match.replace('#' + tag, tag2file.get(tag, tag) + '.html#' + tag)
        chunk_out = chunk_out.replace(match, fixed_tag)
    # markdown links
    pattern = r'\[' + pattern_tag + '+\]\(#(' + pattern_tag + '+)\)'
    for m in re.finditer(pattern, chunk):
        match = m.group()
        tag = m.group(1)
        fixed_tag = match.replace('#' + tag, tag2file.get(tag, tag) + '.html#' + tag)
        chunk_out = chunk_out.replace(match, fixed_tag)
    return chunk_out
5816ba69c2ee04427fbde8f355eb8c4c379805fb
686,276
def convert_subscripts(old_sub, symbol_map):
    """Convert user custom subscripts list to subscript string according to
    `symbol_map`.

    Examples
    --------
    >>> oe.parser.convert_subscripts(['abc', 'def'], {'abc':'a', 'def':'b'})
    'ab'
    >>> oe.parser.convert_subscripts([Ellipsis, object], {object:'a'})
    '...a'
    """
    new_sub = ""
    for s in old_sub:
        if s is Ellipsis:
            new_sub += "..."
        else:
            # no need to try/except here because symbol_map has already been checked
            new_sub += symbol_map[s]
    return new_sub
4a97647cb14726ba3b0bc109c626b8c8cc1b5815
668,294
import torch


def load_checkpoint(model, optimizer, scheduler, device, checkpoint_file: str):
    """Loads a model checkpoint.

    Params:
    - model (nn.Module): initialised model
    - optimizer (nn.optim): initialised optimizer
    - scheduler: initialised scheduler
    - device (torch.device): device model is on

    Returns:
    - model with loaded state dict
    - optimizer with loaded state dict
    - scheduler with loaded state dict
    """
    checkpoint = torch.load(checkpoint_file, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    scheduler.load_state_dict(checkpoint["scheduler"])
    print(f"Loaded {checkpoint_file}, "
          f"trained to epoch {checkpoint['epoch']+1} with best loss {checkpoint['best_loss']}")
    return model, optimizer, scheduler
65cd9c6cbdc6cdd95ef0a616ee1219b1182c2160
387,569
async def async_setup(hass, config):
    """Register the built-in map panel."""
    await hass.components.frontend.async_register_built_in_panel(
        'map', 'Map', 'mdi:account-location')
    return True
4144dc1af9fc2fe8ce27818fca75bec8801715d0
481,678