Dataset schema: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def build_tile_url(
    image_path: str,
    rgb: bool = False,
    url_root: str = "/",
    is_product: bool = False,
) -> str:
    """Build a Tile URL for the given image path.

    Args:
        image_path: the path to the image to be processed
        rgb (optional): if the path should be to the DEM RGB version of tiles
        url_root (optional): root under which the tile URL is built
        is_product (optional): if the tiles live under the "products/" prefix

    Returns:
        The URL for the specified tile set.
    """
    prefix = "products/" if is_product else ""
    return (
        f"{url_root}{prefix}{'rgb' if rgb else 'tile'}/{image_path}/"
        + r"{z}/{x}/{y}.png"
    )
89e83a2fe0ead5879a6cc0ef998a5a2cb8eb2bbd
171,011
def summarize_packet(pkt):
    """
    Summarizes a packet into a human-legible string.

    Five key fields are represented in the summary - source/destination IP,
    source/destination MAC address, source/destination TCP/UDP port, and
    the transport protocol in use (such as TCP/UDP). If any of the five key
    fields are missing, they are replaced with the text "None".

    Args:
        pkt: A pyshark.packet object that will be summarized

    Returns:
        String representing the summary of a packet
    """
    try:
        l4_protocol = pkt[2].name
    except (AttributeError, IndexError):
        l4_protocol = "None"
    try:
        src_mac = pkt["Ethernet"].src
    except (AttributeError, IndexError):
        src_mac = "None"
    try:
        dst_mac = pkt["Ethernet"].dst
    except (AttributeError, IndexError):
        dst_mac = "None"
    try:
        src_ip = pkt["IP"].src
    except (AttributeError, IndexError):
        src_ip = "None"
    try:
        dst_ip = pkt["IP"].dst
    except (AttributeError, IndexError):
        dst_ip = "None"
    try:
        src_port = pkt[2].sport
    except (AttributeError, IndexError):
        src_port = "None"
    try:
        dst_port = pkt[2].dport
    except (AttributeError, IndexError):
        dst_port = "None"
    try:
        app_protocol = pkt.lastlayer().name
    except (AttributeError, IndexError):
        app_protocol = "Unknown"
    return ("{!s: <5} ({!s: <7}) {!s: <17} {!s: >15}:{!s: <6} -> "
            "{!s: >15}:{!s: <6} {!s: <17}").format(
                l4_protocol, app_protocol, src_mac, src_ip, src_port,
                dst_ip, dst_port, dst_mac)
7e56e3c36b84ff43c7e00cf6e082e062f29d0d11
576,603
import json


def platform2gates(platform_fname, gates_fname):
    """Tries to convert an OpenQL platform JSON file to a gatemap JSON file
    for use with the DQCsim operator wrapper for OpenQL. Heuristics are
    applied to convert common gate names to DQCsim equivalents, but you may
    need to do some manual adjusting."""

    # Load platform description file.
    with open(platform_fname, 'r') as f:
        data = json.loads(f.read())

    # Find all instruction names defined in the platform description file.
    insns = []
    for name in data.get('instructions', []):
        insns.append(name.split()[0])
    for name, decomp in data.get('gate_decomposition', {}).items():
        insns.append(name.split()[0])
        for name in decomp:
            insns.append(name.split()[0])
    if not insns:
        print('No instructions found!')

    # Uniquify without losing order.
    new_insns = []
    seen = set()
    for insn in insns:
        if insn not in seen:
            seen.add(insn)
            new_insns.append(insn)
    insns = new_insns

    # Try to map based on the OpenQL names.
    unknown_gates = set()

    def to_json_line(openql):
        dqcsim = {
            'i': '"I"', 'x': '"X"', 'y': '"Y"', 'z': '"Z"',
            'h': '"H"', 's': '"S"', 'sdag': '"S_DAG"', 't': '"T"', 'tdag': '"T_DAG"',
            'x90': '"RX_90"', 'xm90': '"RX_M90"', 'mx90': '"RX_M90"', 'x180': '"RX_180"',
            'rx90': '"RX_90"', 'rxm90': '"RX_M90"', 'rx180': '"RX_180"', 'rx': '"RX"',
            'y90': '"RY_90"', 'ym90': '"RY_M90"', 'my90': '"RY_M90"', 'y180': '"RY_180"',
            'ry90': '"RY_90"', 'rym90': '"RY_M90"', 'ry180': '"RY_180"', 'ry': '"RY"',
            'z90': '"RZ_90"', 'zm90': '"RZ_M90"', 'mz90': '"RZ_M90"', 'z180': '"RZ_180"',
            'rz90': '"RZ_90"', 'rzm90': '"RZ_M90"', 'rz180': '"RZ_180"', 'rz': '"RZ"',
            'swap': '"SWAP"', 'move': '"SWAP"', 'sqswap': '"SQSWAP"', 'sqrtswap': '"SQSWAP"',
            'cx': '"C-X"', 'ccx': '"C-C-X"', 'cy': '"C-Y"', 'ccy': '"C-C-Y"',
            'cz': '"C-Z"', 'ccz': '"C-C-Z"', 'cphase': '"C-PHASE"', 'ccphase': '"C-C-PHASE"',
            'cnot': '"C-X"', 'ccnot': '"C-C-X"', 'toffoli': '"C-C-X"',
            'cswap': '"C-SWAP"', 'fredkin': '"C-SWAP"',
            'meas': '"measure"',
            'measx': '{\n "type": "measure",\n "basis": "x"\n }',
            'measy': '{\n "type": "measure",\n "basis": "y"\n }',
            'measz': '"measure"',
            'prep': '"prep"',
            'prepx': '{\n "type": "prep",\n "basis": "x"\n }',
            'prepy': '{\n "type": "prep",\n "basis": "y"\n }',
            'prepz': '"prep"',
        }.get(
            openql
            .replace('_', '')
            .replace('-', '')
            .replace('measure', 'meas')
            .lower(), None)
        if dqcsim is None:
            unknown_gates.add(openql)
            dqcsim = '{\n UNKNOWN?\n }'
        openql = '"{}":'.format(openql)
        return ' {} {},'.format(openql, dqcsim)

    # Construct the output file.
    output = ['{']
    for insn in insns:
        output.append(to_json_line(insn))
    if insns:
        # Strip the trailing comma from the last gate line. (Testing `output`
        # here, as the original did, is always true and would eat the opening
        # brace when no instructions exist.)
        output[-1] = output[-1][:-1]
    output.append('}')
    output = '\n'.join(output)

    # Write the output file.
    with open(gates_fname, 'w') as f:
        f.write(output)

    # Report result.
    if unknown_gates:
        print('The following gates were not automatically recognized:')
        print()
        for gate in sorted(unknown_gates):
            print(' - {}'.format(gate))
        print()
        print("You'll need to edit the output file!")
    else:
        print('All gates were heuristically recognized! Double-check the file, though.')
4c0c428e7eda3ce524f58beaafe0efdb5be779e0
127,526
def parse_control(c):
    """
    Parse a carla.VehicleControl to a json object.
    """
    return {
        "brake": c.brake,
        "gear": c.gear,
        "hand_brake": c.hand_brake,
        "manual_gear_shift": c.manual_gear_shift,
        "reverse": c.reverse,
        "steer": c.steer,
        "throttle": c.throttle
    }
7635b5f0d2835a2297e798984510c9be2b1230c4
491,446
def eval_f(f, xs):
    """Takes a function f = f(x) and a list xs of values that should be used
    as arguments for f. The function eval_f should apply the function f
    subsequently to every value x in xs, and return a list fs of function
    values. I.e. for an input argument xs=[x0, x1, x2,..., xn] the function
    eval_f(f, xs) should return [f(x0), f(x1), f(x2), ..., f(xn)]."""
    return [f(x) for x in xs]  # alternatively: return list(map(f, xs))
00c6ed7fc59b213a3ec9fec9feeb3d91b1522061
1,874
import requests


def get_download_id(pql_query: str, datatype: str = "ssm", output_format: str = "TSV") -> str:
    """
    Calls an ICGC Data Portal API to retrieve a download ID for the dataset
    specified by a PQL query, a data type, and an output format.

    :param pql_query: PQL query to retrieve the dataset of interest.
    :param datatype: data types e.g., "ssm" for simple somatic mutation,
        "donor" for clinical dataset, "cnsm" for copy number somatic
        mutation, etc.
    :param output_format: output data format. Supported formats: ["json", "TSV"].
    :return: a download ID
    """
    info = f"[{{\"key\":\"{datatype}\", \"value\":\"{output_format}\"}}]"
    url = f"https://dcc.icgc.org/api/v1/download/submitPQL?pql={pql_query}&info={info}"
    response = requests.get(url)
    if response.status_code != 200:
        raise IOError(f"GET {url} resulted in status code {response.status_code}")
    return response.json()["downloadId"]
8bc7cbdbccc4281532d8443cdfce506262d1412c
562,521
import torch


def exponential_transition(perturbations, norm, epsilon=0.3, gamma=1):
    """
    Exponential transition rule.

    :param perturbations: perturbations
    :type perturbations: torch.autograd.Variable
    :param norm: norm
    :type norm: attacks.norms.Norm
    :param epsilon: epsilon (unused by this rule)
    :type epsilon: float
    :param gamma: gamma
    :type gamma: float
    :return: transition probabilities, norms
    :rtype: torch.autograd.Variable, torch.autograd.Variable
    """
    norms = norm(perturbations)
    return 1 - torch.exp(-gamma * norms), norms
fe18820cdf9543d0c4928543d8174107de224694
582,921
def _create_vn_str(novice_status):
    """Creates varsity-novice status string from the integer pseudo-enum
    used by the model"""
    # Compare with ==, not "is": identity checks on ints are unreliable and
    # raise a SyntaxWarning on modern Python.
    if novice_status == 0:
        return "varsity"
    if novice_status == 1:
        return "novice"
    return novice_status
d00892ee1ad1fb88f07818e358a73b73864e9f6f
318,038
from typing import Optional
from typing import BinaryIO
import io
import tarfile


def merge_tar_gz(a: Optional[BinaryIO], b: Optional[BinaryIO]) -> Optional[BinaryIO]:
    """Merge <a> and <b>, returning a new io.BytesIO containing the merged
    .tar.gz archive. If two files in <a> and <b> have the same name, the one
    in <a> prevails. Both a and b can be safely closed after this function.

    Returns
        None       - If both arguments are None.
        io.BytesIO - A copy of <a> if <b> is None, a copy of <b> if <a> is
                     None; otherwise a BytesIO, cursor set at the start,
                     corresponding to the merging of <a> into <b>
                     (overwriting files with the same name)."""
    if a is None:
        return None if b is None else io.BytesIO(b.read())
    if b is None:
        return io.BytesIO(a.read())
    destio = io.BytesIO()
    with tarfile.open(fileobj=a, mode="r:gz") as t1, \
            tarfile.open(fileobj=b, mode="r:gz") as t2, \
            tarfile.open(fileobj=destio, mode="w:gz") as dest:
        t1_members = t1.getmembers()
        t1_names = t1.getnames()
        t2_members = [m for m in t2.getmembers() if m.name not in t1_names]
        for member in t1_members:
            if member.isdir():
                dest.addfile(member)
            else:
                dest.addfile(member, t1.extractfile(member))
        for member in t2_members:
            if member.isdir():
                dest.addfile(member)
            else:
                dest.addfile(member, t2.extractfile(member))
    destio.seek(0)
    return destio
6af11f95565992f8e0e49ec88eafc0cc5d5215f4
358,874
def make_zero_based_midi(defs):
    """
    The official MIDI spec is 1 based (why???), but clearly most things are
    0 based. So this function shifts all of the program numbers down by one
    and keeps 0 as piano.

    :param defs: instrument definitions keyed by name, each carrying a
        'program_numbers' list
    :return: the same dict with zero-based program numbers
    """
    for k, v in defs.items():
        pgms = [max(i - 1, 0) for i in v['program_numbers']]
        defs[k]['program_numbers'] = pgms
    return defs
305b6e6b48b116a8d86ec02036a1218d8a88070d
47,392
def intdivceil(x, y):
    """
    Returns the exact value of ceil(x / y).
    No floating point calculations are used.
    Requires positive integer types. The result
    is undefined if at least one of the inputs
    is floating point.
    """
    result = x // y
    if (x % y):
        result += 1
    return result
bf5ee4b9b9436c698dfcb0fffb60e8034a273ce3
121,828
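A quick usage sketch for intdivceil above; the values are illustrative.

# intdivceil usage (illustrative values):
assert intdivceil(10, 5) == 2   # exact division, no rounding
assert intdivceil(11, 5) == 3   # remainder present, rounds up
assert intdivceil(1, 710) == 1  # ceil of any positive fraction is at least 1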
from collections import OrderedDict

import six


def ordered_recurse(value):
    """Recursively order nested dicts and lists at all levels."""
    if isinstance(value, list):
        return [ordered_recurse(v) for v in value]
    if isinstance(value, dict):
        # Distinct loop variable names, so the dict being iterated is not
        # shadowed inside the generator expression.
        items = sorted(
            ((k, ordered_recurse(v)) for k, v in six.iteritems(value)),
            key=lambda i: i[0],
        )
        return OrderedDict(items)
    return value
29331fcb7fe447ae329b91bd8a1dae7b6fa12160
228,428
def _get_queue_arguments(conf):
    """Construct the arguments for declaring a queue.

    If the rabbit_ha_queues option is set, we declare a mirrored queue
    as described here:

        http://www.rabbitmq.com/ha.html

    Setting x-ha-policy to all means that the queue will be mirrored
    to all nodes in the cluster.
    """
    return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
ccb27dbdd1123869ba9a22c311833b95d2f2b380
329,500
from typing import Union
from pathlib import Path
from typing import Dict

import yaml


def read_config(path: Union[str, Path]) -> Dict:
    """
    Reading configuration file based on the given path

    :param path: str or Path, path to the config file
    :return: Dict, dictionary generated by the config file which has a YAML type
    """
    print(f"Reading YAML config file from: {str(path)}")
    with Path(path).open(mode="r") as stream:
        config: Dict = yaml.safe_load(stream=stream)
    return config
8d10f3b65bf2671b7e0c27644c63cc3916bc132b
443,086
def clean_text(identifica, ementa, fulltext):
    """
    Given a DOU article's titles `identifica` and abstract `ementa`, remove
    the first title in `identifica`, the `ementa` and the hard-coded
    footnote from the full text of the DOU article `fulltext`.
    """
    # ATTENTION: this code should reproduce the cleaning performed in BigQuery,
    # since its resulting text was used to train the model.
    if identifica is None:
        return fulltext
    if fulltext is None:
        return None

    # Remove the first title that appears in the article (and everything before it):
    first_identifica = identifica.split(' | ')[0]
    text_pos = fulltext.find(first_identifica) + len(first_identifica)
    cleaned = fulltext[text_pos:]

    # Remove the footer:
    cleaned = cleaned.replace('Este conteúdo não substitui o publicado na versão certificada.', '')

    if ementa is None:
        return cleaned

    # Remove the abstract (ementa):
    cleaned = cleaned.replace(ementa, '')

    return cleaned
06d3b3db514dfa9a410ba22d7c5585fb42752e16
629,954
import functools

import aiohttp


def http_session_method(f):
    """Decorator for a method that uses an async http session"""
    @functools.wraps(f)
    async def wrapper(self, *args, **kwargs):
        async with aiohttp.ClientSession() as session:
            return await f(self, session, *args, **kwargs)
    return wrapper
4473073e70a61ebcf7d9a56401467fc69139aa9d
422,353
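A minimal usage sketch for the http_session_method decorator above; ApiClient and the URL are hypothetical names, not from the source.

import asyncio

class ApiClient:
    @http_session_method
    async def fetch_status(self, session, url):
        # `session` is injected by the decorator as the first argument
        # after self; callers only pass `url`.
        async with session.get(url) as resp:
            return resp.status

# asyncio.run(ApiClient().fetch_status('https://example.com'))  # e.g. 200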
def mc_to_float(value):
    """
    Convert from millicents to float

    Args:
        value: millicent number

    Returns:
        value in float
    """
    return float(value) / (100 * 1000)
9c72ca3b625a5caaaca8d1a73c2a96bf1117bf11
674,949
def indices_to_sentence(indices, vocabulary):
    """ Turn a list of indices of a vocabulary into a sentence. """
    words = [vocabulary[idx] for idx in indices]
    return " ".join(words)
e01449b49ad11f6c1f513e273e65e8cbf2b44f6b
237,571
import csv


def _get_dsv_reader(dsv_file):
    """
    Detect the delimiter used in opened dsv (delimiter-separated values)
    file and return csv.DictReader object initialized to iterate over file
    contents

    Args:
        dsv_file(file object): opened dsv file object

    Returns:
        csv.DictReader: reader object initialized to iterate over file contents
    """
    dialect = csv.Sniffer().sniff(dsv_file.readline())
    dsv_file.seek(0)
    return csv.DictReader(dsv_file, dialect=dialect, quoting=csv.QUOTE_NONE)
e0e3d51b63dde582339c6fbadca32c10396a2fad
517,286
def task_builder(generator, n_train=1000, n_test=250):
    """
    creates the task generator

    :generator: generator for the wanted task
    :n_train: number of samples to generate for training
    :n_test: number of samples to generate for testing
    :returns: dictionary of the task of generated samples
    """
    task = {'train': [], 'test': []}
    for _ in range(n_train):
        inp, out = generator()
        task['train'].append({'input': inp, 'output': out})
    for _ in range(n_test):
        inp, out = generator()
        task['test'].append({'input': inp, 'output': out})
    return task
77455dfdafc736ddb14dfeab0d4f50daf05ab133
454,553
from typing import Tuple


def parse_project_name(name: str, ssh: bool = True) -> Tuple[str, str, str, bool]:
    """Parse the name argument for get_project

    Returns (name, url, branch, is_url). If name is not a full url, the
    returned url will be a https or ssh url depending on the boolean
    argument ssh.
    """
    # This is split off the actual command function for
    # unit testing purposes
    if ':' in name:
        pieces = name.split(':')
        if len(pieces) >= 3:
            name = ':'.join(pieces[:-1])
            branch = pieces[-1]
        elif 'http' in pieces[0] or '@' in pieces[0]:
            branch = ''
        else:
            name, branch = pieces
    else:
        branch = ''
    if not name.startswith(('git@', 'http')):
        if '/' not in name:
            org_name = 'leanprover-community/' + name
        else:
            org_name, name = name, name.split('/')[1]
        if ssh:
            url = '[email protected]:' + org_name + '.git'
        else:
            url = 'https://github.com/' + org_name + '.git'
        is_url = False
    else:
        url = name
        name = name.split('/')[-1].replace('.git', '')
        is_url = True
    return name, url, branch, is_url
826a4b024a8aef448964e211b2e558930d6e0919
559,667
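A few illustrative calls to parse_project_name above, with outputs inferred from the code:

print(parse_project_name('mathlib'))
# ('mathlib', '[email protected]:leanprover-community/mathlib.git', '', False)
print(parse_project_name('mathlib:master', ssh=False))
# ('mathlib', 'https://github.com/leanprover-community/mathlib.git', 'master', False)
print(parse_project_name('https://github.com/foo/bar.git'))
# ('bar', 'https://github.com/foo/bar.git', '', True)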
def createZeroMat(m, n):
    """Return a matrix (m x n) filled with zeros."""
    ret = [0] * m
    for i in range(m):
        ret[i] = [0] * n
    return ret
30fc0debca3b75fcec07b5ef7953c12a0a63e685
316,449
def clamp(num, min_val, max_val):
    """Clamps `num` to the range [`min_val`..`max_val`]."""
    return max(min_val, min(num, max_val))
8e15e4f972b150b6b3eca825ae691065ffb5821e
563,099
def split_semicolon_filter(s):
    """
    Filter to take semicolon-delimited text and convert it to a list
    """
    if s is not None:
        return s.strip().split(';')
    return None
4805f7519daed31fd82fcfc78b6ccdbc0fc132fe
120,824
import torch


def effective_sample_size(log_weights):
    """Kish effective sample size; log weights don't have to be normalized"""
    return torch.exp(2 * torch.logsumexp(log_weights, dim=0)
                     - torch.logsumexp(2 * log_weights, dim=0))
1979a33f040c9d703e4bbaf8338b7b260ddbb223
590,868
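A small sanity check for effective_sample_size above: equal log-weights give an ESS equal to the sample count, while one dominant weight drives it toward 1.

import torch
print(effective_sample_size(torch.zeros(100)))                   # tensor(100.)
print(effective_sample_size(torch.tensor([0.0, -50.0, -50.0])))  # ~tensor(1.)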
def enable_squash(input):
    """
    Convert long specific enable strings to 'enabled'

    Takes in a dictionary of parsed output, iterates over the keys and looks
    for key names containing the string "enabled" at the end of the key
    name. Specifically the end of the key name is matched for safety.
    Replaces the key with simply "enabled", for example an input dictionary::

        {"Path-selection enabled": False}

    becomes::

        {"enabled": False}

    :param input: A dictionary of parsed output
    :return result: A dictionary with keys ending in "enabled" replaced with
        just "enabled"
    """
    result = {}
    for key, value in input.items():
        if key.endswith('enabled'):
            if 'enabled' in result:
                raise KeyError('Duplicate key exists')
            result['enabled'] = value
        else:
            result[key] = value
    return result
f3101af4be36b99ab95c0922162c38d0be1231a9
275,624
import torch


def get_ans_idx(p_s, p_e, max_len=15, no_answer=False):
    """Discretize soft predictions (probs) to get start and end indices

    Choose (i, j) which maximizes p1[i]*p2[j],
    s.t. (i <= j) & (j - i + 1 <= max_len)

    Args:
        p_s: [batch_size, c_len], probs for start index
        p_e: [batch_size, c_len], probs for end index
        max_len: max length of the answer prediction
        no_answer (bool): Treat 0-idx as the no-answer prediction. Consider
            a prediction no-answer if preds[0,0]*preds[0,1] > the prob
            assigned to the max-probability span

    Returns:
        s_idxs: [batch_size], hard predictions for start index
        e_idxs: [batch_size], hard predictions for end index
    """
    c_len = p_s.shape[1]
    device = p_s.device

    if p_s.min() < 0 or p_s.max() > 1 or p_e.min() < 0 or p_e.max() > 1:
        raise ValueError('Expected p_start and p_end to have values in [0, 1]')

    # Compute pairwise probs
    p_s = p_s.unsqueeze(2)        # [batch_size, c_len, 1]
    p_e = p_e.unsqueeze(1)        # [batch_size, 1, c_len]
    p_join = torch.bmm(p_s, p_e)  # [batch_size, c_len, c_len]

    # Restrict (i, j) s.t. (i <= j) & (j - i + 1 <= max_len)
    is_legal_pair = torch.triu(torch.ones((c_len, c_len), device=device))
    is_legal_pair = is_legal_pair - torch.triu(
        torch.ones((c_len, c_len), device=device), diagonal=max_len)
    if no_answer:
        p_no_answer = p_join[:, 0, 0].clone()
        is_legal_pair[0, :] = 0
        is_legal_pair[:, 0] = 0
    else:
        p_no_answer = None
    p_join = p_join * is_legal_pair

    # Obtain (i, j) which maximizes p_join
    max_each_row, _ = torch.max(p_join, dim=2)  # [batch_size, c_len]
    max_each_col, _ = torch.max(p_join, dim=1)  # [batch_size, c_len]
    s_idxs = torch.argmax(max_each_row, dim=1)  # [batch_size]
    e_idxs = torch.argmax(max_each_col, dim=1)  # [batch_size]

    # Predict no-answer whenever p_no_answer > max_prob
    if no_answer:
        max_prob, _ = torch.max(max_each_col, dim=1)
        s_idxs[p_no_answer > max_prob] = 0
        e_idxs[p_no_answer > max_prob] = 0

    return s_idxs, e_idxs
c249c5d998eff805640f103a59bdf101351ae7a9
262,864
def dump_dataclass_object(packet_obj: object) -> bytes:
    """Given a dataclass object, will return it as bytes

    Args:
        packet_obj: A dataclass object

    Returns:
        bytes representing given dataclass object
    """
    raw_packet = bytearray()
    for _, value in packet_obj.__dict__.items():
        raw_packet += value
    # Convert the mutable bytearray to bytes to match the return annotation.
    return bytes(raw_packet)
03bacd16e3ce3772037b443ae5509feec5f16cc4
215,685
def _validate_timezone(hours_offset: int):
    """Creates an int from -48 to 36 that represents the timezone offset
    in 15-minute increments as per the FlashAir docs"""
    param_value = int(hours_offset * 4)
    assert -48 <= param_value <= 36
    return param_value
e2d7dd25c0252cc8dca24ad7dbbbbf8287667a05
119,457
def dataToString(var, data):
    """Given a tuple of data, and a name to save it as
    returns var <- c(data)
    """
    # convert data to strings
    strs = [str(item) for item in data]
    return "%s <- c(%s)" % (var, ",".join(strs))
dcfb8e443f0a8f8c783047190822c7194104fc44
689,155
def _is_private_port_speed_item(item):
    """Determine if the port speed item is private network only."""
    for attribute in item['attributes']:
        if attribute['attributeTypeKeyName'] == 'IS_PRIVATE_NETWORK_ONLY':
            return True
    return False
0ba18cf3a78204ab13ca10f319e37a5b2970a4bb
256,465
def GetStateMessagesStrings(state_messages):
    """Returns the list of string representations of the state messages."""
    # A list comprehension (rather than map) so a list is returned on
    # Python 3 as well, as the docstring promises.
    return ['[{}] {}'.format(str(st.severity), st.message)
            for st in state_messages]
46442e1edc43ad44e23472ae30dc04b6dfa1c04e
296,066
def get_uncrop(img, size, h_cent):
    """
    Returns a method to convert cropped coordinate to full frame coordinate.

    Parameters
    ----------
    img : numpy.ndarray
        The full framed image.
    size : int
        The size of the cropped images.
    h_cent : int
        The horizontal centre used for cropping.

    Returns
    -------
    uncrop : function
        A function to 'uncrop' coordinates.
    """
    ar = img.shape[1] / img.shape[0]
    shift = (size * (ar - 1)) / 2
    scale = img.shape[0] / size

    def uncrop(pt):
        new_x = int((pt[0] + shift) * scale)
        new_y = int(pt[1] * scale)
        return (new_x, new_y)

    return uncrop
d11aed5f5d368fbac15abc97b84b534d170838ea
204,555
def check_assignments(csp, var1, var2):
    """Check if all constraints between var1 and var2 are satisfied for the
    given assignment values"""
    return all(constraint.check(var1[1], var2[1])
               for constraint in csp.constraints_between(var1[0], var2[0]))
6e8fd48265cd5016819bedf8e2a022e1a3bdbe57
333,533
def _class_get_plugin(cls):
    """
    Class method that retrieves the plugin instance associated with the
    current model class. This method should not overlap the get plugin
    method on the instance, and you should use this one to retrieve the
    absolute related system instance, avoiding problems with inheritance.

    :rtype: Plugin
    :return: The plugin instance associated with the current model class.
    """
    return cls._system_instance.plugin
229736d4b5bb582cf3a4f538e617297156a52898
339,198
from typing import Union
from typing import Iterable
import re


def clean_uids(uid_set: Union[str, Iterable[str]]) -> str:
    """
    Prepare set of uid for use in IMAP commands
    uid RE patterns are not strict and allow invalid combinations, but simple.
    Example: 2,4:7,9,12:*

    :param uid_set:
        str, that is comma separated uids
        Iterable, that contains str uids
    :return: str - uids, concatenated by a comma
    """
    # str
    if type(uid_set) is str:
        if re.search(r'^([\d*:]+,)*[\d*:]+$', uid_set):  # *optimization for already good str
            return uid_set
        uid_set = uid_set.split(',')
    # Iterable
    try:
        uid_set_iter = iter(uid_set)
    except TypeError:
        raise TypeError('Wrong uid_set arg type: "{}"'.format(type(uid_set)))
    # check uid types
    for uid in uid_set_iter:
        if type(uid) is not str:
            raise TypeError('uid "{}" is not string'.format(str(uid)))
        if not re.match(r'^[\d*:]+$', uid.strip()):
            raise TypeError('Wrong uid: "{}"'.format(uid))
    return ','.join(i.strip() for i in uid_set)
27460ff5a34785ecd1bcb0febdb4a830f142ac08
586,937
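Illustrative inputs for clean_uids above:

print(clean_uids('1,2:4,9,12:*'))       # '1,2:4,9,12:*' (already valid, returned as-is)
print(clean_uids([' 1 ', '2:4', '*']))  # '1,2:4,*' (items stripped, then joined)
# clean_uids([5]) raises TypeError: uid "5" is not string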
import functools
import time


def retry(times, exceptions, scaling_duration=1):
    """
    Retry Decorator

    Retries the wrapped function/method `times` times if the exceptions
    listed in `exceptions` are thrown.

    :param times: The number of times to repeat the wrapped function/method
    :type times: Int
    :param exceptions: Exceptions that trigger a retry attempt
    :type exceptions: Tuple of Exceptions
    :param scaling_duration: Multiplier (in seconds) for the linear back-off
        between attempts
    :type scaling_duration: Int
    """
    def decorator(func):
        @functools.wraps(func)
        def newfn(*args, **kwargs):
            attempt = 0
            while attempt < times:
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    print(
                        "Exception thrown when attempting to run %s, attempt "
                        "%d of %d" % (func, attempt, times)
                    )
                    attempt += 1
                    time.sleep(attempt * scaling_duration)
            return func(*args, **kwargs)
        return newfn
    return decorator
3d456ac1d4e2e487324f00f310af584fed60f763
354,558
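A usage sketch for the retry decorator above; flaky_fetch is a hypothetical function that fails twice before succeeding.

calls = {'n': 0}

@retry(times=3, exceptions=(ConnectionError,), scaling_duration=0)
def flaky_fetch():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ConnectionError('transient failure')
    return 'ok'

print(flaky_fetch())  # prints two retry messages, then 'ok'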
import binascii


def stringio_to_base64(stringio_obj):
    """Get base64 encoded version of a BytesIO object."""
    return binascii.b2a_base64(stringio_obj.getvalue())
b8162a396ec2b2d7ea22ade93c06df4f2d001a7f
337,142
def filter_frontier(frontier, pred_fns, cfg_pred_fns=None):
    """Filter out points from the frontier that don't pass all the pred_fns.

    If a pred_fn is None, ignore that metric.
    """
    new_frontier = []
    for result in frontier:
        cfg, qos = result
        keep = True
        if not all(f(x) for x, f in zip(qos, pred_fns) if f is not None):
            keep = False
        if cfg_pred_fns is not None:
            if not all(f(x) for x, f in zip(cfg, cfg_pred_fns) if f is not None):
                keep = False
        if keep:
            new_frontier.append(result)
    return new_frontier
9719fb563271957eb64f65d4c1025d970f93478d
599,358
def get_value_safe(d=None, key=None):
    """
    Return value of a given dictionary for a key.

    @return: value for a key, None otherwise
    """
    if d is None or key is None:
        return None
    if key not in d:
        return None
    return d[key]
02201527fc100ef63f4731663f567fd1b5613867
682,170
def Ndavies(r, g, eta, drho, rho):
    """Davies (Best) number

    Args:
        r: particle size (cm)
        g: gravity (cm/s2)
        eta: dynamic viscosity (g/s/cm)
        drho: density difference between condensates and atmosphere (g/cm3)
        rho: atmosphere density (g/cm3)

    Returns:
        Davies number (Best number)
    """
    return 32.0 * g * r ** 3 * drho * rho / (3.0 * eta ** 2)
b7eb8f85ade07495aaec34fc1c207782a7d771e0
290,661
import random


def random_matrix(nrow, colmean, colsd):
    """Generate a random matrix of independent Gaussians, with nrow being
    the number of rows, and with colmean and colsd being vectors of column
    means and SDs, respectively."""
    assert len(colmean) == len(colsd)
    assert nrow > 0
    ncol = len(colmean)
    # generate data as matrix, using nested list comprehension
    x = [[random.gauss(colmean[col], colsd[col]) for col in range(ncol)]
         for row in range(nrow)]
    return x
6199a8df5ee351459d696dd718351b26c397f57f
364,200
def derivative_colors(colors):
    """Return the names of valid color variants, given the base colors."""
    return set([('on_' + c) for c in colors] +
               [('bright_' + c) for c in colors] +
               [('on_bright_' + c) for c in colors])
e29e683db530a25a0604c5dd234df68f935117be
435,676
def moving_avg(xyw, avg_len):
    """
    Calculate a moving average for a given averaging length

    :param xyw: output from collapse_into_single_dates
    :type xyw: dict
    :param avg_len: average of these number of points, i.e., look-back window
    :type avg_len: int
    :return: list of x values, list of y values
    :rtype: tuple
    """
    cumsum, moving_aves, x_final = [0], [], []
    for i, y in enumerate(xyw['y'], 1):
        cumsum.append(cumsum[i - 1] + y / xyw['w'][i - 1])
        if i >= avg_len:
            moving_ave = (cumsum[i] - cumsum[i - avg_len]) / avg_len
            moving_aves.append(moving_ave)
    x_final = [xyw['x'][i] for i in range(avg_len - 1, len(xyw['x']))]
    return x_final, moving_aves
5dbd2c596942638f0c0158e11c6b24e6f5a1ef4b
634,737
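A small illustrative input for moving_avg above; with unit weights it reduces to a plain moving average.

xyw = {'x': [1, 2, 3, 4], 'y': [2.0, 4.0, 6.0, 8.0], 'w': [1, 1, 1, 1]}
print(moving_avg(xyw, 2))  # ([2, 3, 4], [3.0, 5.0, 7.0])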
def perspective_request(perspective, comment):
    """ Generates a request to run the toxicity report"""
    analyze_request = {
        'comment': {'text': comment},
        'requestedAttributes': {'TOXICITY': {}, 'THREAT': {}, 'INSULT': {}}
    }
    response = perspective.comments().analyze(body=analyze_request).execute()
    return response
76f81fdce4796906e088f4eb5362a2f9e8ab9e6d
185,218
def remove_quotes(string):
    """ Remove all single and double quotes"""
    return string.replace("'", "").replace('"', '')
3f0b5a6afe50f261bd2563c6ae4165bfd2481c0e
593,664
import struct


def pack_word(word, big_endian=False):
    """
    Packs a 32-bit word into a binary data string.
    """
    endian = ">" if big_endian else "<"
    return struct.pack("%sL" % endian, word)
303fd5aa6b23c0c2e212df6d880724574e7aee04
390,484
def _no_convert(_, start, end=None):
    """Dummy function for when no conversion is needed."""
    if end:
        return start, end
    return start, start
623d229f0d09396cf00ba684fe4020d6450c722c
598,350
from functools import reduce


def _bit_list_to_bytes(bits):
    """Convert a sequence of truthy values into a byte string, MSB first."""
    return bytes(
        reduce(lambda a, b: (a << 1) | b, (int(bool(x)) for x in byte_bits))
        for byte_bits in zip(*[iter(bits)] * 8)
    )
005f3b800204e4b2053d0ba7d534d8e62e1449c7
44,467
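A quick check of _bit_list_to_bytes above; bits are consumed MSB-first, eight at a time, and trailing bits that do not fill a whole byte are silently dropped by zip.

print(_bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1]))        # b'A' (0b01000001 = 0x41)
print(_bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1, 1, 1]))  # b'A' (last 2 bits dropped)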
def load_config_auth(config_parser):
    """Convert an auth cfparser config into a dictionary

    :param config_parser: configparser configuration
    :type config_parser: class:`configparser.ConfigParser`

    :return: configuration as a dictionary
    :rtype: dict
    """
    config = {'github': {'token': config_parser['github']['token']}}
    if config_parser.has_option('github', 'secret'):
        config['github']['secret'] = config_parser['github']['secret']
    return config
17d689791e4077e9b73981b8747ba239781e49ee
616,670
def sort_2metals(metals):
    """
    Handles iterable or string of 2 metals and returns them in alphabetical
    order

    Args:
        metals (str || iterable): two metal element names

    Returns:
        (tuple): element names in alphabetical order
    """
    # return None's if metals is None
    if metals is None:
        return None, None
    if isinstance(metals, str):
        if len(metals) != 4:
            raise ValueError('str can only have two elements.')
        metal1, metal2 = sorted([metals[:2], metals[2:]])
    else:
        metal1, metal2 = sorted(metals)
    return metal1.title(), metal2.title()
dab922797a6c7b94d6489d8fc4d9c1d99f3ee35c
2,188
def pretty_bytes(byte_value, base_shift=0):
    """Pretty-print the given bytes value.

    Args:
        byte_value (float): Value
        base_shift (int): Base value of byte_value
            (0 = bytes, 1 = KiB, 2 = MiB, etc.)

    Returns:
        str: Pretty-printed byte string such as "1.00 GiB"

    Examples: ::

        >>> pretty_bytes(512)
        '512 B'
        >>> pretty_bytes(512, 2)
        '512 MiB'
        >>> pretty_bytes(65536, 2)
        '64 GiB'
        >>> pretty_bytes(65547)
        '64.01 KiB'
        >>> pretty_bytes(65530, 3)
        '63.99 TiB'
        >>> pretty_bytes(1023850)
        '999.9 KiB'
        >>> pretty_bytes(1024000)
        '1000 KiB'
        >>> pretty_bytes(1048575)
        '1024 KiB'
        >>> pretty_bytes(1049200)
        '1.001 MiB'
        >>> pretty_bytes(2560)
        '2.5 KiB'
        >>> pretty_bytes(.0001, 3)
        '104.9 KiB'
        >>> pretty_bytes(.01, 1)
        '10 B'
        >>> pretty_bytes(.001, 1)
        '1 B'
        >>> pretty_bytes(.0001, 1)
        '0 B'
        >>> pretty_bytes(100, -1)
        Traceback (most recent call last):
            ...
        ValueError: base_shift must not be negative
    """
    if base_shift < 0:
        raise ValueError("base_shift must not be negative")

    tags = ["B", "KiB", "MiB", "GiB", "TiB", 'PiB', 'EiB', 'ZiB', 'YiB']
    byte_value = float(byte_value)
    shift = base_shift
    while byte_value >= 1024.0:
        byte_value /= 1024.0
        shift += 1
    while byte_value < 1.0 and shift > 0:
        byte_value *= 1024.0
        shift -= 1

    # Fractions of a byte should be considered a rounding error:
    if shift == 0:
        byte_value = round(byte_value)

    return "{0:.4g} {1}".format(byte_value, tags[shift])
e1643791482e45b75a67fc457a451ef16e089ad6
286,629
import calendar
import time


def build_filename(option, quarter):
    """
    Function to build filename based on the option, the quarter, and the
    current UTC timestamp.

    :return: Name of the file. Example: 'teacher_q1_timestamp'
    """
    timestamp = calendar.timegm(time.gmtime())
    underscore = "_"
    name = [option, underscore, "q" + str(quarter), underscore, str(timestamp)]
    return "".join(name)
56c70cfbb82b001386b7967222b6745d0a7f2ccd
147,843
from re import compile, findall


def filterSpilloverFilename(filename):
    """ Remove any unwanted spill-over filename endings (i.e. _NNN or ._NNN) """
    # Create the search pattern
    pattern = compile(r'(\.?_\d+)')
    found = findall(pattern, filename)
    if found:
        # Make sure that the _NNN substring is at the end of the string
        for f in found:
            if filename.endswith(f):
                # Do not use replace here since it might cut away something
                # from inside the filename and not only at the end
                filename = filename[:-len(f)]
    return filename
c0053aed72cfe7c1f5a46f6224aaf8a47743d212
129,958
def remove_comma(in_str):
    """ Replace commas in the given string with spaces, collapsing any
    resulting double spaces """
    return str(in_str).replace(",", " ").replace("  ", " ")
c6058d6afb3acaa8cf5e98e0900bd75e921fbaed
306,654
def is_ascii(str_data: str) -> bool:
    """Checks if string contains only ascii chars.

    Necessary because python 3.6 does not have a str.isascii() method.

    Parameters
    ----------
    str_data : str
        string to check if it contains only ascii characters

    Returns
    -------
    bool
        True if only ascii characters in the string, else False.
    """
    try:
        str_data.encode('ascii')
    except (UnicodeEncodeError, AttributeError):
        return False
    return True
60bff3c1156863a49b85306d710f3d62d3cf4a31
222,661
def _wfdb_fmt(bit_res, single_fmt=True):
    """
    Return the most suitable WFDB format(s) to use given signal resolutions.

    Parameters
    ----------
    bit_res : int, list
        The resolution of the signal, or a list of resolutions, in bits.
    single_fmt : bool, optional
        Whether to return the format for the maximum resolution signal.

    Returns
    -------
    fmt : str, list
        The most suitable WFDB format(s) used to encode the signal(s).
    """
    if isinstance(bit_res, list):
        # Return a single format
        if single_fmt:
            bit_res = [max(bit_res)] * len(bit_res)
        return [_wfdb_fmt(r) for r in bit_res]

    if bit_res <= 8:
        return '80'
    elif bit_res <= 12:
        return '212'
    elif bit_res <= 16:
        return '16'
    elif bit_res <= 24:
        return '24'
    else:
        return '32'
9bbf0f64efcc4aebf5ac421c60c4610f23d04647
132,589
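Illustrative calls to _wfdb_fmt above:

print(_wfdb_fmt(12))                             # '212'
print(_wfdb_fmt([8, 12, 16]))                    # ['16', '16', '16'] (max resolution applied to all)
print(_wfdb_fmt([8, 12, 16], single_fmt=False))  # ['80', '212', '16']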
def construct_res_tuple(res):
    """
    Build the BigDFT fragment tuple given the residue of the structure

    Args:
        res (Residue): A residue class from the Biopython package

    Returns:
        tuple: the (chain, fragname, fragno) tuple
    """
    chain = res.full_id[2]
    if len(chain.lstrip(' ')) == 0:
        chain = 'A'
    resname = res.resname
    position = res.full_id[3][1]
    return chain, resname, position
6091470e4a16f7b04ee9d3fcf5bf64f30f5a1b58
46,617
def id(args, premise):
    """Return ID."""
    return args[0].id
9afee153891c7978d842f35c6a4102cb0842cbde
202,670
def bin2x2(arr):
    """Bin 2-d ``arr`` in 2x2 blocks.

    Requires that ``arr`` has even shape sizes"""
    shape = (arr.shape[0] // 2, 2, arr.shape[1] // 2, 2)
    return arr.reshape(shape).sum(-1).sum(1)
1568387a680ec163e514886d16a9676175ccdb81
97,825
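A worked example for bin2x2 above (requires numpy):

import numpy as np
a = np.arange(16).reshape(4, 4)
print(bin2x2(a))
# [[10 18]
#  [42 50]]  (each entry is the sum of one 2x2 block)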
def decode_bytes(s, encoding='utf-8', errors='replace'):
    """Decodes bytes to str, str to unicode."""
    return s.decode(encoding, errors=errors) if isinstance(s, bytes) else s
2ff943128e83585b9c3241e8bed4db80d9090c38
637,125
def dead_state(height, width):
    """
    Construct a dead state with all cells set to DEAD.

    :arg height: The height of the state, in cells.
    :arg width: The width of the state, in cells.

    :returns: A state of dimensions height x width, with all cells set to
        DEAD.
    """
    return [[0 for _ in range(width)] for _ in range(height)]
b8054df6fe6d6778bc9359880ceaf3eaefe0135a
149,096
def str_time(t):
    """
    Return the time such as %H:%M:%S.

    Parameters
    ----------
    t : int
        Time in seconds.

    Returns
    -------
    str
        Time in hours, minutes and seconds.
    """
    txt = ''
    s, t = t % 60, t // 60
    if s < 10:
        s = '0' + str(s)
    m, h = t % 60, t // 60
    if m < 10:
        m = '0' + str(m)
    # Use >= so that exactly 24 hours rolls over into a day count.
    if h >= 24:
        h, d = h % 24, h // 24
        txt += str(d) + ' days '
    if h < 10:
        h = '0' + str(h)
    return txt + '{}:{}:{}'.format(h, m, s)
16b281710bdf1519c40376a87f4d008aa042166d
415,559
def get_lower_case_dict(value_list):
    """
    Create a dictionary whose key is the provided key value in lower case
    and whose value is the original key value.

    :param value_list: List of key strings.
    :return: Dictionary with lower case key and original value
    """
    lower_case_map = dict()
    for value in value_list:
        lower_case_map[value.lower()] = value
    return lower_case_map
83e01cb02dfad6e058a079cb3f1accbaf40baaac
206,238
def spridning(data):
    """Returns the size of the range of values in the data."""
    return max(data) - min(data)
f43be929d7ae64c6d06123be3d29c40427715bc8
294,469
def split_vectors(vectors):
    """Splits vectors into their x, y and z components."""
    return vectors[..., 0], vectors[..., 1], vectors[..., 2]
2fbd6f1b6b315ab92e2a14c04e03c3a32213f01f
584,390
import csv


def real_edges_from_csv_file(real_edges_file: str) -> dict:
    """
    Reads the csv file and returns a dict of edges with the real distance
    between the nodes.

    real_edges_file: str = csv path

    Returns:
        real_edges: dict = {
            from_station: [(to_station, distance), ...],
            to_station: [(from_station, distance), ...],
            ...
        }
        where from_station and to_station are the nodes and distance is the
        real distance between them
    """
    real_edges = {}
    with open(real_edges_file, newline="") as csvfile:
        real_lines = csv.DictReader(csvfile)
        for line in real_lines:
            distance = float(line["cost"])
            from_station = line["from"]
            to_station = line["to"]
            if from_station not in real_edges:
                real_edges[from_station] = []
            real_edges[from_station].append((to_station, distance))
            if to_station not in real_edges:
                real_edges[to_station] = []
            real_edges[to_station].append((from_station, distance))
    return real_edges
ba690f0f8abf054457db1c057e7e8c96e88e2592
370,274
from typing import List


def primes(n_max: int = 100) -> List[int]:
    """Implements Eratosthenes' sieve"""
    if n_max < 2:
        raise ValueError
    # Mark composites in a boolean table instead of removing items from a
    # list while iterating over it, which the original version did and which
    # is fragile in Python.
    is_prime = [True] * (n_max + 1)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n_max ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n_max + 1, i):
                is_prime[j] = False
    return [n for n, flag in enumerate(is_prime) if flag]
149bffd1b9a7bdcea3234ba7f3427525c2bfb8a0
503,638
def module_name(instance):
    """Return the instance module name."""
    return instance.__class__.__module__.split('.')[0]
258707d4f49b1234f8461cf1fb8ce72b367e2471
516,014
def soma_volume(morph):
    """Get the volume of a morphology's soma."""
    return morph.soma.volume
5a49d7bc56783c78dfe314afbe6c0b4e7af658a5
454,101
def _make_times(delta_hours):
    """
    Creates the list of times that a map will be downloaded for each day
    starting at '00' UTC.

    :param delta_hours: The change in hours between subsequent maps
    :return: The list of times represented as list of strings

    >>> _make_times(6)
    ['00', '06', '12', '18']
    """
    return [f'{t:02d}' for t in range(0, 24, delta_hours)]
f4fea3957048c4a624689057c43a386390b7eed7
526,642
def items(json_struct):
    """
    Gets all items of the json_struct
    """
    return json_struct._data.items()
eeada3b813dab22185068de8390f2ffa02dec065
66,022
def sales_growth_rate(sales_period_1, sales_period_2):
    """Return the sales growth rate for the current period versus the
    previous period.

    Args:
        sales_period_1 (float): Total company sales for the previous period.
        sales_period_2 (float): Total company sales for the current period.

    Returns:
        Sales growth based on sales in period 2 versus period 1.
    """
    return ((sales_period_2 - sales_period_1) / sales_period_1) * 100
31927f564c327df68ac45b2392c331d87e00f8da
158,886
def iqr(x):
    """Return the interquartile range of the input

    arguments:
    x: pandas Series (or other object with a quantile method) of numeric type

    returns: the interquartile range of x"""
    return x.quantile(0.75) - x.quantile(0.25)
4446aee39d298baa9262d5c013fe5bbdbc9c8789
446,919
def check_ext(file_name, ext):
    """Check the extension for a file name, and add if missing.

    Parameters
    ----------
    file_name : str
        The name of the file.
    ext : str
        The extension to check and add.

    Returns
    -------
    str
        File name with the extension added.
    """
    return file_name + ext if not file_name.endswith(ext) else file_name
2fb8fbd0f070f35ea8be7cccd738518a954b9f1f
558,587
def prepare_querystring(*query_arguments, **kw_query_arguments):
    """Prepare a querystring dict containing all query_arguments and
    kw_query_arguments passed.

    :return: Querystring dict.
    :rtype: dict
    """
    querystring = dict()
    for argument_dict in query_arguments:
        if isinstance(argument_dict, dict):
            querystring.update(argument_dict)
    querystring.update(kw_query_arguments)
    return querystring
ba6e0f26016bf0aae66ff54ab29cd09e7114f89f
288,370
def contour_enclosed_area_py(verts):
    """
    Compute the area enclosed by a contour. Copied from
    https://arachnoid.com/area_irregular_polygon/

    Parameters
    ----------
    verts : array_like
        2D shape (N,2) array of vertices. Uses scikit-image conventions
        (j,i indexing). The contour is assumed to be closed, i.e. the first
        vertex is repeated at the end, as skimage contours provide.

    Returns
    ----------
    area : float
        Area of polygon enclosed by verts. Sign is determined by vertex
        order (cw vs ccw)
    """
    a = 0
    ox, oy = verts[0]
    for x, y in verts[1:]:
        a += (x * oy - y * ox)
        ox, oy = x, y
    return a / 2
e384d39f5a1526ece0f9b18237729312e7e67c12
136,566
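A worked check of the shoelace computation above, using a closed unit square (first vertex repeated at the end):

square = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
print(contour_enclosed_area_py(square))  # -1.0: magnitude is the area, sign encodes orientation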
def prefer_safest(graph, next_sys):
    """Return a weight for preferring the safest route."""
    if graph.security(next_sys) < 0.45:  # low/null
        return 50000.0
    return 1.0
da3622d06b6fd54d8ca520727bf99861f64cdb5b
528,496
def get_iftable(snmp_data):
    """
    Gets the interface table (if_index and interface) for a given device
    for further snmp lookups

    Args:
        snmp_data - snmp data returned by cmdgen.nextCmd() for
            mib = .1.3.6.1.2.1.2.2.1.2

    Returns:
        if_table - dict formatted as if:if_index
        inverse_if_table - dict formatted as if_index:if

    Sample Output:
        if_table = {u'Ethernet4/29/3': u'719', u'Ethernet4/29/2': u'718'}
        inverse_if_table = {u'719': u'Ethernet4/29/3', u'718': u'Ethernet4/29/2'}
    """
    if_table = dict()
    inverse_if_table = dict()
    # Populate the if_table dict with parsed output
    for if_tuple in snmp_data:
        if_table[str(if_tuple[0][1])] = str(if_tuple[0][0]).split(".")[-1]
        inverse_if_table[str(if_tuple[0][0]).split(".")[-1]] = str(if_tuple[0][1])
    return (if_table, inverse_if_table)
de640b8619d3eb61c57a08337b34c42b95d10e74
476,966
def calculate_time_match_fifo(ai, aj, times0=None):
    """
    Associate the times between two lists of timestamps using FIFO

    Parameters
    --------------
    ai
        First list of timestamps
    aj
        Second list of timestamps
    times0
        Correspondence between execution times

    Returns
    --------------
    times0
        Correspondence between execution times
    """
    if times0 is None:
        times0 = []
    k = 0
    z = 0
    while k < len(ai):
        while z < len(aj):
            if ai[k] < aj[z]:
                times0.append((ai[k], aj[z]))
                z = z + 1
                break
            z = z + 1
        k = k + 1
    return times0
f59d873961e36c72cfa1fcba51afb8f7c80ba8e2
512,900
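An illustrative FIFO matching for calculate_time_match_fifo above; each start time in ai is paired with the next strictly-later unconsumed timestamp in aj.

print(calculate_time_match_fifo([1, 4], [2, 3, 5]))  # [(1, 2), (4, 5)]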
def _get_port_to_string(iface):
    """Simple helper which allows to get string representation for interface.

    Args:
        iface(list): Which IXIA interface to use for packet sending
            (list in format [chassis_id, card_id, port_id])

    Returns:
        str: string in format "chassis_id/card_id/port_id"
    """
    return "/".join(map(str, iface))
3d1da011c2bc63657020043f7a0aca1e14e5f276
639,350
def str_day(day):
    """Transforms datetime day object into a "%d/%m/%y" string.

    Args:
        day (datetime/datetime.date): Day.

    Returns:
        A "%d/%m/%y" string representation.
    """
    return day.strftime("%d/%m/%y")
de5b07d0f46f91aaf5ef4078902b3ae7b80b5f5e
481,452
def get_chunk_region(chunkX, chunkZ):
    """ Returns the name of the region file given global chunk coords """
    # Use floor division so region coordinates stay integers (plain "/"
    # would produce float names such as "r.0.5.0.5.mcr" on Python 3).
    regionX = chunkX // 32
    regionZ = chunkZ // 32
    region_name = 'r.' + str(regionX) + '.' + str(regionZ) + '.mcr'
    return region_name
8afb687fa0305274f9d78eaa6c318ed7dcfb9e80
139,726
def make_feature(wordlist, toplist):
    """Return a feature vector corresponding to a wordlist.

    The wordlist represents an article. We count the incidences in wordlist
    of each word in toplist.
    """
    # This could be optimized if necessary.
    feature = [wordlist.count(word) for word in toplist]
    binary_feature = [int(f > 0) for f in feature]
    # I return the binary version for now, rather than worry about
    # the proper normalization.
    return binary_feature
1b993eaf24f47149b8dd9655027388454127a47f
545,212
def xmlbool(value):
    """Convert a boolean into the string expected by the elfweaver XML."""
    # A conditional expression answers the original "is there a smarter
    # way?" comment; the parameter is also renamed so it no longer shadows
    # the built-in bool.
    return "true" if value else "false"
5badffb116178f9a0538c6596ea18cdfe6077401
603,909
import math


def binom(n, r):
    """ return binomial coefficient: n choose r"""
    return math.factorial(n) // math.factorial(n - r) // math.factorial(r)
021d8e6b6ed8d0bb1acb610b4d4bfaacf1ab03f0
480,017
import click


def selection_prompt(prompt: str, choices: list) -> str:
    """
    Prompts the user to select an option from a list of choices.

    :param prompt: The text of the prompt.
    :param choices: A list of choices.
    :return: The user's selection.
    """
    joiner = "\n* "
    selection = click.prompt(f"\n{prompt}\n\n"
                             f"* {joiner.join(choices)}",
                             type=click.Choice(choices, case_sensitive=False),
                             show_choices=False,
                             prompt_suffix="\n\n> ")
    return selection.casefold()
e6b8221e790fb11ac217368042f96b52a220b637
59,782
def list_generator(start: int, step: int, number: int) -> list:
    """
    Generates a list with 'number' of integers. The first one equals
    'start' and every consecutive one is increased by 'step'.
    """
    # Loop number - 1 times after seeding the list with 'start', so the
    # result has exactly 'number' elements as the docstring promises (the
    # original appended one extra element).
    i = start
    retval = [i]
    for _ in range(number - 1):
        i += step
        retval.append(i)
    return retval
270db1e9d07c42f2fe4dfb37cd8d056f6264ba20
438,031
from typing import List


def _format_predicted_class_strings(class_names: List[str]) -> List[str]:
    """Format the class names by replacing underscores with spaces and
    capitalizing every word's first letter.

    Args:
        class_names (List[str]): A List of class names.

    Returns:
        List[str]: A List of formatted class names.
    """
    return [class_name.replace("_", " ").title() for class_name in class_names]
0cb10b61e7ee75b86d755ba18d48711fd6353a41
264,354
import re


def squish(text):
    """Turn any run of whitespace into one space."""
    return re.sub(r"\s+", " ", text)
631cdf496e95cab9062156de4fcd7bf353844a50
680,900
import torch


def prepare_batch(batch, device, non_blocking, new_shape=None):
    """Prepare the batch data for training/inference, move data to GPU,
    reshape the target if necessary.

    Args:
        batch (torch.Tensor): A batch of data.
        device (torch.device or str): Device to load the backbone and data.
        non_blocking (bool): Whether tries to convert asynchronously with
            respect to the host if possible.
            https://pytorch.org/docs/stable/tensors.html#torch.Tensor.to
        new_shape (tuple): The new shape of the target variable, sometimes
            necessary for certain API calls.

    Returns:
        (torch.Tensor, torch.Tensor)
    """
    x = batch['image'].to(device, dtype=torch.float, non_blocking=non_blocking)
    y = batch['target'].to(device, dtype=torch.float, non_blocking=non_blocking)
    if new_shape:
        y = y.view(*new_shape)
    return x, y
81fdc6576cf9b9f360256ac2247d68a1f92da361
365,257
def get_local_file_content(file_path):
    """Gets the contents of a local file.

    Args:
        file_path: The path of the file.

    Returns:
        The content fetched from the local file.
    """
    with open(file_path, "r") as opened_file:
        return opened_file.read()
c144ac9abf30dc2be38d82b40e260a0b611f93a0
566,317
def rec_cmp_releases(one, two):
    """Recursive function that compares two version strings represented as
    lists to determine which one comes "after" / takes precedence, or if
    they are equivalent. List items must be integers (will throw TypeError
    otherwise), and the lists are assumed to have equal length (an
    IndexError can result otherwise).

    If the left argument ("one") is a later version, returns 1.
    If the right argument ("two") is a later version, returns 2.
    If they are equivalent, returns 0.

    :param one: list of ints to compare to two
    :param two: list of ints to compare to one
    :returns: code in (0, 1, 2)
    :rtype: int
    """
    # We've exhausted all levels of comparison, so logically we're at
    # equivalence.
    if len(one) == 0:
        return 0
    top1 = one[0]
    top2 = two[0]
    if top1 > top2:
        return 1
    elif top1 < top2:
        return 2
    else:
        return rec_cmp_releases(one[1:], two[1:])
01632c265982107e2836e7b34b9acc5664f01c2e
528,527
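Quick examples for rec_cmp_releases above:

print(rec_cmp_releases([1, 2, 3], [1, 2, 4]))  # 2 (right side is newer)
print(rec_cmp_releases([2, 0, 0], [1, 9, 9]))  # 1 (left side is newer)
print(rec_cmp_releases([1, 0, 0], [1, 0, 0]))  # 0 (equivalent)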
def tokenize_mutation_seq(seq, placeholder_token='_'):
    """Converts a variable-length mutation sequence to a fixed-length sequence.

    For an N-residue reference sequence, the encoding is shape (N+1, M, A),
    where A is the alphabet size (e.g., A=20 for the canonical peptide
    alphabet) and M is the number of distinct mutation types at each
    position; here, M=2 (1x sub + 1x ins at each reference sequence
    position).

    Args:
        seq: (str) A mutation sequence to tokenize; e.g., "__A_" or "aTEST".
        placeholder_token: (str) Sentinel value used to encode non-mutated
            positions in the mutation sequence.

    Returns:
        A length-N+1 sequence of ("<substitution_token>", "<insertion token>")
        2-tuples.
    """
    tokens = []
    i = 0
    # Consume the prefix insertion mutation if there is one.
    # A prefix insertion is denoted by a leading lower case letter on the seq.
    if seq[i].islower():
        tokens.append((placeholder_token, seq[i].upper()))
        i += 1
    else:
        tokens.append((placeholder_token, placeholder_token))
    while i < len(seq):
        if i < len(seq) - 1 and seq[i + 1].islower():
            tokens.append((seq[i], seq[i + 1].upper()))
            i += 2
        else:
            tokens.append((seq[i], placeholder_token))
            i += 1
    return tokens
a5bcb1e5d66d5a22f3ed2a5ed2371f73583f9d64
192,110
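A worked example for tokenize_mutation_seq above; a leading lower-case letter is a prefix insertion, and any other lower-case letter is an insertion after the preceding reference position.

print(tokenize_mutation_seq('aTEST'))
# [('_', 'A'), ('T', '_'), ('E', '_'), ('S', '_'), ('T', '_')]
print(tokenize_mutation_seq('A_b'))
# [('_', '_'), ('A', '_'), ('_', 'B')]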
def prod(l):
    """
    l: iterable of numbers.

    returns: product of all numbers in l
    """
    p = 1
    for i in l:
        p *= i
    return p
0575ddd33f67e416ab96c26d9f010756b607261d
668,941
def lv1_consts(key=None):
    """
    defines consts used while reading Sciamachy level 1b data
    """
    consts = {}
    consts['mds_size'] = 1247
    consts['max_clusters'] = 64
    consts['uvn_channels'] = 5
    consts['swir_channels'] = 3
    consts['all_channels'] = consts['uvn_channels'] + consts['swir_channels']
    consts['channel_pixels'] = 1024
    consts['all_pixels'] = consts['all_channels'] * consts['channel_pixels']
    consts['num_pmd'] = 7
    consts['num_frac_polv'] = 12
    consts['num_spec_coeffs'] = 5

    if key is None:
        return consts
    if key in consts:
        return consts[key]

    raise KeyError('level 1b constant {} is not defined'.format(key))
763ed98b2a136aaac406a9b2872c89ab65c35c8f
346,903
def getDistance(interval_a, interval_b):
    """Returns the distance between two intervals (negative if they overlap)"""
    return max(interval_a[0] - interval_b[1], interval_b[0] - interval_a[1])
16fc181560ec01e5bddb7da6fbb911b242126112
51,063
def align(offset):
    """Align an offset to a multiple of 4

    Args:
        offset (int): Offset to align

    Returns:
        int: Resulting aligned offset (rounds up to nearest multiple)
    """
    return (offset + 3) & ~3
4ab72bc7fc86b1b61ec0bf4033252d39c0fbaaff
164,026
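A quick check of the align helper above; it rounds up to the next multiple of 4 and leaves multiples of 4 unchanged.

print([align(n) for n in range(9)])  # [0, 4, 4, 4, 4, 8, 8, 8, 8]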
def date_formatter(date_val):
    """
    Auto formats date values (month, day, hour, minute) to two digits. This
    is needed for proper date formatting by Plotly. i.e. the month of March
    is represented in the database as an integer of 3. This needs to be
    changed to a string of '03' to be plotted properly.

    :param date_val: Date/time value (month, day, hour, minute, second)
    :type date_val: int
    """
    if date_val < 10:
        date_val = '{}{}'.format('0', str(date_val))
        return date_val
    elif date_val >= 10:
        return str(date_val)
    else:
        print('Improper date value formatting')
        print(date_val)
d1ae0495d018c3ef0981c97e0e32201216569d7d
198,265
def get_param_groups(net, weight_decay, norm_suffix='weight_g', verbose=False):
    """Get two parameter groups from `net`: One named "normalized" which will
    override the optimizer with `weight_decay`, and one named "unnormalized"
    which will inherit all hyperparameters from the optimizer.

    Args:
        net (torch.nn.Module): Network to get parameters from
        weight_decay (float): Weight decay to apply to normalized weights.
        norm_suffix (str): Suffix to select weights that should be normalized.
            For WeightNorm, using 'weight_g' normalizes the scale variables.
        verbose (bool): Print out number of normalized and unnormalized
            parameters.
    """
    norm_params = []
    unnorm_params = []
    for n, p in net.named_parameters():
        if n.endswith(norm_suffix):
            norm_params.append(p)
        else:
            unnorm_params.append(p)

    param_groups = [{'name': 'normalized', 'params': norm_params,
                     'weight_decay': weight_decay},
                    {'name': 'unnormalized', 'params': unnorm_params}]

    if verbose:
        print('{} normalized parameters'.format(len(norm_params)))
        print('{} unnormalized parameters'.format(len(unnorm_params)))

    return param_groups
54cee9a60836a62ebfe7e88336ed1f8915e1af09
400,886