content: string (39 to 9.28k chars)
sha1: string (40 chars)
id: int64 (8 to 710k)
def flatten_json(nested_dict, keep_nested_name=True):
    """Turns a nested dictionary into a flattened dictionary.

    Designed to facilitate the populating of Config.Part tables with the
    corresponding config json list of parameters from the Config master table.

    Args:
        nested_dict: dict
            Nested dictionary to be flattened
        keep_nested_name: boolean, default True
            If True, record names will consist of all nested names separated
            by '_'. If False, the last record name is chosen as the new record
            name. This is only possible for unique record names.

    Returns:
        dict
            Flattened dictionary

    Raises:
        ValueError: Multiple entries with identical names
    """
    out = {}

    def flatten(x, name=""):
        if isinstance(x, dict):
            for key, value in x.items():
                flatten(value, (name if keep_nested_name else "") + key + "_")
        else:
            if name[:-1] in out:
                raise ValueError("Multiple entries with identical names")
            out[name[:-1]] = x

    flatten(nested_dict)
    return out
78f2886ff45a40690b2b3ed031736862823837d6
221,709
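A minimal usage sketch (hypothetical config values) showing how nested keys are joined with '_':

config = {"optimizer": {"lr": 0.01, "schedule": {"gamma": 0.9}}}
print(flatten_json(config))
# {'optimizer_lr': 0.01, 'optimizer_schedule_gamma': 0.9}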
def space_join(conllu, sentence_wise=False):
    """Takes conllu input and returns:
    All tokens separated by space (if sentence_wise is False), OR
    A list of sentences, each a space separated string of tokens
    """
    lines = conllu.replace("\r", "").strip().split("\n")
    lines.append("")  # Ensure last blank
    just_text = []
    sentences = []
    length = 0
    for line in lines:
        if "\t" in line:
            fields = line.split("\t")
            if "." in fields[0]:  # ellipsis tokens
                continue
            if "-" in fields[0]:  # need to get super token and ignore next n tokens
                just_text.append(fields[1])
                start, end = fields[0].split("-")
                start = int(start)
                end = int(end)
                length = end - start + 1
            else:
                if length > 0:
                    length -= 1
                    continue
                just_text.append(fields[1])
        elif len(line.strip()) == 0 and sentence_wise:  # New sentence
            sent_text = " ".join(just_text)
            sentences.append(sent_text)
            just_text = []
    if sentence_wise:
        return sentences
    else:
        text = " ".join(just_text)
        return text
e3e752906b57090f56c2678031f295c5a8e66f29
41,669
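A quick sketch of both modes, assuming a toy two-sentence CoNLL-U fragment (only the ID and FORM columns matter here):

conllu = "1\tHello\t_\n2\tworld\t_\n\n1\tBye\t_\n"
print(space_join(conllu))                      # 'Hello world Bye'
print(space_join(conllu, sentence_wise=True))  # ['Hello world', 'Bye']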
def get_desc(order_id):
    """
    :param order_id: order id
    :return: description string
    """
    return 'Pay for order #: %s' % order_id
6d8084a14c61a8ea9cb7adfdf99824edbf2e97fc
360,549
def stringify_query_param(query_params):
    """
    Use this to append key-value pairs to a URL.

    :param query_params:
    :return: string in form of key=value&key1=value1
    """
    params = []
    for key, value in query_params.items():
        params.append(str(key) + "=" + str(value))
    return "&".join(params)
66d265aff310d4f05396e042cdc2d2f6a1eee9de
406,797
import random


def get_perspective_params(img, distortion_scale):
    """Helper function to get parameters for RandomPerspective."""
    img_width, img_height = img.size
    distorted_half_width = int(img_width / 2 * distortion_scale)
    distorted_half_height = int(img_height / 2 * distortion_scale)
    top_left = (random.randint(0, distorted_half_width),
                random.randint(0, distorted_half_height))
    top_right = (random.randint(img_width - distorted_half_width - 1, img_width - 1),
                 random.randint(0, distorted_half_height))
    bottom_right = (random.randint(img_width - distorted_half_width - 1, img_width - 1),
                    random.randint(img_height - distorted_half_height - 1, img_height - 1))
    bottom_left = (random.randint(0, distorted_half_width),
                   random.randint(img_height - distorted_half_height - 1, img_height - 1))
    start_points = [(0, 0), (img_width - 1, 0),
                    (img_width - 1, img_height - 1), (0, img_height - 1)]
    end_points = [top_left, top_right, bottom_right, bottom_left]
    return start_points, end_points
49528cc8400c6ef7ad4a4cc8b9efd38c64ecf2cb
87,390
from typing import List
from typing import Tuple
from typing import Any

import configparser


def config_items(path: str) -> List[Tuple[Tuple[str, str], Any]]:
    """Return config file option, value pairs."""
    config = configparser.ConfigParser()
    config.read(path)
    res = []
    for section in config.sections():
        for opt, val in config.items(section):
            res.append(((section, opt), val))
    return res
0c7b8cc1b5bafe0aea4552b3f6a488dbe84bde2e
62,296
def legendre(a, m):
    """
    This function returns the Legendre symbol (a/m).
    If m is an odd composite then this is the Jacobi symbol.
    """
    a = a % m
    symbol = 1
    while a != 0:
        while a & 1 == 0:
            a >>= 1
            if m & 7 == 3 or m & 7 == 5:
                symbol = -symbol
        a, m = m, a
        if a & 3 == 3 and m & 3 == 3:
            symbol = -symbol
        a = a % m
    if m == 1:
        return symbol
    return 0
45ee0938fafab5c32e8267b087d09459300229b7
105,611
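A small sanity check: the quadratic residues modulo 7 are 1, 2 and 4, so those arguments should map to +1 and the rest to -1:

print([legendre(a, 7) for a in range(1, 7)])  # [1, 1, -1, 1, -1, -1]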
def clamp(n, lower, upper):
    """
    Restricts the given number to a lower and upper bound (inclusive)

    :param n: input number
    :param lower: lower bound (inclusive)
    :param upper: upper bound (inclusive)
    :return: clamped number
    """
    if lower > upper:
        lower, upper = upper, lower
    return max(min(upper, n), lower)
9e73b0662ba0f29f0d23c8621e4e8da274b6569c
77,178
from typing import Optional
from typing import Tuple


def _get_validation_scope(
    has_valid_abbrev: bool,
    blacklist_policy: Optional[str],
) -> Tuple[str, str]:
    """Determine the validationScope for DST and abbreviations.

    Returns tuple of the C++ ValidationScope and a human-readable comment.
    """
    if not has_valid_abbrev:
        return 'ValidationScope::kNone', ' INVALID'
    if not blacklist_policy:
        return 'ValidationScope::kAll', ''
    if blacklist_policy == 'partial':
        return 'ValidationScope::kExternal', ' BLACKLISTED'
    if blacklist_policy == 'full':
        return 'ValidationScope::kNone', ' BLACKLISTED'
    raise Exception(f"Unrecognized blacklist policy '{blacklist_policy}'")
8afc0f90e0179d640c0ec80434fad0170257d911
630,756
from typing import Dict
from typing import Set


def filter_least_used_disks(disk_to_copy_processes_count: Dict[str, int]) -> Set[str]:
    """Filters for the least used disks

    Parameters
    ----------
    disk_to_copy_processes_count : Dict[str, int]
        Dictionary of disks with lockfile count

    Returns
    -------
    available_dirs : Set[str]
        Available directories with minimal lockfile count. If several disks
        share the minimal lockfile count, all of them are returned.
    """
    minimum_number_of_lockfiles = min(disk_to_copy_processes_count.values())
    available_target_dirpaths = {
        dirpath
        for dirpath in disk_to_copy_processes_count
        if disk_to_copy_processes_count[dirpath] == minimum_number_of_lockfiles
    }
    return available_target_dirpaths
c52e55837cd0cbd8216441e0b2d9e14e7d0862b8
109,245
def is_autosomal(chrom):
    """Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22
    """
    try:
        int(chrom)
        return True
    except ValueError:
        try:
            int(str(chrom.replace("chr", "")))
            return True
        except ValueError:
            return False
3855b982ea205a210912aa4b478898e3ec4d9388
294,549
def q(s):
    """
    Quote the given string
    """
    return "'" + str(s) + "'"
0c0a1477e740b430d5e6997c0115ef317457526c
14,958
def standardize_sample_duration(sample_duration):
    """
    sample_duration: a string description of duration at which devices
        sample and measure pollutants

    returns a standardized string - '24-hour' or '8-hour' or '1-hour'
    """
    durations = ['24', '8', '1']
    for duration in durations:
        if duration in sample_duration:
            return duration + '-hour'
    return 'unknown'
88b8ac0ba85af457816b547a120ee4efd673902e
194,444
def _initial_GRASP_candidates(target, primes, forbidden):
    """Helper function for GRASP driver search. Constructs initial candidates
    for driver nodes.

    Parameters
    ----------
    target : partial state dictionary
        pyboolnet implicant that defines target fixed node states.
    primes : pyboolnet primes dictionary
        Update rules.
    forbidden : set of str variable names
        Variables to be considered uncontrollable (the default is None).

    Returns
    -------
    candidates : list of partial state dictionaries
        List of variable states that can potentially lead to the target.
    """
    if forbidden is None:
        candidate_vars = list(primes.keys())
    else:
        candidate_vars = [k for k in primes if k not in forbidden]
    candidates = []
    for st in [0, 1]:
        candidates += [{k: st} for k in candidate_vars]
    return candidates
79971aa46e6b661c92b0686bf5744172f18f1dd2
265,912
def split_stable_id(stable_id):
    """
    Split stable id, returning:
        * Document (root) stable ID
        * Context polymorphic type
        * Character offset start, end *relative to document start*

    Returns tuple of four values.
    """
    split1 = stable_id.split('::')
    if len(split1) == 2:
        split2 = split1[1].split(':')
        if len(split2) == 3:
            return split1[0], split2[0], int(split2[1]), int(split2[2])
    raise ValueError("Malformed stable_id:", stable_id)
7b0d38edf8943bb9f880472587d607dcfeb6fe58
314,250
def _get_image_repo(image: str) -> str:
    """Extracts image name before ':' which is REPO part of the image name."""
    image_fields = image.split(':')
    if len(image_fields) > 2:
        raise ValueError(f'Too many ":" in the image name: {image}')
    return image_fields[0]
0a49bd47dd1b0018129a4795d243c259f1eb00e4
477,582
def get_simple_row_info(row):
    """
    Get simple minddata pipeline row information.

    Args:
        row (list[str, int, float]): The minddata pipeline row information.

    Returns:
        list[str, int, float], the simple minddata pipeline row information.
    """
    simple_info = row[0:2]
    simple_info.extend(row[4:])
    return simple_info
4be4af5b568878c1bc2b28ea5529a50725c7a82e
428,886
def muon_filter(image, thr_low=0, thr_up=1.e10):
    """
    Tag muon with a double threshold on the image photoelectron size.
    Default values apply no tagging.

    Parameters
    ----------
    image: `np.ndarray`
        number of photoelectrons in each pixel
    thr_low: `float`
        lower size threshold in photoelectrons
    thr_up: `float`
        upper size threshold in photoelectrons

    Returns
    -------
    `bool`
        it determines whether a muon was tagged or not
    """
    return image.sum() > thr_low and image.sum() < thr_up
240b43cdb239e3d54e0072829c0f6d7e8aea120f
460,603
def Join(iterable, separator=''):
    """
    iterable >> Join(separator='')

    Same as Python's sep.join(iterable).

    Concatenates the elements in the iterable to a string using the given
    separator. In addition to Python's sep.join(iterable) it also
    automatically converts elements to strings.

    :param iterable iterable: Any iterable
    :param string separator: Separator string between elements.
    :return: String with concatenated elements of iterable.
    :rtype: str
    """
    return separator.join(map(str, iterable))
01595a2e77e7bc990d47c2dd062797ee920124bc
384,713
def _collect_duplicates(data_list):
    """Collects duplicate items from a list and returns them.

    :param data_list: A list of items to check for duplicates. The list may
        include dict items.
    :returns: A set of items that are duplicates in data_list. If no
        duplicates are found, the returned set is empty.
    """
    seen = []
    dups = set()
    for datum in data_list:
        if datum in seen:
            dups.add(datum)
            continue
        seen.append(datum)
    return dups
f98c12fde7aec5a2a2893efb3bf4ae307d5c8e43
325,178
import base64


def base64url_decode(data: bytes) -> bytes:
    """
    Decodes a URL Safe Base64 encoded bytes string into its original contents.

    :param data: Data to be decoded.
    :type data: bytes

    :return: Original contents of the provided encoded data.
    :rtype: bytes
    """
    # Pad to a multiple of 4 characters. Note that the original
    # len(data) % 4 over-padded inputs that were 1 or 3 characters
    # short of a multiple of 4; -len(data) % 4 yields the correct count.
    data += b"=" * (-len(data) % 4)
    return base64.urlsafe_b64decode(data)
b5b98aa01005ce0594d52ebaf3adeab46759ba71
585,390
def skip_some_objects(app, what, name, obj, skip, options):
    """Exclude some objects from the documentation"""
    if getattr(obj, '__module__', None) == 'collections':
        return True
832c545997215f30fd735511f68c97629b9187b0
455,878
def count_nonref_reads(record_sample):
    """Count the number of reads supporting all non-reference alleles"""
    allelic_depths = record_sample['AD']
    try:
        nonref_reads = sum(allelic_depths[1:])
    except TypeError:
        # Occurs when AD is a single value, not a list
        nonref_reads = 0
    return nonref_reads
79f628306e04078a3ea5f6957630883dbc918e9a
120,062
from typing import List

import itertools


def split_items(items: List, sizes: List[int]) -> List:
    """Split the items into a list each with length as specified in sizes

    len(split_items) = len(sizes)
    len(split_items[i]) = sizes[i]

    Args:
        items (List): Flat list of items to be split
        sizes (List[int]): Sizes of elements in the split list

    Returns:
        List: Split list
    """
    it = iter(items)
    return [list(itertools.islice(it, 0, size)) for size in sizes]
52ffca97467d48b6e524862937518138a5502664
559,028
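A one-line check of the slicing behaviour (toy values):

print(split_items([1, 2, 3, 4, 5, 6], [2, 1, 3]))  # [[1, 2], [3], [4, 5, 6]]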
def _get_piecewise_val(knots, t):
    """
    Based on the knots specified for a piecewise linear function and a point
    in the domain 't', return the value of the piecewise linear function.

    knots: dictionary where keys and values should all be numeric.
    t: numeric within the domain specified by the knots.
    returns: float
    """
    knots = {i: knots[i] for i in sorted(knots.keys())}
    knot_times = list(knots.keys())
    knot_vals = list(knots.values())
    if t < knot_times[0] or t > knot_times[-1]:
        raise ValueError(f"Cannot determine piecewise value for t={t}")
    # Slope of each linear segment
    s = []
    for i in range(1, len(knot_times)):
        s.append((knot_vals[i] - knot_vals[i - 1]) / (knot_times[i] - knot_times[i - 1]))
    # Find the segment containing t, then interpolate linearly within it
    j = 0
    while knot_times[j + 1] < t:
        j += 1
    return knot_vals[j] + s[j] * (t - knot_times[j])
68d969247d324b5116ca5854a91fde4c06ef0259
268,114
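A quick interpolation check with two segments (hypothetical knots):

knots = {0: 0.0, 10: 100.0, 20: 100.0}
print(_get_piecewise_val(knots, 5))   # 50.0 (on the rising segment)
print(_get_piecewise_val(knots, 15))  # 100.0 (on the flat segment)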
def dm_smear(dm, BW, center_freq):
    """
    dm_smear(dm, BW, center_freq):
        Return the smearing in sec caused by a 'dm' over a bandwidth
        of 'BW' MHz centered at 'center_freq' MHz.
    """
    return dm * BW / (0.0001205 * center_freq * center_freq * center_freq)
28a8d91137e5a15205eeed95498a7aac5b770f36
566,212
def get_object(model, session, *args, **kwargs):
    """
    Use get() to return an object, return None if object does not exist.
    """
    try:
        # Query.filter() only accepts positional criteria; keyword
        # equality filters go through filter_by().
        model_object = session.query(model).filter(*args).filter_by(**kwargs).first()
        return model_object
    except Exception:
        return None
c717781a04a5401e883860458bd309c61ad2e3a8
572,207
def get_actual_source_freqs(messages_dets, expected_source_freqs):
    """
    Check the message sources are as expected. Note - we don't have to know
    what messages generated from helpers in other modules will do - just what
    we expect from this module. So we don't specify what sources we expect -
    just those that we require (and how often) and those we ban (we expect
    those 0 times).

    Note - exclude system-generated messages e.g. a message fails to run so we
    get a message all right but it is a message reporting the problem. Don't
    count those ;-).

    :param list messages_dets: list of MessageDets named tuples
    :param dict expected_source_freqs: keys are sources (strings) and values
        are integers. The integer should be set to 0 if we want to explicitly
        ban a source i.e. we do not expect it to provide a message. E.g. if
        our list does not have mixed data types we do not expect a message
        saying there are.
    :return: actual frequencies of the tracked sources
    :rtype: dict
    """
    overall_snippet_messages_dets, block_level_messages_dets = messages_dets
    all_messages_dets = (
        overall_snippet_messages_dets + block_level_messages_dets)
    actual_source_freqs = {source: 0 for source in expected_source_freqs}
    for message_dets in all_messages_dets:
        if message_dets.source in expected_source_freqs:
            ## Only count tracked sources - if we counted sources not in the
            ## expected list, the dicts would differ even when the tracked
            ## sources matched exactly, and we'd fail the test when we
            ## shouldn't.
            actual_source_freqs[message_dets.source] += 1
    return actual_source_freqs
3bf2bc50e4983af378f734fb71a2a2494b174de4
645,337
def dms2deg(valin):
    """
    Converts DMS input to decimal degrees.
    Input can be either a string delimited by : or spaces, or a list of
    [D,M,S] numbers.

    Parameters
    ----------
    valin: float
        Input value in DMS. Can be either:
            - a string delimited by : or spaces
            - a list of [D,M,S] numbers (floats or ints)

    Returns
    -------
    valout : float
        Degrees corresponding to the DMS value

    Examples
    --------
    # e.g., '-78:12:34.56' corresponds to -77.7904 deg
    obs.dms2deg('-78:12:34.56')    #--> -77.79039999999999
    obs.dms2deg('-78 12 34.56')    #--> -77.79039999999999
    obs.dms2deg([-78, 12, 34.56])  #--> -77.79039999999999
    """
    if type(valin) == str:
        if ':' in valin:
            ra = [float(val) for val in valin.split(':')]
        else:
            ra = [float(val) for val in valin.split(' ')]
    else:
        ra = valin
    valout = ra[0] + ra[1] / 60. + ra[2] / 3600.
    return valout
3efac9d11c8a7b5933766a0610f49a884e20925b
11,101
import torch


def generate_linear_data(n=1000):
    """
    Generates an example dataset that can be separated linearly

    Output:
        inputs  : nx2 dimension FloatTensor
        targets : nx1 dimension LongTensor with range [0,1]
    """
    torch.manual_seed(123)
    inputs = torch.rand(n, 2)
    targets = torch.sum(inputs, dim=1).sub(0.9).sign().sub(1).div(2).abs().long().view(-1, 1)
    return inputs, targets
c3cc6167afc75b848f5e9c416609daf3854a0ff5
626,918
import unicodedata


def to_ascii(s):
    """
    Translates the string or bytes input into an ascii string with the
    accents stripped off.
    """
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    return ''.join((c for c in unicodedata.normalize('NFD', s)
                    if unicodedata.category(c) != 'Mn'))
fb86e21b66a1abd9acd144cbcb596a2d8835b748
683,927
def reprify(obj, fields):
    """
    Returns a string suitable as a ``__repr__`` for ``obj`` that includes the
    fields in the list ``fields``
    """
    return '{0.__module__}.{0.__name__}({1})'.format(
        type(obj),
        ', '.join('{}={!r}'.format(f, getattr(obj, f)) for f in fields),
    )
96946859d6d59211248c56c5d7d629fcf270464b
369,151
import warnings


def deepmerge(a, b, path=None, overwrite=True):
    """Merges dict b into dict a

    overwrite : bool
        Overwrites value in a with value in b if True with a warning,
        else raises Exception

    Based on: https://stackoverflow.com/questions/7204805/how-to-merge-dictionaries-of-dictionaries/7205107#7205107
    """
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                # Propagate overwrite so nested conflicts behave consistently
                # (the original recursion silently dropped this argument).
                deepmerge(a[key], b[key], path + [str(key)], overwrite)
            elif a[key] == b[key]:
                pass  # same leaf value
            else:
                if overwrite:
                    warnings.warn(
                        "Overwrote value " + str(a[key]) + " with "
                        + str(b[key]) + " while merging dicts."
                    )
                    a[key] = b[key]
                else:
                    raise Exception(
                        "Conflict at %s" % ".".join(path + [str(key)])
                        + str(a[key]) + " conflicts with " + str(b[key])
                        + " while merging dicts."
                    )
        else:
            a[key] = b[key]
    return a
b95114d22b00862ed11e1acf375bf63a4359ba78
249,917
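A small merge showing the nested behaviour (values invented for illustration); the port conflict triggers a warning before being overwritten:

a = {"db": {"host": "localhost", "port": 5432}}
b = {"db": {"port": 5433}, "debug": True}
print(deepmerge(a, b))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}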
import click


def get_stake_amount() -> float:
    """Retrieve desired stake amount from user

    Each stake is 10 TRB on TellorFlex Polygon. If an address is not staked
    for any reason, the PolygonReporter will attempt to stake. Number of
    stakes determines the reporter lock:

        reporter_lock = 12hrs / N * stakes

    Retrieves desired stake amount from user input."""
    msg = "Enter amount TRB to stake if unstaked:"
    stake = click.prompt(msg, type=float, default=10.0, show_default=True)
    assert isinstance(stake, float)
    assert stake >= 10.0
    return stake
522f10386ea2ecfe4a15c40105a1517738be0a2d
517,907
def extract_key(key_shape, item):
    """
    construct a key according to key_shape for building an index

    usage::

        key_shape = "foo", "bar"
        item = {"baz": 1, "bar": 2, "foo": 3}
        extract_key(key_shape, item) -> {"foo": 3, "bar": 2}
    """
    return {field: item[field] for field in key_shape}
8f0b28184c9d35beef23b669aafea66df19eaf09
236,782
import pickle


def load_sim_data(filename):
    """
    Load pickled dataset and return as list.
    """
    with open(filename, 'rb') as f:
        done = False
        data_list = []
        while not done:
            try:
                data = pickle.load(f)
                data_list.append(data)
            except EOFError:
                done = True
    return data_list
628e8ae3523b91a306a5e6b556790625feb6637e
100,646
def new_item(strict=True):
    """Item builder.

    Return a dictionary built from the predefined template below.

    ----------
    Parameters
    strict : currently unused.

    ----------
    Return
    Return a dictionary with the predefined keys.
    """
    secret = {
        'label': '',
        'auto': 'True',
        'length': '',
        'letters': '',
        'symbols': '',
        'numbers': '',
        'password': ''}
    item_template = {
        'name': '',         # new name
        'url': '',          # Check valid url
        'login': '',        # Any string
        'email': '',        # @-mail
        'description': '',  # Any string
        'tag': '',          # Any string
        'color': '',        # Basic colors as string
        'created': '',      # Date
        'changed': '',      # Date
        # List of secrets
        'secrets': [dict(secret), dict(secret), dict(secret)],
        'history': ''       # Record history - not yet managed
    }
    return dict(item_template)
61af4e64e155e99b59e4a537d82fadfa0b782f91
253,462
import ast


def update_function_name(func_node, name):
    """
    Update name of function AST

    :param func_node: instance of `ast.FunctionDef`
    :param name: New name of function
    :return: New function node with new name
    """
    return ast.FunctionDef(
        name=name,
        args=func_node.args,
        body=func_node.body,
        decorator_list=func_node.decorator_list
        if hasattr(func_node, 'decorator_list') else []
    )
0721a44de9a36f113323e5d3367b263066b89d49
169,931
def is_palindrome(s):
    """
    Input: s, a string

    Returns True if s is a palindrome, False otherwise
    """
    def to_chars(s):
        s = s.lower()
        ans = ''
        for char in s:
            if char in 'abcdefghijklmnopqrstuvwxyz':
                ans = ans + char
        return ans

    def is_pal(s):
        if len(s) <= 1:
            return True
        else:
            return s[0] == s[-1] and is_pal(s[1:-1])

    return is_pal(to_chars(s))
a956ee66f20d57eb58dae99c7108739b84bf313d
692,663
def xyz_datashape(al, xyz):
    """Get datashape from axislabels and bounds."""
    x, X, y, Y, z, Z = xyz
    if al == 'zyx':
        datalayout = (Z - z, Y - y, X - x)
    elif al == 'zxy':
        datalayout = (Z - z, X - x, Y - y)
    elif al == 'yzx':
        datalayout = (Y - y, Z - z, X - x)
    elif al == 'yxz':
        datalayout = (Y - y, X - x, Z - z)
    elif al == 'xzy':
        datalayout = (X - x, Z - z, Y - y)
    else:
        datalayout = (X - x, Y - y, Z - z)
    return datalayout
8215b7f5341a7f7e4d70c57c4641abfbf9f0e941
678,814
from functools import reduce


def attrgetter(item, default=''):
    """operator.attrgetter with a default value."""
    reducer = lambda obj, name: getattr(obj, name, default)
    return lambda obj: reduce(reducer, item.split('.'), obj)
b95b24b9333e0e0adadec4cfc9862ee574e1c017
39,426
def parse_concepts(filename):
    """Takes a markdown file with a certain structure and parses it to
    separate the concepts and the relations between the concepts.

    Structure:
        # [Title]

        ## [Concept]
        [Some text]
        [Even Latex math]
        ### [Any subtitle]
        ### Utiliza:
        - [Related concept 1]
        - [Related concept 2]

        ## [Concept]
        ...

    The function returns an array of dicts and a string with the Title.
    Each dictionary corresponds to a concept and has keys for:
        id      -> The position of the dict in the array. This is useful to
                   build the network.
        name    -> The title of the concept. What appears as [Concept] in
                   the structure.
        uses    -> Array of the indexes of the concepts in the "Utiliza:"
                   section.
        content -> All of the plain text between the Concept title and the
                   "Utiliza:" section.
    """
    # Open the markdown file
    with open(filename, "r") as file:
        text = file.read()

    # Create list of concepts and save title
    Concepts = []
    index = 0
    sections = text.split("\n## ")
    Title = sections[0].strip("# ").strip("\n")
    for con in sections[1:]:
        concept = {}
        lines = [i for i in con.split("\n") if i != ""]
        concept["id"] = index
        concept["name"] = lines[0]
        try:
            end_index = lines.index("### Utiliza:")
            concept["uses"] = [line.strip("- ") for line in lines[end_index + 1:]]
        except ValueError:  # no "Utiliza:" section for this concept
            concept["uses"] = []
            end_index = len(lines)
        concept["content"] = "\n".join(lines[1:end_index])
        concept["content"] = "##" + concept["name"] + "\n" + concept["content"]
        Concepts.append(concept)
        index += 1

    # Update relative indexes
    for con in Concepts:
        uses_index = []
        for i in Concepts:
            if i["name"] in con["uses"]:
                uses_index.append(i["id"])
        con["uses"] = uses_index

    return Concepts, Title
133d3df181f13f925a14e0843e911f16c4fe4d88
332,139
def edge_check(p, start, end):
    """
    This function checks whether the coordinates of a point lie at the edge
    of a grid. It returns a list of boolean values.
    """
    check = []
    for i in p:
        check.append((i - end) == 0 or (i - start) == 0)
    return check
21b9ec594dbeeba48351445982a7baf476038e57
214,327
def lv_unpack(txt):
    """
    Deserializes a string of the length:value format

    :param txt: The input string
    :return: a list of values
    """
    txt = txt.strip()
    res = []
    while txt:
        l, v = txt.split(":", 1)
        res.append(v[: int(l)])
        txt = v[int(l):]
    return res
e0390bb200515a595e7f177404fdfa44a11b1c7f
687,292
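A round-trip sketch of the length:value format:

print(lv_unpack("3:foo6:barbaz"))  # ['foo', 'barbaz']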
import hmac
import hashlib


def hmac_sha512(key: bytes, data: bytes) -> bytes:
    """
    Return the SHA512 HMAC for the byte sequence ``data`` generated with the
    secret key ``key``.

    Corresponds directly to the "HMAC-SHA512(Key = ..., Data = ...)" function
    in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param key: The secret key used for HMAC calculation.
    :param data: The data for which an HMAC should be calculated.
    :return: A byte sequence containing the HMAC of ``data`` generated with
        the secret key ``key``.
    """
    h = hmac.new(key, data, hashlib.sha512)
    return h.digest()
64850ea2d5e921138d8e0ebc2d021f8eaf5a7357
706,282
import struct
import socket


def ipStringToInt(ip_string):
    """Convert string formatted IP to IP int"""
    return struct.unpack('!L', socket.inet_aton(ip_string))[0]
8e212ec27908cc005df8fed972d262ee9807e90a
241,827
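A quick check of the big-endian conversion (127 * 2**24 + 1):

print(ipStringToInt("127.0.0.1"))  # 2130706433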
import copy
import gc


def get_data(img):
    """Get the data in the image without having a side effect on the
    Nifti1Image object

    Parameters
    ----------
    img: Nifti1Image

    Returns
    -------
    np.ndarray
    """
    if hasattr(img, '_data_cache') and img._data_cache is None:
        # Copy locally the nifti_image to avoid the side effect of data
        # loading
        img = copy.deepcopy(img)
    # force garbage collector
    gc.collect()
    return img.get_data()
6eb70b2cec2f2e1a7caf1204f64258128a038e36
204,866
import hashlib


def get_hash(filename, hash_type="sha256"):
    """get a file's hash checksum"""
    h = hashlib.new(hash_type)
    with open(filename, "rb") as f:
        h.update(f.read())
    return h.hexdigest()
5f6a3d8c8041bd154977a3e4946eb06658232066
481,516
from typing import List

import re


def validator(arr: List[str]) -> List[str]:
    """
    >>> validator(['4123456789123456', '5123-4567-8912-3456',
    ...            '61234-567-8912-3456', '4123356789123456',
    ...            '5133-3367-8912-3456', '5123 - 3567 - 8912 - 3456'])
    ['Valid', 'Valid', 'Invalid', 'Valid', 'Invalid', 'Invalid']
    """
    pattern = re.compile(r"^(?!.*(\d)(-?\1){3})[456]\d{3}(?:-?\d{4}){3}$")
    return ["Valid" if pattern.match(row.strip()) else "Invalid" for row in arr]
081bcf6cd0ee0dbd7b352ee5acf0d6574fa26b9e
365,332
from typing import List
from typing import Tuple


def bio_to_spans(text: List[str], tags: List[str]) -> List[Tuple[int, int, str]]:
    """
    Convert BIO tagged list of strings into span starts and ends

    Args:
        text: list of words
        tags: list of tags

    Returns:
        tuple: list of start, end and tag of detected spans
    """
    pointer = 0
    starts = []
    for i, t in enumerate(tags):
        if t.startswith("B-"):
            starts.append((i, pointer))
        pointer += len(text[i]) + 1
    spans = []
    for s_i, s_char in starts:
        label_str = tags[s_i][2:]
        e = 0
        e_char = len(text[s_i + e])
        while len(tags) > s_i + e + 1 and tags[s_i + e + 1].startswith("I-"):
            e += 1
            e_char += 1 + len(text[s_i + e])
        spans.append((s_char, s_char + e_char, label_str))
    return spans
894db8e3a6189d9e0a168eca683ded67e1aa9968
494,621
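A toy example; the character offsets assume the words are joined with single spaces:

words = ["Alice", "lives", "in", "New", "York"]
tags = ["B-PER", "O", "O", "B-LOC", "I-LOC"]
print(bio_to_spans(words, tags))  # [(0, 5, 'PER'), (15, 23, 'LOC')]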
def isiterable(o):
    """
    Return True if the given object supports having iter() called on it BUT
    is not an instance of string. Otherwise return False.
    """
    if isinstance(o, (str, bytes)):
        return False
    try:
        iter(o)
    except TypeError:
        return False
    else:
        return True
6577933baf41218364d8f47fec8b4e3c5854c6da
616,854
def get_label_filter(input_labels):
    """
    Allow labels input to be formatted like:
        -lb key1:value -lb key2:value
    AND
        -lb key1:value,key2:value

    Output: key1:value,key2:value

    :param list(str) input_labels: list of labels, like,
        ['key1:value1', 'key2:value2'] or ['key1:value1,key2:value2']
    :return str: labels formatted, like, 'key1:value1,key2:value2'
    """
    if input_labels is None:
        # Return an empty string so the return type matches the docstring
        # (the original returned an empty list here).
        return ""
    label_filter = []
    for label in input_labels:
        sub_labels = [sub_label.strip() for sub_label in label.split(",")]
        label_filter.extend(sub_labels)
    return ",".join(label_filter)
787ce61c130432065c7d0f50e06fe1f3903dfd59
640,041
def ensure_list(val):
    """Converts the argument to a list, wrapping in [] if needed"""
    return val if isinstance(val, list) else [val]
10ec23fac23e27defbf5a9325f3c863f8c0b6d50
656,142
def apply_local_threshold(latency_ms, server_descriptions):
    """All servers with round trip times within latency_ms of the fastest one.

    No ServerDescription's round_trip_time can be None.

    The `server_descriptions` passed to this function should have
    non-readable servers (e.g. RSGhost, RSArbiter, Unknown) filtered out
    (e.g. by readable_server_selector or secondary_server_selector) first.
    """
    if not server_descriptions:
        # Avoid ValueError from min() with empty sequence.
        return []
    # round_trip_time is in seconds.
    if any(s for s in server_descriptions if s.round_trip_time is None):
        raise ValueError("Not all servers' round trip times are known")
    fastest = min(s.round_trip_time for s in server_descriptions)
    return [
        s for s in server_descriptions
        if (s.round_trip_time - fastest) <= latency_ms / 1000.]
10926d058e4e96725b1571f4cf56872c7ba76619
406,593
from typing import List


def quicksort(items: List[int]) -> List[int]:
    """Tony Hoare's algorithm.

    Returns a new list with sorted items.
    For an empty list it returns an empty list.

    >>> quicksort([1,8,3,5])
    [1, 3, 5, 8]
    >>> quicksort([6,4,9,3,2])
    [2, 3, 4, 6, 9]
    >>> quicksort([2,2,2,-3])
    [-3, 2, 2, 2]
    >>> quicksort([1])
    [1]
    >>> quicksort([])
    []
    """
    if not items:
        return []
    if len(items) == 1:
        return items
    else:
        anchor = items[0]
        lower_values = []
        higher_values = []
        equal_values = []
        for elem in items:
            if elem < anchor:
                lower_values.append(elem)
            elif elem > anchor:
                higher_values.append(elem)
            else:  # elem == anchor
                equal_values.append(elem)
        return quicksort(lower_values) + equal_values + quicksort(higher_values)
f7aad895a249af5a793140c25ed9f48d4c2dce6f
284,668
def _raw_to_int(raw_data):
    """Converting list of raw hex values as strings to integers."""
    return [int(x, 16) for x in raw_data]
e8ae4784e142bcfa3ba8d7b013871986a1b5173a
45,553
def indiscriminate_time(r, tau, N_r):
    """
    Expected time spent in park by an indiscriminate poacher

    Parameters
    ----------
    r: float between 0 and 1 - the proportion of horns devalued
    tau: float - the time taken to kill a horn
    N_r: int - number of devalued rhinos that correspond to a single
        valued rhino
    """
    return tau * (1 - r ** N_r) / (1 - r)
b5c45ef1f55c2b5b3942dae3d80f925a82314e31
608,174
def contains(seq, value):
    """
    Description
    ----------
    Checks to see if a value is in the sequence or dictionary.

    Parameters
    ----------
    seq : (list or tuple or set or dict or string) - sequence/dictionary to
        search in
    value : any - value to search for

    Returns
    ----------
    bool - True (value found), False (value not found)

    Examples
    ----------
    >>> lst = [1, 2, 3, 4, 5]
    >>> contains(lst, 4) -> True
    >>> contains(lst, 10) -> False
    """
    if isinstance(seq, dict):
        return value in seq.keys() or value in seq.values()
    return value in seq
ca408206626d230ac5c59157f9012046071fd164
679,140
def port_hash(name):
    """
    Given a string, returns a port number between 49152 and 65535

    This range (of 2**14 possibilities) is the range for dynamic and/or
    private ports (ephemeral ports) specified by iana.org.
    The algorithm is deterministic.
    """
    fac = 0xd2d84a61
    val = 0
    for c in name:
        val += (val >> 3) + (ord(c) * fac)
    val += (val >> 3) + (len(name) * fac)
    return 49152 + (val % 2**14)
f0aee6f45014d191df985b6d1fcf8cf90cada488
438,336
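A determinism and range check (the concrete port depends only on the input string, here a hypothetical service name):

assert port_hash("my-service") == port_hash("my-service")
assert 49152 <= port_hash("my-service") <= 65535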
def get_item(index, my_list):
    """
    This function will get an item from a list using the index of the item

    :param index: Integer
    :param my_list: List
    :return: String
    """
    return my_list[index]
1d05e6135dab65935d68d8683b7edd29b742e061
252,777
def get_unique_item(lst):
    """
    For a list, return a list of the unique items in the list.
    """
    return list(set(lst))
21e608037d264870de23ef8bb1434fa47914ee29
393,746
import math


def get_confs_per_split(batch, num_confs, sub_batch_size):
    """
    Get the number of conformers per sub-batch.

    Args:
        batch (dict): batched sample of species
        num_confs (int): number of conformers in the species
        sub_batch_size (int): maximum number of conformers per sub-batch.

    Returns:
        confs_per_split (list[int]): number of conformers in each sub-batch.
    """
    val_len = len(batch["nxyz"])
    inherent_val_len = val_len // num_confs
    split_list = [sub_batch_size * inherent_val_len] * math.floor(
        num_confs / sub_batch_size)

    # if there's a remainder
    if sum(split_list) != val_len:
        split_list.append(val_len - sum(split_list))

    confs_per_split = [i // inherent_val_len for i in split_list]

    return confs_per_split
e8fbba0b0f0f7a7ba30c902506bc3392a5f0474d
212,701
def handler_Tag(obj, H, hrepr):
    """
    Returns the default representation for a tag, which is the tag itself.
    """
    return obj
9ffdef62e3f8c5a5ea87bd11208dd7e57f53035b
401,892
def get_pull_test_images_steps(test_image_suffix):
    """Returns steps to pull testing versions of base-images and tag them so
    that they are used in builds."""
    images = [
        'gcr.io/oss-fuzz-base/base-builder',
        'gcr.io/oss-fuzz-base/base-builder-swift',
        'gcr.io/oss-fuzz-base/base-builder-jvm',
        'gcr.io/oss-fuzz-base/base-builder-go',
        'gcr.io/oss-fuzz-base/base-builder-python',
        'gcr.io/oss-fuzz-base/base-builder-rust',
    ]
    steps = []
    for image in images:
        test_image = image + '-' + test_image_suffix
        steps.append({
            'name': 'gcr.io/cloud-builders/docker',
            'args': [
                'pull',
                test_image,
            ],
            'waitFor': '-'  # Start this immediately, don't wait for previous step.
        })

        # This step is hacky but gives us great flexibility. OSS-Fuzz has
        # hardcoded references to gcr.io/oss-fuzz-base/base-builder (in
        # dockerfiles, for example) and gcr.io/oss-fuzz-base-runner (in this
        # build code). But the testing versions of those images are called
        # e.g. gcr.io/oss-fuzz-base/base-builder-testing and
        # gcr.io/oss-fuzz-base/base-runner-testing. How can we get the build
        # to use the testing images instead of the real ones? By doing this
        # step: tagging the test image with the non-test version, so that the
        # test version is used instead of pulling the real one.
        steps.append({
            'name': 'gcr.io/cloud-builders/docker',
            'args': ['tag', test_image, image],
        })
    return steps
caf1846e51618844fa5c55ae988933a80e626952
403,328
import decimal


def RichardsonExtrapolation(fh, fhn, n, k):
    """Compute the Richardson extrapolation based on two approximations of
    order k where the finite difference parameter h is used in fh and h/n
    in fhn.

    Inputs:
        fh:  Approximation using h
        fhn: Approximation using h/n
        n:   divisor of h
        k:   original order of approximation

    Returns:
        Richardson estimate of order k+1"""
    n = decimal.Decimal(n)
    k = decimal.Decimal(k)
    numerator = decimal.Decimal(n**k * decimal.Decimal(fhn) - decimal.Decimal(fh))
    denominator = decimal.Decimal(n**k - decimal.Decimal(1.0))
    return float(numerator / denominator)
899514b887020980a3bceb4a0dcfd0abcffd1063
15,268
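A worked check, assuming forward differences for f'(0) with f = exp (order k = 1, step halved so n = 2); the extrapolated estimate lands much closer to the true value 1:

import math
fh = (math.exp(0.1) - 1.0) / 0.1     # ~1.05171
fhn = (math.exp(0.05) - 1.0) / 0.05  # ~1.02542
print(RichardsonExtrapolation(fh, fhn, n=2, k=1))  # ~0.99914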
def get_gpu_str(devices):
    """Produce string of GPUs given the list of int

    Args:
        devices (list/tuple): list of device numbers

    Return:
        string of device numbers, comma separated
    """
    gpu_str = ''
    for gpu in devices:
        gpu_str += str(gpu) + ','
    return gpu_str[:-1]
cbb1e7df1fc71b4aab40fbc290766d72f056b7da
372,573
def get_output_keys(connections):
    """Return a list of output keys for the given connection block.
    """
    keys = list()
    for tfk in connections.transforms_functions:
        for d in range(tfk.transform.shape[0]):
            keys.append(tfk.keyspace.key(d=d))
    return keys
db848bf4749fe0dd10ac2cca44226d794536e5e6
424,709
import math


def chute_velocity(mass, diameter, drag=0.75, gravity=9.8, air_density=1.22):
    """
    Determine the velocity of the rocket when it hits the earth given the
    mass of the rocket and the diameter of the chute.

    mass: mass of rocket in kg
    diameter: diameter of the chute in meters
    drag: drag coefficient for chute
    """
    return math.sqrt((8 * mass * gravity) /
                     (math.pi * air_density * drag * diameter ** 2))
d07ee18e3c24fb9879baacc8a2fbc3eeeea59b7a
71,089
def max_distant_color(color):
    """
    Returns the maximally distant uint8 color for a given uint8 color.
    """
    return tuple(255 if item <= 127 else 0 for item in color)
b82fe35368f93abf187a277e5e384759bb191cac
585,370
def wrap_pm180(valin):
    """
    Wraps a value (float) to a -180 to 180 degree range.

    Parameters
    ----------
    valin: float
        Input value in degrees

    Returns
    -------
    valout : float

    Example
    -------
    # e.g., 200 degrees corresponds to -160 degrees when limited to [-180,180]
    obs.wrap_pm180(200)  #--> -160.0

    Note
    ----
    Equivalent to ((valin + 180.) % 360.) - 180. up to the placement of the
    boundary value itself.
    """
    val_red = valin % 360 - 360
    valout = val_red + (360. if val_red <= -180 else 0.)
    return valout
86ffce0cf56deb00da27e51929b4ccd9c51239ae
528,330
def get_lang_abbr_from_resp(http_resp):
    """
    This function takes a requests object containing a response from
    detectlanguage.com, parses it, and returns the abbreviation of the
    language detected.
    """
    return http_resp.json()["data"]["detections"][0]["language"]
6635b88306fbc4f149307133c0a118542a8709a9
34,677
def factorial(num):
    """
    Factorial of a number: input a number and it returns its factorial
    """
    if num <= 1:  # also terminates for num == 0
        return 1
    else:
        return num * factorial(num - 1)
7802581d29385e1db0baba4f08b40052b299feb4
411,713
import zipfile


def read_kmz(infile):
    """read raw kml/kmz file"""
    if infile.upper().endswith('.KML'):
        with open(infile, "r") as f:
            return f.read()
    if infile.upper().endswith('.KMZ'):
        with zipfile.ZipFile(infile, "r") as f:
            return f.read(f.namelist()[0])
fa18f5bd1c7e0df5de2851d3856cbf1b573e9cac
293,575
def NUTS_down(NUTS):
    """
    For a given NUTS-region, finds the corresponding region it is part of

    Arguments:
        *NUTS* (string) -- name of the NUTS region (e.g. NL413)

    Returns:
        *NUTS_lower* (string) -- name of the NUTS region one level lower
        (e.g. NL41)
    """
    if len(NUTS) <= 2:
        # the NUTS-0 level (country) is the lowest level and has length 2
        raise ValueError("Cannot aggregate {} to a lower NUTS-level".format(NUTS))
    # would be nice to check if the output exists as a region
    return str(NUTS)[:-1]
ee98b2328e0b0efde18a5a0e01ccfe4b8de6a6e9
321,630
def to_bytes(value: int, size: int) -> bytearray:
    """
    Converts an integer value into a bytearray.

    :param int value: Input value.
    :param int size: Number of bytes.
    :return: Value bytes.
    :rtype: bytearray
    """
    return bytearray(value.to_bytes(length=size, byteorder='big', signed=False))
1545b8e55e6e39fad8b02b19fd64d0beb78a4a3d
276,249
from datetime import datetime


def week_num(dt: datetime):
    """Returns the (zero-based) week number of the given day within its month
    """
    dt_first = dt.replace(day=1)
    dt_first_weekday = dt_first.weekday()
    return int((dt.day + dt_first_weekday - 1) / 7)
dc6c082cd4c60ec022550a90a0b1c88e208aaa51
445,748
def FTHBTaxCredit(MARS, FTHB_credit, FTHB_credit_c, c00100, FTHB_credit_e,
                  fthbc, fthb_credit_amt):
    """
    Computes refundable first time homebuyers' tax credit amount.
    """
    if FTHB_credit is True:
        # max credit
        fthbc = max(0., min(FTHB_credit_c, fthb_credit_amt))
        # eliminated based on agi
        positiveagiamt = max(c00100, 0.)
        fthb_max_agi = FTHB_credit_e[MARS - 1]
        if positiveagiamt > fthb_max_agi:
            fthbc = 0.
    return fthbc
78979656fd40993eeaade228ec77a543ef62b856
413,160
def strstr(cpu_context, func_name, func_args):
    """
    Locate substring.

    Returns a pointer to the first occurrence of str2 in str1, or a null
    pointer if str2 is not part of str1.
    """
    str1_ptr, str2_ptr = func_args
    str1 = cpu_context.memory.read_data(str1_ptr)
    str2 = cpu_context.memory.read_data(str2_ptr)
    offset = str1.find(str2)
    if offset == -1:
        return 0
    return str1_ptr + offset
736c1e6b66bbc9279e2d6feff1f9df342ac7c19f
180,579
import re


def _verify_host_data(host_hash, zone, host_name):
    """
    Check if a host's configuration is sane.

    Check if the host's configuration follows some basic rules on which the
    script's main loop depends.

    Args:
        host_hash: A hash containing the host's configuration. Fields are as
            follows:
                ip: mandatory. IP address of the host
                port: optional, default is 53. Port to use while connecting.
                key-id: optional. The ID of the key that should be sent to
                    the host.
                key-data: optional. The TSIG key itself.
                master: optional, default is False. Flag depicting whether
                    the host is master for the zone or not.
        zone: The zone to which the host's configuration belongs.
        host_name: The hostname of the host.

    Returns:
        A list of strings describing the problems found, or an empty list if
        there are none.
    """
    msg = []
    if 'ip' in host_hash:
        ip = host_hash['ip']
        if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
            msg.append('Zonehost {0} from zone {1} has malformed IP: {2}.'.format(
                host_name, zone, ip))
    else:
        msg.append('Zonehost {0} from zone {1} '.format(host_name, zone) +
                   'does not have mandatory "ip" field.')
    if 'port' in host_hash:
        port = host_hash['port']
        if not isinstance(port, int) or port < 1 or port > 65535:
            msg.append('Zonehost {0} of zone {1} '.format(host_name, zone) +
                       'has malformed port: {0}.'.format(port))
    if "key-id" in host_hash or "key-data" in host_hash:
        if not ("key-id" in host_hash and "key-data" in host_hash):
            msg.append('Zonehost {0} from zone {1} '.format(host_name, zone) +
                       'should have both "key-id" and "key-data" keys ' +
                       'defined or none of them.')
        else:
            if not re.match(r"^[a-zA-Z0-9-\.]+$", host_hash['key-id']):
                msg.append('Zone {0}, zonekey for host {1}'.format(zone, host_name) +
                           ' has invalid "id" entry: {0}.'.format(
                               host_hash['key-id']))
            if not re.match(
                    "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
                    host_hash['key-data']):
                msg.append('Zone {0}, zonekey for host {1}'.format(zone, host_name) +
                           ' has non-base64 "data" entry.')
    return msg
eae02db2c98c2380ce1a180a9d56b83d9eb5bb53
129,163
def preprocess(line):
    """
    Return line string after removing commented portion and excess spaces.
    """
    if "//" in line:
        line = line[:line.index('//')]
    line = line.strip()
    return line
88619331c043911fe4f64f725cad99ee72cce2b2
563,141
def timestamp_is_valid(current_position, last_position):
    """
    Returns True if the current position's timestamp comes after the previous
    position's timestamp
    """
    return current_position.timestamp > last_position.timestamp
77e7a3dea0870f659357599f799c1a11c14463a0
166,936
def resize_keypoint(keypoint, in_size, out_size):
    """Change values of keypoint according to parameters for resizing an
    image.

    Args:
        keypoint (~numpy.ndarray): Keypoints in the image. The shape of this
            array is :math:`(K, 2)`. :math:`K` is the number of keypoint in
            the image. The last dimension is composed of :math:`y` and
            :math:`x` coordinates of the keypoints.
        in_size (tuple): A tuple of length 2. The height and the width of
            the image before resized.
        out_size (tuple): A tuple of length 2. The height and the width of
            the image after resized.

    Returns:
        ~numpy.ndarray: Keypoint rescaled according to the given image shapes.
    """
    keypoint = keypoint.copy()
    y_scale = float(out_size[0]) / in_size[0]
    x_scale = float(out_size[1]) / in_size[1]
    keypoint[:, 0] = y_scale * keypoint[:, 0]
    keypoint[:, 1] = x_scale * keypoint[:, 1]
    return keypoint
b299a1e2e0031e6ae9111d2261dd98b9d6ce0660
26,209
def _escape_strings(strings):
    """escape to squarebracket and doublequote.

    >>> print(_escape_strings("hoge"))
    hoge
    >>> print(_escape_strings("[hoge"))
    \\[hoge
    >>> print(_escape_strings("hoge]"))
    hoge\\]
    >>> print(_escape_strings("[hoge]"))
    \\[hoge\\]
    >>> print(_escape_strings('[ho"ge]'))
    \\[ho\\"ge\\]
    """
    target_chars = '[]"`'
    ret = []
    for string in strings:
        if string in target_chars:
            string = "\\" + string
        ret.append(string)
    return "".join(ret)
e1a80def54cfe40da9634b5bbe7f157539a864d1
46,818
import torch


def is_complex_data(data: torch.Tensor, complex_last: bool = True) -> bool:
    """Returns True if data is a complex tensor, i.e. has a complex axis of
    dimension 2, and False otherwise.

    Parameters
    ----------
    data: torch.Tensor
        For 2D data the shape is assumed ([batch], [coil], height, width,
        [complex]) or ([batch], [coil], [complex], height, width).
        For 3D data the shape is assumed ([batch], [coil], slice, height,
        width, [complex]) or ([batch], [coil], [complex], slice, height,
        width).
    complex_last: bool
        If true, will require complex axis to be at the last axis.
        Default: True.

    Returns
    -------
    bool
    """
    if 2 not in data.shape:
        return False

    if complex_last:
        if data.size(-1) != 2:
            return False
    else:
        if data.ndim == 6:
            # (B, C, 2, S, H, W) or (B, C, S, H, W, 2)
            if data.size(2) != 2 and data.size(-1) != 2:
                return False
        elif data.ndim == 5:
            # (B, 2, S, H, W) or (B, C, 2, H, W) or (B, S, H, W, 2) or (B, C, H, W, 2)
            if data.size(1) != 2 and data.size(2) != 2 and data.size(-1) != 2:
                return False
        elif data.ndim == 4:
            # (B, 2, H, W) or (B, H, W, 2) or (S, H, W, 2)
            if data.size(1) != 2 and data.size(-1) != 2:
                return False
        elif data.ndim == 3:
            # (H, W, 2)
            if data.size(-1) != 2:
                return False
        else:
            raise ValueError(f"Not compatible number of dimensions for complex data. Got {data.ndim}.")
    return True
54a22f6be68b31e838ae0ae33c146f4eff86c6b4
528,958
def first_item_split_on_space(x):
    """
    Brute force extract the first part of a string before a space.
    """
    return str(x).split()[0]
0f48d79e339972c8039369f9b1bf9fc28410a1d3
388,736
def _nullable_list_symmetric_difference(list_1, list_2):
    """
    Returns the symmetric difference of 2 nullable lists.

    Parameters
    ----------
    list_1 : `None` or `list` of ``DiscordEntity``
        First list.
    list_2 : `None` or `list` of ``DiscordEntity``
        Second list.

    Returns
    -------
    symmetric_difference : `None` or `list` of ``DiscordEntity``
        A list with the two list's symmetric difference.
    """
    if list_1 is None:
        if list_2 is None:
            return None
        else:
            return list_2.copy()
    else:
        if list_2 is None:
            return list_1.copy()

    symmetric_difference = set(list_1) ^ set(list_2)
    if not symmetric_difference:
        return None

    return list(symmetric_difference)
b0d44674f0c32f1909c52d5bf0b617dfb7d2547d
623,346
import six


def to_unicode(s, encoding='utf-8'):
    """Convert the string s to unicode if it is of type bytes.

    Returns:
        Unicode string
    """
    if isinstance(s, six.text_type):
        return s
    elif isinstance(s, bytes):
        return s.decode(encoding)
    return s
00375bd7322b101234fa6afa3a093e599e3f15de
404,378
def is_palindrome(string: str) -> bool:
    """Checks if the given string is a palindrome.

    Examples:
        >>> assert is_palindrome("abccba")
        >>>
        >>> assert is_palindrome("123321")
        >>>
        >>> assert not is_palindrome("abccbX")
    """
    if not isinstance(string, str):
        raise TypeError("Input value should be string.")

    return string.lower() == string[::-1].lower()
0354b8f6a3058e823c4591266918fe234dce82dd
620,599
from datetime import datetime


def _delete_event_trigger_file(blob_service_client, trigger_file_path, container_name):
    """Helper function that deletes the event trigger file from the target
    container of a storage account.

    Warning! If the target file is not there, a BlobNotFound exception will
    be raised.

    Args:
        blob_service_client (BlobServiceClient): Azure Storage Account
            service client
        trigger_file_path (str): Path of trigger file in storage account
            container
        container_name (str): Name of target container to upload the
            trigger file.

    Returns:
        blob_properties, including last modified time
    """
    container_client = blob_service_client.get_container_client(container_name)

    # Delete target event trigger file from storage account
    container_client.delete_blob(trigger_file_path)
    deleted_time = datetime.utcnow()

    blob_properties = {
        "etag": "",
        "last_modified": deleted_time
    }
    return blob_properties
0d9d14579aa7fe79d1de6d992cc4d09c892238d9
539,804
def unescape(strs):
    """Replace HTML-safe sequences "&amp;", "&lt;" and "&gt;" with special
    characters."""
    strs = strs.replace("&amp;", "&")
    strs = strs.replace("&lt;", "<")
    strs = strs.replace("&gt;", ">")
    return strs
359ae31e87c50e21e7e61140683d78c8aacd57fe
322,919
import csv


def get_framework_result(file_path):
    """
    Get framework result from the framework file.

    Args:
        file_path (str): The framework file path.

    Returns:
        list[list], the parsed framework information.
    """
    result = []
    with open(file_path, 'r') as file:
        csv_reader = csv.reader(file)
        for row in csv_reader:
            result.append(row)
    return result
fe5ab39b4694f4eb853ab2452cfcd7f715be636f
398,262
def sum_of_all(list_of_nums: list):
    """adds all numbers in a list together and returns the sum"""
    sum_of_nums = 0
    for num in list_of_nums:
        sum_of_nums += num
    print(sum_of_nums)
    return sum_of_nums
a91ddda3294eb2d0e90f95d5027397a8bd5ba623
664,591
def pandas_df_to_temporary_csv(tmp_path):
    """Provides a function to write a pandas dataframe to a temporary csv
    file with function scope."""
    def _pandas_df_to_temporary_csv(pandas_df, sep=",", filename="temp.csv"):
        temporary_csv_path = tmp_path / filename
        pandas_df.to_csv(temporary_csv_path, sep=sep, header=True, index=False, na_rep="")
        return temporary_csv_path

    return _pandas_df_to_temporary_csv
5ec9b3072928e3cdbe067dfcb33010b2a51a267b
702,554
from io import StringIO

import csv


def create_mock_csv_from_dataframe(df):
    """
    Converts a pandas DataFrame to a StringIO object.

    Used to mock tests of functions that read from the filesystem, so we only
    need to keep track of one source of truth.

    Args:
        df (pandas dataframe): to be converted into a StringIO object

    Returns:
        io.StringIO representation of `df`
    """
    csvfile = StringIO()
    csvfile.seek(0)
    fieldnames = df.columns
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for row in df.iterrows():
        writer.writerow(row[1].to_dict())
    csvfile.seek(0)
    print('\nCSV Object Created')
    return csvfile
606393510f14add25cc0ee1f4f098a4e2c626727
287,709
import sqlite3


def connect_sqlite_db_and_cursor(db_name):
    """
    Creates a connection to a sqlite3 db

    :param db_name: filename of the db
    :return: sqllite_connection, cursor Object
    """
    print("Connect to Database")
    sqllite_connection = sqlite3.connect(db_name)
    cursor_object = sqllite_connection.cursor()
    print("Database connected")
    return sqllite_connection, cursor_object
63a752f64699acb971384d73615911f0749fec80
405,523
def get_bounding_boxes(metadata):
    """
    localization.txt (for bounding box) has the structure

    <path>,<x0>,<y0>,<x1>,<y1>
    path/to/image1.jpg,156,163,318,230
    path/to/image1.jpg,23,12,101,259
    path/to/image2.jpg,143,142,394,248
    path/to/image3.jpg,28,94,485,303
    ...

    One image may contain multiple boxes (multiple boxes for the same path).
    """
    boxes = {}
    with open(metadata.localization) as f:
        for line in f.readlines():
            # Field order follows the file format: x0, y0, x1, y1
            # (the original unpacking names had y0/x1 swapped).
            image_id, x0s, y0s, x1s, y1s = line.strip('\n').split(',')
            x0, y0, x1, y1 = int(x0s), int(y0s), int(x1s), int(y1s)
            if image_id in boxes:
                boxes[image_id].append((x0, y0, x1, y1))
            else:
                boxes[image_id] = [(x0, y0, x1, y1)]
    return boxes
f3c8fddca711e77f7ef980736a7645329244afc8
418,710
def nrow(self):
    """
    return the number of rows

    This is faster than self.shape[0]
    """
    return len(self.index)
eea2085ab02c8962430de2643e7452e7cda40775
70,514
def apply_opcode3(code_list, opcode_loc, programme_input=1):
    """When you've determined that the opcode is 3 - which means to take an
    input value and store it in the location of its only parameter - then you
    can use this function to adjust code_list.

    Parameters
    ----------
    code_list : list
        The opcode
    opcode_loc : int
        The index of the opcode in code_list
    programme_input : int
        input value, default 1

    Returns
    -------
    code_list : list
        The whole programme
    """
    opcode, param1 = code_list[opcode_loc:opcode_loc + 2]
    # Now lets actually do what the opcode says: put the input value at the
    # location given by param1
    code_list[param1] = programme_input
    return code_list
c6fe57149630ee9f0f38a9e35277ac7ba1c43897
588,071
import math


def _daylight_hours(sunset_hour_angle_radians):
    """
    Calculate daylight hours from a sunset hour angle.

    Based on FAO equation 34 in Allen et al (1998).

    :param sunset_hour_angle_radians: sunset hour angle, in radians
    :return: number of daylight hours corresponding to the sunset hour angle
    :rtype: float
    :raise ValueError: if the sunset hour angle is not within valid range
    """
    # validate the sunset hour angle argument, which has a valid
    # range of 0 to pi radians (180 degrees), inclusive
    # see http://mypages.iit.edu/~maslanka/SolarGeo.pdf
    if not 0.0 <= sunset_hour_angle_radians <= math.pi:
        # the original str.format() call never interpolated its named
        # placeholders; an f-string does what was intended
        raise ValueError(
            f"sunset hour angle outside valid range [0.0 to {math.pi!r}]: "
            f"{sunset_hour_angle_radians!r}")

    # calculate daylight hours from the sunset hour angle
    return (24.0 / math.pi) * sunset_hour_angle_radians
7eef992832103f0fefdfaa3d936aa7015f3fdc61
649,214
def get_string_event_attribute_rep(event, event_attribute):
    """
    Get a representation of the feature name associated to a string event
    attribute value

    Parameters
    ------------
    event
        Single event of a trace
    event_attribute
        Event attribute to consider

    Returns
    ------------
    rep
        Representation of the feature name associated to a string event
        attribute value
    """
    return "event:" + str(event_attribute) + "@" + str(event[event_attribute])
c97cf3b34bed1b350ba2bce7a05966d8ddbfbeef
590,042