content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def _mergeEnvironments(currentEnv, otherEnv): """Merges two execution environments. If both environments contain PATH variables, they are also merged using the proper separator. """ resultEnv = dict(currentEnv) for k, v in otherEnv.items(): if k == 'PATH': oldPath = currentEnv.get(k, '') if oldPath != '': resultEnv[k] = ';'.join((oldPath, v)) else: resultEnv[k] = v else: resultEnv[k] = v return resultEnv
37167ac88ba48f3fbf9615409d882e4149d9b11c
206,950
import requests
from bs4 import BeautifulSoup


def scrap_page(slug):
    """
    Scrape the documentation page body text.

    :param slug: Documentation URL slug eg. 'the-lead-page'
    :return: Documentation page body text if it exists
    """
    response = requests.get(f"https://help.close.com/docs/{slug}")
    soup = BeautifulSoup(response.text, 'html.parser')
    matches = soup.select('#content-container')
    if matches:
        return matches[0].get_text()
c11279d6292d9a0f7171a8bc5ed94b1f5cdc363d
33,463
def get_planet_product_type(img_path): """ return if file is scene or OrthoTile""" split = img_path.stem.split('_') # check if 4th last imagename segment is BGRN if split[-4] == 'BGRN': pl_type = 'OrthoTile' else: pl_type = 'Scene' return pl_type
70aac7930fb95c0270f4b74a51a09e5d72ea8aeb
630,258
def grab_children_from_sequence(seq, t_i):
    """Select the children of ``seq`` located at the coordinates ``t_i``.

    Given a sequence of nodes and a sequence of indices (as stored during
    ast_to_nodelist), return the corresponding child nodes in order.
    """
    children = []
    for index in t_i:
        children.append(seq[index])
    return children
defb1bc0172d4fe41ece65d69eb6d2ed93f1c7fb
545,691
from typing import Iterable def _unique(*args: Iterable) -> list: """Return a list of all unique values, in order of first appearance. Args: args: Iterables of values. Examples: >>> _unique([0, 2], (2, 1)) [0, 2, 1] >>> _unique([{'x': 0, 'y': 1}, {'y': 1, 'x': 0}], [{'z': 2}]) [{'x': 0, 'y': 1}, {'z': 2}] """ values = [] for parent in args: for child in parent: if child not in values: values.append(child) return values
5d43dfa3d13b04b40c2ae37fac25df44d4d37295
686,691
def combine_images(center, left, right, measurement, correction):
    """Combine camera image paths with their steering measurements.

    Left and right images reuse the centre measurements shifted by
    +``correction`` and -``correction`` respectively.

    Returns ([image_paths], [measurements]).
    """
    image_paths = list(center) + list(left) + list(right)
    measurements = list(measurement)
    measurements += [m + correction for m in measurement]
    measurements += [m - correction for m in measurement]
    return (image_paths, measurements)
5ad3322ec80051dea121a05ad54326169b62c464
137,768
import functools
import operator


def compute_check(msg):
    """Compute the check value for a message.

    AUCTUS messages use a check byte which is simply the XOR of all
    the values of the message; an empty message checks to 0.
    """
    # Folding with an initial value of 0 also covers the empty case.
    return functools.reduce(operator.xor, msg, 0)
1dec5ac53abaa406760d3a0005cdde01953ef68e
658,774
import json def _get_options(options_file=None): """ Read in options_file as JSON. :param options_file: filename to return :type options_file: str :return: options as dictionary :rtype: dict """ if options_file is not None: with open(options_file, 'r') as opt_file: options = json.loads(opt_file.read()) else: options = {} return options
9011175e8583541982f761a9f60375a0bdca88c1
627,046
def parse_account(config, auth, account):
    """Break an [account:advertiser@profile] token into parts.

    Possible variants include:
    * [account:advertiser@profile]
    * [account:advertiser]
    * [account@profile]

    Args:
      * config: unused here; kept for call-site compatibility.
      * auth: (string) Either user or service.
      * account: (string) A string representing [account:advertiser@profile]

    Returns:
      * (network_id, advertiser_ids) parsed from the account token.
        NOTE(review): the profile id is parsed but not returned, although
        the original docstring promised a 3-tuple — confirm against
        callers before widening the return value.
    """
    network_id = account
    advertiser_ids = None
    profile_id = None

    # If present, strip the profile off the end.  ValueError: no '@' in the
    # token; AttributeError: account is None (both tolerated, as the old
    # bare "except" did, but nothing else is silently swallowed any more).
    try:
        network_id, profile_id = network_id.split('@', 1)
    except (ValueError, AttributeError):
        profile_id = None

    # If present, split the advertiser list off the end.
    try:
        network_id, advertiser_ids = network_id.split(':', 1)
    except (ValueError, AttributeError):
        pass

    # Convert network to integer if present.
    if network_id is not None:
        network_id = int(network_id)

    # Convert the comma-separated advertiser list to integers if present.
    if advertiser_ids is not None:
        advertiser_ids = [
            int(advertiser_id.strip())
            for advertiser_id in advertiser_ids.split(',')
        ]

    return network_id, advertiser_ids
a1c00ebc9f03358d7864765bac5ad545497444ad
45,728
def get_units(unit):
    """Return a dict of measurement units for the given unit format code.

    :type unit: str
    :param unit: unit format ('us', 'ca', 'uk2'; anything else yields 'si')
    :return: dict containing units for weather measurements
    """
    si = {
        'unit': 'si',
        'nearestStormDistance': 'km',
        'precipIntensity': 'mm/h',
        'precipIntensityMax': 'mm/h',
        'precipAccumulation': 'cm',
        'temperature': 'C',
        'temperatureMin': 'C',
        'temperatureMax': 'C',
        'apparentTemperature': 'C',
        'dewPoint': 'C',
        'windSpeed': 'm/s',
        'pressure': 'hPa',
        'visibility': 'km',
    }
    # Per-format deviations from the SI baseline.
    overrides = {
        'us': {
            'unit': 'us',
            # NOTE(review): 'mph' as a *distance* unit looks suspicious but
            # is preserved as-is — confirm against the upstream API docs.
            'nearestStormDistance': 'mph',
            'precipIntensity': 'in/h',
            'precipIntensityMax': 'in/h',
            'precipAccumulation': 'in',
            'temperature': 'F',
            'temperatureMin': 'F',
            'temperatureMax': 'F',
            'apparentTemperature': 'F',
            'dewPoint': 'F',
            'windSpeed': 'mph',
            'pressure': 'mb',
            'visibility': 'mi',
        },
        'ca': {'unit': 'ca', 'windSpeed': 'km/h'},
        'uk2': {
            'unit': 'uk2',
            'nearestStormDistance': 'mi',
            'windSpeed': 'mph',
            'visibility': 'mi',
        },
    }
    units = dict(si)
    units.update(overrides.get(unit, {}))
    return units
49bcb5a336e09fdb8abb14fb13488e8135c707e7
387,333
def add_countdown_in(date, event, events_dates_per_event):
    """Return the number of days until the next occurrence of ``event``.

    Scans the known dates for ``event`` in ``events_dates_per_event`` and
    returns the smallest non-negative day delta from ``date``.  Raises
    ValueError (empty min) when every occurrence is in the past.
    """
    upcoming = (
        (occurrence - date).days
        for occurrence in events_dates_per_event[event]
    )
    return min(days for days in upcoming if days >= 0)
0a8cd3c9100f301b9a681d49fd4bb624067cdb88
337,835
def rm_www_prefix(text):
    """Strip a leading "www." from ``text`` if present."""
    prefix = "www."
    return text[len(prefix):] if text.startswith(prefix) else text
e9b460268126f13531cbeb308d7960d0694ef8f8
640,296
def normalize_pipeline_name(name=''):
    """Translate unsafe characters to underscores."""
    # One C-level pass over the string instead of chained .replace() calls.
    unsafe_to_underscore = str.maketrans('\\/?%#', '_____')
    return name.translate(unsafe_to_underscore)
750b91c1d19d9a7ce92b9c5efa453a2a13595776
610,574
from typing import Callable
from typing import List


def abstract_calc_sim(query: List[str],
                      content: List[List[str]],
                      func_vec: Callable[[List[str], List[str]], float]) -> List[float]:
    """Calculate similarity scores of ``query`` against each element of
    ``content``.

    An empty query or an empty content element scores 0.0.

    Parameters
    ----------
    query
        element to compare to every element of content
    content
        content to compare similarity for
    func_vec
        function computing a similarity score for two elements

    Returns
    -------
    sim_scores
        similarity scores, one per element of ``content``
    """
    return [
        func_vec(query, item) if item and query else float(0)
        for item in content
    ]
85b58c82ec2babe7c1e1f2f199212ec792290ad8
611,542
import secrets


def generate_random_token(length=64):
    """Generate a cryptographically secure random hex token.

    :param length: number of hex characters in the token
    :return: zero-padded lowercase hex string of exactly ``length`` chars

    Uses :mod:`secrets` instead of :mod:`random`: tokens are typically
    security-sensitive and the Mersenne Twister behind ``random`` is
    predictable from its output.
    """
    hex_format = "%0{}x".format(length)
    return hex_format % secrets.randbelow(16 ** length)
c7ebfb69884e9a751fc0be74b82fd7233bad20d3
268,110
def is_from_new_app(metadata):
    """Return True for events created by the new www.missionbit.org
    donation portal (those events should be ignored).
    """
    app = metadata.get("app")
    return app == "www.missionbit.org"
44350b151accd57ab898e5131b596b752ae4517e
302,000
import re


def remove_marks(text):
    """Remove unnecessary marks inside texts.

    Arg:
        text: A string that could be either posts or comments.
    Return:
        new_text: A cleaned string with HTML tags and URLs removed and
        each number replaced by a ' NUM ' token.
    """
    # remove HTML tags
    new_text = re.sub(r'<.*?>', '', text)
    # remove URLs — raw strings throughout: plain '\S'/'\d' are invalid
    # escape sequences and raise SyntaxWarning on modern Python
    new_text = re.sub(r'http\S+', '', new_text)
    # replace each number with a ' NUM ' token
    new_text = re.sub(r'\d+', ' NUM ', new_text)
    return new_text
c747662ce19205ea5ce568ef0ec4c01d44d18856
196,971
import glob


def synth_fnames(state, county):
    """Return filenames of synthetic-population CSVs for a state/county.

    Parameters
    ----------
    state : str, two-letters, e.g. 'mn'
    county : int, FIPS code without state digits (e.g. 53)
    """
    pattern = (
        '/ihme/scratch/users/beatrixh/synthetic_pop/pyomo/best/'
        f'{state.lower()}/*county{county:03d}*.csv'
    )
    return glob.glob(pattern)
74ecc23ce1282773ac16c6e45061e6e1f52477cb
288,048
from pathlib import Path
from typing import Iterable

import click


def _requirements(
    path: Path,
    packages: Iterable[str],
    silent: bool = False
) -> Path:
    """Append ``packages`` to a ``requirements.txt`` file.

    :param path: the output path
    :param packages: the packages
    :param silent: ``True`` to suppress normal output
    :return: the resolved output path
    """
    target = path.expanduser().resolve()
    # One package per line, appended to any existing content.
    with open(str(target), 'a') as handle:
        handle.writelines(f"{package}\n" for package in packages)
    if not silent:
        # Echo the resulting file so the caller sees what was written.
        click.echo("requirements:")
        click.echo(target.read_text())
    return target
e3e477535ec17bcb32aaa1a066bb3736c20a9054
510,420
def lput(thing, l):
    """LPUT thing list

    Output a new list equal to the second input with one extra member,
    the first input, at the end.  The input list is not modified.
    """
    result = list(l)
    result.append(thing)
    return result
0697b1faa34e046ba9a58a85c6955f0975867d33
267,296
def apply_time_filter(dataset_row, time_interval):
    """Return True if the row's time lies strictly inside ``time_interval``."""
    lower_time, upper_time = time_interval
    return lower_time < dataset_row['grounded_normalized_time'] < upper_time
50ffb01134a2c509eb5e01986c90d261f10ef2a7
143,989
def _is_variable_assignation(line: str) -> bool: """Check if the line is a variable assignation. This function is used to check if a line of code represents a variable assignation: * if contains a "=" (assignation) and does not start with import, nor @, nor def, nor class. * then it is ==> is a global variable assignation. :param line: Line to parse :return: <Boolean> """ if "=" in line: parts = line.split() if ( not ( line.startswith("from") or line.startswith("import") or line.startswith("@") or line.startswith("def") or line.startswith("class") ) and len(parts) >= 3 and parts[1] == "=" ): # It is actually an assignation return True else: # It is an import/function/decorator/class definition return False else: # Not an assignation if does not contain "=" return False
0977ce8dfb42242687cca763b46f87f7136a512f
302,819
def gconfname(path):
    """Turn a gconf path into its final name component."""
    return path.rpartition('/')[2]
412b3cd0219793622d10d753fe3e98835b2662f1
507,615
def calculate_polynomial_derivative_term(coefficient, variable, order):
    """Calculate the derivative of the nth order term of a polynomial.

    Args:
        coefficient (float): coefficient of the nth order term
        variable (float): value to plug in for the variable
        order (int): order of the nth order term (so, n)

    Returns:
        float: n * coefficient * variable**(n - 1).  Multiplying by
        ``order`` also covers the zeroth-order term (result 0).

    Raises:
        TypeError: A non-integer was passed as the order.
    """
    # Exact type check kept on purpose: type(True) is bool, so bools
    # are rejected along with floats and strings.
    if type(order) != int:
        raise TypeError('Non-integer order in polynomial term')
    return order * coefficient * variable ** (order - 1)
46fc609334b41099fcb99784670bd93f54846353
683,770
def reverse_geometric_key(string):
    """Reverse a comma-separated geometric key string into xyz floats."""
    return list(map(float, string.split(',')))
4ca34a6c5f3607f3c1ea38642309e0ef988a61d9
669,685
def remove_empty_files(data_dic, threshold=5):
    """Remove the empty files from a dict.

    An empty file is one whose 'text' length is at most ``threshold``
    characters.  ``data_dic`` is modified in place.

    Returns the dict and the list of removed file names.
    """
    removed_files_list = [
        name for name, entry in data_dic.items()
        if len(entry['text']) <= threshold
    ]
    for name in removed_files_list:
        data_dic.pop(name)
    return data_dic, removed_files_list
55155a3264a75af7b716846cd2ce8e13ebcbc02d
364,739
def read_test(data_path):
    """Read a wd_*.f1 file in the NET-COLING2018 dataset.

    Returns a list with one entry per (non-empty) line, each entry being
    the list of characters of the stripped line.
    """
    test_examples = []
    with open(data_path, 'r', encoding='utf-8') as lines:
        for line in lines:
            if not line:
                continue
            test_examples.append(list(line.strip()))
    return test_examples
c479ae7953188db5077d6acad360234c7599b74f
583,642
import requests


def get_json_resource(resource_uri):
    """GET a URI that returns JSON and decode it into a Python object.

    Returns None unless the response status is exactly 200 (OK).

    :param resource_uri: uri to request
    :return: Python dict/object from the request JSON, or None
    """
    response = requests.get(resource_uri)
    if response.status_code != requests.codes.ok:
        return None
    return response.json()
41e97ab464a12298fc718d8328e90ae4e8efd884
73,276
def coordinate_of_sequence(sequence):
    """Map a 1-based sequence number (row-major from the top) to (x, y),
    where y counts up from the bottom row.

    ___0__1__2__3__4__
    4|  1  2  3  4  5
    3|  6  7  8  9 10
    2| 11 12 13 14 15
    1| 16 17 18 19 20
    0| 21 22 23 24 25
    """
    row, x = divmod(sequence - 1, 5)
    return x, 4 - row
3f6a81a9eb0a2e1fe9e4d68fd212b7914ea700be
660,505
def log2(num):
    """Integer-valued logarithm with base 2.

    If ``num`` is not a power of 2, the result is rounded down (floor of
    log2).  Values below 2 return 0, matching the original chunked
    implementation.

    The previous chunked-division loop (16+8+4+2+1) silently capped the
    result at 31, returning wrong answers for num >= 2**32;
    ``int.bit_length`` has no such limit.
    """
    if num < 2:
        return 0
    return num.bit_length() - 1
b692d508029505a62e81dd4d8efaa0e21435b007
601,726
import requests


def edges(word: str, lang: str = "th"):
    """Get edges from the `ConceptNet <http://api.conceptnet.io/>`_ API.

    ConceptNet is a public semantic network designed to help computers
    understand the meanings of words.  Terms (*Nodes*) are linked by
    relationships (*Edges*), e.g.:

    | "ConceptNet" --is a--> "knowledge graph" --has--> "common sense"\
    --a part of--> "artificial intelligence"

    :param str word: word to be sent to ConceptNet API
    :param str lang: abbreviation of language (i.e. *th* for Thai, *en*
        for English, or *ja* for Japan). By default, it is *th* (Thai).
    :return: edges of the given word according to the ConceptNet network
    :rtype: list[dict]
    """
    response = requests.get(f"http://api.conceptnet.io/c/{lang}/{word}")
    return response.json()["edges"]
1cc7041a1805aa05e8318097cf88bad93962a6b0
56,420
import torch


def raw_net_output(net, example):
    """Run ``net`` on one un-processed example and return the raw flow.

    Args:
        net (FlowNet): Instance of networks.flownet.FlowNet model.
        example (dict): Un-processed example.

    Returns:
        flow_pred (Tensor, shape=1x2xHxH, type=FloatTensor): raw flow
        prediction of ``net``, upsampled back by its stride.
    """
    net.eval()
    example = net.preprocess(example)
    source = example['net_cs_im'].unsqueeze(0).cuda()
    target = example['net_tg_im'].unsqueeze(0).cuda()
    with torch.no_grad():
        # Index [0][0] selects the final flow prediction and its stride.
        flow_pred, stride = net(source, target)[0][0]
    return net.upsample(flow_pred, scale_factor=stride)
5587b884c6652ec90bf0739986cda5510250c71b
363,211
def term_key_to_semester_year(term_key):
    """Convert a 4-digit term number to a (semester, year) pair."""
    term_key = int(term_key)
    semesters = ("Spring", "Summer", "Fall")
    # Last digit encodes the semester; middle digits encode the year.
    semester = semesters[term_key % 10 // 2 - 2]
    year = 2000 + term_key % 1000 // 10
    return semester, year
a5e6a31574b63a64d55e76142862d569afe4d10d
284,250
def find_segments(j, e, c, OPT):
    """Return the optimal segment start points for least-squares
    segments covering indices 0..j.

    ``e`` is a residuals mapping, ``c`` the per-segment line cost, and
    ``OPT`` the optimal cost for each index (with OPT[-1] defined).
    """
    if j == -1:
        return []
    # First index achieving the minimum total cost (min over range picks
    # the first minimum, matching vals.index(min(vals))).
    best = min(range(j + 1), key=lambda i: e[i][j] + c + OPT[i - 1])
    return find_segments(best - 1, e, c, OPT) + [best]
3f3ad3c4dee3bffceadba0ac6df76985048ac256
111,331
def _is_combinator_subset_of(specific, general, is_first=True): """Return whether `specific` matches a non-strict subset of what `general` matches. """ if is_first and general == ' ': # First selector always has a space to mean "descendent of root", which # still holds if any other selector appears above it return True if specific == general: return True if specific == '>' and general == ' ': return True if specific == '+' and general == '~': return True return False
c35b4dfd9692980e036537b9a4007dfa634e0781
79,587
import random


def get_starts(N, M, batch_size, option):
    """Return start [elevation, azimuth] indices for a batch of panoramas.

    With ``option == 0`` each panorama gets a uniformly random start;
    any other option gives every panorama the fixed centre start
    ``[N//2, M//2 - 1]``.
    """
    if option == 0:
        return [[random.randint(0, N - 1), random.randint(0, M - 1)]
                for _ in range(batch_size)]
    return [[N // 2, M // 2 - 1] for _ in range(batch_size)]
7192c1f8234413834a6f4bd511a63ce84f6cf7f4
463,676
def calculate_fantasy_points(player) -> float:
    """Calculate the fantasy points this player earned.

    Formula (https://dota2.gamepedia.com/Fantasy_Dota):
    kill 0.3, death -0.3, assist 0.15, last hit 0.003, GPM 0.002,
    XPM 0.002, stun seconds 0.07, 0.4 per 1000 allied healing,
    tower kill 1, Roshan kill 1, first blood 3.

    Parameters
    ----------
    player: Summary - a player summary

    Returns
    -------
    The fantasy points scored by this player
    """
    weights = {
        "kills": 0.3,
        "deaths": -0.3,
        "assists": 0.15,
        "last_hits": 0.003,
        "gpm": 0.002,
        "xpm": 0.002,
        "enemy_stun_time": 0.07,
        "healing": 0.4 / 1000,
        "towers_killed": 1,
        "rosh_kills": 1,
    }
    points = sum(player[stat] * weight for stat, weight in weights.items())
    return points + (3 if player["first_blood"] else 0)
a2a0c91a69720904a81fee58d5a5ecf236f32bdc
677,997
def remap(indict, *args, **kwargs):
    """Re-map keys of ``indict`` using information from the arguments.

    Non-keyword arguments are keys of the input dictionary passed
    unchanged to the output.  Keyword arguments must be in the form
    ``new="old"`` and act as a translation table for new key names.
    """
    outdict = {}
    for role in args:
        outdict[role] = indict[role]
    for new, old in kwargs.items():
        outdict[new] = indict[old]
    return outdict
e44678373451587d74bb38a6a53b36155c42bc30
89,875
def add_trailing_slash(path):
    """Add a trailing slash if not present.

    Parameters
    ----------
    path : str
        A string representing a path.

    Returns
    -------
    str
        ``path`` ending in '/'.  An empty string becomes '/'
        (the previous ``path[-1]`` check raised IndexError on '').
    """
    if not path.endswith('/'):
        path += '/'
    return path
ae822b3ce8087a7fd856208d4cd04d327cfeb88f
523,078
def registrar(registry, name="entry"):
    """
    Creates and returns a register function that can be used as a decorator
    for registering functions into the given registry dictionary.

    :param registry: Dictionary to add entry registrations to.
    :param name: Name to give to each entry. "entry" is used by default.

    :returns: A register() decorator that can be used to fill in the given
        registry dictionary.
    """

    def register_func(entry_name_or_func):
        """Registers an entry for the CPU emulator."""
        if callable(entry_name_or_func):
            # Used directly as @register with no argument: register under
            # the lowercased function name.
            func = entry_name_or_func
            registry[func.__name__.lower()] = func
            return func

        # Used as @register("name"): entries are stored lowercased, so the
        # duplicate check must compare the lowercased name too (previously
        # "Foo" slipped past an existing "foo").
        entry_name = entry_name_or_func.lower()
        if entry_name in registry:
            raise ValueError(
                "Duplicate {} name: {}".format(name, entry_name_or_func))

        def _wrapper(func):
            # Register function as entry under the normalized name.
            registry[entry_name] = func
            return func

        return _wrapper

    return register_func
9ca990c262133d26659fb92823f0b12b867424db
643,229
def guess_type_value_type(none=True):
    """
    @param  none    if True, include None at the front of the list
    @return         the list of types recognized by guess_type_value
    """
    recognized = [str, int, float]
    return [None] + recognized if none else recognized
5926dc0ce7f766cb1e596fe8d4f5ff97f41fa9c9
661,424
import re


def parse_header(header):
    """
    Extract the sequence ID and sequence range from a header of the form
    sequenceID/start-end.

    Any additional information after the first whitespace character
    (e.g. annotation) is discarded before parsing.  If there is no
    sequence range, only the id is returned with None for the range.

    Parameters
    ----------
    header : str
        Sequence header

    Returns
    -------
    seq_id : str
        Sequence identifier
    start : int
        Start of sequence range; None if it cannot be extracted.
    stop : int
        End of sequence range; None if it cannot be extracted.
    """
    # Discard anything after the first whitespace (by convention this is
    # typically annotation).
    header = header.split()[0]

    # Raw string for the pattern: a plain "\d" is an invalid escape
    # sequence and raises SyntaxWarning on modern Python.
    m = re.search(r"(.+)/(\d+)-(\d+)", header)
    if m:
        id_, start_str, end_str = m.groups()
        return id_, int(start_str), int(end_str)
    # Cannot find a region, so just give back the sequence ID.
    return header, None, None
2c0ab6abfdecf4d023138db0cae6031f61a8a9d0
449,844
def is_authenticated(cookie_string, prefix='', rconn=None):
    """Check for a valid session cookie in Redis.

    Returns True if the cookie key exists (and refreshes its expiry to
    ten minutes); False for missing/invalid cookies or on any Redis
    failure.
    """
    if rconn is None:
        return False
    if (not cookie_string) or (cookie_string == "noaccess"):
        return False
    cookiekey = prefix + cookie_string
    try:
        if not rconn.exists(cookiekey):
            return False
        # Key exists: extend the session by ten minutes.
        rconn.expire(cookiekey, 600)
    except Exception:
        # Treat any Redis/connection error as "not authenticated" rather
        # than crashing the request path.  The previous bare "except" also
        # swallowed KeyboardInterrupt and SystemExit.
        return False
    return True
0d90aff0e85a4e2523991ef035bce68ca7636966
230,633
def powerhalo(r, rs=1., rc=0., alpha=1., beta=1.e-7):
    """Generic two-power-law halo density.

    inputs
    ----------
    r     : (float) radius values
    rs    : (float, default=1.) scale radius
    rc    : (float, default=0., i.e. no core) core radius
    alpha : (float, default=1.) inner halo slope
    beta  : (float, default=1.e-7) outer halo slope

    returns
    ----------
    densities evaluated at r

    notes
    ----------
    Different combinations are known distributions:
    alpha=1, beta=2 is NFW; alpha=1, beta=3 is Hernquist;
    alpha=2.5, beta=0 is a typical single power-law halo.
    """
    ra = r / rs
    inner = (ra + rc) ** alpha
    outer = (1 + ra) ** beta
    return 1.0 / (inner * outer)
455fad3ae6c8338660b938c016f33987cf0fb71e
480,566
from typing import List


def ParseFilterFile(filepath: str, p_filts: List[str],
                    n_filts: List[str]) -> str:
    """Parse a filter file and construct a gtest_filter string.

    Args:
        filepath (str): The path to the filter file to be parsed into a
            --gtest_filter flag.
        p_filts (List[str]): Initial positive filters passed in a flag.
            Not modified (the previous version appended to the caller's
            list as a side effect).
        n_filts (List[str]): Initial negative filters passed in a flag.
            Not modified.

    Returns:
        str: The properly-joined together gtest_filter flag.
    """
    # Copy so the caller's lists are not mutated as a side effect.
    positive_filters = list(p_filts)
    negative_filters = list(n_filts)
    with open(filepath, "r") as file:
        for line in file:
            # Only take the part of a line before a # sign.
            line = line.split("#", 1)[0].strip()
            if line == "":
                continue
            elif line.startswith("-"):
                negative_filters.append(line[1:])
            else:
                positive_filters.append(line)
    return "--gtest_filter={}-{}".format(":".join(positive_filters),
                                         ":".join(negative_filters))
d7830144dac8bde116acb4e19d130242aa624e7f
164,888
def is_leap_year(year):
    """Return whether ``year`` is a leap year.

    - Leap years are any year that can be exactly divided by 4
    - Except if it can be divided exactly by 100 then it isn't
    - But if it can be divided exactly by 400 then it is a leap year
    """
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
23d850d2fa3a6c0ce430227569765e473a9d637a
634,586
def parseIntList(string):
    """Parse a comma-separated string of ints into a list."""
    return [int(item) for item in string.split(",")]
2ade8cf58b5e686e202a7a96dcf46c97b2dfd0e8
515,393
def convert_str_version_number(version_str):
    """
    Convert a version-number string to a tuple of ints for easy comparison.

    :param version_str: str of the version number, e.g. '0.33'
    :returns: tuple of ints representing the version str
    :raises ValueError: if the string is not exactly "major.minor"
    """
    parts = version_str.split('.')
    if len(parts) != 2:
        raise ValueError(f"Version number is malformed: '{version_str}'")
    major, minor = parts
    return int(major), int(minor)
b550b7d07d9b226800de261f792bf0995ac21738
7,265
import torch


def float_2_label(x, bits):
    """output = float_2_label(x, bits)

    N-bit quantization of a float tensor; returns the code indices.

    input
    -----
      x: data to be converted, any shape; values expected in [-1, 1]
      bits: number of bits, int

    output
    ------
      output: tensor.float in [0, 2**bits - 1].  Although the output is
      quantized, torch.float is used to store the quantized values.
    """
    # Scale down if the signal peaks above 1.0.  Use an out-of-place
    # division: the previous `x /= peak` silently modified the caller's
    # tensor.
    peak = torch.abs(x).max()
    if peak > 1.0:
        x = x / peak
    # Map [-1, 1] onto [0, 2**bits - 1] and clamp to that range.
    levels = 2 ** bits - 1
    x = (x + 1.) * levels / 2
    return torch.clamp(x, 0, levels)
db08bd302b84123569012443b98918914673e762
428,432
import typing def _split_bits(i: int) -> typing.Tuple[bool, bool, bool, bool, bool, bool, bool, bool]: """Split a byte (an int) into its 8 bits (a tuple of 8 bools).""" assert i in range(256) return ( bool(i & (1 << 7)), bool(i & (1 << 6)), bool(i & (1 << 5)), bool(i & (1 << 4)), bool(i & (1 << 3)), bool(i & (1 << 2)), bool(i & (1 << 1)), bool(i & (1 << 0)), )
cdb7b08ab9969d56de284c214ae4ecfbb03b63ac
350,194
import math
import torch


def adjust_intrinsics_crop(K, crop_size, bbox, max_crop_area):
    """Adjust camera intrinsics to render a crop.

    Args:
        K: 3x3 intrinsics matrix (anything ``torch.Tensor`` accepts)
        crop_size: (H, W) tensor of the crop
        bbox: 2D bounding box (l, t, r, b)
        max_crop_area: maximum crop area

    Returns:
        new crop size (list of int), scaled intrinsics, and the
        offset-only (unscaled) intrinsics.
    """
    left, top, _right, _bottom = bbox
    crop_H, crop_W = crop_size
    # Uniform scale so the crop area does not exceed max_crop_area.
    scale = math.sqrt(max_crop_area / (crop_H * crop_W))
    new_size = (crop_size * scale).int().numpy().tolist()

    intrinsics = torch.Tensor(K)
    # Shift the principal point into the crop's frame.
    intrinsics[0, 2] -= left
    intrinsics[1, 2] -= top
    off_intrinsics = intrinsics.clone()
    # Scale focal lengths and principal point by the area ratio.
    intrinsics[:2] *= scale
    return new_size, intrinsics, off_intrinsics
99e4a9e5df0b257065bcaf995bc34a7c35a0389a
480,333
def overlap(a, b):
    """
    Check whether two casings intersect, or have identical start/end
    positions.
    """
    # Strict interval intersection, checked symmetrically: the previous
    # version only tested a's endpoints against b, so "a fully contains b"
    # (e.g. a=(0, 10), b=(2, 3)) was wrongly reported as no overlap.
    a_in_b = (b[0] < a[0] < b[1]) or (b[0] < a[1] < b[1])
    b_in_a = (a[0] < b[0] < a[1]) or (a[0] < b[1] < a[1])
    # Casings that start or end in the same place also count.
    same_edge = (a[0] == b[0]) or (a[1] == b[1])
    return a_in_b or b_in_a or same_edge
28d3382bb8ebd0fc12bb18c6311648d76f593bee
665,009
from typing import Dict


def pay_invoice(self, currency: str, order_id: str) -> Dict:
    """
    Enables authenticated users to pay the invoice order.

    :param currency: (required) currency name
    :param order_id: (required) invoice order ID
    :return: Dict
    """
    method = 'POST'
    api_url = '/api/v1/invoice/pay'
    params = {
        "currency": currency,
        "order_id": order_id
    }
    signature = self._make_signature(method, api_url, params)
    return self._make_request(method, self._host + api_url, signature,
                              params=params)
fce3b4c5348ac378ed07c79c8874ec4254d9dc8c
446,569
def upgrade(module, port_path):
    """Upgrade outdated MacPorts ports.

    Returns (changed, msg) on success; calls ``module.fail_json`` when
    the upgrade command fails.
    """
    rc, out, err = module.run_command("%s upgrade outdated" % port_path)

    # rc is 1 when there is nothing to upgrade, so check stdout first.
    if out.strip() == "Nothing to upgrade.":
        return (False, "Ports already upgraded")
    if rc == 0:
        return (True, "Outdated ports upgraded successfully")
    module.fail_json(msg="Failed to upgrade outdated ports",
                     stdout=out, stderr=err)
f3efc7e650ef6e448f987b39ddebab8051a0f9a6
117,187
def parse_ip_regex_str(ip_regexs_str):
    """Split a comma-separated string into a list of regex specs.

    E.g. '192[.]168[.],172[.]16[.]' yields
    ['192[.]168[.]', '172[.]16[.]'] — regexes matching ipv4 addresses
    starting with '192.168.' or '172.16.'.

    A leading '-' marks a negated regex ("do not choose an ip matching
    this"), kept in the result as a tuple ``(regex, False)``; plain
    entries are kept as bare strings.

    :return: a list of regex strings / (regex, False) tuples.
    :raises ValueError: on an empty regex entry.
    """
    entries = ip_regexs_str.strip().split(',')
    result = []
    for entry in entries:
        negated = entry.startswith('-')
        pattern = entry[1:] if negated else entry
        if pattern == '':
            raise ValueError('invalid regular expression: '
                             + repr((pattern, not negated)))
        result.append((pattern, False) if negated else pattern)
    return result
b51799af47644967a598128f5e41d4978a8a29b8
337,771
def xor(a, b):
    """
    exclusive or; a or b, but not both

    >>> xor(1, 0)
    1
    >>> xor(0, 1)
    1
    >>> xor(1, 1)
    0
    >>> xor(0, 0)
    0
    """
    return int(bool(a) != bool(b))
d5da57473ca7b0e1caaa9206259a4632af05daa1
403,689
def get_variables(interface1, interface2, interface3):
    """Create and return a dictionary of test variables for Honeycomb's
    SPAN port mirroring suite.

    :param interface1: Name of an interface.
    :param interface2: Name of an interface.
    :param interface3: Name of an interface.
    :type interface1: string
    :type interface2: string
    :type interface3: string
    :returns: Dictionary of test variables.
    :rtype: dict
    """
    def span_settings(state, iface):
        # Helper producing one SPAN mirroring configuration entry.
        return {"state": state, "iface-ref": iface}

    variables = {
        "interface1": interface1,
        "interface2": interface2,
        "interface3": interface3,
        # Interface 2 mirroring configurations: ingress, egress, both.
        "settings_receive": span_settings("receive", interface2),
        "settings_transmit": span_settings("transmit", interface2),
        "settings_both": span_settings("both", interface2),
        # Interface 3 - ingress/egress.
        "settings_if2": span_settings("both", interface3),
    }
    # IP addresses for the traffic test.
    variables.update({
        "tg_to_dut_if1_ip": "192.168.1.1",
        "dut_to_tg_if1_ip": "192.168.1.2",
        "tg_to_dut_if2_ip": "192.168.2.1",
        "dut_to_tg_if2_ip": "192.168.2.2",
        "prefix": 24,
    })
    return variables
3910b1d4454409fcdb2f30d6bf6e7ca2210d3b65
641,178
import torch


def extract_torchmeta_task(cs, class_ids):
    """Extract a single "episode" (i.e. task) from a ClassSplitter object,
    in the form of train/test datasets, and attach the variables needed by
    DatasetDistance computation.

    Arguments:
        cs (torchmeta.transforms.ClassSplitter): the ClassSplitter to
            extract data from
        class_ids (tuple): indices of classes to be selected by the Splitter

    Returns:
        ds_train (Dataset): train dataset
        ds_test (Dataset): test dataset
    """
    task = cs[class_ids]
    ds_train, ds_test = task['train'], task['test']
    for split in (ds_train, ds_test):
        # Collect the label of every example into a tensor attribute.
        split.targets = torch.tensor([split[i][1] for i in range(len(split))])
        # Keep only the class names selected by class_ids.
        split.classes = [label[-1] for idx, label in enumerate(cs.dataset._labels)
                         if idx in class_ids]
    return ds_train, ds_test
7a342b95d88a65b9103380b3ed08d9e28a3ca5bf
675,272
def blurhash_validity_checker(blurhash: str) -> bool:
    """Check whether *blurhash* is a structurally valid blurhash string.

    Validates the minimum length, that the size character belongs to the
    base-83 alphabet, and that the overall length matches the component
    counts encoded in the first character.

    :param blurhash: candidate blurhash string
    :return: True when valid, False otherwise
    """
    if len(blurhash) < 6:
        return False

    # Base-83 alphabet used by the blurhash encoding.
    characters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz#$%*+,-.:;=?@[]^_{|}~"
    characters_values = dict(zip(characters, range(len(characters))))

    # Fix: an unknown size character previously raised KeyError; an invalid
    # string should simply be reported as invalid.
    if blurhash[0] not in characters_values:
        return False

    size_info = characters_values[blurhash[0]]
    size_y = int(size_info / 9) + 1
    size_x = (size_info % 9) + 1

    # Header is 4 chars; each colour component occupies two characters.
    return len(blurhash) == 4 + 2 * size_x * size_y
7c2c85faccaefcbcff0429e6fe32e6609087fdd8
145,066
def get_sentiment_emoji(sentiment):
    """Return an emoji shortcode representing the sentiment score."""
    if sentiment > 0:
        return ":thumbsup:"
    if sentiment < 0:
        return ":thumbsdown:"
    # sentiment == 0
    return ":neutral_face:"
5e58199faf0fb846c40417e7b9ffa1a3701d82f6
695,122
def get_end(model):
    """Get the end position of a (feature) location.

    For point locations the position value is returned. In case of an
    uncertain end range, the maximum (the "end" point) is returned.
    Returns None for any other location type.
    """
    location = model["location"] if model.get("location") else model
    kind = location["type"]
    if kind == "range":
        return location["end"]["position"]
    if kind == "point":
        return location["position"]
29bf438c48dcf4a1f549bbe0b9e91b5574e7166e
55,181
import patoolib


def has_codec(program, codec):
    """Test whether *program* supports the given *codec*.

    :param program: archiver program name (e.g. '7z')
    :param codec: compression codec name (e.g. 'rar')
    :return: truthy when supported (natively, or via an external helper
        program found on the path)
    """
    # 7z needs a dedicated probe for rar support.
    if (program, codec) == ('7z', 'rar'):
        return patoolib.util.p7zip_supports_rar()
    if patoolib.program_supports_compression(program, codec):
        return True
    # Fall back to looking for a standalone codec binary.
    return patoolib.util.find_program(codec)
2ea07f0b215f4332803b09a1e5c7dbcc0bc6a165
248,759
def get_interface_setting(file):
    """Return the content of a setting file as an int.

    :param file: path of the settings file to read
    :return: the file's contents parsed as an integer
    """
    with open(file) as handle:
        raw = handle.read()
    return int(raw)
e2bb30d0ce5ea00ad29abf5fcc4d27548fd292ea
164,335
def exponential_backoff(count):
    """Return the backoff delay for retry number *count*: 2**count seconds."""
    return pow(2, count)
1eda644103e6e5875c0ae483dafbbf3fe417b6a5
305,734
def profile_scorer(sitter_name):
    """Return the Profile Score for a sitter, rounded to 2 decimal places.

    The score is 5 times the fraction of the English alphabet comprised by
    the distinct letters in what we've recovered of the sitter's name.
    For example the sitter name ``Leilani R.`` has 6 distinct letters.

    :param sitter_name: the sitter's (possibly partial) name
    :return: float score in [0, 5], rounded to 2 decimals
    """
    # Case-insensitive: count each letter once regardless of capitalization;
    # non-alphabetic characters (spaces, dots, ...) are ignored.
    distinct_letters = {ch for ch in sitter_name.lower() if ch.isalpha()}
    # Fix: dropped the leftover debug print and the redundant all-alpha
    # special case from the original implementation.
    return round(5 * (len(distinct_letters) / 26.0), 2)
b807a17d65ff143d1beeae7aa1d9497517165187
341,549
from requests import get


def url_resolves(any_url: str) -> bool:
    """Check whether the given url resolves successfully.

    Args:
        any_url (str): the full url of any media, article, content

    Returns:
        bool: False if the given link can't be resolved (non-200 status)
    """
    # NOTE(review): only HTTP 200 counts as "resolved"; requests follows
    # redirects by default, but other 2xx codes are treated as failures.
    return get(any_url).status_code == 200
66f2a38bc05ddd2faeb0d62261c518df016738b7
158,902
from pathlib import Path


def is_relative_to(parentpath: Path, subpath: Path) -> bool:
    """
    Check if subpath is relative to parentpath.

    Parameters
    ----------
    parentpath
        The (potential) parent path
    subpath
        The (potential) subpath

    Returns
    -------
    rc
        A boolean indicating whether subpath is relative to parentpath
    """
    if not isinstance(parentpath, Path) or not isinstance(subpath, Path):
        raise TypeError("Arguments must both be pathlib objects")
    # relative_to raises ValueError when subpath is not under parentpath.
    try:
        subpath.relative_to(parentpath)
    except ValueError:
        return False
    return True
93ff7f8d3c0f93bbdaa427a7494a0049f803ea95
446,550
def present_value(value, year, discount_rate, compounding_rate=1):
    """
    Calculate the present value of a future value (similar to numpy.pv,
    except numpy.pv gave weird negative values).

    :param value: The future value to get the present value of
    :param year: How many years away is it?
    :param discount_rate: The discount rate to use for all years/periods
    :param compounding_rate: How often per period values are compounded.
        1 means once a year, 365 means daily, etc.
    :return: present value of the provided value
    """
    per_period_growth = 1 + float(discount_rate) / float(compounding_rate)
    total_periods = year * compounding_rate
    # Discount back over every compounding period.
    return value * per_period_growth ** (-total_periods)
15732ecd69424c1e617ce867f1e72ff5cdb7e97f
182,421
def check_crop_size(image_height, image_width):
    """Check whether both image dimensions are divisible by 32.

    Args:
        image_height: height in pixels
        image_width: width in pixels

    Returns:
        True if both height and width are divisible by 32, False otherwise.
    """
    return all(side % 32 == 0 for side in (image_height, image_width))
7dc5b0c640be5bf32aca27376c7b192ba7a43c14
497,295
import logging


def get_logger(name, level=logging.CRITICAL+1):
    """
    Create a suitable logger object for this module without touching the
    root logger (so other modules' logs don't reach the screen).

    If a logger with the same name already exists it is reused — a fresh
    one would get duplicate handlers and messages would be doubled.
    The level defaults to CRITICAL+1 to suppress all logging; it is up to
    the application to configure its own logging.
    """
    already_registered = name in logging.Logger.manager.loggerDict
    logger = logging.getLogger(name)
    if not already_registered:
        # Only attach a NullHandler; real handlers are the app's job.
        logger.addHandler(logging.NullHandler())
    logger.setLevel(level)
    return logger
1f2be45fec215e5ba09e787769b1c7d2418de398
284,988
import torch


def get_dihedral(c1, c2, c3, c4):
    """
    Return the dihedral angle in radians for four points, using the atan2
    formula from:
    https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics

    Inputs:
    * c1, c2, c3, c4: (batch, 3) or (3,)
    """
    b1 = c2 - c1
    b2 = c3 - c2
    b3 = c4 - c3

    cross_12 = torch.cross(b1, b2, dim=-1)
    cross_23 = torch.cross(b2, b3, dim=-1)
    # y = |b2| * (b1 . (b2 x b3)), written with broadcasting.
    y = ((torch.norm(b2, dim=-1, keepdim=True) * b1) * cross_23).sum(dim=-1)
    # x = (b1 x b2) . (b2 x b3)
    x = (cross_12 * cross_23).sum(dim=-1)
    return torch.atan2(y, x)
d514c55e27c59b3150d5d7168814565bb1194f65
217,994
def lines(s):
    """
    Split a string into stripped, non-empty lines, using these conventions:

    - a line ending \\r\\n or \\n is a separator and yields a new element
    - empty lines or lines with only white space are not returned
    - returned lines are stripped

    >>> lines("one\\n\\n  two  \\r\\nthree\\n")
    ['one', 'two', 'three']
    """
    stripped = (line.strip() for line in s.splitlines())
    return [line for line in stripped if line]
8a446927e83b68b267986461dc55410a2fad3de0
598,502
def _band_detection(observations, min_freq, max_freq): """ Obtain a subset of the provided pandas.DataFrame that contains observations in the range of minimum frequency (min_freq) to maximum frequency (max_freq) in GHz. This is used to define the frequency range of various ALMA bands. """ band_obs = observations[(observations['central_freq_GHz'] >= min_freq) & (observations['central_freq_GHz'] <= max_freq)] return band_obs
85574d12d89f5e599bc6d27904aaa0dd081bcf15
510,535
def sort(purpose, config_projects, requested_projects):
    """Sort projects in the config for the specified 'purpose' into valid
    and stale entries.

    Side effects:
    - config_projects loses every project whose purpose matches 'purpose'.
    - requested_projects loses every project that was matched (same name
      and same version) in config_projects.

    Returns:
    - first item: projects found with matching versions in both mappings
    - second item: the remaining 'purpose' projects (stale)
    """
    matched = {}
    stale = {}

    # Materialize the relevant names first since we mutate the dict below.
    relevant = [name for name, details in config_projects.items()
                if details.purpose == purpose]

    for name in relevant:
        details = config_projects.pop(name)
        if (name in requested_projects
                and requested_projects[name].version == details.version):
            matched[name] = details
            del requested_projects[name]
        else:
            stale[name] = details

    return matched, stale
f31a9440584a29eb556038650605345a191a68f4
155,850
import string


def is_punctuation(term):
    """
    Check whether a string consists of a single punctuation mark.

    Example:
    >>> is_punctuation(".")
    True
    >>> is_punctuation("word")
    False
    """
    if len(term) != 1:
        return False
    return term in string.punctuation
3689159c0773bb6db025d84373206524b6bed5e3
621,689
import re


def get_pattern_from_attr_permissions_dict(attr_permissions):
    """
    Construct a compiled regular expression from a permissions dict that
    lists attribute paths to include and exclude.  Used in ObjWrapper when
    permissible_attr_pattern is a dict.

    Inclusions get a trailing '$' anchor unless they already end with '$'
    or '*'; '*'-terminated patterns are normalized to end in '.*'.
    Exclusions without an explicit '$' or '*' get '.*' appended so all
    subpaths are excluded too, and the exclusion set is wrapped in a
    negative lookahead.  (These corrections enforce certain patterns —
    whether "." or "\\." was meant is up to the user and regexes.)

    :param attr_permissions: dict of the format
        {'include': INCLUSION_LIST, 'exclude': EXCLUSION_LIST}.
        Both keys are optional and their lists may be empty.
    :return: a re.compile object

    >>> attr_permissions = {
    ...     'include': ['i.want.this', 'he.wants.that'],
    ...     'exclude': ['i.want', 'he.wants', 'and.definitely.not.this']
    ... }
    >>> r = get_pattern_from_attr_permissions_dict(attr_permissions)
    >>> test = ['i.want.this', 'i.want.this.too', 'he.wants.that',
    ...         'he.wants.that.other.thing', 'i.want.ice.cream', 'he.wants.me']
    >>> for t in test:
    ...     print("{}: {}".format(t, bool(r.match(t))))
    i.want.this: True
    i.want.this.too: False
    he.wants.that: True
    he.wants.that.other.thing: False
    i.want.ice.cream: False
    he.wants.me: False
    """
    def normalize_trailing(pat):
        # Normalize a pattern that doesn't end in a plain sub-path match so
        # it ends in '.*' (assume '\.' + '*' or a bare '*' wasn't meant).
        if pat.endswith('\.*'):
            return pat[:-3] + '.*'
        if pat[-2] != '.':
            return pat[:-1] + '.*'
        return pat

    pattern = ""

    includes = []
    for inc in attr_permissions.get('include', []):
        if inc.endswith('*'):
            inc = normalize_trailing(inc)
        elif not inc.endswith('$'):
            # Anchor the inclusion so subpaths don't match implicitly.
            inc += '$'
        includes.append(inc)
    pattern += '|'.join(includes)

    excludes = []
    for exc in attr_permissions.get('exclude', []):
        if not exc.endswith('$') and not exc.endswith('*'):
            # Exclude all subpaths unless explicitly anchored.
            exc += '.*'
        else:
            exc = normalize_trailing(exc)
        excludes.append(exc)
    if excludes:
        pattern += '(?!' + '|'.join(excludes) + ')'

    return re.compile(pattern)
e44aa81d3fca49be24a50b7b81b375af22530c8d
74,491
def sanitize(time_string):
    """
    Normalize a time string so minutes and seconds are separated by '.'.

    :param time_string: input time string, which may have mins and seconds
        separated by ':', '-' or '.'
    :return: uniformly formatted time string, mins and secs joined by '.'
    """
    # '-' takes precedence over ':', matching the original check order.
    for separator in ("-", ":"):
        if separator in time_string:
            mins, secs = time_string.split(separator)
            return mins + "." + secs
    return time_string
da84722f9125bb9c9de289da35e29b950f768df2
238,678
def _create_name_mapping(all_nodes): """ helper function to creates the name of the nodes within a GraphPipeline model - if no ambiguities, name of node will be name of model - otherwise, name of node will be '%s_%s' % (name_of_step,name_of_model) Parameters ---------- all_nodes : list of 2-uple nodes of graph : (step_name, model_name) Returns ------- dictionnary with key = node, and value : string corresponding to the node """ count_by_model_name = dict() mapping = {} done = set() for step_name, model_name in all_nodes: if (step_name, model_name) in done: raise ValueError("I have a duplicate node %s" % str((step_name, model_name))) done.add((step_name, model_name)) count_by_model_name[model_name[1]] = count_by_model_name.get(model_name[1], 0) + 1 for step_name, model_name in all_nodes: if count_by_model_name[model_name[1]] > 1: mapping[(step_name, model_name)] = "%s_%s" % (model_name[0], model_name[1]) else: mapping[(step_name, model_name)] = model_name[1] count_by_name = dict() for k, v in mapping.items(): count_by_name[k] = count_by_model_name.get(k, 0) + 1 for k, v in count_by_name.items(): if v > 1: raise ValueError("I have duplicate name for node %s" % str(k)) return mapping
09a51c45c740a78e324adc4949483c4f9fcd0ac2
381,359
import ipaddress


def address_in_range(address, net_ranges):
    """
    Check if the given ip address is in any of the user network ranges.

    :param address: string ip_address
    :param net_ranges: list of network range strings
    :return: True when the address falls in at least one range; False when
        it matches none, or when the address/a range fails to parse before
        any match is found
    """
    found = False
    for net_range in net_ranges:
        if found:
            # Already matched: later ranges are not parsed at all
            # (mirrors the original short-circuiting `or`).
            continue
        try:
            found = ipaddress.ip_address(address) in ipaddress.ip_network(net_range)
        except ValueError:
            # Invalid address or range before any match was found.
            return False
    return found
4c4ad53154feede960b4c44b43b46be2414af7ef
216,275
import socket


def get_localIP_to_remoteIP(connection_type, external_ip, external_port=80):
    """
    <Purpose>
      Resolve the local ip used when connecting outbound to an external ip.

    <Arguments>
      connection_type: The type of connection to attempt. See socket.socket().
      external_ip: The external IP to attempt to connect to.
      external_port: The port on the remote host to attempt to connect to.

    <Exceptions>
      As with socket.socket(), socketobj.connect(), etc.

    <Returns>
      The locally assigned IP for the connection.
    """
    probe = socket.socket(socket.AF_INET, connection_type)
    try:
        probe.connect((external_ip, external_port))
        # getsockname reports the (ip, port) chosen for this connection.
        local_ip, _local_port = probe.getsockname()
    finally:
        # Always release the socket, even when connect() fails.
        probe.close()
    return local_ip
5b8c3299dbf3534664e84661de1deb82bfcbf8ce
611,599
def int_comma(value):
    """
    Convert a number to a string containing commas every three digits.

    Accepts ints, floats and numeric strings; any fractional part after a
    decimal point is preserved unchanged.

    Fix: the original grouped the sign character together with the digits,
    producing e.g. '-,123' for -123; the sign is now handled separately.

    :param value: the value to format
    :return: string with thousands separators
    """
    text = str(value)
    int_part, dot, frac_part = text.partition(".")

    # Split off a leading sign so it never participates in digit grouping.
    sign = ""
    if int_part[:1] in ("-", "+"):
        sign, int_part = int_part[0], int_part[1:]

    # Collect three-digit groups from the right.
    groups = []
    while len(int_part) > 3:
        groups.insert(0, int_part[-3:])
        int_part = int_part[:-3]
    groups.insert(0, int_part)

    return sign + ",".join(groups) + dot + frac_part
f0c20adebaf3c7acaa3ef4b37283c0d0fba1c756
350,476
def subverParseClient(s):
    """Return the client name given a subversion string like '/Satoshi:0.18.0/'."""
    without_leading_slash = s[1:]
    return without_leading_slash.partition(":")[0]
35de59e7a18603341154c26f5da3c19a80631976
684,348
from datetime import datetime


def calc_open_doors(event_date: str):
    """Given an event date, calculate the time doors open.

    From: https://www.facebook.com/pg/openstudio.bar/about/?ref=page_internal
    - Open times for venue, not really a doors open policy, just open times

    Args:
        event_date (str): Show date in format %Y-%m-%d

    Returns:
        str: Doors open time or None if the date is invalid
    """
    try:
        parsed = datetime.strptime(event_date, '%Y-%m-%d')
    except ValueError:
        return None

    # NOTE(review): the original comment claimed 19:00 on weekdays and
    # 13:30 on weekends, but the code returned the opposite mapping;
    # behaviour is preserved here — confirm which is intended.
    is_weekend = parsed.weekday() >= 5
    return "7:00 pm" if is_weekend else "1:30 pm"
65b9c1f028dd668375bccf504a86cfe97ade2299
403,785
def identity_reducer(k, vs):
    """
    Identity reducer: unroll the values and recreate all the (k, v) pairs.

    :param k: key
    :param vs: list of values
    :return: list of (k, v) pairs
    """
    pairs = []
    for v in vs:
        pairs.append((k, v))
    return pairs
11ed99e4f12c462534f3f004533bcb699d1d9b66
156,901
import platform


def wrapCommand(cmd):
    """
    If running on OS X 10.5 or older, wrap |cmd| so that it will be
    executed as an i386 binary, in case it's a 32-bit/64-bit universal
    binary.  On any other platform the command is returned unchanged.
    """
    is_darwin = platform.system() == "Darwin"
    if is_darwin and hasattr(platform, 'mac_ver'):
        # mac_ver()[0] is e.g. '10.5.8'; comparing the first four
        # characters distinguishes 10.5-and-older from 10.6+.
        if platform.mac_ver()[0][:4] < '10.6':
            return ["arch", "-arch", "i386"] + cmd
    # Otherwise just execute the command normally.
    return cmd
dcb9cb7e3a53c42cde15ef9828bd201f5706c35c
433,276
def get_car_unchanging_properties(car):
    """
    Get car properties that are expected to not change at all for a given
    car VIN/ID during a reasonable timescale (1 week to 1 month).

    :param car: car info in original system JSON-dict format
    :return: dict with keys mapped to common electric2go format
    """
    result = {}
    result['vin'] = car['Id']
    result['license_plate'] = car['Name']
    # The fleet is homogeneous, so the model is a fixed string.
    result['model'] = 'Toyota Prius C'
    return result
0b339e82878d3b3adde1155fb83f8c2c300c97ae
52,604
def calc_vmin_vmax(list_xr, quant=0.01):
    """
    Calculate common vmin/vmax over a list of xr DataArrays — convenient
    for plotting multiple subplots with the same colour limits.

    :param list_xr: list of xarray DataArrays (anything with a .quantile
        method works)
    :param quant: quantile used for vmin; vmax uses 1 - quant
    :return: vmin, vmax
    """
    vmax = -9e99
    vmin = 9e99
    for arr in list_xr:
        # Track the extreme quantiles seen across all arrays.
        vmax = max(vmax, arr.quantile(1 - quant))
        vmin = min(vmin, arr.quantile(quant))
    return vmin, vmax
9257141bfd61e7427f28d55e50ec729cd7f720ab
159,854
def db2amplitude(db: list):
    """
    Convert decibel values to amplitudes.

    :param db: list of decibel values
    :return: list of amplitude values, 10 ** (d / 20) for each d
    """
    return [10 ** (d / 20) for d in db]
0b46e69a992e2fca68e2d74f5d9f8afc1fe6f8c3
289,902
def AntelopePf2dict(pf):
    """
    Convert an AntelopePf object to a Python dict, recursively decoding
    the nested branches (tbls).

    :param pf: AntelopePf object to convert.
    :type pf: :class:`~mspasspy.ccore.AntelopePf`
    :return: Python dict equivalent to pf.
    :rtype: dict
    """
    data = {}
    # Plain scalar entries.
    for key in pf.keys():
        data[key] = pf.get(key)
    # Table entries.
    for key in pf.tbl_keys():
        data[key] = pf.get_tbl(key)
    # Nested branches are converted recursively.
    for key in pf.arr_keys():
        data[key] = AntelopePf2dict(pf.get_branch(key))
    return data
cc7b78be2ae352a43f1f1a52b755e2986effaac0
598,054
def _seq_id_filter(id: str) -> str: """ Replaces underscores and semicolons with dashes in the sequence IDs. This is needed to have nice output filename templates with underscore as a delimiter for parameters """ result = id.replace("_", "-") return result.replace(";", "-")
e6355b1f94e76d255a1052072706619299ea4b51
38,028
def has_organization_field(obj):
    """
    Check whether this object has an organization field.

    :param obj: object to check
    :return: True if an 'organization' attribute exists, otherwise False
    """
    sentinel = object()
    return getattr(obj, 'organization', sentinel) is not sentinel
6a9e3e523a84953b0974abd286f09be4ea151ea6
365,902
def last_digit_of(a, b):
    """
    Calculate the last digit of a**b.

    Fix: the original mapped b == 0 to an exponent of 4 in the digit-cycle
    trick, returning e.g. 6 for 2**0 instead of 1.  Three-argument pow
    computes (a ** b) % 10 directly, handles b == 0 correctly, and is
    efficient even for huge exponents.

    Args:
        a (int): Base of a**b (non-negative)
        b (int): Exponent of a**b (non-negative)

    Returns:
        (int): The last digit of a**b
    """
    # Modular exponentiation: last digit == value mod 10.
    return pow(a, b, 10)
9896eab66290650b9f76bc888c69020560c07039
605,246
def extract_channel(signal, channel_number):
    """Extract a single channel from a multi-channel (stereo) audio signal.

    Falls back to returning the signal unchanged when it has no channel
    axis (i.e. indexing with [:, channel_number] raises IndexError).
    """
    try:
        channel = signal[:, channel_number]
    except IndexError:
        # 1-D (mono) input: nothing to extract.
        return signal
    return channel
387a1669699e5a46f53710e805707fa3ff374ba6
292,664
def count_mpmcts(filename):
    """
    Given a .pla file with a list of MPMCT gates, tally up the number of
    gates with each different number of controls.

    Parameters:
        filename (str): Name of a .pla-formatted file

    Outputs:
        num_total_controls (int): the maximum number of control bits
            (taken from the width of the first gate line; -1 if no gates)
        mpmct_tally (dict[int, int]): maps a number of control bits to how
            many gates in the .pla file have that many controls.
    """
    tally = {}
    total_controls = -1

    with open(filename, "r") as pla_file:
        # Skip the 11-line header and the trailing terminator line.
        gate_lines = pla_file.readlines()[11:-1]

    for gate_line in gate_lines:
        control_bits = gate_line.strip().split(" ")[0]
        # '-' marks a don't-care bit; every other character is a control.
        num_controls = len(control_bits) - control_bits.count("-")
        if total_controls == -1:
            total_controls = len(control_bits)
        tally[num_controls] = tally.get(num_controls, 0) + 1

    return total_controls, tally
253f6388729416b33e8e95fac4f85c5976cef371
615,634
import random


def get_random_integer(min_result=0, max_result=65535):
    """
    Return a random integer in [min_result, max_result], both inclusive.

    :param min_result: The minimum number that can be generated.
    :type min_result: :class:`int`
    :param max_result: The maximum number that can be generated.
    :type max_result: :class:`int`
    :returns: :class:`int` -- The generated random number.
    """
    # randrange's stop is exclusive, hence the +1 to keep max inclusive
    # (equivalent to random.randint(min_result, max_result)).
    return random.randrange(min_result, max_result + 1)
9a18edf777b8a08cbb952a0ca8bfe4ca54011d83
533,565
def update_vocab_info(vocab, word_counts):
    """Return dict of { word : {'count', 'index'} } for each word in vocab."""
    info = {}
    for index, word in enumerate(vocab):
        info[word] = {'count': word_counts[word], 'index': index}
    return info
a7271175af5aad6d766c6db1c16e2574ee6a901b
98,957
def cart2(list1: list, list2: list) -> list:
    """Cartesian product of two lists.

    :param list list1: input list 1
    :param list list2: input list 2
    :return: a new list containing all Cartesian products of the two
        lists as 2-element lists, in list1-major order.
    :rtype: list

    >>> cart2(['a', 'b'], [1, 2])
    [['a', 1], ['a', 2], ['b', 1], ['b', 2]]

    Fix: the original recursive helper recursed once per produced pair and
    hit Python's recursion limit for modestly sized inputs; this iterative
    comprehension yields the same pairs in the same order without that
    limitation.
    """
    return [[x, y] for x in list1 for y in list2]
3deb6106b36dc81ed2b8b4251290c687b591c157
689,219
import itertools


def natoms(self):
    """
    Sequence of the number of sites of each (consecutively grouped)
    element type associated with the Poscar.  Similar to the 7th line in
    vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
    """
    counts = []
    for _symbol, group in itertools.groupby(self.syms):
        counts.append(sum(1 for _ in group))
    return counts
e4348815be3768a280766addbccfaf5d659ccfca
634,711
def truncation_selection(population, n_best):
    """
    Sort the individuals by ascending fitness and pick the first `n_best`
    for reproduction.

    :param population: iterable of individuals with a ``fitness`` attribute
    :param n_best: how many of the lowest-fitness individuals to keep
    :return: list of the selected individuals
    """
    # sorted() is stable, so ties keep their original relative order.
    ranked = sorted(population, key=lambda individual: individual.fitness)
    return ranked[:n_best]
ca331748cf4903e9883259d4e86d06349c3a8d38
215,180
def ParseAction(messages, action):
    """Convert an action string to the corresponding enum value.

    Options are: 'allow' or 'deny' (case-insensitive); a falsy action
    yields None.

    Args:
        messages: apitools.base.protorpclite.messages, the proto messages
            class for this API version for firewall.
        action: str, the action as a string

    Returns:
        ActionValueValuesEnum type, or None when action is empty
    """
    if action:
        return messages.FirewallRule.ActionValueValuesEnum(action.upper())
    return None
e13285d2a7873ecbdd7fcf7756e31d9653b7a5d1
639,617