content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def get_label_ids(service):
    """Map Gmail label names to label IDs for the given API service.

    Used when attaching labels to messages.
    """
    response = service.users().labels().list(userId="me").execute()
    return {entry["name"]: entry["id"] for entry in response["labels"]}
ef83b0fabe8ec38383f7b7e5aeda816516dc6d88
466,214
from typing import List


def canReach(arr: List[int], start: int) -> bool:
    """Return True iff an index holding value 0 is reachable from 'start'.

    From index i you may jump to i - arr[i] or i + arr[i], staying in bounds.
    """
    seen = set()
    last = len(arr) - 1

    def dfs(i: int) -> bool:
        # A zero at the current index means success.
        if arr[i] == 0:
            return True
        seen.add(i)
        # Try the left jump first, then the right jump (same order as a
        # plain left/right short-circuit chain).
        for nxt in (i - arr[i], i + arr[i]):
            if 0 <= nxt <= last and nxt not in seen and dfs(nxt):
                return True
        seen.discard(i)
        return False

    return dfs(start)
ee84aed2486fcd03545bea8302c0a331a04e5ce4
574,851
def is_solution(cnf, assignment):
    """Test whether the given assignment satisfies the CNF.

    assignment maps variable names to truth values; not every variable in
    the CNF needs to be assigned. A clause is satisfied when at least one
    of its literals matches the assignment.
    """
    return all(
        any(name in assignment and assignment[name] == val for name, val in clause)
        for clause in cnf
    )
74c83fc8c0089261d3afa344ea9ee7e62e850621
631,156
def has_permissions(ctx, **perms):
    """Copy of code from discord.py to work outside of wrappers.

    Returns True when every requested permission flag matches the author's
    channel permissions, False otherwise.
    """
    resolved = ctx.channel.permissions_for(ctx.author)
    missing = [name for name, wanted in perms.items()
               if getattr(resolved, name, None) != wanted]
    return not missing
    # raise commands.MissingPermissions(missing)
e1c4e260794ca8534758eb1a9b9a4b003c3dd98c
260,687
from typing import Any
from typing import Dict


def __get_cumulative_cases_count(data_json: Dict[str, Any]) -> int:
    """Return the cumulative case count stored in the data point, or 0 if absent."""
    return data_json.get("cumCasesByPublishDate", 0)
4e6ceef73880fe3430c92bc5a43deef545b01985
639,479
def to_sql_name(name):
    """Normalise ``name`` into a valid SQL identifier (lowercase, underscores)."""
    lowered = name.lower()
    return lowered.replace(' ', '_')
f3157d9444793d0af05e27317fdab2aa55531b84
704,007
def rate(e0, e1, n0, n1, nr, p):
    """Residual used to solve for the convergence rate p.

    y(p) = 0 determines the convergence rate when a reference solution
    measures the error.

    e0, n0 : Error and grid number for grid "0"
    e1, n1 : Error and grid number for grid "1"
    nr : Reference grid number
    p : Convergence rate to solve for
    """
    # Grid spacings assuming halving between refinement levels.
    h0, h1, hr = (0.5 ** (n * p) for n in (n0, n1, nr))
    return e0 * (h1 - hr) - e1 * (h0 - hr)
1661abd1f867f65fe536bebd3ea319f002d2956a
674,874
def feet_to_meters(feet: float) -> float:
    """
    Convert feet to meters.

    One foot is exactly 0.3048 m, so feet are MULTIPLIED by 0.3048.
    (The previous implementation divided, which converts meters to feet.)

    :param feet: a measurement in feet.
    :returns: measurement in meters
    """
    return feet * 0.3048
0ea73d0d66100405b5355844feedde5525000e60
230,778
def triangular_number(x: int) -> int:
    """Return the x-th triangular number, i.e. 1 + 2 + ... + x."""
    total = x * (x + 1)
    return total // 2
db640d26cfa4ba524c18bc443fc657758260f585
631,341
import collections def group_models_by_index(backend, models): """ This takes a search backend and a list of models. By calling the get_index_for_model method on the search backend, it groups the models into the indices that they will be indexed into. It returns an ordered mapping of indices to lists of models within each index. For example, Elasticsearch 2 requires all page models to be together, but separate from other content types (eg, images and documents) to prevent field mapping collisions (eg, images and documents): >>> group_models_by_index(elasticsearch2_backend, [ ... wagtailcore.Page, ... myapp.HomePage, ... myapp.StandardPage, ... wagtailimages.Image ... ]) { <Index wagtailcore_page>: [wagtailcore.Page, myapp.HomePage, myapp.StandardPage], <Index wagtailimages_image>: [wagtailimages.Image], } """ indices = {} models_by_index = collections.OrderedDict() for model in models: index = backend.get_index_for_model(model) if index: indices.setdefault(index.name, index) models_by_index.setdefault(index.name, []) models_by_index[index.name].append(model) return collections.OrderedDict([ (indices[index_name], index_models) for index_name, index_models in models_by_index.items() ])
54add7cadd33d76b492a41f775137f304dbc56a6
357,781
import requests


def send_get_request(args):
    """
    Send an HTTP GET request to Building Energy Gateway.

    This is the datareadingrequests equivalent of building_data_requests'
    post_request(). Note that it does not retry requests without SSL.

    :param args: Arguments for the request.
    :return: Response object.
    """
    gateway_url = "https://energize.andoverma.us"
    return requests.get(gateway_url, params=args)
5a286d3a34b60f28a4d1a732c360077b829d1c7d
355,923
def binary_search(search_list, search_item):
    """
    Given an ordered list find the search_item; return it, or None if absent.

    Fixes:
    - ``li_len / 2`` was float division (Python 3), which raises TypeError
      when used as a list index; use floor division instead.
    - An empty list previously raised IndexError; return None.

    Complexity - Best: O(1) - Average: O(log n) - Worst: O(log n)
    """
    if not search_list:
        return None
    li_len = len(search_list)
    mid_idx = li_len // 2  # floor division: must be an int index
    mid_val = search_list[mid_idx]
    if li_len == 1 and mid_val != search_item:
        return None
    if mid_val == search_item:
        return mid_val
    elif search_item < mid_val:
        return binary_search(search_list[:mid_idx], search_item)
    elif search_item > mid_val:
        return binary_search(search_list[(mid_idx + 1):], search_item)
11777decb457db42a3129eca84d90777cf3ba640
348,450
def insert_sort(nums):
    """Insertion sort (in place): grow a sorted prefix, sinking each new
    element backwards to its position. Returns the (same, mutated) list.

    Time complexity: O(n^2).

    >>> insert_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
    True
    >>> insert_sort([])
    []
    """
    for i in range(1, len(nums)):
        j = i
        # Bubble nums[j] left while it is smaller than its neighbour;
        # the prefix nums[:i] is already sorted, so we can stop early.
        while j > 0 and nums[j] < nums[j - 1]:
            nums[j], nums[j - 1] = nums[j - 1], nums[j]
            j -= 1
    return nums
f3c59f677a1682506db227f1e302796a7a962563
182,558
def extract_ref_uri(ref: str, uri: str) -> str:
    """Build "<uri>#<fragment>" from a JSON-schema ``$ref``.

    The well-known ``#/components/schemas/`` prefix is stripped; any other
    ref is appended verbatim after the '#'.
    """
    prefix = "#/components/schemas/"
    fragment = ref[len(prefix):] if ref.startswith(prefix) else ref
    return f"{uri}#{fragment}"
baab77bd50c1522161a1d2dfeccf4068ada17b32
203,899
def j2_quote(s):
    """Jinja2 custom filter: wrap a value in double quotes."""
    return f'"{s}"'
2ea55bdc9da276cb4fd5edf56d53a57ff2133899
550,054
def getsepfield(s: str, n: int, sep: str = ","):
    """Return the nth field of ``s`` split on ``sep``, or "" if out of range."""
    fields = s.split(sep)
    return fields[n] if 0 <= n < len(fields) else ""
4ce9f941c07db19a34c99c1c454e773b736f637f
409,021
def ascii_identify(origin, *args, **kwargs):
    """Check whether the first positional argument looks like an ASCII filename
    (a string ending in .txt or .dat, case-insensitive)."""
    candidate = args[0]
    if not isinstance(candidate, str):
        return False
    return candidate.lower().split('.')[-1] in ['txt', 'dat']
9dd3b0f81f25903e17f874bc394baf7bd7c8f508
452,683
def _as_stac_instruments(value: str):
    """Split an underscore-joined instrument code into lowercase names,
    dropping trailing '+'/'-' decorations.

    >>> _as_stac_instruments('TM')
    ['tm']
    >>> _as_stac_instruments('ETM+')
    ['etm']
    >>> _as_stac_instruments('OLI_TIRS')
    ['oli', 'tirs']
    """
    parts = value.split("_")
    return [part.strip("+-").lower() for part in parts]
5666da0b9eef7741cba7f739854a16b7c9339a0a
502,435
def annotated(**kwargs):
    """Decorator that attaches each keyword argument as an attribute on the
    decorated function object.

    Example::

        @annotated(priority=10)
        def f():
            pass

        assert f.priority == 10
    """
    def _decorator(func):
        for attr_name, attr_value in kwargs.items():
            setattr(func, attr_name, attr_value)
        return func
    return _decorator
11f694121bf6678a3e503321123f34d9a461a2ec
392,127
def EnumTextToValue(fdesc, enum_text):
    """Look up the integer value of ``enum_text`` in the field's enum type.

    Args:
      fdesc: descriptor of the field the enum_text belongs to.
      enum_text: the text of an enum.

    Returns:
      integer representing enum_text.
    """
    # descriptor.py
    enum_value = fdesc.enum_type.values_by_name[enum_text]
    return enum_value.number
50b8da1c2d447d27522e2284274403e5bca88fb1
527,806
def unpad(x, x_mask):
    """
    Unpad a batch of sequences by selecting elements not masked by x_mask.
    Returns a list of sequences and their corresponding lengths
    """
    x_unp = []
    x_unp_mask = []
    x_unp_len = []
    # NOTE(review): the indexing below assumes seq_mask holds 0/1 integers
    # where 1 marks padding, so (1 - seq_mask) selects the kept positions —
    # confirm against the caller. With boolean masks on recent PyTorch,
    # `1 - seq_mask` is deprecated/unsupported; ~seq_mask would be needed.
    for seq, seq_mask in zip(x, x_mask):
        # Keep only the unmasked elements of this sequence.
        seq_unp = seq[(1 - seq_mask)]
        x_unp.append(seq_unp)
        # Corresponding (unmasked) slice of the mask itself.
        x_unp_mask.append(seq_mask[1 - seq_mask])
        # Length of the unpadded sequence.
        x_unp_len.append(seq_unp.shape[0])
    return x_unp, x_unp_mask, x_unp_len
4a447505e11505fe5fe6139729c488957a30d9df
69,574
def descope_queue_name(scoped_name):
    """Return the unscoped queue name from a fully-scoped name, or None.

    scoped_name can be '/', '/global-queue-name', or 'project-id/queue-name'.
    """
    _, _, queue_name = scoped_name.partition('/')
    return queue_name or None
369c58e827121ae369dae2e8e898e0c9898f5224
269,532
def new_admin(user, password_hasher):
    """Create and persist a new admin user for testing; return it."""
    User = user
    admin = User(
        username="admin",
        email="[email protected]",
        password=password_hasher.hash("admin"),
    )
    admin.save()
    return admin
bc095d270ccb1f124a408a65b6a3684f407bbda0
649,241
import uuid


def get_random_filename(ext, prefix=""):
    """
    Return a random filename: optional ``prefix``, a 32-char uuid4 hex
    string, then ``ext`` (which should include the dot).
    """
    # .hex gives the UUID without dashes.
    random_part = uuid.uuid4().hex
    return f"{prefix}{random_part}{ext}"
e148ba7f0f87cffdcbdb6bff30b4a12d399844f5
489,390
def process_secondary_inputs(dict_):
    """
    This function processes the secondary input parameters.

    The previous version repeated a try/except KeyError block per key and
    carried a misleading comment claiming show_output defaults to True
    (it defaults to False, matching this docstring); dict.get expresses
    the same fallbacks directly.

    Parameters
    ----------
    dict_: dict
        Estimation dictionary. Returned by grmpy.read(init_file).

    Returns
    -------
    trim: bool, default True
        Trim the data outside the common support, recommended.
    rbandwidth: float, default 0.05
        Bandwidth for the Double Residual Regression.
    reestimate_p: bool, default False
        Re-estimate P(Z) after trimming, not recommended.
    show_output: bool, default False
        Show intermediate outputs of the estimation process.
    """
    estimation = dict_.get("ESTIMATION", {})
    trim = estimation.get("trim_support", True)
    reestimate_p = estimation.get("reestimate_p", False)
    rbandwidth = estimation.get("rbandwidth", 0.05)
    show_output = estimation.get("show_output", False)
    return trim, rbandwidth, reestimate_p, show_output
9773d92a294be75d2ee6f6172ff07db020e8bc7b
560,559
def is_gem_file(location):
    """
    Return True if a file is a .gem archive or a .gemspec file.
    """
    return location.endswith('.gem') or location.endswith('.gemspec')
a9b20df59e437e94b9370dfa93657141bdb61372
505,310
def _get_reason(cluster_or_step):
    """Return the state change reason message ('' before any state change)."""
    # StateChangeReason is {} before the first state change.
    reason = cluster_or_step["Status"]["StateChangeReason"]
    return reason.get("Message", "")
bb18c94b2647bb0cc0234c218281d900b48119fe
596,167
import json


def intrinsics(kinect, path='calibrate/IR/intrinsics_retrieved_from_kinect_mapper.json', write=False):
    """
    Retrieve the depth camera intrinsics from the kinect's mapper and, when
    ``write`` is True, save them as JSON at ``path`` (default:
    calibrate/IR/intrinsics_retrieved_from_kinect_mapper.json).

    :param kinect: kinect instance
    :param path: path to save the intrinsics as a json file
    :param write: save or not save the intrinsics
    :return: returns the intrinsics matrix
    """
    # Query the device mapper directly (private attribute of the kinect wrapper).
    intrinsics_matrix = kinect._mapper.GetDepthCameraIntrinsics()
    if write:
        with open(path, 'w', encoding='utf-8') as json_file:
            # Serialise only the scalar calibration fields of the matrix object.
            configs = {"FocalLengthX": intrinsics_matrix.FocalLengthX,
                       "FocalLengthY": intrinsics_matrix.FocalLengthY,
                       "PrincipalPointX": intrinsics_matrix.PrincipalPointX,
                       "PrincipalPointY": intrinsics_matrix.PrincipalPointY,
                       "RadialDistortionFourthOrder": intrinsics_matrix.RadialDistortionFourthOrder,
                       "RadialDistortionSecondOrder": intrinsics_matrix.RadialDistortionSecondOrder,
                       "RadialDistortionSixthOrder": intrinsics_matrix.RadialDistortionSixthOrder}
            json.dump(configs, json_file, separators=(',', ':'), sort_keys=True, indent=4)
    return intrinsics_matrix
811b4128090a7015d297a0bd85432ad321369e99
630,100
def lt(x, y):
    """Return True when ``x`` compares strictly less than ``y``."""
    result = x < y
    return result
9d6b86afabea8adf527f86a06e96e1946290e602
405,348
import requests


def near(x, y, url, resource='/grid/near'):
    """Determine the chips and tiles that lie near a projection point.

    Args:
        x (int): projection coordinate x
        y (int): projection coordinate y
        url (str): protocol://host:port/path
        resource (str): service resource path (default: /grid/near)

    Returns:
        dict with 'chip' and 'tile' lists; each entry carries a 'grid-pt'
        and a 'proj-pt' pair, e.g.::

            {'chip': [{'grid-pt': [854.0, 1105.0],
                       'proj-pt': [-3585.0, -195.0]}, ...],
             'tile': [{'grid-pt': [16.0, 23.0],
                       'proj-pt': [-165585.0, -135195.0]}, ...]}
    """
    endpoint = '{}{}'.format(url, resource)
    response = requests.get(url=endpoint, params={'x': x, 'y': y})
    return response.json()
5d52f48b7b1b4c26d62916fa2887cc0dd18c96af
224,520
def calc_check_digit(number):
    """Calculate the check digit (weighted mod-11 scheme, folded to one digit)."""
    digits = [int(ch) for ch in number[:-1]]
    check = sum(((pos % 9) + 1) * d for pos, d in enumerate(digits)) % 11
    if check == 10:
        # Fall back to the shifted weight sequence when the first pass
        # yields 10.
        check = sum((((pos + 2) % 9) + 1) * d for pos, d in enumerate(digits)) % 11
    return str(check % 10)
948a5bbdda6c3cc4f95042f87d65d403d739537e
149,541
def _find_id_row(table_as_list: list):
    """Return the line containing "Id" from the 'casadm -P' output section.

    Raises Exception when no such line exists.
    """
    matches = (line for line in table_as_list if "Id" in line)
    for line in matches:
        return line
    raise Exception("Cannot find Id row in the 'casadm -P' output")
b268fb12a068b2f9bac0fc779715276f38dcff5e
138,311
def upload_global_tag(task):
    """
    Get the global tag that is supposed to be used for uploads for the given task.

    Parameters:
      task (str): An identifier of the task. Supported values are 'master',
        'validation', 'online', 'prompt', 'data', 'mc', 'analysis'

    Returns:
      The name of the GT for uploads or None if a new GT should be created
      by the client for each upload request.
    """
    # Every supported task (and any unknown one) currently maps to "no
    # dedicated upload GT": the client creates a new GT per upload request.
    return None
d84728a9fa1eb39083b305595d2ba11e40b4fb87
315,819
def cb_format_default_value(val):
    """Value formatting callback for the default cell: '' for None, str otherwise."""
    if val is None:
        return ""
    return str(val)
71279ec635f010efe3db9731927e6df71a91eba0
563,991
def get_mask(source, source_lengths):
    """Build a [T, B, 1] mask that is 1 for valid time steps and 0 for padding.

    Arguments
    ---------
    source : [T, B, C]
    source_lengths : [B]

    Returns
    -------
    mask : [T, B, 1], same dtype/device as ``source``.
    """
    T, B, _ = source.size()
    mask = source.new_ones((T, B, 1))
    # Zero out every time step at or beyond each sequence's length.
    for batch_idx, length in enumerate(source_lengths):
        mask[length:, batch_idx, :] = 0
    return mask
3042598f7fe1ecc0a9b4f69ab3e8fca6823e7d32
585,870
def hasUserMatch(uList, tweetJSON):
    """Return True if the tweet's screen_name contains any keyword in uList."""
    screen_name = tweetJSON['user']['screen_name']
    return any(keyword in screen_name for keyword in uList)
b327520045927331a774f6fe84a5ee882184489b
407,251
import requests


def get_html(url):
    """
    Return the HTML text of the page at ``url``, fetched with a desktop
    browser User-Agent header.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text
555a16dca9c3e5fbae3c356f6c9b11332e673755
225,333
def identity(x):
    """
    The identity function: returns its argument unchanged.
    Not to be confused with the id() builtin.

    >>> identity('foo')
    'foo'
    """
    return x
914f6432f202712f159e16ed40f0b4c9c458acca
69,431
def get_copiable_service_data(service, *, questions_to_exclude=None, questions_to_copy=None):
    """
    Filter service data that shouldn't be copied to a new draft service,
    either by excluding non-copyable questions or by including only the
    copyable ones.

    View-level validation prevents both lists being supplied together, but
    if both arrive the exclude list wins (``to_copy`` is being deprecated).
    With neither list, all data fields are returned.

    :param service: service object with a JSON dict `data` attribute (required)
    :param questions_to_exclude: iterable of question IDs that must not be copied
    :param questions_to_copy: iterable of question IDs that may be copied (to be deprecated)
    :return: JSON dict of service data
    """
    data = service.data
    if questions_to_exclude:
        return {k: v for k, v in data.items() if k not in questions_to_exclude}
    if questions_to_copy:
        return {k: v for k, v in data.items() if k in questions_to_copy}
    return data
15cae7f6afed920690c4ce37492a594bb8be18d3
430,536
def clever_split(line: str) -> list:
    """Split ``line`` into elements on tabs, falling back to single spaces,
    dropping empty/whitespace-only pieces.

    Parameters
    ----------
    line : str

    Returns
    -------
    list of str
        Stripped, non-empty elements.
    """
    pieces = line.split('\t')
    if len(pieces) <= 1:
        # No tabs present: fall back to splitting on spaces.
        pieces = line.split(' ')
    return [stripped for piece in pieces if (stripped := piece.strip())]
2cc0c11764ed0c7f216818ad86356bd2d327f138
66,692
def oneliner(text):
    """Collapse ``text`` onto one line by replacing each newline with a space."""
    return ' '.join(text.split('\n'))
ff800a3813c5efab901b4fbfb43bc872e3857fa0
482,599
import json


def load_dexpreopt_configs(configs):
    """Load dexpreopt.config files and map module names to library names."""
    module_to_libname = {}
    for config in (configs or []):
        with open(config, 'r') as f:
            contents = json.load(f)
        module_to_libname[contents['Name']] = contents['ProvidesUsesLibrary']
    return module_to_libname
b3a8763ee182fa7e9da968404369933e494663b5
26,746
import uuid


def generate_id() -> str:
    """
    Create a unique ID (32-char uuid4 hex). Sent with each message and used
    to match requests to responses.
    """
    return uuid.uuid4().hex
eae268d641f0ee6e310ea85d75316bfa327b31b9
625,865
def errfun_sae(x, y):
    """Sum of absolute errors between ``x`` and ``y``."""
    absolute_errors = abs(x - y)
    return sum(absolute_errors)
15dd69b81a2781e5f815e0db5fdfdf808b9dfb1e
641,497
def loc2rank(loc, procs):
    """
    Return the MPI rank using
    - loc, the tuple of subdomain location
    - procs, the tuple of number of subdomains in each direction

    loc2rank is the reverse operation of rank2loc.
    """
    i, j, k = loc
    _, nj, nk = procs
    return (i * nj + j) * nk + k
3c3b8fe25805abc4000cc0c7cb091b8bfdbdf043
376,447
import torch


def compute_logits(cluster_centers, data):
    """Compute logits of cluster membership as the negative mean squared
    Euclidean distance to each center.

    Args:
        cluster_centers: [K, D] Cluster center representation.
        data: [B, N, D] Data representation.

    Returns:
        log_prob: [B, N, K] logits.
    """
    num_clusters = cluster_centers.shape[0]
    batch = data.shape[0]
    flat = data.contiguous().view(-1, data.shape[-1])           # [B*N, D]
    diff = flat.unsqueeze(1) - cluster_centers.unsqueeze(0)     # [B*N, K, D]
    # Mean (not sum) of squared differences along the feature axis.
    neg_dist = -torch.mean(diff.pow(2), dim=-1)                 # [B*N, K]
    return neg_dist.view(batch, -1, num_clusters)
ec0f7c69654b4e233a0d98582d06f9cf169f2378
76,342
import functools


def get_nested_attr(obj, attr, *args):
    """Get the value of a dotted nested attribute from an object.

    Args:
        obj: object to retrieve an attribute value from.
        attr: attribute path separated by dots (e.g. 'a.b.c').

    Returns:
        The attribute value, or the given *args default when any link in
        the chain is missing.
    """
    return functools.reduce(
        lambda current, name: getattr(current, name, *args),
        attr.split('.'),
        obj,
    )
b708898e5b8b12df34768e0e9c7281aae1bbc2db
520,377
def conv_str_series_data(str_series: str) -> list:
    """Convert a bracketed, comma-separated string into a list of floats.

    Args:
        str_series (str): e.g. "[1, 2.5, 3]"

    Returns:
        list of float.
    """
    tokens = str_series.strip("[]").split(",")
    return [float(token) for token in tokens]
15191bf4fe98aab10f2f37f63b122df01037eb40
472,484
def empty_str(value: str) -> bool:
    """Return True when ``value`` is None or blank (whitespace doesn't count)."""
    if value is None:
        return True
    return value.strip() == ''
23420f8dacb8447f9338cf34a66bb7c21284d6bb
586,828
def parse_authentication(authentication):
    """Split a method:password field into its components.

    authentication (str): an authentication string as stored in the DB,
    for example "plaintext:password".

    return (str, str): the method and the payload

    raise (ValueError): when the authentication string is not valid.
    """
    method, sep, payload = authentication.partition(":")
    # partition returns an empty separator when ':' is absent.
    if not sep:
        raise ValueError("Authentication string not parsable.")
    return method, payload
f0bbcf98a3b38cadd40e36cd6045164a85a6e6f9
416,801
from typing import Pattern


def mnemonic(column: str, size: int, drop: Pattern) -> str:
    """Derive a mnemonic for ``column`` of at most ``size`` characters.

    Parameters
    ----------
    column: str
        The string to find a mnemonic for
    size: int
        The maximum length of the mnemonic
    drop: Pattern
        A pattern of characters to remove (e.g. vowels) when shortening
    """
    upper = column.upper()
    surplus = len(column) - size
    if surplus <= 0:
        return upper
    # Drop only as many matches as needed, then hard-truncate.
    return drop.sub('', upper, count=surplus)[:size]
84c056ae77f4d61d9398be9216b70a824ac601b8
213,288
def subdivide_stats(data):
    """
    If a key contains a '.', nest its value under the first dotted part.

    Fix: the previous version used ``key.split('.', 2)``, which produces
    THREE parts for keys with two or more dots and crashed the two-name
    unpacking with ValueError; maxsplit=1 keeps everything after the first
    dot as the sub-key.
    """
    ret = {}
    for key, value in data.items():
        if '.' in key:
            parent, subkey = key.split('.', 1)
            ret.setdefault(parent, {})[subkey] = value
        else:
            ret[key] = value
    return ret
65e47db5c75118c1939a8ecdd3ad581a053da893
12,625
def flatten_entrypoints(ep):
    """Flatten nested entrypoints dicts.

    Entry point group names can include dots, but dots in TOML make nested
    dictionaries::

        [entrypoints.a.b]     # {'entrypoints': {'a': {'b': {}}}}

    The proper way to avoid this is ``[entrypoints."a.b"]``, but since
    arbitrarily nested mappings aren't needed in entrypoints, flit allows
    the former. This flattens the nested dictionaries from loading
    pyproject.toml.
    """
    def _flatten(mapping, prefix):
        leaves = {}
        for key, value in mapping.items():
            if isinstance(value, dict):
                # Descend, extending the dotted group name.
                yield from _flatten(value, prefix + '.' + key)
            else:
                leaves[key] = value
        if leaves:
            yield prefix, leaves

    res = {}
    for key, value in ep.items():
        res.update(_flatten(value, key))
    return res
38bc521181d9473cd4b40424331eac162dc47574
518,228
from typing import MutableMapping


def reduce_schema(d):
    """
    Take a jsonschema loaded as a dictionary and return a reduced structure
    that keeps only the property names it describes (values become None, or
    a nested reduced dict for object-typed properties).
    """
    CONTAINS_KEYS = "properties"
    reduced = {}
    for key, value in d.get(CONTAINS_KEYS, {}).items():
        if isinstance(value, MutableMapping) and value.get(CONTAINS_KEYS):
            reduced[key] = reduce_schema(value)
        else:
            reduced[key] = None
    return reduced
2f8448306283bfa26a903689005b27f906e96c03
465,143
def circle(x, y, radius, canvas, color):
    """Create a circle canvas object from its centre coordinates.

    :param x: centre x coordinate
    :param y: centre y coordinate
    :param radius: circle radius
    :param canvas: environment
    :param color: fill color
    :return: a circle canvas object
    """
    bbox = (x - radius, y - radius, x + radius, y + radius)
    return canvas.create_oval(*bbox, fill=color, outline='')
b3e3789bb1f7cad2c7c82e2b80f5d406ddda9dd8
162,835
def get_expected_output_bcf_files_dict(base_out):
    """
    :param base_out: Base path structure for the bcf files. For example, if
        the expected path is 'work/step.path/log/step.bcf', pass
        'work/step.path/log/step'.
    :type base_out: str

    :return: Dictionary with the expected paths for the bcf-related files.
    """
    suffixes = {
        "bcf": ".bcf",
        "bcf_md5": ".bcf.md5",
        "csi": ".bcf.csi",
        "csi_md5": ".bcf.csi.md5",
    }
    return {key: base_out + suffix for key, suffix in suffixes.items()}
934120b4b8a86f6c0797d1befefdf9fc7a76d2e1
567,471
def _testLengthBoundaryValidity(dataLength, tableDirectory):
    """
    Report tables whose offset + length runs past the available data.

    Fix: the previous version sorted ``(offset, entry)`` tuples; when two
    entries share an offset the tuple comparison falls through to comparing
    dicts, which raises TypeError in Python 3. Sorting with an explicit key
    avoids that while keeping the same order otherwise.

    >>> test = [
    ...     dict(tag="test", offset=44, length=1)
    ... ]
    >>> bool(_testLengthBoundaryValidity(45, test))
    False
    >>> test = [
    ...     dict(tag="test", offset=44, length=2)
    ... ]
    >>> bool(_testLengthBoundaryValidity(45, test))
    True
    """
    errors = []
    for entry in sorted(tableDirectory, key=lambda e: e["offset"]):
        end = entry["offset"] + entry["length"]
        if end > dataLength:
            errors.append("The length of the %s table is not valid." % entry["tag"])
    return errors
e5876302e2fdeb2cb1b708f49777b73183d2a938
663,052
def prune_graph_nodes(graph, tag):
    """
    Filter graph nodes to only those tagged with the specified tag.

    :param graph: Original graph to prune
    :param tag: Tag for nodes of interest
    :return: List of node names tagged with the specified tag
    """
    matching = []
    for node_name in graph.nodes():
        # Nodes without a 'tag' attribute are silently skipped.
        try:
            node_tag = graph.node[node_name]['tag']
        except KeyError:
            continue
        if node_tag == tag:
            matching.append(node_name)
    return matching
9a8b1475b75e52f9e84173431dd2c0940a092365
277,945
def to_dict_of_list(in_list):
    """
    Convert a list of dicts to a dict of lists.

    Args:
        in_list (list): The list of dicts to be converted.

    Returns:
        dict: The converted dict of lists.

    Raises:
        ValueError: if the dicts do not all share the same keys.
    """
    for prev, curr in zip(in_list, in_list[1:]):
        if prev.keys() != curr.keys():
            raise ValueError('dict keys are not consistent')
    return {key: [item[key] for item in in_list] for key in in_list[0]}
395e458ece84d237887944e5791b479c0a2b5784
554,107
def to_24hour_format(now_hour, hour):
    """
    Return the hour in 24-hour notation.

    :param now_hour: current hour (24-hour clock); past 11 o'clock the
        result is shifted into the afternoon.
    :param hour: hour in the app's 12-hour representation
    :return: hour in 24-hour notation
    """
    # Add a 12-hour offset when the current time is in the afternoon.
    offset = 12 if now_hour > 11 else 0
    # NOTE(review): the (hour + 1) // 5 mapping is unusual for a 12-hour
    # conversion — presumably `hour` is an app-specific encoding; confirm
    # against the caller.
    return offset + (hour + 1) // 5
22cb3ddfdda80f4379efcaf07f648763748a42ad
388,550
def get_package_name(pin, platform=None):
    """Return the CIPD package name for a pin.

    Platform-specific pins get the given platform suffix, or the
    '${platform}' CIPD placeholder when none is supplied.
    """
    name = pin.package_base
    if not pin.platform:
        return name
    suffix = platform if platform else '${platform}'
    return name + suffix
c2804ad4fab4bf4c3acbb70398aa3c1ae0343f38
526,736
def msa_residential_demand(residential_demand):
    """
    Residential demand aggregated to the MSA (row-wise sum, NaNs as 0).
    """
    df = residential_demand.local
    row_totals = df.fillna(0).sum(axis=1)
    return row_totals.to_frame('msa')
8db34b1b72314ad5cc42d0f4475b354a522f8675
445,223
def color_in_list(chip, items):
    """
    Check whether the same color already exists in a list.

    :param chip: Item whose color (index 1) will be checked
    :param items: List of elements already in streak
    :return: True if the color already exists, False otherwise
    """
    return any(existing[1] == chip[1] for existing in items)
0b5a467e51f531f4043031088615a30ce8639833
269,793
def _is_python_file(filename):
    """Check whether the input file looks like a Python script.

    Returns True when the filename ends in ".py", or when the first line
    contains both "python" and "#!" (a shebang); False otherwise.
    """
    if filename.endswith('.py'):
        return True
    with open(filename, 'r') as handle:
        first_line = handle.readline()
    return '#!' in first_line and 'python' in first_line
a6d99166c6b76c4ae0ad5f5036951986fd5a102b
696,912
def lower_first(string):
    """Lower the first character of the string.

    Uses slicing (``string[:1]``) so the empty string is returned unchanged
    instead of raising IndexError as ``string[0]`` did.
    """
    return string[:1].lower() + string[1:]
0cde723bcdf2222ea710dad5f713842ea36a72d3
404,236
def select_answer(list_of_answers):
    """Prompt the player until they enter one of the allowed answers."""
    answer = ""
    while answer not in list_of_answers:
        answer = input(">>>")
    return answer
03320e5d677d9dc07ea564b60294e098d38763bc
97,371
from datetime import datetime


def to_time_string(value):
    """
    Get the time string representation of the input with utc offset,
    for example: `23:40:15`.

    :param datetime | time value: input object to be converted.
    :rtype: str
    """
    if isinstance(value, datetime):
        # Keep the tzinfo when reducing a datetime to its time part.
        value = value.timetz()
    return value.isoformat(timespec='seconds')
b81ffff8b4ab626e0094bcfeb0bf6916de89d344
28,172
def is_relevant_asset(asset: dict):
    """
    Return whether an asset is relevant for this process: its name must
    contain "linux" or "win32".

    :param asset: The asset information as it is available in the samples directory.
    :type asset: dict
    :return: bool
    """
    name = asset["name"]
    return ("linux" in name) or ("win32" in name)
de142fdcc0cb096899d5124a16a2cbd722879e2b
570,062
def entradaValida(p1, p2, p3, p4, p5, p6, p7, p8, p9):
    """
    Receive the nine board positions and return True only when EVERY
    position holds " ", "x" or "o"; False otherwise.

    Fix: the previous version returned True as soon as ANY single position
    was valid, contradicting the documented contract that each variable
    must be valid.
    """
    valid = (" ", "x", "o")
    return all(p in valid for p in (p1, p2, p3, p4, p5, p6, p7, p8, p9))
811ff88cfca4e4150b1a42946f2eb71c522b4886
180,814
import math


def gen_split_slices(total_len, part_len=None, n_parts=None):
    """Generate slices to split a sequence into smaller pieces.

    Args:
        total_len (int): Length of the full sequence.
        part_len (int, optional): Length of each piece; may be None when
            n_parts is specified.
        n_parts (int, optional): Number of pieces; ignored if part_len is
            specified.

    Returns:
        (list of slice) Slices to split the sequence.

    Raises:
        ValueError: when neither part_len nor n_parts is given.
    """
    if part_len is not None:
        # part_len wins over n_parts when both are supplied.
        n_parts = math.ceil(total_len / part_len)
    elif n_parts is None:
        raise ValueError('must specify either part_len or n_parts')
    return [
        slice(i * total_len // n_parts, (i + 1) * total_len // n_parts)
        for i in range(n_parts)
    ]
c654e629a8785fd7d4c108f3b76828c25eca6518
324,279
def get_package_version(package):
    """
    Return the version number of a Python package as a list of integers,
    e.g. 1.7.2 becomes [1, 7, 2].
    """
    return list(map(int, package.__version__.split('.')))
682eb4ffdba67d189997ceb629b06cb1ccb2a437
47,404
def get_theme_path(theme):
    """Return the theme's path — which is identical to the theme's name."""
    return theme
a4f151e24bdaf31cdf5e849c84ae5a68b5a75a37
424,830
def map_patches_to_sources(inverse_mapping, centred_img_patches):
    """
    Project centred image patches into source space.

    Parameters
    ----------
    inverse_mapping : numpy array (floats)
        The {NUM_MODES x p} matrix transform from image patches to sources.
    centred_img_patches : numpy array (floats)
        The {p x NUM_PATCHES} array of centred vectorised image patches.

    Returns
    -------
    numpy array (floats)
        The {NUM_MODES x NUM_PATCHES} array of sources.
    """
    sources = inverse_mapping @ centred_img_patches
    return sources
ed02887b145e29958a5dbfad091a43d1a1e4e44b
374,848
def channel_from_snippet(resp):
    """
    Convert a Youtube api snippet response into a dictionary of channel info.

    Contains: name, image, description, channel_id
    """
    snippet = resp['snippet']
    return {
        'name': snippet['title'],
        'image': snippet['thumbnails']['high']['url'],
        'description': snippet['description'],
        'channel_id': snippet['resourceId']['channelId'],
    }
36ed93a02b18c4bbc9436477d9efdadf7287c663
448,987
def dict_to_cvode_stats_file(file_dict: dict, log_path: str) -> bool:
    """
    Turns a dictionary into a delphin cvode stats file.

    Fix: the output file is now opened with a context manager so the
    handle is closed even when a row raises (e.g. a missing key); the
    original leaked the handle on error. The manual space-padding is
    replaced with ``str.rjust``, which produces byte-identical output.

    :param file_dict: Dictionary holding the information for the cvode stats file
    :param log_path: Path to were the cvode stats file should be written
    :return: True
    """
    with open(log_path + '/integrator_cvode_stats.tsv', 'w') as file_obj:
        file_obj.write(' Time [s]\t Steps\t RhsEvals\t LinSetups\t NIters\t NConvFails\t NErrFails\t'
                       ' Order\t StepSize [s]\n')
        for line_index in range(0, len(file_dict['time'])):
            # Column widths match Delphin's native cvode stats layout.
            columns = [
                format(file_dict['time'][line_index], '.10f').rjust(25),
                str(file_dict['steps'][line_index]).rjust(10),
                str(file_dict['rhs_evaluations'][line_index]).rjust(10),
                str(file_dict['lin_setups'][line_index]).rjust(10),
                str(file_dict['number_iterations'][line_index]).rjust(8),
                str(file_dict['number_conversion_fails'][line_index]).rjust(11),
                str(file_dict['number_error_fails'][line_index]).rjust(11),
                str(file_dict['order'][line_index]).rjust(6),
                format(file_dict['step_size'][line_index], '.6f').rjust(14),
            ]
            file_obj.write('\t'.join(columns) + '\n')
    return True
4b6d92ad610c47eed5b2e593980a74f617ed44f4
9,310
def serial_line_endings_to_sublime(text, line_endings):
    """
    Convert serial-port line endings into Sublime Text line endings.

    :param text: the text whose line endings should be converted
    :param line_endings: the serial's line-ending setting: "CR", "LF", or "CRLF"
    :return: the converted text
    """
    # Map each setting to a (search, replacement) pair; "LF" (or any
    # unknown value) leaves the text untouched.
    replacements = {"CR": ("\r", "\n"), "CRLF": ("\r", "")}
    if line_endings in replacements:
        old, new = replacements[line_endings]
        return text.replace(old, new)
    return text
e8304fffdc94d0e16cdfeedb26708de970fe63fd
570,320
import torch


def torch_eye(n, m=None, out=None):
    """
    Like `torch.eye()`, but works with cuda tensors.

    :param n: number of rows.
    :param m: number of columns; defaults to ``n``.
    :param out: optional output tensor (may be a cuda tensor on old
        torch versions where ``torch.eye`` rejects them).
    :return: an ``n x m`` matrix with ones on the main diagonal.
    """
    if m is None:
        m = n
    try:
        return torch.eye(n, m, out=out)
    except TypeError:
        # Only catch errors due to torch.eye() not being available for cuda tensors.
        module = torch.Tensor.__module__ if out is None else type(out).__module__
        if module != 'torch.cuda':
            raise
        Tensor = getattr(torch, torch.Tensor.__name__)
        cpu_out = Tensor(n, m)
        # Bug fix: the fallback previously called torch.eye(m, n, ...),
        # swapping the dimensions and producing a transposed (m x n)
        # matrix whenever n != m.
        cuda_out = torch.eye(n, m, out=cpu_out).cuda()
        return cuda_out if out is None else out.copy_(cuda_out)
9ba8ca767ea4bc60dee5ed3b35d4eb82afca609b
632,288
def expandToDictionary(data, key: str):
    """
    Wrap a non-dictionary value in a dictionary under the given key.

    Eg. a = value -> {"key": value}; a dict argument is returned unchanged.
    """
    if isinstance(data, dict):
        return data
    return {key: data}
07ea2bd7685a5b649c3dc8c2f37b6807f9c55ac2
241,946
def _get_mock_key_pair_dict( ) -> dict: """Create and return a dict of mock key pairs""" return { 'KeyPairs': [ { 'KeyPairId': 'mock-key-pair-1', 'KeyFingerprint': 'mock-key-fingerprint-1', 'KeyName': 'mock-key-pair-name-1', 'KeyType': 'mock-key-pair-type-1', 'Tags': [ { 'Key': 'mock-key-1', 'Value': 'mock-value-1' }, ], }, { 'KeyPairId': 'mock-key-pair-2', 'KeyFingerprint': 'mock-key-fingerprint-2', 'KeyName': 'mock-key-pair-name-2', 'KeyType': 'mock-key-pair-type-2', 'Tags': [ { 'Key': 'mock-key-2', 'Value': 'mock-value-2' }, ], }, ], }
106ac55fbaa08c6b57bb1d8f913bbae59ff7a068
387,944
def compute_reduced_cost(column, duals):
    """Compute and return the reduced cost of 'column'.

    The reduced cost is the column's objective coefficient minus the
    dual-weighted sum of its row coefficients.
    """
    dual_contribution = 0
    for index, coef in zip(column.row_indices, column.row_coefficients):
        dual_contribution += duals[index] * coef
    return column.objective_coefficient - dual_contribution
14b02864f9a9ca10584b3436f1e33e14ffd90fc9
645,684
from pathlib import Path
import yaml
import json


def _read_data_file(path):
    """
    Read the contents of a data file in JSON or YAML format.

    Parameters
    ----------
    path : :class:`pathlib.Path`
        The path of the JSON or YAML data file.

    Returns
    -------
    data : dict
        The contents of the file as a dictionary.
    """
    path = Path(path)
    # Pick the parser by file extension; unknown suffixes raise KeyError.
    readers = {
        ".yml": yaml.safe_load,
        ".yaml": yaml.safe_load,
        ".json": json.loads,
    }
    parse = readers[path.suffix]
    return parse(path.read_text(encoding="utf-8"))
823c9976a618e9cfd1301c51a4b28dced44010c4
510,121
import logging


def new_console_logger(level=logging.WARNING):
    """Create a new console logging handler for use in a logger.

    :param level: minimum level the handler will emit (default WARNING).
    :rtype: logging.StreamHandler
    :return: a configured logging.StreamHandler object
    """
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(levelname)-8s - %(message)s"))
    handler.setLevel(level)
    return handler
9a5ceb915f00a523769ddef02776db0fc9d6448b
361,829
from datetime import datetime
import calendar


def get_month_firstday_and_lastday(year=None, month=None):
    """Return the first and last day of a month.

    :param year: year as int or str; defaults to the current year.
    :param month: month as int or str; defaults to the current month.
    :return: (firstDay, lastDay) — ``datetime`` objects for the first
        and last day of the month.
    """
    today = datetime.today()
    year = int(year) if year else today.year
    month = int(month) if month else today.month
    # monthrange() returns (weekday of day 1, number of days in month);
    # only the day count is needed here.
    days_in_month = calendar.monthrange(year, month)[1]
    return (datetime(year=year, month=month, day=1),
            datetime(year=year, month=month, day=days_in_month))
b3aadf8f425995df03cfa37a12a0eb94105497cd
138,669
def reverse_bytes(hex_string):
    """
    This function reverses the order of bytes in the provided string.
    Each byte is represented by two characters which are reversed as well.

    Note: reversing both the byte order and the two characters within
    each byte amounts to reversing the whole character string.
    """
    chars = len(hex_string)
    if chars > 2:
        # Bug fix: use integer division — in Python 3 the original
        # `chars/2` produced a float, which is not a valid slice index
        # (TypeError for any input longer than two characters).
        half = chars // 2
        return reverse_bytes(hex_string[half:]) + reverse_bytes(hex_string[:half])
    else:
        return hex_string[1] + hex_string[0]
46b5a2d8bd799721f6a72ce1c7b2114e3db3e411
166,094
from typing import Dict
from typing import Any
import yaml
import json


def load_configuration(config_path: str) -> Dict[str, Any]:
    """
    Load a configuration file

    Parameters
    ----------
    config_path: str
        Path to yaml or json configuration file

    Returns
    -------
    dict[str, Any]
        Configuration dictionary; empty when the file is empty or has an
        unrecognised extension
    """
    data = {}
    with open(config_path, "r", encoding="utf8") as f:
        # Fix: also accept the common ".yml" YAML extension — previously
        # only ".yaml" was recognised and ".yml" files silently yielded {}.
        if config_path.endswith((".yaml", ".yml")):
            data = yaml.load(f, Loader=yaml.SafeLoader)
        elif config_path.endswith(".json"):
            data = json.load(f)
    if not data:
        return {}
    return data
90b66b7537dcd0d63756de84cc21f9220f8ebe2b
607,755
from typing import Any


def constantly(x: Any) -> Any:
    """
    Return a function that always returns ``x``, regardless of the
    arguments it is called with.
    """
    return lambda *args, **kwargs: x
d9a98852713b12ed70ede95f2ab7ded01b83deb2
508,064
def readConfigFile(configFile):
    """Read a config file with no section headers into a dict.

    Each line containing '=' is split on the first '=' into a key and a
    value; surrounding whitespace is stripped from both. Lines without
    a '=' are ignored.
    """
    separator = "="
    keys = {}
    with open(configFile) as f:
        for line in f:
            if separator not in line:
                continue
            # partition() splits on the first separator only, so values
            # may themselves contain '='.
            name, _, value = line.partition(separator)
            keys[name.strip()] = value.strip()
    return keys
8e1a3a7ed85f20b0ab172718ebf2f79355d22534
608,102
def export_3dpdf(
    client,
    file_=None,
    filename=None,
    dirname=None,
    height=None,
    width=None,
    dpi=None,
    use_drawing_settings=None,
    sheet_range="all",
):
    """Export a model to a 3D PDF file.

    Args:
        client (obj): creopyson Client.
        `file_` (str, optional): Model name. Defaults to the currently
            active model.
        filename (str, optional): Destination file name, optionally with
            a path. Defaults to the model name with the appropriate file
            extension, in Creo's working directory.
        dirname (str, optional): Destination directory. Defaults to
            Creo's current working directory.
        height (float, optional): PDF image height. Defaults to Creo's
            default export height.
        width (float, optional): PDF image width. Defaults to Creo's
            default export width.
        dpi (int, optional): PDF image DPI. Defaults to Creo's default
            export DPI.
        use_drawing_settings (boolean, optional): Whether to use special
            settings for exporting drawings. Default is False.
        sheet_range (string): Range of drawing sheets to export.
            Default value is "all". Valid values: "all", "current", or a
            range of sheet numbers (ex: "1,3-4").

    Returns:
        dict:
            dirname (str): Directory of the output file
            filename (str): Name of the output file
    """
    data = {"sheet_range": sheet_range}
    if file_ is not None:
        data["file"] = file_
    else:
        # No explicit model: fall back to whatever model is active in Creo.
        active_file = client.file_get_active()
        if active_file:
            data["file"] = active_file["file"]
    # Forward only the options the caller actually supplied.
    optional = {
        "filename": filename,
        "dirname": dirname,
        "height": height,
        "width": width,
        "dpi": dpi,
        "use_drawing_settings": use_drawing_settings,
    }
    data.update({key: value for key, value in optional.items() if value is not None})
    return client._creoson_post("interface", "export_3dpdf", data)
4f6a0b11b7426cc443766f60182210d40d58dee1
597,598
def remove_job_if_exists(update, context, name):
    """Remove every scheduled job with the given name.

    Returns True when at least one job was found and removed, and
    False when no job carried that name.
    """
    jobs = context.job_queue.get_jobs_by_name(name)
    for job in jobs:
        job.schedule_removal()
    return bool(jobs)
a903e39ba8d1e6c4b808a0774835a2aa75159e7a
620,911
def f_read_raw_mat_length(filename, data_format='f4'):
    """len = f_read_raw_mat_length(filename, data_format='f4')
    Return the number of data elements stored in a raw binary file.
    If the data is in shape (N, M), then len = N * M.

    input
    -----
      filename: str, path to the binary data on the file system
      data_format: str, element format following the Python protocol
               default: 'f4', float32

    output
    ------
      len: int, number of data elements in the data file
    """
    with open(filename, 'rb') as file_ptr:
        file_ptr.seek(0, 2)            # jump to end of file
        bytes_num = file_ptr.tell()    # offset there == file size in bytes
    # 'f4' elements occupy 4 bytes each; other formats report raw bytes.
    if data_format == 'f4':
        return int(bytes_num / 4)
    return bytes_num
7abaa2a31733b31e6bd1a5397783515a9502593a
294,348
def range_squared(n):
    """Return the list [0, 1, 4, 9, 16, ..., (n-1)^2] for non-negative n.

    If n is zero, the empty list is returned (range(0) is already empty,
    so no special case is needed).

    Raises:
        ValueError: if n is negative. The original returned an error
            *string* instead, which callers could silently mistake for
            valid data.
    """
    if n < 0:
        raise ValueError("n must be zero or a positive integer")
    return [i ** 2 for i in range(n)]
45ffd826f2b9a4cdbbda3fe4fddb7d42dfab85e2
337,265
def cmp(a, b):  # pragma: no cover
    """
    Replacement for built-in function ``cmp`` that was removed in Python 3.

    Note: Mainly used for comparison during sorting.
    """
    # None sorts before everything else; two Nones compare equal.
    if a is None:
        return 0 if b is None else -1
    if b is None:
        return 1
    return (a > b) - (a < b)
bcb268df89f7a0c4201ac5dc549866d449597a81
460,880
def irange(*args):
    """
    Similar to ``range`` but stop is an inclusive upper bound.
    """
    argc = len(args)
    if argc == 0:
        raise TypeError("irange expected at least 1 arguments, got 0")
    if argc > 3:
        raise TypeError("irange expected at most 3 arguments, got " + str(argc))
    start, step = 0, 1
    if argc == 1:
        (stop,) = args
    elif argc == 2:
        start, stop = args
    else:
        start, stop, step = args
    if step == 0:
        raise ValueError("irange() step argument must not be zero")
    # Shift the bound one unit in the step direction so stop is included.
    return range(start, stop + (1 if step > 0 else -1), step)
c25e8b7ab07a645199233f033a1417dac5355ca2
603,623
def decode_distribution(encoded_distribution):
    """
    Decode the cnn json distribution (usually encoded data)
    Return a list of float
    """
    # Already-decoded lists pass straight through.
    if isinstance(encoded_distribution, list):
        return encoded_distribution
    assert isinstance(encoded_distribution, str)
    return [float(item) for item in encoded_distribution.split(',')]
b18b867bab515a6679ccdf082b61cf4b00cd701a
227,243
def get_metadata(session, name):
    """
    Gets meta data from rubygems.org for the given gem.

    NOTE(review): the original docstring said "pypi", but the request
    below goes to the rubygems.org API.

    :param session: requests Session instance
    :param name: str, gem name
    :return: dict, parsed JSON metadata; empty dict unless the response
        status is HTTP 200
    """
    resp = session.get("https://rubygems.org/api/v1/gems/{}.json".format(name))
    if resp.status_code == 200:
        return resp.json()
    return {}
e0cce4b74e9c3e17b8f42b9b97fd6a3d48f66780
240,496
def get_trig_id(ctx) -> str:
    """Return the component id that triggered a Dash callback.

    Pass in ``dash.callback_context``; the id is the portion of the
    first trigger's ``prop_id`` before the '.' separator.
    """
    prop_id = ctx.triggered[0]['prop_id']
    trigger_id, _, _ = prop_id.partition('.')
    return trigger_id
c4d3a8355cf084eef26835227477c291a9104c57
175,581
def to_numpy(tensors):
    """Converts tf.Tensors to numpy array.

    Recurses through (possibly nested) lists and dicts, calling
    ``.numpy()`` on every leaf tensor.

    Parameters
    ----------
    tensors : tf.Tensor | dict | list

    Returns
    -------
    arrays : np.array | dict | list
    """
    # Fix: use isinstance instead of `type(x) == list/dict` so subclasses
    # such as OrderedDict and defaultdict are recursed into rather than
    # failing with AttributeError on .numpy().
    if isinstance(tensors, list):
        return [to_numpy(t) for t in tensors]
    if isinstance(tensors, dict):
        return {k: to_numpy(v) for k, v in tensors.items()}
    return tensors.numpy()
b089cddb399a3ffb43a22059d2ca74bbff87869c
150,126
def _scale_formatter(tick_value: float, pos: int, factor: float) -> str: """ Function for matplotlib.ticker.FuncFormatter that scales the tick values according to the given `scale` value. """ return "{0:g}".format(tick_value*factor)
cc225b1a589ec2468d71bc5670d3d769b81c3c2a
375,949
import torch


def rtril(M, diagonal=0):
    """Takes the lower-triangular of the rightmost 2 dimensions.

    :param M: tensor whose trailing two dimensions form the matrices.
    :param diagonal: which diagonal to keep (as in ``torch.tril``).
    :return: M with entries above the chosen diagonal zeroed.
    """
    # Fix: build the mask on M's device with M's dtype. The original
    # created torch.ones(...) on the default (CPU, float32) device, which
    # raises for CUDA inputs and implicitly promotes other dtypes.
    mask = torch.tril(
        torch.ones(M.shape[-2], M.shape[-1], dtype=M.dtype, device=M.device),
        diagonal=diagonal)
    return M * mask
f8538e7fb069e0ccbcc58a22b7f174d3e9e677bf
413,250
import requests


def urlretrieve(url, dest, timeout=300):
    """
    Download a file.

    :param url: The url of the source file.
    :param dest: destination file path.
    :param timeout: request timeout in seconds (default 300).
    :return: 0 on success (HTTP 200), -1 otherwise.
    """
    r = requests.get(url, allow_redirects=True, timeout=timeout)
    if r.status_code != 200:
        # Bug fix: the destination used to be opened (and truncated)
        # before the status check, leaving an empty file behind on
        # every failed download.
        return -1
    with open(dest, 'wb') as f:
        f.write(r.content)
    return 0
f49a51be5ae2a7fadedc1e624c48da25de86b5b1
335,106