Columns:
  content: string (length 39 – 9.28k)
  sha1: string (length 40)
  id: int64 (8 – 710k)
import click


def prompt_text(text, default=None):
    """Prompt user text input"""
    prompt_ans = click.prompt(click.style(text, fg="blue"), default=default)
    return prompt_ans
0df96f28370e87fe4fb2a35f00c97610573afeba
561,173
from datetime import datetime


def change_date_format(date: str) -> str:
    """Parse a "%Y-%m-%d" date string and return it formatted as "%m/%d/%Y"."""
    datetime_object = datetime.strptime(date, "%Y-%m-%d")
    return datetime_object.strftime("%m/%d/%Y")
284910b267e87f4616b4d0e8aa4aefd3db9fcafe
639,078
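Illustrative usage sketch for the snippet above (not part of the dataset row; assumes change_date_format is in scope, and the date value is made up):

change_date_format("2021-03-07")  # -> '03/07/2021'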
def deep_round(A, ndigits=5):
    """
    Rounds numbers in a list of lists. Useful for approximate equality testing.
    """
    return [[round(val, ndigits) for val in sublst] for sublst in A]
cebfb6b2dbe83bcc7222e0dc1b67ca98e95576c5
18,719
import gzip


def opengz(filename, mode):
    """Open with gzip.open if filename ends in '.gz', otherwise with the builtin open."""
    if filename.endswith('.gz'):
        return gzip.open(filename, mode)
    else:
        return open(filename, mode)
1e0ed8489bc1059e3edf23de49f2420a224d932d
265,040
import binascii
import re


def humanhexlify(data, n=-1):
    """Hexlify given data with 1 space char btw hex values for easier reading for humans

    :param data: binary data to hexlify
    :param n: If n is a positive integer then shorten the output of this
        function to n hexlified bytes.

    Input like 'ab\x04ce' becomes '61 62 04 63 65'

    With n=3 input like data='ab\x04ce', n=3 becomes '61 62 04 ...'
    """
    tail = b' ...' if 0 < n < len(data) else b''
    if tail:
        data = data[:n]
    hx = binascii.hexlify(data)
    return b' '.join(re.findall(b'..', hx)) + tail
883323524ecc8b9f55138d290a38666e5c06bac3
699,934
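Illustrative usage sketch for humanhexlify above (not part of the dataset row; values are made up):

humanhexlify(b"ab\x04ce")        # -> b'61 62 04 63 65'
humanhexlify(b"ab\x04ce", n=3)   # -> b'61 62 04 ...'  (output truncated to 3 bytes)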
def get_members(api, operation):
    """
    Gets the operation's members from api.json. Includes metadata such as
    required parameters needed for the schema.

    Example: get_members(api, 'RegisterTaskDefinitionRequest')

    :param api: the api.json file read into JSON format
    :param operation: the type of task the user wants to use
    :return: all of the references, the required references
    """
    members = {}
    required = []
    if 'required' in api['shapes'][operation]:
        required.extend(api['shapes'][operation]['required'])
    if 'members' in api['shapes'][operation]:
        for member in api['shapes'][operation]['members']:
            members[member] = api['shapes'][operation]['members'][member]['shape']
    return members, required
a28d1f3e1385ba37760c4c5055ee253493f405fe
547,709
def clip(text, max_len=80):
    """Return max_len characters clipped at space if possible"""
    text = text.rstrip()
    if len(text) <= max_len or ' ' not in text:
        return text
    end = len(text)
    space_at = text.rfind(' ', 0, max_len + 1)
    if space_at >= 0:
        end = space_at
    else:
        space_at = text.find(' ', max_len)
        if space_at >= 0:
            end = space_at
    return text[:end].rstrip()
01530ebe0e25a03c4f49204b7816d0f000796a43
657,813
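Illustrative usage sketch for clip above (not part of the dataset row; strings and limits are made up):

clip("The quick brown fox jumps over the lazy dog", 20)  # -> 'The quick brown fox' (cut at the last space before the limit)
clip("unbroken", 3)                                       # -> 'unbroken' (no space to cut at, returned unchanged)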
def apply_message_parser(parser):
    """
    Decorator that passes the result of a message parser to a handler as kwargs.
    The parser will only be passed a `message` kwarg.
    """
    def inner_decorator(handler):
        def wrapped(client, message):
            parser_result = parser(message=message)
            handler(client=client, message=message, **parser_result)
        return wrapped
    return inner_decorator
400fa032e2ce84a8e19bec8d11216b052a0b24e1
120,670
import torch


def l2_normalize(x, dim=None, eps=1e-12):
    """Normalize a tensor over dim using the L2-norm."""
    sq_sum = torch.sum(torch.square(x), dim=dim, keepdim=True)
    inv_norm = torch.rsqrt(torch.max(sq_sum, torch.ones_like(sq_sum) * eps))
    return x * inv_norm
2247413b4ce424c13563169ed0a8f31cfb3f1a3c
319,457
def addf(a, b):
    """Add values without converting them to integers (as |add seems to do)"""
    return a + b
9c9822fb849bd00fe25411e75f46010819ed5e92
547,285
import torch


def reshape(input, shape):
    """
    Returns a tensor with the same data and number of elements as ``input``,
    but with the specified shape. When possible, the returned tensor will be
    a view of ``input``.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.reshape(torch.tensor([[1, 2], [3, 4]]), (-1, ))
        tensor([1, 2, 3, 4])

        >>> ttorch.reshape(ttorch.tensor({
        ...     'a': [[1, 2], [3, 4]],
        ...     'b': {'x': [[2], [3], [5], [7], [11], [13]]},
        ... }), (-1, ))
        <Tensor 0x7fc9efa3bda0>
        β”œβ”€β”€ a --> tensor([1, 2, 3, 4])
        └── b --> <Tensor 0x7fc9efa3bcf8>
            └── x --> tensor([ 2,  3,  5,  7, 11, 13])

    .. note::

        If the given ``shape`` is only one tuple, it should make sure that
        all the tensors in this tree can be reshaped to the given ``shape``.
        Or you can give a tree of tuples to reshape the tensors to different
        shapes.

            >>> import torch
            >>> import treetensor.torch as ttorch
            >>> ttorch.reshape(ttorch.tensor({
            ...     'a': [[1, 2], [3, 4]],
            ...     'b': {'x': [[2], [3], [5], [7], [11], [13]]},
            ... }), {'a': (4, ), 'b': {'x': (3, 2)}})
            <Tensor 0x7fc9efa3bd68>
            β”œβ”€β”€ a --> tensor([1, 2, 3, 4])
            └── b --> <Tensor 0x7fc9efa3bf28>
                └── x --> tensor([[ 2,  3],
                                  [ 5,  7],
                                  [11, 13]])
    """
    return torch.reshape(input, shape)
b75a57b9aaf00ea006ea3fc0ab04c759c1e69663
182,836
def uniq_stable(elems):
    """uniq_stable(elems) -> list

    Return from an iterable, a list of all the unique elements in the input,
    but maintaining the order in which they first appear.

    A naive solution to this problem which just makes a dictionary with the
    elements as keys fails to respect the stability condition, since
    dictionaries are unsorted by nature.

    Note: All elements in the input must be hashable.
    """
    unique = []
    unique_set = set()
    for nn in elems:
        if nn not in unique_set:
            unique.append(nn)
            unique_set.add(nn)
    return unique
15ecacbb1561cc1b972c5bd974286c128ee01488
553,872
import torch


def calc_discounted_return(rewards, discount, final_value):
    """
    Calculate discounted returns based on rewards and discount factor.
    """
    seq_len = len(rewards)
    discounted_returns = torch.zeros(seq_len)
    discounted_returns[-1] = rewards[-1] + discount * final_value
    for i in range(seq_len - 2, -1, -1):
        discounted_returns[i] = rewards[i] + discount * discounted_returns[i + 1]
    return discounted_returns
d937ae4f066e275a32e547541505b7ad9c28b8ba
405,447
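Illustrative usage sketch for calc_discounted_return above (not part of the dataset row; the rewards, discount, and bootstrap value are made up):

rewards = [1.0, 0.0, 2.0]
returns = calc_discounted_return(rewards, discount=0.9, final_value=10.0)
# returns[2] = 2 + 0.9 * 10.0 = 11.0
# returns[1] = 0 + 0.9 * 11.0 = 9.9
# returns[0] = 1 + 0.9 * 9.9  = 9.91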
import torch


def is_tensor_like(inp):
    """
    Returns ``True`` if the passed-in input is a Tensor-like.

    Currently, this occurs whenever there's a ``__torch_function__``
    attribute on the type of the input.

    Examples
    --------
    A subclass of tensor is generally a Tensor-like.

    >>> class SubTensor(torch.Tensor): ...
    >>> is_tensor_like(SubTensor([0]))
    True

    Built-in or user types aren't usually Tensor-like.

    >>> is_tensor_like(6)
    False
    >>> is_tensor_like(None)
    False
    >>> class NotATensor: ...
    >>> is_tensor_like(NotATensor())
    False

    But, they can be made Tensor-like by implementing __torch_function__.

    >>> class TensorLike:
    ...     def __torch_function__(self, func, types, args, kwargs):
    ...         return -1
    >>> is_tensor_like(TensorLike())
    True
    """
    return type(inp) is torch.Tensor or hasattr(type(inp), "__torch_function__")
3084351eb5bf7cb26dcba0c3aa4b22f09293e960
392,128
def get_flags(s):
    """
    Get flags, replacing empty flags with '_' for clarity
    (' S ' becomes '_S_')
    """
    m_flag = s.read(1)
    m_flag = m_flag if m_flag.strip() else '_'
    q_flag = s.read(1)
    q_flag = q_flag if q_flag.strip() else '_'
    s_flag = s.read(1)
    s_flag = s_flag if s_flag.strip() else '_'
    return [m_flag + q_flag + s_flag]
c663b57adea3724e6c6b9d553497c5defa3d8111
208,767
import torch


def euclidean_reward(target_mel, pred_mel, reward_min=None, reward_max=None, verbose=0):
    """Negative Euclidean norm of the difference between the two flattened inputs.

    If reward_min or reward_max values are given, it caps the reward at
    appropriate levels.
    """
    target_mel = torch.flatten(target_mel)
    pred_mel = torch.flatten(pred_mel)
    diff = target_mel - pred_mel
    reward = -torch.norm(diff, p=2).view(1, 1)
    if reward_min:
        reward = torch.max(torch.Tensor([reward, reward_min])).view(1, 1)
    if reward_max:
        reward = torch.min(torch.Tensor([reward, reward_max])).view(1, 1)
    if verbose > 1:
        print("REWARDS: Euclidean reward: {:.6f}".format(reward.item()))
    return reward
f01a11a8c2fa51f09dc5529dfc605b93a0021b98
86,799
def get_by_pickup(state, b1):
    """Generate a pickup subtask."""
    if state.is_true("clear", [b1]):
        return [('pickup_task', b1)]
    return False
3a5a6c9960b210ca8d860f914fb848472590d0be
594,220
from typing import List
from typing import Match
import re


def replace_math(input: str, math: List[str]) -> str:
    """
    input: a string, already processed into HTML by some markdown renderer;
        may contain @@number@@ blocks indicating where math was removed by
        remove_math.
    math: a list of strings containing math blocks to be spliced back into
        input

    Put back the math strings that were saved
    """
    def replacer(match: Match):
        index = int(match.group(1))
        return math[index]

    text = re.sub(r'@@(\d+)@@', replacer, input)
    return text
5103c2cea2e5a085c4461f5cac8f05c77411d085
186,363
import re


def header_match(line):
    """Return True if line matches the HPROF header line, False otherwise"""
    pattern = r'JAVA PROFILE \d.\d.\d, created \w+ \w+ +\d+ \d{2}:\d{2}:\d{2} \d{4}'
    return re.match(pattern, line) is not None
19aa37c993d3d5a1d9bf50224b853bf54477c6da
199,755
def to_wiki_format(site, title, ignore_ns=False):
    """Convert a page title or username to 'canonical' wiki format.

    Wiki format uses spaces instead of underscores, and capitalizes the first
    letter. It also uses custom (not canonical) namespace names. The first
    parameter should be a Pywikibot Site object, and is used to look up
    namespace names.
    """
    def _format(title):
        title = title.replace("_", " ")
        return (title[0].upper() + title[1:]) if title else ""

    if ":" in title and not ignore_ns:
        ns_name, base_name = title.split(":", 1)
        if ns_name in site.namespaces:
            ns_name = site.namespaces[ns_name].custom_name
        return ns_name + ":" + _format(base_name)
    return _format(title)
9141457b10496fcbc17c84614bc13a0e20815682
561,326
def l1_loss(x: float, y: float) -> float:
    """
    Compute the L1 loss function. This is the absolute value of the
    difference of the inputs.

    https://www.bitlog.com/knowledge-base/machine-learning/loss-function/#l1-error

    Parameters
    ----------
    x : float
        The estimate
    y : float
        The actual value
    """
    return abs(x - y)
361e52b5dc2c72e259c55137686fbe0efb2b0f05
171,980
def collect_attributes(attributes, *containers):
    """Collect attributes from arguments. `containers` are objects with
    method `all_attributes` or might be `Nulls`. Returns a list of
    attributes. Note that the function does not check whether the attribute
    is an actual attribute object or a string."""
    # Method for decreasing noise/boilerplate
    collected = []
    if attributes:
        collected += attributes
    for container in containers:
        if container:
            collected += container.all_attributes
    return collected
dd5beaa29dbb7b6d0b137dda8154ae492dd8dae2
291,440
def make_default_user_query(users, query_data, values, search_on, extra_data={}):
    """
    given a query_data dict and values which come from the ui, generate a
    dict that will be used for a user query

    this default query is a within query, that optionally adds some extra
    key/value data to the query dict
    """
    query = {}
    query_str = query_data['query']
    within = query_str + '__in'
    query[within] = values
    extra_info = query_data.get('extra')
    if extra_info:
        query.update(extra_info)
    if extra_data.get('istoggle', True):
        users = users.filter(**query)
        human_query = u"%s is in (%s)" % (search_on, u', '.join(values))
    else:
        users = users.exclude(**query)
        human_query = u"%s is not in (%s)" % (search_on, u', '.join(values))
    return users, human_query
eb055b6fcace0425d783973ca9dc75932c862f28
439,252
def get_nice_size(size, base=1000.0):
    """
    Convert number of bytes to string with KB, MB, GB, TB suffixes
    """
    if size < base:
        return '%sB' % (size)
    if size < base**2:
        return '%.2fKB' % (size / base)
    if size < base**3:
        return '%.2fMB' % (size / base**2)
    if size < base**4:
        return '%.2fGB' % (size / base**3)
    return '%.2fTB' % (size / base**4)
f9a00fd3434f7892766ab481d76bd7c5f48f6355
517,971
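Illustrative usage sketch for get_nice_size above (not part of the dataset row; sizes are made up):

get_nice_size(999)                # -> '999B'
get_nice_size(1500)               # -> '1.50KB'
get_nice_size(1_250_000_000)      # -> '1.25GB'
get_nice_size(2048, base=1024.0)  # -> '2.00KB' (binary-style base)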
def make_figure_basename(local_station_id, reference_station_id, xy_or_yx,
                         matlab_or_fortran):
    """
    Parameters
    ----------
    local_station_id: str
        station label
    reference_station_id: str
        remote reference station label
    xy_or_yx: str
        mode: "xy" or "yx"
    matlab_or_fortran: str
        "matlab" or "fortran". A specifier for the version of emtf.

    Returns
    -------
    figure_basename: str
        filename for figure
    """
    station_string = f"{local_station_id}"
    if reference_station_id:
        station_string = f"{station_string}_rr{reference_station_id}"
    figure_basename = (
        f"synthetic_{station_string}_{xy_or_yx}_{matlab_or_fortran}.png"
    )
    return figure_basename
c6d5e29215050cd507220e7df2fb733e04a64d7e
512,952
def pwd_checker(pwd):
    """A utility to use for verify_client_func. Checks a client's auth password"""
    return (lambda id_data: pwd == id_data.get("pwd"))
aaa0702ebdc296bb0a1e2df308ed0db40b0cc367
570,391
def removeNCharsFromCol(data, n, col, start):
    """
    Removes n characters from the value of a given column for every row
    either from the start or the end of the string

    :param data: The data to process
    :param n: The number of characters
    :param col: The index of the column to alter
    :param start: Remove from start (True) or end (False)
    """
    for i in range(len(data)):
        try:
            data[i][col] = data[i][col][n:] if start else data[i][col][:-n]
        except IndexError:
            pass  # Empty field
    return data
195ddde00341103175ef4110b25706785d0e59e3
426,904
def add_row(key, value):
    """ Add a row to the HTML table """
    body = '<td>' + key + '</td>\n'
    body += '<td>' + str(value) + '</td>\n'
    body += '</tr>\n'
    return body
5c8b78e25001df77a3560234a1eda8a2043a7c9d
473,158
def select(S, i, key=lambda x: x):
    """
    Select the element x of S with rank i (i elements in S < x) in linear time.
    Assumes that the elements in S are unique.

    Complexity: O(n)
    Every step in this algorithm is approximately linear time; the sorting
    here only ever happens on lists of length <= 5.
    """
    # Divide the list into columns of 5
    sublists = [S[k:k + 5] for k in range(0, len(S), 5)]
    # Find the medians of each column
    medians = [
        sorted(sublist, key=key)[len(sublist) // 2]
        for sublist in sublists
    ]
    if len(medians) <= 5:
        # If there are at most 5 columns, take the median of the sorted medians
        x = sorted(medians, key=key)[len(medians) // 2]
    else:
        # Otherwise recursively find the median of medians
        x = select(medians, len(medians) // 2, key=key)
    L = [y for y in S if key(y) < key(x)]
    R = [y for y in S if key(y) > key(x)]
    k = len(L)
    if k > i:
        return select(L, i, key=key)
    if k < i:
        return select(R, i - k, key=key)
    return x
25e2efe2da161af70f4355859f42867b5570b0b8
358,777
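Illustrative usage sketch for the median-of-medians select above, using the corrected base case and key propagation (not part of the dataset row; values are made up):

data = [9, 1, 7, 3, 5, 8, 2, 6, 4, 0]
select(data, 0)  # -> 0 (no elements are smaller: rank 0)
select(data, 5)  # -> 5 (exactly five elements are smaller)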
def control_change_rate_cost(u, u_prev):
    """Compute penalty of control jerk, i.e. difference to previous control input"""
    return (u - u_prev) ** 2
09439fa7b2b6afa306e41e85434db3064be0a481
637,904
def get_area_by_player_position(areas, x, y):
    """
    This function checks the area the player is in given its position in the maps
    """
    epsilon = 1  # Variance of 1 block
    for i, area in areas:
        if (
            area["x1"] - epsilon <= x <= area["x2"] + epsilon
            and area["y1"] - epsilon <= y <= area["y2"] + epsilon
        ):
            return i, area["id"]
88469e04ff95f74311a5427399a4c2c4dec00bc8
192,339
import ast


def handle_operator_code(self, opcode):
    """
    Parses an operator code and returns its string representation.
    Returns an empty string on error.
    """
    if isinstance(opcode, ast.Add):
        op = "+"
    elif isinstance(opcode, ast.Sub):
        op = "-"
    elif isinstance(opcode, ast.Mult):
        op = "*"
    elif isinstance(opcode, ast.MatMult):
        op = "*"
    elif isinstance(opcode, ast.Div):
        op = "/"
    elif isinstance(opcode, ast.Mod):
        op = "%"
    elif isinstance(opcode, ast.Pow):
        op = "^"
    elif isinstance(opcode, ast.LShift):
        op = "<<"
    elif isinstance(opcode, ast.RShift):
        op = ">>"
    elif isinstance(opcode, ast.BitOr):
        op = "|"
    elif isinstance(opcode, ast.BitXor):
        op = "^"
    elif isinstance(opcode, ast.BitAnd):
        op = "&"
    elif isinstance(opcode, ast.FloorDiv):
        op = "//"
    else:
        self.log_with_loc(
            "Failed to identify the operator. Using an empty dummy.",
            loglevel="ERROR")
        op = ""
    return op
701cac1ad4485115b3aca94246e06bd1fee504dc
76,264
import re


def find_mapping(mapping, target):
    """Retrieves sample type from recipe

    Args:
        mapping (dic): regex-to-value mapping
        target (string): target match for regexes

    Returns:
        value (any): desired mapping of target
    """
    for key_re, val in mapping.items():
        expr = re.compile(key_re, re.IGNORECASE)
        if expr.match(target):
            return val
    return None
574b28e91734d78cfa4fe299d8939ca497136467
175,070
def add_dicts(dict1, dict2) -> dict:
    """
    Return a dictionary with the sum of the values for each key in both dicts.
    """
    return {x: dict1.get(x, 0) + dict2.get(x, 0) for x in set(dict1).union(dict2)}
3a17303cee6005bb87ed779dae20fce7e30ef148
155,211
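Illustrative usage sketch for add_dicts above (not part of the dataset row; dictionaries are made up):

a = {"x": 1, "y": 2}
b = {"y": 5, "z": 7}
add_dicts(a, b)  # -> {'x': 1, 'y': 7, 'z': 7} (missing keys are treated as 0)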
import math


def fcst_hr_min(time, start):
    """!Return forecast time in hours and minutes.

    Given a forecast datetime.datetime and an analysis datetime.datetime,
    this returns a tuple containing the forecast hour and minute, rounded
    to the nearest integer minute.

    @param time forecast time as a datetime.datetime
    @param start analysis time as a datetime.datetime
    @returns a tuple (ihours,iminutes)"""
    dt = time - start  # forecast time
    # Convert to hours and minutes, round to nearest minute:
    fhours = dt.days * 24 + dt.seconds / 3600 + dt.microseconds / 3600e6
    (fpart, ihours) = math.modf(fhours)
    if fpart > 1.:
        assert (fpart < 1.)
    iminutes = round(fpart * 60)
    return (ihours, iminutes)
2916387b979e8825501f15fd683cb4a64d4bc937
485,475
def del_suffix(target: str, suffix: str):
    """
    If `target` ends with the `suffix` string and `suffix` is not empty,
    return string[:-len(suffix)]. Otherwise, return a copy of the original
    string.
    """
    if (len(suffix) > 0) and (target.endswith(suffix) is True):
        try:
            # str.removesuffix exists in Python >= 3.9
            target = target.removesuffix(suffix)
        except AttributeError:
            # fallback for Python < 3.9
            target = target[:-len(suffix)]
    return target
ab9307e6b2673a68d5d87af25b02547e6755fb4b
149,342
def concat_body_paragraphs(body_candidates):
    """
    Concatenate paragraphs constituting the question body.
    :param body_candidates:
    :return:
    """
    return ' '.join(' '.join(body_candidates).split())
a0faaa0ae0be0cda007c2af1f6e47f3b745862b3
9,536
def partition(m):
    """Returns the number of different ways m can be written as a sum of at
    least two positive integers.

    >>> partition(100)
    190569291
    >>> partition(50)
    204225
    >>> partition(30)
    5603
    >>> partition(10)
    41
    >>> partition(5)
    6
    >>> partition(3)
    2
    >>> partition(2)
    1
    >>> partition(1)
    0
    """
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n > k:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1] - 1
a629145d817089ecb3c2943ab3c3bb195dc9d016
446,823
import unicodedata


def remove_control_characters(s):
    """Some input (like filenames) has some really nasty control chars.
    This trick removes those (https://stackoverflow.com/a/19016117)"""
    return "".join(ch for ch in s if unicodedata.category(ch)[0] != "C")
f47788f52ccd9d0863349e52d766ecbcc3f64c06
549,447
def read_nix_prop(prop):
    """Returns a nix property value across different nix versions.

    :param prop: the nix Property.
    :return: The value stored in the property.
    """
    try:
        return prop.values[0].value
    except AttributeError:
        return prop.values[0]
a330216dfc3c080e466d5c18e2b9368ca02cf9b3
170,263
def available_colors(G, vertex, number_of_colors):
    """Returns all the available colors for vertex

    Parameters:
        G: a networkx graph with Graph Nodes
        vertex: the vertex number (int)
        number_of_colors: the number of colors (int)

    Returns:
        colors: list of available colors (list)
    """
    colors = [x for x in range(0, number_of_colors)]
    for neighbor in G.neighbors(vertex):
        try:
            index = colors.index(G.nodes[neighbor]['node'].color)
            colors.pop(index)
        except Exception:
            pass
    return colors
b19dfe9516eb7a74d259a3d69b868e78fe56d3e9
696,341
from typing import Iterator
import json


def dictsets_match(dictset_1: Iterator[dict], dictset_2: Iterator[dict]):
    """
    Tests if two dictsets match regardless of the order of the records in
    the dictset. Return is True if the sets match.

    Note that this will exhaust a generator.

    Parameters:
        dictset_1: iterable of dictionaries
            The first dictset to match
        dictset_2: iterable of dictionaries
            The second dictset to match

    Returns:
        boolean
    """
    def _hash_set(dictset: Iterator[dict]):
        xor = 0
        for record in dictset:
            entry = json.dumps(record)
            _hash = hash(entry)
            xor = xor ^ _hash
        return xor

    return _hash_set(dictset_1) == _hash_set(dictset_2)
c966c553096851c876f4f4f14675073f861d3372
576,215
from pathlib import Path


def get_image_paths(image_folder: Path) -> list:
    """
    Get all images in image folder ending in .jpg or .png
    """
    suffix_set = {'.jpg', '.png'}
    return [
        image for image in image_folder.iterdir()
        if image.suffix in suffix_set
    ]
edbeacbe899f2e19a1d8a460d2b30215e28c2a46
643,909
import torch


def dice_loss(yhat, ytrue, epsilon=1e-6):
    """
    Computes a soft Dice Loss

    Args:
        yhat (Tensor): predicted masks
        ytrue (Tensor): targets masks
        epsilon (Float): smoothing value to avoid division by 0

    output:
        DL value with `mean` reduction
    """
    # compute Dice components
    intersection = torch.sum(yhat * ytrue, (1, 2, 3))
    cardinal = torch.sum(yhat + ytrue, (1, 2, 3))

    return torch.mean(1. - (2 * intersection / (cardinal + epsilon)))
332ff43c0ce0f46b2b577823eeb3becbe1087aac
510,257
def safe_zip(*args):
    """zip, with a guarantee that all arguments are the same length.

    (normal zip silently drops entries to make them the same length)
    """
    length = len(args[0])
    if not all(len(arg) == length for arg in args):
        raise ValueError("Lengths of arguments do not match: "
                         + str([len(arg) for arg in args]))
    return zip(*args)
24b0c8c84f7f45d32ca4cc41bcbdf89327973191
399,751
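Illustrative usage sketch for safe_zip above (not part of the dataset row; lists are made up):

list(safe_zip([1, 2, 3], ["a", "b", "c"]))  # -> [(1, 'a'), (2, 'b'), (3, 'c')]
safe_zip([1, 2], [1, 2, 3])                 # raises ValueError: Lengths of arguments do not match: [2, 3]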
def _DecodeRawShape(op):
    """Shape function for the DecodeRaw op."""
    # NOTE(mrry): Last dimension is data-dependent.
    return [op.inputs[0].get_shape().concatenate([None])]
40d90298ac0bb672d14469b681a96da5b83fb7e7
146,811
def isVC(E, C):
    """checks if C is a vertex cover for graph E

    C -- set of vertices
    E -- graph represented as a list of edges

    returns True/False"""
    for (x, y) in E:
        if (x not in C) and (y not in C):
            return False
    return True
35faa426df5ed64bfa956b88d594bf05e9338291
475,537
def abs_value_equal(x, y):
    """Return whether or not the absolute value of both numbers is the same.
    Please refrain from using libraries (abs)

    >>> abs_value_equal(-2, -2)
    True
    >>> abs_value_equal(-3, 3)
    True
    >>> abs_value_equal(1, 2)
    False
    >>> abs_value_equal(3, 3)
    True
    >>> abs_value_equal(-6, -6)
    True
    >>> abs_value_equal(-1, -5)
    False
    >>> abs_value_equal(5, -6)
    False
    """
    return ((x == y) or (x + y == 0))
5047caa23e0e1fd78e8696f706c5aa8ad43b0e22
48,291
def Top5Criterion(x, y, model):
    """Returns True if model prediction is in top5"""
    return (model(x).topk(5)[1] == y.view(-1, 1)).any(dim=-1)
2442e70360a22ecd538aa378baeadf44b431213f
397,751
def is_valid_iteration(n):
    """
    Returns: True if n is an int >= 1; False otherwise.

    Parameter n: the value to check
    Precondition: NONE (n can be any value)
    """
    return (type(n) == int and 1 <= n)
a1ad9722af342f6b33d4380bde3bf97071cc1dc5
565,667
import hashlib


def get_hashed(key):
    """
    Returns a hashed version of a given key. One-way; not reversible.
    """
    return hashlib.md5(key.encode('utf-8')).hexdigest()
cc88aeb0073f8e5733c11cef613ed009ca55aaf0
343,380
def remove_vectored_io_slice_suffix_from_name(name, slice):
    # type: (str, int) -> str
    """Remove vectored io (stripe) slice suffix from a given name
    :param str name: entity name
    :param int slice: slice num
    :rtype: str
    :return: name without suffix
    """
    suffix = '.bxslice-{}'.format(slice)
    if name.endswith(suffix):
        return name[:-len(suffix)]
    else:
        return name
00041b582eff2bf1a78717dfaf5ed00d042d8c94
394,469
def splitter(model):
    """
    Splits model parameters into multiple groups to allow fine-tuning
    """
    params = list(model.parameters())
    return [
        # weights and biases of the first linear layer
        params[:2],
        # weights and biases of the second linear layer
        params[2:],
    ]
2ac16b536bd50884c50ade3fb67f6ca43a16799d
13,517
from typing import Iterable
from typing import Optional


def mconcat(xs: Iterable[Optional[str]]) -> str:
    """Concatenate strings that are not None from a sequence."""
    return ''.join([x for x in xs if x is not None])
a1c91f442bd9b1c77a7fbd1eba5705032a079284
295,232
def ismarionette(session):
    """
    bool: Whether the session is using Marionette.
    """
    return getattr(session.driver, "_marionette", False)
24368759f626ee1c1d3b6aade19fcf8ad0b19f17
131,424
def _update_versions_in_requirements(requirements: "list[str]", packages: dict) -> str:
    """Update the versions in requirements with the provided package to version mapping."""
    for i, package in enumerate(requirements.copy()):
        if package.startswith("#"):
            continue
        if not len(package.strip()):
            continue
        try:
            requirements[i] = package + "==" + packages[package.lower().replace("_", "-")]
        except KeyError:
            raise AttributeError(f"{package} could not be found in poetry.lock") from None
    return "\n".join(requirements)
374b4b6c3f212754a011511bf59d71f8e6e3a5d6
509,701
def unsort(input, indices, dim=1):
    """Unsort the tensor based on indices which are created by sort.

    dim is the dimension of batch size.
    """
    output = input.new(*input.size())
    output.scatter_(dim, indices.unsqueeze(0).unsqueeze(2), input)
    return output
49cf5555d087942796289e7378198f2943345f7c
135,044
def distance(a, b):
    """Simple distance between two 1D arrays"""
    return ((a - b) ** 2).sum() ** (0.5)
073b5e7ceb41266dd8edd1cafd62da140ac55846
615,419
def filter_example(ex, use_src_len=True, use_mt_len=True, use_pe_len=True,
                   min_src_len=1, max_src_len=float('inf'),
                   min_mt_len=1, max_mt_len=float("inf"),
                   min_pe_len=1, max_pe_len=float("inf")):
    """Return whether an example is an acceptable length.

    If used with a dataset as ``filter_pred``, use :func:`partial()`
    for all keyword arguments.

    Args:
        ex (torchtext.data.Example): An object with a ``src``, ``mt``, and
            ``pe`` property.
        use_src_len (bool): Filter based on the length of ``ex.src``.
        use_pe_len (bool): Similar to above.
        min_src_len (int): A non-negative minimally acceptable length
            (examples of exactly this length will be included).
        min_pe_len (int): Similar to above.
        max_src_len (int or float): A non-negative (possibly infinite)
            maximally acceptable length (examples of exactly this length
            will be included).
        max_pe_len (int or float): Similar to above.
    """
    src_len = len(ex.src[0])
    mt_len = len(ex.mt[0])
    pe_len = len(ex.pe[0])
    return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
           (not use_mt_len or min_mt_len <= mt_len <= max_mt_len) and \
           (not use_pe_len or min_pe_len <= pe_len <= max_pe_len)
bbec0e3b3e4e13485eea698bc7a6a9e726ffbd78
585,646
def fish(fs):
    """
    Transform a list of fish represented by their time-to-spawn state into a
    list of the number of fish in each state indexed by their time-to-spawn.

    Under this strategy, the example input [3, 4, 3, 1, 2] would be
    transformed into [0, 1, 1, 2, 1, 0, 0, 0, 0].
    """
    fis = [0] * 9
    for f in fs:
        fis[f] += 1
    return fis
79837d897c730be13b150eb56d9c5bb49d8bb643
170,344
def get_el_feedin_tariff_chp(q_nom, el_feedin_epex=0.02978, vnn=0.01):
    """
    Calculates feed-in tariff for CHP-produced electricity.

    Parameters
    ----------
    q_nom : nominal thermal power of chp in kW
    el_feedin_epex : epex price for electricity in Euro/kWh
    vnn : avoided grid charges ("vermiedenes Netznutzungsentgelt") in Euro/kWh

    Returns
    -------
    feed-in tariff in EUR/kWh
    """
    # KWKG 2016 revenues for el. feed-in + feedin price from epex + avoided grid charges
    if q_nom < 50:
        return 0.08 + el_feedin_epex + vnn  # Euro/kWh, only paid for 60.000 flh
    elif 50 <= q_nom < 100:
        return 0.06 + el_feedin_epex + vnn  # Euro/kWh, only paid for 30.000 flh
    elif 100 <= q_nom < 250:
        return 0.05 + el_feedin_epex + vnn  # Euro/kWh, only paid for 30.000 flh
    elif 250 <= q_nom < 2000:
        return 0.044 + el_feedin_epex + vnn  # Euro/kWh, only paid for 30.000 flh
    else:  # q_nom > 2000
        return 0.031 + el_feedin_epex + vnn
d25c28aa678a5b30a2c13f143b3cfb46cd9d2edb
609,194
def check_padding(query):
    """
    Check for missing padding in base64 encoding and fill it up with "=".

    :param query:
    :return: query
    """
    missing_padding = len(query) % 4
    if missing_padding:
        query += "=" * (4 - missing_padding)
    return query
d8ad3c96074d311dbd5ba17bb93d7ca7a8b5ccab
10,841
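Illustrative usage sketch for check_padding above (not part of the dataset row; strings are made up):

check_padding("dGVzdA")    # -> 'dGVzdA==' (length 6, two '=' added)
check_padding("dGVzdHM")   # -> 'dGVzdHM=' (length 7, one '=' added)
check_padding("dGVzdA==")  # -> 'dGVzdA==' (length already a multiple of 4, unchanged)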
def create_set_list(test_set, fil_df):
    """
    This function creates the list that is going to be appended to the
    dataframe to determine which specific sentences in the data frame are a
    part of the testing and training sets. When this list is added to the
    dataframe as a column called 'Set' it is going to have a 1 in the row
    where the sentence in that row is a part of the test set and a 0 in the
    row where the sentence in that row is a part of the training set. It
    returns the filtered dataframe with this list appended on to it with the
    column label 'Set'.
    """
    # Create a list of 0's
    set_list = []
    sentences = fil_df['Input.text'].tolist()
    for i in range(len(fil_df['top_label'])):
        set_list.append(0)

    # put a 1 in the row where a testing sentence is
    for sentence in test_set:
        index = sentences.index(sentence)
        set_list[index] = 1

    # add the testing and training list to the dataframe
    fil_df['Set'] = set_list
    return fil_df
575789eae77e916ac7c47620ee42da4e7efb351c
369,331
def _ensure_rst_content_directive(text: str):
    """
    Ensures a `contents` directive at the top of every document.
    """
    return (
        '.. contents::\n'
        '\n'
        + text
    )
598ae77a87cfb648620f96d0d1b516a449c3befb
140,019
def collect_fragments(folder, foldername, order):
    """Collects the fragments in 'folder' and returns the pathnames of the
    fragments (starting from folder) ordered by the value of the order
    metadata parameter in each fragment.
    """
    fragments = [entry for entry in folder if folder[entry].is_fragment()]
    if order:
        fragments.sort(
            key=lambda item: folder[item].bestmatch('ANY')['metadata'][order],
            reverse=True)
    return [foldername + "/" + entry for entry in fragments]
dde6b4622f93a2f2a934a479e10a894480e56fe2
500,713
import math


def logit(x):
    """Inverse sigmoid for a single value x"""
    return math.log(x / (1 - x))
eb56c75166bfb768cec64990d11afdb6352784d8
516,670
import json


def json_loads(data, *args, **kwargs):
    """A custom JSON loading function which passes all parameters to the
    json.loads function."""
    return json.loads(data, *args, **kwargs)
1fa1c4af090920e9eb82a8c32b0a1418c07f5801
245,706
def trajectory_importance_max_avg(states_importance):
    """
    computes the importance of the trajectory, according to max-avg approach:
    delta(max state, avg)
    """
    avg = sum(states_importance) / len(states_importance)
    return max(states_importance) - avg
8fde9c62c661f8ea2041a159864d2c27e662988d
64,266
from typing import Union
from pathlib import Path
from typing import List


def parse_file_to_list(file: Union[Path, str]) -> List[str]:
    """
    Given a text file, read contents to list of lines. Strip lines, ignore
    empty and comment lines.

    Args:
        file: Input file.

    Returns:
        List of lines.
    """
    # loop lines
    output = []
    for line in Path(file).read_text(encoding="utf8").splitlines(keepends=False):
        # strip line
        line = line.strip()

        if line == "":
            # skip empty line
            continue

        if line[0] == "#":
            # skip comment line
            continue

        # collect
        output.append(line)
    return output
1c5208399eb42658aca80debc169399bf7d32579
273,288
def average(lst):
    """ calculate average of a list (of numbers) """
    return sum(lst) / len(lst)
dc18524ac4abf4e7fd98a7fc6e11bace04686747
53,052
import json


def fetch_abi(erc721=False):
    """
    Fetch ABI of a given contract, if it is verified
    Return default ABI, otherwise

    :param erc721: if the contract is ERC721
    :type erc721: bool
    :return: ABI of the contract
    :type: dict
    """
    if erc721:
        with open('app/abi/default-erc721.json') as abi:
            return json.load(abi)

    with open('app/abi/default-erc20.json') as abi:
        return json.load(abi)
d5074ecf3142161e63116c34b2028b18feb58ee7
554,077
import torch
import math


def generate_A_with_L_mu(n, d, L, mu=-1):
    r"""
    Generate a data matrix A for f(x) = \frac{1}{n} || A x - b ||_2^2
    with L-smoothness and mu-convexity.

    The L-smoothness is the largest eigenvalue of the hessian of f:
    hessian(f)(x) = \frac{2}{n} A^T A
    """
    assert mu <= L

    # Generate unitary matrix U and V
    dummy_matrix = torch.randn(n, d)
    U, _, V = torch.linalg.svd(dummy_matrix)

    # Construct matrix S such that S.T @ S has largest elements L
    # and smallest elements mu.
    smallest = math.sqrt(abs(mu) * n / 2)
    largest = math.sqrt(L * n / 2)
    k = min(n, d)
    diag = torch.linspace(start=smallest, end=largest, steps=k)
    S = torch.zeros(n, d)
    S[list(range(k)), list(range(k))] = diag

    # Reconstruct A
    return U @ S @ V.T
b511429ce28480137f7b229bac4c1dc683e8bbf5
319,677
def select_atoms_from_list(PDB_atom_name, atoms_list):
    """
    Given a pdb atom name string and a list of atoms (BioPython Atom) it
    returns the Bio.Atom correspondent to the atom name.

    :param PDB_atom_name: string with an atom name
    :param atoms_list: list of Bio.Atoms
    :return: Bio.Atom correspondent to the atom name
    """
    for atom in atoms_list:
        if atom.name == PDB_atom_name:
            return atom
fbeec150431898407bac6b000759dcaa5908463c
422,969
def ner_to_sent(sent, replaced, tag="<NE>"):
    """
    Args:
        - sent is the sentence that has the NER tags in them instead of the
          actual named entities.
        - replaced is the corresponding list of named entities that will be
          inserted in the order of appearance for the tags.
        - tag is the tag that was used to replace the named entities.
    Returns:
        - sent but with its original named entities
    Raises:
        - ValueError if the amount of <NE> and the length of the replacement
          list do not match.

    This function serves to convert the result from sent_to_ner back to a
    sentence with the named entities in it.
    """
    if len(replaced) != sent.count(tag):
        raise ValueError("The wrong replaced list has been provided"
                         " to ner_to_sent.")

    for i, word in enumerate(sent):
        if word == tag:
            # pop makes sure that every replacement is inserted exactly once
            sent[i] = replaced.pop(0)

    return sent
6c2d0d6e3f88ef23b7924eca98d28e1adda47b28
500,988
def _lin_f(p, x):
    """Basic linear regression 'model' for use with ODR.

    This is a function of 2 variables, slope and intercept.
    """
    return (p[0] * x) + p[1]
52bc228dd48ee7939fa60cd052989de70a44b197
698,315
import math


def crop(pois, lat, long, max_dist):
    """
    Crop a list to points that are within a maximum distance of a center point.

    :param pois: list of lats and longs
    :param lat: center lat
    :param long: center long
    :param max_dist: max distance
    :return: a filtered list
    """
    # Convert from meters to radians:
    rad_dist = max_dist * math.cos(lat) / 111320
    crop_list = []
    for i in pois:
        if math.hypot(lat - i[0], long - i[1]) <= rad_dist:
            crop_list.append(i)
    return crop_list
687dd3b073fc5e20fb7e3e4b0b511374a590bf14
127,823
def nvl(value, default):
    """
    Evaluates if value is empty or None; if so, returns default.

    Parameters:
        value: the value to evaluate
        default: the default value

    Returns:
        value or default
    """
    if value:
        return value
    return default
67df45a6e63c107dcef99fc7bdbaa7064b695f66
700,438
def unique(valuelist):
    """Return all values found from a list, but each once only and sorted."""
    return sorted(list(set(valuelist)))
1218bb7c353a898b2815c5cd95b8ef71e386b91f
30,023
def normalise_activity_by_sector_month(
    topic_activity, sector_month_labels, sector_variable="sector"
):
    """Normalise s.t. each [sector, month] sums to 1."""
    norm_factor = (
        sector_month_labels.dropna()
        .groupby(
            [
                "month",
                sector_variable,
            ]
        )
        .size()
        .sort_index()
    )

    # Each sector sums to 1
    return topic_activity.reorder_levels(["month", sector_variable]).divide(
        norm_factor.reorder_levels(["month", sector_variable]), axis="rows"
    )
0c9fb4d8cd3b877cc08568bedb7019cbbc40cd90
679,806
def e_mag2frac(errmag):
    """Convert mag error to fractionary flux error"""
    return 10.**(.4 * errmag) - 1.
2db0b3985d427cdf45ae76867396e06b6ee4b68d
477,437
def get_idx_slice(axis, i, ndim):
    """
    Returns a slice `s` where

        mat[s] == mat[:, ---, :, i, :, ---, :]
                      |_______|    |_______|
                        axis      ndim - axis - 1

    This allows indexing into an arbitrary dimension of an n-dimensional array.
    """
    idx = [slice(None)] * ndim
    idx[axis] = i
    return idx
2bf90d2d8fc955a0fa14cfccb17bed991d0be84e
295,027
import time
import json


async def tts(ws, text, voice):
    """Send a text-to-speech request on an established websocket connection.

    :param ws: an established websocket connection
    :param text: the text to be converted to an WAVE file
    :param voice: voice used to generate the WAVE file
    :return: the TTS response.

        >>> if resp['payload']['success'] is True then
        resp['payload']['audio_url']  # contains the converted audio URL.
        >>> if resp['payload']['success'] is False then
        resp['payload']['reason']  # contains the failure reason.
    """
    tts_request = {
        'emit': "tts",
        'payload': {
            'text': text,
            'voice': voice,
            'timestamp': int(time.time())
        }
    }
    await ws.send(json.dumps(tts_request))
    return json.loads(await ws.recv())
8b318f2524de729bc1038d7fb5385b0762e82bf2
525,587
def load_file(file_path):
    """Load the contents of a file into a string"""
    try:
        with open(file_path, 'r') as file:
            return file.read()
    except EnvironmentError:
        # parent of IOError, OSError *and* WindowsError where available
        return None
2cceabeb5b4bee494357001e7c6235acb99439fe
218,690
def compute_something(a: float, b: int) -> float:
    """Sums `a` and `b`.

    Args:
        a: A brief explanation of `a`.
        b: A brief explanation of `b`.

    Returns:
        float: The sum of `a` and `b`.

    Notes:
        The addition of an `int` and a `float` returns a `float`.

        Mathematically, this performs the following operation:

        .. math::

            c = a + b

    Warnings:
        The code will not break if you pass two str.
    """
    return a + b
997d6d811d10a2d70606addc76f7e906e8f9c73d
15,783
import json


def load_weather_data(file_path):
    """Given the `file_path` to the file containing the weather data in JSON
    format, this function loads the data back into a dictionary and returns
    it."""
    with open(file_path) as weather_input:
        weather_data = json.load(weather_input)
    return weather_data
dca0e6a59e8341cb9ebf8eca63e00eace52175d9
227,666
def merge(nums1, nums2):
    """
    Merge two given sorted arrays by merge sort

    :param nums1: first array
    :type nums1: list[int]
    :param nums2: second array
    :type nums2: list[int]
    :return: merged array
    :rtype: list[int]
    """
    result = []
    i = j = 0
    while i < len(nums1) and j < len(nums2):
        if nums1[i] < nums2[j]:
            result.append(nums1[i])
            i += 1
        else:
            result.append(nums2[j])
            j += 1
    while i < len(nums1):
        result.append(nums1[i])
        i += 1
    while j < len(nums2):
        result.append(nums2[j])
        j += 1
    return result
4ec45d0c65bb85f45d9b8266bba268ace22177f6
634,872
def findMatching(device, device_arr):
    """
    Check the array of devices to see if there is a matching element.

    Args:
        device (string): The name of the device attempting to be added.
        device_arr (array): An array of the devices attached to a profile.
    """
    matches = [x for x in device_arr if x == device]
    if matches != []:
        return True
    else:
        return False
388e7ee8c66d91f52935dfe61e9b2ce8cb3ac661
643,970
import string


def term_frequency(term: str, document: str) -> int:
    """
    Return the number of times a term occurs within a given document.

    @params: term, the term to search a document for, and document,
             the document to search within
    @returns: an integer representing the number of times a term is
              found within the document
    @examples:
    >>> term_frequency("to", "To be, or not to be")
    2
    """
    # strip all punctuation and newlines and replace it with ''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
fd3650360e2f1177c5e0514c10ba908767c2daf0
651,204
def heading_sub_degrees(a, b, mod=360):
    """
    Calculate the difference from -mod to mod between two angles.

    a > b will return positive difference, a < b will return negative
    difference (this example does not account for all quadrants; consider
    this example in the first quadrant for clarity).

    :param a: First angle.
    :param b: Second angle.
    :return: The angle from -mod to mod between a and b.
    """
    diff = (a - b) % mod
    if diff > mod / 2:
        return diff - mod
    else:
        return diff
20262062065c7de8b689f2dec34c7cdb1329e0fc
190,680
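Illustrative usage sketch for heading_sub_degrees above, showing the wrap-around behaviour (not part of the dataset row; angles are made up):

heading_sub_degrees(350, 10)  # -> -20 (shorter way from 10 to 350 is 20 degrees backwards)
heading_sub_degrees(10, 350)  # -> 20
heading_sub_degrees(90, 45)   # -> 45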
def all_boolean_reductions(request):
    """Fixture for boolean reduction names."""
    return request.param
f4ac578c5ab5777872c9345182b5f1081bd0f00d
625,787
import json


def get_json_file_content(json_path: str):
    """
    Get the json content of the json_path.

    :param: json_path, the path to the json file.
    :return: a dict representing the file content.
    """
    with open(json_path) as json_file:
        return json.load(json_file)
b77d6ac89107799cb5722c49a8cb0261a661c7f0
372,952
def vaf_to_ar(vaf):
    """
    Convert VAF to AR.

    VAF (variant allele frequency) = V-AF
    AR (allele ratio) = V-AF / WT-AF
    V-AF + WT-AF = 100 (%)

    Note:
        if VAF == 100:
            AR = -1 (instead of 100 / 0)

    Args:
        vaf (dc.Decimal): VAF to convert.

    Returns:
        AR (dc.Decimal)
    """
    if vaf == 100:
        return -1
    return vaf / (100 - vaf)
bf1847fd9d832d278f059e036d02cb198fc076f3
362,622
def get_classid(class_attribute, feat_name):
    """
    Parameters
    ----------
    class_attribute:
        Contains the name of the attribute/column that contains categorical values
    feat_name : string
        Contains the name of the attribute/column

    Returns
    -------
    class_idx : int
        Returns an integer value that will represent each categorical value
    """
    for class_idx, class_attr in enumerate(class_attribute):
        if class_attr in feat_name:
            return class_idx
f327204e3adb2c2a553c5a7c2d830573fed08dc5
531,106
def is_ref(frag):
    """
    Test whether a given Bokeh object graph fragment is a reference.

    A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.

    Args:
        frag (dict) : a fragment of a Bokeh object graph

    Returns:
        True, if the fragment is a reference, otherwise False
    """
    return isinstance(frag, dict) and \
        frag.get('type') and \
        frag.get('id')
bd9fecac28c0f5aa4c5743c1c176806cdc8d865c
350,618
def format_number(x):
    """Format number to string

    Function converts a number to string. For numbers of class :class:`float`,
    up to 17 digits will be used to print the entire floating point number.
    Any padding zeros will be removed at the end of the number.

    See :ref:`user-guide:int` and :ref:`user-guide:double` for more
    information on the format.

    .. note::
        IEEE754-1985 standard says that 17 significant decimal digits are
        required to adequately represent a 64-bit floating point number.
        Not all fractional numbers can be exactly represented in floating
        point. An example is 0.1 which will be approximated as
        0.10000000000000001.

    Parameters
    ----------
    x : :class:`int` or :class:`float`
        Number to convert to string

    Returns
    -------
    vector : :class:`str`
        String of number :obj:`x`
    """
    if isinstance(x, float):
        # Helps prevent loss of precision as using str() in Python 2 only prints 12 digits of precision.
        # However, IEEE754-1985 standard says that 17 significant decimal digits is required to adequately
        # represent a floating point number.
        # The g option is used rather than f because g precision uses significant digits while f is just the
        # number of digits after the decimal. (NRRD C implementation uses g).
        value = '{:.17g}'.format(x)
    else:
        value = str(x)

    return value
ac0e402dbbdc7d868a36f4176072905bfd54318a
165,600
def strip_dict(d):
    """ Return a new dictionary, like d, but with any string value stripped

    >>> d = {'a': ' foo ', 'b': 3, 'c': ' bar'}
    >>> result = strip_dict(d)
    >>> type(result)
    <type 'dict'>
    >>> sorted(result.items())
    [('a', 'foo'), ('b', 3), ('c', 'bar')]
    """
    return dict((k, v.strip() if hasattr(v, 'strip') else v)
                for k, v in d.iteritems())
2267ed6e4bb48a84a0213ce85f88ae7f1ffd8e33
478,456
def elem(x, xs):
    """ elem :: Eq a => a -> [a] -> Bool

    elem is the list membership predicate, elem(x, xs). For the result to be
    False, the list must be finite; True, however, results from an element
    equal to x found at a finite index of a finite or infinite list.
    """
    return x in xs
e5a40b1a89f40ce7d005f1d6313f5dd5fd8fe581
339,186
def partition(xrange, sz, overlap=0, skip=[]):
    """Divide range into subsets specified by size, overlap, and indices to skip.

    Args:
        xrange (range)
        sz (int): target size of subset
        overlap (int): amount of overlap between subsets
        skip ([int]): indices to skip in start/end of subset

    Returns:
        list of subset ranges
    """
    subsets = []
    xs_start = xrange.start
    xs_next = min(xs_start + sz, xrange.stop)
    while xs_start < xs_next:
        xs_stop = xs_next
        while xs_stop in skip and xs_stop < xrange.stop:
            xs_stop += 1
        while xs_start in skip and xs_start + overlap < xs_stop:
            xs_start += 1
        xs_stop = min(xrange.stop, xs_stop + overlap)
        if xs_start < xs_stop:
            subsets.append(range(xs_start, xs_stop))
        xs_start = xs_next
        xs_next = min(xs_next + sz, xrange.stop)
    return subsets
661c2fd33b0c5b0428ec35e2599b73d3a0bedbae
337,458
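Illustrative usage sketch for the range partition above (not part of the dataset row; the range, size, and overlap are made up):

[list(r) for r in partition(range(10), 4)]
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
[list(r) for r in partition(range(10), 4, overlap=1)]
# -> [[0, 1, 2, 3, 4], [4, 5, 6, 7, 8], [8, 9]]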
def read_file_lines(file):
    """Returns a list of str lines in text file"""
    with open(file, mode="rt", encoding="utf-8") as f:
        return f.readlines()
dac8acbf25c7c9833f20b913d4aff068afa57fa9
644,334
def ordinal_suffix(n):
    """Return the ordinal suffix for a positive integer

    >>> ordinal_suffix(0)
    ''
    >>> ordinal_suffix(1)
    'st'
    >>> ordinal_suffix(2)
    'nd'
    >>> ordinal_suffix(3)
    'rd'
    >>> ordinal_suffix(4)
    'th'
    >>> ordinal_suffix(11)
    'th'
    >>> ordinal_suffix(12)
    'th'
    >>> ordinal_suffix(13)
    'th'
    >>> ordinal_suffix(21)
    'st'
    >>> ordinal_suffix(22)
    'nd'
    >>> ordinal_suffix(23)
    'rd'
    >>> ordinal_suffix(101)
    'st'
    >>> ordinal_suffix(111)
    'th'
    >>> ordinal_suffix(112)
    'th'
    >>> ordinal_suffix(113)
    'th'
    >>> ordinal_suffix(121)
    'st'
    >>> ordinal_suffix(1111)
    'th'
    >>> ordinal_suffix(1322)
    'nd'
    >>> ordinal_suffix('horse')
    ''
    """
    try:
        n = int(n)
    except Exception:
        return ''
    if n < 1:
        return ''
    elif n >= 100:
        return ordinal_suffix(n % 100)
    elif 11 <= n <= 13:
        return 'th'
    elif n % 10 in (1, 2, 3):
        return ('st', 'nd', 'rd')[n % 10 - 1]
    else:
        return 'th'
53617737aaf28c2d239301358f01d1b0cea9f6cb
10,604