content: string (lengths 39 to 9.28k)
sha1: string (length 40)
id: int64 (8 to 710k)
def matmul(mat, rhs): """ Computes a matrix multiplication between a matrix (mat) and a right hand side (rhs). If mat is a tensor, then this is the same as torch.matmul. This function can work on lazy variables though. Args: - mat (matrix nxn) - left hand side matrix - rhs (matrix nxk) - rhs matrix or vector Returns: - matrix nxk """ return mat.matmul(rhs)
e2178df8d1cfd7da189b2549c7d6d7ec72f69e14
481,831
from typing import Any from typing import List from typing import Union def get_value_by_path(target: Any, path: List[Union[str, int]]) -> Any: """ Gets the value at path of dict or list. :Example: .. code-block:: pycon In [1]: target = { 'a': [{ 'b': { 'c': 3 } }] } In [2]: get_value_by_path(target, ["a", 0, "b", "c"]) Out[2]: 3 The behavior of the function is similar to: https://lodash.com/docs#get """ result = target for segment in path: if isinstance(result, dict): result = result[segment] elif isinstance(result, list): result = result[int(segment)] else: raise Exception(f"Invalid path: {path}") return result
0f8ee689cbcb969c60c4f74dcc5dec9cb4f77b2a
199,035
def model_name(value): """ Return the model verbose name of an object """ return value._meta.verbose_name
6338a63dc36de91cd991b1838aacd730584434a7
148,439
def as_dictionary_of_dictionaries(labels, means, cov_diags): """ Dictionary storing one dictionary of parameters per category. """ assert len(labels) == len(means) == len(cov_diags) all_params = dict() for label, mean, cov_diag in zip(labels, means, cov_diags): category_params = dict() category_params['mean'] = mean category_params['cov_diag'] = cov_diag all_params[label] = category_params return all_params
c355f5ac3d2e8429bcf54e2420cf19c6c5efdd68
649,397
def _get_modified_counts(issue): """Helper to determine the modified line counts of the latest patch set.""" modified_added_count = 0 modified_removed_count = 0 # Count the modified lines in the patchset. patchsets = list(issue.patchsets) if patchsets: for patch in patchsets[-1].patches: modified_added_count += patch.num_added modified_removed_count += patch.num_removed return modified_added_count, modified_removed_count
adef22502f7c61fdb772222b133c0d171255b7af
230,058
from datetime import datetime import calendar def dt_to_ts(value): """ If value is a datetime, convert to timestamp """ if not isinstance(value, datetime): return value return calendar.timegm(value.utctimetuple()) + value.microsecond / 1000000.0
6eb254d301630444ef9a0cb313cf25512ac21556
576,468
def sieve_of_eratosphenes(n: int) -> list[int]: """ Finds prime numbers <= n using the sieve of Eratosphenes algorithm. :param n: the upper limit of sorted list of primes starting with 2 :return: sorted list of primes Integers greater than 2 are considered good input that gives meaningful results: >>> sieve_of_eratosphenes(17) [2, 3, 5, 7, 11, 13, 17] >>> sieve_of_eratosphenes(16) [2, 3, 5, 7, 11, 13] Other integers are considered relatively good input that results in empty list: >>> sieve_of_eratosphenes(0) [] >>> sieve_of_eratosphenes(-7) [] Other types cause TypeError and so are considered bad input: >>> sieve_of_eratosphenes("m") Traceback (most recent call last): ... TypeError: can only concatenate str (not "int") to str >>> sieve_of_eratosphenes(2.3) Traceback (most recent call last): ... TypeError: can't multiply sequence by non-int of type 'float' """ sieve = [True] * (n+1) for i in range(2, n+1): if sieve[i] and i*i <= n: for k in range(i*i, n+1): if not k%i: sieve[k] = False prime_numbers = [] for i in range(2, n+1): if sieve[i]: prime_numbers.append(i) return prime_numbers
4dbfdffe0ff6e360361daccdcd39fb7fb3d09a03
702,743
def is_valid_name(name): """Checks that GitHub user's name is valid""" return name and len(name) >= 3 and ' ' in name
7197c0d420cfcd59eb68337e0933c02f00e9f801
638,457
import inspect def get_named_args(func): """Get all non ``*args/**kwargs`` arguments for a function""" s = inspect.signature(func) return [ n for n, p in s.parameters.items() if p.kind in [p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY, p.KEYWORD_ONLY] ]
492d8d8333b472cb0eb7b4573be42e45550175ea
322,571
def _add_velocity_actuator(joint): """Adds a velocity actuator to a joint, returns the new MJCF element.""" # These parameters were adjusted to achieve a grip force of ~25 N and a finger # closing time of ~1.2 s, as specified in the datasheet for the hand. gain = 10. forcerange = (-1., 1.) ctrlrange = (-5., 5.) # Based on Kinova's URDF. return joint.root.actuator.add( 'velocity', joint=joint, name=joint.name, kv=gain, ctrllimited=True, ctrlrange=ctrlrange, forcelimited=True, forcerange=forcerange)
ce6dd249233427a1e06bde034bd390e073bee49e
416,218
import torch import math import random def prepare_data_for_node_classification(datalist, train_ratio=0.3, rnd_labeled_nodes=True): """For each graph split the nodes for training and testing. It creates a train_mask where elements equal to 1 are for training. rnd_labeled_nodes=True means that the nodes that are given labels for training are chosen at random (at different epochs, the same graph can have different labeled nodes).""" for graph in datalist: num_nodes = graph.num_nodes num_classes = graph.node_y.size(1) nodes_per_class = {} for node, y in enumerate(graph.node_y.argmax(1)): y = y.item() if y not in nodes_per_class: nodes_per_class[y] = [] nodes_per_class[y].append(node) train_mask = torch.zeros((num_nodes, 1)) for y in nodes_per_class.keys(): num_nodes_in_class = len(nodes_per_class[y]) num_train_nodes = math.floor(num_nodes_in_class*train_ratio) if rnd_labeled_nodes: train_nodes = random.sample(nodes_per_class[y], num_train_nodes) else: train_nodes = nodes_per_class[y][:num_train_nodes] for node in train_nodes: train_mask[node] = 1 graph.train_mask = train_mask return datalist
257d8097993d9178f0682d63d158d0e2578d2feb
80,619
def split_by_indel_size(df, indel_class): """Split dataframe by indel size Args: df (pandas.DataFrame) indel_class (str): "s" for 1-nt, "m" for >1-nt indels Returns: df (pandas.DataFrame): only contains 1-nt or >1-nt indels """ if indel_class == "s": return df[df["indel_size"] == 1].reset_index(drop=True) else: return df[df["indel_size"] > 1].reset_index(drop=True)
ec5a70cef8d8ce5a2c1e48f68c8f6af4524a80a8
402,440
def div255(x): """ Scale clip frames from [0, 255] to [0, 1]. Args: x (Tensor): A tensor of the clip's RGB frames with shape: (channel, time, height, width). Returns: x (Tensor): Tensor scaled by dividing by 255. """ return x / 255.0
4f6258d724ab9f85c3e1268711a5c3f4e5b3d22f
520,320
def dict_merge(base, top): """Recursively merge two dictionaries, with the elements from `top` taking precedence over elements from `base`. Returns ------- out : dict A new dict, containing the merged records. """ out = dict(top) for key in base: if key in top: if isinstance(base[key], dict) and isinstance(top[key], dict): out[key] = dict_merge(base[key], top[key]) else: out[key] = base[key] return out
a6c97c1b82b2ae9be9b31f06fc49c323f3c126cc
317,649
def scenegraph_to_json(sg): """ Dump an "SceneGraph" object to a dict that's used for evaluation. The output will be saved as json. Args: sg (SceneGraph): Returns: dict: contains predictions for one image. """ boxes = sg.pred_boxes.tensor.numpy().tolist() # for vg evaluation, all boxes should be in XYXY_ABS scores = sg.scores.numpy().tolist() classes = sg.pred_classes.numpy().tolist() rel_scores = sg.rel_scores.numpy().tolist() rel_inds = sg.rel_inds.numpy().tolist() result = { "category_ids": classes, "bboxes": boxes, "scores": scores, "rel_scores": rel_scores, "rel_inds": rel_inds } return result
2a39a81dfe6fcc14b8528c08bdad9521f35461db
684,120
def compute_pascal(n): """ Compute the nth row of Pascal’s triangle Parameters ---------- n : integer which row to compute Returns ------- pascal_n : a list of integers The nth row of Pascal’s triangle. """ pascal_n = [1] prev = 1 for k in range(1,n+1): cur = prev * (n+1-k)/k pascal_n.append(int(cur)) prev = cur return(pascal_n)
54b2d5ca80412d2da4da4e9f09dff25026205d3d
29,012
import typing def bytes_escape(bs: bytes, *, quote: typing.Optional[str] = None) -> str: """Convert a bytestring to an ASCII string, with non-ASCII characters hex-escaped. (We implement our own escaping mechanism here to not depend on Python's str or bytes repr.) """ out = [] # The bytestring is decoded as Latin-1 instead of ASCII here so that non-ASCII characters don't cause an error. # The conditions inside the loop ensure that only ASCII characters are actually output. for byte, char in zip(bs, bs.decode("latin-1")): if char in {quote, "\\"}: out.append(f"\\{char}") elif char.isprintable() and byte < 0x80: out.append(char) else: out.append(f"\\x{byte:02x}") return "".join(out)
950e85beda5ebb159b465859cdd008fce63aa5ca
509,558
from typing import Optional def nround(value: Optional[float], digits: int) -> Optional[float]: """ Returns a rounded value, properly honoring ``None`` objects. :param value: Float value (or None) :param digits: number of digits :returns: rounded float value (or None) """ return None if value is None else round(value, digits)
3a0567fecdeff6cfb222278a2fc70eda368c9711
546,891
def bitstring2index(bitstring): """ Turns a bitstring into an index. >>> bitstring2index((0, 0, 1, 0, 1, 0, 1, 0)) 42 """ return sum(value * 2 ** i for i, value in enumerate(bitstring[::-1]))
6d3a4e85e8610241dfd50b7b57fb68f53a2bf15c
226,431
def drop_cols(df): """Drops unused columns from passed dataframe. The variables that are eliminated are: - codigo_cierre - fecha_cierre - año_cierre - mes_cierre - hora_cierre - latitud - longitud - clas_con_f_alarma - delegacion_cierre - geopoint Parameters: df (dataframe) """ dropped_columns = ['codigo_cierre', 'fecha_cierre', 'año_cierre', 'mes_cierre', 'hora_cierre', 'latitud', 'longitud', 'clas_con_f_alarma', 'delegacion_cierre', 'geopoint'] df.drop(dropped_columns, axis='columns', inplace=True) return df
cb514b6c09136960bdda7a061cac77240615f7f6
560,329
from pathlib import Path def get_model_version_file(model_version, file_path): """Build the path to the expected location of a model_version file. Parameters ---------- model_version : modmon.schema.db.ModelVersion Model version object file_path : str Path to file (relative to model_version location) Returns ------- pathlib.Path Expected path to the file """ return Path(model_version.location, file_path)
0f2d6a152f334a1bdc6d2b02e4eb1455710e26a1
389,926
def last_home_loss_road_win(games): """ Given a list of games, returns the most recent home loss and road win. """ try: last_home_loss = games.filter(t1_game_type='H', t1_result='L')[0] except: last_home_loss = None try: last_road_win = games.filter(t1_game_type='A', t1_result='W')[0] except: last_road_win = None return last_home_loss, last_road_win
6a18309793aebba0baee79e02e7833d5adf6fc27
594,536
def split_array(text): """Split a string assuming it's an array. :param text: the text to split :type text: str :returns: list of splitted strings :rtype: list """ if not isinstance(text, str): return text # for some reason, titles.akas.tsv.gz contains \x02 as a separator sep = ',' if ',' in text else '\x02' return text.split(sep)
0980125329538bf1fc70fdaf2f2a2eb8cdd4511c
196,688
def _get_command(path, command_directory): """ Args: path: The pathlib.Path to _compile_command file. command_directory: The directory commands are run from. Returns a string to stick in compile_commands.json. """ with path.open("r") as f: contents = f.read().split("\0") if len(contents) != 2: # Old/incomplete file or something; silently ignore it. return None if contents[1].startswith("external/"): # Do not include compilation commands for dependencies return None return """{ "directory": "%s", "command": "%s", "file": "%s" }""" % (command_directory, contents[0].replace('"', '\\"'), contents[1])
ecf503d2ed406d4cb49088446938619a4a491b71
483,265
def get_misc_model(test_list, extra_model_data=None): """Build a model that will run any missing tests.""" model = { "is_misc": True, "excluded_tests": test_list, } if extra_model_data: model.update(extra_model_data) return model
e5c0ed88a0745d6e7c0618afdb319b9e57714291
280,293
import re def remove_strings(codelines): """ Removes strings from the code. """ reg = re.compile("\"[^\"]*\"") newlines = [] for l in codelines: l = reg.sub("", l) newlines.append(l) return newlines
bda1fa811eff338fa33842fc586edf8fef6dec52
557,097
def distance(row, edge): """ Suppression function that converts 'distance' into 'strength' """ return 1 / row['distance']
bdd7084522ae6b41a30a34f9b276755e9f4944bc
503,520
def to_numpy(tensor): """ Convert a tensor, either a Torch or TensorFlow tensor, to a numpy tensor """ if 'detach' in dir(tensor): # It's torch return tensor.cpu().detach().numpy() else: # It's tensorflow return tensor.numpy()
d9c1c5d03298573832cda0ce61ed69b9953fad96
323,769
def clean_word(word): """Get the junk off a word.""" return word.strip().strip(".,'!?\"()*;:-")
f1328ec54f346e53a7f1cc5935394d6b3c3d1bac
425,384
def generate_solution(x: int, n: int) -> int: """This is the "naive" way to compute the solution, for testing purposes. In this one, we actually run through each element. """ counter = 0 for i in range(1, n + 1): for j in range(1, n + 1): if i * j == x: counter += 1 return counter
fb074da93890f19b3aebce9babf43a9d29c68897
695,579
import torch def _grad_spherical_harmonics_l1(xyz, m): """Compute the nabla of 1-1 Spherical Harmonics Args: xyz : array (Nbatch,Nelec,Nrbf,Ndim) x,y,z, of (Point - Center) m : second quantum number (-1,0,1) Returns \nabla Y0-1 = \sqrt(3 / (4\pi)) ( 1/r^3 * [-yx, x^2+z^2, -yz] ) (m=-1) \nabla Y00 = \sqrt(3 / (4\pi)) ( 1/r^3 * [-zx, -zy, x^2+y^2] ) (m= 0) \nabla Y01 = \sqrt(3 / (4\pi)) ( 1/r^3 * [y^2+z^2, -xy, -xz] ) (m= 1) """ r = torch.sqrt((xyz**2).sum(3)) r3 = r**3 c = 0.4886025119029199 p = (c / r3).unsqueeze(-1) if m == -1: return p * (torch.stack([-xyz[:, :, :, 1] * xyz[:, :, :, 0], xyz[:, :, :, 0]**2 + xyz[:, :, :, 2]**2, -xyz[:, :, :, 1] * xyz[:, :, :, 2]], dim=-1)) if m == 0: return p * (torch.stack([-xyz[:, :, :, 2] * xyz[:, :, :, 0], -xyz[:, :, :, 2] * xyz[:, :, :, 1], xyz[:, :, :, 0]**2 + xyz[:, :, :, 1]**2], dim=-1)) if m == 1: return p * (torch.stack([xyz[:, :, :, 1]**2 + xyz[:, :, :, 2]**2, -xyz[:, :, :, 0] * xyz[:, :, :, 1], -xyz[:, :, :, 0] * xyz[:, :, :, 2]], dim=-1))
159648d3047f5cad38ab415ac70f012e157fc112
163,746
def remove_spaces(st): """ Removes spaces from a string :param st: (str) input string with spaces :return: (str) string without any spaces """ return st.replace(' ', '')
3419571854077e2365c94d96394be0edfbe8912d
646,539
import json def json_raw_to_dictionary(json_raw): """Helper method that converts JSON to a dictionary""" return json.loads(json_raw)
ff7842fa0492255b38d8db088ffc5cb8dfdfee8e
603,128
from typing import Mapping def check_dict_nested_attrs(item: Mapping, dict_data: Mapping) -> bool: """ Checks the values from `dict_data` are contained in `item` >>> d = {'a': 1, 'b': {'c': 2}} >>> check_dict_nested_attrs(d, {'a': 1}) True >>> check_dict_nested_attrs(d, {'b': {'c': 2}}) True >>> check_dict_nested_attrs(d, {'d': []}) False """ for key, value in dict_data.items(): if key not in item: return False item_value = item[key] if isinstance(item_value, Mapping): if not check_dict_nested_attrs(item_value, value): return False elif item_value != value: return False return True
08ed8dbc405e236b95e33e10e9c342e15b6363c9
27,132
def bounds_elementwise(lst): """Given a non-empty list, returns (mins, maxes) each of which is the same length as the list items. >>> bounds_elementwise([[0,6,0], [5,0,7]]) ([0, 0, 0], [5, 6, 7]) """ indices = list(range(len(lst[0]))) mins = [min(el[i] for el in lst) for i in indices] maxes = [max(el[i] for el in lst) for i in indices] return (mins, maxes)
5fa4fbe75db310d971005c88fc6d04058d3cd998
25,108
def join_fields(fields): """ Joins the provided fields with a comma and returns the result :param list[str] fields: :return: a string with fields joined by comma :rtype: str """ return ",".join(fields)
bdbb23192b1fb008e53fba0198b93be46253276a
197,570
import json def format_json(content, sort_keys=False): """ Format a given dictionary to a json formatted string. """ json_string = json.dumps( content, sort_keys=sort_keys, indent=2, separators=(',', ': ')) return json_string
0d7500eafccf4bb5bf61c9f3b51fef16b42d46e4
392,631
def CalculateNewBackgroundPosition(m): """Fixes horizontal background-position percentages. This function should be used as an argument to re.sub since it needs to perform replacement specific calculations. Args: m: A match object. Returns: A string with the horizontal background position percentage fixed. BG_HORIZONTAL_PERCENTAGE_RE.sub(FixBackgroundPosition, 'background-position: 75% 50%') will return 'background-position: 25% 50%'. """ # The flipped value is the offset from 100% new_x = str(100-int(m.group(4))) # Since m.group(1) may very well be None type and we need a string.. if m.group(1): position_string = m.group(1) else: position_string = '' return 'background%s%s%s%s%%%s' % (position_string, m.group(2), m.group(3), new_x, m.group(5))
c68e184d2b9864d1128e9fccf7d0c249276f6938
195,913
from typing import List def find_substring_by_pattern( strings: List[str], starts_with: str, ends_before: str ) -> str: """ search for a first occurrence of a given pattern in a string list >>> some_strings = ["one", "two", "three"] >>> find_substring_by_pattern(some_strings, "t", "o") 'tw' >>> find_substring_by_pattern(some_strings, "four", "five") Traceback (most recent call last): ... ValueError: pattern four.*five not found :param strings: a list of strings where the pattern is searched for :param starts_with: the first letters of a pattern :param ends_before: a substring which marks the beginning of something different :returns: a pattern which starts with ``starts_with`` and ends before ``ends_before`` """ for package_name in strings: starting_index = package_name.find(starts_with) if starting_index >= 0: ending_index = package_name.find(ends_before) return package_name[starting_index:ending_index] raise ValueError(f"pattern {starts_with}.*{ends_before} not found")
4bc0abe6fcdbf81350b575dd9834b9c646fda81e
15,948
def truncate(s, length): """Truncate a string to a specific length The result string is never longer than ``length``. Appends '..' if truncation occurred. """ return (s[:length - 2] + '..') if len(s) > length else s
a94a1dd852749a3d4f96fa3a58db32d50a882074
39,462
def recursive_replace_val_str(obj, string, new_string, callback_on_replaced=None): """Recursively replace value strings in a nested dict/list `obj`. Replaces `string` value with `new_string`. If `callback_on_replaced` is not None, then call it on a replaced full string. """ if isinstance(obj, list): obj = [ recursive_replace_val_str(x, string, new_string, callback_on_replaced) for x in obj ] elif isinstance(obj, dict): for k in obj.keys(): obj[k] = recursive_replace_val_str( obj[k], string, new_string, callback_on_replaced ) elif isinstance(obj, str): if string in obj: obj = obj.replace(string, new_string) if callback_on_replaced: callback_on_replaced(obj) return obj
530d9ff4937c3e94fddd64b7f277e3182b8116ba
551,866
def find_missing_timestamps(timestamps): """Find missing value(s) in a sequence of timestamps. This function also takes into consideration the scenario when timestamp 0 is missing from the timestamps argument. Args: timestamps (list): A list of timestamps (in picoseconds). Returns: An ascendingly sorted list of missing timestamp values (if any). """ if timestamps is None or len(timestamps) == 0: return None min_correct_timestamp = timestamps[0] if timestamps[0] == 0 else 0 max_correct_timestamp = timestamps[-1] correct_time_values_in_ps = ( set(range(min_correct_timestamp, max_correct_timestamp, 100)) ) missing_timestamps = correct_time_values_in_ps.difference(timestamps) return sorted(missing_timestamps)
870dea20116adad0ac89ad4e1b5b569d75cdeda0
159,183
def set_num_parts(df, max_num_parts): """Set number of partitions. :param max_num_parts maximum number of partitions """ if df.rdd.getNumPartitions() > max_num_parts: df = df.repartition(max_num_parts) return df
7c38bb8a6d532cd44536e3ca1715bca42df32cd4
520,666
def get_object(context, key): """ Get object by key from enabled depots """ for depot in context.depots["object"].values(): try: value = depot.get_object(key) if value is not None: return value except: # pylint: disable=W0702 pass return None
991bd366505f5454c2ec5196c8ff5dbc816dd14b
542,284
from typing import Any def if_none(value: Any, default: Any): """Return default if value is None.""" return value if value is not None else default
dfdc45a0c77832c8203cd9104af45b5f4beee334
291,101
def unique(seq, key=None, return_as=None): """Unique the seq and keep the order. Instead of the slow way: `lambda seq: (x for index, x in enumerate(seq) if seq.index(x)==index)` :param seq: raw sequence. :param return_as: generator for default, or list / set / str... >>> from torequests.utils import unique >>> a = [1,2,3,4,2,3,4] >>> unique(a) <generator object unique.<locals>.<genexpr> at 0x05720EA0> >>> unique(a, str) '1234' >>> unique(a, list) [1, 2, 3, 4] """ seen = set() add = seen.add if key: generator = (x for x in seq if key(x) not in seen and not add(key(x))) else: generator = (x for x in seq if x not in seen and not add(x)) if return_as: if return_as == str: return "".join(map(str, generator)) else: return return_as(generator) else: # python2 not support yield from return generator
c434bf9e0ce2b268b9aabdf8f55c82b817b59278
549,291
from typing import Tuple from typing import Any import math def _calc_dist(p1: Tuple[Any, Any], p2: Tuple[Any, Any]) -> float: """ Calculate distance between two points """ return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))
28de5d89e4ffd9f8957bbe2102c3fe24baa45a79
489,272
def best_and_worst_hour(percentages): """ Args: percentages: output of compute_percentage Return: list of strings, the first element should be the best hour, the second (and last) element should be the worst hour. Hour are represented by string with format %H:%M e.g. ["18:00", "20:00"] """ #position [0] returns: profit = 0, and [1] returns time hora = 1 # Calculate the best and worst hour: bestHour = max(zip(percentages.values(), percentages.keys())) worstHour = min(zip(percentages.values(), percentages.keys())) return [bestHour[hora], worstHour[hora]]
6424ffda25c9d6deb9b8f54e2268a85ea36163d7
464,644
from typing import OrderedDict def _groups_fail_table(groups): """Returns a dict associating each element to the groups it can fail. This function is used both in ``plot_elements_in_group`` and ``find_logical_saboteurs``. Parameters ---------- groups Ordered dict of the form {group_name: [elements in groups]}. Returns ------- fail_table A dict {element_name: list[True False False ...]} where list[i] is True if the element is part of the i-th group in the provided ordered dictionary ``groups``. """ groups = list(groups.values()) all_elements = [] for group in groups: for element in group: if element not in all_elements: all_elements.append(element) return OrderedDict( [ (element, [(element in group) for group in groups]) for element in all_elements ] )
2fda833cc84198186442208c1deb366b0fe7cb80
259,397
def yaml_diagnostic(name='${diag_name}', message='${message}', file_path='/tidy/file.c', file_offset=1, replacements=(), notes=()): """Creates a diagnostic serializable as YAML with reasonable defaults.""" result = { 'DiagnosticName': name, 'DiagnosticMessage': { 'Message': message, 'FilePath': file_path, 'FileOffset': file_offset, }, } if replacements: result['DiagnosticMessage']['Replacements'] = [{ 'FilePath': x.file_path, 'Offset': x.offset, 'Length': x.length, 'ReplacementText': x.text, } for x in replacements] if notes: result['Notes'] = [{ 'Message': x.message, 'FilePath': x.file_path, 'FileOffset': x.file_offset, } for x in notes] return result
9a4764a9d8ef107c77d29bbec54c6c8d1d7e0f20
341,957
def rename(target, keys): """Returns a dict containing only the key/value pairs from keys. The keys from target will be looked up in keys, and the corresponding value from keys will be used instead. If a key is not found, it is skipped. Args: target: the dictionary to filter keys: the fields to filter """ result = {} for key, value in target.items(): if key in keys: new_key = keys[key] result[new_key] = value return result
61cd8950bf029778449e7fdede370debcb6ace82
519,896
import torch def dist(input, other, *args, **kwargs): """ Returns the p-norm of (``input`` - ``other``) Examples:: >>> import torch >>> import treetensor.torch as ttorch >>> t1 = torch.randn(5) >>> t1 tensor([-0.6566, 1.2243, 1.5018, -0.1492, 0.8947]) >>> t2 = torch.randn(5) >>> t2 tensor([0.5898, 0.6839, 0.0388, 0.4649, 0.7964]) >>> ttorch.dist(t1, t2) tensor(2.0911) >>> tt1 = ttorch.randn({'a': (5, ), 'b': {'x': (6, )}}) >>> tt1 <Tensor 0x7f95f68495f8> ├── a --> tensor([-0.5491, 1.5006, -0.0483, 1.2282, -1.4837]) └── b --> <Tensor 0x7f95f68494e0> └── x --> tensor([-1.8414, 1.2913, 0.0943, 0.3473, 1.2717, 0.6013]) >>> tt2 = ttorch.randn({'a': (5, ), 'b': {'x': (6, )}}) >>> tt2 <Tensor 0x7f95f68ef2b0> ├── a --> tensor([ 0.1389, -0.7804, -1.3048, -1.1066, 1.3225]) └── b --> <Tensor 0x7f95f6849dd8> └── x --> tensor([ 1.4873, 0.2218, -0.1063, -0.8726, -0.6756, 0.4805]) >>> ttorch.dist(tt1, tt2) <Tensor 0x7f95f6849358> ├── a --> tensor(4.5366) └── b --> <Tensor 0x7f95f68494a8> └── x --> tensor(4.1904) """ return torch.dist(input, other, *args, **kwargs)
44b94cb0a2f1efe450e12ee25025d0fc98cb4570
513,901
def get_obj_attr(obj, attr): """ Allows us to access property by variable value inside our templates. Example: `data={'monday': True}`, `day='monday'`, then we can do: `{{ data|get_obj_attr:day }}` Parameters ---------- obj attr Returns ------- Attribute of obj """ return getattr(obj, attr)
825c93651218ea3d7e328215f0292e7f12bf41ff
140,361
from typing import Counter def wordle_judge(input_word: str, answer_word: str)-> int: """ Judge input word based on the wordle rule We assume input_word and answer_word are of the same length (but no check is conducted) Judge results are defined as a sequence of {0, 1, 2}, where 2: exact, 1: partial, 0: none Return value is the base-10 integer value that represents this result interpreted as an integer of base 3. e.g. 22001 --> 2 * 3^4 + 2 * 3^3 + 0 * 3^2 + 0 * 3^1 + 1 * 3^0 = 217 """ exactmatch = [a==b for a, b in zip(input_word, answer_word)] lettercount = Counter(b for b, m in zip(answer_word, exactmatch) if not m) partialmatch = [False] * len(input_word) for i, (a, m) in enumerate(zip(input_word, exactmatch)): if m: continue if lettercount.get(a, 0) > 0: lettercount[a] -= 1 partialmatch[i] = True out = 0 power = 1 for x, y in zip(reversed(exactmatch), reversed(partialmatch)): if x: out += power*2 elif y: out += power power *= 3 return out
ebf545f44fe34425d938a4ab09a3418810c244f8
687,506
import re def normalize_name(name, separator="_"): """Normalizes a name by replacing all non-alphanumeric characters with underscores.""" return re.sub("[^A-Za-z0-9_]+", separator, name)
fa186c1601657d960ba6630ab508883228b40452
640,429
def sanitizeAMMName(name: str): """ Sanitize the callsites for AMM dataset. """ if "::" in name: name = name.split("::")[-1] else: name = name return name
c3ec846377506dbb7091ee4822e23813a8a4beae
453,144
def html_encode(unicode_data, encoding='utf-8'): """Encode HTML""" return unicode_data.encode(encoding, 'html_replace')
4f210ce780e1d0e3123bfacf129a2cbd5c25f65b
91,002
def sort_double_char_labels(labels): """Sort double char labels based on the order of repeated, lower and other labels""" repeated_char_labels = [label for label in labels if label[0] == label[1]] lower_char_labels = [label for label in labels if label[0].islower() and label[1].islower() and label not in repeated_char_labels] other_labels = [label for label in labels if label not in repeated_char_labels and label not in lower_char_labels] labels = repeated_char_labels + lower_char_labels + other_labels return labels
979bd0de303706e27a12544f0b201484e8b4f7cb
93,216
def is_misc(_item): """item is report, thesis or document :param _item: Zotero library item :type _item: dict :returns: Bool """ return _item["data"]["itemType"] in ["thesis", "report", "document"]
2b0232a658717769ae34e06d006831efb20a791d
603,048
def format_time(stamp): """ formats time to seconds :param stamp: rospy.time :return: time in seconds """ return stamp.secs + stamp.nsecs * (10 ** -9)
a70504de93f9bb2987fe8c3030de6fd29f1866cb
544,675
def _get_match_fraction(aligned_segment): """Get the fraction of a read matching the reference""" matching_bases = aligned_segment.cigartuples[0][1] return float(matching_bases)/aligned_segment.query_length
235deb526d5f33662f5f05c36d3a3b755079296e
103,187
def is_left_bound(x): """ Was left bound reached? :param x: x coordinate. :return: True - bound was reached, False - otherwise. """ return x <= 0
6c23d733c943b78e94a3a14de61b595cc6b7d00b
169,579
import re def contains_reserved_char(word: str): """ The following reserved chars are not permitted at all: < (less than) > (greater than) : (colon) " (double quote) / (forward slash) \\ (single backslash (escaped here)) | (vertical bar or pipe) ? (question mark) * (asterisk) """ # single backslash, vertical bar, question mark and asterisk are escaped return re.match(r'\S*(<+|>+|:+|"+|/+|\\+|\|+|\?+|\*+)', word) is not None
a22ea99ede7eeb0507522147188ca81031cce2ca
300,867
def get_product_images(product): """Return list of product images that will be placed in product gallery.""" return list(product.images.all())
64108e6ba906f9fcafd16af172c2af9f34d2eb20
655,021
from typing import List import textwrap def generate_build_file_contents(name: str, dependencies: List[str]) -> str: """Generate a BUILD file for an unzipped Wheel Args: name: the target name of the py_library dependencies: a list of Bazel labels pointing to dependencies of the library Returns: A complete BUILD file as a string """ return textwrap.dedent( """\ package(default_visibility = ["//visibility:public"]) load("@rules_python//python:defs.bzl", "py_library") py_library( name = "{name}", srcs = glob(["**/*.py"]), data = glob(["**/*"], exclude=["**/*.py", "**/* *", "BUILD", "WORKSPACE"]), # This makes this directory a top-level in the python import # search path for anything that depends on this. imports = ["."], deps = [{dependencies}], ) """.format( name=name, dependencies=",".join(dependencies) ) )
6714a8e214eb4d89614702643f5d900d864cbe53
632,825
import functools import operator def _compute_product_0(xs): """Uses built-in Python tools to compute the product of the list.""" if not xs: return 0 return functools.reduce(operator.mul, xs)
215c5f428b9b4ba5bef72b2b78e8d01084079c50
539,490
def _get_reduce_dims(op): """Returns the reduction dimensions for grouping weights of various ops.""" type_to_dims = {'Conv2D': (0, 1, 2), 'Conv2DBackpropInput': (0, 1, 3)} try: return type_to_dims[op.type] except KeyError: raise ValueError('Reduce dims are unknown for op type %s' % op.type)
e08e2c699971f855091bd3991a86dde3e3610f4a
350,082
def get_distance_squared(x1: float, y1: float, x2: float, y2: float) -> float: """ it returns the distance squared between two points :param x1: the x position of the first object :param y1: the y position of the first object :param x2: the x position of the second object :param y2: the y position of the second object :type x1: float :type y1: float :type x2: float :type y2: float :return: float """ return (x1 - x2) ** 2 + (y1 - y2) ** 2
542277b67b1d467434791a329619fc70c2845ca0
375,890
import collections def _split_by_exec_group(testcases): """ Split testcases into those with an execution group and those without one. """ serial_cases = [] parallel_cases = collections.OrderedDict() for testcase in testcases: exec_group = getattr(testcase, "execution_group", None) if exec_group: if exec_group in parallel_cases: parallel_cases[exec_group].append(testcase) else: parallel_cases[exec_group] = [testcase] else: serial_cases.append(testcase) return serial_cases, parallel_cases
caaed4d4487828f6b79cc5b3a4fe85a79c83c5a6
560,962
def average_of_factors(n): """ n (int): number Returns the average of the positive divisors of n """ # Initialize the sum total = 0 # Initialize the number of factors count = 0 for i in range(1, n+1): # Check if i is a divisor of n if n % i == 0: total += i count += 1 average = total/count return average
622f5793685c03ec0b367c23313327264a79b9b2
642,078
def df_variant_id(row): """Get variant ID from pyvcf in DataFrame""" if row['ID'] != '.': return row['ID'] else: return row['CHROM'] + ':' + str(row['POS'])
460868e501b5e4683fc88d80cee363b5778d7623
669,788
def render_output(out: list): """Prettifies `create_seating_chart` output Args: out (list): Output of `create_seating_chart` Returns: str: Prettified output for presentation to user """ return "\n\r".join([", ".join(i) for i in out])
20fc9ed04d656c90298ae76041a94ec3f1dab8b5
211,927
def align_skeleton(skeleton, code): """ Aligns the given skeleton with the given code, minimizing the edit distance between the two. Both skeleton and code are assumed to be valid one-line strings of code. >>> align_skeleton(skeleton="", code="") '' >>> align_skeleton(skeleton="", code="i") '+[i]' >>> align_skeleton(skeleton="i", code="") '-[i]' >>> align_skeleton(skeleton="i", code="i") 'i' >>> align_skeleton(skeleton="i", code="j") '+[j]-[i]' >>> align_skeleton(skeleton="x=5", code="x=6") 'x=+[6]-[5]' >>> align_skeleton(skeleton="return x", code="return x+1") 'returnx+[+]+[1]' >>> align_skeleton(skeleton="while x<y", code="for x<y") '+[f]+[o]+[r]-[w]-[h]-[i]-[l]-[e]x<y' >>> align_skeleton(skeleton="def f(x):", code="def g(x):") 'def+[g]-[f](x):' """ skeleton, code = skeleton.replace(" ", ""), code.replace(" ", "") def helper_align(skeleton_idx, code_idx): """ Aligns the given skeletal segment with the code. Returns (match, cost) match: the sequence of corrections as a string cost: the cost of the corrections, in edits """ if skeleton_idx == len(skeleton) and code_idx == len(code): return "", 0 if skeleton_idx < len(skeleton) and code_idx == len(code): edits = "".join(["-[" + c + "]" for c in skeleton[skeleton_idx:]]) return edits, len(skeleton) - skeleton_idx if skeleton_idx == len(skeleton) and code_idx < len(code): edits = "".join(["+[" + c + "]" for c in code[code_idx:]]) return edits, len(code) - code_idx possibilities = [] skel_char, code_char = skeleton[skeleton_idx], code[code_idx] # Match if skel_char == code_char: edits, cost_ = helper_align(skeleton_idx + 1, code_idx + 1) possibilities.append((skel_char + edits, cost_)) # Insert edits, cost_ = helper_align(skeleton_idx, code_idx + 1) prefix = "+[" + code_char + "]" possibilities.append((prefix + edits, cost_ + 1)) # Delete edits, cost_ = helper_align(skeleton_idx + 1, code_idx) prefix = "-[" + skel_char + "]" possibilities.append((prefix + edits, cost_ + 1)) return min(possibilities, key=lambda x: x[1]) result, cost = helper_align(0, 0) return result
799294dfde023276ba1fe159eacad4b0973beb49
577,241
def x_count(board): """ count how many 'x'-es are on the board """ return len([ch for ch in board if ch == 'x'])
64c544215bdfdfb43963cd73e91be75f4f98fe4e
460,001
def wrap_seq_for_applescript(seq): """Wrap a Python sequence in braces and quotes for use in AppleScript""" quoted = [f'"{item}"' for item in seq] joined = ', '.join(quoted) wrapped = '{' + joined + '}' return wrapped
61e38158a7a64bb69efa3b8c78e98324f5a7a5d1
155,312
def fvs(pv, i, n): """ This function is for the future value of money over a period of time using simple interest. pv = Initial Investment (1000) i = interest rate as decimal (.0675) n = the number of periods (20) Example: fvs(1000, .0675, 20) """ return pv * (1 + i * n)
84b4866f8a89a92634da1e842609512d8e3b0be2
185,186
import random def random_string(length, letters="abcdefghijklmnopqrstuvwxyz"): """ Returns a random string of length `length` from a set of letters. """ return "".join([random.choice(letters) for _ in range(length)])
05791734997e44159fda61fd62fb83ec2c9cc138
220,555
import time def retry(count=1, delay=0, retry_on=Exception): """ Decorator which tries to run the specified function again if the previous run ended with the given exception. Retry count and delay can also be specified. """ if count < 0 or delay < 0: raise ValueError('Count and delay have to be non-negative numbers.') def decorator(func): def wrapper(*args, **kwargs): tried = 0 while tried <= count: try: return func(*args, **kwargs) except retry_on: if tried >= count: raise if delay: time.sleep(delay) tried += 1 wrapper.__name__ = func.__name__ return wrapper return decorator
d0047841f62d4bc4903d6ef54e28c9588d0e5987
545,982
def make_ffmpeg_section_args( filename, start, length, *, before_options=(), options=(), ): """Returns a list of arguments to FFmpeg It will take the required amount of audio starting from the specified start time and convert them into PCM 16-bit stereo audio to be piped to stdout. The before_options argument will be passed after `-ss` and before `-i`, and the options argument will be passed after `-t` and before `pipe:1`. The returned args are of this form: -ss {start} -t {length} {before_options} -i {filename} -f s16le -ar 48000 -ac 2 -loglevel warning {options} pipe:1 """ if start is None: raise ValueError("start must be a float") if isinstance(before_options, str) or isinstance(options, str): # Strings are naughty. Force user to split them beforehand raise ValueError("FFmpeg options should be lists, not strings") return [ "-ss", str(start), "-t", str(length), *(before_options or ()), "-i", filename, "-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning", *(options or ()), "pipe:1", ]
4599368c2b44a51da1ea1c2935305a4423ae581d
662,184
def BFS(graph, s, t, parent): """ Populates parent with the BFS predecessor of each visited node and returns True if the sink t is reachable from the source s Args: graph: an array of arrays of integers where graph[a][b] = c is the max flow, c, between a and b. s: Source of the graph t: Sink or "end" of the graph parent: Array holding the predecessor of each visited node """ # Start with none of the nodes visited visited = [False] * len(graph) # Begin queue at source. Will hold all nodes yet to visit queue = [] queue.append(s) # "Visited" aka will visit source node visited[s] = True # While there are still nodes in queue while queue: # Current searching node u = queue.pop(0) # Check each possible connection for ind in range(len(graph[u])): # If that connection hasn't been visited and has capacity remaining if visited[ind] is False and graph[u][ind] > 0: # Add the connection to the queue of ones to search queue.append(ind) # Set it to being visited visited[ind] = True # Add the search to the parent parent[ind] = u return True if visited[t] else False
db01bf4893d29e7840e7a96738d8aefbd145f865
101,002
def get_attached_instance(self): """ Returns the Instance object the volume is mounted on. :param boto.ec2.volume.Volume self: Current volume :return: boto.ec2.instance.Instance if the volume is attached to an instance and the instance is found. None otherwise. :rtype: boto.ec2.instance.Instance """ if self.attachment_state() == 'attached': return self.connection.get_only_instances(self.attach_data.instance_id)[0] else: print("{} volume isn't attached to an instance".format(self)) return None
28fdb5cbc50a7aa071cacc07ad34e98ee744fc38
515,154
def sort_by_inclusion(array: list) -> list: """ Sorting by simple insertion. Idea: the array is split into two parts - sorted and unsorted. At each step the next element is taken from the unsorted part and "inserted" into the sorted part of the array. Worst-case time complexity: O(n^2) Average time complexity: O(n^2) Best-case time complexity: O(n) Space complexity: O(1) (*) The algorithm is a stable sort (only strictly greater elements are shifted, so equal elements keep their relative order). :param array: input array :return array: the input array, sorted in place """ n = len(array) for i in range(n - 1): # save the current element temp = array[i + 1] # shift elements greater than the current one j = i while j >= 0 and array[j] > temp: array[j + 1] = array[j] j -= 1 # insert the current element array[j + 1] = temp return array
296a222d0a24b3fc85d1b6da6aec3e42358dc912
645,754
def get(row, delimiter, indices): """Extract key and value from row. >>> get('a,b,c', ',', (0, 1)) (('a', 'b'), ('c',)) >>> get('a,b,c', ',', (0, 2)) (('a', 'c'), ('b',)) """ row = row.rstrip().split(delimiter) key = tuple(row[k] for k in indices) val = tuple(v for i, v in enumerate(row) if i not in indices) return key, val
96a55b94612d35423123ce0842cff32b184f1d08
147,496
import functools def union_of_non_none_sets(sets): """ Helper function, takes a list of [set or None] and returns the union of all non-None elements. """ return functools.reduce(lambda x, y: x.union(y), filter(lambda z: z is not None, sets), set())
666a71b8ed2e36f5060b900f3cd1140d8cd36f80
568,302
def get_primary_key_params(obj): """ generate a dict from a mapped object suitable for formatting a primary key logline """ params = {} for key in obj.__table__.primary_key.columns.keys(): params[key] = getattr(obj, key) return params
aad247b31925389bca21ef35fc6416c286587eee
696,243
from typing import Any from typing import Type def raise_exception_if_invalid_type( parameter_name: str, parameter_value: Any, expected_type: Type, ) -> None: """Raises a TypeError if the `parameter_value` given is not of the type `expected_type`; otherwise returns None""" if not isinstance(parameter_value, expected_type): raise TypeError(f"Expected `{parameter_name}` to be of type `{expected_type}`, but got type `{type(parameter_value)}`") return None
00932fd5f9da063ed0a8ee232d8fa13121837730
591,106
def b_delta(rewards,states,alpha): """ Implements the Rescorla-Wagner (delta) learning rule. V_initial is 0. Note: Null (0 or '0') states are silently skipped. Returns two dictionaries containing value and RPE timecourses, for each state. """ # Init s_names = set(states) V_dict = {} RPE_dict = {} for s in s_names: V_dict[s] = [0.] RPE_dict[s] = [] for r,s in zip(rewards,states): ## Skip terminal states if (s == 0) | (s == '0'): continue V = V_dict[s][-1] ## the Delta rule: RPE = r - V V_new = V + alpha * RPE ## Store and shift V_new to ## V for next iter V_dict[s].append(V_new) ## Store RPE RPE_dict[s].append(RPE) return V_dict, RPE_dict
ca8025307de097cdcc629141e944a0b91bcaf4bd
683,529
def update(input_ch, answer, dashed): """ This function update the progress when users make a right guess (string manipulation) :param input_ch: str, an alphabet users enter in this round :param answer: str, the final answer :param dashed: str, the dashed answer :return decrypted: str, the updated progress """ # Create an empty string decrypted = '' for i in range(len(answer)): # Find the position of character users guess right and update it if answer[i] == input_ch: decrypted += input_ch else: # Remain the part users already solved, and fill the unsolved part with dash sign if dashed[i] != '-': decrypted += dashed[i] else: decrypted += '-' return decrypted
5e2528fff524d6a4082b04998c9cda68380531b3
32,976
def SplitListByGrouping(list, x): """ Receive a list of items. If the length is longer than x, then split the list into several lists, each with a maximum of x elements. This is then returned as a 2D array. Args list: list of items in any format or mixed. e.g. ['ABC', 23590.34, 23] If a single string is inputted, then the string is split e.g. 'a235235' -> ['a23', '523', '5'] x: maximum length to be allowed for any single list. e.g. 100 Returns: outputList: list of items, where each item is a list of maximum length x. """ outputList = [] index = 0 while index < len(list): item = list[index : (index + x)] outputList.append(item) index += x return outputList
87866fe3a1e62ec0421080e20b6880335d84f18c
73,098
def median(x): """Find the median of a sample by sorting the sample and then returning the middle element.""" x = sorted(x) # Sort the sample n = len(x) middle = n // 2 if n % 2 == 0: # If there's an even number of elements, the median is an average of the two middle values return 0.5 * (x[middle - 1] + x[middle]) else: # If there's an odd number of elements, the median is the middle element return x[middle]
ddad017c23b5b8da540046505e1a79c289065954
248,477
def uppercase_initial(string): """ Return a capitalized string. :param string: Input string :type string: str :return: Capitalized input string :rtype: str >>> uppercase_initial("disableApiTermination") 'DisableApiTermination' """ capital = string[0].upper() return capital + string[1:]
6c7f82e76343cce7ef266e435b623ab62defab97
421,863
def get_point(values, pct): """ Pass in array values and return the point at the specified top percent :param values: array: float :param pct: float, top percent :return: float """ assert 0 < pct < 1, "percentage should be lower than 1" values = sorted(values) return values[-int(len(values)*pct)]
11474d804e0845284a864a5cb8de23299999a5e7
12,351
def spec_with_external_docs() -> str: """Helper for creating a specification object with externalDocs.""" _spec = """ openapi: 3.0.3 info: title: Swagger Petstore version: 1.0.0 paths: {} externalDocs: description: Find more info here url: https://example.com """ return _spec
fe425d3ee3185b3449c67b456c03abf401d39d7d
343,509
import struct def unpack_linkmsg(data): """For *LINK RTNETLINK messages, unpack the body Discards a number of uninteresting parts. Returns if_type, flags and the remainder of the message """ if_family, _, if_type, index, flags, change = struct.unpack("=BBHiII", data[:16]) # family == AF_UNSPEC. _ is a pad word. change is not used by kernels yet. return if_type, flags, data[16:]
7d84da34b3cb0fac1c03e757254436650455d74b
617,064
def train_valid_test_split(df, train_proportion): """Splits the dataframe into train/valid/test. Train will consist of `train_proportion` many of the rows, valid and test split the rest evenly. """ train_size = int(train_proportion * len(df)) valid_size = (len(df) - train_size) // 2 train = df[:train_size] valid = df[train_size:train_size + valid_size] test = df[train_size + valid_size:] return train, valid, test
a6cec5bec2fa18a3a7d766ef5619d95a9dddf426
315,142
import math def _is_close(a, b, alpha=0.01): """ Check if two blobs are close to one another :param a: first blob. This is larger than b. :param b: second blob. This is smaller than a. :param alpha: The amount of overlap allowed between blobs :returns: if blobs are close """ if alpha < 0 or alpha > 1: raise ValueError("Value of alpha must be between 0 and 1.") ay, ax, ar = a by, bx, br = b d = math.sqrt((ax - bx)**2 + (ay - by)**2) return d <= ar - br*alpha
44b960303d5612895728bb939a0be28dd063f5b1
596,397
def get_lower_bound(floors, loc): """ Computes a lower bound for the number of moves required to move all chips and generators to the top. The optimal strategy is to move to the bottom layer (taking one item to power the elevator), then move all items up to the next floor (move two items up then one item down until two remain then move both up together). This process repeats until everything is on the top floor. This lower bound doesn't take into account other compatibility conditions such as valid combinations of items in elevators / on floors. """ def add_min_moves(count): # number of moves required to move `count` items up to the next floor if count <= 2: # if 2 items or fewer, can do it in a single move return 1 # otherwise we require two moves per item (take two up, move one back # down) except the last two which require only one move return 2 * (count - 2) + 1 # initialise lower bound lower_bound = 0 # count the number of items on each floor counts = [len(floor) for floor in floors] # find out which is the lowest floor with items first_non_empty = [count > 0 for count in counts].index(True) if loc < first_non_empty: # if we start below the first floor with items, we don't have anything # to power the elevator! raise ValueError( "You need an item on the current floor to power the elevator" ) elif loc > first_non_empty: # we move down to the lowest non-empty floor, taking one item with us lower_bound += loc - first_non_empty counts[loc] -= 1 counts[first_non_empty] += 1 cumulative_count = 0 for count in counts[:-1]: # for each floor except the last, we move all items to the next floor cumulative_count += count lower_bound += add_min_moves(cumulative_count) return lower_bound
1ce7c5e58727d14172a9c74e4b8e72db836ce255
242,345
def get_next_counts(file): """ read next line in file, extract counts from line :param file: given file, already opened :return: chromosome, position and counts """ new_line = file.readline() line = new_line.decode() line = line.strip('\n').split('\t') if len(line) == 11: chrom = line[0] pos = int(line[1]) counts = [int(line[10]), int(line[9]), int(line[8]), int(line[7])] return chrom, pos, counts else: # end of file return -1, -1, []
c7c597451a4c24397ee596a0f796892a67922187
158,332
import textwrap def indent(lines, spaces=4): """Indent `lines` by `spaces` spaces. Parameters ---------- lines : Union[str, List[str]] A string or list of strings to indent spaces : int The number of spaces to indent `lines` Returns ------- indented_lines : str """ if isinstance(lines, str): lines = [lines] text = '\n'.join(lines) return textwrap.indent(text, ' ' * spaces)
46e7ad188b634a2899fcd8c97ad47e48e6609060
481,144
def choose_ncv(k): """ Choose number of lanczos vectors based on target number of singular/eigen values and vectors to compute, k. """ return max(2 * k + 1, 20)
e6fdf5a39979dacfda8ee897f8bc274e0574923d
560,128