Dataset columns:
content: string (lengths 39 to 9.28k)
sha1: string (length 40)
id: int64 (8 to 710k)
from typing import Sequence from typing import Tuple def my_quat_conjugate(q: Sequence[float]) -> Tuple[float,float,float,float]: """ "invert" or "reverse" or "conjugate" a quaternion by negating the x/y/z components. :param q: 4x float, W X Y Z quaternion :return: 4x float, W X Y Z quaternion """ return q[0], -q[1], -q[2], -q[3]
821e96dff3e65f4f578bb1ad626bac91a06f7401
686,126
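A short usage sketch for my_quat_conjugate above; the sample quaternion is illustrative only.
# Conjugation negates the vector (x, y, z) part and keeps w.
q = (0.5, 0.5, 0.5, 0.5)
print(my_quat_conjugate(q))  # (0.5, -0.5, -0.5, -0.5)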
def _decode(std): """Decodes the bytes-like output of a subprocess in UTF-8. This private function is wrapped in :func:`call_commandline()`. Args: std (bytes-like): The ``stdout`` or ``stderr`` (or whatever) of a subprocess. Returns: A list of decoded strings. Example: >>> _decode([bytes('This is a test.', encoding='utf-8')]) ['This is a test.'] """ return [line.decode('utf-8').replace('\n', '') for line in std]
6029c9e02b8ee8a4b75ad9dbdfc75b7b45d1d9c2
449,311
def _coerce_params(params): """Force xgboost parameters into appropriate types The output from hyperopt is always floats, but some xgboost parameters explicitly require integers. Cast those as necessary Parameters ---------- params : dict xgboost parameters Returns ------- dict Input parameters coerced as necessary """ types = { 'max_depth': int, 'max_bin': int, 'num_class': int, 'silent': int, } retval = params.copy() for (k, val_type) in types.items(): if k in params: retval[k] = val_type(params[k]) return retval
654fa68acb26e988b83b7d8835ef81be055d4cb2
565,018
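A usage sketch for _coerce_params above; the parameter values are made up for illustration.
# hyperopt hands back floats; integer-only xgboost parameters are cast back to int.
params = {'max_depth': 6.0, 'eta': 0.3, 'num_class': 3.0}
print(_coerce_params(params))  # {'max_depth': 6, 'eta': 0.3, 'num_class': 3}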
import time from datetime import datetime def get_current_time(date_and_time=True, reverse_date=False): """ Returns current time :param date_and_time: bool, Whether to return only the time or the time and date :param reverse_date: bool, Whether to return date with {year}-{month}-{day} format or {day}-{month}-{year} format :return: str """ mtime = time.time() date_value = datetime.fromtimestamp(mtime) hour = str(date_value.hour) minute = str(date_value.minute) second = str(int(date_value.second)) if len(hour) == 1: hour = '0' + hour if len(minute) == 1: minute = '0' + minute if len(second) == 1: second = '0' + second time_value = '{}:{}:{}'.format(hour, minute, second) if not date_and_time: return time_value else: year = date_value.year month = date_value.month day = date_value.day if reverse_date: return '{}-{}-{} {}'.format(year, month, day, time_value) else: return '{}-{}-{} {}'.format(day, month, year, time_value)
35431d8807544f15dbf86a7097393851a13c154f
198,749
def inverse_Z26(integer): """Calculate the inverse of an integer in Z-26, avoiding a ValueError if the inverse doesn't exist \nPARAMETERS\n integer: input integer for inversion \nRETURNS\n inverse: the inverse of integer in Z-26 (or None if ValueError) """ inverse = None try: inverse = pow(integer, -1, 26) except ValueError: print(f"Value of 'a' -> {integer} is Non-Invertible in Z-26.") return inverse
a90d71e2ff06f6871f51ad0cb10a06f22fe496bb
470,914
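A usage sketch for inverse_Z26 above; note that the three-argument pow with exponent -1 it relies on requires Python 3.8 or later.
print(inverse_Z26(7))   # 15, since 7 * 15 = 105 = 4 * 26 + 1
print(inverse_Z26(13))  # prints the non-invertible message and returns None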
def size_batch(F_vol, tau_reaction, tau_cleaning, N_reactors, V_wf) -> dict: """ Solve for batch reactor volume, cycle time, and loading time. Parameters ---------- F_vol : float Volumetric flow rate. tau_reaction : float Reaction time. tau_cleaning : float Cleaning in place time. N_reactors : int Number of reactors. V_wf : float Fraction of working volume. Returns ------- dict * 'Reactor volume': float * 'Batch time': float * 'Loading time': float Notes ----- Units of measure may vary so long as they are consistent. The loading time can be considered the cycle time. """ # Total volume of all reactors, assuming no downtime V_T = F_vol * (tau_reaction + tau_cleaning) / (1 - 1 / N_reactors) # Volume of an individual reactor V_i = V_T/N_reactors # Time required to load a reactor tau_loading = V_i/F_vol # Total batch time tau_batch = tau_reaction + tau_cleaning + tau_loading # Account for excess volume V_i /= V_wf return {'Reactor volume': V_i, 'Batch time': tau_batch, 'Loading time': tau_loading}
a0afd80babe24c1c440f5af262090c98b4e3592d
459,888
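A usage sketch for size_batch above; the flow rate, times, and reactor count are illustrative numbers in consistent units, as the docstring requires.
result = size_batch(F_vol=100.0, tau_reaction=8.0, tau_cleaning=1.0,
                    N_reactors=4, V_wf=0.9)
# Reactor volume ~333.3, Batch time 12.0, Loading time 3.0
print(result)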
from collections import Counter def list_duplicates(elements): """ Return list of elements that appear more than once in the input elements. """ return [item for item, count in Counter(elements).items() if count > 1]
dbaf179868680e3472b6b77e502f2d2657ee3b7f
379,404
def tick(frameNum, pts, beak, birds): """ Update function for animation """ birds.tick(frameNum, pts, beak) return pts, beak
e22fcad89d00ed042d94a3c6cb709395ab7bc21f
611,476
def floyd(graph): """Floyd-Warshall algorithm for finding shortest path between all vertices in a weighted graph. Args: graph: Weighted graph, directed or undirected but may not contain negative cycles. Returns: Dictionary of dictionaries where d[source][dest] is distance between two vertices. In case there's no path between vertices the distance is float('inf'). """ default = {'weight': float('inf')} res = {x: {y: graph[x].get(y, default)['weight'] if x != y else 0 for y in graph.vertices} for x in graph.vertices} for k in graph.vertices: for i in graph.vertices: for j in graph.vertices: res[i][j] = min(res[i][j], res[i][k] + res[k][j]) return res
b815abcf55f78bd5eba18c355043106a13d0fe8f
244,835
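floyd above expects a graph object that exposes a vertices attribute and per-vertex adjacency mappings of the form {'weight': ...}; the tiny stand-in class below is an assumption about that interface, used only to show the call.
class TinyGraph:
    """Minimal undirected weighted graph matching the interface floyd() uses."""
    def __init__(self, edges, vertices):
        self.vertices = vertices
        self._adj = {v: {} for v in vertices}
        for u, v, w in edges:
            self._adj[u][v] = {'weight': w}
            self._adj[v][u] = {'weight': w}
    def __getitem__(self, v):
        return self._adj[v]

g = TinyGraph([('a', 'b', 1), ('b', 'c', 2), ('a', 'c', 10)], ['a', 'b', 'c'])
print(floyd(g)['a']['c'])  # 3, going through 'b' instead of the direct weight-10 edge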
def find_first_undefined_cell(grid): """Returns the r,c of the first undefined cell""" for x,row in enumerate(grid): for y,vy in enumerate(row): if vy < 0: return x, y return -1, -1
598421adb3f7ec40ef4b61d29ec282ae0605185b
511,453
def sdf_count(file_obj): """Count the number of molecules in an SDF file. Counts the number of times '$$$$' occurs at the start of lines in the file. Parameters ---------- file_obj : file-like object Returns ------- int The number of molecules in the file. """ return sum(1 for line in file_obj if line[:4] == b'$$$$')
c0e27ba9166518b722c7d353e4e7c23a14bdc1dd
224,394
def _get_responses(link): """ Returns minimally acceptable responses object based on action / method type. """ template = {'description': ''} if link.action.lower() == 'post': return {'201': template} if link.action.lower() == 'delete': return {'204': template} return {'200': template}
ae98945891c5b686caf2ff8405deb51b714b8b9b
479,453
def has_glyphs(word, available_glyphs_string): """ Are all the chars in word in available_glyphs_string? Example: >>> has_glyphs("speaker", "sperk") False >>> has_glyphs("speaker", "aekrps") True >>> has_glyphs("dog,cat", "dogcat") False """ if available_glyphs_string: return all(char in available_glyphs_string for char in word) else: return True
fcd1dfd402f32569145ee395672806551b0998be
461,571
def list_chunk_gen(lst, size=1000): """Given list, generate chunks <= size""" n = max(1, size) return (lst[k:k+n] for k in range(0, len(lst), n))
7e78c166708999a761f0e729261581e4fe2e020a
537,229
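A usage sketch for list_chunk_gen above.
chunks = list_chunk_gen(list(range(7)), size=3)
print(list(chunks))  # [[0, 1, 2], [3, 4, 5], [6]]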
import csv def read_param_file(filename, delimiter=None): """Unpacks a parameter file into a dictionary Reads a parameter file of format:: Param1,0,1,Group1,dist1 Param2,0,1,Group2,dist2 Param3,0,1,Group3,dist3 (Group and Dist columns are optional) Returns a dictionary containing: - names - the names of the parameters - bounds - a list of lists of lower and upper bounds - num_vars - a scalar indicating the number of variables (the length of names) - groups - a list of group names (strings) for each variable - dists - a list of distributions for the problem, None if not specified or all uniform Arguments --------- filename : str The path to the parameter file delimiter : str, default=None The delimiter used in the file to distinguish between columns """ names = [] bounds = [] groups = [] dists = [] num_vars = 0 fieldnames = ['name', 'lower_bound', 'upper_bound', 'group', 'dist'] with open(filename, 'r') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter) csvfile.seek(0) reader = csv.DictReader( csvfile, fieldnames=fieldnames, dialect=dialect) for row in reader: if row['name'].strip().startswith('#'): pass else: num_vars += 1 names.append(row['name']) bounds.append( [float(row['lower_bound']), float(row['upper_bound'])]) # If the fourth column does not contain a group name, use # the parameter name if row['group'] is None: groups.append(row['name']) elif row['group'] == 'NA': groups.append(row['name']) else: groups.append(row['group']) # If the fifth column does not contain a distribution # use uniform if row['dist'] is None: dists.append('unif') else: dists.append(row['dist']) if groups == names: groups = None elif len(set(groups)) == 1: raise ValueError('''Only one group defined, results will not be meaningful''') # setting dists to none if all are uniform # because non-uniform scaling is not needed if all([d == 'unif' for d in dists]): dists = None return {'names': names, 'bounds': bounds, 'num_vars': num_vars, 'groups': groups, 'dists': dists}
e2cc1f4d5b2256c3e06a101d1898cdd313780459
357,035
def dense(x, w, b, phi): """Computes a dense layer operation on the input data. This function implements a single dense layer for a neural network. The input data is dotted with the weights vector. The bias is added to the output. Finally, the activation function is run over the data and the result is returned. Parameters ---------- x : numpy.ndarray Input data. w : numpy.ndarray Weights vector. b : numpy.ndarray Bias vector. phi : function Activation function to apply to the output. Returns ------- numpy.ndarray Output of the dense layer. """ return phi(x @ w + b)
994c5ce4a56a665d2c43043f97326c0630319dd1
475,892
from typing import List def s3_public_mapper(filename: str, attachments: List) -> str: """Get public S3 filename for requested attachment """ # Find requested attachment from post attachments attachments_ = [ attachment for attachment in attachments if attachment.original_filename == filename ] if not attachments_: return filename return attachments_[0].s3_full_url
156abf9d3d2d17dfb0001f23586304aa7fcf3958
477,431
def _embed_initial_state(initial_state, embedding, qubits): """Embed the states provided by the initial_state parameter used for reverse annealing. Args: initial_state (list of lists): Logical initial state as it would be passed to SAPI for reverse annealing. embedding (dict): The embedding used to embed the initial state. Maps logical indices to chains. qubits (list): A list of qubits on the target topology. Returns (list of lists): The initial_state, embedded according to the provided embedding. """ # Initialize by setting all qubits to 1 (these will be overwritten for active qubits). embedded_state = {q: 1 for q in qubits} for logical_idx, logical_value in initial_state: # Iterate through the logical qubit, state pairs. for embedded_idx in embedding[logical_idx]: # For each embedded qubit in the corresponding chain... embedded_state[embedded_idx] = int(logical_value) # make the embedded state equal to the logical state. # Convert dictionary to a list of lists. embedded_state_list_of_lists = [[q_emb, embedded_state[q_emb]] for q_emb in sorted(embedded_state.keys())] return embedded_state_list_of_lists
0c5b3722421288674efceba743a55c45d90ac7d4
95,666
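A usage sketch for _embed_initial_state above; the embedding, qubit list, and spin values are invented for illustration.
initial_state = [[0, -1], [1, 1]]   # logical qubit 0 -> -1, logical qubit 1 -> +1
embedding = {0: [10, 11], 1: [12]}  # logical 0 is a chain of physical qubits 10 and 11
qubits = [10, 11, 12, 13]           # physical qubit 13 is unused and stays at the default 1
print(_embed_initial_state(initial_state, embedding, qubits))
# [[10, -1], [11, -1], [12, 1], [13, 1]]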
from typing import Type from typing import Iterable def _mro(type_: Type) -> Iterable[type]: """Fetch the MRO for a type.""" # The MRO is calculated at class creation time and set to `__mro__`. # https://github.com/python/cpython/blob/v3.8.3/Objects/typeobject.c#L5338 # The `mro()` method is only called during creation in case a type overrides # MRO calculation. return type_.__mro__
fed3430843116335729b56343a0126c916e037a8
378,058
def gGetCoordinateSystem (view): #<a name="gGetCoordinateSystem"</a>[<a href="g.html#gGetCoordinateSystem">Doc</a>] """Returns the normal values associated with this view as a tuple (x1, y1, x2, y2, corner) where x1 and x2 are the left and right values and y1 and y2 are the top and bottom values and corner specifies which corner values are relative to""" if view.csLeft <= view.csRight: if view.csBottom <= view.csTop: corner = 'lowerLeft' else: corner = 'upperLeft' elif view.csBottom <= view.csTop: corner = 'lowerRight' else: corner = 'upperRight' return min(view.csLeft, view.csRight), min(view.csTop, view.csBottom), \ max(view.csRight, view.csLeft), max(view.csTop, view.csBottom), \ corner
7a9dfa8906fe26bc469033bd0cef2ccaa737eef1
286,340
def EXACT(string1, string2): """ Tests whether two strings are identical. Same as `string1 == string2`. >>> EXACT("word", "word") True >>> EXACT("Word", "word") False >>> EXACT("w ord", "word") False """ return string1 == string2
d6feb4c40bc93fa1ec5426f394a47c5d42c21dfd
39,097
import configparser import ast def extract_config_from_cfg(cfg_path): """Extract input data from *.cfg file. Parameters ---------- cfg_path : Path Config file to read from. Returns ------- dict Config keywords: python objects pairs. """ # Start parser engine and read cfg file. cfg = configparser.ConfigParser() cfg.read(cfg_path) # Gather all input variables and merge them in one dict. input_data = ({k.lower(): v for k, v in cfg.items(i)} for i in cfg.sections()) config_dict = {} for i in input_data: config_dict.update(i) # Try to convert variables to Python objects; fall back to the raw string when the value is not a valid Python literal. output_data = {} for k, value in config_dict.items(): try: output_data[k] = ast.literal_eval(value) except (ValueError, SyntaxError, TypeError): output_data[k] = value return output_data
eda496688384f681659d70cc19de25b01307b1ef
528,926
def full_community_name(community): """Combine community name with alt_name if it exists""" if "alt_name" in community: return "{0} ({1})".format(community["name"], community["alt_name"]) return community["name"]
f2c63387ced80b37c14d79b0974c1c2a11b520ee
565,410
def calc_vehicle_offset(undist, left_fit, right_fit): """ Calculate vehicle offset from lane center, in meters """ # Calculate vehicle center offset in pixels bottom_y = undist.shape[0] - 1 bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2] bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2] vehicle_offset = undist.shape[1]/2 - (bottom_x_left + bottom_x_right)/2 # Convert pixel offset to meters xm_per_pix = 3.7/700 # meters per pixel in x dimension vehicle_offset *= xm_per_pix return vehicle_offset
b70e0f50381c3fca4c609c248a2c9e46488ccbf9
533,489
def check_replied_to(comment): """Check if the bot has already replied to a post """ with open("replied_ids.txt", "r") as f: replied_ids = set(f.read().splitlines()) # check if comment already replied to return comment.id in replied_ids
632813fd59ab1f1061d8c0aed7481a053c820864
131,178
def get_approving_reviewers(props): """Retrieves the reviewers that approved a CL from the issue properties with messages. Note that the list may contain reviewers that are not committer, thus are not considered by the CQ. """ return sorted( set( message['sender'] for message in props['messages'] if message['approval'] and message['sender'] in props['reviewers'] ) )
3153b1c3e50292f7f5aace70843d3f910f4fefb8
623,536
from typing import Any import decimal def reduce_decimal(num: Any, prec: int = 2) -> str: """Return a given number with precision or cut trailing zeros if possible. :param num: Number given, anything that can be turned into a decimal. :param prec: given precision. :return: Number with at most the given precision. """ dec = decimal.Decimal(f"{num:.{prec}f}").normalize() return str(dec)
9fb93ef56fbe5684576ec0b9ac03480e4e6b969a
555,714
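A usage sketch for reduce_decimal above.
print(reduce_decimal(3.10, 2))  # '3.1'  - trailing zero dropped by normalize()
print(reduce_decimal(2.71828))  # '2.72' - rounded to the default precision of 2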
def un_div(text): """Remove wrapping <div...>text</div> leaving just text.""" if text.strip().startswith("<div ") and text.strip().endswith("</div>"): text = text.strip()[:-6] text = text[text.index(">") + 1:].strip() return text
69d13c7f5525d58a71b835a3a7010613e3ba2445
430,332
def create_accdata3(packer, enabled, fcw_status, fcw_sensitivity, chevrons, gap): """Creates a CAN message for ACCDATA_3""" values = { "FdaMem_B_Stat": 1, "AccMemEnbl_B_RqDrv": 1, "FcwMemStat_B_Actl": fcw_status, "FcwMemSens_D_Actl": fcw_sensitivity, "AccFllwMde_B_Dsply": chevrons, "AccTGap_B_Dsply": gap } return packer.make_can_msg("ACCDATA_3", 0, values)
11885b50c8353144699f4e0ef7394f9e84053b25
226,469
def gama_to_vswr(gama): """Calculate VSWR from gama, the reflection coefficient (Γ).""" vswr = abs((1 + abs(gama))/(1-abs(gama))) return vswr
29649a4c18e56a2daeb5cbb091fd43e3122dfefa
318,782
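A usage sketch for gama_to_vswr above.
print(gama_to_vswr(0.5))   # 3.0, i.e. (1 + 0.5) / (1 - 0.5)
print(gama_to_vswr(-0.5))  # 3.0 as well - only the magnitude of gamma matters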
def update_mov_avg(**kwargs): """ Function to iteratively calculate moving average with a given window""" kwargs['cont'].appendleft(kwargs['value']) new_mean = sum(kwargs['cont']) / len(kwargs['cont']) return new_mean
c26a17959be7461187853dc9c7a772406e18baf9
203,442
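update_mov_avg above assumes 'cont' is a container with appendleft, such as a collections.deque with a fixed maxlen acting as the moving window; a sketch under that assumption:
from collections import deque

window = deque(maxlen=3)
for value in [10, 20, 30, 40]:
    print(update_mov_avg(cont=window, value=value))  # 10.0, 15.0, 20.0, then 30.0 once 10 falls out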
from datetime import datetime import json def generate_event(file_name, datetime=datetime): """Function to generate json with a timestamp and filname headers. """ return json.dumps({'timestamp': datetime.now().isoformat(), 'filename': file_name})
d546f28902ebbc98d81031740c8c2368e5aa7baa
44,382
def get_has_feature(sentences, features): """ Parameters ---------- sentences: list of strs, The info text of a mushroom species split into sentences. features: list of strs Key words ["bruis", "bleed"] for attribute does-bruise-or-bleed Return ------ list of str, ['t'] if one of feature in features is in one of the sentences and else ['f'] (dataset encoded boolean) """ for sentence in sentences: sentence = sentence.lower() for feature in features: if feature in sentence: return ['t'] return ['f']
c2d53e59b21573ea0b95009d34640e86f66c8a6a
605,403
def attribute_search(lb_resource, attr_name): """This helper method will recursively walk up the slb tree starting from the provided lb_resource and find an attribute with the provided name. Though objects like pool reference a listener and loadbalancer, the following discrete search orders are used: 1: member -> pool -> listener -> loadbalancer 2: healthmonitor -> pool -> listener -> loadbalancer :param lb_resource: An slb data model :param attr_name: String name of the attribute to search for :return: Returns the requested attribute value or None """ if hasattr(lb_resource, attr_name): return getattr(lb_resource, attr_name) elif hasattr(lb_resource, 'pool'): return attribute_search(lb_resource.pool, attr_name) elif hasattr(lb_resource, 'listener'): return attribute_search(lb_resource.listener, attr_name) elif hasattr(lb_resource, 'load_balancer'): return attribute_search(lb_resource.load_balancer, attr_name) return None
902707c5a52f8c5fdbe3bc5e44590626bf264959
336,002
def _CheckBarcode(barcode): """Check whether the UPC-A barcode was decoded correctly. This function calculates the check digit of the provided barcode and compares it to the check digit that was decoded. Args: barcode(string): The barcode (12-digit). Returns: (bool): True if the barcode was decoded correctly. """ if len(barcode) != 12: return False r1 = range(0, 11, 2) # Odd-position digits r2 = range(1, 10, 2) # Even-position digits except the check digit dsum = 0 # Sum the odd-position digits for i in r1: dsum += int(barcode[i]) # Multiply the sum by 3 dsum *= 3 # Add the even-position digits, excluding the check digit (12th digit) for i in r2: dsum += int(barcode[i]) # Get the modulo 10 dsum = dsum % 10 # If not 0 subtract from 10 if dsum != 0: dsum = 10 - dsum # Compare result and check digit return dsum == int(barcode[11])
e46eacb16ddf833ea46cb6e2917ae37fbd96b14a
551,817
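A usage sketch for _CheckBarcode above, using a well-known valid UPC-A code and a deliberately corrupted copy.
print(_CheckBarcode("036000291452"))  # True  - check digit 2 matches the computed value
print(_CheckBarcode("036000291453"))  # False - last digit altered, checksum no longer matches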
def get_grouping_from_another_yang_model(yang_model_name: str, conf_mgmt) -> list: """ Get the YANG 'grouping' entity Args: yang_model_name - YANG model to search conf_mgmt - reference to ConfigMgmt class instance, it have yJson object which contain all parsed YANG models Returns: list of 'grouping' entities """ ret_grouping = list() for yang_model in conf_mgmt.sy.yJson: if (yang_model.get('module').get('@name') == yang_model_name): grouping = yang_model.get('module').get('grouping') if isinstance(grouping, list): ret_grouping.extend(grouping) else: ret_grouping.append(grouping) return ret_grouping
31d6fcfbafe4e8c7a0da73a0909b33017976c602
649,102
def undersampled(semibmaj, semibmin): """ We want more than 2 pixels across the beam major and minor axes. :param Semibmaj/semibmin: describe the beam size in pixels :returns: True if beam is undersampled, False otherwise """ return semibmaj * 2 <= 1 or semibmin * 2 <= 1
45f505b24cc79e1d33a595251fb5150f82354878
548,796
import collections def build_node_statistics(nodes, images): """Build a dictionary of cache statistics about a group of nodes.""" # Generic statistics applicable to all groups of nodes. node_statistics = { 'provisioned': len(list(filter(lambda n: n.provisioned, nodes))), 'not provisioned': len(list(filter(lambda n: not n.provisioned, nodes))), 'available (not cached)': len(list(filter(lambda n: n.can_cache(), nodes))), 'cached (includes \'caching\')': len(list(filter(lambda n: n.cached and not n.provisioned, nodes))), 'total': len(nodes), 'images': collections.defaultdict(lambda: 0) } image_names_by_uuid = {image.uuid: image.name for image in images} # Build statistics around which images are cached. for node in nodes: if (node.cached and not node.provisioned and node.cached_image_uuid is not None): # If we don't know the name of the image, just return the UUID. image_name = image_names_by_uuid.get(node.cached_image_uuid, node.cached_image_uuid) node_statistics['images'][image_name] += 1 return node_statistics
6f1b6a7128a088168f3d31054458275d5f13df53
8,296
def get_substring_performance_score(probabilities, substring_seq): """ Compute performance for a given substring -- precision, recall, F1-Score, and Exact Match. params: probabilities (list): List of probabilities substring_seq (tuple): start and end index (both inclusive) Returns: float: performance metrics: precision, recall, F1-Score, and Exact Match. """ # sum of all probabilities sum_of_all_probabilities = sum(probabilities) # probability substring substring_probabilities = probabilities[substring_seq[0]:substring_seq[1]] # sum of probabilities of substring sum_of_substring_probabilities = sum(substring_probabilities) # sum of indicator random variables num_of_indicator_rv = len(substring_probabilities) precision = sum_of_substring_probabilities / num_of_indicator_rv recall = sum_of_substring_probabilities / sum_of_all_probabilities # calculating expected ef1 score if precision + recall != 0: f1_score = (2 * precision * recall) / ( precision + recall) else: # handling of division by 0 f1_score = 0 em = 0 if sum_of_all_probabilities == sum_of_substring_probabilities: em = 1 return precision, recall, f1_score, em
e37a63e11774efb1d2956e23097d2646b3254b8e
307,202
def model_to_dict(instance, fields=None, exclude=None): """ Return a dict containing the data in instance. This differs from django.forms.model_to_dict by containing non-editable fields and relation values being the related objects instead of pk of those objects. :param instance: Model instance :param fields: list of fields in dict :param exclude: list of fields not in dict :return: dictionary of instance fields and values """ data = {} for field in instance._meta.fields: if fields and field.name not in fields: continue if exclude and field.name in exclude: continue data[field.name] = getattr(instance, field.name) return data
bf08b6eead47ede0923c76ac984219490f44dc91
628,042
def get_node_text(node): """ Return the text content of an xml.dom Element Node. If node does not have content, this function return an empty string. """ text = '' node.normalize() if node.firstChild and node.firstChild.data: text = node.firstChild.data.strip() return text
9ed3d2161f1c00b4b657c6b08504679739d525a9
300,746
def _get_source_op_slices(op_slices, op_reg_manager): """Returns list of OpSlice that are sources. Args: op_slices: List of OpSlice. op_reg_manager: OpRegularizerManager to keep track of slicing. Returns: List of OpSlice that are sources. """ op_groups = [op_reg_manager.get_op_group(op_slice) for op_slice in op_slices if op_reg_manager.get_op_group(op_slice) is not None] return list(set([source_op_slice for op_group in op_groups for source_op_slice in op_group.source_op_slices]))
fd496e1c52606004e34aeae54f6bbf36886c72af
492,234
def is_an_url(link): """Returns true if the given string is a URL""" if( link.startswith("http://") or link.startswith("https://") or link.startswith("www.") ): return True else: return False
1914b7499ab01d6f3056a913e3d991a9c11feb89
330,714
def copy_docstring(other): """ Decorator that sets ``__doc__`` to ``other.__doc___``. """ def wrapper(func): func.__doc__ = other.__doc__ return func return wrapper
b28c47b59325661bb4dff66d7f9c2cb5915760f2
348,869
def normalize(position): """ Accepts `position` of arbitrary precision and returns the block containing that position. Parameters ---------- position : tuple of len 3 Returns ------- block_position : tuple of ints of len 3 """ x, y, z = position x, y, z = (int(round(x)), int(round(y)), int(round(z))) return (x, y, z)
cd927de82ffacd9a97f083c5a1937160422b713c
466,844
import re def string_to_list(option): """Convert string to list. List may be enclosed in [], {}, (), or not enclosed. List entries must be comma delimited. Args: option (str) String to convert. Returns: (list) List of strings. """ PATTERN = r"[\{\[\(](.*)[\}\]\)]" match = re.search(PATTERN, option, re.DOTALL) optionStr = match.groups()[0] if match else option return [name.strip() for name in optionStr.split(",")]
78c3e82b845a73b354f4840393a3a2d3a511c30a
633,965
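A usage sketch for string_to_list above.
print(string_to_list("[alpha, beta, gamma]"))  # ['alpha', 'beta', 'gamma']
print(string_to_list("x, y"))                  # ['x', 'y'] - works without enclosing brackets too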
def bag_organizer(bag_set, bag_sizes): """ Sorts bags by magnitude, pads, and concactenates into one feature list Parameters ----------- bag_set : dict dictionary filled with all of the current molecules information bag_sizes : dict dictionary of the largest bag sizes in the dataset Returns -------- feat_list : list sorted and padded feature list of the current molecule """ feat_list = [] bag_keys = list(bag_set.keys()) for i in range(len(bag_keys)): # grab the size of the largest bag and length of current molecule bag size = bag_sizes[bag_keys[i]] + 1 baglen = len(bag_set[bag_keys[i]]) if baglen > (size - 1): raise Exception( '{}-bag size is too small. Increase size to {}.'.format(bag_keys[i], baglen)) pad = size - baglen # sort the bag by magnitude and pad with zeros to make all same length bag_set[bag_keys[i]] = sorted(bag_set[bag_keys[i]], reverse=True) bag_set[bag_keys[i]].extend([0.] * pad) feat_list.append(bag_set[bag_keys[i]]) return feat_list
e611cfeff8c5ca0a7dedbd11f18201e7b5ceb627
287,837
def p_maxlh(n,k): """ Calculates the maximum likelihood estimator for the kth sample of n total samples. Args: n: the number of samples from the random variable, X k: the hierarchical position of the sample when ordered k in [0,n) Returns: the maximum likelihood estimator, p_hat, such that Lhood(Prob(X < Xk) = p_hat) is maximized Raises: N/A """ return (k+.5) / n
29d709124b6bd8d86ef01cb7603297b8cfc774c5
647,063
def get_year_bin(year, year_bins): """ Returns the bin containing the given year. Intended for small lists. Parameters: ----------- year: int The current simulation year. year_bins: list List of years. Returns: -------- The year bin that contains the provided year. """ year_bins = sorted(year_bins) first_bin = year_bins[0] if year is None or year <= first_bin: return first_bin idx = -1 for y_bin in year_bins: if year < y_bin: break idx += 1 return year_bins[idx]
113fef44d232c1a0e32e37f658572b5c5aaa53e1
559,734
def get_profile_image(user_id: int): """Return url of profile image of user.""" return f"http://s.ppy.sh/a/{user_id}"
7195654388a9325efbc1c7c3e47dcd7b9ebc79f8
435,959
def VSR_script_time(doy,h,m,s): """This creates a timestamp such as VSR script files use. Example: In [3]: VSR_script_time(101,3,25,45) Out[3]: '101/03:25:45'""" return ("%03d" % doy)+'/'+("%02d" % h)+':'+("%02d" % m)+':'+("%02d" % s)
b745aa70436df896e8dd035aee7b736e76a77fb7
505,609
import inspect def str2type_name(str2type_function): """ Return the function name of the function returned from str2type, or any function obtained by the type() operation. Useful when reporting conversion str-to-value errors. """ s2t = str2type_function if "<type '" in str(s2t): name = str(s2t).split("'")[1] # <type 'int'> elif inspect.isclass(s2t): name = s2t.__name__ elif inspect.isfunction(s2t): name = s2t.__name__ else: name = str(s2t) return name
324e64a6108433ee2646bfe5f9035af62e175b73
551,036
def get_frame_durations(sentences, timestamps): """Given a list of sentences and timestamps of all words, return timestamps of each sentence Args: sentences (list): List of sentences timestamps (list): [ [<word_string>, <start_time_float>, <end_time_float>], ... ] Returns: list: List of tuples ( <sentence> , <end_time_millisecond> ) """ i = 0 durations = [] previous = None for j, sentence in enumerate(sentences): words = sentence.split() word_timestamps = [] for word in words: if word in timestamps[i][0]: word_timestamps.append( (word, timestamps[i][1], timestamps[i][2])) i += 1 if j == 0: duration = int(word_timestamps[-1][2] * 1000) else: duration = int((word_timestamps[-1][2] - previous)*1000) previous = word_timestamps[-1][2] durations.append((sentence, duration)) return durations
7063e2d5df87c430384cd506b27671bf64ffcf2e
658,274
def part2graph(parts): """Creates a graph (2d dict) that represents the partitioning note: items in 'parts' should not be integers, (usually strings or objects) """ # create graph i = 0 vertices = {} for part in parts: for item in part: vertices.setdefault(item, {})[i] = 1 vertices.setdefault(i, {})[item] = 1 i += 1 return vertices
387fd22b71c9badc96b889c4cda5ea8fe25d9ec9
140,643
def is_on(current_source): """Query the current source as on or off. args: current_source (pyvisa.resources.gpib.GPIBInstrument): Keithley 6221 returns: (bool): True (on) or False (off) """ result = current_source.query("output?") if float(result.replace("\n", "")) == 1: return True else: return False
3311b787282914298db015daf3bc8420ad4a3d53
354,363
def deleteindex(es, params): """ Deletes all indices in Elasticsearch matching the specified index pattern. It expects the parameter hash to contain the following key: "index_pattern" - Specifies the index pattern to delete. Defaults to 'elasticlogs-*' """ if 'index_pattern' in params: index_pattern = params['index_pattern'] else: index_pattern = "elasticlogs-*" es.indices.delete(index=index_pattern) return 1, "ops"
4e1d030632733ff4a8c4dfd89b0b1daefd057fe0
592,584
import re def realname(module_name): """ Return the real name of a module. The real name of a modules is in lower snake_case. Transform a CamelCase name into snake_case name. """ reg_first = re.compile(r'(.)([A-Z][a-z]+)') reg_all = re.compile(r'([a-z0-9])([A-Z])') s1 = reg_first.sub(r'\1_\2', module_name) return reg_all.sub(r'\1_\2', s1).lower()
33982291832b0ab30237003c80c9b0a4e2aede68
639,533
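A usage sketch for realname above.
print(realname("MyPlugin"))          # 'my_plugin'
print(realname("HTTPServerModule"))  # 'http_server_module' - consecutive capitals stay grouped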
def threshold(pred, param): """ Takes the predicted image "pred", thresholds it with the determined param, returns binary image. Parameters ---------- pred : np.array Prediction image param : float Threshold for the input image Returns ------- np.ndarray Binary np.array """ pred[pred >= param] = 1 pred[pred < param] = 0 return pred
e38ab2683d48fc069782d0a18ffad51cce944aec
35,015
def barriers_for_book(book, barrier_classes, fail_fast=False): """Return ReleaseBarrier instances for a book. Args: book: Row instance representing a book. barrier_classes: list of BaseReleaseBarrier subclasses fail_fast: If True, return the first barrier that applies. This can be used to test if a book has any barriers. Returns: list of BaseReleaseBarrier subclass instances representing all barriers that prevent the completion of the book. """ barriers = [] for barrier_class in barrier_classes: barrier = barrier_class(book) if barrier.applies(): barriers.append(barrier) if fail_fast and barriers: break return barriers
ed05c186971c2e5ad71599597216bf09b3ac695a
365,824
def _validate_int(value): """Return True if value is an integer""" return value.__class__ == int
8353b2932956f1b0eafeb168daafea9a14c5133b
563,378
def c_to_k(t_c): """Convert Celsius to Kelvin.""" if t_c is None: return None return t_c + 273.15
12b812714681f4f95e9ccb77b2604f5b2cbe3499
171,766
import secrets def genpw(length, encoder): """ Generate a random password. Arguments: length: Length of the requested password. encoder: function to encode the bytes data. Returns: A password string. """ d = secrets.token_bytes(2*length) return encoder(d).decode()[:length]
46841aba8098f0cc3332b95bd2fddcdca42a1e6a
325,024
def next_code(value: int, mul: int = 252533, div: int = 33554393) -> int: """ Returns the value of the next code given the value of the current code The first code is `20151125`. After that, each code is generated by taking the previous one, multiplying it by `252533`, and then keeping the remainder from dividing that value by `33554393` """ return (value * mul) % div
a9e5183e405574cc56a138a244f14de08ea68d00
708,117
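A usage sketch for next_code above, starting from the first code given in the docstring.
print(next_code(20151125))             # 31916031, the second code in the sequence
print(next_code(next_code(20151125)))  # the third code, and so on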
def bool_or_none_to_str(x): """Converts a bool or None value to a string.""" if x is None: return "None" else: return "1" if x else ""
68a25ac53745f2e8350e2107b7e4f49773c65c8a
358,452
def strip_attributes(node, preserve_attrs): """Strip unnecessary data attributes from node.""" node_attrs = list(node[1].keys()) for attr in node_attrs: if attr not in preserve_attrs: del node[1][attr] return node
07cbaeb3a0f212d291600a5bd60388b776715d00
482,964
def dmenu_format(choices, fmt=None): """Format iterable @choices into '\n' delimited dmenu input Arguments: fmt - Format string for each key:choice pair """ if fmt is None: fmt = '{}: {}' dmenu_line = '' for c in choices: dmenu_line += (fmt+'\n').format(c[0], c[1]) return dmenu_line
2fdc3b3741bcb7b8052c4336969352395cda545e
528,195
import re def match_is_ipv4_address(value): """Match given value as a valid dotted-quad IPv4 address.""" # Apply the dotted-quad pattern to the string and detect a mismatch try: match = re.search(r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$', value) except TypeError: return u'{0} must be a string in IPv4 dotted-quad notation'.format( repr(value)) if not match: return u'"{0}" must be in IPv4 dotted-quad notation'.format( value) # Validate the range of each octet octets = [int(x) for x in match.groups()] for idx, octet in enumerate(octets): if octet > 255: return '{0} octet of "{1}" exceeds 255'.format( ['1st', '2nd', '3rd', '4th'][idx], value) return None
d4573d5919d1811b83c26928f3e403d070c41f37
14,449
def make_split_groups_out(file_name): """ Breaks up the output of cd-hit and returns it as a list of strings where each element corresponds to a group. """ with open(file_name, "r") as handle: groups = handle.read() groups_split = groups.split(">Cluster")[1:] return groups_split
1e7d2252a64f8b100303a0c98814365bd53cdb56
265,938
def plugin(plugin_without_server): """ Construct mock plugin with NotebookClient with server registered. Use `plugin.client` to access the client. """ server_info = {'notebook_dir': '/path/notebooks', 'url': 'fake_url', 'token': 'fake_token'} plugin_without_server.client.register(server_info) return plugin_without_server
2cc59e2fd8de66ce7dbd68114ae5805bc13527fb
690,950
def my_round(x, digits=0): """ Round the floating point number or list/tuple of floating point numbers to ``digits`` number of digits. Calls Python's ``round()`` function. EXAMPLES:: >>> print(my_round(1./7, 6)) 0.142857 >>> print(my_round((1./3, 1./7), 6)) (0.333333, 0.142857) """ try: result = round(x, digits) except TypeError: result = [my_round(xx, digits) for xx in x] if isinstance(x, tuple): result = tuple(result) return result
2fe3e208c8d3c893d655778273d3832f63edc40d
302,306
import gzip def get_file_handle(file_path, compression): """ Returns a file handle to the given path. :param file_path: path to the file to open :param compression: indicates whether or not the input file is compressed :return: a file handle to file_path """ if compression: return gzip.open(file_path, 'rt', encoding='utf-8', errors='strict') else: return open(file_path, 'rt', encoding='utf-8', errors='strict')
44c97b211c4b44679934eede62845c58947c4091
702,175
from typing import Dict def _get_user_dict(client) -> Dict[str, dict]: """ Returns a dict of all users with key userid and value the user object """ response = client.users_list() users = response["members"] return {elem["id"]: elem for elem in users}
9af1fdc115f04127293a5936bc93c5383e6af567
132,576
def calcMaxObsTime(dictionary): """ Calculates the maximum observing time, using the number of gridpoints in one atmosphere strip, the gridwidth, the number of atmosphere strips and the windspeed """ # maximum time # every strip has 32768 x-values max_obs_time = (dictionary['x_length_strip'] - 3 * dictionary['separation'])* \ dictionary['max_num_strips']*dictionary['grid'] \ /dictionary['windspeed'] return max_obs_time
6dcd43b1e8206b40214fc69776c55dcc12ae9143
455,206
def remove_dot_files(files): """Removes hidden dot files from file list Args: files (list): List of file names Returns: (list): List of filenames with dot files, .stuff, removed """ new_list = [] for l in files: if not l.startswith('.'): new_list.append(l) return new_list
b1c85094decb02c1cfcc565b82d3a4c8dfb01151
294,927
def get_seconds(ts): """ Converts the time object to number of seconds from beginning of the day :param ts: the time object :return: the number of seconds from beginning of the day """ return ts.second + 60 * ts.minute + 3600 * ts.hour
32cb19bb44b4ef9eea3df17a7db867b43cacc3ff
479,625
def opposite_player(player): """Finds the opposite player based on the input. Assumes only two players Args: player (string): Input player to invert Returns: (string): Opposite player from the input """ if player == "X": return "O" else: return "X"
09a7306e15d0d05fbe6d89134b3186c29163bce0
587,133
def get_vocabulary(dataset, min_word_count=0): """ Filter out words in the questions that are <= min_word_count and create a vocabulary from the filtered words :param dataset: The VQA dataset :param min_word_count: The minimum number of counts the word needs in order to be included :return: """ counts = {} print("Calculating word counts in questions") for d in dataset: for w in d["question_tokens"]: counts[w] = counts.get(w, 0) + 1 vocab = [w for w, n in counts.items() if n > min_word_count] # cw = sorted([(n, w) for w, n in counts.items() if n > min_word_count], reverse=True) # print('\n'.join(map(str, cw[:20]))) # Add the 'UNK' token vocab.append('UNK') # UNK has it's own ID return vocab
1ab05b1ba4df9251ce6077f9e3bc20b590bafe93
31,998
def dmse(f_x, y): """ dMSE(f_x,y) = 2* (f_x-y) """ return 2 * (f_x - y)
367bed342770e5d4e49623cab34e35504530003e
504,571
def connect_streets(st1, st2): """ Tells if streets `st1`, `st2` are connected. @param st1 street 1 @param st2 street 2 @return tuple of tuples (0 or 1, 0 or 1), or None if not connected Each tuple means: * 0 or 1 means first or last extremity of the first street * 0 or 1 means first or last extremity of the second street ``((0, 1),)`` means the first point of the first street is connected to the second extremity of the second street. """ a1, b1 = st1[0], st1[-1] a2, b2 = st2[0], st2[-1] connect = [] if a1 == a2: connect.append((0, 0)) if a1 == b2: connect.append((0, 1)) if b1 == a2: connect.append((1, 0)) if b1 == b2: connect.append((1, 1)) return tuple(connect) if connect else None
77ee8f4c344277b09340cc3a38e16ee5ae11f702
50,842
def nb_emprunts(data): """ Compute the number of loans in the dataset argument: data (list of lists) return: int """ res = 0 for item in data: res += int(item[-1]) return res
67532d4deba1902f8376f714d10be80181a120dc
608,282
from typing import Any import json def load_json(filename: str) -> Any: """ Load json file from disk and transform in dict :param filename: Name of json file :return: Json loaded """ with open(filename, 'rb') as json_file: json_data = json_file.read() return json.loads(json_data)
2f84cd43f3187094b9854c1bb05fc7c17539fb09
181,540
import math def calc_camera_angle( x_distance: float, mount_height: float, target_height: float, limelight ): """ Calculate the camera's mounted angle from known properties. Set the robot to a fixed distance away from the target and pass in the other properties and it will calculate the angle to put into the calc_distance function Args: x_distance: The known distance away from the wall the target is on mount_height: The height that the camera is mounted off the floor target_height: The height the target is from the floor limelight: The limelight object to pull networktables values from Returns: Gives the angle (in degrees) that the camera is mounted at """ a1 = -limelight.vertical_offset + math.degrees( math.atan((target_height - mount_height) / x_distance) ) return a1
e3fc779efb23f83d6c1fe7f32684fd3a46918401
258,432
def decode_conditions(conditions: str): """ Decode the conditions from a JCN mnemonic to a decimal value. Parameters ---------- conditions: str, mandatory List of a maximum of 4 conditions Returns ------- int_conditions: int Integer value of the conditions Raises ------ N/A Notes ------ N/A """ int_conditions = 0 if 'I' in conditions: int_conditions = 8 if 'A' in conditions: int_conditions = int_conditions + 4 if 'C' in conditions: int_conditions = int_conditions + 2 if 'T' in conditions: int_conditions = int_conditions + 1 return int_conditions
4206232b992f916531cca9bb8c7a8281946e6cd1
564,866
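A usage sketch for decode_conditions above.
print(decode_conditions("CT"))   # 3  -> C adds 2, T adds 1
print(decode_conditions("IAC"))  # 14 -> I adds 8, A adds 4, C adds 2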
def get_calculation_annotation(calculation_field, calculation_method): """ Returns the default django annotation @param calculation_field: the field to calculate ex 'value' @param calculation_method: the aggregation method ex: Sum @return: the annotation ex value__sum """ return '__'.join([calculation_field.lower(), calculation_method.name.lower()])
504ed600a6b58dc6d622ae6603ac4212e63dec93
360,964
def wrap_check_start(l, start): """Check that start index falls in [-l, l) and wrap negative values to l + start.""" if (start < -l) or (start >= l): raise IndexError('start index out of range') if start < 0: start = start % l return start
4a83914c3a0b4a9209cb902bea9b8eca560de1db
281,154
import json def json_description_metadata(description): """Return metadata from JSON formatted image description as dict. Raise ValueError if description is of unknown format. >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}' >>> json_description_metadata(description) # doctest: +SKIP {'shape': [256, 256, 3], 'axes': 'YXS'} >>> json_description_metadata('shape=(256, 256, 3)') {'shape': (256, 256, 3)} """ if description[:6] == 'shape=': # old style 'shaped' description; not JSON shape = tuple(int(i) for i in description[7:-1].split(',')) return dict(shape=shape) if description[:1] == '{' and description[-1:] == '}': # JSON description return json.loads(description) raise ValueError('invalid JSON image description', description)
0cf9994197edf1365e001a5bdee4c3d9c3215b83
397,994
import random def shuffle_fields(fields): """ Shuffle all fields in a dict together. Each field should be a list. """ keys, values = zip(*fields.items()) zipped = list(zip(*values)) random.shuffle(zipped) unzipped = list(zip(*zipped)) for k, v in zip(keys, unzipped): fields[k] = list(v) return fields
d89a18504be5b1f3eafc1ea518b6b577261b7c35
197,437
def mul_vector_by_scalar(vector, scalar): """ Multiplies a vector by a scalar :param vector: vector :param scalar: scalar :return: vector * scalar """ return tuple([value * scalar for value in vector])
5e1215a7cd03265272eff78966d0273b180ad3eb
69,636
def parse_setupapi(setup_log): """ Read data from provided file for Device Install Events for USB Devices :param setup_log: str - Path to valid setup api log :return: list of tuples - Tuples contain device name and date in that order """ device_list = list() with open(setup_log) as in_file: for line in in_file: lower_line = line.lower() # if 'Device Install (Hardware initiated)' in line: if 'device install (hardware initiated)' in \ lower_line and ('ven' in lower_line or 'vid' in lower_line): device_name = line.split('-')[1].strip() if 'usb' not in device_name.split( '\\')[0].lower(): continue # Remove most non-USB devices # This can remove records that may be # relevant so please always validate that # the data reduction does not remove results # of interest to you. date = next(in_file).split('start')[1].strip() device_list.append((device_name, date)) return device_list
a57c9031a03881bdb9adfeacec0858174631e96e
497,799
def find_sensor_with_id(sensors, chip_id=None, sensor_id=None): """Find a sensor index with first matching chip_id or sensor_id. Parameters: sensors (list[AQData]): list of sensors chip_id (int): the chip id to look for. sensor_id (int): the sensor id to look for. Return: index if sensor in sensors with matching id or None if not found. """ for i, sensor in enumerate(sensors): if chip_id is not None: if sensor.chip_id == chip_id: return i if sensor_id is not None: for sid in sensor.sensor_ids: if sid == sensor_id: return i return None
3e073eb676de20883606ae78d52ab8c8fa6f11c1
435,061
def isAnagram(string1, string2): """Checks if two strings are an anagram An anagram is a word or phrase formed by rearranging the letters of a different word or phrase. This implementation ignores spaces and case. @param string1 The first word or phrase @param string2 The second word or phrase @return A boolean representing if the strings are an anagram or not """ # Remove spaces str1_nospace = string1.replace(" ", "") str2_nospace = string2.replace(" ", "") # Convert to lowercase and sort list1 = list(str1_nospace.lower()) list1.sort() list2 = list(str2_nospace.lower()) list2.sort() # Check for equality return (list1 == list2)
820390d6fa1ca18b0a20dbd3d2faad08b656680c
92,357
def empty(region): """ Check if a region is empty or inconsistent :param region: region as an array [xmin, ymin, xmax, ymax] :type region: list of four float :returns: True if the region is considered empty (no pixels inside), False otherwise :rtype: bool""" return region[0] >= region[2] or region[1] >= region[3]
ef09b70a9e543936465a450a09a21f7cd242b576
321,136
def _get_num_gpus_on_instance(instance_type_info): """ Return the number of GPUs attached to the instance type. instance_type_info is expected to be as returned by DescribeInstanceTypes: { ..., "GpuInfo": { "Gpus": [ { "Name": "M60", "Manufacturer": "NVIDIA", "Count": 2, "MemoryInfo": { "SizeInMiB": 8192 } } ], } ... } """ return sum([gpu_type.get("Count") for gpu_type in instance_type_info.get("GpuInfo").get("Gpus")])
882bd4aa8675b6d3001859fdf4b08d47fd622eeb
458,155
def matrix2vec(m, axis='x'): """Calculate axis vector from rotation matrix Parameters ---------- m : numpy.ndarray rotation matrix axis : str, optional axis x, y, z, by default 'x' Returns ------- vec : numpy.ndarray Raises ------ ValueError axis shoule be x, y, z """ if axis == 'x': vec = m[:, 0] elif axis == 'y': vec = m[:, 1] elif axis == 'z': vec = m[:, 2] else: raise ValueError("Valid axis are 'x', 'y', 'z'") return vec
9c6cda577f35158a8756e866a1db9a4d5f851ab4
42,527
def parse_instruction(raw_instruction): """ ABCDE (0)1002 DE - two-digit opcode, 02 == opcode 2 C - mode of 1st parameter, 0 == position mode B - mode of 2nd parameter, 1 == immediate mode A - mode of 3rd parameter, 0 == position mode, omitted due to being a leading zero :param raw_instruction: Assuming int value :return: op_code, param_1_mode, param_2_mode, param_3_mode, """ instruction = list(f"{raw_instruction:05}") op_code = int(''.join(instruction[3:5])) mode_1 = int(instruction[2]) mode_2 = int(instruction[1]) mode_3 = int(instruction[0]) return op_code, mode_1, mode_2, mode_3
bcf4995be4e03f30dfe7a502f958ae2746cb0ec7
73,229
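A usage sketch for parse_instruction above, using the instruction from the docstring.
print(parse_instruction(1002))  # (2, 0, 1, 0): opcode 2, modes position, immediate, position
print(parse_instruction(3))     # (3, 0, 0, 0): bare opcode, all modes default to position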
from datetime import datetime from typing import Literal def get_shift(date: datetime) -> Literal["1", "2", "3", "?"]: """ According to the time, it is assigned the shift 07:00 - 14:59 = 1 15:00 - 22:59 = 2 23:00 - 06:59 = 3 Args: date: Python datetime, extract the hour an eval Returns: Return a string with the corresponding shift; 1,2,3 or ? """ try: hour = date.hour except AttributeError: # If date is not datetime like None or str set hour to -1 hour = -1 # By default shift is equal to ? and override in the conditional block shift = "?" if hour >= 7 and hour <= 14: shift = "1" elif hour >= 15 and hour <= 22: shift = "2" elif (hour >= 0 and hour <= 7) or hour >= 23: shift = "3" return shift
e335cef0a3aef0b7faaac37b747f8d6a8d7613ab
234,913
import csv def get_csv_data(file, row_checker=lambda x: True): """ Args: file (str): File path for opening the csv file. row_checker (function): Function that returns True if row is valid, otherwise False. Default function that returns True. Returns: List[List]: A list containing each row from the CSV file. """ data = [] with open(file, "r", encoding="utf-8") as f: reader = csv.reader(f) for row in reader: if not row_checker(row): continue data.append(row) return data
b3dc205432f0aac175d437467c03021f4dce9735
206,929
def vals_are_multiples(num, vals, digits=4): """decide whether every value in 'vals' is a multiple of 'num' (vals can be a single float or a list of them) Note, 'digits' can be used to specify the number of digits of accuracy in the test to see if a ratio is integral. For example: vals_are_multiples(1.1, 3.3001, 3) == 1 vals_are_multiples(1.1, 3.3001, 4) == 0 return 1 if true, 0 otherwise (including error)""" if num == 0.0: return 0 try: l = len(vals) vlist = vals except: vlist = [vals] for val in vlist: rat = val/num rem = rat - int(rat) if round(rem,digits) != 0.0: return 0 return 1
3ffc25020c87c41a1b2d678b71081d73d7613874
649,024
def personal_top_three(scores): """Return the three highest scores.""" scores = sorted(scores, reverse=True) return scores[:3]
4f83da03a127ecfcd37bf449434a246763b9ee09
358,612
def bubble_sort(lst: list) -> list: """Sort a list in ascending order. The original list is mutated and returned. The sort is stable. Design idea: Swap adjacent out-of-order elements until the list is sorted. Complexity: O(n^2) time, O(1) space. Stable and in-place. See quicksort, merge sort and heapsort for sorting algorithms with a better time complexity. """ # The first iteration will bring the largest element to the end, the second # iteration will bring the second largest element to the end - 1, and so on, # so we need no more than n iterations to put every element in the proper # place. for _ in range(len(lst)): swapped = False for i in range(1, len(lst)): # If you changed this comparison to >=, the algorithm would still # be correct but the sort would no longer be stable. if lst[i-1] > lst[i]: lst[i-1], lst[i] = lst[i], lst[i-1] swapped = True # If no swaps occurred, the list is sorted and we can exit early. if not swapped: break return lst
0acc4561a7c1cbdd04ff9e3f054533b5a3e1aea4
641,514
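A usage sketch for bubble_sort above, showing that the input list is sorted in place and also returned.
data = [5, 2, 9, 1, 5, 6]
print(bubble_sort(data))  # [1, 2, 5, 5, 6, 9]
print(data)               # the same list object, now sorted in place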