content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def create_dead_recover_reactions_model(
    vaccination_states,
    non_vaccination_state,
    virus_states,
    areas,
):
    """Create death and recover reactions for the model.

    Parameters
    ----------
    vaccination_states : list of strings
        List containing the names of the vaccinations containing a state
        for non-vaccinated individuals.
    non_vaccination_state : str
        Name of state indicates non-vaccinated individuals.
    virus_states : list of strings
        List containing the names of the virus types.
    areas : list of strings
        List containing the names of the areas.

    Returns
    -------
    dead_recover_reactions: dict
        Dictionary that contains the dead/recover reaction names as keys and
        dictionaries that contain the reactants, products and formulas as
        values.
    """
    dead_recover_reactions = {}
    # One recover reaction and one death reaction per
    # (area, vaccination state, virus type) combination.
    for index_areas in areas:
        for index_vaccination in vaccination_states:
            for index_virus in virus_states:
                numb_infected = (
                    f"infectious_{index_areas}_{index_vaccination}_{index_virus}"
                )
                numb_recovered = (
                    f"recovered_{index_areas}_{index_vaccination}_{index_virus}"
                )
                numb_dead = f"dead_{index_areas}_{index_vaccination}_{index_virus}"
                key_recover = f"{numb_infected}_to_{numb_recovered}"
                key_death = f"{numb_infected}_to_{numb_dead}"
                # Non-vaccinated individuals get no protective omega term.
                if index_vaccination == non_vaccination_state:
                    omega_term = "0"
                else:
                    omega_term = f"omega_{index_vaccination}_{index_virus}"
                dead_recover_reactions[key_recover] = {
                    "reactants": {f"{numb_infected}": 1},
                    "products": {f"{numb_recovered}": 1},
                    "formula": f" (1 - (1 - {omega_term}) * prob_deceasing) * lambda1 * {numb_infected}",
                }
                dead_recover_reactions[key_death] = {
                    "reactants": {f"{numb_infected}": 1},
                    "products": {f"{numb_dead}": 1},
                    "formula": f"(1 - {omega_term}) * prob_deceasing * lambda1 * {numb_infected}",
                }
    return dead_recover_reactions
bbab9c5ccbb1d1e7dba50ff3637b6966cc5e86d6
354,699
def is_member(user, group):
    """Checks if the user object is a member of the group or not.

    Returns the (possibly empty) queryset of the user's groups named
    ``group``; it is truthy exactly when the user belongs to the group.
    """
    matching_groups = user.groups.filter(name=group)
    return matching_groups
c50ea4ee7a40a6a78811c8b0d40a57dd93e0897c
415,460
from typing import Any, Dict


def mk_ehvi_default_optimizer_options() -> Dict[str, Any]:
    """Makes a copy of dictionary for generic default optimizer options for
    EHVI-based acquisition function, used when optimizer options for a given
    acquisition function are not registered.

    NOTE: Every call builds a brand-new dict (including the nested
    ``options`` dict), so the return value is safe to modify without
    affecting subsequently returned defaults.
    """
    nested_options = {
        "init_batch_limit": 128,  # Used in `gen_batch_initial_conditions`.
        "batch_limit": 5,  # Batch limit prevents memory issues in initialization.
    }
    return {
        "sequential": True,
        "num_restarts": 40,
        "raw_samples": 1024,
        "options": nested_options,
    }
e7a05d0bad1857b162ba078b1550e438a552b4f8
150,285
def _loop_action_in_command_line(command_line: str) -> bool: """Does the command line contain a loop statement Args: command_line (str): The command line to test. Returns: bool: True if there's a loop in this command line. """ return any( word in command_line.split() for word in ["loop", "loop_until", "loop_for"] )
8cf6c17805dc25be41ecbc32664bd396d13e3df1
643,925
def vlan_range_to_list(vlans):
    """Converts single VLAN or range of VLANs into a list.

    Example:
        Input (vlans): 2-4,8,10,12-14 OR 3 OR 2,5,10
        Returns: ['2','3','4','8','10','12','13','14'] OR ['3'] OR ['2','5','10']

    Args:
        vlans (str): User input parameter of a VLAN or range of VLANs

    Returns:
        list: ordered list of all VLAN(s) in range, as strings
        (matching the original textual representation)
    """
    final = []
    # split(',') on a comma-free string yields [vlans], so no special case
    # is needed for a single entry.
    for each in vlans.split(','):
        # Strip stray whitespace (e.g. "2, 5") which previously broke int().
        each = each.strip()
        if '-' not in each:
            # single VLAN (not a range)
            final.append(each)
        else:
            low = int(each.split('-')[0])
            high = int(each.split('-')[1])
            for num in range(low, high + 1):
                final.append(str(num))
    return final
739c4213516cc4bbce486cde3206b81db523b94e
434,700
def _sum(array): """ Recursively find the sum of array""" if len(array) == 1: return array[0] else: return array[0] + _sum(array[1:])
a9897f996e22bdf9738c6f64a1807d191b2647ef
522,398
def round_safe(value, precision):
    """Standard `round` function raises TypeError when None is given as
    value. This function just ignores such values (i.e., returns them
    unmodified).
    """
    if value is not None:
        return round(value, precision)
    return value
5f1ee265cec3e249e599318cfa10ce0e8ef5ca6e
385,337
import math


def multiply(int1, int2):
    """An implementation of the Karatsuba algorithm for integer multiplication.

    Returns product of int1 and int2.

    Fixes two defects of the previous version:
    - the high (z2) and low (z0) partial products were swapped in the
      recombination, producing wrong results;
    - the two operands were split at *different* digit positions, which
      invalidates the Karatsuba identity for unequal-length inputs.
    """
    # Base case: single-digit operands are multiplied directly.
    if -10 < int1 < 10 or -10 < int2 < 10:
        return int1 * int2

    # Split BOTH numbers at the same power of ten (half the longer length).
    # Splitting arithmetically keeps x == high * base + low valid even for
    # negative numbers (divmod floors consistently).
    m = max(len(str(abs(int1))), len(str(abs(int2))))
    m2 = m // 2
    base = 10 ** m2
    high1, low1 = divmod(int1, base)
    high2, low2 = divmod(int2, base)

    # Three recursive multiplications instead of four.
    z2 = multiply(high1, high2)          # high parts
    z1 = multiply(high1 + low1, high2 + low2)
    z0 = multiply(low1, low2)            # low parts

    # Recombine: high product scales by base**2, low product by base**0.
    return z2 * base * base + (z1 - z2 - z0) * base + z0
da4ca6d0e96420283542d335f37762c35f1c397a
659,352
def limit_to_bounding_box(df, coords):
    """Drop rows with coordinates outside of the bounding box.

    ``coords`` is (min_lat, max_lat, min_lon, max_lon); boundary values are
    excluded (strict inequalities), as before.
    """
    min_lat, max_lat, min_lon, max_lon = coords
    inside = (
        (df['LAT'] > min_lat)
        & (df['LAT'] < max_lat)
        & (df['LON'] > min_lon)
        & (df['LON'] < max_lon)
    )
    return df[inside]
cc76cbe7bad15e60fe68fd32e664481298db2b53
207,019
def call_vtk(obj, method, args=None):
    """ Invoke a method on a vtk object.

    Parameters
    ----------
    obj : object
        VTK object.
    method : str
        Method name.
    args : None or tuple or list
        Arguments to be passed to the method.
        If None, the method is called with no arguments.

    Returns
    -------
    result : Any
        Return the results of invoking `method` with `args` on `obj`.

    Notes
    -----
    Use a tuple to pass a None to the method: (None,).

    Examples
    --------
    >>> import vtk
    >>> from brainspace.vtk_interface.wrappers.base import call_vtk
    >>> m = vtk.vtkPolyDataMapper()

    Get array id of the mapper:

    >>> call_vtk(m, 'GetArrayId', args=None)
    -1
    >>> m.GetArrayId()
    -1

    Set array id of the mapper to 2:

    >>> call_vtk(m, 'SetArrayId', args=(2,)) # same as m.SetArrayId(2)
    >>> m.GetArrayId()
    2
    """
    # Function takes no args -> use None
    # e.g., SetColorModeToMapScalars() -> colorModeToMapScalars=None
    if args is None:
        try:
            return getattr(obj, method)()
        except TypeError:
            # Only retry with an explicit None on a signature mismatch;
            # the previous bare `except:` masked every other error raised
            # inside the method itself.
            return getattr(obj, method)(None)

    if isinstance(args, dict):
        return getattr(obj, method)(**args)

    # If iterable try first with multiple arguments
    if isinstance(args, (tuple, list)):
        try:
            return getattr(obj, method)(*args)
        except TypeError:
            pass

    return getattr(obj, method)(args)
ebff1ca9e5881e1a4fa5e17a9b766d3bb2fed030
229,697
def oucru_convert_dtypes(tidy, columns=()):
    """Helper method to apply convert_dtypes.

    Helper method to apply convert_dtypes() to a specific
    set of columns which might or might not be included in
    the DataFrame.

    Parameters
    ----------
    tidy: pd.DataFrame
        The DataFrame
    columns: iterable of str
        The columns to apply convert_dtypes(). The default is now an
        immutable tuple instead of a mutable list (`columns=[]`), which
        is an anti-pattern even when the list is never mutated.

    Returns
    -------
    pd.DataFrame
        The DataFrame with columns converted (note: the conversion is
        applied in place on the passed DataFrame as well).
    """
    # Only touch columns that actually exist in the frame.
    intersection = \
        list(set(columns).intersection(tidy.columns))

    # The intersection is empty
    if not intersection:
        return tidy

    # Convert dtypes
    tidy[intersection] = \
        tidy[intersection].convert_dtypes()

    # Return
    return tidy
af0c5576a19d922e465d5e460055dee0142845e5
361,629
def match_to_int(match):
    """Returns trace line number matches as integers for sorting.

    Maps other matches to negative integers.
    """
    # Hard coded strings are necessary since each trace must have the address
    # accessed, which is printed before trace lines.
    special_matches = {"use-after-poison": -2, "unknown-crash": -2, "READ": -1}
    if match in special_matches:
        return special_matches[match]
    # Cutting off non-integer part of match
    return int(match[1:-1])
c2daab64bc4a2ae258b7ac6152a949b48d8d7906
679,348
import hashlib


def get_sha256(string):
    """Get sha256 of a string.

    :param: (str or bytes) the value to be hashed. ``str`` input is encoded
        as UTF-8 first — ``hashlib`` only accepts bytes, so the previous
        version raised TypeError for every ``str`` argument.
    :return: (string) the sha256 hex digest of string.
    """
    if isinstance(string, str):
        string = string.encode("utf-8")
    return hashlib.sha256(string).hexdigest()
17db82f505405f5edc636c70c5e074b4ffb7daa4
459,318
def is_vertex_field_name(field_name):
    """Return True if the field's name indicates it is a non-root vertex field."""
    vertex_prefixes = ('out_', 'in_')
    return field_name.startswith(vertex_prefixes)
0d19000a3f34cd78677b41e52cdeee835186888b
587,561
def s_curve(CurrTime, Amp, RiseTime, StartTime=0.0):
    """
    Function to generate an s-curve command

    Arguments:
      CurrTime : The current timestep or an array of times
      Amp : The magnitude of the s-curve (or final setpoint)
      RiseTime : The rise time of the curve
      StartTime : The time that the command should StartTime

    Returns :
      The command at the current timestep or an array representing
      the command over the times given (if CurrTime was an array)
    """
    # Piecewise definition combined via boolean factors: each comparison
    # evaluates to 0/1 (elementwise for arrays), so exactly one of the
    # three segments contributes at any time t = CurrTime - StartTime:
    #   0 <= t < RiseTime/2 : 2*(t/T)^2                 (accelerating half)
    #   RiseTime/2 <= t < T : -2*(t/T)^2 + 4*(t/T) - 1  (decelerating half)
    #   t >= RiseTime       : 1                         (hold at setpoint)
    # (t < 0 yields 0: command has not started yet.)
    scurve = 2.0 * ((CurrTime - StartTime)/RiseTime)**2 * (CurrTime-StartTime >= 0) * (CurrTime-StartTime < RiseTime/2) \
             +(-2.0 * ((CurrTime - StartTime)/RiseTime)**2 + 4.0 * ((CurrTime - StartTime)/RiseTime) - 1.0) * (CurrTime-StartTime >= RiseTime/2) * (CurrTime-StartTime < RiseTime) \
             + 1.0 * (CurrTime-StartTime >= RiseTime)
    # Scale the unit curve to the requested amplitude.
    return Amp * scurve
19c198dd823b4fd5d4c382bcb76249aa2e438638
126,212
def to_bool(value: str) -> bool:
    """
    Converts a string argument to the bool representation (or throws
    a ValueError if the value is not one of '[Tt]rue' or '[Ff]alse'.
    """
    truthy = ["True", "true", True]
    falsy = ["False", "false", False]
    if value in truthy:
        return True
    if value in falsy:
        return False
    raise ValueError(f"Value {value} cannot be converted into a bool")
579df9985701e8081e8c753206c72d2c0afc6851
585,981
import math


def convert_bytes(size):
    """Make a human readable size."""
    labels = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    try:
        exponent = int(math.floor(math.log(size, 1024)))
    except ValueError:
        # log() rejects size <= 0; fall back to plain bytes.
        exponent = 0
    scaled = round(size / math.pow(1024, exponent), 2)
    return '{}{}'.format(scaled, labels[exponent])
b4b070e85d31c0c607422667aa7f79bc0c21bcfc
647,594
from datetime import datetime


def recast_timestamp(ms: int):
    """Recast millisecond epoch offsets to DateTime.

    Returns None for non-numeric input (e.g. None), mirroring the
    TypeError raised by the arithmetic.
    """
    millis_per_second = 1000.0
    try:
        return datetime.fromtimestamp(ms / millis_per_second)
    except TypeError:
        return None
6d89074ad13e7eb0e96949b8463015fda1124e45
73,874
import copy


def confusion_matrix(y_true, y_pred, labels):
    """Compute confusion matrix to evaluate the accuracy of a classification.

    Args:
        y_true(list of obj): The ground_truth target y values
            The shape of y is n_samples
        y_pred(list of obj): The predicted target y values (parallel to y_true)
            The shape of y is n_samples
        labels(list of str): The list of all possible target y labels used to index the matrix

    Returns:
        matrix(list of list of int): Confusion matrix whose i-th row and j-th column entry
            indicates the number of samples with true label being i-th class
            and predicted label being j-th class

    Notes:
        Loosely based on sklearn's confusion_matrix():
        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
    """
    size = len(labels)
    # Nested comprehension builds independent rows (the previous version
    # reused the loop variable `i` in both loops and needed deepcopy to
    # avoid row sharing).
    matrix = [[0] * size for _ in range(size)]
    for true_label, pred_label in zip(y_true, y_pred):
        matrix[labels.index(true_label)][labels.index(pred_label)] += 1
    return matrix
a836ac91173ba61857bc1f2cd33b888976ebc32d
539,246
def FirstSignificant(pvals, thresh):
    """Given a list of temporal p-values for peptide phoshorylation changes
    return the time point at which the peptide is first significant or
    'Not significant'
    """
    for index, pval in enumerate(pvals):
        if pval <= thresh:
            # Time points are powers of two: index 0 -> 2, 1 -> 4, ...
            return str(2 ** (index + 1))
    return "Not significant"
f181c930a68cd632cce82592901ec5e7a1c9fa04
304,628
def flatten(list_of_lists: list[list]) -> list:
    """flatten list of lists

    Args:
        list_of_lists (list[list]): list of lists

    Returns:
        list: list
    """
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
3579023bdf103cf6942c8bbe1c681319b2ce386d
268,022
from typing import Dict


def build_item(hex_color: str, frequencies: Dict, html_colors: Dict) -> Dict:
    """
    :param hex_color: '000000'
    :param frequencies: {'000000':0.3, 'FFFFFF':0.2, ... }
    :param html_colors:{'000000':{'name':'Black', 'hex':'000000', 'r':0, 'g':0, 'b':0}, ...}
    :return: {'color':{'name':'Black', 'hex':'000000', 'r':0, 'g':0, 'b':0}, 'frequency':5}
    """
    color_details = html_colors[hex_color]
    frequency = frequencies[hex_color]
    return {'color': color_details, 'frequency': frequency}
9b0d4b123c26d5e8cae0517f837c405d4411cadf
510,635
def cantor_pairing(a, b):
    """A function returning a unique positive integer for every pair (a,b)
    of positive integers.

    Uses floor division: (a+b)*(a+b+1) is always even, so `//` is exact
    and keeps the result an int (the previous `/` returned a float,
    contradicting the documented integer contract and losing precision
    for large inputs).
    """
    return (a + b) * (a + b + 1) // 2 + b
1b2c93165a333594efdc198a91145d9eeef5fbb1
475,999
def get_child_schema(self):
    """Return the list of child keys associated with the parent key `docs`
    defined in `self.schema`.

    The upstream API returns an array of JSON objects (e.g. items with
    year, title, description, mediatype, publicdate, download counts,
    identifier, format/collection/creator arrays and a relevance score),
    hence the return type is a list of lists: one inner list naming the
    possible keys of each object under `docs`.

    Returns:
        list[list[str]]: e.g. [['year', 'title', ..., 'score']]
    """
    child_keys = ['year', 'title', 'description', 'mediatype', 'publicdate',
                  'downloads', 'week', 'month', 'identifier', 'format',
                  'collection', 'creator', 'score']
    return [child_keys]
4d91ff18ab8e3f6cec5610169dc6d52e7a647960
25,052
def is_int(in_obj):
    """Checks if the input represents an integer and returns true iff so.

    Also catches TypeError so inputs that cannot be passed to int() at
    all (None, lists, ...) report False instead of raising.
    """
    try:
        int(in_obj)
        return True
    except (ValueError, TypeError):
        return False
4e5837e136932d70c65e55b5f5959e61c0c89619
329,484
def xywh_to_xyxy(box):
    """Convert xywh bbox to xyxy format."""
    x, y = box[0], box[1]
    w, h = box[2], box[3]
    return x, y, x + w, y + h
53b31a43cfee6bb5efdf7875af66d21e2657ee1e
231,769
def get_class_by_name(name: str) -> type:
    """
    Returns type object of class specified by name

    Args:
        name: full name of the class (with packages)

    Returns:
        class object
    """
    parts = name.split('.')
    # __import__ returns the top-level package; walk down the dotted path.
    obj = __import__(parts[0])
    for attribute in parts[1:]:
        obj = getattr(obj, attribute)
    return obj
23f55bffc3c78b2cd0e922760a1968871e76b468
157,724
def get_end_pos(cls, start_pos, dimensions, left=False, up=False):
    """
    calculate the end position if were to build an array of items

    :param cls: Type (must expose `width` and `height` attributes)
    :param start_pos: (int, int)
    :param dimensions: (int, int)
    :param left: bool: default builds rightward
    :param up: bool: default builds downward
    :return: (int, int)
    """
    cols, rows = dimensions
    dx = (cols - 1) * cls.width
    dy = (rows - 1) * cls.height
    if left:
        dx = -dx
    if up:
        dy = -dy
    return start_pos[0] + dx, start_pos[1] + dy
a03b45f7137e835a8709da933a7a7e4f509f8569
655,690
def _clean_scale(scale): """Cleanup a 'scaling' string to be matplotlib compatible. """ scale = scale.lower() if scale.startswith('lin'): scale = 'linear' return scale
5e67783ae6f8b9df704c3b2e7f68dcd3b6c9e47d
656,764
def num_to_uix(n):
    """Convert number to a superscript letter character.

    Args:
        n (int): Single-digit number (0-8), used as an index.

    Returns:
        str: The Unicode superscript of the letter 'a'..'i' at index n.
    """
    return "ᵃᵇᶜᵈᵉᶠᵍʰⁱ"[n]
7a942cf22b9f3068741414d5dcfcdfc52bae37f0
556,430
def _check_if_StrNotBlank(string): """ check if a sting is blank/empty Parameters ---------- Returns ------- : boolean True if string is not blank/empty False if string is blank/empty """ return bool(string and string.strip())
e5de1d902f8e3931d23e04c6ba825b17d90e8d1d
10,553
def dummy_decorator(func):  # noqa: D401
    """Dummy property decorator, to test if chained decorators are handled correctly."""
    # Pure pass-through: no wrapping, the original callable is returned as-is.
    undecorated = func
    return undecorated
3cc6c14b387a9a81283ca26c802792e008e6cf72
609,447
def get_role_part(rawsource):
    """Helper function to get role name from the instruction.

    Args:
        rawsource: Role raw text. Example: :role:`text` -> :role:`

    Returns an empty string when no backtick is present.
    """
    # find() yields -1 when missing; -1 + 1 == 0 makes the slice empty.
    end = rawsource.find('`') + 1
    return rawsource[:end]
3e1c46ef3e8b2e58782b10ec59cdc447bacc76a4
105,607
def _parse_ports(ports_text): """ Handle the case where the entry represents a range of ports. Parameters ---------- ports_text: str The text of the given port table entry. Returns ------- tuple A tuple of all ports the text represents. """ ports = ports_text.split('-') try: if len(ports) == 2: ports = tuple(range(int(ports[0]), int(ports[1]) + 1)) else: ports = (int(ports[0]),) except ValueError: return () return ports
eb6eed9a5f8ea91d448be9c0eede9f5b258cf358
687,432
def get_book_title(soup):
    """ Return book title"""
    title_tag = soup.find('h1', attrs={'class': 'bookTitle'})
    return title_tag.get_text()
7b18b5fdcb82216ffdc1fff5a92ae5b70856cdd1
328,232
def remove_granular_connector_edges(inner_g_strongly_connected):
    """
    Removes granular connector edges from graph

    Args:
        inner_g_strongly_connected (NetworkX MultiDiGraph): strongly_connected
            street network graph

    Returns:
        inner_g_strongly_connected (NetworkX MultiDiGraph): strongly_connected
            street network graph with granular connector edges removed
            (contracted connector edges intact)
        GranularConnector_EdgeList (list): list of granular connector edges
    """
    # Iterate the original while mutating a copy, so removal is safe.
    pruned_graph = inner_g_strongly_connected.copy()
    GranularConnector_EdgeList = []
    for u, v, key, data in inner_g_strongly_connected.edges(data=True, keys=True):
        if data.get('granular_type') == 'connector':
            pruned_graph.remove_edge(u, v, key=key)
            GranularConnector_EdgeList.append([u, v])
    print('\n{} Granular Connector Edges'.format(len(GranularConnector_EdgeList)))
    return pruned_graph, GranularConnector_EdgeList
eab9010996e58fd9ca5ec6074323da357f5e1a19
461,927
from functools import reduce
from operator import getitem


def get_key_by_path(tree, keys):
    """
    Function to get keys from a tree by path.

    Parameters
    ----------
    tree : dict
        Instance containing the keys
    keys : Multiple
        Key to be extracted

    Returns
    -------
    keys : Multiple
        Path extracted key
    """
    node = tree
    for key in keys:
        node = getitem(node, key)
    return node
abefed035673905f67db6b292b1f0b6aa9027d6e
343,618
import functools


def skippable(*prompts, argument=None):
    """
    Decorator to allow a method on the :obj:`CustomCommand` to be skipped.

    Parameters:
    ----------
    prompts: :obj:iter
        A series of prompts to display to the user when the method is being
        skipped.
    argument: :obj:`str`
        By default, the management command argument to indicate that the
        method should be skipped will be `skip_<func_name>`.  If the argument
        should be different, it can be explicitly provided here.
    """
    def decorator(func):
        skip_flag = argument or "skip_%s" % func.__name__

        @functools.wraps(func)
        def inner(instance, *args, **kwargs):
            # Skip only when the flag is literally True (not merely truthy).
            if kwargs.get(skip_flag) is True:
                instance.prompt(*prompts, style_func=instance.style.HTTP_NOT_MODIFIED)
                return False
            return func(instance, *args, **kwargs)
        return inner
    return decorator
879106f4cc0524660fb6639e56d688d40b115ac4
1,464
def get_files_from_drive(drive_service, name=None, substring_name=None, mime_type=None,
                         custom_metadata=None, parent_id=None, trashed=False,
                         result_fields=("name", "id")):
    """Gets files from Google Drive based on various search criteria.

    Arguments:
    drive_service -- a Google Drive service
    name -- name of file(s) being searched for (default None)
    substring_name -- substring the file name must contain (default None)
    mime_type -- either 'folder' or 'file' (default None)
    custom_metadata -- dict of custom property key/value pairs (default None)
    parent_id -- the ID of the parent folder for the file(s) being searched
        for (default None)
    trashed -- whether or not the file being searched for is trashed
        (default False)
    result_fields -- specifies what data is returned (default ("name", "id"));
        now an immutable tuple to avoid the mutable-default anti-pattern

    Returns:
    A list of dictionaries containing the requested fields of the files found
    using the specified search criteria.

    Raises:
    ValueError -- if mime_type is neither 'folder' nor 'file'.
    """
    query_list = []
    if name:
        query_list.append("name = '{}'".format(name))
    if substring_name:
        query_list.append("name contains '{}'".format(substring_name))
    if mime_type:
        if mime_type == "folder":
            query_list.append("mimeType = 'application/vnd.google-apps.folder'")
        elif mime_type == "file":
            # Fixed: the closing quote of the MIME type was missing,
            # producing an invalid Drive query string.
            query_list.append("mimeType = 'application/vnd.google-apps.file'")
        else:
            # (The unreachable `exit(0)` that followed this raise was removed.)
            raise ValueError("'mime_type' argument must be 'folder' or 'file'.")
    if custom_metadata:
        for key, value in custom_metadata.items():
            q = "properties has {key='" + key + "' and value='" + value + "'}"
            query_list.append(q)
    if parent_id:
        query_list.append("'{}' in parents".format(parent_id))
    if trashed == False:
        query_list.append("trashed = false")
    elif trashed == True:
        query_list.append("trashed = true")

    # Joining avoids the IndexError the manual loop hit on an empty list.
    q = " and ".join(query_list)

    result_fields_string = ','.join(result_fields)
    results = drive_service.files().list(
        q=q,
        spaces='drive',
        fields='nextPageToken, files({})'.format(result_fields_string)).execute()
    return results.get('files', [])
1499296106a19f26b9ed2d685392cc6ae615461b
435,703
def rc_str(dna):
    """Reverse complement a string.

    Unknown characters raise KeyError, as before.
    """
    base_pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    complement = {}
    for upper, comp in base_pairs.items():
        complement[upper] = comp
        complement[upper.lower()] = comp.lower()
    return ''.join(complement[base] for base in reversed(dna))
8508078209b1136f8405608bae331a877e13c9dc
395,238
def find_duplicates(iterable):
    """Find duplicate elements in an iterable.

    Parameters
    ----------
    iterable : iterable
        Iterable to be searched for duplicates (i.e., elements that are
        repeated).

    Returns
    -------
    set
        Repeated elements in `iterable`.
    """
    # modified from qiita.qiita_db.util.find_repeated
    # https://github.com/biocore/qiita
    # see licenses/qiita.txt
    seen = set()
    repeated = set()
    for element in iterable:
        if element in seen:
            repeated.add(element)
        seen.add(element)
    return repeated
b5d5bc6b85cd2cfafe3bd0f88ef2daa0e28e538e
63,166
import ipaddress


def max_usable_hosts(addr):
    """compute total number of client connections that can be made with a
    given network (total addresses minus the network and broadcast addresses)
    """
    network = ipaddress.ip_network(addr)
    reserved = 2  # network address + broadcast address
    return network.num_addresses - reserved
2d6ebf39b700bdc618d0abfca1c389f8dd760a18
209,381
def relative_deviation(simulated, observed, threshold):
    """Return the relative deviation of a simulated value from an observed value.

    Returns a tuple ``(exceeds_threshold, deviation)``.  A zero observed
    value is replaced by a tiny epsilon to avoid division by zero (same
    fallback value as before, but via an explicit check instead of the
    odd ``1 / observed`` probe inside try/except).
    """
    if observed == 0:
        observed = 0.00001
    dev = abs(simulated - observed) / observed
    if dev > threshold:
        return (True, dev)
    else:
        return (False, dev)
84559c29b2b3e8f87aa55ca7fbd42ad69fcb0409
198,885
def has_valid_fastq_extension(file_location):
    """
    Determines whether or not the passed file location has a valid FASTQ or
    GZIP'd FASTQ file extension.

    ARGUMENTS
        file_location (str): the location of the file to check

    RETURNS
        valid (bool): whether or not the file location has a valid FASTQ
        extension
    """
    # Leading dots ensure these match only as real extensions: previously
    # ("fastq", ...) made names like "myfastq" pass the check.
    valid_extensions = (".fastq", ".fq", ".fastq.gz", ".fq.gz")
    valid = file_location.lower().endswith(valid_extensions)  # Case insensitive matching
    return valid
89603a0868ad2dc28cabc92a08503c8b7b40b654
503,431
def unset_bit(string: int, pos: int) -> int:
    """Return bitstring with the bit at the position unset

    Args:
        string (int): bit string
        pos (int): position in the bit string

    Returns:
        int: string with the pos bit set to 0
    """
    mask = ~(1 << pos)
    return int(string) & mask
10c3ae77a19a9323758e5679f358c0f55c19c962
616,765
def get_ckpt_filename(node_name, epoch):
    """Returns the checkpoint filename.

    Args:
        node_name: A string. The name of the node.
        epoch: An integer. The checkpoint epoch.

    Returns:
        ckpt_filename: A string. The filename of the checkpoint.
    """
    return '.'.join([node_name, str(epoch)])
5f72a924503ae7ae33cd912ad72495118fde2f6e
484,327
def unbind(instance_id, binding_id):
    """
    Unbind an existing instance associated with the binding_id provided

    DELETE /v2/service_instances/<instance_id>/service_bindings/<binding_id>:
        <instance_id> is the Cloud Controller provided value used to
            provision the instance
        <binding_id> is the Cloud Controller provided value used to bind
            the instance

    return: As of API 2.3, an empty JSON document is expected
    """
    print("inside unbinding")
    empty_response = {}
    return empty_response
bd43f393a10a9114b44d125f97271293ba483fdd
187,316
def cluster_vertical(P):  # Used in form_segment_().
    """
    Cluster P vertically, stop at the end of segment
    """
    # Follow the chain downward only while it is unbranched in both
    # directions: P must have exactly one down_fork, and that fork must
    # have exactly one up_fork (i.e. P is its sole parent).
    if len(P['down_fork_']) == 1 and len(P['down_fork_'][0]['up_fork_']) == 1:
        down_fork = P.pop('down_fork_')[0]  # Only 1 down_fork.
        down_fork.pop('up_fork_')  # Only 1 up_fork.
        # NOTE: the pops mutate P and down_fork in place — these keys are
        # gone for the caller after this function returns.
        down_fork.pop('y')
        down_fork.pop('sign')
        return [P] + cluster_vertical(down_fork)  # Plus next P in segment
    return [P]
d2bbd35795c349985e9d1a28c6a5b9e5476ce2f0
209,684
def get_not_downloaded_isbns(all_isbns, downloaded_isbns):
    """
    Identify ISBNs which haven't been downloaded

    @params:
        all_isbns          - Required : all isbns (pd.Series)
        downloaded_isbns   - Required : already downloaded isbns (pd.Series)
    @returns:
        to_download : sorted isbns to download (list)
    """
    remaining = set(all_isbns.to_list()) - set(downloaded_isbns.to_list())
    return sorted(remaining)
f92f11e3b9ac47eacd50c79a436ee4e5ce3260f1
423,914
import base64


def embed_mp4(filename):
    """Embeds an mp4 file in the notebook.

    Returns an HTML ``<video>`` tag with the file contents inlined as a
    base64 data URI.
    """
    # Context manager closes the handle; the previous bare open() leaked it.
    with open(filename, 'rb') as video_file:
        video = video_file.read()
    b64 = base64.b64encode(video)
    tag = '''
    <video width="640" height="480" controls>
      <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
    </video>'''.format(b64.decode())
    # return IPython.display.HTML(tag)
    return tag
ba41e6f7518e26dc6781eb2f321b301863896b41
232,025
def _map_channels_to_measurement_lists(snirf): """Returns a map of measurementList index to measurementList group name.""" prefix = "measurementList" data_keys = snirf["nirs"]["data1"].keys() mls = [k for k in data_keys if k.startswith(prefix)] def _extract_channel_id(ml): return int(ml[len(prefix) :]) return {_extract_channel_id(ml): ml for ml in mls}
d6d83c01baec5f345d58fff8a0d0107a40b8db37
4,806
def _request_get_json(response): """ Get the JSON from issuing a ``request``, or try to produce an error if the response was unintelligible. """ try: return response.json() except ValueError as e: return {"error": str(e)}
1b6a6d823c23f036ef3c2a06ed3a421544797bc5
83,249
from typing import Tuple
import textwrap


def prep_for_xml_contains(text: str) -> "Tuple[str]":
    """Prep string for finding an exact match to formatted XML text."""
    # Dedent to normalize the input, then re-indent every line with the
    # fixed space prefix expected by the formatted XML, and wrap in a
    # 1-tuple for the `contains` assertion helper.
    # noinspection PyRedundantParentheses
    return (textwrap.indent(textwrap.dedent(text), " ",),)
1d3170c41f1688a853b91784b5f6f17465d885f7
141,913
import random


def filter_random_values(n, df, col_name):
    """
    Filter randomly chosen part of DataFrame.

    :param n: Sample fraction (must be lower than 1)
    :param df: Pandas DataFrame.
    :param col_name: DataFrame column name
    :return filtered DataFrame
    :raises ValueError: if n is not lower than 1.
    """
    # Explicit validation instead of `assert`, which is silently stripped
    # when Python runs with -O.
    if n >= 1:
        raise ValueError("sample fraction n must be lower than 1")

    # list of unique values in column col_name
    val_list = list(df[col_name].unique())

    # randomly choose part of DataFrame
    chosen_val = random.sample(val_list, int(n * len(val_list)))

    return df[df[col_name].isin(chosen_val)]
b8730c0251a7bdb1a501b92ab450bdb6c1b8d9b8
340,465
import hashlib


def get_file_hash(filepath):
    """Reads the file and returns a SHA-1 hash of the file.

    Args:
        filepath: String of the path to the file to be hashed

    Returns:
        String: The SHA-1 hash of the file.
    """
    hasher = hashlib.sha1()
    blocksize = 65536
    with open(filepath, "rb") as f:
        # iter() with a sentinel reads fixed-size chunks until EOF (b"").
        for chunk in iter(lambda: f.read(blocksize), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
af966bca8125adbd48a36b3ada3e5226bdfd890c
577,401
from typing import Dict, List, Set


def get_samples_in_studies(samples: List[str],
                           studies: Set[str],
                           sample_to_study: Dict[str, str]) -> List[str]:
    """
    Find which samples from the list were generated by the given studies

    Arguments
    ---------
    samples: The accessions of all samples
    studies: The studies of interest that generated a subset of samples
        in the list
    sample_to_study: A mapping between each sample and the study that
        generated it

    Returns
    -------
    subset_samples: The samples that were generated by a study in `studies`
    """
    subset_samples = []
    for sample in samples:
        if sample_to_study[sample] in studies:
            subset_samples.append(sample)
    return subset_samples
2990853024b9006df27b002d98705db55e94ed30
567,092
def norm_whitespace(s: str) -> str:
    """normalize whitespace in the given string.

    Splits on any run of whitespace (tabs, newlines, multiple spaces) and
    rejoins with single spaces; leading/trailing whitespace is dropped.
    """
    words = s.split()
    return ' '.join(words)
0f1194008abfececff6ed72181e28e7204e63713
335,510
from pathlib import Path


def read_scanners(filename: Path) -> list[list]:
    """Read the raw scanner inputs.

    Args:
        filename (Path): filename

    Returns:
        list[list] scanned beacon positions for each scanner
    """
    result = []
    with filename.open("r") as file:
        for line in file:
            if "scanner" in line:
                # Header line starts a new scanner's beacon list.
                result.append([])
                continue
            # Keep digits and minus signs; everything else becomes a separator.
            cleaned = "".join(c if c in "1234567890-" else " " for c in line)
            pos = [int(token) for token in cleaned.split()]
            if len(pos) == 3:
                result[-1].append(tuple(pos))
    return result
4c189882b8e141fde9c67e26b3a0b15b3def90c0
265,876
from typing import Any
from typing import Iterable


def get_config_name_for_args(*args) -> str:
    """
    Get a name string by concatenating the given args.

    Usage::

        >>> get_config_name_for_args("123456", None, "+remindme")
        '123456__None__+remindme'

        >>> get_config_name_for_args("123456", None, ("a_tag",))
        '123456__None__a_tag'

        >>> get_config_name_for_args("123456", None, ("a_tag", "b_tag"))
        '123456__None__a_tag,b_tag'

        >>> get_config_name_for_args("123456")
        Traceback (most recent call last):
        RuntimeError: ...
    """
    # sanity check: a composite name needs at least two parts.  The old
    # check (len(args) == 1) silently accepted a zero-argument call and
    # returned '' — now both degenerate cases raise.
    if len(args) < 2:
        raise RuntimeError("get_config_name_for_args requires more than 1 argument")

    def format_(obj: Any) -> str:
        if isinstance(obj, str):
            return obj
        elif isinstance(obj, Iterable):
            # non-string iterables are rendered as comma-joined items
            return ",".join(map(str, obj))
        return str(obj)

    return "__".join(map(format_, args))
1a389888f09b35e7f4188d29fc1411c78b1ac3e3
331,011
def oif_axis_size(oifsettings):
    """Return dict of axes sizes from OIF main settings."""
    # Units measured in nm/ms are rescaled to um/s; unknown units pass through.
    scale = {'nm': 1000.0, 'ms': 1000.0}
    result = {}
    axis_index = 0
    while True:
        try:
            axis = oifsettings[f'Axis {axis_index} Parameters Common']
        except KeyError:
            # Axes are numbered consecutively; first gap ends the scan.
            return result
        span = abs(axis['EndPosition'] - axis['StartPosition'])
        span /= scale.get(axis['UnitName'], 1.0)
        result[axis['AxisCode']] = span
        axis_index += 1
8488e440898e5cd649261d67370b662ceddd5934
377,142
def is_nova_server(resource):
    """ checks resource is a nova server """
    if not isinstance(resource, dict):
        return False
    has_required_fields = "type" in resource and "properties" in resource
    return has_required_fields and resource.get("type") == "OS::Nova::Server"
c412455c6a22f3ac1b1eb074caeec70d54bf0fbd
89,083
def get_kappa(searchers: dict):
    """Retrieve kappa from each searcher and return as list"""
    # dict values iterate in the same order as keys, so this matches the
    # original key-indexed loop.
    return [searcher.kappa for searcher in searchers.values()]
899ebd8b9ca6259f253e380e8000e33550d838b2
594,192
def rgb2gray(rgb_img):
    """
    Parameters
    ----------
    rgb_img : numpy array with shape as (3, X, Y)
        image to convert in gray

    Returns
    -------
    gray_img , the grayscale image
    """
    # ITU-R BT.601 luma coefficients; the arithmetic (normalize, weight,
    # rescale) is kept in the same order as before for bit-identical output.
    coeffs = [0.2989, 0.5870, 0.1140]
    red = rgb_img[0] / 255
    green = rgb_img[1] / 255
    blue = rgb_img[2] / 255
    gray = coeffs[0] * red + coeffs[1] * green + coeffs[2] * blue
    return gray * 255
d6b582253eed18c1810f584bda815a7b98f74ec4
645,274
def to_snake_case(field: str) -> str:
    """Return string converted to snake case (e.g. "Given Name" -> "given_name")."""
    # Per-character processing (spaces -> underscores, the rest lowercased).
    converted = ["_" if ch == " " else ch.lower() for ch in field]
    return "".join(converted)
833a107f3a6c9b24a44d47c73851c7e2ce0ffb38
95,473
import functools


def repeat(num):
    """ decorator for repeating tests several times """
    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(*args, **kwargs):
            for _iteration in range(num):
                test_func(*args, **kwargs)
        return wrapper
    return decorator
070b301b2ea47ab72e183d0fcfcbb60a0c34b52e
474,655
import operator
import math


def unit_vector(vec1, vec2):
    """Return a unit vector (as a list) pointing from vec1 to vec2.

    If the two points coincide, the zero-length difference vector is
    returned unchanged (the scale factor is forced to 1 to avoid a
    division by zero).
    """
    # Materialise the difference vector: in Python 3 ``map`` returns a
    # one-shot iterator, and the original code exhausted it while computing
    # the length, so the final map over it always yielded nothing.
    diff_vector = list(map(operator.sub, vec2, vec1))
    scale_factor = math.sqrt(sum(component ** 2 for component in diff_vector))
    if scale_factor == 0:
        # We don't have an actual vector, it has zero length.
        scale_factor = 1
    return [component / scale_factor for component in diff_vector]
79e2cff8970c97d6e5db5259801c58f82075b1a2
704,506
from typing import Mapping
from typing import Union
from typing import List


def map_params_to_arg_list(params: Mapping[str, Union[str, float, int]]) -> List[str]:
    """Render a params mapping as a list of "key=value" argument strings."""
    return [f"{key}={value}" for key, value in params.items()]
17fed9dd821abd5e6e3a9664ae6713cacec35d4f
565,284
from typing import OrderedDict


def dict_sort(dic, by="value", topk=None, reverse=True):
    """
    Sort a dictionary by key or by value.

    Params
    ======
    dic (dict) : Input dictionary
    by (str) : Sorting mode ("value" sorts by value, "key" sorts by key)
    topk (int)(Optional) : Return only the top k entries after sorting
    reverse (bool)(Optional) : Descending order when True (default),
                               ascending when False

    Raises
    ======
    ValueError : If ``by`` is neither "value" nor "key" (previously the
        function fell through and silently returned None).
    """
    if topk is None:  # identity check instead of `== None`
        topk = len(dic)
    if by == "value":
        return {
            k: v
            for k, v in sorted(dic.items(), key=lambda item: item[1], reverse=reverse)[
                :topk
            ]
        }
    elif by == "key":
        return dict(OrderedDict(sorted(dic.items(), reverse=reverse)[:topk]))
    raise ValueError(f"dict_sort: 'by' must be 'value' or 'key', got {by!r}")
4590f8d703f7ac733a2833189905806e057cb25d
506,930
def read_lines(file_name):
    """Return all lines of *file_name* as a list (newline characters kept)."""
    with open(file_name, 'r') as handle:
        return handle.readlines()
d5fbf26b61029a6cfd1738b8d3f44b71bbf6e0a2
646,561
def parse_float(v: str) -> float:
    """Parse a float-typed environment variable.

    :param v: An environment variable value to parse.
    :type v: :class:`str`
    :return: The parsed floating point value.
    :rtype: :class:`float`

    .. versionadded:: 0.5.6
    """
    return float(v)
e52b78808fa41f2190495c31ca0981cd9b34fbbb
220,028
def prune(d):
    """If possible, make the multi-valued mapping `d` single-valued.

    When every value set of `d` has exactly one member, drop the set layer
    and return the resulting single-valued dict; otherwise return `d` as-is.
    """
    for values in d.values():
        if len(values) != 1:
            return d
    return {key: next(iter(values)) for key, values in d.items()}
b0c60b00a5bd47d248f558db6d96b8569b9e94bb
670,384
def get_top_pb_type(element): """ Returns the top level pb_type given a subelement of the XML tree.""" # Already top-level parent = element.getparent() if parent is not None and parent.tag == "complexblocklist": return None # Traverse while True: parent = element.getparent() if parent is None: return None if parent.tag == "complexblocklist": assert element.tag == "pb_type", element.tag return element element = parent
fefe2cb04612c3ab5ed99f7dc6ec774ce990b320
306,757
from math import log2


def human_readable_file_size(size):
    """
    Return a human readable file size string for a size in bytes.

    Adapted from https://stackoverflow.com/a/25613067)
    """
    _suffixes = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']

    # determine binary order in steps of size 10
    # (coerce to int, // still returns a float)
    order = int(log2(size) / 10) if size else 0
    # Clamp so sizes beyond the largest suffix don't raise IndexError;
    # they are reported in the largest known unit instead.
    order = min(order, len(_suffixes) - 1)

    # format file size
    # (.4g results in rounded numbers for exact matches and max 3 decimals,
    # should never resort to exponent values)
    return '{:.4g} {}'.format(size / (1 << (order * 10)), _suffixes[order])
171b9039885222c6d17edd923b9d9e5fa17e2933
427,334
from functools import reduce
from operator import getitem


def _get_by_path(tree, keys):
    """Access a nested object in *tree* by following a sequence of keys.

    Used when reading an existing config (nested JSON-like mappings) so a
    new value can later be written at the same location.

    :param tree: nested mapping (parsed JSON config)
    :param keys: sequence of keys describing the path
    :return: the value stored at that path
    """
    # Equivalent to reduce(getitem, keys, tree), spelled out as a loop.
    node = tree
    for key in keys:
        node = node[key]
    return node
d55ef3f41061a7323adfd8434e239c3f8febb120
262,843
def maneuverToDir(str: str) -> int:
    """
    Map a Dubins curve action to an integer in {-1, 0, 1}.

    Paramters
    ---------
    str: str
        dubins curve action

    Returns
    -------
    int
        L -> 1, R -> -1, anything else (S) -> 0
    """
    return {'L': 1, 'R': -1}.get(str, 0)
a70d65f89c20e281eae6b69443a9bdc9fbe04eb1
56,677
async def index():
    """
    Index of the Metadata Repository Service that redirects to the API
    documentation.
    """
    message = "Index of the Metadata Repository Service"
    return message
6e8f751c2a92a764362aa19c03803b7db843905b
257,619
import random

def generate_trip_data(num_existing_trips, num_trips, num_destinations):
    """Generates a list of tripData to be inserted into the database.

    Keyword arguments:
    num_existing_trips -- number of trips in the evolutions
    num_trips -- number of trips being created by the script
    num_destinations -- number of destinations being created by the script

    Returns:
    trip_data -- a list of strings to be added to the SQL script
    trip_destination_data -- a dictionary mapping a trip id to all the destinations
                             visited along the trip
    """
    trip_data = ["INSERT IGNORE INTO TripData (trip_id, position, destination_id) VALUES\n"]
    trip_data_template = "({0}, {1}, {2}),\n"
    trip_destination_data = {}

    for i in range(num_trips):
        trip_id = num_existing_trips + i + 1  # +1 because SQL ids start from 1
        used_destinations = set()  # The destinations this trip has been given

        for j in range(random.randint(2, 4)):
            position = j + 1  # +1 because SQL ids start from 1
            destination_id = random.randint(1, num_destinations)

            # Ensure that each destination is unique
            # NOTE(review): a trip may request up to 4 unique destinations,
            # so if num_destinations < 4 this rejection loop can spin
            # forever -- confirm num_destinations >= 4 upstream.
            while destination_id in used_destinations:
                destination_id = random.randint(1, num_destinations)
            used_destinations.add(destination_id)

            trip_data.append(trip_data_template.format(trip_id, position, destination_id))
            trip_destination_data.setdefault(trip_id, []).append(destination_id)

    # Make the last commas semi-codes instead, with a newline for formatting
    # NOTE(review): when num_trips == 0 the only element is the INSERT
    # header, so this truncates the header itself and produces broken SQL
    # -- confirm num_trips > 0 upstream.
    trip_data[-1] = trip_data[-1][:-2] + ";\n"
    return trip_data, trip_destination_data
f44de12397bb90d3ef5e8184c5eb9bccfa1f5270
346,710
import torch

def safe_power(x, exponent, *, epsilon=1e-6):
    """
    Takes the power of each element in input with exponent and returns a tensor with the result.

    This is a safer version of ``torch.pow`` (``out = x ** exponent``), which avoids:

    1. NaN/imaginary output when ``x < 0`` and exponent has a fractional part
        In this case, the function returns the signed (negative) magnitude of the complex number.

    2. NaN/infinite gradient at ``x = 0`` when exponent has a fractional part
        In this case, the positions of 0 are added by ``epsilon``,
        so the gradient is back-propagated as if ``x = epsilon``.

    However, this function doesn't deal with float overflow, such as 1e10000.

    Parameters
    ----------
    x : torch.Tensor or float
        The input base value.

    exponent : torch.Tensor or float
        The exponent value.

        (At least one of ``x`` and ``exponent`` must be a torch.Tensor)

    epsilon : float
        A small floating point value to avoid infinite gradient. Default: 1e-6

    Returns
    -------
    out : torch.Tensor
        The output tensor.
    """
    # convert float to scalar torch.Tensor
    if not torch.is_tensor(x):
        if not torch.is_tensor(exponent):
            # both non-tensor scalars
            x = torch.tensor(x)
            exponent = torch.tensor(exponent)
        else:
            x = torch.tensor(x, dtype=exponent.dtype, device=exponent.device)
    else:  # x is tensor
        if not torch.is_tensor(exponent):
            exponent = torch.tensor(exponent, dtype=x.dtype, device=x.device)
    exp_fractional = torch.floor(exponent) != exponent
    if not exp_fractional.any():  # no exponent has a fractional part
        return torch.pow(x, exponent)

    # Broadcast everything to a common shape and precompute the masks used
    # by the two special cases handled below.
    x, x_lt_0, x_eq_0, exponent, exp_fractional = torch.broadcast_tensors(
        x, x < 0, x == 0, exponent, exp_fractional)

    # deal with x = 0
    if epsilon != 0:
        mask = x_eq_0 & exp_fractional
        if mask.any():  # has zero value
            x = x.clone()
            x[mask] += epsilon

    # deal with x < 0
    mask = x_lt_0 & exp_fractional
    if mask.any():
        # Take the power of |x| and re-apply the sign afterwards: the
        # signed magnitude of the complex result described in the docstring.
        x = x.masked_scatter(mask, -x[mask])
        out = torch.pow(x, exponent)
        out = out.masked_scatter(mask, -out[mask])
    else:
        out = torch.pow(x, exponent)

    return out
c384c43482fd9cba4957b115555c58f1c6fa50ce
689,830
def mocked_compute_cell_size(data_dict, by_image):
    """Mocks compute cell size so we don't need to create synthetic data with correct cell size"""
    constant_val = data_dict['X'][0, 0, 0, 0]

    # The default resize is 400. We want to create median cell sizes that divide evenly
    # into that number when computing the desired resize ratio
    # even constant_vals will return a median cell size 1/4 the size of the target, odds 4x
    return 100 if constant_val % 2 == 0 else 1600
656d879ca962aa45bd18ab0ba1aba045c48f29bd
179,876
def degree(G, nbunch=None, t=None):
    """Return the degree of a node or nodes at time t.

    The node degree is the number of edges adjacent to that node.
    Delegates to ``G.degree``.

    Parameters
    ----------
    G : Graph opject
        DyNetx graph object
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes. The container will be iterated through once.
    t : snapshot id (default=None)
        If None, the degree on the flattened graph is returned.

    Returns
    -------
    nd : dictionary, or number
        A dictionary with nodes as keys and degree as values, or a single
        number if a single node is specified.
    """
    return G.degree(nbunch, t)
cb2f47a453051a6021444486b16e9f238dcadda4
350,215
def get_pkg_name(req):
    """Return the name of the package in the given requirement text.

    Handles environment markers (``; python_version < "3"``), extras
    (``pkg[extra]``) and every PEP 440 comparison operator
    (==, >=, <=, <, >, ~=, !=, ===), not only ``==`` and ``>=``.
    Surrounding whitespace is stripped from the name.
    """
    import re

    # strip env markers
    req = req.partition(';')[0]
    # strip extras and any version specifier: cut at the first character
    # that can start '[', '==', '>=', '<=', '<', '>', '~=', '!=' or '==='
    req = re.split(r'[\[<>=!~]', req, maxsplit=1)[0]
    return req.strip()
f82fc633d2d089485e7873ba3b31c93b3df9c734
145,984
def _models_count_all_function_name(model):
    """Return the name of the generated function that counts all models in the database."""
    return f"{model.get_table_name()}_count_all"
22ea542e640e1b53fefe22d467c2dd852203b8a9
635,919
def lagrange2(N, i, x, xi):
    """
    Evaluate the Lagrange basis polynomial of order N for index i in [0, N]
    at location x, given collocation points xi (not necessarily the
    GLL-points).

    Note: the implementation indexes xi with a +1 offset relative to i,
    matching the original convention.
    """
    result = 1
    # Equivalent to the original loop over j in range(-1, N) with node = j + 1.
    for node in range(N + 1):
        if node != i + 1:
            result *= (x - xi[node]) / (xi[i + 1] - xi[node])
    return result
c37284ac5a8446484d625b1f41c0f67381a3b5d6
431,725
def semPad(ver: list[str], length: int) -> list[str]:
    """Pad a semver list to the required size. e.g. ["1", "0"] to ["1", "0", "0"].

    A trailing "*" wildcard is propagated into the padding.

    Args:
            ver (list[str]): the semver representation
            length (int): the new length

    Returns:
            list[str]: the new semver
    """
    filler = "*" if ver[-1] == "*" else "0"
    return ver + [filler] * (length - len(ver))
6e794fddc961d16c5f24ba66624360ec13394739
584,848
def tstr_to_float(tstr):
    """
    Convert time from 12-hour string (with AM/PM) to agenda-compatible float.

    :param tstr: 12-hour time string
    :returns: Float like: 8.0 for '8:00AM', 13.5 for '1:30PM', 0.25 for '12:15AM'
    """
    # Strip the AM/PM suffix before isolating the hour so that colon-less
    # inputs like '12PM' parse correctly (the original compared the raw
    # 'HHPM' token against '12' and mapped '12PM' to 24.0).
    hour_part = tstr.rstrip("APM").split(":")[0]
    afloat = float(hour_part)
    if "PM" in tstr and hour_part != "12":
        afloat += 12.0
    # 12:xxAM is midnight, i.e. hour 0 -- the original mapped it to 12.x.
    if "AM" in tstr and hour_part == "12":
        afloat -= 12.0
    if ":" in tstr:
        afloat += float(tstr.rstrip("APM").split(":")[1][0:2]) / 60
    return afloat
df063e6fdac8cc60457ed70dbfebdb9ac51e77b4
161,745
def telescopic_direct(L, R, n, limits):
    """Return the direct summation of the terms of a telescopic sum.

    L is the term with the lower index, R the term with the higher index,
    and n the difference between the indexes of L and R.

    For example:

    >>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
    -1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a
    """
    (i, a, b) = limits
    # Accumulate L at the low end and R at the high end, m = 0 .. n-1.
    return sum(L.subs({i: a + m}) + R.subs({i: b - m}) for m in range(n))
1a902ade1c58867e2ad2ee4e1adc8ae1eb03f861
402,583
def sindex(i,j,qi,qj):
    """
    Return the flat (row-major) position of {i,j} in the enumeration
    {{0,0},{0,1},...,{0,qj-1},...{qi-1,qj-1}}.

    Note: qi is unused; the layout depends only on qj (the row width).
    """
    flat = i * qj + j
    return int(flat)
489d5046611311d29f9c87344013468585e71c66
194,681
def _check_name_should_break(name):
    """
    Checks whether the passed `name` is a usable `str`.

    Used inside of ``check_name`` to decide whether the given variable is
    usable, so alternative cases need not be checked.

    Parameters
    ----------
    name : `Any`

    Returns
    -------
    should_break : `bool`
        `True` for a non-empty `str`; `False` for `None` or an empty `str`.

    Raises
    ------
    TypeError
        If `name` was not passed as `None` or type `str`.
    """
    if name is None:
        return False

    # Deliberately an exact type check (subclasses of str are rejected).
    if type(name) is not str:
        raise TypeError(f'`name` should be `None` or type `str`, got `{name.__class__.__name__}`.')

    return bool(name)
4e6981fa840b89bf69a1a0e6c6401b1e2387e17d
18,115
def encode_remainining_length(remaining_length):
    # type: (int) -> bytes
    """Encode the remaining length for the packet using MQTT-style
    variable-length encoding (7 data bits per byte, MSB as continuation).

    :returns: Encoded remaining length
    :rtype: bytes
    """
    buffer = bytearray()
    while True:
        remaining_length, byte = divmod(remaining_length, 128)
        if remaining_length:
            # More bytes follow: set the continuation bit.
            byte |= 0x80
        buffer.append(byte)
        if not remaining_length:
            break
    return bytes(buffer)
ddb7ea7f98753375a8f5d817f4f07d23c09f83f4
393,147
def chain(*args):
    """Applies a list of chainable update transformations.

    Given a sequence of chainable transforms, `chain` returns an `init_fn`
    that constructs a `state` by concatenating the states of the individual
    transforms, and returns an `update_fn` which chains the update
    transformations feeding the appropriate state to each.

    Args:
      *args: a sequence of chainable (init_fn, update_fn) tuples.

    Returns:
      A single (init_fn, update_fn) tuple.
    """
    init_fns, update_fns = zip(*args)

    def init(params):
        # One sub-state per transform, in the order the transforms were given.
        return [init_fn(params) for init_fn in init_fns]

    def update(updates, state):
        # Thread `updates` through every transform, pairing each with its
        # own slice of the composite state.
        next_state = []
        for sub_state, update_fn in zip(state, update_fns):
            updates, new_sub_state = update_fn(updates, sub_state)
            next_state.append(new_sub_state)
        return updates, next_state

    return init, update
74643ddf7d88e05640c9549048ddd19f9616c2c2
687,461
from tempfile import mkdtemp


def create_temporary_directory(prefix_dir=None):
    """Create a temporary directory (named 'bloom_*') and return its path."""
    path = mkdtemp(prefix='bloom_', dir=prefix_dir)
    return path
b2a1ddeb8bcaa84532475e3f365ab6ce649cd50c
14,705
import re


def _get_data_glob(data):
    """Construct a glob expression from the data expression.

    Every ``{placeholder}`` in *data* is replaced by ``*``.
    """
    placeholder = re.compile(r'{[^{}]*}')
    return placeholder.sub('*', data)
b4add01693f3147849dc14f642d2acdd09c962c1
67,418
def collection_2_sentence(list_str):
    """Render a container of strings as an English list phrase.

    Given a container (list or set) of strings with the names of cities,
    states or any string, returns them listed as in a sentence, in
    alphabetical order. An empty container yields 'None Available'.

    Ex1: {"LIVERPOOL"} -> LIVERPOOL
    Ex2: ["LIVERPOOL", "KINGSTON"] -> KINGSTON and LIVERPOOL
    Ex3: {"BROOKLYN", "LIVERPOOL", "KINGSTON"} -> BROOKLYN, KINGSTON, and LIVERPOOL
    """
    if not list_str:
        return 'None Available'

    elements = sorted(list_str)
    if len(elements) == 1:
        return elements[0]
    if len(elements) == 2:
        return elements[0] + ' and ' + elements[1]
    # Three or more: Oxford-comma list ("a, b, and c").
    return ''.join(item + ', ' for item in elements[:-1]) + 'and ' + elements[-1]
56326ed61cdc3356ea25d6a3df899869d62ad5d7
442,629
def is_inline(tag):
    """
    A filter function, made to be used with BeautifulSoup.

    Returns truthy when the tag is an `inline` tag, or carries both a
    data-inline-type attribute and at least one of
    data-inline-{id,ids,filter}. Attributes are expected as (name, value)
    pairs, as older BeautifulSoup versions expose ``tag.attrs``.
    """
    # The original combined two ``filter(...)`` objects with ``and``; in
    # Python 3 a filter object is always truthy, so every tag matched.
    # ``any`` actually evaluates the predicates.
    has_type = any(attr[0] == 'data-inline-type' for attr in tag.attrs)
    has_target = any(
        attr[0] in ('data-inline-id', 'data-inline-ids', 'data-inline-filter')
        for attr in tag.attrs
    )
    return (has_type and has_target) or tag.name == 'inline'
ad207188de13c487c420d84645f460b175a16e88
123,025
def gcd(a, b):
    """
    gcd(a, b) -> number

    Euclid's algorithm for the greatest common divisor of a and b.
    """
    remainder = b
    while remainder > 0:
        a, remainder = remainder, a % remainder
    return a
3e484c53dec87c0932d55f63ab734cdde44efb5f
289,111
def strip_whitespace(string: str) -> str:
    """Strip leading and trailing whitespace from a string.

    Unlike the previous implementation, this also handles the empty string
    (which raised IndexError on ``string[-1]``) and strips all leading and
    trailing whitespace characters, not only a single plain space, matching
    the documented contract.
    """
    return string.strip()
9de016e162b45e3b444610008eab996cfdcfb198
605,938
def xor(b1, b2):
    """xor takes two byte buffers and returns their XOR combination,
    b2 can be shorter in length than b1, if so b2 will repeat"""
    if len(b2) > len(b1):
        raise ValueError("Xor does not accept b2 longer than b1, "
                         "args len(b1)=%d, len(b2)=%d" % (len(b1), len(b2)))
    result = bytearray(len(b1))
    cursor = 0  # position inside b2; wraps so b2 repeats
    for index, value in enumerate(b1):
        result[index] = value ^ b2[cursor]
        cursor += 1
        if cursor == len(b2):
            cursor = 0
    return result
28a2f342fef30411c13d0a4fa26ea7feaec18d32
273,938
from pathlib import Path


def get_genbank_paths(folder):
    """Generate a collection of paths to GenBank files in a specified folder."""
    root = Path(folder)
    if not root.is_dir():
        raise ValueError("Expected valid folder")
    extensions = (".gb", ".gbk", ".genbank")
    paths = []
    for entry in root.iterdir():
        if str(entry).endswith(extensions):
            paths.append(entry)
    return paths
31b3dde450c4c5182d9ec9718d1bdc70901b738b
604,698
import random


def create_random_string(length):
    """Create a random string from a fixed character set.

    Parameters
    ----------
    length : int
        Size of the expected string.

    Returns
    -------
    str
        Random string.

    Notes
    -----
    The string is built from unambiguous letters only.
    """
    alphabet = "ACDEFGHJKMNPQRTWXYZ"
    # One random.choice call per character, same call sequence as before.
    chars = [random.choice(alphabet) for _ in range(length)]
    return ''.join(chars)
03473c24cd954d464ddd9fafa5ce760e61675553
282,487
def internal_params_1D(
    pixels_per_clock: int,
    window_width: int,
    image_size: int,
    output_stride: int,
    origin: int
):
    """Calculate "internal" parameters of linebuffer based on user parameters.

    These are the window_count (total number of windows emitted), the
    parallelism (output bus width measured in windows), and the valid_count
    (number of cycles on which valid should be asserted).

    Return as tuple (window_count, parallelism, valid_count).
    """
    stride = output_stride

    # Total number of windows outputted.
    window_count = image_size // stride

    # Number of parallel window outputs; at least one window per cycle.
    raw_parallelism = pixels_per_clock // stride
    if raw_parallelism:
        assert raw_parallelism * stride == pixels_per_clock, \
            "Expected integer throughput (stride evenly dividing px/clk)."
        parallelism = raw_parallelism
    else:
        parallelism = 1

    # Number of times valid should be asserted.
    valid_count = window_count // parallelism
    assert valid_count * parallelism == window_count, \
        "Expected window count (img/stride) to be divisible by parallelism " \
        "(px/clk / stride)"

    return window_count, parallelism, valid_count
625897c740c7d0fd39eeb0d4f306963b53170621
304,879