Columns: content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
def isbn13to10(isbn13: str) -> str:
    """Convert ISBN-13 to ISBN-10 (without checking validity of input)."""
    isbn9 = isbn13[3:12]
    # for the checksum, isbn9 is zipped with [10, 9, ..., 2] weights
    weights = range(10, 1, -1)
    checksum: int = sum(weight * int(digit) for weight, digit in zip(weights, isbn9))
    # the extra % 11 maps a remainder of 0 to check digit 0
    # (the original `11 - 0 == 11` produced an invalid two-char digit)
    checkdigit: int = (11 - checksum % 11) % 11
    checkdigit_str: str = "X" if checkdigit == 10 else str(checkdigit)
    return isbn9 + checkdigit_str
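A quick sanity check, using a well-known ISBN-13/ISBN-10 pair:

assert isbn13to10("9780306406157") == "0306406152"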
85c0f305ab35e36ac74463b4ddcfc1fa3ed4bacf
304,960
def hex_to_rgb(value):
    """Hex value to (r, g, b) triple."""
    return [((value >> (8 * k)) & 255) / 255.0 for k in (2, 1, 0)]
43adea9f3ad15ee25843bd810d4ce26a314a7d07
372,252
from typing import List


def parse_config_lines(raw: str) -> List[str]:
    """Returns the non-empty lines in a config section."""
    return [x for x in raw.split("\n") if x]
960381e362dc7e0743e4526fb772bae62c5ffe0e
594,464
def key(*args):
    """Join the arguments in the format used by donkey.

    Arguments
    ---------
    args: list of string

    Examples
    --------
    >>> key('a', 'b', 'c')
    'a__b__c'
    """
    return '__'.join(args)
9094dd19fd221073fe9cdac84a3616e71eb6fbcc
649,440
def strike_text(text):
    """Add a strikethrough effect to text."""
    striked = ''
    for char in text:
        striked = striked + char + '\u0336'
    return striked
22a404f58ea07a1d08e0af6f1f450156f34c8f13
519,002
import torch


def xywh2YOLO(box: torch.Tensor, stride: float, anchor: tuple):
    """
    Returns the bounding box in a format similar to the last output layer of YOLO.

    Arguments:
        box (torch.Tensor): Bounding box tensor in xywh format
        stride (float): stride of the current layer, used to scale down the coordinates
        anchor (tuple): (width, height) of the anchor box for this layer

    Outputs:
        y_coor (int): y coordinate of the detection grid for the given box
        x_coor (int): x coordinate of the detection grid for the given box
        y (float): y output of the YOLO layer for the corresponding box
        x (float): x output of the YOLO layer for the corresponding box
        w (float): w output of the YOLO layer for the corresponding box
        h (float): h output of the YOLO layer for the corresponding box
    """
    x = box[..., 0].item() / stride
    x_coor = int(box[..., 0].item() / stride)
    y = box[..., 1].item() / stride
    y_coor = int(box[..., 1].item() / stride)
    x -= x_coor
    y -= y_coor
    w = torch.log(box[..., 2] / anchor[0] + 1e-16).item()
    h = torch.log(box[..., 3] / anchor[1] + 1e-16).item()
    return y_coor, x_coor, y, x, w, h
74ad50f5d61b206d53469020a53aafcb5eb29533
246,181
def flipHit(hit):
    """Returns a new hit where query and subject are flipped"""
    return [hit[1],   # 0. Query id,
            hit[0],   # 1. Subject id,
            hit[2],   # 2. % identity,
            hit[3],   # 3. alignment length,
            hit[4],   # 4. mismatches,
            hit[5],   # 5. gap openings,
            hit[8],   # 6. q. start,
            hit[9],   # 7. q. end,
            hit[6],   # 8. s. start,
            hit[7],   # 9. s. end,
            hit[10],  # 10. e-value,
            hit[11]]
34543613ae229b2d68734c903656814a0db19a51
237,486
def binary_freq(data, expression, feature_name=str, analyze=True):
    """Search data for occurrences of a binary feature as a regex.

    Args:
        data (pd.Series): a series with text instances.
        expression (re.compile): a regex or string to search for.
        feature_name (str, optional): a name for the feature to extract. Defaults to str.
        analyze (bool, optional): if True, return a list of per-text dicts
            instead of the raw series. Defaults to True.

    Returns:
        list: a list with a dict mapping feature name to 1 or 0 (true/false)
        based on occurrence in texts.
    """
    b = data.str.contains(expression).astype(int)  # cast bools to 0/1
    if analyze:
        return [{feature_name: x[1]} for x in b.items()]
    return b
3211b7bfb37aae912816e81f8eb92792afd7a4e1
676,425
def _spectrogram_mp_helper(ooi_hyd_data_obj, win, L, avg_time, overlap, verbose, average_type):
    """Helper function for compute_spectrogram_mp"""
    ooi_hyd_data_obj.compute_spectrogram(win, L, avg_time, overlap, verbose, average_type)
    return ooi_hyd_data_obj.spectrogram
759fc929df6309f8fd48d668a66c6dfe473dcb46
554,726
def get_and_update_or_create(model, filter_params, defaults):
    """
    get_or_create with default values applied to existing instances

    {model}.objects.get_or_create(**{filter_params}, defaults={defaults})

    Arguments:
        model {django.db.models.Model} -- The model the queryset gets applied to
        filter_params {dict} -- The parameters the queryset gets filtered against
        defaults {dict} -- The default values that will be applied to the instance (create, update)

    Returns:
        django.db.models.Model -- The created/updated instance of the model
    """
    instance, created = model.objects.get_or_create(**filter_params, defaults=defaults)
    if not created:
        for attr, value in defaults.items():
            setattr(instance, attr, value)
        instance.save()
    return instance
3bea14911f6a690606813aa29caa8a463918564a
632,838
def isUnivariate(signal):
    """
    Returns True if `signal` contains univariate samples.

    Parameters
    ----------
    signal : np.array, shape (NUM_SAMPLES, NUM_DIMS)

    Returns
    -------
    is_univariate : bool
    """
    if len(signal.shape) == 1:
        return True
    if signal.shape[1] > 1:
        return False
    return True
d282ed739526e4ecdd17e5ca429e037ee56415e3
165,230
def replace_all(text, dic):
    """
    Replaces multiple strings based on a dictionary.

    replace_all(string, dictionary) -> string
    """
    for i, j in dic.items():
        text = text.replace(i, str(j))
    return text
cbfde8ed2bfc8273ec7f4618a8067fe394b6ae9e
185,501
def remove_stopwords(texts, stop_words):
    """
    Parameters:
    - `texts` a list of documents
    - `stop_words` a list of words to be removed from each document in `texts`

    Returns: a list of documents that does not contain any element of `stop_words`
    """
    return [[word for word in doc if word not in stop_words] for doc in texts]
b70828b328abe1e0e59698307adc8ececeac368e
688,922
def trim_variable_postfixes(scope_str):
    """Trims any extra numbers added to a tensorflow scope string.

    Necessary to align variables in graph and checkpoint

    Args:
        scope_str: Tensorflow variable scope string.

    Returns:
        Scope string with extra numbers trimmed off.
    """
    idx = scope_str.find(':')
    if idx == -1:
        # No ':' present; return unchanged instead of dropping the last char.
        return scope_str
    return scope_str[:idx]
eb4d4dc14d903129a7dcddd2df7419724aed425a
117,090
def removeall(item, seq):
    """Return a copy of seq (or string) with all occurrences of item removed."""
    if isinstance(seq, str):
        return seq.replace(item, '')
    return [x for x in seq if x != item]
96f47b79c9f23fe84e873339d7ee49595006bb0c
306,325
def get_triangulars(num):
    """Get the first n triangular numbers."""
    return [int(i * (i + 1) / 2) for i in range(1, num + 1)]
35071a218c49609085a6c26c067fcee481b24f3d
530,988
def _to_str(pvs):
    """Ensure argument is a string or list of strings."""
    # The logic follows that from the cothread library.
    # If it is NOT a string then assume it is an iterable.
    if isinstance(pvs, str):
        return str(pvs)
    return [str(pv) for pv in pvs]
dea36c6b7b402190f38a2d89ebc6478b337a28db
447,457
def set_name_line(hole_lines, name):
    """Define the label of each line of the hole

    Parameters
    ----------
    hole_lines: list
        a list of line objects of the slot
    name: str
        the name to give to the line

    Returns
    -------
    hole_lines: list
        List of line objects with label
    """
    for ii, line in enumerate(hole_lines):
        line.label = name + "_" + str(ii)
    return hole_lines
a57667f269dac62d39fa127b2a4bcd438a8a989b
705,895
def capitalize_first_letter(text):
    """Given a string, capitalize the first letter."""
    chars = list(text.strip())
    if not chars:  # guard against empty (or all-whitespace) input
        return ""
    chars[0] = chars[0].upper()
    return "".join(chars)
c690a4e8392d7eedbc539c800c69e9addb60d0ef
219,205
def main(spark, file_path, subsampling=1):
    """
    This function splits a dataframe into the train/valid/test set.
    - train: randomly sample 60% of users and include all of their interactions
             + 50% of interactions from users in the valid/test set
    - valid: randomly sample 20% of users and include 50% of their interactions
    - test : randomly sample 20% of users and include 50% of their interactions
    Random sampling of users and interactions results in mutually exclusive splits.

    Parameters
    ----------
    spark : spark session object
    file_path : string; the path (in HDFS) to the parquet file, e.g.,
        `hdfs:/user/bm106/pub/people_small.parquet`
    subsampling : float; fraction of users to keep (1 = use all users)
    ----------
    """
    # Load the parquet file and set the column names in case they are missing
    df = spark.read.parquet(file_path)
    df = df.toDF('user_id', 'book_id', 'is_read', 'rating', 'is_reviewed')

    # Create a single-column dataframe with distinct user_ids and
    # randomly split into train/valid/test user groups
    user_list = df.select("user_id").distinct()

    # Subsample users
    print("start subsampling:", subsampling)
    user_list = user_list.sample(False, fraction=subsampling, seed=42)
    user_train, user_valid, user_test = user_list.randomSplit([0.6, 0.2, 0.2], seed=42)

    # Create X_train
    df.createOrReplaceTempView('df')
    user_train.createOrReplaceTempView('user_train')
    X_train = spark.sql('SELECT * FROM df WHERE user_id IN (SELECT user_id FROM user_train)')

    # Create X_valid
    user_valid.createOrReplaceTempView('user_valid')
    X_valid = spark.sql('SELECT * FROM df WHERE user_id IN (SELECT user_id FROM user_valid)')
    X_valid_sampled = X_valid.sampleBy("user_id", fractions={k['user_id']: 0.5 for k in user_valid.rdd.collect()}, seed=42)
    X_valid_to_train = X_valid.subtract(X_valid_sampled)  # This dataframe will be concatenated with X_train

    # Create X_test
    user_test.createOrReplaceTempView('user_test')
    X_test = spark.sql('SELECT * FROM df WHERE user_id IN (SELECT user_id FROM user_test)')
    X_test_sampled = X_test.sampleBy("user_id", fractions={k['user_id']: 0.5 for k in user_test.rdd.collect()}, seed=42)
    X_test_to_train = X_test.subtract(X_test_sampled)

    # Concatenate remaining records of valid/test to X_train
    X_train = X_train.union(X_valid_to_train).union(X_test_to_train)

    return X_train, X_valid_sampled, X_test_sampled
d6096392c0ef965f4cd16b37eddf8e6f34e0d572
472,427
from bs4 import BeautifulSoup


def getNewData(response):
    """Extracts hidden input data from a response, returning a data dictionary.

    Extracts hidden input data from a response body, returning a dictionary
    of key-value pairs representing data to send to subsequent requests.

    Args:
        response: The Python requests response object, typically corresponding
            to a simple GET request on the main refugee explorer page.

    Returns:
        A dictionary with key-value pairs representing the data.
    """
    b = BeautifulSoup(response.text, 'html5lib')
    inputs = b.findAll('input', type='hidden')
    newdata = {elem.get('name'): elem.get('value') for elem in inputs}
    return newdata
5cc2dab183d5aea69d0afeac68879ebaaa8b637a
351,215
def gcs_url_for_backup_directory(backup_bucket_name, fuzzer_name,
                                 project_qualified_target_name):
    """Build GCS URL for corpus backup directory.

    Returns:
        A string giving the GCS URL.
    """
    return 'gs://%s/corpus/%s/%s/' % (backup_bucket_name, fuzzer_name,
                                      project_qualified_target_name)
85ac9d935515dda426d4579112733e9e8f5f1e09
231,026
def read_parameters_dict_lines_from_file_header(
    outfile, comments="#", strip_spaces=True
):
    """Load a list of pretty-printed parameters dictionary lines from a commented file header.

    Returns a list of lines from a commented file header
    that match the pretty-printed parameters dictionary format
    as generated by `BaseSearchClass.get_output_file_header()`.
    The opening/closing bracket lines (`{`,`}`) are not included.
    Newline characters at the end of each line are stripped.

    Parameters
    ----------
    outfile: str
        Name of a PyFstat-produced output file.
    comments: str
        Comment character used to start header lines.
    strip_spaces: bool
        Whether to strip leading/trailing spaces.

    Returns
    -------
    dict_lines: list
        A list of unparsed pprinted dictionary entries.
    """
    dict_lines = []
    with open(outfile, "r") as f_opened:
        in_dict = False
        for line in f_opened:
            if not line.startswith(comments):
                raise IOError(
                    "Encountered end of {:s}-commented header before finding closing '}}' of parameters dictionary in file '{:s}'.".format(
                        comments, outfile
                    )
                )
            elif line.startswith(comments + " {"):
                in_dict = True
            elif line.startswith(comments + " }"):
                break
            elif in_dict:
                line = line.lstrip(comments).rstrip("\n")
                if strip_spaces:
                    line = line.strip(" ")
                dict_lines.append(line)
    if len(dict_lines) == 0:
        raise IOError(
            "Could not parse non-empty parameters dictionary from file '{:s}'.".format(
                outfile
            )
        )
    return dict_lines
6f06fade43b69da083b95b4ddc0d10f168779118
63,732
def resize_quota_delta(context, new_flavor, old_flavor, sense, compare):
    """Calculate any quota adjustment required at a particular point
    in the resize cycle.

    :param context: the request context
    :param new_flavor: the target instance type
    :param old_flavor: the original instance type
    :param sense: the sense of the adjustment, 1 indicates a forward
        adjustment, whereas -1 indicates a reversal of a prior adjustment
    :param compare: the direction of the comparison, 1 indicates we're
        checking for positive deltas, whereas -1 indicates negative deltas
    """
    def _quota_delta(resource):
        return sense * (new_flavor[resource] - old_flavor[resource])

    deltas = {}
    if compare * _quota_delta('vcpus') > 0:
        deltas['cores'] = _quota_delta('vcpus')
    if compare * _quota_delta('memory_mb') > 0:
        deltas['ram'] = _quota_delta('memory_mb')

    return deltas
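A worked call for illustration; the request context is not used by the arithmetic, so None suffices here:

new = {'vcpus': 4, 'memory_mb': 4096}
old = {'vcpus': 2, 'memory_mb': 2048}
# Forward adjustment (sense=1), checking for positive deltas (compare=1):
assert resize_quota_delta(None, new, old, 1, 1) == {'cores': 2, 'ram': 2048}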
25d65a5e60ab6665674dfb47ca2c573861432fc1
93,419
def align2local(seq):
    """
    Returns list such that
    'ATG---CTG-CG' ==> [0, 1, 2, 2, 2, 2, 3, 4, 5, 5, 6, 7]
    Used to go from align -> local space
    """
    i = -1
    lookup = []
    for c in seq:
        if c != "-":
            i += 1
        lookup.append(i)
    return lookup
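The docstring example, checked directly:

assert align2local('ATG---CTG-CG') == [0, 1, 2, 2, 2, 2, 3, 4, 5, 5, 6, 7]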
aa914a60d5db7801a3cf1f40e713e95c98cd647e
3,313
def subset_sum_squeeze(data, subset={}, sum_dims=None, squeeze=False):
    """
    Take an xarray DataArray and apply indexing, summing and squeezing,
    to prepare it for analysis.

    Parameters
    ----------
    data : xarray DataArray
        A Calliope model data variable, either input or output, which has been
        reformatted to deconcatenate loc_techs (or loc_tech_carriers/loc_carriers)
        using calliope.Model().get_formatted_array(original_data)
    subset : dict, default {}
        key:value pairs for indexing data. Uses xarray `loc[]` to index.
    sum_dims : str or list of strings, default None
        Names of dimensions over which to sum the data.
    squeeze : bool, str, or list of strings, default False
        If True, remove all dimensions of length 1
        If string, try to remove that dimension, if it is of length 1
        If list of strings, try to remove all listed dimensions, if they are length 1

    Returns
    -------
    data : xarray DataArray

    Examples
    --------
    (in) data = carrier_prod, dimensions = (locs: 2, techs: 5, carriers: 1, timesteps: 100)

    subset_sum_squeeze(
        data, subset={'techs': ['ccgt', 'csp']}, sum_dims='locs', squeeze=True
    )

    (out) data = carrier_prod, dimensions = (techs: 2, timesteps: 100)
    """
    if subset:  # first, subset the data
        allowed_subsets = {k: v for k, v in subset.items() if k in data.dims}
        allowed_items = {}
        for k, v in allowed_subsets.items():
            if isinstance(v, str):
                v = [v]
            allowed_items[k] = [i for i in v if i in data[k].values]
        data = data.loc[allowed_items]

    if sum_dims:  # second, sum along all necessary dimensions
        data = data.sum(sum_dims)

    if squeeze:  # finally, squeeze out single length dimensions
        if len(data.techs) == 1:
            dims_to_squeeze = [i for i in data.dims if len(data[i]) == 1 and i != 'techs']
            data = data.squeeze(dims_to_squeeze)
        else:
            data = data.squeeze()

    return data
182c8ddab3642c5fe9bec8520cbc5badb43b8f4e
664,566
def intersect(list1, list2) -> bool:
    """Do list1 and list2 intersect"""
    return len(set(list1).intersection(set(list2))) > 0
78ce4dc472e621d5fe950bc0446b05372fda31b3
155,248
def remove_tweet_id(tweet):
    """
    DESCRIPTION:
        Removes the id from a string that contains an id and a tweet,
        e.g. "<id>,<tweet>" returns "<tweet>"
    INPUT:
        tweet: a python string which contains an id concatenated with a tweet
               in the following format: "<id>,<tweet>"
    OUTPUT:
        only the tweet is returned as a python string
    """
    return tweet.split(',', 1)[-1]
be9d6f8481c735574763888ab4db8d5b0627320f
234,166
def read_lwostring(raw_name):
    """Parse a zero-padded string."""
    i = raw_name.find(b'\0')
    name_len = i + 1
    if name_len % 2 == 1:  # Test for oddness.
        name_len += 1

    if i > 0:
        # Some plugins put non-text strings in the tags chunk.
        name = raw_name[0:i].decode("utf-8", "ignore")
    else:
        name = ""

    return name, name_len
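A small illustration with a NUL-terminated name (the byte string is made up); the returned length is padded to an even byte count:

name, consumed = read_lwostring(b"Layer\x00rest")
assert (name, consumed) == ("Layer", 6)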
d22fc8ddf6b1ca5f3e855cc45c5debd0f1ac4e54
493,499
def send_typed_media(resource_path, bot, channel):
    """Send file as bytes by `resource_path`. Send type based on file extension."""
    ext = resource_path.suffix.lower()
    # Use a context manager so the file handle is closed after sending.
    with open(resource_path, 'rb') as media_resource:
        if ext in ('.jpeg', '.jpg', '.png'):
            return bot.send_photo(chat_id=channel, photo=media_resource)
        elif ext in ('.mp4', '.mov', '.gif', '.webp'):
            return bot.send_animation(chat_id=channel, animation=media_resource)
285f49f5078fafef7d14eb1d71c89cac6511a62c
500,249
def get_appliance_software_version(
    self,
    ne_pk: str,
    cached: bool,
) -> list:
    """Get appliance software version information

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - appliancesSoftwareVersions
          - GET
          - /appliancesSoftwareVersions/{nePK}?cached={cached}

    :param ne_pk: Network Primary Key (nePk) of existing appliance,
        e.g. ``3.NE``
    :type ne_pk: str
    :param cached: ``True`` retrieves last known value to Orchestrator,
        ``False`` retrieves values directly from Appliance
    :type cached: bool
    :return: Returns list of dictionaries of software version
        information per partition. \n
        [`dict`]: Software version info object \n
        * keyword **partition** (`int`): integer of partition where
          software is installed
        * keyword **build_version** (`str`): software version number
        * keyword **build_time** (`str`): timestamp of build
        * keyword **active** (`bool`): ``True`` if this partition is the
          active software
        * keyword **next_boot** (`bool`): ``True`` if this partition will
          be active for next boot
        * keyword **fallback_boot** (`bool`): ``True`` if this partition
          will boot if another active partition fails
    :rtype: list
    """
    return self._get(
        "/appliancesSoftwareVersions/{}?cached={}".format(ne_pk, cached)
    )
8d5ef977893d48480d194985cc2e15d51edaac71
535,152
from typing import MutableMapping
from typing import Any


def walk_the_tree(tree: MutableMapping[str, Any], stem: list[str] = []) -> list[list[str]]:
    """Return the leaves of the branches."""
    leaves = []
    for branch, branches in tree.items():
        leaf = stem + [branch, ]
        if isinstance(branches, dict):
            leaves.extend(walk_the_tree(branches, leaf))
        else:
            leaves.append(leaf)
    return leaves
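A usage sketch on an illustrative tree:

tree = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
assert walk_the_tree(tree) == [["a", "b"], ["a", "c", "d"], ["e"]]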
76bddf00a649ebd5070c1df87a727210696e8eab
478,137
def enum(*args, **kwargs):
    """
    Create an enumeration having the given values.

    As the concept of enumeration does not exist in python, this will actually
    create a new type, like a class that is not instantiated (and has no
    reason to be). The class will hold a class attribute for each possible
    value of the enumeration.

    On top of this, a special, reserved attribute `_fields` will hold the set
    of all possible values for the enumeration (by 'value' we actually mean
    'fields' here, see example below).

    *args -- if the actual value for each enumerated value does not matter, a
             unique integer will be assigned for each one.
    **kwargs -- if the actual value does matter, you can provide it using
                keyword arguments

    Example of use:
    ```
    >>> from tools import utils
    >>> Positivity = utils.enum('positive', 'negative', 'neutral', unknown=-1)
    >>> Positivity._fields
    {'negative', 'neutral', 'positive', 'unknown'}
    >>> Positivity.positive
    0
    >>> Positivity.neutral
    2
    >>> Positivity.unknown
    -1
    ```
    """
    fields = set(args) | set(kwargs.keys())
    enums = dict(zip(args, range(len(args))), _fields=fields, **kwargs)
    return type(str('Enum'), (), enums)
f06ad6302b326a778acf88d860bb1e43849f8942
547,339
def getEditDistance(str1, str2):
    """Return the edit distance between two strings.

    >>> getEditDistance("abc", "abcd")
    1
    >>> getEditDistance("abc", "aacd")
    2

    If one of the strings is empty, it will return the length of the other string

    >>> getEditDistance("abc", "")
    3

    The order of strings is not important, it will return the same output when strings are swapped.

    >>> getEditDistance("rice", "raise")
    2
    >>> getEditDistance("raise", "rice")
    2
    """
    # If one of the strings is empty, the edit distance equals the length of
    # the other string, as all we need to do is insert all the characters from
    # one string into the other.
    if len(str1) == 0:
        return len(str2)
    if len(str2) == 0:
        return len(str1)
    # Neither one is empty; we will use the Wagner-Fischer algorithm.
    # The matrix is one character bigger for each string, because we start from 0.
    # matrix[y+1][x+1] will hold the Levenshtein distance between the first y
    # chars of str1 and the first x chars of str2.
    matrix = [[i for k in range(len(str2) + 1)] for i in range(len(str1) + 1)]
    # Start by putting the numbers 0 to length of the string in the first
    # column and row: the difference between any string and an empty string is
    # the length of that string, so we go from 0 (no difference between two
    # empty strings) up to its length.
    for i in range(len(str2) + 1):
        matrix[0][i] = i
    # Now that the first row and column of our matrix are complete, process the rest.
    for i in range(len(str2)):
        for j in range(len(str1)):
            if str2[i] == str1[j]:
                # No difference in this character: edit distance equals the previous one.
                matrix[j + 1][i + 1] = matrix[j][i]
            else:
                # This char is different: get the lowest edit distance to
                # acquire the previous string and add one.
                matrix[j + 1][i + 1] = min(matrix[j][i + 1] + 1,
                                           matrix[j + 1][i] + 1,
                                           matrix[j][i] + 1)
    # As stated earlier, the last cell holds the final edit distance.
    return matrix[-1][-1]
9e03ba29f26017990e131ea6485bf3885975c28d
82,431
def is_prop(value):
    """Check whether a field is a property of an object"""
    return isinstance(value, property)
9cdf553e229f72c5f1cbe587646669000665d676
297,691
def string_list(argument):
    """
    Converts a space- or comma-separated list of values into a python list
    of strings.

    (Directive option conversion function)

    Based on positive_int_list of docutils.parsers.rst.directives
    """
    if ',' in argument:
        entries = argument.split(',')
    else:
        entries = argument.split()
    return entries
ca6e7eba4e750f220915a61ca9ee0e25abb1ce03
324,066
def overlaps(when, spans):
    """Checks an overlap of a datetime with a list of datetime spans."""
    for start, stop in spans:
        if start <= when <= stop:
            return True
    return False
58a5b09e093224ae1d67257f00986c43a3c1f63c
74,412
from typing import Tuple


def _pos_from_offset(col: int, msg: bytes, offset: int) -> Tuple[int, int]:
    """Calculate the line and column of a given offset."""
    msg = msg[:offset]
    lines = msg.split(b"\n")
    line = len(lines) - 1
    col = len(lines[-1]) + (col if line == 0 else 0)
    return (line, col)
01c9f35e94443b028308d3129e774b777d393723
498,879
def factorialTrailingZeros(n):
    """
    Count the number of trailing 0s in a factorial number.

    Parameters:
        n (int): the number for which the factorial and trailing 0s are to be calculated.

    Returns:
        trailingZeros (int): the number of trailing 0s in the calculated factorial.
    """
    try:
        if not isinstance(n, int) or n < 0:  # n must be a non-negative int
            raise TypeError
        ans = 1
        trailingZeros = 0
        # Calculating the factorial of 'n'
        while n >= 1:  # Loop stops when n becomes 0
            ans *= n
            n -= 1
        # Counting the trailing 0s of 'ans'
        while ans % 10 == 0:  # Loop stops when 'ans' is no longer divisible by 10
            trailingZeros += 1
            ans = ans // 10
        return trailingZeros
    except TypeError:
        print("Error: Invalid input. Please try again with a positive integer only.")
        return "Failed"
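For large n it is wasteful to build the factorial just to count its zeros; the count equals the number of factors of 5 in n! (Legendre's formula). A minimal alternative sketch (the function name is mine):

def trailing_zeros_fast(n: int) -> int:
    """Count trailing zeros of n! without computing the factorial."""
    count = 0
    power = 5
    while power <= n:
        count += n // power  # multiples of 5, 25, 125, ... each add a factor of 5
        power *= 5
    return count

assert trailing_zeros_fast(10) == factorialTrailingZeros(10) == 2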
86a17c160ff8d14a934fbd77bfa8f925fc1a1124
112,880
def install_conda_target(conda_target, conda_context, skip_environment=False):
    """Install specified target into its own environment.

    Return the process exit code (i.e. 0 in case of success).
    """
    if not skip_environment:
        create_args = [
            "--name", conda_target.install_environment,  # environment for package
            conda_target.package_specifier,
        ]
        return conda_context.exec_create(create_args)
    else:
        return conda_context.exec_install([conda_target.package_specifier])
3daf3dc55fab83c4da9b749bd2dcce537b091b12
120,854
import asyncio
import functools


def aio_run(func):
    """
    Decorate an async function to run as a normal blocking function.

    The async function will be executed in the currently running event loop
    (or one will automatically be created if none exists).

    Example:

    .. code-block::

        async def coroutine(timeout):
            await asyncio.sleep(timeout)
            return True

        @aio_run
        async def runner(*args, **kwargs):
            return await coroutine(*args, **kwargs)

        # Call coroutine in a blocking manner
        result = runner(timeout=1.0)
        print(result)
    """
    def _wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(func(*args, **kwargs))

    return functools.update_wrapper(_wrapper, func)
768df2de0ef06f5e87560a61689710a84d397214
372,533
from pathlib import Path


def clean_path(path):
    """
    Cleans the path to allow it to be used to search with glob.

    It does a few different things:
    - Replaces ~ with the user's home directory.
    - Makes sure the path ends with a /

    param: path: The path to clean.
    returns: The cleaned path.
    """
    path = path.replace("~", str(Path.home()))
    if not path.endswith("/"):
        path += "/"
    return path
5ac069f9791f1a953926a553e4690ca1c6f01339
136,828
def rename_coords_to_lon_and_lat(ds):
    """Rename Dataset spatial coord names to: lat, lon"""
    if 'latitude' in ds.coords:
        ds = ds.rename({'latitude': 'lat'})

    if 'longitude' in ds.coords:
        ds = ds.rename({'longitude': 'lon'})
    elif 'long' in ds.coords:
        ds = ds.rename({'long': 'lon'})

    if 'z' in ds.coords:
        ds = ds.drop('z').squeeze()

    return ds
8ba286e441f2a32a96fbbddc5c1112a6ed890f84
16,652
import re
from collections import Counter


def _get_table_width(table_spec):
    """Calculate the width of a table based on its spec.

    :param table_spec:
    :type table_spec: str

    :return:
    :rtype: int
    """
    column_letters = ['l', 'c', 'r', 'p', 'm', 'b']

    # Remove things like {\bfseries}
    cleaner_spec = re.sub(r'{[^}]*}', '', table_spec)
    spec_counter = Counter(cleaner_spec)

    return sum(spec_counter[letter] for letter in column_letters)
97764d26434fbbcd1564538fe60d789059631a7a
517,986
import pickle


def serialize(x):
    """Return a pickled object."""
    return pickle.dumps(x)
c3debbc8df9b457879a784344ab7885b95b9ecd3
628,259
def format_number_latex(number: float, sig_figures: int = 3) -> str:
    """
    Formats the number in latex format and rounds it to the defined
    significant figures. If the result is in the exponential format, it will
    be formatted as ``[number] \\times 10^{[exponent]}``.

    Parameters
    ----------
    number :
        Number to format.
    sig_figures:
        Number of significant figures. Optional. Default 3.
    """
    formatted_num = f'{number:#.{sig_figures}g}'
    if 'e' in formatted_num:
        num_str, exponent = formatted_num.split('e')
        return f'{num_str} \\times 10^{{{int(exponent)}}}'
    return formatted_num
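Two quick checks of the behaviour:

assert format_number_latex(3.14159) == '3.14'
assert format_number_latex(1234567.0) == '1.23 \\times 10^{6}'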
4891a78b10640022bb732c2e94c1a2e0ae72232f
543,545
def addresses(intcodes, ip):
    """Return the three addresses (a, b, c) at positions 1, 2, 3 past ip."""
    return intcodes[ip+1 : ip+4]
585f789ba5d095af20d87374c7f2ed8d1b2a3770
515,159
def color_normalization(image, mean, stddev):
    """
    Perform color normalization on the image with the given mean and stddev.

    Args:
        image (ndarray): image to perform color normalization on.
        mean (ndarray): mean value to subtract. dtype is 'float'
        stddev (ndarray): stddev to divide by.
    """
    # Input image should be in CHW format
    assert len(mean) == image.shape[0], "channel mean not computed properly"
    assert len(stddev) == image.shape[0], "channel stddev not computed properly"
    return (image - mean) / stddev
4bcd8b89560138d709a727bd5020aef275ece119
452,088
import re


def remove_slash(value):
    """Removes slashes from the beginning and end of a string."""
    assert isinstance(value, str)
    return re.sub(r'(^/|/$)', '', value)
1227d83c37acde1c71d24c9ac12056af6f501004
384,878
from collections import OrderedDict


def dict_to_table(dct, titles=('col1', 'col2'), margin=3, sort=True):
    """Formats a dict where key:val is str:str into a two column table."""
    if sort:
        dct = OrderedDict({key: dct[key] for key in sorted(dct.keys())})
    col1width = max([len(str(s)) for s in dct.keys()] + [len(titles[0])])
    col2width = max([len(str(s)) for s in dct.values()] + [len(titles[1])])
    width = col1width + col2width + margin
    baseline = ('%%-%ds' % col1width) + ' ' * margin + ('%%-%ds' % col2width)
    output = '\n' + baseline % titles + '\n' + '-' * width
    for key, val in dct.items():
        output += '\n' + (baseline % (key, val))
    return output
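An illustrative call; the exact spacing comes from the computed column widths:

print(dict_to_table({'b': '2', 'a': '1'}, titles=('key', 'value')))
# key   value
# -----------
# a     1
# b     2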
4955301143c7247d82f6ae50e30b9114af798ff6
308,753
def is_all_nan(tensor):
    """Checks if all entries of a tensor are nan."""
    # NaN is the only value that does not compare equal to itself.
    return (tensor != tensor).all()
c181bee59308a85c8b6797c8e8319b7dc4f2e0fa
545,361
def only_dna(seq: str) -> str:
    """Return ``str`` with all ACGTN characters (either case) from ``seq``."""
    return "".join(filter(lambda x: x in "ACGTNacgtn", seq))
b2c067bc9ace2a84fedc3e694a1fc54b076a3906
540,289
def stray(arr):
    """
    You are given an odd-length array of integers, in which all of them are
    the same, except for one single number.

    :param arr: an array of integers.
    :return: the single different number in the array.
    """
    a, b = set(arr)
    return a if arr.count(a) == 1 else b
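A one-line example:

assert stray([1, 1, 2, 1, 1]) == 2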
17e99c2d22baceb89c16c01a138896918ab3a9e6
664,833
import torch


def count_duplicates(cover_index: torch.LongTensor, normalize=False):
    """Count the number of node repetitions in the cover sets.

    Args:
        cover_index (torch.LongTensor): Cover assignment matrix, in sparse
            coordinate form.
        normalize (bool, optional): Normalize the results with respect to the
            total nodes in the graph. Defaults to `False`.

    Returns:
        int or float: Node repetitions.
    """
    num_nodes = cover_index[0].max().item() + 1
    duplicates = cover_index.size(1) - num_nodes

    if normalize:
        duplicates /= num_nodes

    return duplicates
bb53f6d5ae2af7cce980bacd95277837aac65953
588,774
def t03_SharingIsPassByReference(C, pks, crypto, server):
    """Verifies that updates to a file are sent to all other users who have
    that file."""
    alice = C("alice")
    bob = C("bob")
    alice.upload("k", "v")
    m = alice.share("bob", "k")
    bob.receive_share("alice", "k", m)
    score = bob.download("k") == "v"
    bob.upload("k", "q")
    score += alice.download("k") == "q"
    return score / 2.0
ffdcde1c6dd9fcb6053715789287efabbe7ed6f1
678,163
def _get_first_msgtype(elem):
    """Returns type identifier for this transaction"""
    return elem["tx"]["value"]["msg"][0]["type"]
80ebbfa07609ef673dabd16f8881d1e9562b4eb0
519,319
from datetime import datetime


def parse_date(date_string, format='%Y-%m-%d'):
    """
    Parse a date string.

    :param date_string: a string representing a date
    :param format: format of the string, defaults to '%Y-%m-%d'
    :return: a date object
    """
    return datetime.strptime(date_string, format).date()
860e327bbeefd703708900d703016d8981e5c514
485,186
from typing import List


def filter_out_trivial_gotos(disasm: List[str]) -> List[str]:
    """Filter out gotos to the next opcode (they are no-ops)."""
    res = []  # type: List[str]
    for i, s in enumerate(disasm):
        if s.startswith(' goto '):
            label = s.split()[1]
            if i + 1 < len(disasm) and disasm[i + 1].startswith('%s:' % label):
                # Omit goto
                continue
        res.append(s)
    return res
81bba9e6abd6f5e58ae214be46774f875996e3b5
262,684
from typing import Sequence


def pool_keep_low(pool: Sequence[int], keep: int) -> tuple[int, ...]:
    """Keep a number of the lowest dice."""
    pool = list(pool)
    remove = len(pool) - keep
    for _ in range(remove):
        # Find and drop the current highest die.
        high_value = min(pool)
        high_index = 0
        for i, n in enumerate(pool):
            if n > high_value:
                high_value = n
                high_index = i
        pool.pop(high_index)
    return tuple(pool)
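Keeping the two lowest of four illustrative dice:

assert pool_keep_low((6, 1, 3, 5), 2) == (1, 3)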
b8c5877767c246191c972756f151b3ce41e45b3b
197,205
def curtail_string(s, length=20):
    """Trim a string nicely to length."""
    if len(s) > length:
        return s[:length] + "..."
    else:
        return s
48b6624983b810517651d89185761210ba95ad27
669,629
def source_link_type(url):
    """
    Takes a URL and returns the URL type, used to display the right text in
    the wiki view.

    :param url: Ruleset source's URL
    :type url: str
    :return: A string of URL type (github, patreon or unknown)
    """
    # Substring checks cover both the bare and the www. hosts.
    # (The original `("github.com" or "www.github.com") in url` only ever
    # tested its first operand.)
    if "github.com" in url:
        result = "github"
    elif "patreon.com" in url:
        result = "patreon"
    else:
        result = "unknown"
    return result
65f2984e612887b75635885bf674d6c2f5360fb9
358,017
def TypeCodeToType(typeCode):
    """Convert a type code to the class it represents"""
    if typeCode in ["b", "d", "f", "s"]:
        return float
    elif typeCode in ["i", "l"]:
        return int
    elif typeCode in ["c"]:
        return str
    else:
        raise Exception("Unrecognised type code: " + typeCode)
7ac115f94958842c47168dc6ff9e432caac48404
636,488
def nullprep(*args, **kwargs):
    """NULL preparation function which always returns ``None``"""
    return None
a1614c5ad0c79a59ff6850007128aa31ed7c242b
163,575
def process_txt(label_path: str):
    """Read label & split shapes appropriately.

    Arguments:
        label_path {str} -- File path to label.

    Returns:
        tuple -- Split shape counts: square, circle, triangle.
    """
    # Read, strip white space & split label.
    with open(label_path, mode='r') as f:
        labels = str.strip(f.read()).split(', ')

    # Square, Circle, Triangle.
    shapes = map(lambda l: int(l.split(':')[1]), labels)

    return tuple(shapes)
ece587ce705a023dc7c31876fca598bf5f68a2f2
399,915
from typing import List
from typing import Any
from typing import Tuple


def _longest_repeat_with_size(tokens: List[Any], start: int, span_len: int) -> Tuple[int, int]:
    """Get longest repeat starting at some index with a certain repeat size.

    For example, _longest_repeat_with_size([2, 2, 3, 3, 4, 1, 5, 5], 0, 2)
    returns (1, 2).

    Args:
        tokens: list of tokens.
        start: search start index.
        span_len: length of repeating tokens.

    Returns:
        Span of longest repeating subsequence starting at start, with repeat
        size of span_len.
    """
    j = start + span_len
    while j < len(tokens):
        if tokens[j] != tokens[start + (j - start) % span_len]:
            break
        j += 1
    return (j - start) // span_len, j
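The (corrected) docstring example, plus a longer single-token repeat:

assert _longest_repeat_with_size([2, 2, 3, 3, 4, 1, 5, 5], 0, 2) == (1, 2)
assert _longest_repeat_with_size([7, 7, 7, 7, 1], 0, 1) == (4, 4)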
2d9b3322a274788383d515702b201ce9864f50e4
496,166
def get_user_list(cursor):
    """
    Get the list of the users and their roles/permissions

    Parameters:
    -----------
    cursor: Psycopg2 cursor
        cursor in the connection to the postgres database

    Returns:
    --------
    res: List
        List of dictionaries with the following elements:
            uid: Int
                identifier of the user in the database
            username: Str
                name of the user
            roles: List(Str)
                roles/permissions of the user (admin, edit, user)
    """
    SQL = "SELECT id as uid, username, apiuser AS user, edit_auth AS edit, admin FROM users"
    cursor.execute(SQL)
    res = cursor.fetchall()
    for i in res:
        i['roles'] = []
        if i.pop('user'):
            i['roles'] += ['user']
        if i.pop('admin'):
            i['roles'] += ['admin']
        if i.pop('edit'):
            i['roles'] += ['edit']
    return res
168e08b68c2cdcc5601d31613a370f228026612b
462,149
import torch


def loss_uGLAD(theta, S):
    """The objective function of the graphical lasso, which is the loss
    function for the unsupervised learning of glad:

        loss-glasso = 1/M * (-log|theta| + <S, theta>)

    NOTE: We fix the batch size B=1 for `uGLAD`

    Args:
        theta (tensor 3D): precision matrix BxDxD
        S (tensor 3D): covariance matrix BxDxD (dim=D)

    Returns:
        loss (tensor 1D): the loss value of the obj function
    """
    B, D, _ = S.shape
    t1 = -1 * torch.logdet(theta)
    # Batch matrix multiplication: torch.bmm
    t21 = torch.einsum("bij, bjk -> bik", S, theta)
    # Getting the trace (batch mode)
    t2 = torch.einsum('jii->j', t21)
    # print(t1, torch.det(theta), t2)
    # Regularization term (optional):
    # tr = 1e-02 * torch.sum(torch.abs(theta))
    glasso_loss = torch.sum(t1 + t2) / B  # sum over the batch
    return glasso_loss
a5af2f2c189cde499f6516ca3585fd97ab0269d6
268,352
def get_images_and_fieldstrength_indices(data, source_field_strength, target_field_strength):
    """
    Extract images and indices of source/target images for the training and
    validation set. Gives back indices instead of subsets to allow the use of
    hdf5 datasets instead of ndarrays (to save memory).

    :param data: hdf5 dataset of ADNI data
    :param source_field_strength: value of the magnetic field strength [T] in the source domain
    :param target_field_strength: value of the magnetic field strength [T] in the target domain
    :return: images_train: training images hdf5 dataset; contains ndarray
                 with shape [number_of_images, x, y, z]
             source_images_train_ind: indices of the images from the source domain in images_train
             target_images_train_ind: indices of the images from the target domain in images_train
             (analogous for the validation set)
    """
    images_train = data['images_train']
    images_val = data['images_val']

    source_images_train_ind = []
    target_images_train_ind = []
    source_images_val_ind = []
    target_images_val_ind = []

    for train_ind, _ in enumerate(images_train):
        field_str = data['field_strength_train'][train_ind]
        if field_str == source_field_strength:
            source_images_train_ind.append(train_ind)
        elif field_str == target_field_strength:
            target_images_train_ind.append(train_ind)

    for val_ind, _ in enumerate(images_val):
        field_str = data['field_strength_val'][val_ind]
        if field_str == source_field_strength:
            source_images_val_ind.append(val_ind)
        elif field_str == target_field_strength:
            target_images_val_ind.append(val_ind)

    return images_train, source_images_train_ind, target_images_train_ind, \
        images_val, source_images_val_ind, target_images_val_ind
8cf83138dbea033bc5515722f3db90940badf5ca
287,970
def drop_columns(cols):
    """Drop columns in a DataFrame."""
    def dropper(data):
        return data.drop(columns=cols)
    return dropper
e93b1e45b9eda800aa812a41a02c78f459d65172
645,179
def get_list_as_str(list_of_objs):
    """Returns the list as a string."""
    return '[' + ' '.join([str(x) for x in list_of_objs]) + ']'
5b747f727d87db2ea4edd3b6aeedd27b25a7b49e
72,808
from typing import List
from typing import Dict


def enrich_asset_properties(properties: List, properties_to_enrich_dict: Dict) -> Dict:
    """
    Receives a list of properties of an asset, and properties to enrich, and
    returns a dict containing the enrichment.

    Args:
        properties (List): List of properties of an asset.
        properties_to_enrich_dict (Dict): Properties to be enriched.

    Returns:
        (Dict) Enriched properties, mapping each enriched name to its value
        and last reporting user.
    """
    return {
        properties_to_enrich_dict.get(prop.get('name')): {
            'Value': prop.get('value'),
            'LastUser': prop.get('last_reported_by')
        } for prop in properties
        if prop.get('name') in properties_to_enrich_dict
    }
5aba396d96ad1b14f9099a4c023ce1acac67a4a7
112,004
from typing import Tuple


def relative(current_module: str, reference: str) -> Tuple[str, str]:
    """Find relative module path."""
    current_module_path = current_module.split('.') if current_module else []
    *reference_path, name = reference.split('.')

    if current_module_path == reference_path:
        return '', ''

    i = 0
    for x, y in zip(current_module_path, reference_path):
        if x != y:
            break
        i += 1

    left = '.' * (len(current_module_path) - i)
    right = '.'.join(reference_path[i:])

    if not left:
        left = '.'
    if not right:
        right = name
    elif '.' in right:
        extra, right = right.rsplit('.', 1)
        left += extra

    return left, right
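Two illustrative calls with made-up module names:

assert relative('pkg.mod', 'pkg.mod.func') == ('', '')  # same module
assert relative('pkg.a', 'pkg.b.func') == ('.', 'b')    # sibling module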
92d63137a94a936b66a4293adcc656cd4cbad1b7
199,253
from datetime import datetime


def pretty_date(time):
    """
    Get a datetime object or an int() Epoch timestamp and return a pretty
    string like 'an hour ago', 'yesterday', '3 months ago', 'just now', etc.

    Based on https://stackoverflow.com/a/1551394/713980
    Adapted by sven1103
    """
    now = datetime.now()
    if isinstance(time, datetime):
        diff = now - time
    else:
        diff = now - datetime.fromtimestamp(time)
    second_diff = diff.seconds
    day_diff = diff.days

    pretty_msg = {
        0: [(float('inf'), 1, 'from the future')],
        1: [
            (10, 1, "just now"),
            (60, 1, "{sec} seconds ago"),
            (120, 1, "a minute ago"),
            (3600, 60, "{sec} minutes ago"),
            (7200, 1, "an hour ago"),
            (86400, 3600, "{sec} hours ago")
        ],
        2: [(float('inf'), 1, 'yesterday')],
        7: [(float('inf'), 1, '{days} days ago')],
        31: [(float('inf'), 7, '{days} weeks ago')],
        365: [(float('inf'), 30, '{days} months ago')],
        float('inf'): [(float('inf'), 365, '{days} years ago')]
    }

    for days, seconds in pretty_msg.items():
        if day_diff < days:
            for sec in seconds:
                if second_diff < sec[0]:
                    return sec[2].format(days=round(day_diff / sec[1], 1),
                                         sec=round(second_diff / sec[1], 1))
    return '... time is relative anyway'
5b56f1bb911cac34f245258d84adbc8d927b9a49
617,989
def most_frequent(series):
    """Get most frequent value"""
    counts = series.value_counts()
    if not counts.empty:
        return counts.index[0]
aa209394cdadda042206a15f7f26c3dd94960edf
248,931
import codecs


def get_constant_string(bv, addr):
    """
    Returns the full string in memory.

    :param bv: the BinaryView
    :param addr: Address where the string is
    :return string:
    """
    str_len = 0
    curr = codecs.encode(bv.read(addr, 1), "hex").decode()
    # Scan forward until a '.' (0x2e) or NUL (0x00) terminator byte.
    while (curr != "2e") and (curr != "00"):
        str_len += 1
        curr = codecs.encode(bv.read(addr + str_len, 1), "hex").decode()
    return bv.read(addr, str_len).decode()
d1fe728ad7435cd74d8d035082b629405e5b4b2a
292,804
import json
from collections import OrderedDict


def dump_json(obj, separators=(', ', ': '), sort_keys=False):
    """Dump object into a JSON string."""
    if sort_keys is None:
        sort_keys = not isinstance(obj, OrderedDict)  # Let it sort itself.

    return json.dumps(obj, separators=separators, sort_keys=sort_keys)
9c800ceee12cbe5b3cf4eda9530b018ecdf22dc8
81,806
def calc_invest_cost_deg(length, nb_con, nb_sub, share_lhn=0):
    """
    Calculate investment cost into multiple deg networks

    Parameters
    ----------
    length : float
        Total length of deg cables
    nb_con : int
        Number of buildings connected to deg (defines number of smart meters)
    nb_sub : int
        Number of sub-degs (defines number of controllers (one per deg))
    share_lhn : float, optional
        Share of cables which are installed parallel with local heating
        network (LHN). (default: 0)
        E.g. share_lhn = 0.3 means that 30 % of deg cables are installed
        together with lhn pipes. Thus, installation cost is saved.

    Returns
    -------
    invest_deg : float
        Investment cost into decentralized electrical grid (deg) system
        in Euro (including cables, controllers, meters)

    Annotations
    -----------
    Cable cost based on:
    G. Kerber, Aufnahmefähigkeit von Niederspannungsverteilnetzen für die
    Einspeisung aus Photovoltaikkleinanlagen: Dissertation, 2011.

    Smart meter cost based on:
    Ernst & Young, Kosten-Nutzen-Analyse für einen flächendeckenden Einsatz
    intelligenter Zähler, 2013.

    Micro controller cost (per deg) based on:
    E.D. Mehleri, H. Sarimveis, N.C. Markatos, L.G. Papageorgiou,
    A mathematical programming approach for optimal design of distributed
    energy systems at the neighbourhood level, Energy 44 (1) (2012) 96–104.
    """
    assert length > 0, 'Length should be larger than zero'
    assert nb_con >= 2, 'At least two buildings should be connected'

    # Cable cost
    cable_cost = share_lhn * 26 * length + \
        (1 - share_lhn) * (26 + 35) * length

    # Meter cost
    meter_cost = nb_con * 500

    # Controller cost
    con_cost = nb_sub * 1500

    return cable_cost + meter_cost + con_cost
8e168c1f024c0c31613f41d457cbf978729bdb38
278,972
def _valid_char_in_line(char: bytes, line: bytes) -> bool:
    """Return True if a char appears in the line and is not commented."""
    comment_index = line.find(b"#")
    char_index = line.find(char)
    valid_char_in_line = char_index >= 0 and (
        comment_index > char_index or comment_index < 0
    )
    return valid_char_in_line
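Illustrative checks of the comment handling:

assert _valid_char_in_line(b"=", b"x = 1  # set x")
assert not _valid_char_in_line(b"=", b"x  # = 1")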
fc9c371171d19b012b1d157b85274a9dd3c6cc13
114,949
from datetime import datetime


def get_run_name(config):
    """Returns timestamped name for this run based on config file.

    Args:
        config (dict): Overall config dict.

    Returns:
        str: Timestamped name for this run.
    """
    dataset_name = config["dataset"]["name"]
    model_name = config["model"]["name"]
    metric_names = "-".join(list(config["metrics"].keys()))
    if config.get("unique"):
        timestamp = datetime.now().strftime("%y-%m-%d-%H-%M-%S")
        return "_".join([dataset_name, model_name, metric_names, timestamp])
    return "_".join([dataset_name, model_name, metric_names])
32cd1f64f86360d748586f52b8720634a668427d
137,212
def unique_id(ID, IDList):
    """
    Assigns a unique ID to each spectral target.

    A spectral target may appear in multiple files, so unique_id assigns IDs
    by appending _<new number> to the spectral target ID.

    Parameters
    ----------
    ID : String
        Spectral target ID.
    IDList : Dictionary
        Keys are original IDs and the values are the numbers that were last
        used for ID generation.

    Returns
    -------
    ID : String
        Unique ID.
    IDList : Dictionary
        Updated IDList.
    """
    if ID not in IDList:
        IDList[ID] = 0
        return ID, IDList
    IDList[ID] += 1
    ID = ID + "_%s" % (IDList[ID])
    return ID, IDList
1b0b4097eeb493414b33aa55e4409d63f90af3eb
606,883
def get_builder_image_url(benchmark, fuzzer, docker_registry):
    """Get the URL of the docker builder image for fuzzing the benchmark
    with fuzzer."""
    return '{docker_registry}/builders/{fuzzer}/{benchmark}'.format(
        docker_registry=docker_registry, fuzzer=fuzzer, benchmark=benchmark)
ba427129c65e12c00221c9b6849f0daa68343f3b
218,858
def constructLimitsOffsets(limit, offset):
    """
    Create a list of limit and offset pairs for partial fetching of a
    maximum of 100 apps per request.

    Arguments:
        limit -- the number of apps to fetch
        offset -- the offset from where to start fetching

    Returns:
        A list of (limit, offset) pairs where limit is no larger than 100
    """
    limitOffsets = []
    while limit > 100:
        limitOffsets.append(('100', str(offset) if offset > 0 else None))
        offset += 100
        limit -= 100
    limitOffsets.append((str(limit), str(offset) if offset > 0 else None))
    return limitOffsets
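Fetching 250 apps from offset 0 becomes three paged requests:

assert constructLimitsOffsets(250, 0) == [('100', None), ('100', '100'), ('50', '200')]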
8c8417d91e9eed9cca9b396783619f8f113e831b
268,667
def stringify_keys(d):
    """Convert a dict's keys to strings if they are not."""
    # taken from https://stackoverflow.com/a/51051641
    keys = list(d.keys())
    for key in keys:
        # check inner dict
        if isinstance(d[key], dict):
            value = stringify_keys(d[key])
        else:
            value = d[key]

        # convert nonstring to string if needed
        if not isinstance(key, str):
            try:
                d[str(key)] = value
            except Exception:
                try:
                    d[repr(key)] = value
                except Exception:
                    raise

            # delete old key
            d.pop(key, None)
    return d
5e235823af70107eb96ddde91f7175442b32efb3
673,997
import math


def pure_replication(organism, population_dict, world, position_hash_table=None):
    """
    Replace organism with two organisms with similar parameters.

    Essentially, the only differences in parameters are organism id,
    ancestry, age, and water / food levels.
    """
    new_organism_list = []

    # Generate new organisms
    for _ in range(2):
        child = organism.get_child()
        if organism.drinking_type is not None:
            child.update_parameter('water_current',
                                   math.floor(organism.water_current / 2),
                                   in_place=True)
        if organism.eating_type is not None:
            child.update_parameter('food_current',
                                   math.floor(organism.food_current / 2),
                                   in_place=True)
        new_organism_list.append(child)

    new_organism_list.append(organism.die('replication'))

    return new_organism_list
5bdaa505324e55cebd907231bf189de947cc3418
464,837
import random


def extract_words(text, word_count):
    """
    Extract a list of words from a text in sequential order.

    :param text: source text, tokenized
    :param word_count: number of words to return
    :return: list of words
    """
    text_length = len(text)
    if word_count > text_length:
        raise RuntimeError(
            'Cannot extract {} words from a text of {} words.'.format(word_count, text_length))

    # Determine start index (+1 so the whole text can be selected when
    # word_count == text_length; randrange(0) would raise a ValueError).
    max_range = text_length - word_count
    start_range = random.randrange(max_range + 1)
    return text[start_range:start_range + word_count]
f84f8b4148380d6c6e29dc0742e42481dda2d11a
700,042
def size_of(rect):
    """Return size of list|tuple `rect` (top, left, bottom, right) as tuple (width, height)"""
    return (rect[3] - rect[1], rect[2] - rect[0])
07f50d974e74efca3b7985822fe3b3c84cdc2538
679,177
def create_evasion_ground_truth(user_data, evasive_spams):
    """Assign label 1 to evasive spams and 0 to all existing reviews;
    assign labels to accounts accordingly.

    Args:
        user_data: key = user_id, value = list of review tuples.
            user_data can contain only a subset of reviews
            (for example, if some of the reviews are used for training)
        evasive_spams: key = product_id, value = list of review tuples

    Return:
        user_ground_truth: key = user id (not prefixed), value = 0 (non-spam) / 1 (spam)
        review_ground_truth: key = review id (not prefixed), value = 0 (non-spam) / 1 (spam)
    """
    old_spammers = set()
    old_spams = set()
    user_ground_truth = {}
    review_ground_truth = {}

    # assign label 0 to all existing reviews and users
    for user_id, reviews in user_data.items():
        user_ground_truth[user_id] = 0
        for r in reviews:
            prod_id = r[0]
            label = r[2]
            review_ground_truth[(user_id, prod_id)] = 0
            if label == -1:
                old_spams.add((user_id, prod_id))
                old_spammers.add(user_id)

    # exclude previous spams and spammers, since the controlled accounts are
    # selected from the normal accounts.
    for r_id in old_spams:
        review_ground_truth.pop(r_id)
    for u_id in old_spammers:
        user_ground_truth.pop(u_id)

    # add label 1 to the evasive spams
    for prod_id, spams in evasive_spams.items():
        for r in spams:
            user_id = r[0]
            review_ground_truth[(user_id, prod_id)] = 1
            # this user has now posted at least one spam, so set its label to 1
            user_ground_truth[user_id] = 1

    return user_ground_truth, review_ground_truth
15802d7b773ec4ed4445203932af9dbb6761559c
458,894
def find_item(item_to_find, items_list):
    """
    Returns True if an item is found in the item list.

    :param item_to_find: item to be found
    :param items_list: list of items to search in
    :return: boolean
    """
    for item in items_list:
        if item[1] == item_to_find[1]:
            return True
    return False
e3eb3d81eff44c6daae201222751d596d11c35ad
451,928
from pathlib import Path


def genome_fasta_dir(data_dir: Path) -> Path:
    """Genome fasta directory"""
    return data_dir / "genome_fasta"
5ae94b3123e728d8e19ac132896fd1aa256c0d5e
90,892
def process_message_buffer(curr_message_buffer):
    """
    Description
    ===========

    Helper function to process the communication between the master
    _hole_finder_multi_process and _hole_finder_worker processes.

    Since communication over a socket is only guaranteed to be in order, we
    have to process an arbitrary number of bytes depending on the message
    format. The message format is as such: the first byte gives the number of
    8-byte fields in the message. So a first byte of 3 means the message on
    the head of the buffer should be 1 + 3*8 = 25 bytes long.

    Right now there are 3 types of message:

    type 0 - "status" messages from the workers, where field 0's value should
             be 2 for 2 fields, field 1 is the number of results the worker
             has written, and field 2 (currently unused) was the number of
             new holes the worker has found
    type 1 - "worker finished" message
    type 2 - "sync acknowledge" message

    Parameters
    ==========

    curr_message_buffer : bytes
        the current string of bytes to process

    Returns
    =======

    messages : list
        list of parsed messages

    curr_message_buffer : bytes
        any remaining data in the current message buffer which was not yet
        able to be parsed, likely due to a socket read ending not perfectly
        on a message border
    """
    messages = []

    messages_remaining_in_buffer = len(curr_message_buffer) > 0

    while messages_remaining_in_buffer:
        # https://stackoverflow.com/questions/28249597/why-do-i-get-an-int-when-i-index-bytes
        # Indexing bytes implicitly converts the 0th byte to an integer.
        msg_fields = curr_message_buffer[0]
        msg_len = 1 + 8 * msg_fields

        if len(curr_message_buffer) >= msg_len:
            curr_msg = curr_message_buffer[1:msg_len]
            messages.append(curr_msg)
            curr_message_buffer = curr_message_buffer[msg_len:]
            messages_remaining_in_buffer = len(curr_message_buffer) > 0
        else:
            messages_remaining_in_buffer = False

    return messages, curr_message_buffer
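A small illustration of the framing described in the docstring; the payload bytes are made up:

# Two complete messages (2 fields, then 1 field) followed by a partial one.
buf = bytes([2]) + b"A" * 16 + bytes([1]) + b"B" * 8 + bytes([3]) + b"C" * 4
msgs, rest = process_message_buffer(buf)
assert msgs == [b"A" * 16, b"B" * 8]
assert rest == bytes([3]) + b"C" * 4  # incomplete tail stays buffered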
e0793a6a70acd22070398b190a68e8718414d2ca
486,185
def createFunctionArgs(args):
    """
    Converts args to a tuple we can pass to a function using the *args method.
    """
    if args is None:
        return tuple()
    if isinstance(args, str):
        return (args,)
    # then it must be some kind of list, return as (a, b, ...)
    return tuple(args)
bb4653854d917bdec28425edb6083d29c2242a06
122,093
def readline_comment(file, symbol='#'):
    """Reads a line from a file object, but ignores everything after the
    comment symbol (by default '#')"""
    line = file.readline()
    if not line:
        return ''
    result = line.partition(symbol)[0]
    # Pass the symbol through on recursion (it was dropped in the original).
    return result if result else readline_comment(file, symbol)
bd964dfb2c9bc877e9c8ed3c9f280131468a7a10
675,189
def _get_iso_name(node, label):
    """Returns the ISO file name for a given node.

    :param node: the node for which ISO file name is to be provided.
    :param label: a string used as a base name for the ISO file.
    """
    return "%s-%s.iso" % (label, node.uuid)
3d73236bfa2b8fab8af39b9a3083b540e93eb30d
24,510
def _str_strip(string):
    """Provide a generic strip method to pass as a callback."""
    return string.strip()
24c1a4d8b9f3046a3729e1f65b6514c3cebf280f
216,566
def get_dictionary_from_flags(params, input_flags):
    """Generate dictionary from non-null flags.

    Args:
        params: Python dictionary of model parameters.
        input_flags: All the flags with non-null values of overridden model
            parameters.

    Returns:
        Python dict of overriding model parameters.
    """
    if not isinstance(params, dict):
        raise ValueError('The base parameter set must be a dict. '
                         'Was: {}'.format(type(params)))
    flag_dict = {}
    for k, v in params.items():
        if isinstance(v, dict):
            d = get_dictionary_from_flags(v, input_flags)
            flag_dict[k] = d
        else:
            try:
                flag_value = input_flags.get_flag_value(k, None)
                if flag_value is not None:
                    flag_dict[k] = flag_value
            except AttributeError:
                flag_dict[k] = v

    return flag_dict
ad19134912b34dc2a5c3b38d01df5ee159cc6272
340,661
def item_sum(seq, name):
    """Return the sum of an iterable by attribute or key"""
    if seq and isinstance(seq[0], dict):
        return sum(i[name] for i in seq)
    return sum(getattr(i, name) for i in seq)
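It works with both dict lookup and attribute access (namedtuple used for illustration):

from collections import namedtuple

Point = namedtuple('Point', 'n')
assert item_sum([{'n': 1}, {'n': 2}], 'n') == 3  # dict lookup
assert item_sum([Point(1), Point(2)], 'n') == 3  # attribute access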
420142f365721801fc39941bce5c1a5ecd8c2ae7
224,952
def compute_profile_updates(local_profiles, remote_profiles):
    """
    Compare a local set of profiles with a remote set.

    Return a list of profiles to add, and a list of profiles that have
    been updated.
    """
    # Note: no profile will ever be removed, I guess we don't care
    new = list()
    updated = list()

    for remote_profile in remote_profiles.values():
        if remote_profile.name in local_profiles:
            local_profile = local_profiles.get(remote_profile.name)
            if local_profile != remote_profile:
                updated.append(remote_profile)
        else:
            new.append(remote_profile)

    return new, updated
f546f38156e845b0e589f0fcf2eab22ebc1fa697
246,330
import math


def bounding_hues_from_renotation(hue, code):
    """
    Returns for a given hue the two bounding hues from
    *Munsell Renotation System* data.

    Parameters
    ----------
    hue : numeric
        *Munsell* *Colorlab* specification hue.
    code : numeric
        *Munsell* *Colorlab* specification code.

    Returns
    -------
    tuple
        Bounding hues.

    References
    ----------
    .. [11] **The Munsell and Kubelka-Munk Toolbox**:
        *MunsellAndKubelkaMunkToolboxApr2014*:
        *MunsellSystemRoutines/BoundingRenotationHues.m*

    Examples
    --------
    >>> bounding_hues_from_renotation(3.2, 4)
    ((2.5, 4), (5.0, 4))
    """
    if hue % 2.5 == 0:
        if hue == 0:
            hue_cw = 10
            code_cw = (code + 1) % 10
        else:
            hue_cw = hue
            code_cw = code
        hue_ccw = hue_cw
        code_ccw = code_cw
    else:
        hue_cw = 2.5 * math.floor(hue / 2.5)
        hue_ccw = (hue_cw + 2.5) % 10
        if hue_ccw == 0:
            hue_ccw = 10
        code_ccw = code

        if hue_cw == 0:
            hue_cw = 10
            code_cw = (code + 1) % 10
            if code_cw == 0:
                code_cw = 10
        else:
            code_cw = code
        code_ccw = code

    return (hue_cw, code_cw), (hue_ccw, code_ccw)
72d247c9418b1c6e51ec56e6e10f79203631ae78
675,505
from typing import Union
from pathlib import Path


def is_binary_file(filename: Union[str, Path]) -> bool:
    """
    Check if file is a binary file.

    Args:
        filename (`str` or `Path`):
            The filename to check.

    Returns:
        `bool`: `True` if the file passed is a binary file, `False` otherwise.
    """
    try:
        with open(filename, "rb") as f:
            content = f.read(10 * (1024 ** 2))  # Read a maximum of 10MB

        # Code sample taken from the following stack overflow thread
        # https://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python/7392391#7392391
        text_chars = bytearray(
            {7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F}
        )
        return bool(content.translate(None, text_chars))
    except UnicodeDecodeError:
        return True
5292802082f0b93096ea3e044305c0219a7e1a01
582,791
from datetime import datetime

import pytz


def utc_from_timestamp(timestamp: float) -> datetime:
    """Return a UTC time from a timestamp."""
    return pytz.utc.localize(datetime.utcfromtimestamp(timestamp))
09d81910f23fa9d7a081d5e39857c5160c743dd2
39,375