Dataset Viewer
Auto-converted to Parquet
Column: content (string, lengths 42 to 6.51k characters)
def pysiphash(uint64): """Convert SipHash24 output to Py_hash_t """ assert 0 <= uint64 < 1 << 64 if uint64 > (1 << 63) - 1: int64 = uint64 - (1 << 64) else: int64 = uint64 uint32 = (uint64 ^ uint64 >> 32) & 4294967295 if uint32 > (1 << 31) - 1: int32 = uint32 - (1 << 32) else: int32 = uint32 return int32, int64
def is_public_function(function_name): """ Determine whether the Vim script function with the given name is a public function which should be included in the generated documentation (for example script-local functions are not included in the generated documentation). """ is_global_function = ':' not in function_name and function_name[0].isupper() is_autoload_function = '#' in function_name and not function_name[0].isupper() return is_global_function or is_autoload_function
def gaussian_sum(number: int) -> int: """ Gets the sum of all numbers up to the provided number. E.g. gaussian_sum(5) == sum([1, 2, 3, 4, 5]) :param number: :return: """ return number * (1 + number) // 2
def _flatten_results_with_err(results_with_err): """flatten results with error Args: results_with_err ([(Error, result)]): results with error Returns: (Error, [result]): error, results """ err_msg_list = [] results = [] for idx, (each_err, each_result) in enumerate(results_with_err): if each_err: err_msg_list.append('(%s/%s) e: %s' % (idx, len(results_with_err), each_err)) results.append(each_result) err = None if not err_msg_list else Exception(','.join(err_msg_list)) if err: return err, results return None, results
def timestr_to_int(time_str): """ Parse the test time set in the yaml configuration file and convert it to int type """ # time_int = 0 if isinstance(time_str, int) or time_str.isdigit(): time_int = int(time_str) elif time_str.endswith("s"): time_int = int(time_str.split("s")[0]) elif time_str.endswith("m"): time_int = int(time_str.split("m")[0]) * 60 elif time_str.endswith("h"): time_int = int(time_str.split("h")[0]) * 60 * 60 else: raise Exception("%s not support" % time_str) return time_int
def ContentTypeTranslation(content_type): """Translate content type from gcloud format to API format. Args: content_type: the gcloud format of content_type Returns: cloudasset API format of content_type. """ if content_type == 'resource': return 'RESOURCE' if content_type == 'iam-policy': return 'IAM_POLICY' if content_type == 'org-policy': return 'ORG_POLICY' if content_type == 'access-policy': return 'ACCESS_POLICY' if content_type == 'os-inventory': return 'OS_INVENTORY' if content_type == 'relationship': return 'RELATIONSHIP' return 'CONTENT_TYPE_UNSPECIFIED'
def list_account(account_id): """For a specific account list the subfolders that are available.""" return ['transactions', 'balance']
def str2num(s): """User input is always received as string, str2num will try to cast it to the right type (int or float)""" try: return int(s) except ValueError: pass try: return float(s) except ValueError: # Fallback to the original type return s
def count_digits_recursion(number: int) -> int: """ >>> count_digits_recursion(-123) 3 >>> count_digits_recursion(-1) 1 >>> count_digits_recursion(0) 1 >>> count_digits_recursion(123) 3 >>> count_digits_recursion(123456) 6 """ number = abs(number) return 1 if number < 10 else 1 + count_digits_recursion(number // 10)
def factorial(n: int): """ >>> factorial(5) 120 >>> factorial(4) 24 """ resultado = 1 for i in range(1, n + 1): resultado = resultado * i return resultado
def __parse_request_range(range_header_text): """ Return a tuple describing the byte range requested in a GET request If the range is open ended on the left or right side, then a value of None will be set. RFC7233: http://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7233.html#header.range Examples: Range : bytes=1024- Range : bytes=10-20 Range : bytes=-999 """ left = None right = None if not range_header_text: return left, right range_header_text = range_header_text.strip() if not range_header_text.startswith('bytes'): return left, right components = range_header_text.split("=") if len(components) != 2: return left, right components = components[1].split("-") try: right = int(components[1]) except: pass try: left = int(components[0]) except: pass return left, right
def _calc_resolelement(w, fcol, r, A): """Calculate the resolution element using dw=r*w/A/fcol returns resolution element in mm """ return r * w / A / fcol
def resolve_uri(uri): """ Resolves uri if it's not absolute :param uri: str uri :return: str url """ if not(uri.startswith('http://') or uri.startswith('https://')): return '127.0.0.1%s' % uri else: return uri
def contains_only_char(s, char): """ Check whether a str contains only one kind of chars :param s: str, the string for checking :param char: str, the char for checking :return: """ for c in s: if c != char: return False return True
def scale_relative_risks_for_equivalence(proportions, relative_risks): """ :param proportions: dictionary :param relative_risks: dictionary :return: dictionary """ new_reference_deno = 0.0 for stratum in proportions.keys(): new_reference_deno += proportions[stratum] * relative_risks[stratum] new_reference = 1.0 / new_reference_deno for stratum in relative_risks.keys(): relative_risks[stratum] *= new_reference return relative_risks
def lookup_tlv_type(type_id): """Convert TLV type to human readable strings by best guess""" result = 'unknown' if type_id == 0x01: result = 'MAC address' elif type_id == 0x02: result = 'address' elif type_id == 0x03: result = 'software' elif type_id == 0x06: result = 'username' elif type_id == 0x07: result = 'salt' elif type_id == 0x08: result = 'challenge' elif type_id == 0x0a: result = 'uptime' elif type_id == 0x0b: result = 'hostname' elif type_id == 0x0c: result = 'model name' elif type_id == 0x0d: result = 'essid' elif type_id == 0x0e: result = 'wmode' elif type_id == 0x12: result = 'counter' elif type_id == 0x13: result = 'MAC address (UniFi)' elif type_id == 0x15: result = 'model name (UniFi)' elif type_id == 0x16: result = 'firmware revision' elif type_id == 0x17: result = 'unknown (UniFi)' elif type_id == 0x18: result = 'unknown (UniFi)' elif type_id == 0x19: result = 'DHCP enabled (UniFi)' elif type_id == 0x1b: result = 'min firmware (UniFi)' # ? elif type_id == 0x1a: result = 'unknown (UniFi)' result += ' (' + str(type_id) + ')' return result
def _to_lowercase(ftype, fname, *_): """Transforms a feature to it's lowercase representation.""" return ftype, fname if fname is ... else fname.lower()
def encode_problem_index(function_idx, dimension_idx, instance_idx): """ Compute the problem index for the bbob suite with 15 instances and 24 functions. """ return instance_idx + (function_idx * 15) + (dimension_idx * 15 * 24)
def mergesort(unsorted_list): """Sort a list.""" if len(unsorted_list) > 1: mid = len(unsorted_list) // 2 left_half = unsorted_list[:mid] right_half = unsorted_list[mid:] mergesort(left_half) mergesort(right_half) i = 0 j = 0 k = 0 while i < len(left_half) and j < len(right_half): if left_half[i] < right_half[j]: unsorted_list[k] = left_half[i] i = i + 1 else: unsorted_list[k] = right_half[j] j = j + 1 k = k + 1 while i < len(left_half): unsorted_list[k] = left_half[i] i = i + 1 k = k + 1 while j < len(right_half): unsorted_list[k] = right_half[j] j = j + 1 k = k + 1 return unsorted_list
def greatest_common_divisor(value1: int, value2: int): """Calcula o maior divisor comum de dois valores""" value1 = abs(value1) value2 = abs(value2) if value1 < value2: value1, value2 = value2, value1 remainder = value1 % value2 if remainder == 0: return value2 return greatest_common_divisor(value2, remainder)
def batches(batch_size, features, labels): """ Create batches of features and labels :param batch_size: The batch size :param features: List of features :param labels: List of labels :return: Batches of (Features, Labels) """ assert len(features) == len(labels) output_batches = [] sample_size = len(features) for start_i in range(0, sample_size, batch_size): end_i = start_i + batch_size batch = [features[start_i:end_i], labels[start_i:end_i]] output_batches.append(batch) return output_batches
def calc_acc(fn: float, fp: float, tp: float, tn: float) -> float: """ :param fn: false negative miss :param fp: false positive or false alarm :param tp: true positive or hit :param tn: true negative or correct rejection :return: accuracy """ return (tp + tn) / (tp + tn + fn + fp)
def get_next_player(players, currentplayer): """Returns the next player in an ordered list. Cycles around to the beginning of the list if reaching the end. Args: players -- List currentplayer -- Element in the list """ i = players.index(currentplayer) + 1 return players[i % len(players)]
def preprocessing(list_of_tokens, head_indexes_2d): """ Get head indexes only for word-pieces tokenization """ batch_size = len(list_of_tokens) xx = [[]] * batch_size for i in range(batch_size): xx[i] = [0] * len(list_of_tokens[i]) for idx, item in enumerate(head_indexes_2d[i]): if item == 0: xx[i][idx] = 0 else: xx[i][idx] = list_of_tokens[i][head_indexes_2d[i][idx]] return xx
def average_dominant_color(colors, mitigate=175, max_margin=140): """This function is used to calculate the dominant colors when given a list of colors There are 5 steps : 1) Select dominant colors (highest count), isolate its values and remove it from the current color set. 2) Set margins according to the prevalence of the dominant color. 3) Evaluate the colors. Similar colors are grouped in the dominant set while others are put in the "remaining" list. 4) Calculate the average color for the dominant set. This is done by averaging each band and joining them into a tuple. 5) Mitigate final average and convert it to hex :param colors: list of tuples having: [0] color count in the image [1] actual color: tuple(R, G, B, A) -> these can be extracted from a PIL image using image.getcolors() :param mitigate: maximum value a band can reach :param max_margin: maximum difference from one of the dominant values :returns: a tuple with two items: [0] the average color of the dominant set as: tuple(R, G, B) [1] list of remaining colors, used to evaluate subsequent dominant colors """ dominant_color = max(colors) dominant_rgb = dominant_color[1][:3] dominant_set = [dominant_color] remaining = [] margins = [max_margin * (1 - dominant_color[0] / sum([col[0] for col in colors]))] * 3 colors.remove(dominant_color) for color in colors: rgb = color[1] if (rgb[0] < dominant_rgb[0] + margins[0] and rgb[0] > dominant_rgb[0] - margins[0] and rgb[1] < dominant_rgb[1] + margins[1] and rgb[1] > dominant_rgb[1] - margins[1] and rgb[2] < dominant_rgb[2] + margins[2] and rgb[2] > dominant_rgb[2] - margins[2]): dominant_set.append(color) else: remaining.append(color) dominant_avg = [] for band in range(3): avg = total = 0 for color in dominant_set: avg += color[0] * color[1][band] total += color[0] dominant_avg.append(int(avg / total)) final_dominant = [] brightest = max(dominant_avg) for color in range(3): value = dominant_avg[color] / (brightest / mitigate) if brightest > mitigate else dominant_avg[color] final_dominant.append(int(value)) return tuple(final_dominant), remaining
def clean_zip_city(word): """Remove leading comma and line break from zip_city""" return word.lstrip(',').strip()
def get_pandas_df(): """ Gets pandas DataFrame if we can import it """ try: import pandas as pd df = pd.DataFrame except (ModuleNotFoundError, ImportError): df = None return df
def fahrenheit_to_celsius(fahrenheit): """ Convert temperature from Fahrenheit to Celsius PARAMETERS ---------- fahrenheit: float A temperature value in units of Fahrenheit RETURNS ------- celsius: float A temperature value in units of Celsius """ #convert temperature: return (fahrenheit-32)*(5/9)
def transform_data(scores): """Convert list of dicts to a list of lists""" transformed_data = [list(col) for col in zip(*[d.values() for d in scores])] return transformed_data[0], transformed_data[1]
def get_parent_language(lang): """If the passed language is a variant, return its parent Eg: 1. zh-TW -> zh 2. sr-BA -> sr """ is_language_variant = "-" in lang if is_language_variant: return lang[:lang.index("-")]
def get_vidhost(url): """ Trim the url to get the video hoster :return vidhost """ parts = url.split('/')[2].split('.') vidhost = '%s.%s'%(parts[len(parts)-2],parts[len(parts)-1]) return vidhost
def _c(s,modifier=0,intensity=3,color=0): """ mod ::= 0(reset)|1(bold)|2(faint)|3(italic)|4(underline) int ::= 9(intense fg) | 3(normal bg) clr ::= 0(blk)|1(red)|2(grn)|3(ylw)|4(blu)|5(mag)|6(cyn)|7(wht) """ escape = "\x1b[" reset_modifier = 0 ns = f"{escape}{modifier};{intensity}{color}m{s}{escape}{reset_modifier}m" return ns
def bkp_log_all(args_array, server): """Function: bkp_log_all Description: bkp_log_all function. Arguments: (input) args_array (input) server """ status = True if args_array and server: status = True return status
def nextDay(year, month, day): """ Returns the year, month, day of the next day. Simple version: assume every month has 30 days. """ years = year months = month days = day + 1 if days > 30: days = 1 months = month + 1 if months > 12: months = 1 years = year + 1 return (years, months, days)
def getTemperature(U_code,y_helium,ElectronAbundance): """U_codes = res['u'] y_heliums = res['z'][:,1] ElectronAbundance=res['ne']""" U_cgs = U_code*1e10 gamma=5/3. kB=1.38e-16 #erg /K m_proton=1.67e-24 # g mu = (1.0 + 4*y_helium) / (1+y_helium+ElectronAbundance) mean_molecular_weight=mu*m_proton return mean_molecular_weight * (gamma-1) * U_cgs / kB
def get_data_from_response(response: dict, capture_headers: bool = True) -> dict: """ Capture response data from lambda return """ result = {} if "statusCode" in response: try: result["status_code"] = int(response["statusCode"]) except ValueError: # statusCode wasn't formed as an int # we don't log here, as we will have already logged at transaction.result handling result["status_code"] = 500 if capture_headers and "headers" in response: result["headers"] = response["headers"] return result
def get_share_for_user(shares, user): """Gets the share for a specific user. :param shares: iterable of share objects :param user: the user the share is for """ if not shares: return None for share in shares: if share.for_user_id == user.id: return share return None
def niceformat(ret): """ Converts to percentage and format to 1 decimal place """ return round(ret*100,1)
def remove_vibrots(vibrot, modes): """ Removes specified modes from anharmonic constant matrix INPUTS: xmat - anharmonic constant matrix modes - the modes to delete from the matrix (with 1 being the first mode) OUTPUTS: xmat - anharmonic constant matrix with columns and rows deleted for specified modes """ modes.sort() # reverse=True) vibrot = vibrot.splitlines() modeindex = [mode-1 for mode in modes] vibrots = [] for index in range(len(vibrot)): if index not in modeindex: vibrots.append(vibrot[index]) return '\n'.join(vibrots)
def int2str(integer): """Representation of an integer as character""" if integer < 26: return chr(integer + ord('a')) elif integer < 52: return chr(integer - 26 + ord('A')) elif integer < 62: return chr(integer - 52 + ord('0')) else: raise ValueError("Invalid integer, can't convert")
def contains_non_letters(word): """Helper for :meth:`analyze`""" for char in word: if not char.isalpha(): if not char in ["'", "-"]: return True return False
def pairs(sequence): """Returns the list of pairs that you can create from the sequence. The results are ordered and no duplicates will be included, also, if <a,b> is in the result then <b,a> won't be.""" sequence = sorted(list(set(sequence))) indices = list(range(len(sequence))) return [(sequence[i], sequence[j]) for i in indices for j in indices if i < j]
def DetermineLocaleType(locale_str): """Determines the locale 'type' for a given locale name. Returns: (string) Always one of the following strings, 'world' If the locale name refers to the world. 'country' If the locale name looks like a country ID. 'region' If the locale name looks like a region ID. 'city' If the locale name looks like a city ID. """ if locale_str == 'world': return 'world' depth_map = {1: 'country', 2: 'region', 3: 'city'} depth = len(locale_str.split('_')) return depth_map[depth]
def sum_cubes(n): """Sum the first N cubes of natural numbers. >>> sum_cubes(5) 225 """ total, k = 0, 1 while k <= n: total, k = total + pow(k, 3), k + 1 return total
def calc_upsampling_size(input_size: int, dilation: int = 1, tconv_kernel_size: int = 3, tconv_stride: int = 2, tconv_padding: int = 1, output_padding: int = 1) -> int: """ Helper function to calculate the upsampling size desired from the convolution parameters. This is the same formula as used by the transposed 3D convolution of PyTorch Args: - input_size (int): size of the input tensor - dilation (int): dilation of the convolution - tconv_kernel_size (int): kernel of the convolution - tconv_stride (int): stride of the convolution - tconv_padding (int): padding of the convolution - output_padding (int): Output padding to add """ return (input_size - 1) * tconv_stride - 2 * tconv_padding + dilation * ( tconv_kernel_size - 1) + output_padding + 1
def _is_interactive(module): """ Decide whether this is running in a REPL or IPython notebook """ return not hasattr(module, '__file__')
def fixName(name): """PMML tag name substitutions to avoid conflicts with Python syntax.""" out = name.replace("-", "_") if out == "True": out = "AlwaysTrue" elif out == "False": out = "AlwaysFalse" elif out == "from": out = "isfrom" return out
def parabolic(f, x): """Quadratic interpolation for estimating the true position of an inter-sample maximum when nearby samples are known. f is a vector and x is an index for that vector. Returns (vx, vy), the coordinates of the vertex of a parabola that goes through point x and its two neighbors. Example: Defining a vector f with a local maximum at index 3 (= 6), find local maximum if points 2, 3, and 4 actually defined a parabola. In [3]: f = [2, 3, 1, 6, 4, 2, 3, 1] In [4]: parabolic(f, argmax(f)) Out[4]: (3.2142857142857144, 6.1607142857142856) """ if x < 1 or x+1 >= len(f): return None denom = (f[x-1] - 2 * f[x] + f[x+1]) if denom == 0.0: return None xv = 1/2. * (f[x-1] - f[x+1]) / denom + x yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x) return (xv, yv)
def del_fake_nums(intList, step): #8 """ Delete fake numbers added by the fake_nums function (only used in decryption) """ placeToDelNum = [] for index in range(0, len(intList), step+1): placeToDelNum.append(index) newIntList = [item for item in intList] for index in reversed(placeToDelNum): del newIntList[index] return newIntList
def log2lin(dlogyl,dlogyu,logy): """ From a given uncertainty in log-space (dex) and the value of y, calculates the error in linear space. Returns a sequence with the lower and upper arrays with errors. """ # Upper error bar dyu=10**logy*(10.**dlogyu-1.) # Lower error bar dyl=-10**logy*(10.**-dlogyl-1.) return dyl, dyu
def ask_for_continuation(iteration: int) -> bool: """ Ask the user if we can proceed to execute the sandbox. :param iteration: the iteration number. :return: True if the user decided to continue the execution, False otherwise. """ try: answer = input( "Would you like to proceed with iteration {}? [y/N]".format(iteration) ) if answer != "y": return False else: return True except EOFError: return False
def sign(x): """ Returns 1 or -1 depending on the sign of x """ if x >= 0: return 1 else: return -1
def _isleap(year): """Return True for leap years and False otherwise. """ return year % 4 == 0 and (year % 100 !=0 or year % 400 == 0)
def get_first(values: list): """ Function that takes in a list and returns the first value (and the .text attribute if it exists), otherwise returns nothing @param values: list that contains 0-many results @returns: the first item of the list or None """ out=None if len(values) > 0: out=values[0] if out.text: out=out.text.strip() return(out)
def format_bytes(size): """ Convert bytes to KB/MB/GB/TB/s """ # 2**10 = 1024 power = 2 ** 10 n = 0 power_labels = {0: 'B/s', 1: 'KB/s', 2: 'MB/s', 3: 'GB/s', 4: 'TB/s'} while size > power: size /= power n += 1 return " ".join((str(round(size, 2)), power_labels[n]))
def pad_key(padname, prefix='pad'): """Redis Pad key generator. Key contains Pad name and prefix. :param padname: redis pad name. :param prefix: redis key prefix. """ return '%s:%s' % (prefix, padname)
def db_to_ascii(field): """ converts an db style atom name to ascii """ field = field.replace('_','-') return field
def get_capped_integer(number, min_value=1, max_value=100): """ A helper function to limit an integer between an upper and lower bound :param number: Number to keep limited :type number: int or str :param min_value: Lowest possible value assigned when number is lower than this :type min_value: int or str :param max_value: Highest possible value assigned when number is larger than this :type max_value: int or str :return: Integer that adheres to min_value <= number <= max_value :rtype: int >>> get_capped_integer(42) 42 >>> get_capped_integer(0, min_value=7) 7 >>> get_capped_integer(42, max_value=7) 7 >>> get_capped_integer('0', min_value='7') 7 >>> get_capped_integer('42', max_value='7') 7 """ return min(max(int(number), int(min_value)), int(max_value))
def twice_x(x): """Callback to fill the marketing example value.""" return float(x) * 2
def tilted_L1(u, quantile=0.5): """ tilted_L1(u; quant) = quant * [u]_+ + (1 - quant) * [u]_ """ return 0.5 * abs(u) + (quantile - 0.5) * u
def lowercase_term_id(term_key: str) -> str: """Lowercase the term value (not the namespace prefix) Args: term_id (str): term identifier with namespace prefix, e.g. MESH:Atherosclerosis Returns: str: lowercased, e.g. MESH:atherosclerosis """ (ns, val) = term_key.split(":", 1) term_key = f"{ns}:{val.lower()}" return term_key
def get_p2p_scatter_2praw(model): """ Get ratio of variability (sum of squared differences of consecutive values) of folded and unfolded models. """ return model['scatter_2praw']
def is_symbol_included_for_completeness(symbol: str) -> bool: """ Determine whether a symbol is listed for sake of type completeness. """ return symbol.endswith(':completeness')
def test_regions(regions,x,y): """ Determines whether point (x,y) falls within any of regions """ for region in regions: if x > region[0] and x < region[2] and y > region[1] and y < region[3]: return True return False
def get_smallest_divisible_number_brute_force(max_factor): """ Get the smallest divisible number by all [1..max_factor] numbers by brute force. """ number_i = max_factor while True: divisible = True for factor_i in range(1, max_factor+1): if number_i % factor_i > 0: divisible = False break if divisible: return number_i number_i += 1
def calculate_fcc_nc(listA, listB): """ Calculates the fraction of common elements between two lists not taking into account chain IDs. Much Slower. """ largest,smallest = sorted([listA, listB], key=len) ncommon = len([ele for ele in largest if ele in smallest]) return (ncommon, ncommon)
def _extra_langs(): """Define extra languages. Returns: dict: A dictionnary of extra languages manually defined. Variations of the ones generated in `_main_langs`, observed to provide different dialects or accents or just simply accepted by the Google Translate Text-to-Speech API. """ return { # Chinese 'zh-TW': 'Chinese (Mandarin/Taiwan)', 'zh': 'Chinese (Mandarin)' }
def unquote_wordtree(wtree): """Fold the word tree while removing quotes everywhere. Other expansion sequences are joined as such. """ def unquote(wtree): unquoted = [] if wtree[0] in ('', "'", '"', '\\'): wtree = wtree[1:-1] for part in wtree: if isinstance(part, list): part = unquote(part) unquoted.append(part) return ''.join(unquoted) return unquote(wtree)
def recursive_convert_to_unicode(replace_to_utf): """Converts object into UTF-8 characters ignores errors Args: replace_to_utf (object): any object Returns: object converted to UTF-8 """ try: if isinstance(replace_to_utf, dict): return {recursive_convert_to_unicode(k): recursive_convert_to_unicode(v) for k, v in replace_to_utf.items()} if isinstance(replace_to_utf, list): return [recursive_convert_to_unicode(i) for i in replace_to_utf if i] if not replace_to_utf: return replace_to_utf return str(replace_to_utf, 'utf-8', 'ignore') except TypeError: return replace_to_utf
def _get_min_and_index(lst): """ Private function for obtaining min and max indicies. """ minval, minidx = lst[0], 0 for i, v in enumerate(lst[1:]): if v < minval: minval, minidx = v, i + 1 return minval, minidx
def rgb_to_hex(r, g, b): """ Convert RGB color to an Hexadecimal representation """ return "%02x%02x%02x" % (r, g, b)
def is_hex(s): """ Test if a string is a hexadecimal in string representation. :param s: The string to test. :return: True if hexadecimal, False if not. """ try: int(s, 16) return True except ValueError: return False
def uniq(iterable, key=lambda x: x): """ Remove duplicates from an iterable. Preserves order. :type iterable: Iterable[Ord => A] :param iterable: an iterable of objects of any orderable type :type key: Callable[A] -> (Ord => B) :param key: optional argument; by default an item (A) is discarded if another item (B), such that A == B, has already been encountered and taken. If you provide a key, this condition changes to key(A) == key(B); the callable must return orderable objects. """ keys = set() res = [] for x in iterable: k = key(x) if k in keys: continue res.append(x) keys.add(k) return res # Enumerate the list to restore order lately; reduce the sorted list; restore order # def append_unique(acc, item): # return acc if key(acc[-1][1]) == key(item[1]) else acc.append(item) or acc # srt_enum = sorted(enumerate(iterable), key=lambda item: key(item[1])) # return [item[1] for item in sorted(reduce(append_unique, srt_enum, [srt_enum[0]]))]
def get_create_indexes_queries(graph_name, backend): """Format all SQlite CREATE INDEXES statements with the name of the RDF graph to insert.""" if backend == "sqlite" or backend == "sqlite-catalog": return [ f"CREATE UNIQUE INDEX IF NOT EXISTS {graph_name}_spo_index ON {graph_name} (subject,predicate,object);", f"CREATE UNIQUE INDEX IF NOT EXISTS {graph_name}_osp_index ON {graph_name} (object,subject,predicate);", f"CREATE UNIQUE INDEX IF NOT EXISTS {graph_name}_pos_index ON {graph_name} (predicate,object,subject);" ] else: raise Exception(f"Unknown backend for SQlite: {backend}")
def median(iterable, sort=True): """ Returns the value that separates the lower half from the higher half of values in the list. """ s = sorted(iterable) if sort is True else list(iterable) n = len(s) if n == 0: raise ValueError("median() arg is an empty sequence") if n % 2 == 0: return float(s[(n // 2) - 1] + s[n // 2]) / 2 return s[n // 2]
def base40_to_interval_name(base40): """Base40 number to interval name conversion :param base40: Base40 interval number :type base40: int :returns: Interval name :rtype: str **Examples** >>> base40_to_interval_name(46) '+M9' """ direction = '-' if base40 < 0 else '+' octave, interval = divmod(abs(base40), 40) names = [ 'P1', 'A1', None, None, 'd2', 'm2', 'M2', 'A2', None, None, 'd3', 'm3', 'M3', 'A3', None, None, 'd4', 'P4', 'A4', None, None, None, 'd5', 'P5', 'A5', None, None, 'd6', 'm6', 'M6', 'A6', None, None, 'd7', 'm7', 'M7', 'A7', None, None, 'd8', 'P8', ] name = names[interval] if octave: name = name.replace(name[1], str(octave * 8 + int(name[1]) - 1)) return ''.join([direction, name])
def write(path, data): """Write data to a text file. Args: path (str): Full path to file data (str): File text Returns: int: Number of characters written """ with open(path, 'w') as handle: return handle.write(data)
def translate_country(country_name): """Tries to account for the fact that there's different ways to write the name of the same country, and the slugification alone doesn't not standardise it. Also, it'll be of help to some spanish users. """ translation_dict = { "us": "usa", "united-states": "usa", "united-states-of-america": "usa", "estados-unidos": "usa", "eeuu": "usa", "united-kingdom": "uk", "great-britain": "uk", "arg": "argentina", "ar": "argentina", "brasil": "brazil" } return translation_dict.get(country_name, country_name)
def mask_dotted_from_bits(bits): """Creates a subnet mask string in dotted notation from number of bits.""" assert bits > 0 assert bits <= 32 mask = '0' * 32 mask = mask.replace('0', '1', bits) dot_mask = '%s.%s.%s.%s' % ( int(mask[:8], 2), int(mask[8:16], 2), int(mask[16:24], 2), int(mask[24:], 2) ) return dot_mask
def collision(state, n): """returns true if boulder collides with alien""" for i in range(n): p=False if state[0:n][i]==state[-n:][i] and state[0:n][i]=='1': return True return False
def example_to_features_predict(input_ids, attention_masks, token_type_ids): """ Convert the test examples into Bert compatible format. """ return {"input_ids": input_ids, "attention_mask": attention_masks, "token_type_ids": token_type_ids}
def get_e_young_nu_poisson(mu, lambda_): """ Get the young's module and the poisson ratio from 2D plane stress Lame coefficients lambda and mu (Note: used formulas in get_lambda_mu and solved for e_young and nu_poisson) Parameters ---------- mu : float, np.float Lame coefficients mu. lambda_ : float, np.float Lame coefficients lambda. Returns ------- e_young : float young's module. nu_poisson : float poisson ratio. """ nu_poisson = lambda_ / (lambda_ + 2 * mu) e_young = 4 * (lambda_ * mu + mu * mu) / (lambda_ + 2 * mu) return e_young, nu_poisson
def split_path(path): """ Splits the text and build a nice import statement from it. Note: only well defined import paths are supported. Not something invalid like '..foo.bar..'. >>> split_path('foo.bar.Batz') ('foo.bar', 'Batz') >>> split_path('..lel') ('..', 'lel') >>> split_path('.lul') ('.', 'lul') >>> split_path('lol') ('', 'lol') >>> split_path('...na.na.na.na.na.na.Batman') ('...na.na.na.na.na.na', 'Batman') >>> split_path('...........yolo.swagger') ('...........yolo', 'swagger') :param path: The path to split. :type path: str :return: The import text, like `from x import y` or `import z` :rtype: tuple(str)|Tuple[str, str] """ last_dot_position = path.rfind('.') if last_dot_position == -1: # no dot found. import_path = '' import_name = path else: import_path = path[:last_dot_position + 1] import_name = path[last_dot_position + 1:] # handle 'foo.bar.Baz' not resulting in 'foo.bar.', i.e. remove the dot at the end. if import_path.rstrip('.') != '': # e.g. not '....' import_path = import_path.rstrip('.') # end if return import_path, import_name
def restructure(transactions): """ Restructure transactions, so that each month every possible positin gets listed, even if its value is zero. transactions: ordered dict of transactions """ all_months = [tr.items() for month,tr in transactions.items()] all_groups_listed = [[x[0] for x in g] for g in all_months] all_groups = set([item for sublist in all_groups_listed for item in sublist]) all_transactions = [] for month,tr in transactions.items(): months_transactions = {month: {}} for group in all_groups: value = tr.get(group, 0) months_transactions[month][group] = value all_transactions.append(months_transactions) return all_transactions
def pick_attributes(old_dict, attributes, skip_non_existing=True): """ Pick requested key value pairs from a dictionary and return a new dictionary """ new_dict = {} for attribute in attributes: if attribute in old_dict: new_dict[attribute] = old_dict[attribute] elif not skip_non_existing: new_dict[attribute] = None return new_dict
def find_possible(key, possible): """ Given a key and a list, find possible matches Right now it just checks for case """ if key in possible: return key possible = [x for x in possible if x.lower() == key] if possible == []: return None return possible[0]
def solution(x, y): """Returns ID that is only present in one of the two lists passed as args Args: x: list of prisoner IDs y: list of prisoner IDs Returns: int value of the additional prisoner ID """ try: a = set(x) b = set(y) except TypeError: raise TypeError("Args must be lists of IDs") c = a.symmetric_difference(b) # c is a set containing the ID that is only present in one of the lists if len(c) == 0: raise ValueError("Args have same set of IDs. " + "One additional ID expected.") if len(c) > 1: raise ValueError("More than one additional ID " + "found: %s One expected." % list(c)) return c.pop()
def dr_evil(amount): """ >>> dr_evil(10) '10 dollars' >>> dr_evil(1000000) '1000000 dollars (pinky)' >>> dr_evil(2000000) '2000000 dollars (pinky)' """ if amount >= 1000000: return f"{amount} dollars (pinky)" else: return f"{amount} dollars"
def compose(im, y, fns): """ apply a collection of transformation functions fns to images """ for fn in fns: #pdb.set_trace() im, y =fn(im, y) return im if y is None else (im, y)
def _make_particle_visible_svg(text,particles,plidx): """ Takes svg file and makes particle visible at specified file location. """ for particle in particles: lidx = text.find("label=\"%s\""%str(particle+1),plidx) text = text[:lidx]+text[lidx:].replace("display:none","display:inline",1) return text
def hashable(obj): """Convert `obj` into a hashable object.""" if isinstance(obj, list): # Convert a list to a tuple (hashable) return tuple(obj) elif isinstance(obj, dict): # Convert a dict to a frozenset of items (hashable) return frozenset(obj.items()) return obj
def find_secondary_lithology(tokens_and_primary, lithologies_adjective_dict, lithologies_dict): """Find a secondary lithology in a tokenised sentence. Args: tokens_and_primary (tuple ([str],str): tokens and the primary lithology lithologies_adjective_dict (dict): dictionary, where keys are exact, "clear" markers for secondary lithologies (e.g. 'clayey'). Keys are the lithology classes. lithologies_dict (dict): dictionary, where keys are exact markers as match for lithologies. Keys are the lithology classes. Returns: str: secondary lithology if dectected. empty string for none. """ tokens, prim_litho = tokens_and_primary if prim_litho == '': # cannot have a secondary lithology if no primary return '' # first, let's look at adjectives, more likely to semantically mean a secondary lithology keys = lithologies_adjective_dict.keys() for x in tokens: if x in keys: litho_class = lithologies_adjective_dict[x] if litho_class != prim_litho: return litho_class # then, as a fallback let's look at a looser set of terms to find a secondary lithology keys = lithologies_dict.keys() for x in tokens: if x in keys: litho_class = lithologies_dict[x] if litho_class != prim_litho: return litho_class return ''
def args_validity_check(*unknown_args): """ Raise an Error if any args are unrecognized by all the parsers unknown_args: unknown_args1, unknown_args2, ... """ if len(unknown_args) == 1 and len(unknown_args[0]) > 0: return False base_unknown_args = unknown_args[0] for arg in base_unknown_args: # assume each arg contains "--" if '--' not in arg: continue # if an argument contained by all members in the unknown_args # raise error for the first detected unrecognized argument contained = [(arg in ua) for ua in unknown_args[1:]] assert not all(contained), ( f"unrecognized argument {arg} by all parsers")
def new_sleep_summary(timezone, model, startdate, enddate, date, modified, data): """Create simple dict to simulate api data.""" return { "timezone": timezone, "model": model, "startdate": startdate, "enddate": enddate, "date": date, "modified": modified, "data": data, }
def popcnt(b): """ Return number of "1" bits set in 'b' """ return len([x for x in bin(b) if x == "1"])
def bessel_fw3d(x, **kwargs): """ Fullwave3D's approximation of the modified zero-order Bessel's function of the first kind. Parameters ---------- x : float Argument of the function. Returns ------- s : float Value of the function. Named identical to fullwave3D. Notes ----- From the original source code: 'accuracy is <1/4%, which is quite adequate for distributed source'. There are discrepancies compared to Python's built-in function but these are for x < 0 which does not matter in Kaiser window (we look only at x > 0). FIXME: d-c. """ v = x * 0.5 a = 1.0 s = 1.0 i = 0 while a > 0.03: i = i + 1 a = a * (v / i) s = s + a ** 2 return s
def selection_sort(arr): """ Selection sort repeatedly finds the minimum and moves it to the front """ l = len(arr) if l == 0: return arr for i in range(l): min_i = i for j in range(i+1, l): if arr[j] < arr[min_i]: min_i = j temp = arr[i] arr[i] = arr[min_i] arr[min_i] = temp return arr
def calc_distance(v_i, v_f, a): """ Computes the distance given an initial and final speed, with a constant acceleration. :param: v_i: initial speed (m/s) v_f: final speed (m/s) a: acceleration (m/s^2) :return d: the final distance (m) """ d = (v_f**2 - v_i**2) / (2 * a) return d
def _is_ipv6_addr_link_local(ip_addr): """Indicates if a given IPv6 address is link-local""" return ip_addr.lower().startswith('fe80::')
def fibonacci(n: int) -> int: """ Returns n-th Fibonacci number n must be more than 0, otherwise it raise a ValueError. >>> fibonacci(0) 0 >>> fibonacci(1) 1 >>> fibonacci(2) 1 >>> fibonacci(10) 55 >>> fibonacci(-2) Traceback (most recent call last): ... ValueError: n must be more or equal than 0 """ if n < 0: raise ValueError('n must be more or equal than 0') elif n == 0: return 0 fib = [0, 1] + [0] * (n - 1) for i in range(2, n + 1): fib[i] = fib[i - 1] + fib[i - 2] return fib[n]