content
stringlengths
42
6.51k
def _func_star_single(func_item_args):
    """Unpack a (func, item, args, kwargs) tuple and apply it.

    Equivalent to::

        func = func_item_args[0]
        item = func_item_args[1]
        args = func_item_args[2]
        kwargs = func_item_args[3]
        return func(item, args[0], args[1], ..., **kwargs)
    """
    func = func_item_args[0]
    item = func_item_args[1]
    extra = func_item_args[2]
    kwargs = func_item_args[3]
    return func(item, *extra, **kwargs)
def _rpm_split_filename(filename):
    """Split a standard-style RPM filename into its components.

    Taken from yum's rpmUtils.miscutils.py file.

    Pass in a standard style rpm fullname and get back a
    (name, ver, rel, epoch, arch) tuple, e.g.::

        foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
        1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
    """
    # drop a trailing '.rpm' extension if present
    if filename[-4:] == '.rpm':
        filename = filename[:-4]
    # arch is everything after the last '.'
    archIndex = filename.rfind('.')
    arch = filename[archIndex+1:]
    # release sits between the last '-' and the arch separator
    relIndex = filename[:archIndex].rfind('-')
    rel = filename[relIndex+1:archIndex]
    # version sits between the previous '-' and the release separator
    verIndex = filename[:relIndex].rfind('-')
    ver = filename[verIndex+1:relIndex]
    # optional epoch prefix is terminated by ':'; empty string when absent
    epochIndex = filename.find(':')
    if epochIndex == -1:
        epoch = ''
    else:
        epoch = filename[:epochIndex]
    # name is whatever remains between the epoch (if any) and the version
    name = filename[epochIndex + 1:verIndex]
    return name, ver, rel, epoch, arch
def human_bytes(size):
    """Format *size*, a number of bytes, in a human-readable way.

    Divides by 1024 until the value fits a unit; falls back to "big".
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB']
    remaining = size
    for unit in units:
        if remaining < 1024:
            return "%3.1f %s" % (remaining, unit)
        remaining /= 1024.0
    return "big"
def _calculate_downsampling_rate(initial_sampling_rate, maximum_f0):
    """Determine the downsampling rate to apply to audio input for RAPT.

    Follows the formula in David Talkin's RAPT paper:
    ``fs / round(fs / (4 * F0_max))``.

    :param initial_sampling_rate: original sampling rate of the audio (Hz)
    :param maximum_f0: highest F0 (Hz) expected in the input
    :returns: the downsampling rate, truncated to an int
    :raises ValueError: when ``round(fs / (4 * F0_max))`` is zero, which
        would make the ratio undefined
    """
    try:
        rate = (initial_sampling_rate /
                round(initial_sampling_rate / (4 * maximum_f0)))
    except ZeroDivisionError as err:
        # chain the original exception so the root cause stays visible
        raise ValueError('Ratio of sampling rate and max F0 leads to '
                         'division by zero. Cannot perform 1st pass of nccf '
                         'on downsampled audio.') from err
    return int(rate)
def split_volume_from_journal(citation_elements):
    """Split volume from journal title.

    Sometimes the volume is attached to the journal title instead of the
    volume field (separated by ';'). In those cases we move it from the
    title to the volume.
    """
    for element in citation_elements:
        if element['type'] == 'JOURNAL' and ';' in element['title']:
            title, _, series = element['title'].rpartition(';')
            element['title'] = title
            element['volume'] = series + element['volume']
    return citation_elements
def get_unwise_image_url(ra, dec, npix, band, data_release, filetype="image"):
    """Construct the unWISE-specific URL to download unWISE cutouts.

    (Doc fix: the original docstring wrongly claimed a DES DR1 cutout and
    documented a nonexistent ``verbosity`` parameter.)

    :param ra: float
        Right ascension of target
    :param dec: float
        Declination of target
    :param npix: float
        Cutout image size in pixels (capped at 256, the unWISE maximum)
    :param band: str
        Passband of the image
    :param data_release: str
        String specifying the unwise data release. Possible values are:
        neo1, neo2, neo3, neo4, neo5, neo6, allwise
    :param filetype: str
        One of "image", "std" or "invvar"
    :return: str or None
        The url of the unWISE image cutout, or None when the requested band
        is unavailable in the requested release
    """
    # Maximum cutout size for unWISE cutouts is 256 pixel
    if npix >= 256:
        npix = 256
    datatype = {"image": "&file_img_m=on",
                "std": "&file_std_m=on",
                "invvar": "&file_invvar_m=on"}
    file_type = datatype[filetype]
    # Bands w3/w4 are not part of the NEOWISE-R (neo4+) releases
    if data_release in ['neo4', 'neo5', 'neo6'] and band in ['3', '4']:
        print('[ERROR] Download of band w{} in unwise-{} not '
              'available'.format(band, data_release))
        return None
    base = "http://unwise.me/cutout_fits?version={}&".format(data_release)
    ra_part = "ra={:0}&".format(ra)
    dec_part = "dec={:0}&".format(dec)
    size_part = "size={:0}&".format(npix)
    band_part = "bands={0:s}".format(band)
    return base + ra_part + dec_part + size_part + band_part + file_type
def is_between(low, x, high):
    """Return True when *x* lies within the closed interval [low, high]."""
    return low <= x <= high
def unwrap_distributed(state_dict):
    """Unwrap a model state dict from DistributedDataParallel.

    DDP wraps the model so every parameter key gains a leading "module."
    prefix; it needs to be removed for single GPU inference.

    :param state_dict: model's state dict
    :return: new dict with the leading "module." prefix stripped from keys
    """
    new_state_dict = {}
    for key, value in state_dict.items():
        # Only strip a *leading* "module." — the original used a blanket
        # str.replace, which also mangled keys that legitimately contain
        # "module." further in (e.g. "encoder.module.x").
        if key.startswith('module.'):
            key = key[len('module.'):]
        new_state_dict[key] = value
    return new_state_dict
def _sort_key_min_confidence_sd(sample, labels):
    """Samples sort key: the minimum confidence_sd among the sample's
    inferences (restricted to *labels* when given); +inf when none match."""
    candidates = (
        inference.get("confidence_sd", float("+inf"))
        for inference in sample["inferences"]
        if not labels or inference["label"] in labels
    )
    return min(candidates, default=float("+inf"))
def to_ascii(s):
    """Force a byte string to lowercase ASCII, stopping at the first NUL.

    :param s: bytes, possibly NUL-terminated
    :return: lowercase str with non-ASCII bytes silently dropped
    """
    head, _, _ = s.partition(b'\x00')
    return head.decode('ascii', 'ignore').lower()
def get_j2k_parameters(codestream):
    """Return some of the JPEG 2000 component sample's parameters in `stream`.

    .. deprecated:: 1.2
        Use :func:`~pydicom.pixel_data_handlers.utils.get_j2k_parameters`
        instead

    Parameters
    ----------
    codestream : bytes
        The JPEG 2000 (ISO/IEC 15444-1) codestream data to be parsed.

    Returns
    -------
    dict
        A dict containing the JPEG 2000 parameters for the first component
        sample, will be empty if `codestream` doesn't contain JPEG 2000 data
        or if unable to parse the data.
    """
    try:
        # Wrong format unless the stream opens with the SOC marker followed
        # immediately by the SIZ marker (15444-1, Figure A-3).
        if codestream[0:2] != b'\xff\x4f' or codestream[2:4] != b'\xff\x51':
            return {}
        # Ssiz of the first component lives at offset 42 of the SIZ segment
        # (15444-1 A.5.1); the high bit flags a signed sample.
        ssiz = ord(codestream[42:43])
        signed = bool(ssiz & 0x80)
        precision = ((ssiz & 0x7F) if signed else ssiz) + 1
        return {'precision': precision, 'is_signed': signed}
    except (IndexError, TypeError):
        return {}
def group_consecutives(vals, step=0):
    """Return list of consecutive lists of numbers from vals (number list).

    A new run starts whenever a value differs from previous + *step*.
    """
    current = []
    groups = [current]
    expected = None
    for value in vals:
        if expected is None or value == expected:
            current.append(value)
        else:
            current = [value]
            groups.append(current)
        expected = value + step
    return groups
def get_offset(num, columns, spacing):
    """Return the (x, y) offset from the prototype position.

    Positional arguments:
    num -- the number of the object, starting from 0
    columns -- how many columns before wrapping
    spacing -- a tuple of (x, y), spacing between objects
    """
    row, col = divmod(num, columns)
    return (col * spacing[0], row * spacing[1])
def first(a, b):
    """Return the first element of *a* that is also in *b*.

    Elements in 'a' are prioritized: they are checked in order and the first
    match wins.

    :param a: iterable whose elements are searched for
    :param b: container to test membership against
    :return: the first matching element, or None if there is no match
    """
    for elem in a:
        if elem in b:
            # Bug fix: the original returned the whole iterable `a` instead
            # of the matching element, contradicting the name and docstring.
            return elem
    return None
def city_country(city, country):
    """Try it yourself 8-6. City names: return "City, Country" title-cased."""
    return f"{city.title()}, {country.title()}"
def _is_valid_make_var(varname):
    """Check if the make variable name seems valid.

    According to GNU make, any chars that are not whitespace, ':', '#' or
    '=' are valid in a variable name.
    """
    if not varname:
        return False
    return not any(ch in varname for ch in ":#= \t\n\r")
def linear_func(x, a, b):
    """Evaluate the line ``a * x + b`` at *x*."""
    return b + a * x
def replaceMultiple(mainString, toBeReplaces, newString):
    """Replace every occurrence of each substring in *toBeReplaces* found in
    *mainString* with *newString*."""
    result = mainString
    for old in toBeReplaces:
        # only call replace when the substring is actually present
        if old in result:
            result = result.replace(old, newString)
    return result
def CMakeStringEscape(a):
    """Escapes the string 'a' for use inside a CMake string.

    This means escaping
    '\\' otherwise it may be seen as modifying the next character
    '"' otherwise it will end the string
    ';' otherwise the string becomes a list

    '#' does not need escaping: when the lexer is in string state it does
    not start a comment. Whether '$' needs escaping is yet unknown:
    generator variables (like ${obj}) must not be escaped, but text '$'
    should be.
    """
    # Backslashes must be doubled first so the escapes added below are not
    # themselves re-escaped.
    escaped = a.replace('\\', '\\\\')
    escaped = escaped.replace(';', '\\;')
    return escaped.replace('"', '\\"')
def apply_pred_id(x, labels):
    """Map Categories to Numeric Labels.

    :param x: category key to look up
    :param labels: mapping (or sequence) from category to an int-convertible
        label
    :return: the numeric label, or -1 when *x* is unknown or not numeric
    """
    try:
        return int(labels[x])
    except (KeyError, IndexError, TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return -1
def get_four_count_nums(candidates, disallowed_nums):
    """If there are four of a given number in the rows and columns not shared
    by the square under test, and the number isn't already in the block, that
    number has to be in the square under test (I think).
    """
    counts = {}
    for num in candidates:
        counts[num] = counts.get(num, 0) + 1
    return [num for num, count in counts.items()
            if count == 4 and [num] not in disallowed_nums]
def create_custom_var_from_popt(model_image, popt):
    """Creates variance map from the model image, given the 2nd poly fit
    parameters.

    Introduced in 0.50 (PIPE2D-931)

    Parameters
    ----------
    model_image : `np.array`
        Model image
    popt : `np.array`
        2d polyfit parameters (a, b, c) of a*x**2 + b*x + c

    Returns
    -------
    custom_var_image : `np.array`
        Recreated variance map
    """
    def quadratic(x, a, b, c):
        # explicit helper instead of a lambda for the quadratic model
        return a * x ** 2 + b * x + c

    return quadratic(model_image, *popt)
def args_to_dict(args):
    """Transform a subcommand's argument list into a dictionary of option
    names to value lists (a list, to cope with options with multiple values
    like --tag).

    "--opt=value" tokens are split on '='; bare values are appended to the
    most recently seen option. Values seen before any option are discarded.
    """
    tokens = []
    for arg in args:
        if arg.startswith("--"):
            tokens.extend(arg.split("="))
        else:
            tokens.append(arg)

    args_dict = {}
    current = None
    for token in tokens:
        if token.startswith("--"):
            current = token
            args_dict.setdefault(token, [])
        elif current is not None:
            args_dict[current].append(token)
    return args_dict
def guess_bytes(bstring):
    """
    NOTE: Using `guess_bytes` is not the recommended way of using ftfy. ftfy
    is not designed to be an encoding detector.

    In the unfortunate situation that you have some bytes in an unknown
    encoding, ftfy can guess a reasonable strategy for decoding them, by
    trying a few common encodings that can be distinguished from each other.

    Unlike the rest of ftfy, this may not be accurate, and it may *create*
    Unicode problems instead of solving them!

    It doesn't try East Asian encodings at all, and if you have East Asian
    text that you don't know how to decode, you are somewhat out of luck.
    East Asian encodings require some serious statistics to distinguish from
    each other, so we can't support them without decreasing the accuracy of
    ftfy.

    If you don't know which encoding you have at all, I recommend trying the
    'chardet' module, and being appropriately skeptical about its results.

    The encodings we try here are:

    - UTF-16 with a byte order mark, because a UTF-16 byte order mark looks
      like nothing else
    - UTF-8, because it's the global standard, which has been used by a
      majority of the Web since 2008
    - "utf-8-variants", because it's what people actually implement when they
      think they're doing UTF-8
    - MacRoman, because Microsoft Office thinks it's still a thing, and it
      can be distinguished by its line breaks. (If there are no line breaks
      in the string, though, you're out of luck.)
    - "sloppy-windows-1252", the Latin-1-like encoding that is the most
      common single-byte encoding
    """
    if isinstance(bstring, str):
        raise UnicodeError(
            "This string was already decoded as Unicode. You should pass "
            "bytes to guess_bytes, not Unicode."
        )

    # A UTF-16 byte order mark (either endianness) is unambiguous.
    if bstring.startswith(b'\xfe\xff') or bstring.startswith(b'\xff\xfe'):
        return bstring.decode('utf-16'), 'utf-16'

    byteset = set(bstring)
    try:
        if 0xed in byteset or 0xc0 in byteset:
            # Byte 0xed can be used to encode a range of codepoints that
            # are UTF-16 surrogates. UTF-8 does not use UTF-16 surrogates,
            # so when we see 0xed, it's very likely we're being asked to
            # decode CESU-8, the variant that encodes UTF-16 surrogates
            # instead of the original characters themselves.
            #
            # This will occasionally trigger on standard UTF-8, as there
            # are some Korean characters that also use byte 0xed, but that's
            # not harmful.
            #
            # Byte 0xc0 is impossible because, numerically, it would only
            # encode characters lower than U+0040. Those already have
            # single-byte representations, and UTF-8 requires using the
            # shortest possible representation. However, Java hides the null
            # codepoint, U+0000, in a non-standard longer representation -- it
            # encodes it as 0xc0 0x80 instead of 0x00, guaranteeing that 0x00
            # will never appear in the encoded bytes.
            #
            # The 'utf-8-variants' decoder can handle both of these cases, as
            # well as standard UTF-8, at the cost of a bit of speed.
            return bstring.decode('utf-8-variants'), 'utf-8-variants'
        else:
            return bstring.decode('utf-8'), 'utf-8'
    except UnicodeDecodeError:
        pass

    if 0x0d in byteset and 0x0a not in byteset:
        # Files that contain CR and not LF are likely to be MacRoman.
        return bstring.decode('macroman'), 'macroman'
    else:
        return bstring.decode('sloppy-windows-1252'), 'sloppy-windows-1252'
def format_match_string(string, fm_stopwords):
    """Convert *string* to lower case and remove the stop words in
    *fm_stopwords*."""
    words = string.lower().split()
    kept = (word for word in words if word not in fm_stopwords)
    return ' '.join(kept)
def parse_commastr(str_comma):
    """Parse a comma-separated "a,b" string into a list of two ints.

    Returns None for the empty string.
    """
    if not str_comma:
        return None
    first, second = (int(part) for part in str_comma.split(','))
    return [first, second]
def remove_block_hashtags(caption):
    """Attempt to remove hidden hashtags at the bottom of captions.

    Keeps only the text before the first newline and the first bullet
    ('\u2022'), stripped of surrounding whitespace.
    """
    first_line, _, _ = caption.partition('\n')
    before_bullet, _, _ = first_line.partition('\u2022')
    return before_bullet.strip()
def _cv_delta(x, eps=1.):
    """Return the result of a regularised dirac function of the input
    value(s): eps / (eps**2 + x**2)."""
    denominator = eps * eps + x * x
    return eps / denominator
def getUserDatabasePath():
    """Return the user-database path, which depends on whether this file is
    being run directly (reader.py) or imported (app.py)."""
    if __name__ == "__main__":
        return "../../database/user_database.xlsx"
    return "../database/user_database.xlsx"
def filter_one_letter_word(tweets):
    """Remove one-letter words from each tokenized tweet, in place, and
    return the (mutated) list."""
    for i, tokens in enumerate(tweets):
        tweets[i] = [word for word in tokens if len(word) > 1]
    return tweets
def _extract_version_number(bazel_version):
    """Extracts the semantic version number from a version string.

    Args:
        bazel_version: the version string that begins with the semantic
            version, e.g. "1.2.3rc1 abc1234" where "abc1234" is a commit
            hash.

    Returns:
        The semantic version string, like "1.2.3".
    """
    for i, ch in enumerate(bazel_version):
        if not (ch.isdigit() or ch == "."):
            return bazel_version[:i]
    return bazel_version
def get_happy_stack_name(deployment) -> str:
    """Returns the name of the Happy stack for the specified deployment.

    Note: This will only work with deployment={dev,stage,prod} and will not
    work with rdev!

    :param deployment: dev, stage or prod
    :return: the stack name, "<deployment>-<deployment>stack"
    """
    return "{0}-{0}stack".format(deployment)
def kelvin_to_rgb(K):
    """Look up RGB values for a colour temperature in Kelvin according to
    http://www.vendian.org/mncharity/dir3/blackbody/UnstableURLs/bbr_color.html

    Only the tabulated temperatures (4000-9000 in steps of 1000, plus 0 for
    white) are supported; other values raise KeyError.
    """
    table = {
        0: (1, 1, 1),  # fallback entry: pure white
        4000: (1.0000, 0.6636, 0.3583),
        5000: (1.0000, 0.7992, 0.6045),
        6000: (1.0000, 0.9019, 0.8473),
        7000: (0.9337, 0.9150, 1.0000),
        8000: (0.7874, 0.8187, 1.0000),
        9000: (0.6693, 0.7541, 1.0000),
    }
    return table[K]
def get_merged_gaps(gaps):
    """Get gaps merged across channels/streams

    Parameters
    ----------
    gaps: dictionary
        contains channel/gap array pairs

    Returns
    -------
    array_like
        an array of startime/endtime arrays representing gaps.

    Notes
    -----
    Takes an dictionary of gaps, and merges those gaps across channels,
    returning an array of the merged gaps.
    """
    # NOTE(review): each gap appears to be a 3-element sequence
    # [start, end, next_data_start] — gap[2] is compared against the next
    # gap's start and gap[1] is treated as the end; confirm against the
    # producer of these gap arrays.
    # flatten all channels' gaps into one list
    merged_gaps = []
    for key in gaps:
        merged_gaps.extend(gaps[key])
    # sort gaps so earlier gaps are before later gaps
    sorted_gaps = sorted(merged_gaps, key=lambda gap: gap[0])
    # merge gaps that overlap
    merged_gaps = []
    merged_gap = None
    for gap in sorted_gaps:
        if merged_gap is None:
            # start of gap
            merged_gap = gap
        elif gap[0] > merged_gap[2]:
            # next gap starts after current gap ends
            merged_gaps.append(merged_gap)
            merged_gap = gap
        elif gap[0] <= merged_gap[2]:
            # next gap starts at or before next data
            if gap[1] > merged_gap[1]:
                # next gap ends after current gap ends, extend current
                merged_gap[1] = gap[1]
                merged_gap[2] = gap[2]
    if merged_gap is not None:
        merged_gaps.append(merged_gap)
    return merged_gaps
def printFinalSolutionToFile(resFileName, outputFileName, key='u_dof',
                             component=0, meshLevel=0, verbose=0):
    """
    write solution component at last (or only time step) on a given mesh
    level as simple text file

    Returns True on failure (missing results file or missing key), False on
    success.
    """
    import os
    if not os.path.exists(resFileName):
        print("""resFileName= %s not found! """ % resFileName)
        return True
    import shelve
    # results database written by the simulation run (shelve of nested lists)
    results = shelve.open(resFileName)
    try:
        value = results['solutionData'][component][meshLevel][key]
        # numpy-like arrays provide tofile; otherwise write values manually
        if hasattr(value, "tofile"):
            value.tofile(outputFileName, sep="\n", format="%12.5e")
        else:
            output = open(outputFileName, 'w')
            for val in value:
                output.write("%12.5e \n" % val)
            output.close()
        return False
    except KeyError:
        print("""results['solutionData'][%s][%s][%s] not found """ % (component,
                                                                      meshLevel,
                                                                      key))
        if verbose > 0:
            # NOTE(review): this second lookup assumes 'solutionData' exists
            # even though the KeyError above may have been raised by it
            print("""results.keys() = %s """ % list(results.keys()))
            print("""results['solutionData'].keys() = %s """ % list(results['solutionData'].keys()))
        return True
    return True
def make_command_line_arguments(bam_file_name, bed_file_name, config_file_name,
                                transcript_file_name, gui_output_file_name):
    """
    Utility function to construct a list of command-line arguments in the
    form that is stored in sys.argv. This can then be fed to the main
    function and used to run CoverView.
    """
    arguments = ["-i", bam_file_name]
    optional = [
        ("-b", bed_file_name),
        ("-c", config_file_name),
        ("-t", transcript_file_name),
        ("--gui_json_output_file", gui_output_file_name),
    ]
    for flag, value in optional:
        if value is not None:
            arguments.extend([flag, value])
    return arguments
def scale3(a, c):
    """3 vector: vector ``a`` times scalar ``c`` (`a * c`), returned as a
    4-component list with a homogeneous trailing 1.0."""
    x, y, z = a[0], a[1], a[2]
    return [x * c, y * c, z * c, 1.0]
def time_formatter(milliseconds: int) -> str:
    """Time Formatter: render a millisecond count as
    'D day(s), H hour(s), M minute(s), S second(s), ms millisecond(s)',
    omitting zero-valued units (empty string for 0 ms)."""
    ms = int(milliseconds)
    seconds, ms = divmod(ms, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for amount, unit in ((days, "day"), (hours, "hour"), (minutes, "minute"),
                         (seconds, "second"), (ms, "millisecond")):
        if amount:
            parts.append("{} {}(s), ".format(amount, unit))
    # drop the trailing ", "
    return "".join(parts)[:-2]
def tuple_bool(x):
    """Implementation of `tuple_bool`: a tuple is truthy iff non-empty."""
    return len(x) > 0
def get_routing_keys(*args, **kwargs):
    """Get a list of routing keys for a plugin, ordered from least specific
    to most specific.

    args is used to specify routing words. The correct order is
    System, Version, Instance, Clone ID. For instance::

        ['test_system', '1.0.0'], is_admin=True:
            ['admin', 'admin.test_system', 'admin.test_system.1-0-0']

    NOTE: Because RabbitMQ uses '.' as the word delimiter, all '.' in
    routing words are replaced with '-'.

    :param args: List of routing key words to include in the routing keys
    :param kwargs: is_admin: Will prepend 'admin' to all generated keys if
        True
    :return: List of routing keys, ordered from general to specific
    """
    keys = ["admin"] if kwargs.get("is_admin", False) else []
    for word in args:
        if word is None:
            continue
        # make sure we don't have any extra word delimiters
        clean = word.replace(".", "-")
        keys.append(keys[-1] + "." + clean if keys else clean)
    return keys
def isStrictPubKeyEncoding(pubKey):
    """isStrictPubKeyEncoding returns whether or not the passed public key
    adheres to the strict encoding requirements.
    """
    size = len(pubKey)
    if size == 33:
        # Compressed keys start with 0x02 or 0x03
        return pubKey[0] in (0x02, 0x03)
    if size == 65:
        # Uncompressed keys start with 0x04
        return pubKey[0] == 0x04
    return False
def get_labels(data):
    """Returns the list of labels for the given issue or PR data."""
    labels = []
    for edge in data["node"]["labels"]["edges"]:
        labels.append(edge["node"]["name"])
    return labels
def get_user_attributes(cls, exclude_methods: bool = True) -> list:
    """Get the non-inherited attributes of a class.

    :param cls: Class Object
    :param exclude_methods: Exclude Methods (callable attributes)
    :return: list of attribute names
    """
    # attributes every plain object subclass has; these are filtered out
    base_attrs = dir(type('dummy', (object,), {}))
    result = []
    for attr in dir(cls):
        if attr in base_attrs:
            continue
        if exclude_methods and callable(getattr(cls, attr)):
            continue
        result.append(attr)
    return result
def is_cyclic(x, y):
    """Are these four-digit numbers cyclic (last two digits of x equal the
    first two digits of y)?"""
    # We can safely truncate to int as x and y come from the polygonal funcs.
    sx, sy = str(int(x)), str(int(y))
    return sx[2:] == sy[:2]
def get_best_scaling(target_width, filter_width):
    """Return the filter scaling for a face feature, padded by 10%.

    target_width: integer width for feature in face, e.g. width of the
        bounding box for eyes.
    filter_width: integer width of filter.
    """
    ratio = target_width / filter_width
    # scale width by 1.1
    return 1.1 * ratio
def email_parser(email):
    """Split a comma-separated email string into a list of stripped
    addresses.

    Arguments:
        email {str} -- email or comma-separated list of email addresses
    """
    parts = email.split(',')
    return [part.strip() for part in parts]
def byte_to_megabyte(byte):
    """Convert a byte value to megabytes (MiB, i.e. byte / 1024**2)."""
    return byte / (1024 * 1024)
def str2bool(string_, default='raise'):
    """Convert a string to a bool.

    Parameters
    ----------
    string_ : str
    default : {'raise', False}
        Default behaviour if none of the "true" strings is detected.

    Returns
    -------
    boolean : bool

    Examples
    --------
    >>> str2bool('True')
    True
    >>> str2bool('1')
    True
    >>> str2bool('0')
    False
    """
    truthy = {'true', 't', '1', 'y', 'yes', 'enabled', 'enable', 'on'}
    falsy = {'false', 'f', '0', 'n', 'no', 'disabled', 'disable', 'off'}
    lowered = string_.lower()
    if lowered in truthy:
        return True
    # a falsy `default` means "fall back to False" instead of raising
    if lowered in falsy or not default:
        return False
    raise ValueError('The value \'{}\' cannot be mapped to boolean.'
                     .format(string_))
def __map_scene_labels_biwi_crowds__(_labels=None):
    """Map labels from scenes in the biwi and crowds datasets to the labels
    expected to coincide with the labels present in the paths to the data.

    :param _labels: actual provided labels; if nothing is provided, a default
        list order is used; if an actual list is provided, then the returned
        list will be in the same order as the provided one
    :return: a list of canonical scene labels
    :raises Exception: for a label that matches no known scene
    """
    # token -> canonical label; insertion order matches the original
    # eth/hotel/univ/zara matching priority
    canonical = {'eth': 'biwi_eth', 'hotel': 'biwi_hotel',
                 'univ': 'crowds_univ', 'zara': 'crowds_zara'}
    if not _labels:
        return list(canonical.values())
    scene_labels = []
    for label in _labels:
        label_low = label.lower()
        for token, mapped in canonical.items():
            if token in label_low:
                scene_labels.append(mapped)
                break
        else:
            raise Exception(f'Received an invalid scene label for a biwi/crowds scene - {label}')
    return scene_labels
def queue_get_for(topic, host):
    """Get a queue name for the given topic and host (topic alone when no
    host is given)."""
    if host:
        return '%s.%s' % (topic, host)
    return topic
def aspcapStar_url(location_id, file_, url_header=None):
    """aspcapStar url generator which in principle is able to generate file
    path

    Parameters
    ----------
    location_id: int
        for 'apo1m', it's 1
        for 'apo25m', it's like PLATE
    file_: string
        FILE
    url_header: string
        if None or "sas", it's set to be
        "https://data.sdss.org/sas/dr13/apogee/spectro/redux/r6/stars/l30e/l30e.2"

    Returns
    -------
    url: string
        the url of the aspcapStar file

    Note
    ----
    version: string
        currently it's 'r6' @20161031
    """
    # Bug fix: the original compared with `url_header is "sas"` — identity
    # against a literal, which is unreliable (and a SyntaxWarning on modern
    # Python). Use equality. The bare try/except that re-raised a generic
    # "@Cham: This is not an option!" ValueError around plain %-formatting
    # was dead weight and has been removed.
    if url_header is None or url_header == "sas":
        url_header = ("https://data.sdss.org/sas/dr13/apogee"
                      "/spectro/redux/r6/stars/l30e/l30e.2")
    url_header = url_header.strip()
    file_ = file_.strip()
    return "%s/%s/%s" % (url_header, location_id, file_)
def pluralize(num, singular):
    """Return the proper plural version.

    Examples:
        >>> pluralize(2, "meme")
        '2 memes'
        >>> pluralize(1, "thing")
        '1 thing'
        >>> pluralize(1, "class")
        '1 class'
        >>> pluralize(0, "class")
        '0 classes'
    """
    if num == 1:
        return f"{num} {singular}"
    # words ending in 's' take "es", everything else takes "s"
    suffix = "es" if singular[-1] == "s" else "s"
    return f"{num} {singular}{suffix}"
def _get_cols(fields, schema):
    """Get column metadata for Google Charts based on field list and schema."""
    typemap = {
        'STRING': 'string',
        'INT64': 'number',
        'INTEGER': 'number',
        'FLOAT': 'number',
        'FLOAT64': 'number',
        'BOOL': 'boolean',
        'BOOLEAN': 'boolean',
        'DATE': 'date',
        'TIME': 'timeofday',
        'DATETIME': 'datetime',
        'TIMESTAMP': 'timestamp',
    }
    cols = []
    for col in fields:
        if schema:
            field = schema[col]
            if field.mode == 'REPEATED':
                chart_type = 'string'
            else:
                chart_type = typemap.get(field.type, 'string')
            cols.append({'id': field.name, 'label': field.name, 'type': chart_type})
        else:
            # This only happens when there were no rows to infer a schema
            # from, so the type is not really important — except that GCharts
            # chokes on a string x string chart, so default to number.
            cols.append({'id': col, 'label': col, 'type': 'number'})
    return cols
def check_port(port):
    """Verifies port value given is valid.

    Args:
        port (int): port number to verify

    Raises:
        ValueError: if port number provided is invalid

    Returns:
        int: port number
    """
    if not 0 <= port <= 65535:
        raise ValueError("Port {} out of range".format(port))
    return port
def show_explore_network_btn(enrichment_results):
    """Shows explore network button after enrichment is done (returns a CSS
    display style dict)."""
    if enrichment_results:
        return {'display': 'inline-block'}
    return {'display': 'none'}
def _drop_image_percentage(angle):
    """Drop probability for an image with the given steering angle.

    We have a lot of images with small steering angles; low angles are
    dropped with a higher percentage so training sees more high-angle
    images.
    """
    penalty = 0.05 * abs(angle)
    drop = 0.5 - penalty
    return drop if drop > 0.0 else 0.0
def _count_number_of_children_recursively(event):
    """Recursively count the number of descendants of an event.

    Args:
        event (json): Json representing the current event.

    Returns:
        The number of children (direct and indirect) of the current event.
    """
    return sum(1 + _count_number_of_children_recursively(child)
               for child in event['children'])
def extract_comments(comments):
    """Utility method for parsing JIRA comments represented as JSON.

    Args:
        comments: A variable containing JIRA comments in JSON representation.

    Returns:
        A string containing all of the JIRA comments tied to an issue, each
        followed by a newline.
    """
    bodies = [comment['body'] + "\n" for comment in comments]
    return "".join(bodies)
def normalise_dict(d):
    """
    Recursively convert dict-like object (eg OrderedDict) into plain dict.
    Sorts list values.
    """
    out = {}
    for k, v in dict(d).items():
        # Bug fix: the original tested hasattr(v, "iteritems"), a Python 2
        # method that does not exist in Python 3, so nested dict-likes were
        # never normalised. Test for the Python 3 mapping protocol instead.
        if hasattr(v, "items"):
            out[k] = normalise_dict(v)
        elif isinstance(v, list):
            out[k] = []
            for item in sorted(v):
                if hasattr(item, "items"):
                    out[k].append(normalise_dict(item))
                else:
                    out[k].append(item)
        else:
            out[k] = v
    return out
def calculate_multiple_choice_task_metrics(pred_dict, labels_dict):
    """Calculate accuracy for multiple choice tasks.

    Args:
        pred_dict: mapping subinstance ids to prediction, where a
            subinstance id is like "0_0", which stands for the 0-th option
            for the 0-th instance
        labels_dict: mapping subinstance ids to labels (1 marks the correct
            option)

    Return:
        accuracy: the percentage of correctly predicted instances
    """
    assert len(pred_dict) == len(labels_dict)
    # per instance, keep the (choice_id, prob) with the highest probability
    best = {}
    for sub_id, prob in pred_dict.items():
        ins_id, choice_id = sub_id.split("_")
        if ins_id not in best or prob > best[ins_id][1]:
            best[ins_id] = (choice_id, prob)
    correct_count = 0
    for sub_id, raw_label in labels_dict.items():
        ins_id, choice_id = sub_id.split("_")
        if int(raw_label) == 1 and choice_id == best[ins_id][0]:
            correct_count += 1
    return correct_count / len(best)
def get_urn_from_raw_update(raw_string):
    """Return the URN of a raw group update.

    Example: urn:li:fs_miniProfile:<id>
    Example: urn:li:fs_updateV2:(<urn>,GROUP_FEED,EMPTY,DEFAULT,false)
    """
    inner = raw_string.split("(")[1]
    return inner.split(",")[0]
def fibonacci(v):
    """Compute the Fibonacci sequence at point v.

    Iterative implementation: the original double recursion took
    exponential time (and recursion depth grew with v); this runs in O(v)
    and returns identical values for v >= 0.
    """
    if v == 0:
        return 0
    if v == 1:
        return 1
    prev, cur = 0, 1
    for _ in range(v - 1):
        prev, cur = cur, prev + cur
    return cur
def gray(px, *weights, cast=int):
    """Converts the pixel to grayscale using the given weights (1 per
    channel by default), casting channel values with *cast* (int by
    default) and returning the weighted average."""
    total = denom = cast(0)
    for idx, channel in enumerate(px):
        value = cast(channel)
        if idx < len(weights):
            total += value * weights[idx]
            denom += weights[idx]
        else:
            # no weight supplied for this channel: weight it 1
            total += value
            denom += 1
    return total / denom
def _format_unit_output(unit):
    """Format a unit for output by the system.

    Format: "<name> (<function_name to enter>)"

    :param unit: dict with 'name' and '_internal_function_' keys
    :return: the formatted string
    """
    name = unit["name"]
    internal = unit["_internal_function_"]
    return name + " (" + internal + ")"
def test_rast(h, f):
    """Sun raster file sniffer: return 'rast' when the header bytes start
    with the Sun raster magic number (0x59A66A95), else None implicitly.

    :param h: leading bytes of the file to sniff
    :param f: file object (unused; signature mirrors imghdr-style tests)
    """
    if h.startswith(b'\x59\xA6\x6A\x95'):
        return 'rast'
def _buildstr(D, transpose=False, replace=None):
    """Construct a string suitable for a spreadsheet.

    D: scalar, 1d or 2d sequence
        For example a list or a list of lists.
    transpose: Bool
        Transpose the data if True.
    replace: tuple or None
        If tuple, it is two strings to pass to the replace method.
        ('toreplace', 'replaceby')
    """
    # Promote a scalar to a 1d sequence (indexing a scalar raises).
    try:
        D[0]
    except (TypeError, IndexError):
        D = [D]
    # Promote a 1d sequence to a 2d one (a single row).
    try:
        D[0][0]
    except (TypeError, IndexError):
        D = [D]
    if transpose:
        D = zip(*D)
    # rows are tab-separated cells, optionally with a substring replacement
    if not replace:
        rows = ['\t'.join([str(v) for v in row]) for row in D]
    else:
        rows = ['\t'.join([str(v).replace(*replace) for v in row]) for row in D]
    S = '\n'.join(rows)
    return S
def enumerate_with_prefix(a_list, prefix='pre_'):
    """Given a list, return a list of prefixed, zero-padded index names.

    The index is padded to the width of len(a_list) as a string, e.g.
    5 -> 1 digit, 15 -> 2 digits, 150 -> 3 digits.
    """
    width = len(str(len(a_list)))
    return [prefix + str(idx).zfill(width) for idx in range(len(a_list))]
def remove_empty_line(text):
    """Remove empty lines within a multiline string.

    Args:
        text (str): multiline string to process

    Returns:
        list: the non-blank lines, in order. (Note: despite the original
        docstring claiming a string, this has always returned a list of
        lines.)
    """
    return [line for line in text.splitlines() if line.strip()]
def FormatThousands(value):
    """Format a numerical value, inserting commas as thousands separators.

    Args:
        value: An integer, float, or string representation thereof. If the
            argument is a float, it is converted to a string using '%.2f'.

    Returns:
        A string with groups of 3 digits before the decimal point (if any)
        separated by commas.

    NOTE: We don't deal with whitespace, and we don't insert commas into
    long strings of digits after the decimal point.
    """
    if isinstance(value, float):
        text = '%.2f' % value
    else:
        text = str(value)
    # split off everything from the decimal point (or exponent) onwards
    if '.' in text:
        head, _, rest = text.partition('.')
        tail = '.' + rest
    elif 'e' in text:
        head, _, rest = text.partition('e')
        tail = 'e' + rest
    else:
        head, tail = text, ''
    sign = ''
    if head.startswith('-'):
        sign, head = '-', head[1:]
    # peel 3-digit groups off the right of the integer part
    while len(head) > 3:
        tail = ',' + head[-3:] + tail
        head = head[:-3]
    return sign + head + tail
def es_par(numero):
    """(num) -> boolean

    Check whether a number is even.

    >>> es_par(10)
    True
    >>> es_par(20)
    True
    >>> es_par(189)
    False

    :param numero: the number to evaluate
    :return: True if the number is even, False otherwise
    """
    return not numero % 2
def match(s: str, substring: str) -> bool:
    """Return True if *substring* occurs within *s*."""
    return substring in s
def weight_pp(perc):
    """Compute the weighted percentage.  The gravity is near 0%.

    Values above 75 pass through unchanged; values above 50 are scaled
    by 0.75; everything else is halved.
    """
    if perc > 75.0:
        return perc
    scale = 0.75 if perc > 50.0 else 0.5
    return perc * scale
def IntToRgb(RGBint: int):
    """Convert a packed integer color value to an RGB tuple.

    :param RGBint: :class:`int` The integer color value (0xRRGGBB layout).
    :returns: :class:`tuple[int,int,int]` (red, green, blue), each 0-255.
    """
    red = (RGBint >> 16) & 0xFF
    green = (RGBint >> 8) & 0xFF
    blue = RGBint & 0xFF
    return red, green, blue
def steering_constraint(steering_angle, steering_velocity, s_min, s_max, sv_min, sv_max):
    """
    Steering constraints, adjusts the steering velocity based on constraints

        Args:
            steering_angle (float): current steering_angle of the vehicle
            steering_velocity (float): unconstraint desired steering_velocity
            s_min (float): minimum steering angle
            s_max (float): maximum steering angle
            sv_min (float): minimum steering velocity
            sv_max (float): maximum steering velocity

        Returns:
            steering_velocity (float): adjusted steering velocity
    """
    # At an angle limit, any command pushing further past it is zeroed.
    pushing_past_min = steering_angle <= s_min and steering_velocity <= 0
    pushing_past_max = steering_angle >= s_max and steering_velocity >= 0
    if pushing_past_min or pushing_past_max:
        return 0.
    # Otherwise clamp the command into the velocity band.
    if steering_velocity <= sv_min:
        return sv_min
    if steering_velocity >= sv_max:
        return sv_max
    return steering_velocity
def stateIsChange(stateOld, stateNew):
    """Report whether a state transition occurred.

    Returns False when either state is None or the states are equal;
    returns True (and logs the new value) when they differ.

    Fixes: `== None` replaced with `is None`; the original also rebound
    the local `stateOld = stateNew`, which had no effect outside this
    function — the dead assignment is removed.
    """
    if stateOld is None or stateNew is None:
        return False
    if stateOld != stateNew:
        print('value is changed {}'.format(stateNew))
        return True
    return False
def get_if_exist(data, keys):
    """
    Recursively get a value from a nested dictionary

    Parameters
    ----------
    data : dict
        The (nested) dictionary
    keys : list
        The list of keys to fetch

    Returns
    -------
    any or None
        The value at data[keys[0]][keys[1]] etc. or None if a key is
        not found.
    """
    head, rest = keys[0], keys[1:]
    if head not in data:
        return None
    # Descend one level per call until the key chain is exhausted.
    return data[head] if not rest else get_if_exist(data[head], rest)
def get_pbf_url(region, subregion):
    """Returns the URL to the PBF for the region / subregion.

    Parameters
    ----------------------
    region : str
    subregion : str

    Returns
    ----------------------
    pbf_url : str
    """
    base_url = 'https://download.geofabrik.de'
    # A subregion adds one extra path component under the region.
    path = region if subregion is None else f'{region}/{subregion}'
    return f'{base_url}/{path}-latest.osm.pbf'
def points_from_bbox(minx, miny, maxx, maxy):
    """Construct polygon coordinates in page representation from a
    numeric list representing a bounding box."""
    # Corners in page order: top-left, top-right, bottom-right, bottom-left.
    corners = ((minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy))
    return " ".join("%i,%i" % corner for corner in corners)
def as_list(x, length=1):
    """Return x if it is a list, else a list holding x repeated *length* times."""
    return x if isinstance(x, list) else length * [x]
def setup_java_class(content_to_add):
    """Return an example Java class with *content_to_add* embedded in
    the body of its main method."""
    template = """
    public class Lambda {
        public static void main(String args[]) {
            %s
        }
    }
    """
    return template % content_to_add
def dash_to_slash(datetime_str: str) -> str:
    """Convert 'y-m-d time' to 'yy/mm/dd time', truncating a 4-digit
    year (day recorded in year format) to its last two digits."""
    date_part, time_part = datetime_str.split()
    fields = date_part.split('-')
    # Keep only the last two digits of a long (e.g. 4-digit) year.
    if len(fields[0]) > 2:
        fields[0] = fields[0][-2:]
    return '{} {}'.format('/'.join(fields), time_part)
def unquote_header_value(value, is_filename=False):
    """Unquotes a header value.  Reversal of :func:`quote_header_value`.

    This does not use the real un-quoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :param is_filename: treat the value as a filename; UNC paths get
        their quotes stripped but are otherwise returned untouched.
    """
    # Only values wrapped in double quotes need any work.
    if not value or value[0] != '"' or value[-1] != '"':
        return value
    # Strip the quotes but do NOT apply RFC-strict unquoting: IE (and
    # some other browsers) upload files with "C:\foo\bar.txt" verbatim.
    inner = value[1:-1]
    if is_filename and inner[:2] == '\\\\':
        # A leading double backslash marks a UNC path; unescaping would
        # collapse it to a single slash and then _fix_ie_filename()
        # doesn't work correctly.  See #458.
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def bin_dec(bin):
    """Conversion binary -> decimal.

    Interpret *bin* as a big-endian list of bits (most significant bit
    first) and return its decimal value as a float.  Needed to calculate
    decimal variable value from binary coded genome.

    Fix: the original called ``bin.reverse()``, mutating the caller's
    list in place as a side effect; the input is now left untouched.
    """
    dec = 0.0
    # Walk from the least significant (last) bit upward.
    for i, bit in enumerate(reversed(bin)):
        dec += bit * (2 ** i)
    return dec
def bfs(graph, start, goal):
    """
    Breadth first search on a given graph.

    Returns the shortest path from ``start`` to ``goal`` as a list of
    nodes, or ``[]`` when the goal is unreachable.

    Fix: the original returned ``[]`` when ``start == goal``; the
    one-node path is now returned for that case.

    >>> bfs({'A': set(['B']),
    ...      'B': set(['C']),
    ...      'C': set()}, 'A', 'C')
    ['A', 'B', 'C']
    """
    if start == goal:
        return [start]
    queue = [(start, [start])]
    while queue:
        (vertex, path) = queue.pop(0)
        # Skip nodes already on the current path to avoid cycles.
        for next_node in graph[vertex] - set(path):
            if next_node == goal:
                return path + [next_node]
            queue.append((next_node, path + [next_node]))
    return []
def align_on_left(txt: str) -> str:
    """
    Remove all leading/trailing spaces for each line.
    """
    # str.strip(" ") removes spaces (only) from both ends of each line.
    return "\n".join(line.strip(" ") for line in txt.split("\n"))
def _get_jsonld_property(jsonld, property, default=None): """Return property value from expanded JSON-LD data.""" value = jsonld.get(property) if not value: return default if isinstance(value, list) and len(value) == 1 and isinstance(value[0], dict) and "@value" in value[0]: value = value[0]["@value"] return value
def get_min_max_words(input):
    """Return a (shortest, longest) tuple of the words in *input*,
    using min/max with ``len`` as the key function."""
    shortest = min(input, key=len)
    longest = max(input, key=len)
    return (shortest, longest)
def _in_ranges(x, bins): """Function for pandas.apply() that assigs values into bins """ return [((x >= lower) & (x <= upper)) for lower, upper in bins]
def get_fuzzer_setting(fuzzer_settings, fuzzer_setting_name):
    """Read the fuzzer setting from the list of dict.

    Returns the first dict whose "Name" entry matches, or None.
    """
    matches = (setting for setting in fuzzer_settings
               if "Name" in setting and setting["Name"] == fuzzer_setting_name)
    return next(matches, None)
def mapLists(first, second):
    """
    Make a dictionary from two lists with elements of the first as the
    keys and second as values.  If there are more elements in the first
    list, they are assigned None values and if there are more in the
    second list, they're dropped.
    """
    mapping = {}
    n_values = len(second)
    for i, key in enumerate(first):
        # Pad missing values with None once the second list runs out.
        mapping[key] = second[i] if i < n_values else None
    return mapping
def non_increasing(py_list):
    """Check that the list is monotonically non-increasing, i.e. no
    element is smaller than the one that follows it."""
    pairs = zip(py_list, py_list[1:])
    return all(earlier >= later for earlier, later in pairs)
def expandCigar(cigar):
    """
    Turns the abbreviated cigar into the full array

    0 = M
    1 = I
    2 = D
    """
    # Repeat each operation code `count` times, in order.
    return [op for op, count in cigar for _ in range(count)]
def underscore_to_camel(string):
    """ Converts an underscored_name @string to UnderscoredName

        @string: #str object
        ..
            from vital.tools import underscore_to_camel

            underscore_to_camel("ted_koppel")
            # -> TedKoppel
        ..
    """
    return "".join(map(str.capitalize, string.split("_")))
def bisect(slist, value):
    """
    Use the bisection method to find the index of a word in a list.
    Precondition: list is sorted

    Returns the index of *value* in *slist*, or None when the value is
    absent (or the list is empty / unsorted).

    Fixes: the original looped forever when *value* was not in the list
    (``start = middle`` never advanced the window) and raised IndexError
    on an empty list; both cases now return None.
    """
    if not all(slist[i] <= slist[i+1] for i in range(len(slist)-1)):
        print('Please supply a sorted list.')
        return None
    start = 0
    end = len(slist)
    while start < end:
        middle = start + (end - start) // 2
        if slist[middle] == value:
            return middle
        if slist[middle] > value:
            end = middle
        else:
            # Advance past the probe so the window always shrinks.
            start = middle + 1
    return None
def homogenize(xyz, w=1.0):
    """Homogenise a list of vectors.

    Parameters
    ----------
    xyz : sequence of points or vectors
        A list of XYZ triplets.
    w : float, optional
        Homogenisation parameter.  Use ``1.0`` for points, and ``0.0``
        for vectors.

    Returns
    -------
    list[[float, float, float, `w`]]
        Homogenised data: each component scaled by ``w``, with ``w``
        appended (``0.0`` appended unscaled when ``w`` is falsy).

    Examples
    --------
    >>> vectors = [[1.0, 0.0, 0.0]]
    >>> homogenize(vectors)
    [[1.0, 0.0, 0.0, 1.0]]
    """
    if w:
        return [[x * w, y * w, z * w, w] for x, y, z in xyz]
    return [[x, y, z, 0.0] for x, y, z in xyz]
def determine_issue_types(warnings):
    """
    Get a list of issue types.

    A single (non-list) issue type is wrapped in a one-element list.

    :rtype: list
    """
    issue_types = warnings["Report"]["IssueTypes"]["IssueType"]
    return issue_types if isinstance(issue_types, list) else [issue_types]
def factorial_3(n, acc=1):
    """Iterative factorial with an accumulator.

    Equivalent to the tail-recursive form ``factorial(n, acc) =
    factorial(n - 1, acc * n)`` with the recursion unrolled into a loop.
    """
    while n >= 2:
        n, acc = n - 1, acc * n
    return acc
def _CalcDelta(from_ts, to_ts): """ Calculates the delta between two timestamps. """ return to_ts[0] - from_ts[0] + (to_ts[1] - from_ts[1]) / 1000000.0
def str_to_list(string):
    """
    Parse the string representation of a list of numbers.

    Parameters
    ----------
    string
        String representation of a list, e.g. "[1.0,2.5]".

    Returns
    -------
    list of float
        The parsed values; [] when the input is not bracketed or the
        brackets are empty.
    """
    # Fix: the original tested `"[" and "]" in string`, which only
    # checked for "]" because "[" is a truthy constant.
    if "[" in string and "]" in string:
        inner = string[1:-1]
        # Fix: "[]" used to crash on float('') — treat it as empty.
        if not inner.strip():
            return []
        return [float(part) for part in inner.split(",")]
    return []
def getsize(datadescriptor):
    """Get the size of a data descriptor tuple.

    Returns (0, size) on success or (15, message) for an unsupported
    descriptor kind.
    """
    # Index of the size field inside the descriptor payload, per kind.
    size_index = {'reg': 2, 'mem': 1, 'heap': 2, 'perp': 2, 'pmem': 2}
    kind = datadescriptor[0]
    if kind not in size_index:
        return (15, "Not a supported destination type.")
    return (0, datadescriptor[1][size_index[kind]])