def is_block_there(x: int, y: int, all_blocks) -> bool:
    """Checks whether a block at (x, y) is present among the landed blocks."""
    if len(all_blocks) == 0:
        return False
    for block in all_blocks:
        # Compare against the y coordinate, not the literal string 'y'
        if block['x'] == x and block['y'] == y:
            return True
    return False
def status2css(status): """Converts a status into css style""" return {"0": "fa fa-circle-o ", "1": "fa fa-dot-circle-o", "2": "fa fa-circle"}[ status ]
def _vasp5_lines_is_selective_dynamics(poscar_lines): """Returns true if selective dynamics has been turned on for the poscar files Parameters ---------- poscar_lines : list of str Returns ------- bool """ tag=poscar_lines[7].split()[0][0] if tag in "sS": return True else: return False
def check_if_numbers_are_negatives(quantity, price): """ Helper function to check if any negative numbers were supplied in the JSON object Returns True if any number is less than 0, else returns False """ if quantity < 0 or price < 0 : return True return False
def compute_data_type_dist_score(stats, columns, col_name):
    """
    Creates a quality score based on the data type distribution. The score is based on the
    difference between the number of values with the "main" data type and the number of
    values with all other data types.

    :param stats: The stats extracted up until this point for all columns
    :param columns: All the columns
    :param col_name: The name of the column we should compute the new stats for

    :return: Dictionary containing:
        data_type_distribution_score: A quality score based on the data type distribution,
        ranging from 0 to 10, where 10 means all values share the most common data type.
    """
    vals = stats[col_name]['data_type_dist'].values()
    principal = max(vals)
    total = len(columns[col_name])
    data_type_dist_score = (total - principal) / total
    return {
        'data_type_distribution_score': round(10 * (1 - data_type_dist_score)),
        'data_type_distribution_score_description': """
        This score indicates the amount of data that is not of the same data type as the most
        commonly detected data type in this column. Note: the most commonly occurring data type
        is not necessarily the type mindsdb will use to label the column when learning or predicting.
        """,
    }
def func_xy_pq(x, y, *, p="p", q="q"): """func. Parameters ---------- x, y: float p, q: str, optional Returns ------- x, y: float p, q: str """ return x, y, None, None, None, p, q, None
def title(word, capitalize=False):
    """Convert a string to a title format, where the words are capitalized.

    Args:
        word (str): The string to format.
        capitalize (bool): If True, capitalize every word; otherwise only the first.

    Returns:
        word (str): The formatted string.
    """
    def _capitalize(w):
        return '{0}{1}'.format(w[0].upper(), w[1:])

    if word is None:
        return ''
    words = word.split(' ')
    for i, w in enumerate(words):
        if i == 0 or capitalize:
            words[i] = _capitalize(w)
    return ' '.join(words)
def iced_vim_repr(x): """ >>> iced_vim_repr('foo') '"foo"' >>> iced_vim_repr('"foo"') '"\\\\"foo\\\\""' >>> iced_vim_repr('foo\\n') '"foo\\\\n"' >>> iced_vim_repr('foo\d') '"foo\\\\\\\\d"' >>> iced_vim_repr('foo\\d') '"foo\\\\\\\\d"' >>> iced_vim_repr(123) '123' >>> iced_vim_repr(['foo', 123]) '["foo",123]' >>> iced_vim_repr({'foo': 123}) '{"foo":123}' """ t = type(x) if t is str: return '"' + x.replace('\\', '\\\\').replace('\n', '\\n').replace('"', '\\"') + '"' elif t is int: return str(x) elif t is list: ret = [iced_vim_repr(e) for e in x] return '[' + ','.join(ret) + ']' elif t is dict: ret = [iced_vim_repr(k) + ':' + iced_vim_repr(v) for k,v in x.items()] return '{' + ','.join(ret) + '}' raise Exception
def correct_sentence(text: str) -> str: """ returns a corrected sentence which starts with a capital letter and ends with a dot. """ text = text[0].capitalize() + text[1:] if text[-1] != '.': text += '.' return text
def check_power_of_2(value: int) -> bool: """ Returns `True` if `value` is power of 2 else `False`. See code explanation in [StackOverflow ](https://stackoverflow.com/questions/57025836). """ return (value != 0) and (not value & (value - 1))
def convert_24hour(time):
    """
    Takes 12 hour time as a string and converts it to 24 hour time.
    """
    time_raw, meridiem = time.split()
    if len(time_raw.split(':')) < 2:
        hour = time_raw
        minute = '00'
    else:
        hour, minute = time_raw.split(':')
    if meridiem.lower() in ['am', 'noon']:
        return hour + ':' + minute
    elif meridiem.lower() == 'pm':
        # Convert the hour part (not the full "h:mm" string) to its 24 hour value
        return str(int(hour) + 12) + ':' + minute
def create_audit_with_control( program, control_mapped_to_program, audit ): """Create Program and Control, map Control to Program, create Audit under Program via REST API and return dictionary of executed fixtures. """ return {"program": program, "control": control_mapped_to_program, "audit": audit}
def final_amt(p, r, n, t): """ Apply the compound interest formula to p to produce the final amount. """ a = p * (1 + r/n) ** (n*t) return a # This is new, and makes the function fruitful.
def _docstring_summary_default(docstring: str) -> str: """ This function was taken from codesearchnet. Get the first lines of the documentation comment up to the empty lines. """ if '\n\n' in docstring: return docstring.split('\n\n')[0] elif '@' in docstring: return docstring[:docstring.find('@')] # This usually is the start of a JavaDoc-style @param comment. return docstring
def type_contains(typ, needle): """(haystack, needle) -> bool NOTE: this is unlikely to work if the type you're searching for is anything but one of the simple CWL types (int, File, ...). To maximise the chances of correctly detecting the type, use the fully-expanded form of the type, i.e. `{"type": "array", "items": "int"}` rather than `"int[]"`. """ if typ == needle: return True if isinstance(typ, str): if typ.endswith("?"): return needle == "null" or type_contains(typ[:-1], needle) if typ.endswith("[]"): return type_contains({"type": "array", "items": typ[:-2]}, needle) return isinstance(needle, str) and needle == typ assert isinstance(typ, dict) if typ["type"] == "array": assert "items" in typ if isinstance(typ["items"], str): return type_contains(typ["items"], needle) return any(type_contains(item, needle) for item in typ["items"]) if typ["type"] == "record": return any( type_contains(field["type"], needle) for field in ( typ["fields"] if isinstance(typ["fields"], list) else typ["fields"].values() ) ) if typ["type"] == "enum": return needle.get("type") == "enum" and set(needle["symbols"]) == set(typ["symbols"]) raise TypeError(f"Invalid (or unknown) type: {typ!r}")
def get_grid(rows, columns): """Get grid with number rows and columns.""" return [[-1 for _ in range(columns)] for _ in range(rows)]
def key_split(s): """ >>> key_split('x-1') 'x' >>> key_split('x-1-2-3') 'x' >>> key_split(('x-2', 1)) 'x' >>> key_split(None) 'Other' """ if isinstance(s, tuple): return key_split(s[0]) try: return s.split('-', 1)[0] except: return 'Other'
def freq_to_chan(frequency, bandwidth, n_chans):
    """
    Returns the channel number where a given frequency is to be found.

    @param frequency : frequency of channel in same units as bandwidth.
    @type  frequency : float

    @param bandwidth : upper limit of spectrometer passband
    @type  bandwidth : float

    @param n_chans : number of channels in the spectrometer
    @type  n_chans : int

    @return: channel number (int)
    """
    if frequency < 0:
        frequency = bandwidth + frequency
    if frequency > bandwidth:
        raise RuntimeError("that frequency is too high.")
    return round(float(frequency) / bandwidth * n_chans) % n_chans
def update(dct, dct_merge): """Recursively merge dicts.""" for key, value in dct_merge.items(): if key in dct and isinstance(dct[key], dict): dct[key] = update(dct[key], value) else: dct[key] = value return dct
def mag(f, u=None, v=None): """returns the magnification value (without the sign) from focal-length and either one of the value of object or image distance Parameters ---------- f : float focal length u : float, optional object distance v : float, optional image distance Returns ------- m : float magnification (without the sign) """ if v is not None: return (v - f)/f else: return f/(u - f)
def insertion_sort(items: list) -> list: """ Examples: >>> items = [random.randrange(0, 100) for _ in range(100)] >>> assert(insertion_sort(items.copy()) == sorted(items)) """ for unsorted_start_idx in range(1, len(items)): insertion_val = items[unsorted_start_idx] ## LINEAR SEARCH for insertion idx ## insertion_idx = unsorted_start_idx while insertion_idx > 0 and items[insertion_idx - 1] > insertion_val: insertion_idx -= 1 ## BISECT LEFT ## # Shift elements greater than `insertion_val` one position to the right for src_idx in reversed(range(insertion_idx, unsorted_start_idx)): items[src_idx + 1] = items[src_idx] items[insertion_idx] = insertion_val return items
def python_trailing(n): """Count the number of trailing zero bits in abs(n).""" if not n: return 0 t = 0 while not n & 1: n >>= 1 t += 1 return t
def is_in(obj, l):
    """
    Checks whether an object is one of the items in the list.

    This is different from ``in`` because ``in`` uses __cmp__ when present.
    Here we compare based on object identity instead.
    """
    for item in l:
        if item is obj:
            return True
    return False
def bin_extend_in_U2(binint, amount): """ Extends number in U2 :param binint: Number to extend in U2 :type binint: string :param amount: number of characters to be added :type amount: int :return: Number in U2 :rtype: string """ for _ in range(amount): binint = binint[0] + binint return binint
def enumsort(things): """ sorts things by value if all same type; otherwise by name """ if not things: return things sort_type = type(things[0]) if not issubclass(sort_type, tuple): # direct sort or type error if not all((type(v) is sort_type) for v in things[1:]): raise TypeError('cannot sort items of different types') return sorted(things) else: # expecting list of (name, value) tuples sort_type = type(things[0][1]) try: if all((type(v[1]) is sort_type) for v in things[1:]): return sorted(things, key=lambda i: i[1]) else: raise TypeError('try name sort instead') except TypeError: return sorted(things, key=lambda i: i[0])
def decodeParameterValue(value): """ RFC6868 parameter decoding. """ # Test for encoded characters first as decoding is expensive and it is better to # avoid doing it if it is not required (which is the common case) if value is not None and "^" in value: decoded = [] last = '' for c in value: if last == '^': if c == 'n': decoded.append('\n') elif c == '\'': decoded.append('"') elif c == '^': decoded.append('^') c = '' else: decoded.append('^') decoded.append(c) elif c != '^': decoded.append(c) last = c if last == '^': decoded.append('^') return "".join(decoded) else: return value
def weight(prefix): """ Return vm weight (default to 2) """ return { 'foo' : 8, 'foobar' : 8, 'bar' : 3, }.get(prefix, 2)
def solution(n: int = 4000000) -> int: """ Returns the sum of all even fibonacci sequence elements that are lower or equal to n. >>> solution(10) 10 >>> solution(15) 10 >>> solution(2) 2 >>> solution(1) 0 >>> solution(34) 44 """ even_fibs = [] a, b = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(b) a, b = b, a + b return sum(even_fibs)
def normalize_url(url): """Make sure the url is in correct form.""" result = url if not result.endswith("/"): result = result + "/" return result
def rmax(a, b): """ Max function for real numbers Parameters ---------- a: scalar b: scalar Returns ------- scalar """ # seems to be much faster than the built-in return a if a > b else b
def post_info_as_dict(post_info): """ converts list into dictionary (key,value,link) """ key, value, link = post_info return {key: value, "link": link}
def sliced_data(data: list, start=None, end=None) -> list: """Dataset safe slicing method Parameters ---------- data : list Data stored in list of dicts start : [type], optional Start index, by default None end : [type], optional End index, by default None Returns ------- list Sliced data stored in list of dicts """ if start: data = data[start:] if end: data = data[:end] return data
def beautifyName(person): """remove domain prefix like mydomain\\user and lowercase everything""" person = person.lower() if '\\' in person: person = person.split('\\')[1] if '@' in person: person = person.split('@')[0] return person
def CutOnCondition(Kz): """Determine if mode is cuton based on wave number Must be real to propagate (pos. imag.: decay exponential, neg.: expand) Must be negative for upstream propagation Kz --> axial wavenumber for current mode """ cuton = 'No' #Test if no imaginary part (propagation) if Kz.imag == 0: #Test if upstream propagation if Kz.real < 0: cuton = 'Yes' return cuton
def is_cubefree_string(s): """check if s is a cubefree string, i.e. there is no substring of the form ttt""" l = len(s) for i in range(l - 2): for j in range(1, (l - i) // 3 + 1): if s[i : i + 2 * j] == s[i + j : i + 3 * j]: return False return True
def _freeze(thing): """ Freezes something to a hashable item. """ if isinstance(thing, dict): return frozenset([(_freeze(k), _freeze(v)) for k, v in thing.items()]) elif isinstance(thing, list): return tuple([_freeze(i) for i in thing]) return thing
def is_target_in_tags(tag, topic_list): """Check if tags contains target""" return True if set(tag).intersection(topic_list) else False
def norme_inv_angle(angle):
    """Inverse normalisation of the angle.

    Expects an angle between -180 and 180 and returns an angle between 0 and 360.
    """
    if angle < 0:
        angle += 360
    return angle
def clean_hotel_room_name(string): """ """ if string is not None: r = string.strip() else: r = "-" return r
def mag(initial, current):
    """
    Calculates the magnification of a specified value

    **Parameters**

        initial: *float*
            initial value (magnification of 1)
        current: *float*
            current value

    **Returns**

        magnification: *float*
            the magnification of the current value
    """
    return float(initial) / float(current)
def contains(begin, end, node): """Check node is contained between begin and end in a ring.""" if begin < node <= end: return True elif (begin > end) and (node > begin or node < end): return True return False
def training_progress(progress): """ Translates a string in format {x}, {x}E or {x}B to a Blocks extension calling frequency :param progress: :return: """ progress = progress.lower() if progress.endswith('e'): return {"every_n_epochs": int(progress[:-1])} elif progress.endswith('b'): return {"every_n_batches": int(progress[:-1])} else: return {"every_n_epochs": int(progress)}
def seq_mult_scalar(a, s): """Takes a list of numbers a and a scalar s. For the input a=[a0, a1, a2,.., an] the function returns [s * a0, s * a1, s * a2, ..., s * an]""" return [s * i for i in a]
def read(buf, n): """ :type buf: Destination buffer (List[str]) :type n: Number of characters to read (int) :rtype: The number of actual characters read (int) """ # fake api def read4(buf): pass idx = 0 while n >= 4: read4_buf = [''] * 4 read4(read4_buf) for i in range(4): buf[idx] = read4_buf[i] idx += 1 n -= 4 if n > 0: read4_buf = [''] * 4 read4(read4_buf) for i in range(n): buf[idx] = read4_buf[i] idx += 1 return idx
def try_fix_num(n): """ Return ``n`` as an integer if it is numeric, otherwise return the input """ if not n.isdigit(): return n if n.startswith("0"): n = n.lstrip("0") if not n: n = "0" return int(n)
def check_intersection(set_a, set_b): """ check the intersection between 2 sets set_a: set set_b: set return: boolean """ common_set = set_a.intersection(set_b) if (len(common_set) == 0): return False return True
def how_many_days(month_number): """Returns the number of days in a month. WARNING: This function doesn't account for leap years! """ days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] result = days_in_month[month_number - 1] return result
def get_logging_cfg_file(log_level): """ Retrieves the logging configuration file. :param log_level: Log level [ 'debug' | 'info' | 'off' ] :return: Logging configuration file """ logging_cfg_file = 'logging.json' cfg_files = { 'debug': 'logging_debug.json', 'info': 'logging_off.json', 'off': 'logging_off.json' } if log_level in cfg_files: logging_cfg_file = cfg_files[log_level] return logging_cfg_file
def _indent(string, times=1): """ A dumb version of :func:`textwrap.indent` from Python 3.3. """ return "\n".join(" " * (4 * times) + line for line in string.splitlines())
def _isStr(s): """ Internal method to determine if an object is a string """ return isinstance(s, type('')) or isinstance(s, type(u''))
def MapNestedDict(data, func): """Map a nested dictionary with specified func @example: >>> a = {"a": {"b": 1 , "c": 2 }, "d": 3 } >>> MapNestedDict(a, str) == {'a': {'b':'1', 'c':'2'}, 'd':'3'} True """ if not isinstance(data, dict): return func(data) return {k:MapNestedDict(e, func) for k, e in data.items()}
def price_format_conversion(digital_value, rounding_threshold):
    """
    example:
        >>> price_format_conversion(0.000000082725, 9)
        '0.000000082'
    """
    if rounding_threshold <= 0:
        return str(int(digital_value))
    string_value = str(float(digital_value))
    if 'e' in string_value:
        # Expand scientific notation (e.g. '8.2725e-08') into a plain decimal string
        mantissa, exponent = string_value.split('e-')
        leading_zeros = '0' * (int(exponent) - 1)
        digits = mantissa.replace('.', '')
        string_value = '0.' + leading_zeros + digits
        return string_value[:rounding_threshold + 2]
    else:
        integer_part, fraction_part = string_value.split('.')
        return integer_part + '.' + fraction_part[:rounding_threshold]
def kostraFagdataFilter(mittfilter=None):
    """Returns a filter template for NVDB fagdata (feature data) for the KOSTRA report"""
    # Use None as the default to avoid sharing one mutable dict between calls
    if mittfilter is None:
        mittfilter = {}
    if 'vegsystemreferanse' not in mittfilter:
        mittfilter['vegsystemreferanse'] = 'Fv'
    if 'tidspunkt' not in mittfilter:
        mittfilter['tidspunkt'] = '2021-12-15'
    return mittfilter
def split_inp(regex):
    """splits the regex"""
    list_of_op = ["*", "?", "+"]
    close_brac = 0
    front, end, opr = None, None, None
    flag = False
    # if the starting part is a square bracket group
    if regex[0] == "[":
        close_brac = regex.find("]")
        front = regex[:close_brac + 1]
        end = regex[close_brac + 1:]
        opr = "["
        flag = True
    # if the starting part is a parenthesised group
    elif regex[0] == "(":
        close_brac = regex.find(")")
        front = regex[:close_brac + 1]
        end = regex[close_brac + 1:]
        flag = True
        opr = "("
    # any other single character
    else:
        front = regex[0]
        end = regex[1:]
    # if the next character is a repetition operator
    if len(regex) > 1 and (close_brac + 1) < len(regex) and regex[close_brac + 1] in list_of_op:
        front = regex[:close_brac + 1]
        end = regex[close_brac + 2:]
        opr = regex[close_brac + 1]
    return [front, opr, end]
def _device_matcher(device): """Check if device is monsoon. Method copied from official Monsoon API. """ # pylint: disable=bare-except try: return device.idVendor == 0x2AB9 and device.idProduct == 0x0001 except: return False
def split_ids(ids, existing=None): """Split the child or parent ID fields from the MAG FoS ORM. Args: ids (str): Comma-delimited string of ids existing (set): Only return IDs found in this set. Returns: ids (set): A set of ids split out of the input. """ if ids is None: return set() found = set(int(x) for x in ids.split(',')) if existing is None: existing = found missing = found - existing return found - missing
def translate_name(name): """Return stream name from Mateu+2018""" name_dict = {'atlas': 'ATLAS', 'acheron': 'Ach', 'cocytos': 'Coc', 'gd1': 'GD1', 'kwando': 'Kwa', 'lethe': 'Let', 'molonglo': 'Mol', 'murrumbidgee': 'Mur', 'ngc5466': 'NGC5466', 'ophiuchus': 'Oph', 'orinoco': 'Ori', 'ps1a': 'PS1A', 'ps1b': 'PS1B', 'ps1c': 'PS1C', 'ps1d': 'PS1D', 'ps1e': 'PS1E', 'pal5': 'Pal5', 'sangarius': 'San', 'scamander': 'Sca', 'styx': 'Sty', 'tri': 'TriPis'} return name_dict[name]
def check_dict_of_str_dict(input_dict, contains_type=False): """ Check input dict has typing Dict[str, dict] Args: input_dict (dict): Dict to check. contains_type (bool): Check if sub dict contains key 'type'. Returns: Bool. """ if not isinstance(input_dict, dict): return False for key, value in input_dict.items(): if not isinstance(key, str): return False if not isinstance(value, dict): return False if contains_type and 'type' not in value: return False return True
def _call_callables(d):
    """
    Helper to realize lazy scrubbers, like Faker, or global field-type scrubbers
    """
    # Use a conditional expression instead of the `and/or` trick so that a falsy
    # return value from the callable is kept rather than replaced by the callable itself.
    return {k.name: (v(k) if callable(v) else v) for k, v in d.items()}
def get_max_value_key(dic): """Fast method to get key for maximum value in dict.""" v = list(dic.values()) k = list(dic.keys()) return k[v.index(max(v))]
def binary_search(arr, item): """ Binary Search Complexity: O(log(n)) Only works on sorted arrays """ first = 0 last = len(arr)-1 found = False # Note that first and last will get closer! while first <= last and not found: # Divide problem set mid = (first+last)//2 if arr[mid] == item: found = True else: # Decide which half to search next if item < arr[mid]: last = mid - 1 else: first = mid + 1 return found
def _parse_path(error_path): """Parses path to error into javascript json index reference notation""" final_string = '' for field in error_path: if isinstance(field, int): final_string += f'[{field}]' else: final_string += f'.{field}' if final_string.startswith("."): final_string = final_string[1:] return final_string
def transform1(x):
    """
    Given an input value, transforms data with transformation logic 1
    Returns the transformed value
    """
    x = int(x)
    # Transform into 'fizzbuzz' for multiples of 15
    if x % 15 == 0:
        return 'fizzbuzz'
    # Transform into 'buzz' for multiples of 5
    elif x % 5 == 0:
        return 'buzz'
    # Transform into 'fizz' for multiples of 3
    elif x % 3 == 0:
        return 'fizz'
    # If no criteria is met, then return the original value
    else:
        return x
def pattern_from_multiline(multiline, pattern): """Return only lines that contain the pattern Parameters ---------- multiline - multiline str pattern - str Returns ------- multiline str containing pattern """ return "\n".join([line for line in multiline.splitlines() if pattern in line])
def get_dbot_level(threat_level_id: str) -> int: """ MISP to DBOT: 4 = 0 (UNDEFINED to UNKNOWN) 3 = 2 (LOW to SUSPICIOUS) 1 | 2 = 3 (MED/HIGH to MALICIOUS) Args: threat_level_id (str): Returns: int: DBOT score """ if threat_level_id in ('1', '2'): return 3 if threat_level_id == '3': return 2 if threat_level_id == '4': return 0 return 0
def get_genes_xml(gene_data, gene_ids, dataseries, offset, metric): """Returns XML nodes for all predicted gene from one taxon. Args: gene_data (defaultdict[str,defaultdict[str,dict[str,float]]]): outer key is gene identifier, middle key is function identifier, inner key is in [metric, 'count', 'identity', 'coverage', 'Length', 'Completeness'], value is float. gene_ids (list of str): gene identifiers dataseries (list of str): either sample identifiers or function identifiers, depending on profile type (functional or taxonomic) offset (int): number of starting tabs metric (str): scoring metric Returns: ret_val (str): XML node attribute_values (defaultdict[str,dict[str,float]]): outer key is one of dataseries members, inner key is in [metric, 'count', 'identity' 'hit_count'], value is float. """ # gene data: gene_data[gene_id][function][parameter] = parameter_value ret_val = '' for gene_id in gene_ids: ret_val += '\t'*offset + '<node name="' + gene_id + '">\n' offset += 1 if metric != 'readcount': ret_val += '\t'*offset + '<readcount>' for datapoint in dataseries: if datapoint in gene_data[gene_id]: ret_val += '<val>' + gene_data[gene_id][datapoint]['count'] + '</val>' else: ret_val += '<val>0</val>' ret_val += '</readcount>\n' ret_val += '\t'*offset + '<' + metric + '>' for datapoint in dataseries: if datapoint in gene_data[gene_id]: ret_val += '<val>' + gene_data[gene_id][datapoint][metric] + '</val>' else: ret_val += '<val>0</val>' ret_val += '</' + metric + '>\n' ret_val += '\t'*offset + '<coverage>' for datapoint in dataseries: if datapoint in gene_data[gene_id]: ret_val += '<val>' + gene_data[gene_id][datapoint]['coverage'] + '</val>' else: ret_val += '<val>0</val>' ret_val += '</coverage>\n' ret_val += '\t'*offset + '<identity>' for datapoint in dataseries: if datapoint in gene_data[gene_id]: ret_val += '<val>' + gene_data[gene_id][datapoint]['identity'] + '</val>' else: ret_val += '<val>0</val>' ret_val += '</identity>\n' ret_val += '\t'*offset + '<Length>' for datapoint in dataseries: if datapoint in gene_data[gene_id]: ret_val += '<val>' + gene_data[gene_id][datapoint]['Length'] + '</val>' else: ret_val += '<val>0</val>' ret_val += '</Length>\n' ret_val += '\t'*offset + '<Completeness>' for datapoint in dataseries: if datapoint in gene_data[gene_id]: ret_val += '<val>' + gene_data[gene_id][datapoint]['Completeness'] + '</val>' else: ret_val += '<val>0</val>' ret_val += '</Completeness>\n' ret_val += '\t'*offset + '<best_hit>' for datapoint in dataseries: if datapoint in gene_data[gene_id] and 'Best hit' in gene_data[gene_id][datapoint]: ret_val += '<val href="' + gene_data[gene_id][datapoint]['Best hit'] + '">'\ + gene_data[gene_id][datapoint]['Best hit'] + '</val>' else: ret_val += '<val></val>' ret_val += '</best_hit>\n' offset -= 1 ret_val += '\t'*offset + '</node>\n' return ret_val
def power(x, y): """ Question 5.7: Given a double x and an integer y, return x ^ y """ result = 1.0 # handling negative case if y < 0: x = 1.0 / x y = -y while y: if y & 1: result *= x x *= x y >>= 1 return result
def sort_categories(a, b, categories_list):
    """Sorts a list of dictionaries with category keys according to their value
    and order in the categories_list. If not found, alphabetic order is used.
    """
    # list.index() raises ValueError for a missing category, so fall back to -1
    try:
        index_a = categories_list.index(a["category"])
    except ValueError:
        index_a = -1
    try:
        index_b = categories_list.index(b["category"])
    except ValueError:
        index_b = -1
    if index_a < 0 and index_b < 0:
        # Neither category is listed: compare the category names alphabetically
        index_a = a['category']
        index_b = b['category']
    if index_b < index_a:
        return 1
    if index_b > index_a:
        return -1
    return 0
def is_prime(n):
    """returns True if n is prime, False otherwise"""
    # 0, 1 and negative numbers are not prime
    if n < 2:
        return False
    for i in range(2, n):
        if n % i == 0:
            # not a prime number
            return False
    # if we reached so far, the number should be a prime number
    return True
def remove_unnecessary_punctuation(list_check): """ Removes any unnecessary punctuation in the list such as '', '-', ' ' as these disrupt link pattern :param list_check: list that is going to be checked :return: list_check after removing any redundancies """ # list to store any values which should be removed from list_check remove = [] # iterates through list_check and removes all unnecessary punctuation for num, i in enumerate(list_check): if i == '' or i == '-' or i == ' ': remove.append(num) for i in sorted(remove, reverse=True): del list_check[i] # returns edited list return list_check
def remove_string(str_list, substr): """Given list of strings, remove substring from each string""" return [s.replace(substr, '') for s in str_list]
def line_as_comment(line): """Change a C code line to a comment by adding the prefix "// " If the last character of the line is a backslash, certain characters are appended. This silences some compiler warnings. """ line = "// " + line if line[-2:] == "\\\n": line = line[:-2] + "\\ //\n" return line
def find_parity(x): """ Problem 5.1: Computing the parity of a word """ parity = 0 while x: parity += x & 1 x >>= 1 return parity % 2
def _any_exist_in(select_from, find_in): """ :param select_from: iterable keys to find :param find_in: iterable to search in :return: True if any item in the first iterable exists in the second """ for target in select_from: if target in find_in: return True return False
def filter_out_out_pkgs(event_in_pkgs, event_out_pkgs): """ In case of a Split or Merge PES events some of the "out" packages can be the same as the "in" packages. We don't want to remove those "in" packages that are among "out" packages. For example in case of a split of gdbm to gdbm and gdbm-libs, we would incorrectly mandate removing gdbm without this filter. But for example in case of a split of Cython to python2-Cython and python3-Cython, we will correctly mandate removing Cython. """ out_pkgs_keys = {p.name for p in event_out_pkgs} return {p for p in event_in_pkgs if p.name not in out_pkgs_keys}
def sum_n(n):
    """Function that sums up the integers from 1 to n

    Notebook: PCP_04_control.ipynb

    Args:
        n: Integer number

    Returns:
        s: Sum of integers from 1 to n
    """
    s = 0
    for i in range(1, n + 1):
        s = s + i
    return s
def rectify(x): """Rectify activation function :math:`\\varphi(x) = \\max(0, x)` Parameters ---------- x : float32 The activation (the summed, weighted input of a neuron). Returns ------- float32 The output of the rectify function applied to the activation. """ # The following is faster than T.maximum(0, x), # and it works with nonsymbolic inputs as well. # Thanks to @SnipyHollow for pointing this out. Also see: # https://github.com/Lasagne/Lasagne/pull/163#issuecomment-81765117 return 0.5 * (x + abs(x))
def ID_False_ConfluenceLocs(false_confluence: list, nogo: list):
    """
    Function to identify all the cells in nogo associated with the false confluences
    """
    false_cnum = []
    false_points = []
    for cell in false_confluence:
        false_cnum.append(cell[2])
    for cell in nogo:
        if cell[2] in false_cnum:
            false_points.append(cell)
    false_points = list(set(false_points) - set(false_confluence))
    return false_points
def get_line_slope_intercept(x1, y1, x2, y2): """Returns slope and intercept of lines defined by given coordinates""" m = (y2 - y1) / (x2 - x1) b = y1 - m*x1 return m, b
def format_errors(errors): """ Use this function to format serializer.errors. :param errors: :return: A `\n` separated string which can be displayed properly as a human-readable error message. """ messages = [] for field, error_detail in errors.items(): if isinstance(error_detail, dict): error_message = format_errors(error_detail) else: error_message = ', '.join([str(e) for e in error_detail]) messages.append(': '.join([str(field), error_message])) return '\n'.join(messages)
def almost(a, b): """ return true if the values are almost equal """ SMALL_VAL = 1e-4 try: return all(almost(i, j) for i, j in zip(list(a), list(b))) except BaseException: if isinstance(a, (int, float)) and isinstance(b, (int, float)): return abs(a - b) < SMALL_VAL raise NotImplementedError
def get_angle_between_azimuths(azimuth1, azimuth2) -> float: """Get the angle between two azimuths. :param azimuth1: The first azimuth angle :type azimuth1: float :param azimuth2: The second azimuth angle :type azimuth2: float :return: Angle between the given azimuths """ tmp = abs(azimuth1 - azimuth2) % 360 return 360 - tmp if tmp > 180 else tmp
def formula_has_multi_any_bfunc(formula, bfunc_set): """Returns true if the total times any of the provided basis functions appear in the formula is more than once per per normalization. :formula: str :bfunc_set: list of str :returns: bool """ equivalents=formula.count("+")+1 instances=0 for b in bfunc_set: instances+=formula.count(b) return instances/equivalents>1
def _option_boolean(arg):
    """
    Check boolean options.
    """
    if not arg or not arg.strip():
        # no argument given, assume used as a flag
        return True
    if arg.strip().lower() in ("no", "0", "false"):
        return False
    if arg.strip().lower() in ("yes", "1", "true"):
        return True
    raise ValueError(f'"{arg}" unknown boolean')
def get_col(color_name=None): """Material Design color palettes (only '100' and '900' variants). Help: call with no arguments to see the list of available colors, these are also returned into a list Kwarg: - color_name: string representing the color's name Output: - color: list of two elements [0] = lightest color '100'-variant (RGB-triplet in [0, 1]) [1] = darkest color '900'-variant (RGB-triplet in [0, 1]) """ colors = { "red": [[255, 205, 210], [183, 28, 28]], "pink": [[248, 187, 208], [136, 14, 79]], "purple": [[225, 190, 231], [74, 20, 140]], "deep_purple": [[209, 196, 233], [49, 27, 146]], "indigo": [[197, 202, 233], [26, 35, 126]], "blue": [[187, 222, 251], [13, 71, 161]], "light_blue": [[179, 229, 252], [1, 87, 155]], "cyan": [[178, 235, 242], [0, 96, 100]], "teal": [[178, 223, 219], [0, 77, 64]], "green": [[200, 230, 201], [27, 94, 32]], "light_green": [[220, 237, 200], [51, 105, 30]], "lime": [[240, 244, 195], [130, 119, 23]], "yellow": [[255, 249, 196], [245, 127, 23]], "amber": [[255, 236, 179], [255, 111, 0]], "orange": [[255, 224, 178], [230, 81, 0]], "deep_orange": [[255, 204, 188], [191, 54, 12]], "brown": [[215, 204, 200], [62, 39, 35]], "gray": [[245, 245, 245], [33, 33, 33]], "blue_gray": [[207, 216, 220], [38, 50, 56]], } if not color_name: print("\n=== Colors available are:") for key, _ in colors.items(): print("- " + key) return list(colors.keys()) else: color = [ [colors[color_name][i][j] / 255 for j in range(3)] for i in range(2) ] return color
def is_None( arg ): """ Return true if NoneType, otherwise return false """ return isinstance( arg , type( None ) )
def flatten(l: list): """Flattens list""" return [item for sublist in l for item in sublist]
def add_host_config_params_dns(docker_host, host_config_params=None):
    """Add dns input params

    This is not a generic implementation. This method will setup dns with the
    expectation that a local consul agent is running on the docker host and
    will service the dns requests.

    Args:
    -----
    docker_host (string): Docker host ip address which will be used as the dns server
    host_config_params (dict): Target dict to accumulate host config inputs

    Returns:
    --------
    Updated host_config_params
    """
    if host_config_params is None:
        host_config_params = {}
    host_config_params["dns"] = [docker_host]
    host_config_params["dns_search"] = ["service.consul"]
    host_config_params["extra_hosts"] = {"consul": docker_host}
    return host_config_params
def versionate(s):
    """
    Assumes s is a slug-type string. Returns another slug-type string with a
    number at the end. Useful when you want unique slugs that may have been
    hashed to the same string.
    """
    words = s.split("-")
    if len(words) > 1:
        try:
            # Check if the last element is a number. If no exception, it is.
            # We'll substitute the number on the slug
            num = int(words[-1])
            words[-1] = str(num + 1)
        except ValueError:
            # Not a number. We'll append the number 1 to create a new version.
            words.append('1')
    return '-'.join(words)
def bubble_sort(lst: list) -> list: """This is a bubble sort algorithm implementation Parameters ---------- lst: list The unsorted list Returns ------- list A sorted list in ascending order References ---------- https://en.wikipedia.org/wiki/Bubble_sort """ for i in range(0, len(lst)): for m in range(len(lst) - i - 1): if lst[m] > lst[m+1]: lst[m], lst[m + 1] = lst[m + 1], lst[m] return lst
def GetCounts(nucleotides, val):
    """Count the number of occurrences of val in the array.

    Can't find a way without running into memory troubles.
    """
    t = 0
    for x in nucleotides:
        if x == val:
            t += 1
    return t
def get_directory_size(path): """ This will return the filesize in bytes of the contents in 'path' """ import os total_size = 0 for dirpath, dirnames, filenames in os.walk(path): for f in filenames: fp = os.path.join(dirpath, f) total_size += os.path.getsize(fp) return total_size
def is_valid_layer(src_list, tgt_list): """Check if there is a valid layer in both layers.""" for i, val in enumerate(src_list): if val and tgt_list[i]: return True return False
def split_dict_equally(input_dict, chunks=2): """ Splits dict by keys. Returns a list of dictionaries. """ return_list = [dict() for idx in range(chunks)] idx = 0 for k,v in input_dict.items(): return_list[idx][k] = v if idx < chunks-1: idx += 1 else: idx = 0 return return_list
def _polymul(a, b): """ Returns the product of two polynomials using carry-less addition. """ product = 0 while a: product ^= (a & 1) * b b <<= 1 a >>= 1 return product
def if_none(value, default): """Return default if value is None.""" return value if value is not None else default
def in_range(r1, r2): """check if r1 is in the range of r2""" return r1.start >= r2.start and r1.end <= r2.end
def dateIsBefore(year1, month1, day1, year2, month2, day2): """ Returns True if year1-month1-day1 is before year2-month2-day2. Otherwise, returns False. """ if year1 < year2: return True if year1 == year2: if month1 < month2: return True if month1 == month2: return day1 < day2 return False
def line2d(x, y, coeffs=[1]*3, return_coeff=False): """Returns the result of a plane, or returns the coefficients""" a0 = (x*0+1)*coeffs[0] a1 = x*coeffs[1] a2 = y*coeffs[2] if return_coeff: return a0, a1, a2 else: return a0+a1+a2
def trihex_to_tris(a, b, c): """Given a trihex, returns the co-ordinates of the 6 triangles it contains, using co-ordinates as described in updown_tri""" n = a + b + c if n == 0: return [ (a * 2 + 1, b * 2, c * 2), (a * 2 + 1, b * 2 + 1, c * 2), (a * 2, b * 2 + 1, c * 2), (a * 2, b * 2 + 1, c * 2 + 1), (a * 2, b * 2, c * 2 + 1), (a * 2 + 1, b * 2, c * 2 + 1), ] if n == 1: return [(a * 2, b * 2, c * 2)] if n == -1: return [(a * 2, b * 2, c * 2)]