Dataset schema:
    content : string (lengths 39 to 9.28k)
    sha1    : string (length 40)
    id      : int64 (values 8 to 710k)
def potential(element_symbol, functional):
    """
    :param element_symbol: string, symbol of the element
    :param functional: string, functional name
    :return: string, GTH potential name to be used in the simulation
    """
    return "GTH-" + functional
37b3e56899da39780ed732e5afaa03652a5bf940
434,095
import grp

def gid_exists(gid):
    """Check if a gid exists"""
    try:
        grp.getgrgid(gid)
        gid_exists = True
    except KeyError:
        gid_exists = False
    return gid_exists
7eee1b22cc6991579c644261e2b20fb42d967ecf
564,879
import re

def kebab_case(inp):
    """
    Convert from `CamelCase` to `kebab-case`.
    """
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', inp)
    return re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()
6e8d38188c213fe2efc2b8bfa43dd81b697cbcd0
66,282
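A quick usage sketch for kebab_case above (illustrative inputs, not part of the dataset):

print(kebab_case("CamelCaseString"))  # -> camel-case-string
print(kebab_case("HTTPResponse"))     # -> http-response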
def count_params(model):
    """returns (total n° of parameters, n° of trainable parameters)"""
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return total_params, trainable_params
122d86d7c58afc7a2033edf71ad0e21eee3f6fce
421,791
def sqrt(x, tolerance=0.0000001):
    """ Find a value r with r*r within tolerance of x. """
    l = 0.0
    # The bisection bracket must contain sqrt(x); for 0 < x < 1 we have
    # sqrt(x) > x, so widen the upper bound to at least 1.0.
    r = max(float(x), 1.0)
    while r - l > tolerance:
        m = l + 0.5 * (r - l)
        s = m * m
        if s < x:
            l = m
        else:
            r = m
    return l
1d3933d7d390403c92feb26e56ad6ba28b2e7e72
559,112
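A quick check of the bisection above (illustrative values; the second call relies on the widened bracket noted in the comment):

print(sqrt(2))     # -> approximately 1.4142135
print(sqrt(0.25))  # -> approximately 0.5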
def _get_match(proto, filter_fn):
    """Finds and returns the single element that matches a query.

    If no element matches the query, it throws ValueError.
    If more than one element matches the query, it also throws ValueError.
    """
    query = [elm for elm in proto if filter_fn(elm)]
    if len(query) == 0:
        raise ValueError('Could not find element')
    elif len(query) > 1:
        raise ValueError('Too many matches')
    return query[0]
2e3ed82cf0d12afaccae6863129cc6d81a2b7981
291,806
from typing import Dict

def get_default_metas() -> Dict:
    """
    Get a copy of default meta variables.

    NOTE: DO NOT ADD MORE ENTRIES HERE!

    :return: a deep copy of the default metas in a new dict
    """
    # NOTE: DO NOT ADD MORE ENTRIES HERE!
    return {
        'name': '',  #: a string, the name of the executor
        'description': '',  #: a string, the description of this executor. It will be used in automatic docs UI
        'workspace': '',  #: a string, the workspace of the executor
        'py_modules': '',  #: a list of strings, the python dependencies of the executor
    }
f05e2744cd4ea22ad3c25e9132d7508ccc94daac
504,056
import math

def Critical(n1, n2):
    """Calculate critical angle in degrees."""
    assert n1 > n2, "\nWarning: Critical angle is not defined, since n1 <= n2!"
    return math.degrees(math.asin(n2 / n1))
674c3588285b73b6a1be13810cc0bc2e924a1baa
83,954
def openslide_can_load(file_extension: str) -> bool:
    """
    Checks if the given file extension can be loaded by openslide.
    :param file_extension: The file extension to be checked
    :return: Whether the file extension can be loaded by openslide
    """
    OPENSLIDE_FORMATS = [
        "svs", "tif", "vms", "vmu", "ndpi", "scn",
        "mrxs", "tiff", "svslide", "bif",
    ]
    return file_extension.lower() in OPENSLIDE_FORMATS
7bb5fceb373eef1cedcdef7896101728b1c84dfa
372,011
def alt_text_to_curly_bracket(text):
    """
    Converts the text that appears in the alt attribute of image tags from gatherer
    to a curly-bracket mana notation.
    ex: 'Green'->{G}, 'Blue or Red'->{U/R}
        'Variable Colorless' -> {XC}
        'Colorless' -> {C}
        'N colorless' -> {N}, where N is some number
    """
    def convert_color_to_letter(color):
        if color.lower() not in ('red', 'white', 'blue', 'green', 'black',
                                 'colorless', 'tap', 'energy'):
            # some cards have weird split mana costs where you can pay N colorless
            # or one of a specific color.
            # Since we're ending up here, and what we're given isn't a color,
            # let's assume it's N
            return color
        else:
            if color.lower() == 'blue':
                return 'U'
            else:
                return color[0].upper()

    try:
        int(text, 10)
    except Exception:
        pass
    else:
        # This is just a number. Easy enough.
        return f"{{{text}}}"

    if ' or ' in text:
        # this is a compound color, not as easy to deal with.
        text = text.replace('or', '')
        text = '/'.join([convert_color_to_letter(x) for x in text.split()])
    else:
        if 'Variable' in text:
            text = 'X'
        else:
            # hopefully all that's left is just simple color symbols.
            text = convert_color_to_letter(text)
    # at this point text should hold the symbol body, ready to be bracketed.
    return f"{{{text}}}"
c604b236a8d0baeff244e0e246176a406674c9e2
709,995
def diffusion_coefficient(mol_vol, wat_viscosity=10**-3):
    """
    Return the diffusion coefficient [m²/s]
    source : (HNS-MS)

    Parameters
    ----------
    mol_vol : Molar volume of the component [m³/mol]
    wat_viscosity : Dynamic viscosity of water [Pa s]
    """
    # viscosity converted to cP, molar volume to cm³/mol, result from cm²/s to m²/s
    return (13.26 * 10**-5) / ((wat_viscosity * 1000)**1.14 * (mol_vol * 100**3)**0.589) / (100 * 100)
6bf3d4559404cff016ff8e70d8ae0f1041f3a1f0
302,468
def bacon_strategy(score, opponent_score, margin=8, num_rolls=4):
    """This strategy rolls 0 dice if that gives at least MARGIN points,
    and rolls NUM_ROLLS otherwise.
    """
    # BEGIN PROBLEM 9
    "*** REPLACE THIS LINE ***"
    return 4  # Replace this statement
    # END PROBLEM 9
2188aeba464bb5b5796e6a0c2837165f04d5dc07
388,880
def project_to_2D(xyz):
    """Projection to (0, X, Z) plane."""
    return xyz[0], xyz[2]
c6cdb8bd6dce65f6ce39b14b9e56622832f35752
2,634
def get_unique_identifier(item):
    """Return unique item identifier

    The complete format is {database}/{uri}_{channel}:
    * prefixed by "{database}/" only when `item` has a 'database' key.
    * suffixed by "_{channel}" only when `item` has a 'channel' key.

    Parameters
    ----------
    item : dict
        Item as yielded by pyannote.database protocols

    Returns
    -------
    identifier : str
        Unique item identifier
    """
    IDENTIFIER = ""
    # {database}/{uri}_{channel}
    database = item.get('database', None)
    if database is not None:
        IDENTIFIER += "{database}/"
    IDENTIFIER += "{uri}"
    channel = item.get('channel', None)
    if channel is not None:
        IDENTIFIER += "_{channel:d}"
    return IDENTIFIER.format(**item)
62425e89441553756e5ebe557b8d503e0e898b5c
526,953
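A quick usage sketch for get_unique_identifier above (invented item values):

print(get_unique_identifier({'database': 'AMI', 'uri': 'ES2004a', 'channel': 1}))  # -> AMI/ES2004a_1
print(get_unique_identifier({'uri': 'ES2004a'}))                                   # -> ES2004a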
def find_zeros(matrix):
    """Returns a dict with list of rows and columns that contain zeros within the matrix

    >>> zero_coordinates = find_zeros([['0', 'w', 'e'], ['a', 's', 'd'], ['z', 'x', '0']])
    >>> sorted(zero_coordinates.items())
    [('columns', [0, 2]), ('rows', [0, 2])]
    """
    rows = []
    columns = []
    coordinates = {'rows': rows, 'columns': columns}
    for row_number, row in enumerate(matrix):
        for element_number, element in enumerate(row):
            if element == '0':
                coordinates['rows'].append(row_number)
                coordinates['columns'].append(element_number)
    return coordinates
768f2622a4943a9555d3b3057eb2cfe3d88dd4d1
547,791
import torch

def _get_strided(waveform, window_size, window_shift, snip_edges):
    """ Given a waveform (1D tensor of size num_samples), it returns a 2D tensor
    (m, window_size) representing how the window is shifted along the waveform.
    Each row is a frame.

    Inputs:
        waveform (Tensor): Tensor of size num_samples
        window_size (int): Frame length
        window_shift (int): Frame shift
        snip_edges (bool): If True, end effects will be handled by outputting only
            frames that completely fit in the file, and the number of frames depends
            on the frame_length. If False, the number of frames depends only on the
            frame_shift, and we reflect the data at the ends.

    Output:
        Tensor: 2D tensor of size (m, window_size) where each row is a frame
    """
    assert waveform.dim() == 1
    num_samples = waveform.size(0)
    strides = (window_shift * waveform.stride(0), waveform.stride(0))

    if snip_edges:
        if num_samples < window_size:
            return torch.empty((0, 0))
        else:
            m = 1 + (num_samples - window_size) // window_shift
    else:
        reversed_waveform = torch.flip(waveform, [0])
        m = (num_samples + (window_shift // 2)) // window_shift
        pad = window_size // 2 - window_shift // 2
        pad_right = reversed_waveform
        if pad > 0:
            # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
            # but we want [2, 1, 0, 0, 1, 2]
            pad_left = reversed_waveform[-pad:]
            waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
        else:
            # pad is negative so we want to trim the waveform at the front
            waveform = torch.cat((waveform[-pad:], pad_right), dim=0)

    sizes = (m, window_size)
    return waveform.as_strided(sizes, strides)
f5ed429866926afc7a3915439fd84dbdd6352357
77,149
def true(m):
    """The membership-value is its own truth-value."""
    return m
dea1e84a82f4578b7ab5fb97d18228525017b68d
432,507
def euclidean_gcd(a, b):
    """
    Compute the greatest common divisor of two numbers
    using the Euclidean algorithm.
    :param a:
    :param b:
    :return: gcd(a, b)
    """
    if a == 0 and b == 0:
        return 0
    elif a == 0 and b != 0:
        return b
    elif a != 0 and b == 0:
        return a
    while b != 0:
        # Avoid the % operator: its result can differ between programming
        # languages (although the results are always congruent).
        # a, b = b, a % b
        q = int(a / b)
        r = a - q * b
        a, b = b, r
    return a
64cde498d858fee9073b88e82e0caf5d90f5cb92
457,887
def unique_trees(msts):
    """
    Test whether a list of minimum spanning trees are unique.

    Args:
        msts (list, nx.Graph): list of minimum spanning trees.

    Returns:
        (boolean): whether all trees in the list are unique.
    """
    for i, mst1 in enumerate(msts):
        for j, mst2 in enumerate(msts):
            if i != j:
                mst1_edges = set(mst1.edges)
                mst2_edges = set(mst2.edges)
                if len(mst1_edges.difference(mst2_edges)) == 0:
                    return False
    return True
9d7e2231adf376317f49e7ed438da632553ce76f
597,967
def get_rank(c, domain):
    """Gets the alexa rank for a domain. Returns False if not found"""
    entry = c['domains'].find_one({'domain': domain.replace(".", "#")},
                                  {'alexa.rank.latest': 1})
    try:
        return entry['alexa']['rank']['latest']
    except Exception:
        return False
c7b531ab9c431be4b440f8d14499ce5902dd3e30
191,963
def lazy_property(func):
    """
    Decorator that makes a property lazy-evaluated,
    i.e. only set on first access.
    """
    attr_name = '_%s' % func.__name__
    docstring = func.__doc__

    @property
    def _lazy_property(self):
        try:
            return getattr(self, attr_name)
        except AttributeError:
            value = func(self)
            setattr(self, attr_name, value)
            return value
    return _lazy_property
f8aad68b0469c135a748e8583057b652f66770bc
248,062
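A minimal usage sketch for lazy_property above (the Circle class is invented for illustration):

class Circle:
    def __init__(self, radius):
        self.radius = radius

    @lazy_property
    def area(self):
        print("computing...")  # runs only on first access
        return 3.14159 * self.radius ** 2

c = Circle(2)
print(c.area)  # prints "computing..." then 12.56636
print(c.area)  # cached: prints 12.56636 only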
import functools
import logging
import time

def RetryOnException(exc_type, retries):
    """Decorator to retry running a function if an exception is raised.

    Implements exponential backoff to wait between each retry attempt, starting
    with 1 second.

    Note: the default number of retries is defined on the decorator, the
    decorated function *must* also receive a "retries" argument (although its
    assigned default value is ignored), and clients of the function may override
    the actual number of retries at the call site.

    The "unused" retries argument on the decorated function must be given to
    keep pylint happy and to avoid breaking the Principle of Least Astonishment
    if the decorator were to change the signature of the function.

    For example:

        @retry_util.RetryOnException(OSError, retries=3)  # default no. of retries
        def ProcessSomething(thing, retries=None):  # this default value is ignored
            del retries  # Unused. Handled by the decorator.
            # Do your thing processing here, maybe sometimes raising exceptions.

        ProcessSomething(a_thing)  # retries 3 times.
        ProcessSomething(b_thing, retries=5)  # retries 5 times.

    Args:
        exc_type: An exception type (or a tuple of them), on which to retry.
        retries: Default number of extra attempts to try, the caller may also
            override this number. If an exception is raised during the last try,
            then the exception is not caught and passed back to the caller.
    """
    def Decorator(f):
        @functools.wraps(f)
        def Wrapper(*args, **kwargs):
            wait = 1
            kwargs.setdefault('retries', retries)
            for _ in range(kwargs['retries']):
                try:
                    return f(*args, **kwargs)
                except exc_type as exc:
                    logging.warning(
                        '%s raised %s, will retry in %d second%s ...',
                        f.__name__, type(exc).__name__, wait,
                        '' if wait == 1 else 's')
                    time.sleep(wait)
                    wait *= 2
            # Last try with no exception catching.
            return f(*args, **kwargs)
        return Wrapper
    return Decorator
ea87117cd202cbc6ce5bc4fc9fb25a5b8a324e1f
34,974
def write(*args, **kwargs):
    """
    Wrapper around the print function to ensure compatibility with Python 2.
    The arguments are used in the same way as with print.
    """
    return print(*args, **kwargs)
f6bfcc9f72bf1fcfad0dbec56f4bce185df1fa04
667,489
def get_object_attrs(obj):
    """
    Get the attributes of an object using dir.

    This filters dunder ("__"-prefixed) attributes.
    """
    attrs = [k for k in dir(obj) if not k.startswith('__')]
    if not attrs:
        attrs = dir(obj)
    return attrs
0fdc4d9e7c889493ce7e471f86ccfaa889151bdc
522,762
def getPath(keyword, topicNames, topicPaths, topicIDs):
    """
    Function to get the path of a particular keyword in the ACM Tree.

    Parameters:
        keyword (string) - the keyword for which we want the path.
        topicNames (dictionary) - the name of the topic as the key and the topic ID number as value.
        topicPaths (dictionary) - the topic ID as the key, and its parent as value.
        topicIDs (dictionary) - the topic ID as the key, and the topic name as the value.

    Returns:
        path (list) - the path of that keyword (backwards)
    """
    topicId = topicNames[keyword]
    path = [keyword]
    topicParent = topicPaths[topicId]
    # Start from the keyword and backtrack until the first parent.
    while topicParent != 0:
        curr = topicIDs[topicParent]
        path.append(curr)
        topicParent = topicPaths[topicParent]
    return path
22dd07a92290c4a7b9e1baf70ef0cc98960b4520
693,871
def wrapper(f, *args, **kwargs):
    """ Wrap a function and its arguments into a mapping function for ImageCollections.
    The first parameter of the function must be an Image, and it must return an Image.

    :param f: the function to be wrapped
    :type f: function
    :return: a function to use in ee.ImageCollection.map
    :rtype: function
    """
    def wrap(img):
        return f(img, *args, **kwargs)
    return wrap
ae22f84131d0b32b4bc2e353b91203f2ff7badd3
183,520
def append(l: list, obj: object) -> list:
    """Extend or append to list"""
    if isinstance(obj, list):
        l.extend(obj)
    else:
        l.append(obj)
    return l
21996e2f36baf323a44459b8d9eb9602f87df761
52,295
import random

def new_path(existing_path):
    """Switch two random consecutive points on a path

    Arguments received:
    existing_path -- list of coordinates, e.g. [(0, 0), (10, 5), (10, 10)],
        representing a path

    Arguments returned:
    path -- list of coordinates representing the mutated path
    """
    path = existing_path[:]
    # randomly choose a point between 1st and 2nd-to-last points on path
    point = random.randint(0, len(path) - 2)
    # switch this point with the next point
    path[point + 1], path[point] = path[point], path[point + 1]
    return path
a3a28f2e3570c66b37263f5408a774cac65410fc
549,367
def format_node(node, indent, depth, to_str=str):
    """Return string of graph node based on arguments.

    Args
        node: tuple of two items
        indent: string of tree indentation chars
        depth: int of tree depth, 0 = root
        to_str: function to convert node to string, by default str

    Returns
        String representation of node.
    """
    space = ' ' * ((len(indent) + 1) * (depth - 1))
    leader = '|' + indent if depth > 0 else ''
    return space + leader + to_str(node)
50a63b9ec8b663b91a3161db4776ed5d719b6599
471,002
import typing

def is_type(input_object, expected_type) -> bool:
    """
    Check if an object corresponds to a given type.
    Works similarly to isinstance but also works on select 'typing' types.
    """
    try:
        module = expected_type.__module__
    except AttributeError:
        module = "default"
    if module == "typing":
        if isinstance(input_object, dict):
            right_types = []
            for key, value in input_object.items():
                right_types.append(
                    all([
                        isinstance(key, expected_type.__args__[0]),
                        isinstance(value, expected_type.__args__[1])
                    ])
                )
            return all(right_types)
        if isinstance(input_object, (list, tuple)):
            accepted_types = expected_type.__args__
            if not isinstance(accepted_types, list):
                accepted_types = [accepted_types] * len(input_object)
            right_types = [is_type(element, accepted_types[i])
                           for i, element in enumerate(input_object)]
            return all(right_types)
        if expected_type.__origin__ == typing.Union:
            for accepted_type in expected_type.__args__:
                if isinstance(input_object, accepted_type):
                    return True
            return False
    if input_object is None:
        if expected_type is None:
            return True
        return False
    return isinstance(input_object, expected_type)
027ccfb42b41d53ee6d222563a54917ce7c9a184
285,584
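A few illustrative checks of is_type above (values invented for the example):

from typing import Dict, List, Union

print(is_type({'a': 1}, Dict[str, int]))  # -> True
print(is_type([1, 'x'], List[int]))       # -> False ('x' is not an int)
print(is_type(3.5, Union[int, float]))    # -> True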
def snapToGround(world, location):
    """Mutates @location to have the same z-coordinate as the
    nearest waypoint in @world."""
    waypoint = world.get_map().get_waypoint(location)
    location.z = waypoint.transform.location.z + 0.5
    return location
2827231c7f2b491ca59f23a48c4420a945bcf7a3
214,887
def time_series_estimates(data_y, nei_x, wei_x):
    r"""Return estimates of variable $Y$ from variable $X$'s shadow data cloud.

    Params
    ------
    data_y (np.ndarray): 1D array of variable $Y$'s time series data.
    nei_x (np.ndarray): $M \times (N+1)$ array of time indices of nearest
        neighbors in $X$'s shadow data cloud, where $M$ is the number of
        points in the shadow data cloud.
    wei_x (np.ndarray): Array of corresponding weights of the nearest
        neighbors in $X$'s shadow data cloud.

    Returns
    -------
    ests (np.ndarray): Length-$M$ 1D array of estimates of $Y$'s time series.

    Notes
    -----
    Let $t_1, t_2, ..., t_{N+1}$ be the time indices of nearest neighbors of
    point $t$ in $X$'s shadow data cloud. Its corresponding estimate of $Y$ is
    $\hat{Y}(t) = \sum_{k=1}^{N+1} w(t_k) Y(t_k)$, where $w$s are weights of
    nearest neighbors.
    """
    ests = (data_y[nei_x] * wei_x).sum(axis=1)
    return ests
929121aec4aab9d9a32fdfe67a9093c30e2cd573
473,036
import re

def shortstr(string):
    """
    Shorten string by removing long floats
    :param string: string, e.g. '#810002 scan eta 74.89533603616637 76.49533603616636 0.02 pil3_100k 1 roi2'
    :return: shorter string, e.g. '#810002 scan eta 74.895 76.495 0.02 pil3_100k 1 roi2'
    """
    # return re.sub(r'(\d\d\d)\d{4,}', r'\1', string)
    def subfun(m):
        return str(round(float(m.group()), 3))
    return re.sub(r'\d+\.\d{5,}', subfun, string)
377a9a5ed8f7d2831eb505cbe72910fd688addcf
105,492
def _Chunk(items, chunk_size):
    """Breaks a long list into sub-lists of a particular size."""
    chunks = []
    for i in range(0, len(items), chunk_size):
        chunks.append(items[i:i + chunk_size])
    return chunks
c45884833f77a5ecfc9852764f7dc8282fdc0ae2
236,206
def are_words_in_word_list(
    words, word_list, case_sensitive=False, get_score=False, all_must_match=True
):
    """Checks if word(s) are contained in another word list.
    The search can be performed with or without case sensitivity.
    The check words can contain wildcards, e.g. "abc*" to allow a wider
    range of matches against the word list."""
    if not isinstance(words, list):
        check_words = [words]
    else:
        check_words = words

    found = {}
    for w in check_words:
        word = w.lower() if not case_sensitive else w
        if "*" in word:
            # keep only the prefix before the wildcard
            word = word[:word.find("*")]
        for wl in word_list:
            wl = wl.lower() if not case_sensitive else wl
            if wl.startswith(word):
                found[word] = True

    if all_must_match and len(found) == len(check_words):
        if get_score:
            return True, len(found)
        return True
    if not all_must_match and len(found) > 0:
        if get_score:
            return True, len(found)
        return True
    if get_score:
        return False, len(found)
    return False
f1fa12e313fb65cf8606c7f81cc16b99b9e35c58
684,252
import math

def rounding_repeats(repeats, d_multiplier):
    """ Round number of repeats based on depth multiplier. """
    if not d_multiplier:
        return repeats
    return int(math.ceil(d_multiplier * repeats))
76cd3208ca3fd86fcc87bab45c35f09a8cba6f57
198,934
def str_to_bool(str_bool):
    """ Helper function to convert a string to a boolean. Only the exact
    string "False" maps to False; any other value maps to True. """
    if str_bool == "False":
        return False
    return True
c38a2d9ea15f1cf37edcf890c1e79216befe3ef7
261,176
def make_matrix(num_rows, num_cols, entry_fn):
    """returns a num_rows x num_cols matrix
    whose (i,j)th entry is entry_fn(i, j)"""
    return [[entry_fn(i, j)             # given i, create a list
             for j in range(num_cols)]  # [entry_fn(i, 0), ...]
            for i in range(num_rows)]
e3a1719dd884ffdd5f7e81c50957943d9dd91ca5
481,481
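A quick usage sketch for make_matrix above (the entry function is invented for the example):

def is_diagonal(i, j):
    # 1's on the diagonal, 0's everywhere else
    return 1 if i == j else 0

identity = make_matrix(3, 3, is_diagonal)
# -> [[1, 0, 0], [0, 1, 0], [0, 0, 1]]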
def retain_truthy(iterable):
    """Retain those items which evaluate to True in a boolean context.

    Args:
        iterable: The iterable series of items to be filtered.

    Returns:
        An iterable series of items for which bool(item) is True.
    """
    return filter(None, iterable)
4a518b9820e0d14357dc6e8f998df8beb111a90d
242,490
import csv

def make_run_list(run_csvs):
    """
    Reads a list of csvs to give the performance of the different
    hyperparameter settings.

    Returns
    -------
    data_list : list of lists
        performance of different hyperparameter settings for the csvs
        given as input.
    """
    data_list = []
    for a_csv in run_csvs:
        with open(a_csv, 'r') as f:
            reader = csv.reader(f)
            data_as_list = list(reader)
        data_list.append(data_as_list)
    return data_list
e8d931ff89f6a68a3bc7d4bd28537560b1ac7faa
366,493
def elem_to_dict(elem):
    """
    Used to convert the 'attributes' element to a dict for easy value access
    during beat slicing. This is a dict of str:list. Each list contains more
    dicts like this. The reason we have a list of dicts (as opposed to a single
    dict) is because we index by tag name, but we can have multiple of the same
    tag as children (e.g. multiple <clef>s in <attributes>)

    :param elem: The element to convert.
    :type elem: ET.Element
    :return: A nested dict representation of the element. The key (tag) maps to
        a list of elements with that tag (each with its own dictionary).
    :rtype: dict[str, list[dict]]
    """
    d = {'text': elem.text, 'tail': elem.tail, 'attrib': elem.attrib}
    if len(elem):  # truth-testing an Element directly is deprecated
        for child in elem:
            if child.tag not in d:
                d[child.tag] = []
            d[child.tag].append(elem_to_dict(child))
    return d
5d4e96d4a4c7987a335d8b2aee21a85ad2e09228
501,663
def _get_resource_path(path):
    """Transform the path into a URL."""
    return u'/%s' % path
4d8ba3d6f643dd04492ea25b01b6633f0a51e910
108,934
def A2CLoss(x, log_prob_fn, **unused_kwargs):
    """Definition of the Advantage Actor Critic (A2C) loss."""
    (predictions, actions, advantages, old_log_probs) = x
    del old_log_probs  # Not used in A2C.
    action_log_probs = log_prob_fn(predictions, actions)
    return -(action_log_probs * advantages).mean()
0b653965eb7bbac1b03535a0edddc0c66ce7920d
281,631
def create_url(users):
    """Create GET endpoint to fetch handle information

    Args:
        users: List of twitter handles

    Returns:
        URL for GET call to get handle information
    """
    str1 = ','.join(users)
    usernames = "usernames=" + str1
    user_fields = ("user.fields=description,created_at,location,pinned_tweet_id,"
                   "profile_image_url,protected,public_metrics,url,verified")
    url = f"https://api.twitter.com/2/users/by?{usernames}&{user_fields}"
    return url
b3dd3a4d62ea64485215f7bd54fe947674209b22
343,316
import hashlib

def string_to_md5(content: str) -> str:
    """ Take a string and calculate its md5 hash (as a string). """
    encoded = content.encode("utf8")
    return hashlib.md5(encoded).hexdigest()
1949385c5f95af092147b6576647769f79318109
77,809
def ppi_pm(g):
    """
    True if any rows in the group have source PPI/PM
    :param g: DataFrameGroupBy instance with a 'source' column
    """
    return g['source'].isin(['PPI/PM']).any()
aeff1b16719ab1ffb2e4be0cd7ed2722930b39e6
402,357
def get_branch_col(svn_look_line, branches_map):
    """
    For each project we have a list of branches which should be merged for it.
    This method will take the line returned by the svnlook command and return
    the relevant branches map for it.

    Args:
        svn_look_line: The svnlook line from which to deduce the relevant
            branches and project.
        branches_map: All projects with all their assigned branches.

    Returns:
        The relevant branches for the provided svnlook line.
    """
    for branches_col in branches_map:
        for branch in branches_map[branches_col]:
            if svn_look_line.find(branch) != -1:
                return branches_map[branches_col]
e6eeb6481aab3fa7f5bf747c985025568682b429
454,121
def _clear_entity_type_registry(entity, **kwargs):
    """Clear the given database/collection object's type registry."""
    codecopts = entity.codec_options.with_options(type_registry=None)
    return entity.with_options(codec_options=codecopts, **kwargs)
e8901dba01bdebae3415673568cf85c7134a6161
635,796
def _eth(dst, src, data):
    """Builds an Ethernet frame with an IP packet as payload"""
    packet = (
        dst +          # dst
        src +          # src
        b'' +          # vlan
        b'\x08\x00'    # type
    ) + data
    return packet
05380967005e5608c9dc83430cc43e9f08096e0b
78,450
import hashlib

def _digest(data):
    """SHA1 hash digest of message data.

    Implements RFC2437, 9.2.1 EMSA-PKCS1-v1_5, Step 1. for "Hash = SHA1"

    Args:
        data: str of bytes to digest

    Returns:
        str: of bytes of digest from "data"
    """
    hasher = hashlib.sha1()
    hasher.update(data)
    return hasher.digest()
fc34c0c8ba7373d841b5255b31f8f56d31af6ecc
177,480
def int_addr(addr):
    """Gets the integer representation of an address"""
    return int(addr[1:])
3f00ae151bff20516fbaddda73feb147aa1c8424
50,783
def get_lastfm_high_water_mark(config):
    """Get the marker for the latest last.fm track processed."""
    if (not config.has_section('lastfm') or
            not config.has_option('lastfm', 'last_timestamp')):
        return 1
    return config.getint('lastfm', 'last_timestamp')
ad5eab3b59e9b642cc3964af77bdf7d868d6914f
582,333
def resolve_value(obj, _):
    """Convert 'value' from bytes to string."""
    return obj.value.decode()
e1a3b14d5f49ef06fae51604fd549997b102a60a
381,463
def step(x):
    """ A neighbor of x is either 2*x or x+3"""
    return [x + 3, 2 * x]
49a9139e452bbc0abbc5ba86a5dba86a35302495
152,495
def get_config_part(config_parent, match_key, match_value):
    """
    helper function to filter a dict by a key/value pair

    :param config_parent: ``dict`` to filter
    :param match_key: dict key to match on
    :param match_value: required value for that key
    :returns: filtered ``dict``
    """
    return {k: v for (k, v) in config_parent.items() if v[match_key] == match_value}
d92a5ad9135df3fb827701ecf0fdc31ba69c4f8d
634,136
def read_scalar(group, dataset_name):
    """
    Read a HDF5 `SCALAR` as a dict.

    All attributes will be assigned as key: value pairs, and the
    scalar value will be assigned the key name 'value'.

    :param group:
        A h5py `Group` or `File` object from which to read the dataset.

    :param dataset_name:
        A `str` containing the pathname of the dataset location.

    :return:
        A `dict` containing the SCALAR value as well as any attributes
        coupled with the SCALAR dataset.
    """
    dataset = group[dataset_name]
    data = {k: v for k, v in dataset.attrs.items()}
    data["value"] = dataset[()]
    return data
6d9908e64f6584d0128756778679f87ffc8cb46f
697,094
def decompressCmd(path, default="cat"):
    """Return the command to decompress the file to stdout, or default if not
    compressed, which defaults to the `cat' command, so that it just gets
    written through."""
    if path.endswith(".Z") or path.endswith(".gz"):
        return "zcat"
    elif path.endswith(".bz2"):
        return "bzcat"
    else:
        return default
4d3a56219497a0daa74fb7d3cf8f373157184d8b
421,016
def _merge_meta(epi_ref, meta_list):
    """Prepare a tuple of EPI reference and metadata."""
    return (epi_ref, meta_list[0])
3b9795c39d53a28251e9b08b89544e013619c908
447,069
def fast_tokenize(text, ln, tokenizer, mark=True):
    """
    Tokenizes a text given a language and a tokenizer.

    In addition to the tokenization provided by the passed tokenizer:
    - lowercases the text;
    - replaces all digits with 0s;
    - marks tokens with a language marker ("_ln").

    Returns a list of strings.
    """
    text = text.lower()
    text = ''.join("0" if c.isdigit() else c for c in text)
    tokens = tokenizer.tokenize(text)
    if mark:
        for i, t in enumerate(tokens):
            tokens[i] = t + '_' + ln
    return tokens
5f01e948fe50b4096c87ec6f680ae246428c6fab
478,080
def get_sample_count(frame_count, samples_per_frame):
    """Get sample count.

    :param frame_count: Count of frames.
    :type frame_count: int
    :param samples_per_frame: Samples per frame.
    :type samples_per_frame: int
    :return: Sample count
    :rtype: int
    """
    return frame_count * samples_per_frame
bdfd79305f9f3900cc2baa8122909e314cab9aaa
205,831
def discretize(y_pred):
    """
    Converts the predicted results from a continuous variable
    to five readability levels.
    """
    for i in range(len(y_pred)):
        if y_pred[i] < 0.5:
            y_pred[i] = 0.0
        elif y_pred[i] < 1.5:
            y_pred[i] = 1.0
        elif y_pred[i] < 2.5:
            y_pred[i] = 2.0
        elif y_pred[i] < 3.5:
            y_pred[i] = 3.0
        else:
            y_pred[i] = 4.0
    return y_pred
29d9dc108c26444af468536b32aa2a4e2d2eed83
357,792
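A quick check of the rounding bands in discretize above (invented predictions):

print(discretize([0.2, 1.7, 3.9]))  # -> [0.0, 2.0, 4.0]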
import math

def rad2deg(angle):
    """Converts value in radians to degrees."""
    return angle * 180.0 / math.pi
9f2390f3f9c97a7ce5db87c7a1dbbee0a5818099
275,550
import torch

def broadcast(tensor, devices):
    """Broadcasts a tensor to a number of GPUs.

    Arguments:
        tensor (Tensor): tensor to broadcast.
        devices (Iterable): an iterable of devices among which to broadcast.
            Note that it should be like (src, dst1, dst2, ...), the first
            element of which is the source device to broadcast from.

    Returns:
        A tuple containing copies of the ``tensor``, placed on devices
        corresponding to indices from ``devices``.
    """
    return torch._C._broadcast(tensor, devices)
3ded00742909d67e4cea0275e053af7c260bfad4
235,923
from typing import Set
from typing import Tuple
from typing import Dict

def _valid_bond(used_partitions: Set[int], bond: Tuple[int, int],
                current_partition: int,
                comp_map: Dict[int, int]) -> Tuple[bool, int]:
    """Helper method to find next partition to explore.

    Used to check if a bond goes from the current partition into a partition
    that is not yet explored.

    Parameters
    ----------
    used_partitions: Set[int]
        Partitions which have already been used
    bond: Tuple[int, int]
        The bond to check if it goes to an unexplored partition.
        This tuple is (from_atom, to_atom).
    current_partition: int
        The current partition of the DFS
    comp_map: Dict[int, int]
        Maps atom ids to component ids

    Returns
    -------
    is_valid: bool
        Whether to explore the next partition or not
    next_partition: int
        The next partition to explore
    """
    part1 = comp_map[bond[0]]
    part2 = comp_map[bond[1]]
    if part1 != current_partition and part2 != current_partition:
        return False, 0
    if part1 == current_partition:
        next_partition = part2
    else:
        next_partition = part1
    return next_partition not in used_partitions, next_partition
f8b9bea8e2d8f27f2eeaf852bdf826fef3b97ae8
461,045
def get_loss_group(self, group, felec):
    """Get loss power for given group from coefficients stored in coeff dict

    Parameters
    ----------
    self : OutLoss
        an OutLoss object
    group : str
        Name of part for which to calculate loss function
    felec : float
        electrical frequency

    Returns
    -------
    Ploss : float
        loss power for given group [W]
    """
    if group in self.coeff_dict:
        coeff_dict = self.coeff_dict[group]
        Ploss = (
            coeff_dict["A"] * felec ** coeff_dict["a"]
            + coeff_dict["B"] * felec ** coeff_dict["b"]
            + coeff_dict["C"] * felec ** coeff_dict["c"]
        )
    else:
        Ploss = 0
    return Ploss
edd650ddc6e3e5c3cb7f13a6943a0a6379b3cba8
265,719
def _evalues_score_gap_tuple(ranked_scores):
    """
    Takes a ranked list of 3-part tuples, returns a 2-part tuple with the max
    score's e-value and the gap to the next best score.
    :param ranked_scores: ranked list of 3-part tuples [(id,score,evalue),...,(id,score,evalue)]
    :return: tuple (float, float) 0: ranked alignment score, 1: the gap between best and next best
    """
    if len(ranked_scores) > 1:
        # if there was more than one hit
        # ranked_scores of form [(id, score, evalue), (id, score, evalue)]
        score_gap = ranked_scores[0][1] - ranked_scores[1][1]
        r = (ranked_scores[0][2], score_gap)
    elif len(ranked_scores) == 1:
        # for the case where there was only one hit
        r = (ranked_scores[0][2], -99)
    else:
        r = (-99, -99)
    return r
8e95704f67026038eef324a184d77f959b37655a
531,204
def readFile(file, defStr=None, mode="r"):
    """ read a file and return its contents; fall back to defStr on error """
    ret = defStr
    try:
        with open(file, mode) as f:
            ret = f.read()
    except Exception as e:
        # only swallow the error when a default was provided
        if defStr is None:
            raise e
    return ret
296532a8a8682ac5a9b51477e9be71b864a5fd3c
386,665
def get_arbitrage(odds):
    """
    Calculate Arbitrage to find out whether a book is worth it.
    If the arbitrage is greater than 1 you are guaranteed to lose money,
    if it's below 1 you are guaranteed to make money.
    """
    a = 0
    for odd in odds:
        a += 1 / odd
    return a
43aa9bc5bdd4786951391c4509ec51076b32901e
382,654
from typing import List
from typing import Any
import random

def generate_random_batches(values: List[Any],
                            min_batch_size: int,
                            max_batch_size: int) -> List[List[Any]]:
    """
    Generate random batches of elements in values without replacement and
    return the list of all batches. Batch sizes can be anything between
    min_batch_size and max_batch_size including the end points.
    """
    shuffled_values = values.copy()
    random.shuffle(shuffled_values)

    start_id = 0
    grouped_values_list = []
    while start_id < len(shuffled_values):
        num_values = random.randint(min_batch_size, max_batch_size)
        grouped_values_list.append(shuffled_values[start_id:start_id + num_values])
        start_id += num_values
    assert sum([len(_) for _ in grouped_values_list]) == len(values)
    return grouped_values_list
ab122041de32fb731251dacc5cfac77888e8ae03
642,653
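A quick usage sketch for generate_random_batches above (output varies run to run; note the final batch may come out smaller than min_batch_size because the list simply runs out):

batches = generate_random_batches(list(range(10)), min_batch_size=2, max_batch_size=4)
print(batches)  # e.g. [[3, 7, 1], [0, 9, 4, 2], [8, 5], [6]]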
def _set_cdict(HighRGB, MediumRGB, LowRGB):
    """ Helper function used to set color map from 3 RGB values.

    Args:
        HighRGB: RGB with highest values
        MediumRGB: RGB with medium values
        LowRGB: RGB with lowest values

    Returns:
        dictionary with colors and localizations on color bar.
    """
    cdict = {'red': ((0.0, LowRGB[0], LowRGB[0]),
                     (0.5, MediumRGB[0], MediumRGB[0]),
                     (1.0, HighRGB[0], HighRGB[0])),
             'green': ((0.0, LowRGB[1], LowRGB[1]),
                       (0.5, MediumRGB[1], MediumRGB[1]),
                       (1.0, HighRGB[1], HighRGB[1])),
             'blue': ((0.0, LowRGB[2], LowRGB[2]),
                      (0.5, MediumRGB[2], MediumRGB[2]),
                      (1.0, HighRGB[2], HighRGB[2]))}
    return cdict
b3525e65f88d196989b2bbf8da9e108b8f9f6bde
336,755
from pathlib import Path

def paths_to_relative(root, paths):
    """ Converts a list of absolute paths to paths relative to the root project path. """
    return list(map(lambda x: str(Path(x).relative_to(root)), paths))
d0bdc68c76b6953d629f99f0c5db4d26174be390
477,155
def format_sequence_search_terms(sequence, filter_terms=None):
    """
    Format parameters for a sequence search
    :param str sequence: one letter sequence
    :param lst filter_terms: Terms to filter the results by
    :return str: search string
    """
    params = {
        'json.nl': 'map',
        'start': '0',
        'sort': 'fasta(e_value) asc',
        'xjoin_fasta': 'true',
        'bf': 'fasta(percentIdentity)',
        'xjoin_fasta.external.expupperlim': '0.1',
        'xjoin_fasta.external.sequence': sequence,
        'q': '*:*',
        'fq': '{!xjoin}xjoin_fasta'
    }
    if filter_terms:
        for term in ['pdb_id', 'entity_id', 'entry_entity', 'chain_id']:
            filter_terms.append(term)
        filter_terms = list(set(filter_terms))
        params['fl'] = ','.join(filter_terms)
    return params
64dba7ea091723e92eece9c00668e7ef1b2654a9
187,721
def get_regex(keyword, template):
    """
    Turn a keyword into a regex, according to a template id:
    - template 0 is for stems
    - template 1 is for one-word expressions
    - template 2 is for two-word expressions; the two words can appear in the
      sentence in different order with optional other words in between them

    Parameters
    ----------
    keyword: str
    template: int

    Returns
    -------
    regex: str
    """
    if template == 0:
        return rf"\b{keyword}.*?\b"
    elif template == 1:
        return rf"\b{keyword}\b"
    elif template == 2:
        kwd1, kwd2 = keyword.split(' ')
        return rf"\b{kwd1}\b[^.]*?\b{kwd2}\b|\b{kwd2}\b[^.]*?\b{kwd1}\b"
7f6cc46db9bf0d91b0ccd6baba6b35ebc81fa8c6
452,688
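A quick usage sketch for get_regex above, showing the order-insensitive two-word template (invented keyword and sentence):

import re

pattern = get_regex('climate change', template=2)
print(bool(re.search(pattern, "change in the global climate")))  # -> True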
def get_parameter_for_suite(suite_name):
    """Return a parameter for which suite to run the tests for.

    Args:
        suite_name: str. The suite name whose tests should be run. If the
            value is `full`, all tests will run.

    Returns:
        list(str). A list of command line parameters for the suite.
    """
    return ['--suite', suite_name]
36944bd9f68f376cacb3c375ba3b74f958f18916
417,278
def next_field_pad(pos_prev, offset, width, display):
    """
    Local helper calculates padding required for a given previous position,
    field offset and field width.

    pos_prev    Position following previous field
    offset      Offset of next field
    width       Width of next field
    display     True if next field is to be displayed, otherwise False.

    Returns a tuple containing::

        [0] True if next field is to be placed on next row
        [1] Amount of padding (0..11) to be added from end of last field
            or start of row
        [2] Position of next free space after new element
    """
    if not display:
        # Field not to be displayed
        return (False, 0, pos_prev)
    if (offset < pos_prev) or (offset + width > 12):
        # Force to next row
        next_row = True
        padding = offset
    else:
        # Same row
        next_row = False
        padding = offset - pos_prev
    pos_next = offset + width
    return (next_row, padding, pos_next)
1f3d503d57813c51a3303a1049f0364d9aedf221
117,578
def ToText(value):
    """
    Convert int/float to a string
    :param any value: int or float.
    :return: string(str)
    """
    if not isinstance(value, str):
        text = str(value)
    else:
        text = value
    return text
de2b190ed1ae409e203b23360ef5bf566391a3c1
119,294
def select_fields(dic, fields):
    """ Selects items from a row; if a field doesn't exist, None is used. """
    return {field: dic.get(field, None) for field in fields}
58efb67ecdad6930ff9eaf6a59f53e3d0d018dbe
429,033
def subtract(x, y):
    """Subtract x from y and return the value"""
    return y - x
708c3de6a50e7ca2fd08722133d901b79d8bac15
643,079
def _rename_time_index(dict_da):
    """Rename the time dim of each DataArray to ensure a proper merge.

    Each time dim is associated to its probe.

    Parameters
    ----------
    dict_da : dict
        A dictionary of time series DataArrays

    Return
    ------
    new_dict : dict
        A dictionary of renamed DataArrays
    """
    new_dict = {}
    for i, (key, xr_da) in enumerate(dict_da.items()):
        renamed_xr_da = xr_da.rename({'time': 'time{}'.format(i + 1)})
        new_dict[key] = renamed_xr_da
    return new_dict
1d7233840918035cfaddd444a804673f6fe2f486
567,737
def strip(s):
    """Convert `s` to a string and return it white-space stripped."""
    return str(s).strip()
4ba378366b988738a542baa77b364598808b7dca
325,080
def diff_dicts(dicts, ignore=None):
    """
    Given a sequence of dicts, returns

        common, [difference1, difference2, ...]

    where `common` is a dict containing items in all dicts, and `differenceN`
    is a dict containing keys unique to the corresponding dict in `dicts`,
    ignoring any keys in `ignore`.
    """
    dicts = [d.copy() for d in dicts]
    ignore = [] if ignore is None else ignore
    for key in ignore:
        for d in dicts:
            d.pop(key, None)
    keyvals = [set(zip(d.keys(), d.values())) for d in dicts]
    common = keyvals[0].intersection(*keyvals[1:])
    differences = [dict(sorted(b.difference(common))) for b in keyvals]
    return dict(common), differences
b457894020c0b47e1fdabcd20d9e1af5aec0617d
632,653
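A quick usage sketch for diff_dicts above (invented dicts):

common, diffs = diff_dicts([{'a': 1, 'b': 2}, {'a': 1, 'b': 3}])
print(common)  # -> {'a': 1}
print(diffs)   # -> [{'b': 2}, {'b': 3}]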
from typing import Iterable
from typing import Sized

def max_sequence_length(sequences: Iterable[Sized]):
    """Computes the length of the longest sized element in an iterable.

    Args:
        sequences (Iterator): An iterator of some sized type

    Returns:
        max_len (int): The length of the longest sized type in sequences
    """
    max_len = 0
    for entry in sequences:
        if len(entry) > max_len:
            max_len = len(entry)
    return max_len
4101ccb8fb0ed6d5ca2f49e3d45ed729d2ffcd4b
91,449
def part_2(input_file):
    """
    forward value always adds to horizontal
    vertical change = pitch * forward value, where pitch = down - up
    when pitch is positive: submarine is facing down and depth increases
    when pitch is negative: submarine is facing up and depth decreases
    """
    horizontal = 0
    vertical = 0
    pitch = 0
    with open(input_file) as file:
        for line in file:
            line = line.split()
            if line[0] == "forward":
                horizontal += int(line[1])
                # a positive pitch increases depth, a negative pitch
                # decreases it (consistent with the docstring)
                vertical += int(line[1]) * pitch
            elif line[0] == "down":
                pitch += int(line[1])
            else:  # line[0] == 'up'
                pitch -= int(line[1])
    return horizontal * vertical
69db616db602b7b20ef54ff71ba59828ccb4b120
180,681
def apply_if_valid(name, trial, callback=None, raise_if_not=True):
    """Detect a parameter in trial and call a callback on it if provided

    Parameters
    ----------
    name: str
        Name of the param to look for
    trial: `orion.core.worker.trial.Trial`
        Instance of trial to investigate
    callback: None or callable
        Function to call with (trial, param) when a parameter is found.
        Defaults to None
    raise_if_not: bool
        raises RuntimeError if no parameter is found.
        Defaults to True.

    Returns
    -------
    bool
        False if parameter is not found and `raise_if_not is False`.
        True if parameter is found and callback is None.
        Else, output of callback(trial, param).
    """
    for param in trial.params:
        if param.name == name:
            return callback is None or callback(trial, param)

    if raise_if_not:
        raise RuntimeError("Provided trial does not have a compatible configuration. "
                           "A dimension named '%s' should be present.\n %s"
                           % (name, trial))
    return False
503e8b6b20aae50703259e036a0922a66d52e8ac
417,862
def none_if_empty(tup):
    """Returns None if passed an empty tuple

    This is helpful since a SimpleVar is actually an IndexedVar with
    a single index of None rather than the more intuitive empty tuple.
    """
    if tup == ():  # `is ()` raises a SyntaxWarning on modern Python
        return None
    else:
        return tup
26ee7bb9720eaa532d901b9c1f6c4a0fb6f7a340
8,358
from typing import Tuple
import colorsys

def rgb_to_hsv(rgb: int) -> Tuple[float, float, float]:
    """
    Converts rgb int value to hue, saturation and value parts in percents.
    :param rgb: int value
    :return: Tuple[float, float, float]
    """
    r = (rgb & 0xFF0000) >> 16
    g = (rgb & 0x00FF00) >> 8
    b = rgb & 0x0000FF
    r, g, b = r / 255, g / 255, b / 255
    return colorsys.rgb_to_hsv(r, g, b)
ba3b2270cff9fc4e87388fe2bd7067d4a10918b8
273,860
def _bytes(_str):
    """
    Convert ordinary Python string (utf-8) into bytes object
    (should be considered as c-string).

    @rtype: bytes
    """
    return bytes(str(_str), "utf-8")
19470c93f6eac81947e852c9b716a4b9956c4bbb
317,642
import hashlib

def md5sum(filename, first_block_only=False, blocksize=65536):
    """Gets md5checksum of a file in memory-safe manner.
    The file is read in blocks/chunks defined by the blocksize parameter.
    This is a safer option than reading the entire file into memory if the
    file is very large.

    @param filename <str>: Input file on local filesystem to find md5 checksum
    @param first_block_only <bool>: Calculate md5 checksum of the first block/chunk only
    @param blocksize <int>: Blocksize of reading N chunks of data to reduce memory profile
    @return hasher.hexdigest() <str>: MD5 checksum of the file's contents
    """
    hasher = hashlib.md5()
    with open(filename, 'rb') as fh:
        buf = fh.read(blocksize)
        if first_block_only:
            # Calculate MD5 of the first block or chunk of the file.
            # This is a useful heuristic when potentially calculating
            # MD5 checksums of thousands or millions of files.
            hasher.update(buf)
            return hasher.hexdigest()
        while len(buf) > 0:
            # Calculate MD5 checksum of entire file
            hasher.update(buf)
            buf = fh.read(blocksize)
    return hasher.hexdigest()
22ed8fef6eecba2c600a5ee6f9ff3fb0a3c48474
507,366
def _get_zip_filename(xml_sps, output_filename=None):
    """
    Obtain the canonical name of a ZIP file from
    `xml_sps: packtools.sps.models.sps_package.SPS_Package`.

    Parameters
    ----------
    xml_sps : packtools.sps.models.sps_package.SPS_Package
    output_filename : name of the zip file

    Returns
    -------
    str
        e.g. "1414-431X-bjmbr-54-10-e11439.zip"
    """
    if not output_filename:
        return f'{xml_sps.package_name}.zip'
    else:
        return output_filename
2545fc02849d73da99a2d6c4bda3d69307f8afa1
675,223
def remove_control_char(pdf_content):
    """
    Remove control characters (newlines, tabs, escape characters, etc.);
    python-docx does not support writing control characters.
    :param pdf_content: PDF file content
    :return: the content with control characters removed
    """
    # Use str's translate method to strip all characters with an ASCII
    # code below 32. See http://ascii.911cha.com/
    return pdf_content.translate(dict.fromkeys(range(32)))
84dff48a5654b12f7446f77cbc4d132716d2018c
697,523
def lower_words(docs):
    """Convert words in corpus to lowercase"""
    lowered_words = []
    for doc in docs:
        lowered_words.append([word.lower() for word in doc])
    return lowered_words
1f07d8360e43875a7df253e2354305b65467af58
538,288
import random
import re

def generate_unigram_sentence(word_list):
    """Generates a sentence using the unigram frequency table"""
    sentence = ''
    while True:
        rand = random.random()
        val_sum = 0
        for k, v in word_list.items():
            val_sum += v  # Word's value is added to val_sum
            if val_sum > rand:  # First word to surpass the random value is used
                if re.match(r'([\.\,\!\?\;\:\=\+\/\*\\]+)', k):  # Prevent punct. spacing
                    sentence += k
                else:
                    sentence += ' ' + k
                break
        if re.search(r'([\.\!\?]+)', sentence):  # Sentence is complete once it has proper punct.
            break
    sentence = sentence.strip()  # Strip whitespace for cleanliness
    sentence = sentence.capitalize()  # Capitalize first letter
    return sentence
8767db1614a578f38fa1a3317e80249eac8cc24c
563,152
def _multiply_large_number_and_digit(number: str, digit: int) -> str:
    """Multiply a large number (as string) and a digit."""
    large_product = ''
    remainder = 0
    for num_digit in reversed(number):
        current_product_digit = remainder + int(num_digit) * digit
        large_product = str(current_product_digit % 10) + large_product
        remainder = current_product_digit // 10
    if remainder:
        large_product = str(remainder) + large_product
    return large_product
14375259c956ead6e3d7379fb0b1af6d9a680e81
189,118
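A quick check of the schoolbook carry logic above (999 * 9 = 8991):

print(_multiply_large_number_and_digit('999', 9))  # -> '8991'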
def convert_dms_to_decimal(elem):
    """Convert latitude/longitude from DMS form to decimal form

    Keyword arguments:
    elem -- latitude or longitude : (degree, min, sec)
    """
    return elem[0] + elem[1] / 60 + elem[2] / 3600
bc35c69dd4035495996c1c08ad83a3effa940dbd
159,567
def is_true(value):
    """
    Check whether value contains the string 'true' in a variety of
    capitalizations (e.g. "True" or "TRUE").
    """
    return value.lower() == 'true'
51cc0ba42bfef3a8206d4a294364aca0708aa054
583,030
def get_data_type(datum):
    """Determines the data type to set for the PostgreSQL database"""
    if datum.isdigit():
        return "integer"
    elif datum.replace(".", "", 1).isdigit():
        return "decimal"
    return "text"
220e3e02a879f3920349d2280f75c45ecfa4a37d
484,551
def to_byte_string(value, count=2, signed=False, byteorder='little'):
    """Take an integer and return a string of its byte values.

    Example: to_byte_string(123456, count=4) = '64 226 1 0'
    """
    byte_value = value.to_bytes(count, byteorder=byteorder, signed=signed)
    return ' '.join([str(x) for x in byte_value])
d1b2cc12000958a3858f73271b300ebe15480a43
26,157
import csv

def read_csv(filename):
    """
    Function that reads csv data and returns data in an array

    INPUTS:
        filename (str): name of csv file to be read

    OUTPUTS:
        data (list): csv data
    """
    with open(filename, 'r') as dest_f:
        data_iter = csv.reader(dest_f, delimiter=',', quotechar='"')
        data = [row for row in data_iter]
    return data
721ba3f12e5769c25f634e2e9d8259629ce21277
314,711
def add_until_100(array: list):
    """Add numbers from list `array` to a sum, skipping any number that
    would make the sum exceed 100."""
    if not array:  # base case
        return 0
    sum_remaining = add_until_100(array[1:])
    if array[0] + sum_remaining > 100:
        return sum_remaining
    else:
        return array[0] + sum_remaining
123b4d7e9e7d833055ae84692f8dec119cf03317
61,143
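A quick check of the recursion above (invented inputs; note each number is skipped only when adding it would push the running sum past 100):

print(add_until_100([10, 20, 30]))  # -> 60
print(add_until_100([60, 50, 40]))  # -> 90 (60 is skipped: 60 + 90 > 100)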
def _get_dict_subset(dic, keys):
    """ Return a subset of a dictionary containing only the specified keys. """
    return {k: dic[k] for k in keys if k in dic}
78625cdb47e94ba588f6f4ae6df8d4222825c2fb
322,829