Columns: content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k)
def pseudo_display(obj): """ Display the pseudo of a user. """ return obj.get_pseudo()
2b7b2caad2f31acda72de1e3c62c477635f31510
406,763
import glob def get_coverage_reports(report_loc): """ Gets all reports in a given location :type report_loc: string :param report_loc: The location of the reports :rtype: list :return: A list of reports in the given location """ return glob.glob(u"{0}/*.txt".format(report_loc))
3433848d275c8e92c2336b11653d6d4a9434b5c5
312,724
def build_content_type(format, encoding='utf-8'): """ Appends character encoding to the provided format if not already present. """ if 'charset' in format: return format return "%s; charset=%s" % (format, encoding)
771eecd94e37cab1f3ffbf9a671bd77fe3639be1
666,553
import plistlib def parse_plist_file(file_path): """ Parse the given plist file returning a dictionary containing the data. :param file_path: Path to the plist file. :return: Return the parsed plist_file. """ with open(file_path, 'rb') as file_pointer: plist_file = plistlib.load(file_pointer) return plist_file
fc849572428157b028e4d024de2819cdc2900481
581,813
def remove_www(hostname): """ Removes ``www``. from the beginning of the address. Only for routing purposes. ``www.test.com/login/`` and ``test.com/login/`` should find the same tenant. """ if hostname.startswith("www."): return hostname[4:] return hostname
eb2454bfafd416cc3fee3b735917c4ca5bbf1ea1
555,155
def getTimeData(sample_rate:float, num_samples:int) -> list: """Create and return an array containing the time each sample is taken. This assumes equal sampling periods. Used to set the time axis for plotting.""" time_array = [] for i in range(num_samples): # Iterate through each sample sample_time = i/sample_rate # Calculate the time this sample is taken time_array.append(sample_time) # Add time to results array return time_array
e327f291f1ac356fe3c62a45f04335b6e8eeef59
360,918
def inclusion(first_x, second_x): """ Check if a list is included in another. Parameters ---------- first_x : list List to evaluate. second_x : list Reference list to compare with. Returns ------- bool True if first_x is contained in second_x. """ return all(elem in second_x for elem in first_x)
ad8113c9b6fa189be2fb2ba722f21e4b34615dd0
351,944
def var_is_pathogenic(variant_data): """Check if variant is listed as pathogenic in ClinVar :param variant_data: A GeminiRow for a single variant. :type variant_data: GeminiRow. :returns: bool -- True or False. """ if variant_data.INFO.get('clinvar_sig') is not None: if "pathogenic" in variant_data.INFO.get('clinvar_sig'): return True else: return False else: return False
d146c11693cad47310399595c5062d2bea728877
577,282
def flatten(x): """flatten a list of lists into a list.""" return [item for sublist in x for item in sublist]
f0dd50cd3519eab104421e0867d35f22e37ee2d8
310,973
import typing import math def _quadratic_distance_solution(d_0: float, v_0: float, a: float, d: float) -> typing.Tuple[float, float]: """ Given an equation of the form: position = initial_position + initial_velocity * t + acceleration * t**2 / 2.0 Using the quadratic equation: a * x**2 + b * x + c = 0 And the solution: x = (-b ± math.sqrt(b**2 - 4 * a * c)) / (2 * a) Where: x = t a = acceleration / 2.0 b = initial_velocity c = initial_position - position Thus: t = (-initial_velocity ± math.sqrt(initial_velocity**2 - 4 * acceleration / 2.0 * (initial_position - position))) / (2 * acceleration / 2.0) This returns the roots of t: t = (-v_0 ± sqrt(v_0**2 - 2 * a * (d_0 - d))) / a """ part_two = math.sqrt(v_0**2 - 2 * a * (d_0 - d)) / a part_one = -v_0 / a return part_one + part_two, part_one - part_two
eb32a07e0055c2a01a78a8f5565a94cd620f06fc
244,245
def read_data(path, histories): """ Function that reads the data in a TOPAS simulation scorer output file. Assumes the simulation scored a quantity and its standard deviation, meaning only two values follow the three coordinates of the scoring voxel. Returns the two scored quantities as arrays of floats along with the header lines and coordinates of the scoring voxels. The latter two are used to reconstruct the file into the same format using create_new_file(). """ with open(path) as file: value, std_dev, header, coords = [], [], [], [] # initialize empty arrays for line in file: if "#" in line: # only include header lines header += [line] if "#" not in line: # only include data lines coords += [line.split(",")[:3]] # save x,y,z coordinates values = line.split(",")[3:] # cut off x,y,z values values = [ float(value.replace("e", "E").replace("\n", "")) for value in values ] # convert to float value += [values[0]] # unpack value std_dev += [values[1]] # unpack standard deviation return value, std_dev, header, coords
9ba8591540867081d0051af531ab0d88c5db1a2e
210,556
def resize_image(image, percent): """ Resize an image to a scale amount. image: (PIL.Image) The image to scale. percent: (float) The percentage of the original size to scale to. Returns the scaled image. """ if percent == 100: return image prop = percent / 100.0 (width, height) = image.size return image.resize( (int(width * prop), int(height * prop)) )
f222c998559b7ed56d5599402c2fa6f74efe85ae
390,358
def convert_raw_cookie2dict(raw_cookie: str) -> dict: """ Convert a cookie string copied from the browser :param raw_cookie: string :return: dict """ return {i.split("=", 1)[0]: i.split("=", 1)[-1] for i in raw_cookie.split("; ")}
7617d9ffb3871529c8cc0e0c52afb8382daef3cf
188,325
def remove_superior_symbol(text): """Remove superior and inferior symbols from text""" text = text.replace(">", "") text = text.replace("<", "") return text
942b850642b4addb34dae1eb2e8585310570b489
355,450
def get_strptime_pattern(s): """ :param s: str :return: strptime pattern matching the length of s NOTE: be careful with microseconds. It is not handled properly """ if len(s) > 20: raise Exception("String is too long") return "%Y%m%d%H%M%S%f"[: len(s) - 2]
3c376bc9c5f5ad9ff3aad1212701bc25a1261177
620,165
def nearest_2n_indices(x, i, n): """Calculates the nearest 2n indices centred at i in an array x, or as close as possible to i, taking into account that i might be within n indices of an endpoint of x. The function returns the limiting indices as a pair, and always returns an interval that contains 2n+1 indices, assuming x is long enough. I.e., away from endpoints, the function returns (i-n, i+n). If i is within n of index 0, the function returns (0, 2n). If i is within n of last index L, the function returns (L-2n, L). Remember to add one to the upper limit if using it in a slice. """ assert len(x) > 2 * n, "x is not long enough" # ixlo = 0 ixhi = len(x) - 1 if i < n: # too close to low end return (0, 2 * n) elif i > ixhi - n: # too close to high end return (ixhi - 2 * n, ixhi) else: return (i - n, i + n)
590e3fa2055faa18d99bcbddb326bd00fae5ea28
371,602
def remove_multiple(title: str, to_remove: list) -> str: """ Removes unnecessary words from the movie's/series' title, i.e. words that will not give an appropriate emoji """ title = title.lower() for word in to_remove: title = title.replace(word, "") return title
80a147a74752a66b50aedd6595cba1cdf309914f
437,335
def red_bold(payload): """ Format payload as red. """ return '\x1b[31;1m{0}\x1b[39;22m'.format(payload)
2b1769b7705dcd2a976ce3d92892754e123ce2b3
412,321
import six def calc_pct(value, bounds, is_float): """ Helper function to calculate a scaling tuple value from a percentage. """ if isinstance(value, six.string_types): value = float(value[:-1]) * (bounds[1] - bounds[0]) / 100.0 + bounds[0] return value if is_float else int(value)
8c713226ddb73e027878c891fdecedacd0664e96
512,501
def dict_to_list(dict): """Transform a dictionary into a list of values.""" return list(dict.values())
fcf2b0b10639acbd7fceec44dfaf43c1ba3915ae
420,422
import importlib def get_object(module, name): """ Gets object from given module by name :param module: full module name :param name: object name :return: loaded object """ m = importlib.import_module(module) f = getattr(m, name) return f
1425549d1b92b31ed61424541e3edd20d65b3c19
336,060
import time def time_to_date(timestamp): """ timestamp to date. :param timestamp :int,e.g.1537535021 :return:Year-Month-Day Hour:Minute:Second """ timearr = time.localtime(timestamp) otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timearr) return otherStyleTime
66c7f14ef78d329874017eafe9a798d02464227d
301,586
from typing import Mapping from typing import Callable from typing import Any from typing import Dict def translate_dict_func(d: Mapping[str, Callable[..., Any]]) -> Callable[..., Dict[str, Any]]: """ Overview: Transform dict with funcs to function generating dict. Arguments: - d (:obj:`Mapping[str, Callable[..., Any]]`): Dict with funcs Returns: - func (:obj:`Callable[..., Dict[str, Any]]`): Function generating dict Example: >>> f1 = lambda x, y: x + y >>> f2 = lambda x, y: x - y >>> f3 = lambda x, y: x * y >>> fx = translate_dict_func({'a': f1, 'b': f2, 'c': f3}) >>> fx(2, 3) # {'a': 5, 'b': -1, 'c': 6} >>> fx(5, 11) # {'a': 16, 'b': -6, 'c': 55} """ def _func(*args, **kwargs) -> Dict[str, Any]: return {k: f(*args, **kwargs) for k, f in d.items()} return _func
0902ff2966191b8bd3ba7674cca62dd8ec7ffe2c
526,532
def entity_data_cleanse(entity: str, type: str, term: str, ): """ Ignores twitter handles, quantity, date, original search term, links """ return "@" not in entity and \ type != "QUANTITY" and \ type != "DATE" and \ entity != term.lower() and \ "http:" not in entity and \ "https:" not in entity
6ced03cd669510c5b6e491b7480c270025219642
253,889
def get_acabout(core_data, soup): """ Returns the "about" text from the aircraft landing page. The about ptag data is at a +1 index from the aircraft core data. Params: core_data: get_coredata() function response soup: bs4 soup object for the aircraft site link Returns: text for about data """ if core_data != 'error': # find all ptags from soup object acptags = soup.find_all('p') try: core_data_index = core_data[0] # The about data index is +1 after core data ptag index about_index = core_data_index + 1 # Get text response acabout = acptags[about_index].text.strip() except: acabout = 'N/A' else: acabout = 'N/A' return acabout
34dc3868b22e862fbcb3ec85a9431f48bb5daf60
180,020
def copper_thermal_conductivity_CRC(T): """ Copper thermal conductivity as a function of the temperature from [1]. References ---------- .. [1] William M. Haynes (Ed.). "CRC handbook of chemistry and physics". CRC Press (2014). Parameters ---------- T: :class:`pybamm.Symbol` Dimensional temperature Returns ------- :class:`pybamm.Symbol` Thermal conductivity """ lambda_th = -5.409e-7 * T ** 3 + 7.054e-4 * T ** 2 - 0.3727 * T + 463.6 return lambda_th
d7917b31d28f5d151e837614783e5906c0bcc546
263,445
def calc_Br(Y, A_r, beta, in_edges_r, neg_edges_r): """ Calculates a new bias vector for a single edge type according to Eq 4. Parameters ---------- Y : np.array, entity embeddings A_r : np.array, linear mapping for this edge type beta : func from in_edges.keys() to float in_edges_r : dict of incoming edges for this edge type neg_edges_r : dict of incoming non-edges for this edge type Returns ------- np.array """ num = 0. denom = 0. for i, neighbors in in_edges_r.items(): for j in neighbors: num += beta(i, j)*(A_r.dot(Y[j]) - Y[i]) denom += beta(i, j) for i, neighbors in neg_edges_r.items(): for j in neighbors: num -= beta(i, j)*(A_r.dot(Y[j]) - Y[i]) denom -= beta(i, j) return num / denom
8ad7e2f56e3cc16ca641d0b5f88a34b9654e7c9e
425,550
def is_ogc(name): """Checks if name is a valid OGC entity type""" return name in ["Things", "Sensors", "Locations", "HistoricalLocations", "Datastreams", "ObservedProperties", "Observations", "FeaturesOfInterest"]
99f27f612383a1823b675dc07670e0ef56148f16
447,083
import math def g2d2(h, tw, E, Fy): """Web shear coefficient, Case (a) For webs of rolled I-shaped members: if h/tw <= 2.24*math.sqrt(E/Fy): Cv = 1.0 else: Cv = None Args: h (float): web height tw (float): web thickness E (float): modulus of elasticity Fy (float): yield strength of web Returns: Cv (tuple(float, str)): web shear coefficient """ if h/tw <= 2.24*math.sqrt(E/Fy): Cv = 1.0 text = () else: Cv = None text = () return Cv, text
cc0d0bcf39dbffbbd93e4f8637d95c7e9125056d
560,253
def calc_col_diff(bbox): """Calculates difference in columns given bounding box.""" _, col_min, _, col_max = bbox return col_max - col_min
97cdd9a2c6fb131fb0d772ddbc7198bedcf7e3ea
397,512
def _replace_angle_brackets(url_string): """ Replace angle brackets with braces in a given url. Swaggerhub prefers URLs with variables to have curly braces instead of angle brackets, so this replaces all instances of angle brackets with curly braces. For example: /users/<user_id> -> /users/{user_id} """ return url_string.replace("<", "{").replace(">", "}")
117eb3081c0db9e4f32fb71b059cd923c297d208
612,575
from typing import Dict from typing import Any import yaml def load_config(config_path: str) -> Dict[str, Any]: """Load a YAML configuration file given its path.""" with open(config_path, "r") as fh: config_yaml = yaml.safe_load(fh) return config_yaml
ce0788a5e9668df287c31cc557f618f17f020051
550,989
def filter_for_dataset(dataset_hash, items): """ Return only the subset of items that is related to the dataset_hash """ filtered = [] for item in items: if item['file'].startswith(dataset_hash): filtered.append(item) return filtered
6f797a9ca021f0d88f9742434928a787c445bf41
197,671
import re def ggpht_s1600_extender(pipeline_index, finder_image_urls, extender_image_urls=[], *args, **kwargs): """ Example: http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg to http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg """ now_extender_image_urls = [] search_re = re.compile(r'/s\d+/', re.IGNORECASE) for image_url in finder_image_urls: if 'ggpht.com/' in image_url.lower(): if search_re.search(image_url): extender_image_url = search_re.sub('/s1600/', image_url) now_extender_image_urls.append(extender_image_url) output = {} output['extender_image_urls'] = extender_image_urls + now_extender_image_urls return output
c9d6e9a99d15a1e432b040758ae8280c91d71c87
323,972
def get_hiscore(data: dict) -> int: """Get the current highest score.""" scores = data.get('scores', []) if scores: return max(scores) return 0
5df27b66ca9a8bd756695c0571f7ed0ffc6f3082
209,782
def choose_caffe_model_files(skeleton_pose_model: str): """Returns Caffe model file names based on the given input model name. Args: skeleton_pose_model: Caffe model name which is either 'coco' or 'mpi'. Returns: A tuple which contains the names for proto & weights files and the number of skeleton points. """ if skeleton_pose_model == 'coco': proto_file_name = 'pose_deploy_linevec.prototxt' weights_file_name = 'pose_iter_440000.caffemodel' n_skeleton_points = 18 else: proto_file_name = 'pose_deploy_linevec_faster_4_stages.prototxt' weights_file_name = 'pose_iter_160000.caffemodel' n_skeleton_points = 14 return proto_file_name, weights_file_name, n_skeleton_points
ac6709b4502305c597fb731b6bd89260d9a75704
438,240
def get_static_data(modelSpec): """ Return a dictionary of static values that all objects of this model have. This applies only to kubernetes resources where ``kind`` and ``apiVersion`` are statically determined by the resource. See the `Kubernetes OpenAPI Spec Readme`__. For example for a v1 Deployment we return :: { 'kind': 'Deployment', 'apiVersion': 'apps/v1' } .. __: https://github.com/kubernetes/kubernetes/blob/master/api/openapi-spec/README.md#x-kubernetes-group-version-kind """ if 'x-kubernetes-group-version-kind' in modelSpec: values = modelSpec['x-kubernetes-group-version-kind'] if len(values) == 1: group = values[0].get('group', '') if group: group = group + '/' return { 'kind': values[0]['kind'], 'apiVersion': group + values[0]['version'] } else: return {} else: return {}
6b06d3fc05bc53ed0a6436f57ff7a1f89ef46b65
644,516
def freeze_one_half(basis): """ Split the structure into two parts along the z-axis and then freeze the position of the atoms of the upper part (z>0.5) by setting selective dynamics to False. Args: basis (pyiron_atomistics.structure.atoms.Atoms): Atomistic structure object Returns: pyiron_atomistics.structure.atoms.Atoms: Atomistic structure object with half of the atoms fixed """ basis.add_tag(selective_dynamics=None) _, _, z = basis.scaled_pos_xyz() for selector, ind in zip(z < 0.5, range(len(basis))): if selector: basis.selective_dynamics[ind] = [True, True, True] else: basis.selective_dynamics[ind] = [False, False, False] return basis
f48f1c3e1b9b73b203ea9dec0091e6500c5a338c
673,939
def get_utt_id(segment): """ Gives utterance IDs in a form like: en_4156-a-36558-37113 """ return "{}-{}-{}-{}".format(segment.filename, segment.channel, int(segment.begin * 100), int(segment.end * 100),)
bfb8507a17e4a35f6daa0df8eebf06141d47f1a0
652,081
def _parse_ctrl_d_all_show(output, regexp): """Function to parse lines like: array C physicaldrive 1I:1:2 (port 1I:box 1:bay 2, SATA, 1 TB, OK) into a list of tuples with the matching results like: [('array C', [('1I:1:2', 'SATA', '1 TB', 'OK')]),] The regexp arg must extract the four fields from the description lines. """ arr = [] cur = [] idx = None for line in output.split('\n'): line = line.strip() if line[:5].lower() == 'array' or line.lower() == 'unassigned': if idx: arr.append((idx, cur)) cur = [] idx = line else: res = regexp.search(line) if res: cur.append(res.groups()) if idx: arr.append((idx, cur)) return arr
c69249a1054c3d8565d95ad1b3390fe77fce8403
282,768
import importlib def get_class(string): """Import class from string. Parameters ---------- string : str absolute python import path for the class. (i.e. apf.consumers.GenericConsumer) Returns ------- object Class imported. """ module_name, class_name = string.rsplit(".", 1) module = importlib.import_module(module_name) class_ = getattr(module, class_name) return class_
d94c03e426e27b4be5a6f5e7fbe83584874565d1
246,856
def scramble(mouvs): """ Converts movements from (letter, bool, bool) tuples to a human-readable scramble string Parameters ---------- mouvs: list The list of movements (tuple : (f, cw, r180)) Returns ------- mvts: str The movements <F B L R U D> [' 2] [_] """ mvts = "" for mvt in mouvs: # Add LETTER + possibly ' or 2 + SPACE mvts += mvt[0] + ("2" if mvt[2] else "" if mvt[1] else "'") + " " return mvts.strip()
165357ce2c1ac230231b6b5d08693de3050a475d
328,178
def normalize_scores(scores): """ Normalizes predictions scores to a probabilities-like format. Args: scores (list): Contains the predictions scores as predicted by the \ model Returns: list: The normalized scores """ s = sum(scores) normalized = [score/s for score in scores] return normalized
14ce754bd81fc7c597d34bd199d68c6e8bbd0c54
296,149
def _clean_pattern(rv: str) -> str: """Clean a regular expression string.""" rv = rv.rstrip("?") if not rv.startswith("^"): rv = f"^{rv}" if not rv.endswith("$"): rv = f"{rv}$" return rv
c73f7d8edd4a50ae8352c65b47964db1afb57c74
223,388
def ecio_quality_rating(value, unit): """ ECIO (Ec/Io) - Energy to Interference Ratio (3G, CDMA/UMTS/EV-DO) """ if unit != "dBm": raise ValueError("Unsupported unit '{:}'".format(unit)) rating = 0 if value > -2: rating = 4 elif -2 >= value > -5: rating = 3 elif -5 >= value > -10: rating = 2 elif value <= -10: rating = 1 return rating
4cc21012464b8476d026f9dfbc35b8b1ea3c2d85
279
import re def entry_in_first_week(line): """ Takes a line of a log file and checks if the entry happened in the first 7 days of July 1995. :param line: The log file entry, eg '129.94.144.152 - - [01/Jul/1995:00:00:13 -0400] "GET / HTTP/1.0" 400 7074' :return: True | False """ # The timestamp is not at the start of the line, so search for it rather than anchoring at ^. pattern = re.compile(r"\[0[1-7]/Jul/1995") return bool(pattern.search(line))
855376c7638338a62aa1a8a353d2ec5d9d8ce40b
497,122
def layers(model_size): """Returns the layer specification for a given model name.""" if model_size == 'tiny': return ( ('linear', 100), ('activation', 'relu')) elif model_size == 'small': return ( ('conv2d', (4, 4), 16, 'VALID', 2), ('activation', 'relu'), ('conv2d', (4, 4), 32, 'VALID', 1), ('activation', 'relu'), ('linear', 100), ('activation', 'relu')) elif model_size == 'medium': return ( ('conv2d', (3, 3), 32, 'VALID', 1), ('activation', 'relu'), ('conv2d', (4, 4), 32, 'VALID', 2), ('activation', 'relu'), ('conv2d', (3, 3), 64, 'VALID', 1), ('activation', 'relu'), ('conv2d', (4, 4), 64, 'VALID', 2), ('activation', 'relu'), ('linear', 512), ('activation', 'relu'), ('linear', 512), ('activation', 'relu')) elif model_size == 'large_200': # Some old large checkpoints have 200 hidden neurons in the last linear # layer. return ( ('conv2d', (3, 3), 64, 'SAME', 1), ('activation', 'relu'), ('conv2d', (3, 3), 64, 'SAME', 1), ('activation', 'relu'), ('conv2d', (3, 3), 128, 'SAME', 2), ('activation', 'relu'), ('conv2d', (3, 3), 128, 'SAME', 1), ('activation', 'relu'), ('conv2d', (3, 3), 128, 'SAME', 1), ('activation', 'relu'), ('linear', 200), ('activation', 'relu')) elif model_size == 'large': return ( ('conv2d', (3, 3), 64, 'SAME', 1), ('activation', 'relu'), ('conv2d', (3, 3), 64, 'SAME', 1), ('activation', 'relu'), ('conv2d', (3, 3), 128, 'SAME', 2), ('activation', 'relu'), ('conv2d', (3, 3), 128, 'SAME', 1), ('activation', 'relu'), ('conv2d', (3, 3), 128, 'SAME', 1), ('activation', 'relu'), ('linear', 512), ('activation', 'relu')) else: raise ValueError('Unknown model: "{}"'.format(model_size))
a391b99cbe133911f27216161a1e22426753c064
268,980
def _compare_baseline_results(baseline, results): """Compare a baseline list of issues to list of results This function compares a baseline set of issues to a current set of issues to find results that weren't present in the baseline. :param baseline: Baseline list of issues :param results: Current list of issues :return: List of unmatched issues """ return [a for a in results if a not in baseline]
31bf636a2305fd986deda834b640dafc51e74a71
611,496
def find_arg_index_by_name(decls, name): """Return index of argument in decls with name. Args: decls - list of Declaration name - argument to find """ if decls is None: return -1 for i, decl in enumerate(decls): if decl.name == name: return i return -1
269f6518d5cf8a6ca7ff1c4e5e8889f5fe7941b8
186,754
def get_first(items_list: list): """Safely return the first element of a list.""" return items_list[0] if items_list else None
eb7276d4392c3ad6e1f32090acb3ad69fdd35107
122,741
def image(title, desc, image_name, group=None, height=None): """ Builds an image element. Image elements are primarily created and then wrapped into an image gallery element. This is not required behavior, however, and its independent usage should be allowed depending on the behavior required. The Javascript will search for the `image_name` in the component's `imgs` directory when rendering. For example, all verification images are output to `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output page will search for `image_name` within that directory. Args: title: The title to display desc: A description of the image or plot image_name: The filename of the image group: (optional) Title of lightbox group to join height: (optional) Height of image thumbnail to draw Returns: A dictionary with the metadata specifying that it is to be rendered as an image element """ ie = { 'Type': 'Image', 'Title': title, 'Description': desc, 'Plot File': image_name, } if group: ie['Group'] = group if height: ie['Height'] = height return ie
301f7831567e8845ef32ec33da4d5d273e50ac65
670,202
import requests from bs4 import BeautifulSoup def get_movies_libraries( token, fullURL = 'http://localhost:32400' ): """ Returns a :py:class:`list` of the key numbers of all Plex movie libraries on the Plex_ server. :param str token: the Plex_ server access token. :param str fullURL: the Plex_ server address. :returns: a :py:class:`list` of the Plex_ movie library key numbers. :rtype: list. """ params = { 'X-Plex-Token' : token } response = requests.get( '%s/library/sections' % fullURL, params = params, verify = False ) if response.status_code != 200: return None html = BeautifulSoup( response.content, 'lxml' ) library_dict = { int( direlem['key'] ) : ( direlem['title'], direlem['type'] ) for direlem in html.find_all('directory') } return sorted(set(filter(lambda key: library_dict[ key ][1] == 'movie', library_dict ) ) )
bc7cd3f3fe56a45d22310b9078cebc624e97ee29
362,351
def maprange( range_from, range_to, value):# {{{ """Map value from range_from -> range_to range_from and range_to are tuples """ (f1, f2) = range_from (t1, t2) = range_to new_value = t1 + ((value - f1) * (t2 - t1) / (f2 - f1)) return new_value
5d00a5b5cd983dd8702d37997357f99b81bc6944
584,750
def _ProcessProjectSD(fmt): """Convert a 'project' sort directive into SQL.""" left_joins = [] order_by = [(fmt('Issue.project_id {sort_dir}'), [])] return left_joins, order_by
0b25786a6446cf3f737f3822e12b675ed774a7ad
167,231
def create_value_dict(data_agg, super_star_avg_prices_2_agents, super_star_avg_prices_3_agents ): """ A function to create a comprehensive dictionary with all values that we need to create the plot. Args: data_agg (DataFrame): DataFrame with experimental results super_star_avg_prices_2_agents (array): Average prices of super star markets upon convergence (2 firm market). super_star_avg_prices_3_agents (array): Average prices of super star markets upon convergence (3 firm market). Returns: dict: Dictionary with values for plotting """ # To make the plotting easier, we write the data into a structured dict plotting_dict = {} # First the fully human markets plotting_dict['Humans'] = {} plotting_dict['Humans']['means'] = {} plotting_dict['Humans']['std'] = {} # Loop over all super games for sg in range(1,4): # Create subset of data for current sg current_subset_sg = data_agg.loc[data_agg['super_game'] == sg] # get means and std plotting_dict['Humans']['means'][sg] = list() plotting_dict['Humans']['means'][sg].extend( [current_subset_sg.loc[current_subset_sg['treatment'] == '2H0A']['winning_price'].mean(), current_subset_sg.loc[current_subset_sg['treatment'] == '3H0A']['winning_price'].mean()] ) plotting_dict['Humans']['std'][sg] = list() plotting_dict['Humans']['std'][sg].extend( [current_subset_sg.loc[current_subset_sg['treatment'] == '2H0A']['winning_price'].std(), current_subset_sg.loc[current_subset_sg['treatment'] == '3H0A']['winning_price'].std()] ) # Now the algorithm markets plotting_dict['Algorithm'] = {} plotting_dict['Algorithm']['means'] = list() plotting_dict['Algorithm']['means'].extend( [super_star_avg_prices_2_agents.mean(), super_star_avg_prices_3_agents.mean()] ) plotting_dict['Algorithm']['std'] = list() plotting_dict['Algorithm']['std'].extend( [super_star_avg_prices_2_agents.std(), super_star_avg_prices_3_agents.std()] ) return plotting_dict
fc7797ff939f25214c53d4fe359fe488a45bf233
671,373
import torch def lincomb(rp: torch.Tensor, coeff: torch.Tensor) -> torch.Tensor: """Returns the normal distributions for w linear combinations of p portfolios Args: rp (torch.Tensor): p-by-n matrix where the (i, j) entry corresponds to the j-th return of the i-th portfolio coeff (torch.Tensor): w-by-p matrix where the (i, j) entry corresponds to the i-th set and j-th coefficient for the j-th portfolio Returns: torch.Tensor: w-by-n matrix where the (i, j) entry corresponds to the j-th return for i-th portfolio from the linear combination """ return coeff @ rp
b1328f9004ee18ad1a8f0ea9e84ed10d31681ff3
652,961
def bytes_per_pixel(pixel_type): """ Return the number of bytes per pixel for the given pixel type @param pixel_type: The OMERO pixel type @type pixel_type: String """ if (pixel_type == "int8" or pixel_type == "uint8"): return 1 elif (pixel_type == "int16" or pixel_type == "uint16"): return 2 elif (pixel_type == "int32" or pixel_type == "uint32" or pixel_type == "float"): return 4 elif pixel_type == "double": return 8 else: raise Exception("Unknown pixel type: %s" % (pixel_type))
654b542d0a7cafa50c2f41e4456b19c62bb0efc3
584,153
def _ensure_list_of_lists(entries): """Transform input to being a list of lists.""" if not isinstance(entries, list): # user passed in single object # wrap in a list # (next transformation will make this a list of lists) entries = [entries] if not any(isinstance(element, list) for element in entries): # user passed in a list of objects, not a list of lists # wrap in a list entries = [entries] return entries
a1866fdd62861f40e7f5aa9ce491ba14fc5a6cc9
672,222
import hashlib def sha256(hash_input): """ Return value of SHA256 hash of input bytearray hash_input, as a nonnegative integer. """ assert isinstance(hash_input, bytearray) return int(hashlib.sha256(hash_input).hexdigest(), 16)
89e38f97e6d6958d903b3feaffb1b6808706f435
174,374
def _codon_slicing(nt_pos): """Return (AA_position, codon_index_start, codon_index_stop)""" aa_pos, remainder = divmod(nt_pos, 3) if remainder == 0: start = nt_pos - 3 else: start = nt_pos - remainder aa_pos += 1 return (aa_pos, start, start + 3)
357d968278801f4ceca4cdc57add564d16a65143
491,046
def char_waveform(length): """ Helper function for creating a char waveform PV. Args: length: The length of the array. Return: The dictionary to add to the PVDB. """ return {'type': 'char', 'count': length, 'value': [0]}
026332a96659edf672519aaab9db6fa49f99837a
613,916
def build_align_view(lines_layout): """Convert the lines_layout to an alignment list (for UI)""" return ['<' if c == 0 else '>' for c, _ in enumerate(lines_layout)]
3e85cb74a1e1c2c45ad5584f6cb9d21757abacaa
488,718
def get_expected_value_r(sum_fo_r, sum_fo, fg): """ Get the expected frequency from the given parameters. Parameters ---------- > `sum_fo_r`: sum of frequencies for a particular observed response (row sum) > `sum_fo`: sum of all observed frequencies > `fg`: frequency of an observed group / category Returns ------- The expected frequency value from the given parameters. """ return (sum_fo_r / sum_fo) * fg
5c2c0763b6cbb339a92c90a931b2f69a7944cb28
480,312
def fmt_numeric(value: float, precision=10) -> str: """Format any numeric value. Args: value: The numeric value to format. precision: The numeric precision Returns: The numeric value with the given precision. """ return "{{:.{precision}g}}".format(precision=precision).format(value)
6def7729b4ae8907ee9d610768025163bf9546d0
572,115
def is_float_str(string): """ Checks if a given str can be successfully converted to a float value. :param str string: String to be evaluated. :return: Returns true if the string is float convertible and false otherwise. :rtype: bool """ try: float(string) return True except ValueError: return False
d0d695d8d431e9850b38d98c6aadabf69a1569c9
551,537
def is_int(i): """Checks whether the given object can be converted to an integer""" try: int(i) return 1 except ValueError: return 0
46b82b5f6c6b68fdf4ed2952f98b41153366d436
345,738
def is_list(node: dict) -> bool: """Check whether a node is a list node.""" return 'listItem' in node
ad01033afe51391db2e5966247080e7263baa5e4
678,146
import torch def merge_lists(input_list): """ input_list = list:time[ list:level[4D tensor] ] output_list = list:level[ 4D tensor (batch*time, channel, height, width)] """ len_tt = len(input_list) len_ll = len(input_list[0]) output_list = [] for ll in range(len_ll): list_ll = [] for tt in range(len_tt): list_ll.append(input_list[tt][ll]) tensor_ll = torch.stack(list_ll, dim=1) tbb, ttt, tcc, thh, tww = tensor_ll.size() output_list.append(tensor_ll.reshape(tbb * ttt, tcc, thh, tww)) return output_list
3de262e4cd6da6a79988b329d33f82831d65d494
606,915
import random import socket def find_unbound_port(start=None, increment=False, port_range=(10000, 50000), verbose=False, logger=None): """ Find an unbound port. Parameters ---------- start : int The port number to start with. If this port is unbound, return this port. If None, start will be a random port. increment : bool If True, find port by incrementing start port; else, random search. port_range : tuple The range of ports for random number generation verbose : bool Verbose flag for logging logger: logging.Logger """ while True: if not start: start = random.randint(*port_range) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.bind(("127.0.0.1", start)) # Make sure we clean up after binding sock.close() return start except socket.error as e: if verbose and logger: logger.info("Socket error: {}".format(e)) logger.info( "randomly generated port %d is bound. Trying again." % start) if increment: start += 1 else: start = random.randint(*port_range)
e1fe772c054887ada0744e9764e166365eb6165b
643,442
def _get_matched_row(rows, lookup): """ Return row that matches lookup fields. """ for index, row in enumerate(rows): matched = True for field, value in lookup.items(): if str(row.get(field, '')).strip() != value: matched = False break if matched: return index, row return None, None
111d89516798aab3646488af7ea577723295e7a6
602,548
import functools @functools.lru_cache(maxsize=None) def excel_column_name(index): """ Converts 1-based index to the Excel column name. Parameters ---------- index : int The column number. Must be 1-based, ie. the first column number is 1 rather than 0. Returns ------- col_name : str The column name for the input index, eg. an index of 1 returns 'A'. Raises ------ ValueError Raised if the input index is not in the range 1 <= index <= 18278, meaning the column name is not within 'A'...'ZZZ'. Notes ----- Caches the result so that any repeated index lookups are faster, and uses recursion to make better usage of the cache. chr(64 + remainder) converts the remainder to a character, where 64 denotes ord('A') - 1, so if remainder = 1, chr(65) = 'A'. """ if not 1 <= index <= 18278: # ensures column is between 'A' and 'ZZZ'. raise ValueError(f'Column index {index} must be between 1 and 18278.') col_num, remainder = divmod(index, 26) # ensure remainder is between 1 and 26 if remainder == 0: remainder = 26 col_num -= 1 if col_num > 0: return excel_column_name(col_num) + chr(64 + remainder) else: return chr(64 + remainder)
ea561c36058a5132734328f19e99733b83e272aa
317,769
def _service_account_name(service_acct_email): """Gets an IAM member string for a service account email.""" return 'serviceAccount:{}'.format(service_acct_email)
f30ee00671c28e4f73aca899a57f69e7d69de68c
361,783
def NewLogProbs(dist_inputs, actions, log_prob_fun): """Given distribution and actions calculate log probs.""" new_log_probs = log_prob_fun(dist_inputs, actions) return new_log_probs
bfdd8df9bcb4ff7afa0e2d5b2506dfff6a71c8e3
361,600
def is_nonterminal(symbol): """nonterminals are formatted as this: [X]""" return symbol[0] == '[' and symbol[-1] == ']'
f4a345abf2f565b55cee302b4dc9d731c4e7f8df
543,432
def invert_bitstring(string): """ This function inverts all bits in a bitstring. """ return string.replace("1", "2").replace("0", "1").replace("2", "0")
9e5e4ff6af107213bf4cab10a2069332fc50c3cc
105,133
def network_is_bogon(network): """ Returns if the passed ipaddress network is a bogon Arguments: - network <ipaddress.IPv4Network|ipaddress.IPv6Network> Return: - bool """ return not network.is_global or network.is_reserved
b729a6dcd907a833d4bdedf581e58b8b515f3e07
116,118
def character_talents(talent_data): """Accepts a JSON object containing a player's talents and returns the player's current active specialization.""" talents = talent_data["talents"] # Starts empty just in case the player hasn't got a spec selected. active_spec = "" for talent in talents: # The API returns the selected key only if it's selected, therefore this check # makes sure we're not looking for something that doesn't exist. if "selected" in talent.keys(): if talent["selected"] == True: active_spec = talent["spec"]["name"] talent_data = {"active_spec": active_spec} return talent_data
6cadb2df2241469270d0e616b95b0176b6abbf45
451,182
def read_txt(fname, separator=None, **kwargs): """ Read a text-file's non-empty lines and split them using a blank-space separator. Parameters ---------- fname : str Name of the file including the path if needed. separator : str Character separating different words. By default None, which effectively is a space (or more spaces). Returns ------- content : list List of split lines (lists of words). """ content = [] with open(fname, 'r') as f: for line in f: line = line.split(separator) if len(line) != 0: content.append(line) return content
8b292dba4d101aa8a214f8226bb7e0c71e3b69f7
112,436
from typing import Any def is_list_like(x: Any) -> bool: """Check whether `x` is list like, e.g., Tuple or List. Parameters: ---------- x: A python object to check. Returns: ---------- `True` iff `x` is a list like sequence. """ return isinstance(x, (list, tuple))
a59a26e64e1aed696ec6a75b9fd9ee147a2cf55e
437,404
def list_insert_list(l, to_insert, index): """ This function inserts items from one list into another list at the specified index. This function returns a copy; it does not alter the original list. This function is adapted from: http://stackoverflow.com/questions/7376019/ Example: a_list = [ "I", "rad", "list" ] b_list = [ "am", "a" ] c_list = list_insert_list(a_list, b_list, 1) print( c_list ) # outputs: ['I', 'am', 'a', 'rad', 'list'] """ ret = list(l) ret[index:index] = list(to_insert) return ret
3a14dab4a78798e2a04452024cabc7b2a7a0b567
100,724
import re def parse_tags(s): """ Return a list of tags (e.g. {tag_a}, {tag_b}) found in string s """ return re.findall(r'\{(\w+)\}', s)
28849f326ff6019b9e41ee6fa0f48cfebff0811e
28,724
def hexstr(s): """Convert a string to hexadecimal.""" return "%02x"*len(s) % tuple(map(ord, s))
fc075bbec77dacf164516fa2e694467236caaab9
397,668
import re def regXpParse(data, regex): """Use regex to parse data Input: data the string that needs to be parsed Input: regex the pattern to match against Output: return the matching string, or 'NotFound' """ ret = None findResult = re.match(regex, data) if findResult: print("parsing result: %s" % findResult.group(1)) ret = findResult.group(1) else: ret = 'NotFound' return ret
cc29c0bac9ae32a096df62e49d0fe4dde5c35252
559,583
def get_from_class(module_name, class_name): """ Given a module name and a class name, return an object corresponding to the class retrieved as in `from module_name import class_name`. :param module_name: str: name of module (may have . attributes) :param class_name: str: name of class :return: object pointer to class """ mod = __import__(module_name, fromlist=[class_name]) class_obj = getattr(mod, class_name) return class_obj
50902122b0416233d0d1963e7df8c73302880aa5
280,232
import glob def expand_args(args): """ Takes an argv and expand it (under Windows, cmd does not convert *.tif into a list of files. :param list args: list of files or wildcards :return: list of actual args """ new = [] for afile in args: if glob.has_magic(afile): new += glob.glob(afile) else: new.append(afile) return new
3d1614c12469ded5f8e88fa9728d4f305eb8fb6d
175,493
import difflib def same_list_order(first, second): """ Return whether the items in the second list appear in the same order as the first list. """ matcher = difflib.SequenceMatcher(a=first, b=second) for (opcode, _, _, _, _) in matcher.get_opcodes(): if opcode in {"insert", "replace"}: return False return True
c6bfdc958310305d36262b2f15e1fe37febb6fec
271,812
def multi_key_gitlab(value): """ Returns the username, if an exception occurs None is returned. Parameters ---------- value: dict A dictionary of GitLab. Returns ------- value: str or None Username or none. """ try: return value['owner']['username'] except (KeyError, TypeError): return None
865b62e1db969f4a36482cf0f3b1845a02171da5
514,383
def get_differential(element): """Return the differential as (x,y) between element's current on-screen spot and its real spot. """ transMatrix = element.getCTM() return transMatrix.e, transMatrix.f
a80b79050a6d65463b285b531e3bb83c3e6d5a31
414,954
def dict_raise_on_duplicates(ordered_pairs): """Reject duplicate keys. Python does not do this by default.""" dictionary = {} for key, value in ordered_pairs: if key in dictionary: raise ValueError("Duplicate key: %r" % (key,)) else: dictionary[key] = value return dictionary
0acc225831173e67968660f968f08058c82adc09
437,875
def m22ft2(msq): """m^2 -> ft^2""" return msq/0.09290304
dd09b3c4f2fd4dd9443a0a6053457a1881c52e70
205,468
def compute_stoch_gradient(y, tx, w): """ Compute a stochastic gradient from just few examples n and their corresponding y_n labels. inputs: y = labels tx = feature matrix w: weight output: gradient : Gradient for loss function of Mean Squared Error evaluated in w """ N = len(y) e = y - tx.dot(w) gradient = -tx.T.dot(e) / N return gradient
5b17ffe6b1d6cbedb6396a1a0bec4f98a2b9f289
540,220
import string import re def encode_string(s='') -> str: """Encodes a string The encoding algorithm: 1. remove all vowels 2. keep only the first occurrence of each letter 3. encode the rest following the rules below: Table A is 'abcdefghijklmnopqrstuvwxyz'. It is zero-based indexed. Given string 'bcd', encode b by taking the indices from Table A of b and the next character - in this case, c (1 and 2, respectively). Add the indices and use to index Table A for the encoded letter, in this case, d. If an indices sum exceeds length of Table A, repeat the table and values. When encoding the last letter of the given string, use the first letter of the given string as the next letter. Args: s: a string of alpha characters Returns: A string of alpha characters that represent the encoding of 's'. Example: >>> encode_string('bcd') dfe """ table: str = string.ascii_lowercase # remove vowels chars: str = re.compile('[{}]'.format('aeiou')).sub('', s.lower()) # remove dups chars = ''.join(dict.fromkeys(chars)) # encode result = '' for i, c in enumerate(chars): p1: int = table.find(c) p2: int = table.find(chars[0]) if i == len(chars) - 1 else table.find(chars[i + 1]) np = p1 + p2 if np >= len(table): np -= len(table) result += table[np] return result
8fa36deae7a9598a90aac9acce72bef56e4c5590
328,356
def get_mu(lam, a, beta1, beta2, lam_c, verbose = 0): """Function to compute the latent parameter mu. Args: lam (float): The latent parameter lambda of a node in the WHSCM. a (float): The a parameter of the WHSCM. beta1 (float): The beta1 parameter of the WHSCM. beta2 (float): The beta2 parameter of the WHSCM. lam_c (float): The switching point between the two exponents of the double power-laws in the WHSCM. Returns: mu (float): The latent parameter mu. """ if lam >= 1 and lam <= lam_c: mu = a * lam**(-beta1) elif lam > lam_c: mu = a * (lam_c**(beta2 - beta1)) * lam**(-beta2) else: if verbose == 1: print("ERROR (get_mu): Lambda parameter has to be between 1 and infinity.") mu = None return mu
484a14a12fef29a48ac1c6b4626edc9288fbf0c0
470,182
def script_that_increments_and_returns_scan_id(tmpdir): """ Pytest fixture to return a path to a script with main() that increments the scan ID and adds the value to a queue. """ path = tmpdir.join("script_for_scan_id.py") path.write( """ from oet.command import SCAN_ID_GENERATOR def main(queue): queue.put(SCAN_ID_GENERATOR.next()) """ ) return f"file://{str(path)}"
adce85e6b3be683d5e4d1ecd210ba36031ea6d43
518,004
def kolibri_userinfo(claims, user): """ Fill claims with the information available in the Kolibri database """ claims["name"] = user.full_name return claims
29f64178481de1b06250c2171e5ea8c51170298c
639,096
def overlap(a1, a2): """ Check if the given arrays have common elements """ return len(set(a1).intersection(set(a2))) > 0
75d40ab32b56db942c4ba84d3bac852a7619df07
637,556
def author_id_string(aob): """ Produce a string representation of an author id :param aob: author object :return: string representation of author id """ return u"{x}: {y}".format(x=aob.get("type"), y=aob.get("id"))
be8aac97538fc2146a79f4ac53aa35eb6096045d
692,099
def getID(file): """ This function extracts the PDB ID from a DSSP file :param file: input file in DSSP format :return: PDB ID as string """ file.seek(0, 0) split_line = [] for line in file: split_line = line.split() if "HEADER" in line: return split_line[-2]
997881515805d5e8c7eee3e04ffd7316b6412da5
608,072
from typing import OrderedDict def get_nlls(col_sample, class_nades): """ Get likelihood of each class for this row Return as dict. :param col_sample: Sample being evaluated as column vector :param class_nades: Map from class name to trained NADE """ class_nll = OrderedDict() for class_name in class_nades.keys(): cnade = class_nades[class_name] log_density = cnade.logdensity(col_sample)[0] class_nll[class_name] = log_density return class_nll
64773af1939f4a7d357e8329660c0f8313acf129
557,122
def year_cv_split(X, year_range): """Split data by year for cross-validation for time-series data. Makes data from each year in the year_range a test set per split, with data from all earlier years being in the train split. """ return [ ((X["year"] < year).to_numpy(), (X["year"] == year).to_numpy()) for year in range(*year_range) ]
f5c7348a6589c180281de35bc9893f1de00252f1
668,825