content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def make_list_if_not(thing):
    """Wrap *thing* in a list unless it already is one.

    ---
    IN
    thing: a thing of any datatype
    OUT
    thing as a list (list)
    """
    # isinstance (not a type() comparison) so list subclasses also pass
    # through unchanged; non-lists are wrapped with a literal.
    if isinstance(thing, list):
        return thing
    return [thing]
d6206af8b50218fa702d6902e9a8d7be8e05d3d5
251,505
def object_name(x):
    """Get a human readable name for an object."""
    name = getattr(x, "__name__", None)
    if name is not None:
        return name
    bound = getattr(x, "__func__", None)
    if bound is not None:
        return object_name(bound)
    return str(x)
e6075ea864e838eb097662a3fed973110af1b686
509,330
def divisors(num):
    """Return all divisors of *num*, ordered least to greatest.

    :param num: int, a positive integer
    :return: list (int) of divisors in ascending order
    """
    # range() already yields candidates in ascending order, so no sort is
    # needed; the original's unused `counter` bookkeeping is dropped.
    return [cand for cand in range(1, num + 1) if num % cand == 0]
d904273e6e988f8643f39a1fcf45ad9f2699a87a
635,828
from datetime import datetime


def extract_sitename_date(directory_path, sitename_location, datetime_location):
    """Extract sitename and datetime from directory path name.

    Parameters
    -----------
    directory_path : string
        A path to the directory name
    sitename_location : index list
        Index of sitename location in directory path name
    datetime_location : index list
        Index of datetime location in directory path name

    Returns
    -----------
    list : [site (str), date (datetime)] extracted from the path
    """
    # Slice the date substring out of the path and parse it as YYYYMMDD.
    # `date_format` instead of `format` so the builtin is not shadowed.
    date_text = directory_path[datetime_location[0]:datetime_location[1]]
    date_format = "%Y%m%d"
    date = datetime.strptime(date_text, date_format)
    # Slice the sitename substring out of the path.
    site = directory_path[sitename_location[0]:sitename_location[1]]
    return [site, date]
86a2085ba68b234585ef9855da64fa1fdd5459ce
10,115
def prep_file(file_name):
    """Take a file, extracts items line-by-line, and returns a list of them.

    Args:
        file_name (str): The file name to open

    Returns:
        items (list): A list of items extracted from the file (trailing
        newlines are kept, as line iteration yields them)
    """
    with open(file_name) as handle:
        return [line for line in handle]
267bab14557a85a60f600cc8908262f928df8b75
606,076
def parse_input(event):
    """Parses all input required from step function."""
    request = event["input"]
    parsed = {
        "batch_id": request["transformation_step_output"]["batch_id"],
        "output_sns_arn": request.get("destinationSnsArn"),
        "execution_id": event["execution_id"],
    }
    return parsed
ed85b61e7c9e68dbbee910d7d6c1eaf342255aa0
695,331
def ssh_no_error(ssh, cmd, sudo=False):
    """Execute a command over ssh channel, and log and exit if the command
    fails.

    :param ssh: SSH() object connected to a node.
    :param cmd: Command line to execute on remote node.
    :param sudo: Run command with sudo privileges.
    :type ssh: SSH() object
    :type cmd: str
    :type sudo: bool
    :returns: stdout from the SSH command.
    :rtype: str
    :raises RuntimeError: In case of unexpected ssh command failure
    """
    exec_fn = ssh.exec_command_sudo if sudo else ssh.exec_command
    ret_code, stdout, stderr = exec_fn(cmd, timeout=60)
    if ret_code != 0:
        print('Command execution failed: "{}"'.format(cmd))
        print('stdout: {0}'.format(stdout))
        print('stderr: {0}'.format(stderr))
        raise RuntimeError('Unexpected ssh command failure')
    return stdout
1f674e9e4cea60605a815e19f3b7ab6288e7993e
378,199
import torch


def pack_pathway_output(cfg, frames):
    """
    Prepare output as a list of tensors. Each tensor corresponding to a
    unique pathway.
    Args:
        frames (tensor): frames of images sampled from the video. The
            dimension is `channel` x `num frames` x `height` x `width`.
    Returns:
        frame_list (list): list of tensors with the dimension of
            `channel` x `num frames` x `height` x `width`.
    """
    if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
        # Single-pathway models consume the frames unchanged.
        frame_list = [frames]
    elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
        fast_pathway = frames
        # Perform temporal sampling from the fast pathway: keep
        # num_frames // ALPHA evenly spaced frames along dim 1
        # (presumably the temporal axis per the docstring layout).
        slow_pathway = torch.index_select(
            frames,
            1,
            torch.linspace(
                0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA
            ).long(),
        )
        frame_list = [slow_pathway, fast_pathway]
    else:
        raise NotImplementedError(
            "Model arch {} is not in {}".format(
                cfg.MODEL.ARCH,
                cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH,
            )
        )
    return frame_list
061d093d0f0e38ba4ab8c038f28d2a3b95df231c
342,805
def ihead(store, n=1):
    """Get the first item of an iterable, or a list of the first n items"""
    if n == 1:
        # Returns None implicitly when the iterable is empty.
        for first in iter(store):
            return first
    else:
        # Note: like the original comprehension, this consumes the whole
        # iterable even after n items have been collected.
        head = []
        for position, element in enumerate(store):
            if position < n:
                head.append(element)
        return head
6a72ee47e1416b751b1725e5f7448fa408169f59
111,231
def wt_av(x, xw, y, yw):
    """ Calculate a weighted average """
    weighted_sum = x * xw + y * yw
    total_weight = xw + yw
    return weighted_sum / total_weight
56424282bf21040993e84a4958c2027218afd561
127,381
def determine_device(kal_out):
    """Extract and return device from scan results.

    :param kal_out: raw bytes output from a kal scan.
    :return: device name (str) after "Using device ", or None when absent.
    """
    # The original wrapped this in `while device == ""`, which only ever ran
    # once (device became None on a miss); a single pass with an early
    # return is equivalent and clearer.
    for line in kal_out.splitlines():
        line = line.decode("utf-8")
        if "Using device " in line:
            # Everything after the second space is the device name.
            return str(line.split(' ', 2)[-1])
    return None
98f8bb3c160a4c7fe6f6f65c91307103d124d3de
188,014
def moyenne_ponderee(a : float, b : float, c : float, pa : float, pb : float, pc : float) -> float:
    """Assumption: pa + pb + pc != 0.

    Return the mean of the three numbers a, b, c, weighted respectively by
    pa (weight for a), pb and pc.
    """
    weighted_total = (a * pa) + (b * pb) + (c * pc)
    total_weight = pa + pb + pc
    return weighted_total / total_weight
2931cb3289bf9c241f7e64c8210fca8b836c96c6
640,900
def bizect(l, steps="a"): """ given a list, select the a/b n-th group plus the last element >>> l = list(range(10)) >>> bizect(l) [0, 1, 2, 3, 4, 9] >>> bizect(l, steps="b") [5, 6, 7, 8, 9] >>> bizect(l, "ba") [5, 6, 9] >>> bizect(l, "bb") [7, 8, 9] """ r = l.copy() for key in steps: if key == "a": r = r[:len(r)//2] else: r = r[len(r)//2:-1] r += [l[-1]] return r
8d8a159d5961b025881ce7aa097fb45f6242fc28
609,241
from typing import Optional
from typing import Any


def safe_max(*values) -> Optional[Any]:
    """
    Find the max value in a list. Ignore None values.

    Args:
        *values: all values to be compared

    Returns:
        max value in the list or None

    Examples:
        >>> safe_max(None, 5, 3, 7)
        7
        >>> safe_max(None, None) is None
        True
    """
    candidates = [v for v in values if v is not None]
    return max(candidates) if candidates else None
aa30507235faa7978610b95b337d135c30f98b63
482,300
def human_readable_size(size):
    """Return a string for better assessing large number of bytes."""
    if size < 2**10:
        return "%s" % size
    # Each tier: (limit exponent, divisor exponent, suffix).
    for limit_exp, suffix in ((20, "KB"), (30, "MB"), (40, "GB")):
        if size < 2**limit_exp:
            return "%.2f %s" % (size / float(2**(limit_exp - 10)), suffix)
    return "%.2f TB" % (size / float(2**40))
a32d991ec79d9d68872ba3be16d03faa7f613f7a
321,126
def _label(label_fmt, value, fac_name=None):
    """Return the appropriate label for a factor.

    :param label_fmt: The format string, or a function that returns the label.
    :param value: The value of the faceting variable.
    :param fac_name: The (optional) name of the faceting variable, used in
        error messages.
    :raises ValueError: if ``label_fmt`` is not a format string or a function.
    """
    if hasattr(label_fmt, 'format'):
        return label_fmt.format(value)
    if callable(label_fmt):
        return label_fmt(value)
    if fac_name is None:
        msg = "invalid `label_fmt`: {}".format(label_fmt)
    else:
        msg = "invalid `label_fmt` for {}: {}".format(fac_name, label_fmt)
    raise ValueError(msg)
d1e8c9f64220d9f69a399737327ecb31cdae3838
412,553
def _parse_for_text_tag(xml_element, name=None):
    """Inspect an xml.dom.Element with a child 'name' to get its text value.

    NCX file has many element with a child likes "navLabel" > "text" >
    TEXT_NODE and this function allow to avoid some boilerplate code.

    First parameter must be an xml.dom.Element, having one child named by
    the second parameter (by default a "text" tag).

    If nothing is founded, an empty string '' is returned.
    Whitespaces and tabulations are stripped."""
    name = name or 'text'
    # Keep only element children whose tag matches `name`.
    tags = [e for e in xml_element.childNodes
            if e.nodeType == e.ELEMENT_NODE and e.tagName == name]
    text = ''
    if len(tags) > 0:
        # Only the first matching child is inspected.
        tag = tags[0]
        if tag.firstChild and tag.firstChild.data:
            # normalize() merges adjacent text nodes so firstChild.data
            # holds the full text content.
            tag.normalize()
            text = tag.firstChild.data.strip()
    return text
48ab46ef592cb1583212869e903895ce63cec8e5
519,493
def howManyWithMedianHouseValue(df)->int:
    """Return the number of houses with median house value greater than
    $80,000 and longitude between -120 and -118 (inclusive).

    :param df: pandas DataFrame with 'median_house_value' and 'longitude'
        columns.
    :return: int, the count of matching rows.
    """
    expensive = df[df['median_house_value'] > 80000]
    in_range = expensive[
        (expensive['longitude'] >= -120) & (expensive['longitude'] <= -118)
    ]
    count = in_range.shape[0]
    print(f"- Cantidad de casas con media superior a 80K: {count}")
    # Bug fix: the annotation and docstring promise an int, but the original
    # returned the filtered DataFrame itself; return the row count.
    return count
8dd9926faf6aa6c7943e61f3aae72545d4d346dc
304,613
import requests
import tempfile
import tarfile
import io


def extract_tar_gz(response: requests.Response) -> tempfile.TemporaryDirectory:
    """Extract a tar.gz source from a response and return the filepath."""
    # The caller must hold on to the returned TemporaryDirectory object:
    # the directory on disk is removed when the object is garbage-collected.
    temp_dir = tempfile.TemporaryDirectory()
    # NOTE(review): extractall() on an untrusted archive is vulnerable to
    # path traversal ("tar slip"); consider tarfile's `filter="data"`
    # (Python 3.12+) or validating member names — confirm the response
    # source is trusted.
    with tarfile.open(fileobj=io.BytesIO(response.content), mode="r:gz") as tar:
        tar.extractall(temp_dir.name)
    return temp_dir
66a9753330d4b0b1dc8e5c3704f8f67710733285
352,297
import random


def __d10() -> int:
    """Roll a d10."""
    # randrange(1, 11) is exactly what randint(1, 10) delegates to.
    return random.randrange(1, 11)
21c3c6f791b9c2995fefd59b745946f5625db31c
242,076
def face_rect_to_coords(rectangle):
    """
    Takes in a (x, y, w, h) array and transforms it into (x, y, x2, y2)
    """
    left = rectangle[0]
    top = rectangle[1]
    width = rectangle[2]
    height = rectangle[3]
    return [left, top, left + width, top + height]
1cc08a055c7de09ac0a5a0c96b28462776e6b287
560,260
def get_left_strip(chonk):
    """ Compute the left vertical strip of a 2D list. """
    strip = []
    for row_index in range(len(chonk)):
        strip.append(chonk[row_index][0])
    return strip
dfc57a0776e5ab97a808a75170634fa6a072d9d3
698,554
def _MakeCounterName(key_id, tag):
    """Helper to create a sharded Counter name.

    Args:
      key_id: Int unique id (usually a model entity id).
      tag: String tag which hints at the counter purpose.

    Returns:
      String to be used as a sharded Counter name.
    """
    return '_'.join((str(key_id), str(tag)))
5b1ebf20f4a59fe5b2e0ed076a83355a61ce97a1
52,093
import hashlib


def sha256sum(filename):
    """Return the SHA256 string representation of file specified by filename."""
    CHUNK_SIZE = 65536
    digest = hashlib.sha256()
    with open(filename, 'rb') as stream:
        # iter() with a b'' sentinel reads until EOF, chunk by chunk.
        for chunk in iter(lambda: stream.read(CHUNK_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
c137d56eadd978215b041a81d17402103cffdb6a
447,850
from datetime import datetime


def getUniqueTaskId(prefix=None):
    """ unique id generator for varius DLS tasks
    :param prefix: prefix for task type
    :return: unique string index (list of indexes can be sorted by date)
    """
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S-%f')
    return stamp if prefix is None else '%s-%s' % (prefix, stamp)
754218ec0a175f0a0bd20ea1a068b7b4aa76ec63
108,045
def rectangles_square(n: int):
    """Return `n` square rectangles from (1, 1) to (n, n)"""
    squares = []
    for side in range(n, 0, -1):
        squares.append((side, side))
    return squares
786ddafe21eada9c20f24612987e21885760ed08
420,173
def is_affirmative(key: str, config: dict, default=False) -> bool:
    """
    Checks if the config value is one of true, yes, or on (case doesn't
    matter), default is False

    Args:
        key: to check
        config: to lookup key in
        default: if not found, False if not specified

    Returns:
        if the config is present and is one of true, yes, or on
    """
    raw = config.get(key, default)
    normalized = str(raw).lower()
    return normalized in ('yes', 'true', 'on')
0b716f9d58b5e2bcca883985c37d47082899d8d5
386,982
def convert_to_space_location(space_num: int) -> list[int]:
    """Convert int to grid location"""
    # Numpad-style layout: 1-3 on the bottom row (row 2), 7-9 on top (row 0).
    # Built as a dict so an out-of-range space_num still raises KeyError.
    grid_map = {n: [2 - (n - 1) // 3, (n - 1) % 3] for n in range(1, 10)}
    return grid_map[space_num]
89e9fe1a86e36a083e00d33dfec943ea444be5a3
439,573
def true_pred(x) -> bool:
    """
    A predicate that is always L{True}.
    """
    # The argument is deliberately ignored.
    return True
486867cb10d980097ce3aa936442448521f54194
287,631
def uniq(ls):
    """Uniqify a list, keeping the first occurrence of each element.

    :param ls: iterable of hashable items.
    :return: list of unique items in first-seen order.
    """
    # dict.fromkeys deduplicates while preserving insertion order; the
    # original list(set(ls)) returned elements in arbitrary hash order,
    # which made results non-reproducible across runs.
    return list(dict.fromkeys(ls))
04ca82e898a446a931c44327f1d55146f8cbfce5
117,008
def valid_octet (oct):
    """ Validates a single IP address octet.

    Args:
        oct (int): The octet to validate

    Returns:
        bool: True if the octet is valid, otherwise false
    """
    # Chained comparison reads as the mathematical 0 <= oct <= 255.
    return 0 <= oct <= 255
9dd2346bb5df5bc00bb360013abe40b8039bdc45
4,865
def get_policy_targets(context, presentation):
    """
    Returns our target node templates and groups if we have them.
    """
    node_templates = []
    groups = []
    our_targets = presentation.targets
    if our_targets:
        # Look the target names up in the service template's topology;
        # fall back to empty dicts when those sections are absent.
        all_node_templates = \
            context.presentation.get('service_template', 'topology_template',
                                     'node_templates') \
            or {}
        all_groups = \
            context.presentation.get('service_template', 'topology_template',
                                     'groups') \
            or {}

        # A target name may refer to either a node template or a group;
        # node templates take precedence when a name exists in both.
        for our_target in our_targets:
            if our_target in all_node_templates:
                node_templates.append(all_node_templates[our_target])
            elif our_target in all_groups:
                groups.append(all_groups[our_target])

    return node_templates, groups
f483b9749c25b7d56c0e0a02a6787d936782e470
703,948
import six


def binary(v, encoding='utf-8', errors='strict'):
    """cast value to binary type,

    Args:
        v (typing.Any): value
        encoding (str, optional): encoding when value is not binary.
            Defaults to 'utf-8'.
        errors (str, optional): errors setting when value is not binary.
            Defaults to 'strict'.

    Returns:
        six.binary_type: encoded value
    """
    if isinstance(v, six.binary_type):
        return v
    # Non-text values are stringified first, then encoded like text.
    text = v if isinstance(v, six.text_type) else six.text_type(v)
    return text.encode(encoding, errors)
523015253a2fea2c690ee2d57fd932ccccf05e63
631,573
def consensus_decision(consensus):
    """ Given a consensus dict, return list of candidates to delete/archive. """
    to_delete = []
    to_archive = []
    for candidate, info in consensus.items():
        tags = info['tags']
        # Deletion tags win over archive tags.
        if 'delete' in tags or 'rfi' in tags:
            to_delete.append(candidate)
        elif 'archive' in tags or 'astrophysical' in tags or 'notify' in tags:
            to_archive.append(candidate)
    return to_delete, to_archive
789526ef84e6522651a2e13f676688320445fc95
633,735
def __indent_text_block(text):
    """ Indent a text block """
    lines = text.splitlines()
    if len(lines) <= 1:
        return text
    # First line stays flush; every continuation line gets the indent.
    indented = [lines[0]] + [" " + line for line in lines[1:]]
    return "\r\n".join(indented)
9d2e933a7b8ae2a82d91e18ca67f795f6db7d089
654,395
def replace_chars(target_str, char_list, replacement_str):
    """
    Replaces characters in a given string with a given string

    :param target_str: string - String needing character replacement
    :param char_list: list - List of characters to be replaced
    :param replacement_str: string - String to replace given characters
    """
    result = target_str
    for unwanted in char_list:
        result = result.replace(unwanted, replacement_str)
    return result
67bc62863462ef169315a00da3f687dec1ab381b
417,130
import math


def _Prefix(quantity, unit, precision, scale_callable, **args):
    """Formats an integer and a unit into a string.

    Args:
      quantity: A number.
      unit: A string, the dimension for quantity, with no multipliers (e.g.
        "bps"). If quantity is dimensionless, the empty string.
      precision: An integer, the minimum number of digits to display.
      scale_callable: A callable, scales the number and units.
      **args: named arguments passed to scale_callable.

    Returns:
      A string.
    """
    # No separator when the quantity is dimensionless.
    separator = ' ' if unit else ''
    if not quantity:
        return '0%s%s' % (separator, unit)
    # Infinities and NaN cannot be scaled; format them directly.
    if quantity in [float('inf'), float('-inf')] or math.isnan(quantity):
        return '%f%s%s' % (quantity, separator, unit)
    scaled_quantity, scaled_unit = scale_callable(quantity, unit, **args)
    if scaled_unit:
        separator = ' '
    # Decimal places = precision minus the digit count of the integer part
    # of the scaled quantity (clamped at zero), so at least `precision`
    # significant digits are shown.
    print_pattern = '%%.%df%%s%%s' % max(0, (precision - int(
        math.log(abs(scaled_quantity), 10)) - 1))
    return print_pattern % (scaled_quantity, separator, scaled_unit)
74ea1a7d075cab3519d5dbd875154d2bdd56fea5
567,805
def get_labels_of_types(asset, label_types):
    """
    Extracts the latest labels from an asset

    Parameters
    ----------
    - asset: the asset to extract the labels from
    - label_types: type of label, either DEFAULT or REVIEW
    """
    matching = [
        label
        for label in asset['labels']
        if label['labelType'] in label_types and label['isLatestLabelForUser']
    ]
    matching.sort(key=lambda label: label['createdAt'])
    return matching
f0437953db5b712b8c3989aec1d0eb5f2fe1481b
269,753
def calculate_issn_checkdigit(s):
    """ Given a string of length 7, return the ISSN check digit. """
    if len(s) != 7:
        raise ValueError('seven digits required')
    # Weights run 8 down to 2, one per digit position.
    weighted_total = sum(int(ch) * weight for ch, weight in zip(s, range(8, 1, -1)))
    remainder = weighted_total % 11
    check = 0 if remainder == 0 else 11 - remainder
    # ISSN uses 'X' to represent a check value of ten.
    return 'X' if check == 10 else '{}'.format(check)
b8b3e78361e1bd630a0d38888619b151f0ce7a5a
618,305
import string


def letter_frequency(paragraph):
    """Count frequency of letters in argument string.

    Count the frequency of letters occurring in the argument string.
    Ignore capitalization (i.e. Anna - a: 2, n: 2). Ignore all whitespace,
    non-alpha and non-ASCII characters.

    Args:
        paragraph (str): The paragraph to analyze

    Returns:
        a dict containing all lowercase letters of the alphabet and their
        occurrence count in the given paragraph. Letters that did not
        appear should have an occurrence count of 0.
    """
    # Pre-seed every ASCII letter with 0 so absent letters still appear.
    counts = {letter: 0 for letter in string.ascii_lowercase}
    for ch in paragraph.lower():
        # Membership test (rather than isalpha()) fixes the original
        # KeyError on non-ASCII alphabetic characters such as 'é'.
        if ch in counts:
            counts[ch] += 1
    return counts
9ff02495d4b14630194a5a713af4f284fa531932
552,564
from typing import Any


def empty_dict() -> dict[Any, Any]:
    """
    Returns an empty dict

    Returns:
        dict[Any, Any]: an empty dict
    """
    return dict()
13cadc3ef149ca9de1ba153904ffa7c0e826a79b
272,275
def compute_taw(fc, pwp, depth, fraction):
    """
    Compute total available water

    :param fc: Field capacity
    :param pwp: permanent wilting point
    :param depth: depth of soil in mm
    :param fraction: float value
    :return: a float value for TAW
    """
    water_capacity = fc - pwp
    return depth * fraction * water_capacity
d97a1e4cc918228fc7b0e457f1fac3ce2502f62e
37,346
def ask_for_confirmation(prompt="Are you sure? ", default=True):
    """
    Display a prompt asking for confirmation from the user before continuing;
    accepts any form of "yes"/"no"

    :param prompt: the prompt to display before accepting input
    :param default: the default value if CR pressed with no input
    :returns: True if "yes", False if "no"
    """
    # Capitalize the default choice in the [Y/n] hint.
    if default:
        yes, no = "Y", "n"
    else:
        yes, no = "y", "N"
    full_prompt = prompt + f"[{yes}/{no}] "
    while True:
        answer = input(full_prompt).lower().strip()
        if not answer:
            return default
        matches_yes = "yes".startswith(answer)
        matches_no = "no".startswith(answer)
        if matches_yes or matches_no:
            return matches_yes
        print("Please enter yes or no.")
fcd97e6b046b9cdb61ac40419fe87a9eae73d091
220,593
import csv


def get_csv_file(fieldnames=None):
    """
    Get CSV file. It adds three entries for default field names.

    Parameters
    ----------
    fieldnames : list
        Names for fields to write to the header of a file.

    Returns
    -------
    FILE
        Generated (and already closed) CSV file object; read the data back
        from 'test_csv.csv'.
    """
    if fieldnames is None:
        fieldnames = ['ID', 'Geometry', 'Name', 'Short Description']
    # newline='' is required by the csv module; without it, \r\n row
    # terminators are doubled on Windows, producing blank rows.
    with open('test_csv.csv', 'w', newline='') as file:
        csv_writer = csv.DictWriter(file, fieldnames=fieldnames)
        csv_writer.writeheader()
        csv_writer.writerow({
            'ID': 1,
            'Geometry': 'POINT (30 10)',
            'Name': 'Meat',
            'Short Description': 'Meat is good.'
        })
        csv_writer.writerow({
            'ID': 2,
            'Geometry': 'LINESTRING (30 10, 10 30, 40 40)',
            'Name': 'Fish',
            'Short Description': 'Fish is healthy.'
        })
        csv_writer.writerow({
            'ID': 3,
            'Geometry': 'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))',
            'Name': 'Vegetables',
            'Short Description': 'Vegetables are even healthier.'
        })
    return file
4d23e37bd2c80f0f96afd50ab1f720cd0b9e84ca
403,127
def calculate_mean(numbers):
    """Calculates the mean of a list of numbers

    Parameters
    ----------
    numbers: iterable[numbers]
        Must be a sized container (len() is used), e.g. a list or tuple;
        a plain generator will not work.

    Returns
    -------
    number
        The arithmetic mean, sum(numbers) / len(numbers).

    Notes
    -----
    Raises ZeroDivisionError when `numbers` is empty.
    """
    return sum(numbers) / len(numbers)
919ac8abf38f5469e422fe8720b61189a23d19d5
604,579
import re


def _valid_date(date):
    """
    Whether the date requested matches the desired format (starts with
    between 1 and 8 digits)

    :param date: The date to check
    :return: a truthy re.Match object if valid, None otherwise
    """
    # Raw string fixes the invalid-escape-sequence warning the original
    # '^\d{1,8}' literal triggers on modern Python.
    # NOTE(review): the pattern is anchored only at the start, so trailing
    # non-digits still match — confirm whether a full match was intended.
    return re.compile(r'^\d{1,8}').search(date)
c18d8c4f22f2f028b5c0d66aec9afad733d7c79b
67,828
def invertx(x, y, **kwargs):
    """Multiply x by -1"""
    negated_x = -x
    return negated_x, y
9bf88da135687f8a853af16dc39876547d436539
590,748
def load_body_data(smpl_data, gender='female', idx=0, n_sh_bshapes=10): """ Loads MoSHed pose data from CMU Mocap (only the given idx is loaded), and loads all CAESAR shape data. Args: smpl_data: Files with *trans, *shape, *pose parameters gender: female | male. CAESAR data has 2K male, 2K female shapes idx: index of the mocap sequence n_sh_bshapes: number of shape blendshapes (number of PCA components) """ # create a dictionary with key the sequence name and values the pose and trans cmu_keys = [] for seq in smpl_data.files: if seq.startswith('pose_'): cmu_keys.append(seq.replace('pose_', '')) name = sorted(cmu_keys)[idx % len(cmu_keys)] cmu_parms = {} for seq in smpl_data.files: if seq == ('pose_' + name): cmu_parms[seq.replace('pose_', '')] = { 'poses': smpl_data[seq], 'trans': smpl_data[seq.replace('pose_', 'trans_')] } # load all SMPL shapes fshapes = smpl_data['%sshapes' % gender][:, :n_sh_bshapes] return (cmu_parms, fshapes, name)
4b1f240046388e0a0415ceb59384949355f03fcc
90,137
import torch


def flat2mat(H):
    """
    Converts a flattened homography with shape '[N, 8]' to its
    corresponding homography matrix with shape '[N, 3, 3]'.

    The fixed bottom-right entry (1) is appended to each row before
    reshaping.
    """
    # Create the ones column with H's dtype on H's device; the original
    # torch.ones(...) defaulted to CPU float32 and failed for CUDA or
    # float64 inputs.
    ones = torch.ones(H.shape[0], 1, dtype=H.dtype, device=H.device)
    return torch.reshape(torch.cat((H, ones), dim=1), [-1, 3, 3])
b9cbb36359c0a380e38e1b2d37583d63956cbfc2
559,471
def r_in(td, r_0):
    """Calculate incident countrate given dead time and detected countrate."""
    tau = 1 / r_0
    corrected_rate = 1. / (tau - td)
    return corrected_rate
ced519ed2c9f2f546f5d06605213488ede9c2f21
570,732
import requests


def get_data(name=None, url=None):
    """
    Get data about a series from TVMaze.

    If the status is not OK or the content is empty, an exception is raised.

    :param name: a string, the name of a show
    :param url: a string, the url from the TVMaze API
    :return: a dict, parsed from a JSON
    :raises TypeError: when neither a name nor a url is given
    :raises RuntimeError: on a non-200 response or an empty result
    """
    if name:
        data = requests.get(
            'http://api.tvmaze.com/singlesearch/shows?q={}'.format(name)
        )
    elif url:
        data = requests.get(url)
    else:
        raise TypeError("No input given. Give a name of a show or a URL.")
    # Explicit raises instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently disable these checks.
    if data.status_code != 200:
        raise RuntimeError("Problems with the query or connection.")
    if not data:
        raise RuntimeError("No results. Try to refine your query.")
    return data.json()
77dc701d460f4beace5a0189bde4fff15050acc4
501,004
from collections import OrderedDict
import collections
from typing import Dict
from typing import Any


def dict_from_od(od : collections.OrderedDict) -> Dict[Any, Any]:
    """
    Convert an ordered dict to a dict, recursively.
    Does not mutate the original OrderedDict.
    """
    plain = dict(od)
    for key, value in od.items():
        # isinstance() already covers OrderedDict subclasses; the original's
        # additional issubclass(type(v), OrderedDict) check was redundant.
        if isinstance(value, OrderedDict):
            plain[key] = dict_from_od(value)
    return plain
b8218c2968afd19a7138d8354e38ec0ac71df85e
503,523
def parse_text_input(text_input):
    """
    Parses the text input received over the client's websocket connection.

    Returns:
        (input_type (str), params (list))
    """
    tokens = text_input.strip().split("|")
    if len(tokens) == 1:
        # No delimiter present: treat the whole input as raw text.
        return "rawtext", tokens
    return tokens[1].lower(), tokens[2:]
e225b07a017d80d06e67f8984630c6d637c21279
238,807
def dict_to_cmd(cmd, flags, **kwargs):
    """Create a command string for cmd and parameters 'kwargs'."""
    params = " ".join("-{} {}".format(key, value) for key, value in kwargs.items())
    cmd_str = cmd + " " + params
    flag_str = " ".join("-" + flag for flag in flags) if flags else ""
    # Note: the joining space is always emitted, matching the original's
    # trailing-space behavior when there are no flags.
    return cmd_str + " " + flag_str
fa9b5292e210051eab0ca9aaea1e76c69d125605
439,225
def Reynolds(rho, U, L, mu):
    """ Calculates flow Reynolds number """
    inertial_forces = rho * U * L
    return inertial_forces / mu
f8e985e74d951f570489f0700a20d18906de3213
177,279
def remap(degreeinput,degreemin,degreemax,dmxmin=0,dmxmax=65536):
    """ Convert the degree value to a 16 bit dmx number. """
    # NOTE(review): a 16-bit DMX value tops out at 65535; the default
    # dmxmax=65536 looks off by one — confirm with callers before changing.
    scaled = (degreeinput - degreemin) * (dmxmax - dmxmin) / (degreemax - degreemin)
    return scaled + dmxmin
31f3d26342f7c5cc08964f2ff0430d51dd5c5672
653,985
def limit(x, y, d, nx, ny):
    """ limit x,y values to edge of canvas. """
    max_x = nx - 1
    max_y = ny - 1
    # Checks are kept as independent ifs, in the original order, so a later
    # clamp overwrites the direction d set by an earlier one.
    if x < 0:
        x, d = 0, 0
    if x > max_x:
        x, d = max_x, 2
    if y < 0:
        y, d = 0, 3
    if y > max_y:
        y, d = max_y, 1
    return x, y, d
862f31c0e7d30553b04d7658d5f6a7187434dbde
129,090
def _inline_volume_check(inputs, claim_name):
    """Returns either an emptyDir or PVC volumeSpec """
    spec = inputs.get("spec", {})
    if "emptyDir" in spec:
        return {"emptyDir": {}}
    return {"persistentVolumeClaim": {"claimName": claim_name}}
2a1e5accebaa463c47c9c66799e595ead288d0b0
459,613
def BitWidth(n: int):
    """ compute the minimum bitwidth needed to represent and integer """
    if n == 0:
        return 0
    if n > 0:
        return n.bit_length()
    # negative: two's-complement WITHOUT sign bit
    return (n + 1).bit_length()
46dcdfb0987268133d606e609d39c641b9e6faab
4,116
import six


def morph_dict(d, convert_function):
    """
    Convert a nested dictionary from one convention to another.

    Args:
        d (dict): dictionary (nested or not) to be converted.
        convert_function (func): function that takes the string in one
            convention and returns it in the other one.

    Returns:
        Dictionary with the new keys.
    """
    # Attribution: https://stackoverflow.com/a/33668421/633213
    converted = {}
    for key, value in d.items():
        if isinstance(value, dict):
            new_value = morph_dict(value, convert_function)
        elif isinstance(value, list):
            # Bug fix: the original recursed into every list element and
            # crashed on lists containing non-dict values; only dict
            # elements are converted, everything else passes through.
            new_value = [
                morph_dict(item, convert_function) if isinstance(item, dict)
                else item
                for item in value
            ]
        else:
            new_value = value
        converted[convert_function(key)] = new_value
    return converted
47e7c74f68b0eab57934449c9028b04f06a677b9
667,084
from datetime import datetime


def get_issues_and_prs_for_repo(repo, time_delta):
    """
    Retrieve new issues and PRs for the provided user and time period.

    :param repo: repository object exposing get_issues(since=...) with
        issues carrying raw_data/repository/pull_request — presumably a
        PyGithub Repository; confirm the client type.
    :param time_delta: datetime.timedelta, how far back from now to look.
    :return: dict with "issues" and "pulls" lists of raw issue dicts, each
        augmented with a "repository" key and stripped of "body".
    """
    result = {"issues": [], "pulls": []}
    # NOTE(review): datetime.now() is naive local time — confirm the API
    # expects local rather than UTC timestamps.
    since_dt = datetime.now() - time_delta
    issues = list(repo.get_issues(since=since_dt))
    for issue in issues:
        # Convert Python object in a native Python type (dict)
        issue_dict = issue.raw_data
        issue_dict["repository"] = issue.repository.raw_data
        # Issues with an associated pull request are treated as PRs.
        if issue.pull_request:
            result["pulls"].append(issue_dict)
        else:
            result["issues"].append(issue_dict)
        # Remove fields which we don't need to spin things up
        # (issue_dict is already referenced from `result`, so this
        # deletion is reflected in the returned data).
        if "body" in issue_dict:
            del issue_dict["body"]
    return result
847a57c9f4bf5165936f4937d76216bee32a0220
196,266
def cropBorderFraction(img, crop_left=.1, crop_right=.1, crop_top=.1, crop_bot=.1):
    """ Crop a fraction of the image at its borders.

    For example, cropping 10% (.1) of a 100x100 image left border would
    result in the leftmost 10px to be cropped.
    The number of pixels to be cropped are computed based on the original
    image size.
    """
    # NOTE(review): nleft/nright are derived from shape[0] (rows) but
    # applied to the column axis, and vice versa for ntop/nbot — this looks
    # like a w/h swap for non-square images; kept as-is, confirm intent.
    w, h = img.shape[0], img.shape[1]
    nleft = int(round(crop_left * w))
    nright = int(round(crop_right * w))
    ntop = int(round(crop_top * h))
    nbot = int(round(crop_bot * h))
    # Bug fix: the original sliced img[ntop:-nbot, nleft:-nright]; with a
    # zero crop the -0 end index produced an empty image. Explicit end
    # indices handle zero crops correctly.
    return img[ntop:img.shape[0] - nbot, nleft:img.shape[1] - nright]
a3cabd88834f949c24bbb32021afa12b0891051b
123,691
def one_line(long_str):
    """ Make maybe mutli-line `long_str` into one long line """
    stripped_lines = [line.strip() for line in long_str.splitlines()]
    return ' '.join(stripped_lines)
bd02515bdb76947807227c3f7ba91434c26183ff
204,339
import hashlib


def filehash(filepath):
    """
    Compute sha256 from a given file.

    Parameters
    ----------
    filepath : str
        File path.

    Returns
    -------
    sha256 : str
        Sha256 of a given file.
    """
    BUF_SIZE = 65536
    digest = hashlib.sha256()
    with open(filepath, "rb") as handle:
        # iter() with a b"" sentinel reads chunk by chunk until EOF.
        for block in iter(lambda: handle.read(BUF_SIZE), b""):
            digest.update(block)
    return digest.hexdigest()
7e0af85ef132b19a18c4ba7956f58e46256d6445
693,838
def has_two_adjacent(password):
    """Check if the password contains a run of exactly two equal adjacent
    characters (a longer run does not count)."""
    previous = None
    repeats = 0  # extra occurrences of the current character beyond the first
    for ch in password:
        if ch == previous:
            repeats += 1
        else:
            if repeats == 1:
                return True
            previous = ch
            repeats = 0
    return repeats == 1
debd82c62f859296ed2227de8ed1705fc9f7c288
681,184
def inst_dictionary(instrument_name, hostname_prefix="NDX", hostname=None, pv_prefix=None, is_scheduled=True,
                    groups=None):
    """
    Generate the instrument dictionary for the instrument list

    Args:
        instrument_name: instrument name
        hostname_prefix: prefix for hostname (defaults to NDX)
        hostname: whole host name overrides prefix, defaults to
            hostname_prefix + instrument name
        pv_prefix: the pv prefeix; default to IN:instrument_name
        is_scheduled: whether the instrument has scheduled users and so
            should have user details written to it; default to True
        groups (List[str]): which science groups (e.g. SANS, MUONS) this
            instrument is in. Defaults to empty list

    Returns: dictionary for instrument
    """
    host = hostname if hostname is not None else hostname_prefix + instrument_name
    prefix = pv_prefix if pv_prefix is not None else "IN:{0}:".format(instrument_name)
    return {
        "name": instrument_name,
        "hostName": host,
        "pvPrefix": prefix,
        "isScheduled": is_scheduled,
        "groups": [] if groups is None else groups,
    }
f8307a7223dc191b8bd24105613588d1f48c63a1
507,783
def is_from_webpack(request) -> bool:
    """Check if the request was forwarded from webpack."""
    # Missing header yields None, which compares unequal to "true".
    return request.META.get("HTTP_X_FROM_WEBPACK", None) == "true"
cda1762650496a1edf1371a15a771a02bd6a3527
96,576
def list_from_comma_separated_string(s):
    """
    Convert a comma separated string such as '1,2,3' to a list.

    :param s: string of items separated by commas.
    :type s: str
    :return: list of separated values.
    :rtype: list
    """
    # All spaces (including internal ones) are removed before splitting.
    collapsed = s.replace(" ", "")
    return collapsed.split(",")
b9c22a245a87d17e18c7a5d415dfd976863a04a8
131,842
import torch


def jacobian(f, x):
    """Computes the Jacobian of f w.r.t x.

    This is according to the reverse mode autodiff rule,

    sum_i v^b_i dy^b_i / dx^b_j = sum_i x^b_j R_ji v^b_i,

    where:
    - b is the batch index from 0 to B - 1
    - i, j are the vector indices from 0 to N-1
    - v^b_i is a "test vector", which is set to 1 column-wise to obtain the
      correct column vectors out ot the above expression.

    :param f: function R^N -> R^N
    :param x: torch.tensor of shape [B, N]
    :return: Jacobian matrix (torch.tensor) of shape [B, N, N]
    """
    B, N = x.shape
    # Side effect: enables gradient tracking on the caller's tensor.
    x.requires_grad = True
    # First argument to f is a dummy [B, 1] zero tensor (f's signature is
    # f(in_, x) — presumably a time/conditioning input; confirm against f).
    in_ = torch.zeros(B, 1)
    y = f(in_, x)
    jacobian = list()
    for i in range(N):
        # One-hot test vector v selects column i of the Jacobian through a
        # vector-Jacobian product.
        v = torch.zeros_like(y)
        v[:, i] = 1.
        # retain_graph/create_graph keep the graph alive across the N grad
        # calls and make the Jacobian itself differentiable.
        dy_i_dx = torch.autograd.grad(y,
                                      x,
                                      grad_outputs=v,
                                      retain_graph=True,
                                      create_graph=True,
                                      allow_unused=True)[0]  # shape [B, N]
        jacobian.append(dy_i_dx)
    # Stack the N column vectors along dim 2 -> [B, N, N].
    jacobian = torch.stack(jacobian, dim=2).requires_grad_()
    return jacobian
913053f45b25d6f88d95cf26a757e7b4ba586089
674,283
def find_set(x):
    """Finds representant of the given data structure x."""
    # Walk up to the root.
    root = x
    while root.parent is not None:
        root = root.parent
    # Path compression: point every node on the walk directly at the root,
    # exactly as the recursive version did.
    node = x
    while node is not root:
        node.parent, node = root, node.parent
    return root
47b332938e6a648f0d353d027979b63b0c1e8826
47,116
def barcode_size(codes):
    """Returns the barcode size in modules."""
    num_rows = len(codes)
    num_cols = len(codes[0])
    # 17 modules per column; the last column carries one extra module.
    width = num_cols * 17 + 1
    return width, num_rows
c1bb318fe156bf5f645e82abea2ec18066f20d2d
572,657
def augment_data(meta, response_left, stimulus): """Augment meta data with fields for specific cases Args: meta: DataFrame response_left: ndarray 1 if subject made a left_response / yes response stimulus: ndarray 1 if a left_response is correct """ # add columns: meta["all"] = 1 meta["left"] = response_left.astype(int) meta["right"] = (~response_left).astype(int) meta["hit"] = ((response_left == 1) & (stimulus == 1)).astype(int) meta["fa"] = ((response_left == 1) & (stimulus == 0)).astype(int) meta["miss"] = ((response_left == 0) & (stimulus == 1)).astype(int) meta["cr"] = ((response_left == 0) & (stimulus == 0)).astype(int) return meta
2f6d1c91c7124947600be214decbf47fd0e935b9
171,652
def _grid_list_uuid_list(model):
    """Returns list of uuid's for the grid objects in the cached grid list."""
    return [grid.uuid for grid in model.grid_list]
f9548d45527f491c043419a4d459dc1485a0b129
598,396
def to_sparse_fillna(df, mean):
    """Replace NaN entries of df with the supplied mean; returns the filled dataframe."""
    return df.fillna(mean)
67454022568dde28023590b16f621415f134bf32
620,436
import functools


def synchronize(lock):
    """Decorator factory that serializes calls to the wrapped function via *lock*.

    The lock is acquired before each call and released afterwards.

    FIX: the original released the lock only on the success path, so an
    exception raised by the wrapped function left the lock held forever.
    Using the lock as a context manager releases it in all cases.
    """
    def sync_func(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with lock:  # acquire/release, exception-safe
                return func(*args, **kwargs)
        return wrapper
    return sync_func
e9ac48d67cf45e1b0cf9b6e776a93f569726b5d4
698,501
def get_last_opening_parenthesis(string):
    """
    Return the index of the '(' that opens the last closing parenthesis.

    For example, in "∀x ∈ f(x) (Px v (Qx v Rx))" this is the index of the
    parenthesis just before Px. Returns None when no such match is found.
    """
    opens = 0
    closes = 0
    # Scan right-to-left; the match is found when the counts balance.
    for idx in reversed(range(len(string))):
        ch = string[idx]
        if ch == ')':
            closes += 1
        elif ch == '(':
            opens += 1
            if opens == closes:
                return idx
d66581b6530104767ed9bd58251c8429cf21486c
589,075
import struct

# Pre-compiled little-endian unsigned 32-bit layout.
_UINT32_LE = struct.Struct('<I')


def encode_uint32(number: int) -> bytes:
    """Serialize *number* as 4 little-endian bytes (unsigned 32-bit integer)."""
    return _UINT32_LE.pack(number)
dd9637cce558b73f3348b426eadf64930162c3cd
241,075
import re

# Anything that is not a word character, hyphen or dot gets stripped.
_UNSAFE_CHARS = re.compile(r'(?u)[^-\w.]')


def sanitize_file_name(file_name):
    """
    Sanitize a file name: trim surrounding whitespace, replace inner spaces
    with underscores, and drop characters outside [-\\w.].
    """
    underscored = str(file_name).strip().replace(' ', '_')
    return _UNSAFE_CHARS.sub('', underscored)
8bc9cac8f14e67e092b011c99b701a8298c49c83
334,931
def get_role_features_from_annotations(role_annotations):
    """Split combined verb/role annotation text into its separate fields.

    Expects input of the shape "[(POS wordform)] span:LABEL tokens..." and
    returns the tuple (head_wf, head_pos, span, label, tokens).
    """
    head_part, role_part = role_annotations.split(")] ")
    head_pos, head_wf = head_part.lstrip("[(").split()
    span_label, tokens = role_part.split(maxsplit=1)
    span, label = span_label.rstrip(":").split(":")
    return (head_wf, head_pos, span, label, tokens)
2d7f3c012c9469ec9e492063237e8ce54a1b9d41
46,494
from typing import Dict


def stations() -> Dict:
    """Provide an example radio-station configuration mapping."""
    example_fm = {
        "url": "http://example.org/stream.mp3",
        "name": "Example FM",
    }
    return {"example_fm": example_fm}
0a071366540ac64c0973ef4c451dc1e909d39b9f
92,780
from datetime import timezone


def datetime_to_string(dttm):
    """Format a datetime as an ISO-8601 UTC string with microsecond precision.

    Naive datetimes are assumed to already be in UTC; aware datetimes are
    converted to UTC first. Output shape: "YYYY-MM-DDTHH:MM:SS.ffffffZ".

    FIX: replaced the third-party (and deprecated) pytz dependency with the
    stdlib datetime.timezone.utc — for pure-UTC handling the behavior is
    identical (pytz.UTC.localize(d) == d.replace(tzinfo=timezone.utc)).
    """
    if dttm.tzinfo is None or dttm.tzinfo.utcoffset(dttm) is None:
        # Timezone-naive: assume UTC.
        zoned = dttm.replace(tzinfo=timezone.utc)
    else:
        zoned = dttm.astimezone(timezone.utc)
    return zoned.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
d947e06cd75b9e57853fe6cca891911e7d819128
665,095
from typing import Union


def is_close(
    actual: Union[int, float], expected: Union[int, float], threshold: Union[int, float]
) -> bool:
    """Return True when actual and expected, each rounded to 2 decimal places,
    differ by no more than threshold."""
    difference = round(actual, 2) - round(expected, 2)
    return abs(difference) <= threshold
f2edf48d6475429b230fd751a3ef899461c84e1f
671,142
def get_line(basespace_autoimport):
    """Render one BaseSpace autoimport record as a tab-separated line.

    Args:
        basespace_autoimport (BaseSpaceProjectImport): record to format

    Returns:
        str: "<id>\\t<project_id>\\t<identifier>"
    """
    fields = (
        basespace_autoimport.id,
        basespace_autoimport.project_id,
        basespace_autoimport.identifier,
    )
    return "\t".join(str(field) for field in fields)
78a3b36fe6287b8a5c0a83fc4d78ade31dc0b6c4
394,934
def _get(result, field, mandatory=False, default="", transform=lambda x: x):
    """Fetch *field* from *result*, applying *transform* to the value.

    When the field is absent: raise KeyError if *mandatory*, otherwise
    return *default*.
    """
    # Guard clause: handle the missing-field case up front.
    if field not in result:
        if mandatory:
            raise KeyError("Field '"+field+"' not found in dictionary")
        return default
    return transform(result[field])
94f4975036a210fa1b2882438e5f05faababfe5a
65,435
def load_list(fn="list.txt", as_type=str, skip_remarks=True, skip_empty=True):
    """Read the lines of a text file into a list.

    Parameters:
    ==========
    as_type: Convert the values in the file to the given format. (Default: str).
    skip_remarks: Skip lines starting with `#` (default: True).
    skip_empty: Skip empty lines. (Default: True).

    Returns:
    ========
    A list of values of the given type.
    """
    values = []
    with open(fn) as fh:
        for raw in fh:
            stripped = raw.strip()
            if skip_empty and not stripped:
                continue
            if skip_remarks and stripped.startswith("#"):
                continue
            values.append(as_type(stripped))
    return values
b49d75a42d3c97c799010849a0141bd61476749d
216,050
def __parse_sql(sql_rows):
    """
    Parse sqlite3 database output into a dict keyed by stringified row id.

    Modify this function if you have a different database setup.
    Helper function for sql_get().

    Parameters:
        sql_rows (str): output from SQL SELECT query.

    Returns:
        dict
    """
    columns = ('id', 'requester', 'item_name', 'custom_name', 'quantity',
               'crafting_discipline', 'special_instruction', 'status',
               'rarity', 'resource_provided', 'pub-date', 'crafter', 'stats')
    parsed = {}
    for row in sql_rows:
        # Index row explicitly per column so short rows still raise IndexError.
        parsed[str(row[0])] = {name: row[i] for i, name in enumerate(columns)}
    return parsed
09c61da81af069709dd020b8643425c4c6964137
709,850
def get_record_as_json(cur, tablename, row_id):
    """ Get a single record from the database, by id, as json. """
    # IMPORTANT NOTE: Only use this function in trusted input. Never on data
    # being created by users. The table name is not escaped.
    query = """
    SELECT row_to_json(new_with_table)
    FROM (SELECT {t}.*, '{t}' AS tablename FROM {t}) new_with_table
    WHERE id=%s;""".format(
        t=tablename,
    )
    cur.execute(query, (row_id,))
    row = cur.fetchone()
    return row[0]
00521ae582a013f97b77494944e0f6e04069ed05
699,358
import fnmatch


def find_file(contents, pattern):
    """
    Find the file matching the given filename pattern.

    Searches the dictionary of Debian package archive entries reported by
    :func:`deb_pkg_tools.package.inspect_package()`.

    :param contents: The dictionary of package archive entries.
    :param pattern: The filename pattern to match (:mod:`fnmatch` syntax).
    :returns: The metadata of the matched file.
    :raises: :exc:`exceptions.AssertionError` if zero or more than one
             archive entry is found.
    """
    matches = [metadata for filename, metadata in contents.items()
               if fnmatch.fnmatch(filename, pattern)]
    assert len(matches) == 1, "Expected to match exactly one archive entry!"
    return matches[0]
efd0a5e718e8a487f653247b1dba06b2f39e3292
42,304
def resolve_wikipedia_link(link):
    """
    Given a wikilink such as [[813 (film)|813]], return the display text "813".
    """
    link = link.strip()
    # Peel off the surrounding [[ ]] brackets when both are present.
    if link.startswith("[[") and link.endswith("]]"):
        link = link[2:-2]
    # Keep only the display text after the pipe, when there is one.
    if "|" in link:
        _, link = link.split("|")
    return link.strip()
0a40c110874baefdadf3ed611a17fef630479d23
514,710
import torch


def depth_map_to_3d_torch(depth, cam_K, cam_W):
    """Derive 3D locations of each pixel of a depth map.

    NOTE(review): the docstring below describes batched (B x ...) inputs, but
    the code converts from numpy and unpacks depth as a single N x M map —
    confirm the expected calling convention with callers.

    Args:
        depth (torch.FloatTensor): tensor of size B x 1 x N x M
            with depth at every pixel
        cam_K (torch.FloatTensor): tensor of size B x 3 x 4 representing
            camera matrices
        cam_W (torch.FloatTensor): tensor of size B x 3 x 4 representing
            world matrices
    Returns:
        loc3d (torch.FloatTensor): tensor of size B x 3 x N x M
            representing color at given 3d locations
        mask (torch.FloatTensor):  tensor of size B x 1 x N x M with
            a binary mask if the given pixel is present or not
    """
    # Inputs arrive as numpy arrays and are promoted to torch tensors here.
    depth = torch.from_numpy(depth)
    cam_K = torch.from_numpy(cam_K)
    cam_W = torch.from_numpy(cam_W)

    N, M = depth.size()
    device = depth.device
    # Turn depth around. This also avoids problems with inplace operations
    depth = -depth.permute(1, 0)

    zero_one_row = torch.tensor([[0., 0., 0., 1.]])
    zero_one_row = zero_one_row.expand(1, 4).to(device)

    # add row to world mat (makes cam_W a square 4x4 homogeneous transform)
    cam_W = torch.cat((cam_W, zero_one_row), dim=0)

    # clean depth image for mask: pixels at infinite depth are absent;
    # zero them out so they drop out of the projection math.
    # upperlimit = 1.e+10
    upperlimit = float("Inf")
    mask = (depth.abs() != upperlimit).float()
    depth[depth == upperlimit] = 0
    depth[depth == -1*upperlimit] = 0

    # 4d array to 2d array k=N*M (flatten depth to a 1 x (N*M) row vector)
    d = depth.reshape(1, N * M)

    # create pixel location tensor
    px, py = torch.meshgrid([torch.arange(0, N), torch.arange(0, M)])
    px, py = px.to(device), py.to(device)

    p = torch.cat((
        px.expand(px.size(0), px.size(1)),
        (M - py).expand(py.size(0), py.size(1))
    ), dim=0)
    p = p.reshape(2, py.size(0) * py.size(1))
    # Normalize pixel coordinates by the image width; NOTE(review): both axes
    # are divided by M — presumably intentional for square-ish images, confirm.
    p = (p.float() / M * 2)

    # create terms of mapping equation x = P^-1 * d*(qp - b)
    P = cam_K[:2, :2].float().to(device)
    q = cam_K[2:3, 2:3].float().to(device)
    b = cam_K[:2, 2:3].expand(2, d.size(1)).to(device)
    Inv_P = torch.inverse(P).to(device)

    rightside = (p.float() * q.float() - b.float()) * d.float()
    x_xy = torch.matmul(Inv_P, rightside)

    # add depth and ones to location in world coord system
    x_world = torch.cat((x_xy, d, torch.ones_like(d)), dim=0)

    # derive location in object coord via loc3d = W^-1 * x_world
    Inv_W = torch.inverse(cam_W)
    Inv_W_exp = Inv_W.expand(4, 4)
    loc3d = torch.matmul(Inv_W_exp, x_world.double())

    loc3d = loc3d.reshape(4, N, M)
    # Drop the homogeneous row; keep x, y, z.
    loc3d = loc3d[:3, :, :].to(device)
    mask = mask.to(device)
    loc3d = loc3d.view(3, N * M)
    return loc3d, mask
adfa95abb2cf5be5ccf499f8231743a1416c9c50
687,367
import requests


def query_endpoint(username, **kwargs):
    """
    Fetch a user's games from the Lichess apiGamesUser endpoint.

    Args:
        username (str): Lichess username
        kwargs (dict): Any of the query parameters accepted by the Lichess
            apiGamesUser API user endpoint.

    Returns:
        (requests.response)

    References:
        https://lichess.org/api#operation/apiGamesUser

    Note:
        Consider implimenting streaming as suggested but Lichess API notes:
        https://requests.readthedocs.io/en/master/user/advanced/#streaming-requests
    """
    endpoint = f"https://lichess.org/api/games/user/{username}"
    return requests.get(url=endpoint, params=kwargs)
4d0bd06ffac59c5baef2678338a70ff4559de19d
314,179
from pathlib import Path


def file_extension(path: str) -> str:
    """
    Extract the canonical file extension from a path: no leading dot,
    all lowercase — e.g. mp4, avi, jpeg, ts. Empty string when there is none.
    """
    suffix = Path(path).suffix  # includes the leading dot, or "" if absent
    return suffix[1:].lower() if suffix else ""
a085044a19de507844412ea4027fda32c0f2f979
53,696
def from_json(json_data: dict, delimeter: str = "|") -> str:
    """Flatten a JSON object into delimiter-separated "key:value" text.

    :param json_data: JSON object to convert to plain text
    :param delimeter: Separator placed between key:value pairs
    :return: Plain text representation
    :rtype: str
    """
    return delimeter.join(f"{key}:{value}" for key, value in json_data.items())
a6eb193a91fbd346f4235a8829614d7dbbb8df8b
586,846
def removeSpecialsCharacters(text):
    """
    Remove newline (\\n), carriage return (\\r) and literal "\\l" sequences
    from the string. ('\\l' is not a Python escape, so the original '\l'
    literal is the two characters backslash + l — written explicitly here.)
    """
    for token in ('\n', '\r', '\\l'):
        text = text.replace(token, '')
    return text
5e6a8bd44631f5aa611abf29b8842b81f62c0fdf
283,916
def p2roundup(val: int, align: int) -> int:
    """
    Round `val` up to the next `align` boundary.

    Uses the bit-mask trick, so `align` must be a power of two.
    """
    mask = align - 1
    return ((val - 1) | mask) + 1
639793ab502297ecdfbf243084ccba31fdcc2a31
33,411
def floatable(x):
    """Return True when the value can be converted to float, else False."""
    try:
        float(x)
        return True
    except ValueError:
        return False
44c32fbf7dd51120e715628e1df8eb40e8ad3d8d
175,486
from typing import Dict
from typing import Any
from typing import List


def assert_dict_has_keys(obj: Dict[str, Any], expected_keys: List[str]) -> bool:
    """Check whether obj contains every key listed in expected_keys.

    Args:
        obj (Dict[str, Any]): Object to be checked.
        expected_keys (List[str]): Keys that must all be present in obj.

    Returns:
        bool: True when every expected key is a key of obj.
    """
    return all(key in obj for key in expected_keys)
3f9d398b0adea26dc69652da43c22e5f2fce494d
445,346
def _cpp_integer_type_for_range(min_val, max_val):
    """Returns the appropriate C++ integer type to hold min_val up to max_val."""
    # Preference order is int32_t, uint32_t, int64_t, uint64_t: somewhat
    # arbitrary, but chosen to a) minimize casts in arithmetic expressions,
    # b) favor 32-bit arithmetic (cheapest on current systems), and c) prefer
    # signed types so the C++ compiler can exploit undefined signed overflow.
    for bits in (32, 64):
        signed_min = -(2 ** (bits - 1))
        signed_max = 2 ** (bits - 1) - 1
        if signed_min <= min_val and max_val <= signed_max:
            return "::std::int{}_t".format(bits)
        if 0 <= min_val and max_val <= 2 ** bits - 1:
            return "::std::uint{}_t".format(bits)
    return None
771856d3b6df07df9460e7fae2d21afeeb8f214c
84,650
def departition(ls):
    """Flatten one level of nesting: the reverse of partition.

    :param ls: List of lists to be departitioned
    :return: Single flat list
    """
    flat = []
    for sublist in ls:
        flat.extend(sublist)
    return flat
954cdd59c561879fc74f46271da201f77c80b035
509,013
def convert_ip(address: str) -> bytes:
    """
    Convert a dotted-quad IP address string to its 4-byte representation.

    :param str address: IP address to convert, e.g. "192.168.0.1"
    :return bytes: one byte per octet, in order
    """
    return bytes(int(octet) for octet in address.split("."))
e60a7af6bfed10faef738faa238bf8370a78713e
487,264