content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
import calendar


def month_name(month_number):
    """Return the full English name of the month with the given 1-based number.

    Straight from http://stackoverflow.com/a/7385976/122291
    """
    # calendar.month_name[0] is the empty string, so 1..12 map naturally.
    return calendar.month_name[month_number]
6ac9eccd669a4991fbd1bdf795afc746adab8db7
398,253
def has_gaps(gaps):
    """Return True if the gaps dictionary has any gaps in it.

    Parameters
    ----------
    gaps: dictionary
        Dictionary of Channel:gaps arrays
    """
    return any(len(gap_list) for gap_list in gaps.values())
b24e9403b90fee7f29ab3f12ded95d4d13b20df6
440,610
def strip_unexecutable(lines):
    """Remove all code that we can't execute (IPython shell calls)."""
    return [line for line in lines if not line.startswith("get_ipython")]
f52fcf9c4afd0262f39bbe51d13860e5826606fb
65,476
def update_dictionary(default_dict, overwrite_dict=None, allow_unknown_keys=True):
    """Merge ``default_dict`` and ``overwrite_dict``, preferring the latter.

    Parameters
    ----------
    default_dict: `dict`
        Dictionary of default values.
    overwrite_dict: `dict` or None, optional, default None
        User-provided dictionary that overrides the defaults.
    allow_unknown_keys: `bool`, optional, default True
        If false, raises an error if ``overwrite_dict`` contains a key that
        is not in ``default_dict``.

    Raises
    ------
    ValueError
        If ``allow_unknown_keys`` is False and ``overwrite_dict`` has keys
        that are not in ``default_dict``.

    Returns
    -------
    updated_dict : `dict`
        ``overwrite_dict`` entries layered on top of ``default_dict``.
    """
    overwrite_dict = {} if overwrite_dict is None else overwrite_dict
    if not allow_unknown_keys:
        # dict view difference: keys present only in the overrides
        extra_keys = overwrite_dict.keys() - default_dict.keys()
        if extra_keys:
            raise ValueError(f"Unexpected key(s) found: {extra_keys}. "
                             f"The valid keys are: {default_dict.keys()}")
    return dict(default_dict, **overwrite_dict)
503934d42362ea2b7ff7732bb9f752de45140898
35,974
def conventional_naming(st: str, is_package=True) -> str:
    """Normalize a package or project name.

    If ``st`` is not a package name it is treated as a project name.

    Parameters
    ----------
    st : str
        Underscores are allowed. No spaces are allowed.
    is_package : bool, optional
        Considered project if not package, by default True

    Returns
    -------
    str
        Lower-cased string with underscores removed; capitalized for projects.
    """
    normalized = st.replace("_", "").lower()
    return normalized if is_package else normalized.capitalize()
a753fa2339b130db8c3d8ee2ba8e9437fe103221
315,185
import pathlib


def lstat(path: str):
    """Get the stat of `path` without following symbolic links.

    e.g. j.sals.fs.lstat("/home/rafy/testing_link") ->
    os.stat_result(st_mode=41471, st_ino=7081257, ...)

    Args:
        path (str): path to get its stat

    Returns:
        stat_result: the stat struct for the link itself.
    """
    target = pathlib.Path(path)
    return target.lstat()
321752f5b64d08619f79494671744359343ee67a
249,571
def translate_aws_action_groups(groups):
    """Translate AWS action-group labels into a single category.

    AWS labels actions with up to five groups (Permissions, ReadWrite,
    ListOnly, ReadOnly, Tagging).  Permissions stands alone; ReadOnly and
    Tagging always accompany ReadWrite; ListOnly accompanies both ReadWrite
    and ReadOnly.  This collapses them to one of:
    Permissions, List, Read, Tagging, Write — or "Unknown".
    """
    # Order matters: most specific group wins.
    translation_order = (
        ("Permissions", "Permissions"),
        ("ListOnly", "List"),
        ("ReadOnly", "Read"),
        ("Tagging", "Tagging"),
        ("ReadWrite", "Write"),
    )
    for aws_group, category in translation_order:
        if aws_group in groups:
            return category
    return "Unknown"
25d54f886f40f4ccd890bf142905a39f730a3ca5
247,890
def GenerateSwitchStatement(codepoint_list, var_name, num_indent, return_type):
    """Generates a switch-case statement for given arguments.

    Produces C-like code of the form:

      switch (var_name) {
        case 0xXXXX:
        ...
          return RETURN_TYPE;
      }

    Args:
      codepoint_list: a sorted list of code points.
      var_name: a variable name to be checked.
      num_indent: the indent depth.
      return_type: a return category type which should be returned if
        'var_name' is in the 'codepoint_list'

    Returns:
      a list of lines of the generated switch-case statement.
    """
    indent = ' ' * num_indent
    lines = [f'{indent}switch ({var_name}) {{\n']
    # Cases sit two spaces deeper; the return two more.
    lines += [f'{indent}  case 0x{codepoint:08X}:\n' for codepoint in codepoint_list]
    lines.append(f'{indent}    return {return_type};\n')
    lines.append(indent + '}\n')
    return lines
f1c013b81d5df95724a0486cad869e1dfcd6208c
358,282
def process_row(row, value_col):
    """Extract (date, hh:mm time, value) from a processed CSV row.

    Column 3 is assumed to hold an ISO-style "date T time" timestamp.
    """
    parts = row[3].split('T')
    # [:5] keeps only hh:mm from the time portion
    return (parts[0], parts[1][:5], row[value_col])
6e3397e8a10e975dc998a0894e130262d879a79f
521,566
def gfile(self, size="", **kwargs):
    """Specifies the pixel resolution on Z-buffered graphics files.

    APDL Command: /GFILE

    Parameters
    ----------
    size
        Pixel resolution. Defaults to a pixel resolution of 800.
        Valid values are from 256 to 2400.

    Notes
    -----
    Defines the pixel resolution on subsequently written graphics files
    (Jobname.GRPH) for software Z-buffered displays [/TYPE]. Lower
    resolution gives a fuzzier image; higher gives a sharper image but
    takes a little longer. Valid in any processor.
    """
    return self.run(f"/GFILE,{size}", **kwargs)
4ca1c2f7f7ef30bd3678f39d43eb18edf6faed83
363,265
from typing import Any


def is_number(obj: Any) -> bool:
    """
    Test if the JSON object is a JSON number.

    Args:
        obj: object to test the type of

    Returns:
        `True` if ``obj`` is a json number, `False` otherwise
    """
    # Exact type check on purpose: bool is a subclass of int but is
    # not a JSON number, so isinstance() would be wrong here.
    return type(obj) is int or type(obj) is float
c12bd8d0ed0cbb4d6281adf6ccfbd1bc13b0b617
530,220
def split(df, target):
    """Split off the target column(s) to make X and y."""
    target_cols = target if isinstance(target, list) else [target]
    features = df.drop(columns=target_cols)
    labels = df[target_cols].values.ravel()
    return features, labels
1d0c03c5a4dfd5ced9f609e9f39368a7848755b1
608,614
def week_cal(cal: str, current_day: int) -> str:
    """Transform an HTML month calendar into a week calendar.

    Args:
        `cal`(str): the html calendar to transform.
        `current_day`(int): the current day to select the current week.

    Returns:
        `str`: the calendar transformed.
    """
    lines = cal.splitlines()
    week_line = ""
    # Scan every line; the last line containing the current day cell wins,
    # matching the original behaviour (no early break).
    for line in lines:
        if f">{current_day}<" in line:
            week_line = line
    # Header (first three lines) + selected week row + closing line.
    return "\n".join(lines[:3] + [week_line, lines[-1]])
11cad36d9c92e38f10152b2c97039a9d5d16c344
670,052
def plot_identifier(name, plot_id):
    """Return file,plot_id identifier built from the name's numeric part."""
    # Strict two-part unpack kept on purpose: a name with more than one
    # underscore raises ValueError, as before.
    _, number = name.split("_")
    return f"{number}-{plot_id}"
26d86dba9a222ad648a42119aff37ccad939e05c
534,075
import torch


def precreate_targ_mask_encoding(nintervals):
    """Precreate 1 + 2*nintervals target/mask encodings.

    Row 0 is the boundary encoding (all zeros); the next ``nintervals``
    rows encode uncensored items, and the final ``nintervals`` rows encode
    censored items.
    """
    nencodes = 1 + 2 * nintervals
    targ_tensor = torch.zeros(nencodes, nintervals)
    mask_tensor = torch.zeros(nencodes, nintervals)

    # Uncensored rows: hazard occurred at interval idx.
    for idx in range(nintervals):
        row = 1 + idx
        targ_tensor[row][idx] = 1
        # Include every interval up to and including idx in the loss.
        mask_tensor[row][:idx + 1] = 1

    # Censored rows: no hazard seen; only survival points enter the loss
    # (targets stay zero).
    for idx in range(nintervals):
        row = 1 + nintervals + idx
        mask_tensor[row][:idx] = 1

    return targ_tensor, mask_tensor
e92f660c9070b262af93eff075660c04ed5d2d9e
448,103
def file_contains(path, text):
    """Does the file at path contain text (on any single line)?"""
    with open(path) as stream:
        return any(text in line for line in stream)
74a12505d40bfc317efb0f650fa36f63d7847c36
201,777
def energy_balance_rule(mod, p, h):
    """
    **Constraint Name**: DR_Energy_Balance_Constraint

    **Enforced Over**: DR_OPR_HRZS

    The sum of all shifted load up is equal to the sum of all shifted load
    down within an horizon, i.e. there are no energy losses or gains.
    """
    # Pyomo constraint rule: equate total shift-up and total shift-down over
    # every timepoint in horizon h for project p's balancing type.
    return sum(mod.DR_Shift_Up_MW[p, tmp]
               for tmp in mod.TMPS_BY_BLN_TYPE_HRZ[
                   mod.balancing_type_project[p], h]) \
        == sum(mod.DR_Shift_Down_MW[p, tmp]
               for tmp in mod.TMPS_BY_BLN_TYPE_HRZ[
                   mod.balancing_type_project[p], h])
a3812eb1c38145576483c3013bd61a16e3b61df8
495,072
import binascii


def unhexlify(blob):
    """
    Takes a hexlified script and turns it back into a string of Python code.

    Returns '' when the payload lacks the expected "MP" header or does not
    decode as UTF-8.

    NOTE(review): assumes the blob has at least one record line after the
    first (header) line; an empty blob would raise IndexError — confirm
    callers guarantee this.
    """
    # Drop the first line; every remaining line is one hex-dump record.
    lines = blob.split('\n')[1:]
    output = []
    for line in lines:
        # Discard the address, length etc. and reverse the hexlification
        # (record layout: 9 leading metadata chars, 2 trailing checksum chars).
        output.append(binascii.unhexlify(line[9:-2]))
    # Check the header is correct ("MP<size>")
    if (output[0][0:2].decode('utf-8') != u'MP'):
        return ''
    # Strip off header
    output[0] = output[0][4:]
    # and strip any null bytes from the end
    output[-1] = output[-1].strip(b'\x00')
    script = b''.join(output)
    try:
        result = script.decode('utf-8')
        return result
    except UnicodeDecodeError:
        # Return an empty string because in certain rare circumstances (where
        # the source hex doesn't include any embedded Python code) this
        # function may be passed in "raw" bytes from MicroPython.
        return ''
074edf1359f404e9de14da1cccf3952812ca7cb7
653,476
def null_count(df) -> int:
    """
    Get the total number of nulls in a dataframe.

    :param df: the pandas DataFrame to inspect
    :returns: The total number of nulls as an int
    """
    per_column_nulls = df.isna().sum()
    return per_column_nulls.sum()
fb8837feed4848a7fdd9bb91d4e8412681888b9b
205,874
import re


def extract_request(message, slack_id):
    """Extract the request text from a message, removing the bot mention.

    Keyword arguments:
    message -- user's message
    slack_id -- bot slack id
    """
    without_mention = message.replace(f"{slack_id} ", "").strip()
    # Collapse runs of spaces to a single space.
    return re.sub(" +", " ", without_mention)
e7f0b22dd45f32d33094f80b6e47a6b1ce60ab65
108,300
from typing import Any
from typing import List


def ensure_list(item: Any) -> List:
    """Wrap item in a list if it is not already a list.

    (Original docstring said "Ensure string is converted to a Path",
    which was a copy-paste error — the code never touches paths.)

    Args:
        item (Any): Anything.

    Returns:
        The item inside a list if it is not already a list; otherwise the
        list itself, unchanged.
    """
    if not isinstance(item, list):
        item = [item]
    return item
c7895c87121f0265080cac42ab9b3ca1c23cca23
637,593
def getOrElseUpdate(dictionary, key, opr):
    """If given key is already in the dictionary, returns associated value.
    Otherwise compute the value with opr, update the dictionary and return it.
    None dictionary are ignored.

    >>> d = dict()
    >>> getOrElseUpdate(d, 1, lambda _: _ + 1)
    2
    >>> print(d)
    {1: 2}

    @type dictionary: dictionary of A => B
    @param dictionary: the dictionary
    @type key: A
    @param key: the key
    @type opr: function of A => B
    @param opr: the function to compute new value from keys
    @rtype: B
    @return: the value associated with the key
    """
    # No cache to consult: just compute.
    if dictionary is None:
        return opr(key)
    if key not in dictionary:
        dictionary[key] = opr(key)
    return dictionary[key]
95454d7ca34d6ae243fda4e70338cf3d7584b827
705,915
def where_am_i(cmd_line, word_before_cursor):
    """
    Tell the autocompleter which word it is completing.

    A little extra care is needed to differentiate when a space was just
    pressed (empty word before the cursor).

    :param cmd_line: the list of command line words
    :param word_before_cursor: word_before_cursor parsed from the document
    :return: the position of the word we are on.
    """
    # Empty command line: position zero.
    if len(cmd_line) == 1 and cmd_line[0] == '':
        return 0
    # A space was just typed: we are starting the next word.
    if word_before_cursor == '':
        return len(cmd_line) + 1
    return len(cmd_line)
132fcdac296d0ff941d26ef1ea860b60e504187e
316,968
def sorensen(s1, s2) -> float:
    """
    Sorensen(-Dice) similarity of two sets.

    Parameters
    ----------
    s1 : first set.
    s2 : second set.

    Returns
    -------
    similarity coefficient (0<=x<=1).
    """
    # Guard the degenerate case: two empty sets previously raised
    # ZeroDivisionError; by convention they are identical, i.e. 1.0.
    if not s1 and not s2:
        return 1.0
    return 2 * len(s1 & s2) / (len(s1) + len(s2))
a5670b1a9da6cdc3b1f5488016033c9e68c0de37
452,650
def count_sdf_mols(file_path):
    """
    Return the number of molecules in an sdf-file.

    Counts the '$$$$' record separators, one per molecule.

    Parameters
    ----------
    file_path : str
        Full path to sdf file.

    Returns
    -------
    counter : int
        Number of molecules.
    """
    print('Counting molecules in {}...'.format(file_path))
    with open(file_path, 'r', errors='backslashreplace') as sdf_file:
        return sum(1 for line in sdf_file if '$$$$' in line)
15915b892aa8149b6ca230bb20d21e7f7c18af5f
151,568
def create_nine_digit_product(num):
    """
    Concatenate num*1, num*2, num*3, ... until at least nine digits are
    produced.  Return the nine-character string, or 0 if the concatenation
    overshoots nine characters.
    """
    digits = ''
    multiplier = 1
    while len(digits) < 9:
        digits += str(num * multiplier)
        multiplier += 1
    # Exactly nine characters means a valid pandigital candidate.
    return digits if len(digits) == 9 else 0
9c6765349edfa7e03dc8d2ffe7bf6a45155f3ad0
13,566
def get_mission_area(label):
    """
    Search a PDS4 label for a Mission_Area.

    Parameters
    ----------
    label : Label or ElementTree Element
        Full label for a PDS4 product with-in which to look for a
        mission area.

    Returns
    -------
    Label, ElementTree Element or None
        Found Mission_Area section with same return type as *label*,
        or None if not found.
    """
    # '*' matches any direct child; Mission_Area sits one level below it.
    mission_area = label.find('*/Mission_Area')
    return mission_area
aec04ef2fb93824fe5bbebff50123fc7d923e80a
343,682
def get_layer_info(layer):
    """
    Extract the hyperparameters that make up the layer, plus its weight
    and bias tensors (cloned).

    Supports Conv2d and BatchNorm2d; anything else yields two empty dicts.
    """
    hyperparameters = {}
    parameters = {}
    # The repr starts with the class name, e.g. "Conv2d(1, 2, ...)".
    layer_type = str(layer).split('(')[0]

    if layer_type == 'Conv2d':
        hyperparameters['in_channels'] = layer.in_channels
        hyperparameters['out_channels'] = layer.out_channels
        hyperparameters['kernel_size'] = layer.kernel_size
        hyperparameters['stride'] = layer.stride
        hyperparameters['padding'] = layer.padding
        has_bias = layer.bias is not None
        hyperparameters['bias'] = has_bias
        parameters['bias'] = layer.bias.clone() if has_bias else None
        parameters['weight'] = layer.weight.clone()
    elif layer_type == 'BatchNorm2d':
        hyperparameters['num_features'] = layer.num_features
        hyperparameters['eps'] = layer.eps
        hyperparameters['momentum'] = layer.momentum
        hyperparameters['affine'] = layer.affine
        hyperparameters['track_running_stats'] = layer.track_running_stats
        parameters['bias'] = layer.bias.clone()
        parameters['weight'] = layer.weight.clone()

    return hyperparameters, parameters
43673409542a9540e7d764960808137c39caf213
320,389
def labels_name(labels):
    """Return a comma-separated list of label names.

    Args:
        labels: list of label objects, or a single label object.

    Returns:
        string with comma-separated labels names.
    """
    # Exact `type is list` check preserved: a single label dict (or any
    # other object) falls through to the single-label branch.
    if type(labels) is not list:
        return labels['name']
    return ','.join(label['name'] for label in labels)
35ad15d47056eea6343cc9267a50ff673680f9a6
108,467
from typing import Counter
import re


def build_wordlist(input_file):
    """Build a wordlist Counter from lines of the corpus file."""
    word_pattern = re.compile(r'\w+')
    wordlist = Counter()
    for line in input_file:
        wordlist.update(word_pattern.findall(line))
    return wordlist
575a6fb872750dc83ac8b6f8b66f4b779962b71d
31,720
def get_instrument(program):
    """Return the instrument inferred from the MIDI program number."""
    simple_ranges = (
        (0, 8, "Piano"),
        (24, 32, "Guitar"),
        (32, 40, "Bass"),
        (56, 64, "Brass"),
    )
    for low, high, name in simple_ranges:
        if low <= program < high:
            return name
    # Strings span two disjoint program ranges.
    if 40 <= program < 46 or 48 <= program < 52:
        return "Strings"
    return None
4e2cceb3b09b6e255241976e492659e9addb77d8
441,638
def groupms_byenergy(microstates, ticks):
    """
    Divide microstates into N energy bands.

    Takes a list of microstates and a list of N energy values, and divides
    the microstates into N bands using the (sorted) energies as lower
    boundaries.  Microstates below the lowest boundary are discarded.

    Fix: the original sorted and appended a sentinel to the caller's
    `ticks` list in place; this version works on a copy and leaves the
    input untouched.
    """
    boundaries = sorted(ticks)
    n_bands = len(boundaries)
    boundaries.append(1.0e100)  # sentinel: right-most (open) boundary
    resulted_bands = [[] for _ in range(n_bands)]

    for ms in microstates:
        for itick in range(n_bands):
            if boundaries[itick] <= ms.E < boundaries[itick + 1]:
                resulted_bands[itick].append(ms)
                break

    return resulted_bands
39c84e02f36946d2cff77b2ecb66794205be28a7
51,824
def get_snapshot_name(publish_name, dist, temporary=False):
    """
    Return an aptly snapshot name.

    Args:
        publish_name (str): the publish name used as prefix
        dist (str): The distribution to be used (stable, unstable)
        temporary (bool): use temporary extension
    """
    suffix = "-tmp" if temporary else ""
    return f"{publish_name}-{dist}{suffix}"
bbc365677dfcd96aba59224bdb575f63d59c7253
282,444
def counter(listy):
    """
    Input: A list of numbers.
    Output: The number of entries in the list, via iteration.
    """
    # Works on any iterable, not just sized containers.
    return sum(1 for _ in listy)
ded974e0a9d1201342b34c877eaf1c5182be8e7b
225,954
from typing import Dict
from typing import List
from typing import Tuple


def _is_explanation_equal(dict1: Dict[str, List[Tuple[str, float]]],
                          dict2: Dict[str, List[Tuple[str, float]]]) -> bool:
    """
    Tests if the two dictionaries of a given structure are equal.

    The both of the input parameters must be a dictionary with string keys
    and list values. The latter is composed of 2-tuples of strings and
    floats. The keys in the dictionary and the tuples must match exactly,
    while the floats only need to be approximately equal (within 0.1).
    The ordering of the tuples in the list does not need to be the same.

    Parameters
    ----------
    dict1 : Dictionary[string, List[Tuple[string, float]]]
        The first dictionary to be compared.
    dict2 : Dictionary[string, List[Tuple[string, float]]]
        The second dictionary to be compared.

    Returns
    -------
    equal : boolean
        ``True`` if the dictionaries are the same, ``False`` otherwise.
    """
    if set(dict1.keys()) == set(dict2.keys()):
        equal = True
        for key in dict1:
            # Sort both lists so tuple ordering does not affect equality.
            val1 = sorted(dict1[key])
            val2 = sorted(dict2[key])
            if len(val1) != len(val2):
                equal = False
                break
            for i in range(len(val1)):
                # Tuple labels (first element) must match exactly.
                if val1[i][0] != val2[i][0]:
                    equal = False
                    break
                # Floats (second element) only need to agree within 0.1.
                if abs(val1[i][1] - val2[i][1]) > 1e-1:
                    equal = False
                    break
            # Propagate an inner-loop mismatch out of the key loop.
            if not equal:
                break
    else:
        equal = False
    return equal
92fedbf80441db270934017a67c7f5b88702381b
125,954
import asyncio


async def stop_exposure_after(command, delay: float):
    """Wait ``delay`` seconds, then return True.

    NOTE(review): the name and original docstring suggest the shutter is
    closed afterwards, but no closing happens here — presumably the awaiting
    caller closes the shutter when this coroutine returns; confirm.
    """
    # Progress feedback to the commander while the dark exposure runs.
    command.info(text="dark exposing . . .")
    await asyncio.sleep(delay)
    return True
c2674802bc059c3862de4aff22270677a091962c
138,213
def set_intersection(*sets):
    """Return the intersection of all the given sets.

    As of Python 2.6 you can write ``set.intersection(*sets)``.

    Fix: the original did ``rv = sets[0]`` followed by ``rv &= s``, which
    mutated the caller's first set in place.  A copy is taken instead.

    Examples
    ========

    >>> set_intersection({1, 2}, {2, 3})
    {2}
    >>> set_intersection()
    set()
    """
    if not sets:
        return set()
    result = set(sets[0])  # copy so the caller's set is never mutated
    for s in sets[1:]:
        result &= s
    return result
0e8639af9a00d0d57e0855bbdfd19a32c8f5786b
684,758
import math


def squared_loss(y, y_hat):
    """
    squared loss function

    :param y:
    :type numpy vector: label
    :param y_hat:
    :type numpy vector: observation
    :return:
    :type float: loss
    """
    loss = 0
    for i in range(len(y)):
        # NOTE(review): `sum(y[i] - y_hat[i])` implies each row is itself a
        # vector whose elementwise difference is summed before squaring —
        # scalar rows would raise TypeError; confirm against callers.
        loss += math.pow(sum(y[i] - y_hat[i]), 2)/2
    return float(loss)
494451125be0f525091203dd52bd82b57960351e
214,574
def compute_center(detections):
    """Compute the center for each detection.

    Args:
        detections: A matrix of shape N * 4, recording the pixel coordinate
            of each detection bounding box (inclusive).

    Returns:
        center: A matrix of shape N * 2, representing the (x, y) coordinate
            for each input bounding box center.
    """
    top_left = detections[:, [0, 1]]
    bottom_right = detections[:, [2, 3]]
    center = top_left + bottom_right
    # In-place halving, as in the original (keeps dtype semantics identical).
    center /= 2.
    return center
794b64de7ef7b1327bb0bb90982ac8d67c0a0fea
27,484
import hashlib


def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
    """
    Read file and produce hex digest of contents as string.

    :param filename: file name
    :param hash_factory: factory for producing hash (default md5)
    :param chunk_num_blocks: number of hash block_size units read per chunk
    :return: str hex digest of file contents
    """
    digest = hash_factory()
    chunk_size = chunk_num_blocks * digest.block_size
    with open(filename, 'rb') as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()
522ff4289b185a550daffb068206589b9676a671
154,139
def _default_metric_compare_fn(last_metrics, new_metrics): """Compare two metrics Args: last_metrics: previous metrics. A comparable object. new_metrics: new metrics. A comparable object. Returns: True if new_metrics is equal to or better than the last_metrics. False, otherwise. """ if last_metrics is None: return True if new_metrics is None: return False try: better = not (last_metrics > new_metrics) return better except TypeError: raise TypeError( 'Metrics are not comparable: {} vs {}'.format( type(last_metrics).__name__, type(new_metrics).__name__ ) ) except Exception: # unknown error raise return False
e1609199a372a6fc2392f4c0021f9461a73ac55e
199,758
def selection_sort(li):
    """
    [list of int] => [list of int]

    Selection sort: find the minimum value, swap it with the value in the
    first unsorted position, and repeat for the remainder of the list.

    Fix: the original aliased the input (``sorted_list = li``) and mutated
    the caller's list; this version sorts a copy, so the input is left
    unmodified.
    """
    sorted_list = list(li)
    for i in range(len(sorted_list)):
        # Find the index of the minimum in the unsorted tail.
        min_index = i
        for j in range(i + 1, len(sorted_list)):
            if sorted_list[j] < sorted_list[min_index]:
                min_index = j
        # Swap the minimum into the front of the unsorted tail.
        sorted_list[i], sorted_list[min_index] = sorted_list[min_index], sorted_list[i]
    return sorted_list
38422c7f5a6c89a24a1dadfe89fa96bdfde7b02b
357,625
def _clock_to_mjd( clock_seconds ): """ Helper function which converts clock time in seconds to MJD in days """ # Modified Julian date of the "zero epoch" of the system clock (1/1/70) MJD_ZEROPOINT = 40587.0 # Number of seconds per day. SECONDS_PER_DAY = 86400.0 mjd_days = MJD_ZEROPOINT + (clock_seconds/SECONDS_PER_DAY) return mjd_days
0859818bb6a2a0e5b4a549236944ed8894625a6d
493,975
from typing import Tuple
from pathlib import Path


def check_path_params(paths: Tuple) -> bool:
    """Check that every path in 'paths' exists as a directory."""
    for candidate in paths:
        if Path(candidate).is_dir():
            continue
        print(f"Path does not exist: {candidate}")
        return False
    return True
cdca8da458452c07045af6e3291c228a7630c452
424,874
import re def _image_name_from_url(url): """ Create a nice image name from the url. """ find = r'https?://|[^\w]' replace = '_' return re.sub(find, replace, url).strip('_')
47d064c2387d52e76554416db23861e4c00f4c1b
592,513
def write_clusters(filehandle, clusters, max_clusters=None, min_size=1,
                   header=None, rename=True):
    """Writes clusters to an open filehandle.

    Output format is one tab-separated "clustername<TAB>contig" line per
    contig, optionally preceded by a commented header line.

    Inputs:
        filehandle: An open filehandle that can be written to
        clusters: An iterator generated by function `clusters` or a dict
        max_clusters: Stop printing after this many clusters [None]
        min_size: Don't output clusters smaller than N contigs
        header: Commented one-line header to add
        rename: Rename clusters to "cluster_1", "cluster_2" etc.

    Outputs:
        clusternumber: Number of clusters written
        ncontigs: Number of contigs written
    """
    if not hasattr(filehandle, "writable") or not filehandle.writable():
        raise ValueError("Filehandle must be a writable file")

    # Special case to allows dicts even though they are not iterators of
    # clustername, {cluster}
    if isinstance(clusters, dict):
        clusters = clusters.items()

    if max_clusters is not None and max_clusters < 1:
        raise ValueError("max_clusters must None or at least 1, not {}".format(max_clusters))

    if header is not None and len(header) > 0:
        if "\n" in header:
            raise ValueError("Header cannot contain newline")
        # Ensure the header is written as a comment line.
        if header[0] != "#":
            header = "# " + header
        print(header, file=filehandle)

    clusternumber = 0
    ncontigs = 0

    for clustername, contigs in clusters:
        # Skip clusters below the minimum size; these do not count towards
        # max_clusters or the renaming sequence.
        if len(contigs) < min_size:
            continue
        if rename:
            clustername = "cluster_" + str(clusternumber + 1)
        for contig in contigs:
            print(clustername, contig, sep="\t", file=filehandle)
        # Flush after each cluster so partial output survives interruption.
        filehandle.flush()
        clusternumber += 1
        ncontigs += len(contigs)
        if clusternumber == max_clusters:
            break

    return clusternumber, ncontigs
2a93908fd0a91d7c04304fdf100e27a1089c2ae2
359,861
def _compute_position(input, index):
    """Compute line/column position given an index in a string.

    Returns a 1-based (line, col) tuple.  Handles '\\n', '\\r' and the
    two-character sequences '\\r\\n' / '\\n\\r' as single line breaks.
    """
    line = 1
    col = 1
    eol = None  # last end of line character
    for c in input[:index]:
        if c == '\n' or c == '\r':
            # A fresh EOL, or a repeat of the same EOL char (e.g. '\n\n'),
            # starts a new line.
            if eol is None or eol == c:
                eol = c
                line += 1
                col = 1
            else:
                # ignore second of '\n\r' and '\r\n' sequences
                eol = None
        else:
            eol = None
            col += 1
    return (line, col)
f08217651d11ed09c1e100368aa8cc869c37e386
697,713
def flatten_beam_dim(x):
    """Flattens the first two dimensions of a non-scalar array."""
    # Scalars (e.g. cache index) pass through untouched.
    if x.ndim == 0:
        return x
    batch, beam = x.shape[0], x.shape[1]
    return x.reshape((batch * beam,) + x.shape[2:])
4480ef5421407030f5cf217883600dbd4e63e196
687,558
def close(conn):
    """Close the database connection, ignoring any error.

    Always returns True, even when closing fails.

    Fix: narrowed the bare ``except:`` (which also swallowed
    KeyboardInterrupt / SystemExit) to ``except Exception``.
    """
    try:
        conn.close()
    except Exception:
        # Best effort: the connection may already be closed or invalid.
        pass
    return True
28cc152a60e2bdb4321f1e0fb310d31c3575d46f
451,050
import requests


def check_product_existence(p_id):
    """Check whether a product exists in the external LuizaLabs API.

    :param p_id: unique product id
    :return: bool: True if the product exists, otherwise False
    """
    url = "http://challenge-api.luizalabs.com/api/product/{}"
    response = requests.get(url.format(p_id))
    return response.status_code == 200
7fe23d26ac44f9f6056dabc4391ff358ed23323f
326,735
def translate_bbox(bbox, y_offset=0, x_offset=0):
    """Translate bounding boxes: (0, 0) to (y_{offset}, x_{offset}).

    bbox (~numpy.ndarray): N x 4 array of (y_min, x_min, y_max, x_max)
    y_offset (int or float): The offset along y axis.
    x_offset (int or float): The offset along x axis.

    Return:
        ~numpy.ndarray: translated copy; the input is not modified.
    """
    shift = (y_offset, x_offset)
    out_bbox = bbox.copy()
    out_bbox[:, :2] += shift
    out_bbox[:, 2:] += shift
    return out_bbox
cf917cfa6beee40bf6dd02a520e8afb39e0e3359
400,706
def sequence_delta(previous_sequence, next_sequence):
    """Check the number of items between two sequence numbers."""
    if previous_sequence is None:
        return 0
    # Mask to 32 bits so wrap-around counters yield a small positive delta.
    return (next_sequence - previous_sequence - 1) & 0xFFFFFFFF
8580c26d583c0a816de2d3dfc470274f010c347f
7,623
from typing import List


def add_line_continuation_characters(command_tokens: List[str]) -> List[str]:
    """
    Add a ``\\`` to the end of each command token that precedes an
    argument token (one starting with ``-``).
    """
    tokens = list(command_tokens)  # work on a copy
    for i in range(1, len(tokens)):
        if tokens[i].startswith('-'):
            tokens[i - 1] += ' \\'
    return tokens
9bc9575c53431f889dfa08c00c1a138c6cc1ddaa
256,303
def horner(x, ai):
    """Value of polynomial `a0 + a1 * x + a2 * x**2 + ... an * x ** n` for `x`.

    >>> horner (3, (-1, 2, -6, 2))
    5
    >>> horner (-3, [8, -1, 0, 13, 4])
    -16
    """
    # Horner's scheme: fold from the highest coefficient downwards.
    coeffs = reversed(ai)
    result = next(coeffs)
    for coefficient in coeffs:
        result = result * x + coefficient
    return result
770dea6ffbc2459745af650b7b72f5722b8fd6cb
566,411
def remove_padding(data):
    """Remove the padding of the plain text (it assumes there is padding).

    Fix: the original did ``ord(data[-1])``, which raises TypeError for
    ``bytes`` input on Python 3 (indexing bytes yields an int).  Both
    ``str`` and ``bytes`` are handled now.
    """
    last = data[-1]
    # bytes indexing yields an int on Python 3; str indexing yields a char.
    pad_len = last if isinstance(last, int) else ord(last)
    return data[:-pad_len]
7d8530451ee292e1561b1dc66fbea1668c72dbc2
320,220
def parse_keyids(key):
    """Return parsed keyid string(s) or tuple of."""
    if isinstance(key, str):
        return key
    if isinstance(key, (tuple, list)):
        # Recurse so nested sequences become nested tuples.
        return tuple(parse_keyids(element) for element in key)
    # Anything else is assumed to expose a .keyid attribute.
    return key.keyid
3ac2a2d2561d4f0778b5e79b847da930803797e5
434,756
def rm_eol(line, /):
    """Return line with any trailing end-of-line characters removed."""
    # rstrip treats the argument as a character set, so order is irrelevant.
    return line.rstrip("\r\n")
5ae8b3c88473e619a666a7dec755ebbaf3a34948
498,985
from pathlib import Path


def corpus_file(path):
    """
    Get a data file from a standard location.

    Check a few possible locations until the file is found.

    Raises FileNotFoundError if the file exists in none of the candidate
    locations.
    """
    my_location = Path(__file__).parent
    root_location = my_location.parent
    # Shared NFS cache used on the team's infrastructure.
    nfs_location = Path('/nfs/mg-cache/language_id/corpus')
    # Local repository copy takes precedence over the NFS cache.
    paths_to_try = [root_location / 'corpus', nfs_location]
    for location in paths_to_try:
        path_to_try = location / path
        if path_to_try.exists():
            return str(path_to_try)
    raise FileNotFoundError(f"Can't find {path!r} in any of {paths_to_try!r}")
1672d59b8cbb3af2c246cfcac89d73a1aae1536b
333,299
def imgtype2ext(typ):
    """Converts an image type given by imghdr.what() to a file extension.

    Raises Exception when the type is None (undetectable image).
    """
    if typ is None:
        raise Exception('Cannot detect image type')
    return 'jpg' if typ == 'jpeg' else typ
c75de1ec236a9bd33b2906618dd357c5e6bbaac8
489,826
def filter_not_empty_values(value):
    """Returns a list of non empty values or None."""
    if not value:
        return None
    filtered = [item for item in value if item]
    # An all-empty input also collapses to None.
    return filtered or None
c4f927b3faeb201a70c99654a404011083d3658d
608,602
from typing import Tuple


def format_addr(addr: Tuple[str, int]) -> str:
    """
    Format an address from a (host, port) tuple.

    :param addr: the address tuple to format
    :return: a string representing address as "host:port"
    """
    return ":".join(str(part) for part in addr)
9aa8dc27d0cadbc32c46d330262f52f1a0de8318
444,728
def pkcs7_pad(data, block_size=16):
    """Calculate and append pkcs7 padding. Block size can be defined
    dynamically.

    Fix: the original used float division (``len(data)/block_size``) to
    build a slice index, which raises TypeError on Python 3; integer
    division (``//``) is used instead.
    """
    last_block = data[block_size * (len(data) // block_size):]
    if not last_block:
        # Data is block-aligned: append a full block of padding.
        return data + chr(block_size) * block_size
    rem = block_size - len(last_block)
    return data + chr(rem) * rem
199ab83c2a29370a2c965203d45c069098670b3c
359,263
def time_to_secs(t):
    """Convert a datetime.time object to seconds since midnight."""
    return (t.hour * 60 + t.minute) * 60 + t.second
37b12e42b6841c6ce3c9a45ab90c0e33257ae2b1
641,395
def _return_arg(__arg: ..., /) -> ...:
    """Return the singular positional argument unchanged.

    The ``/`` makes the parameter positional-only, and the dunder name
    keeps it out of keyword-argument space entirely — callers cannot pass
    it by name.
    """
    return __arg
b4f8b9b8a7b7d344f15c99fdeaf71f938dca5892
628,689
def split_line(line, min_line_length=30, max_line_length=100):
    """
    Split a long line on a space, recursively, preserving its leading indent.

    This is designed to work with prettified output from Beautiful Soup
    which indents with a single space.

    :param line: The line to split
    :param min_line_length: The minimum desired line length
    :param max_line_length: The maximum desired line length
    :return: A list of lines

    Fixes: the original evaluated ``line[indent] == ' '`` before the bounds
    check (IndexError on an all-space line), and an all-space line would
    also have recursed forever; both cases are guarded now.
    """
    if len(line) <= max_line_length:
        # No need to split!
        return [line]

    # Work out the indentation at the beginning of the line
    # (bounds check first).
    indent = 0
    while indent < len(line) and line[indent] == ' ':
        indent += 1
    if indent == len(line):
        # Line is entirely spaces: splitting would recurse indefinitely.
        return [line]

    # Look for a space at max_line_length, working backwards ...
    split_point = None
    for i in range(max_line_length, min_line_length, -1):
        if line[i] == ' ':
            split_point = i
            break

    if split_point is None:
        # ... otherwise search beyond the desired end of the line.
        for i in range(max_line_length + 1, len(line)):
            if line[i] == ' ':
                split_point = i
                break

    if split_point is None:
        # There is nowhere to split the line!
        return [line]

    line1 = line[:split_point]
    # The continuation re-inherits the original indent.
    line2 = ' ' * indent + line[split_point + 1:]
    return [line1] + split_line(line2, min_line_length, max_line_length)
09cb5385b2d433579ad825652ccbb27862271cf8
587,346
import json


def get_workcell_io_json() -> str:
    """Return a test workcell_io.json.

    The "Proto" field is a base64-encoded serialized protobuf message; it
    is opaque fixture data and not meant to be edited by hand.
    """
    workcell_io = {
        "Proto": ("ChoKBnZhY3V1bRoCdXIoAjoKEghzdGFuZGFyZAodCgdibG93b2ZmGgJ1cigC"
                  "OgwSCHN0YW5kYXJkIAEKSwoNdmFjb2ZmLWJsb3dvbhoCdXIoAjABOhgKCnZh"
                  "Y3V1bS1vZmYSCHN0YW5kYXJkGAE6GAoKYmxvd29mZi1vbhIIc3RhbmRhcmQg"
                  "AQolCg92YWN1dW0tcHJlc3N1cmUaAnVyKAE6DBIIc3RhbmRhcmQgAwogCgx2"
                  "YWN1dW0tZ2F1Z2UaAnVyKAM6ChIIc3RhbmRhcmQSCHN0YW5kYXJkEgxjb25m"
                  "aWd1cmFibGUSBHRvb2waDgoIc3RhbmRhcmQQASAHGg4KCHN0YW5kYXJkEAIg"
                  "BxoSCgxjb25maWd1cmFibGUQASAHGhIKDGNvbmZpZ3VyYWJsZRACIAcaDgoI"
                  "c3RhbmRhcmQQAyABGg4KCHN0YW5kYXJkEAQgARoKCgR0b29sEAEgARoKCgR0"
                  "b29sEAIgARoKCgR0b29sEAMgASIhChp2YWN1dW0tZ2F1Z2UtbWluLXRocmVz"
                  "aG9sZB0AAPpEIhwKFXF1YWxpdHktbWluLXRocmVzaG9sZB0zMzM/IiIKG3Nl"
                  "bnNvci1oZWFsdGgtbWluLXRocmVzaG9sZB0zMzM/"),
        "created": "2020-08-27T22:50:32.513053Z",
        "createdBy": "Z2qqLaQ3WVckV8IClWGwuEWOckD2"
    }
    return json.dumps(workcell_io)
31e385fc0927dde3e215534533a583d0381c9a8b
516,827
def protein_sp_cterm(protein, drange):
    """Return protein sequence rotated so the SP segment sits at the end.

    ``drange`` is a "start-end" string; only the end position is used.
    """
    _, sp_end = drange.split("-")
    cut = int(sp_end)
    return protein[cut:] + protein[:cut]
1af28db6378f38adb1b0818f193f0dd85f47d381
279,977
def segments_intersect(s1, s2):
    """
    Return *True* if line segments *s1* and *s2* intersect.

    Each segment is a pair of endpoints:

        s1: (x1, y1), (x2, y2)
        s2: (x3, y3), (x4, y4)
    """
    (x1, y1), (x2, y2) = s1
    (x3, y3), (x4, y4) = s2

    denom = ((y4 - y3) * (x2 - x1)) - ((x4 - x3) * (y2 - y1))
    if denom == 0:
        # Parallel (or degenerate) segments never intersect here.
        return False

    num1 = ((x4 - x3) * (y1 - y3)) - ((y4 - y3) * (x1 - x3))
    num2 = ((x2 - x1) * (y1 - y3)) - ((y2 - y1) * (x1 - x3))

    # Intersection lies on both segments iff both parameters are in [0, 1].
    t = num1 / denom
    u = num2 / denom
    return 0.0 <= t <= 1.0 and 0.0 <= u <= 1.0
66a2149f3f09eb584728853e006e74db6a56698e
491,155
import re

# Characters that interfere with dependency parsing of review text.
_SPECIAL_CHARS = re.compile(r'[$%+^&@#*]+')


def clean_review(text: str):
    """
    Strip the special characters $%+^&@#* from review text so it can be
    dependency-parsed cleanly.

    Parameters:
        text (str): Review text.

    Returns:
        str: Cleaned review text.
    """
    return _SPECIAL_CHARS.sub(r'', text)
85324af96a937312a5af6b40ce27a07189d1b103
408,311
def probability_string(probability: float) -> str:
    """
    Format a probability with two decimals, using a comma as the decimal
    separator (e.g. 0.5 -> "0,50", 1.0 -> "1,00").
    """
    # The previous chain replace("0.", "0,").replace("0,0", "0,0") was a
    # no-op in its second step and missed values without a leading "0."
    # (e.g. exactly 1.0). Replacing the decimal point itself is correct.
    return f"{probability:.2f}".replace(".", ",")
2489fb558837663c0ceed80a4d3171b8243eaa69
105,398
def get_all_indexes_of(value, iterable):
    """
    Return the indexes of every item of *iterable* that contains *value*
    (membership test, so for string items this matches substrings).
    """
    # Materialize once so arbitrary iterables are only consumed a single time.
    items = list(iterable)
    return [idx for idx, item in enumerate(items) if value in item]
48223b2a7c5a2cbf417eba25ed33204913be9f83
276,508
def parse_float(a):
    """
    Convert *a* to a float.

    :param a: Any value accepted by :func:`float` (str, int, float, ...).
    :return: The converted float.
    :raises TypeError: If the value cannot be converted; the original
        conversion error is chained as the cause.
    """
    try:
        return float(a)
    # Narrow catch: a bare `except` also swallowed KeyboardInterrupt and
    # SystemExit. float() only raises TypeError or ValueError.
    except (TypeError, ValueError) as exc:
        msg = 'Cannot convert value to a float.\nValue: %s' % repr(a)
        raise TypeError(msg) from exc
08b8a8010385c8f48d3db239d54b401599245fc4
587,250
def walk(f, d, num_steps):
    """Assumes: f a Field, d a Drunk in f, and num_steps an int >= 0.

    Moves d num_steps times and returns the distance between the final
    location and the location at the start of the walk."""
    origin = f.get_loc(d)
    for _ in range(num_steps):
        f.move_drunk(d)
    return origin.dist_from(f.get_loc(d))
51555593f73ad4b543ba768b6a77169627223b37
437,837
import re

# Matches lines like "# @coverage foo bar"; group 1 captures the names.
_COVERAGE_TAG = re.compile(r"^.*@coverage(( (\w+))+).*$", re.IGNORECASE)


def get_tagged_functions_in_file(filename):
    """Return the set of target function names tagged with @coverage in
    the given file."""
    tagged = set()
    with open(filename, "r") as fh:
        for line in fh:
            found = _COVERAGE_TAG.match(line)
            if found:
                tagged.update(found.group(1).split())
    return tagged
4f8964cae341d7e3e2317ee44690104ca0dfcd16
232,921
def strip_preparer(value):
    """Colander preparer that trims whitespace around argument *value*.

    Non-string values pass through unchanged."""
    return value.strip() if isinstance(value, str) else value
e6e1f0f3db717fde54256414e1c6ff2fd3eaca6f
545,571
from typing import Dict from typing import Any def _find_host_port(ports: Dict[str, Any], container_port: int) -> str: """Find host port from container port mappings. `ports` is a nested dictionary of the following structure: {'8500/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32769'}], '8501/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32768'}]} Args: ports: Dictionary of docker container port mapping. container_port: Corresponding container port you're looking for. Returns: A found host port. Raises: ValueError: No corresponding host port was found. """ mappings = ports.get('{}/tcp'.format(container_port)) if mappings: return mappings[0].get('HostPort') else: raise ValueError( 'No HostPort found for ContainerPort={} (all port mappings: {})' .format(container_port, ports))
516db707bf0474842c3732b34a522dcc725a1213
100,776
def get_policies(key, service_account):
    """Build the Machine Provider policy dict for the given instance.

    Args:
      key: ndb.Key for a models.Instance entity.
      service_account: Name of the service account the instance will use to
        talk to Machine Provider.
    """
    backend_attributes = {
        'key': 'key',
        'value': key.urlsafe(),
    }
    return {
        'backend_attributes': backend_attributes,
        'machine_service_account': service_account,
        'on_reclamation': 'DELETE',
    }
95ecfdaedc80e148dcde466581fbcdf8727dd166
281,007
import re


def validate_cron_string(cron_string, error_on_invalid=False):
    """
    Validate that a string is a Unix cron string.
    """
    # Note: This is also a Temporal function, but I'm trying to avoid making
    # Temporal a dependency of BTU.
    fields = (
        r"(?P<minute>\*(\/[0-5]?\d)?|[0-5]?\d)",
        r"(?P<hour>\*|[01]?\d|2[0-3])",
        r"(?P<day>\*|0?[1-9]|[12]\d|3[01])",
        r"(?P<month>\*|0?[1-9]|1[012])",
        r"(?P<day_of_week>\*|[0-6](\-[0-6])?)",
    )
    pattern = re.compile(r"{0}\s+{1}\s+{2}\s+{3}\s+{4}".format(*fields))
    if pattern.match(cron_string) is not None:
        return True
    if error_on_invalid:
        raise Exception(f"String '{cron_string}' is not a valid Unix cron string.")
    return False
4cc4594bfc2fd2743f20fa9662a4083497d5e1fc
43,399
def child_elem_as_str(elem, qname):
    """Return the content of the child element *qname* as str, or None if
    the child does not exist.

    :param elem: Parent element exposing ``find_child_element``.
    :param qname: Qualified name of the child element to look up.
    """
    child = elem.find_child_element(qname)
    # Must compare against None explicitly: element-like objects (e.g.
    # ElementTree nodes) are falsy when they have no children, so a plain
    # `if child:` wrongly treated existing leaf children as missing.
    if child is not None:
        return str(child.schema_actual_value)
    return None
5d8555ccef1a2222b6e7b67e18e6a5a5b2392810
495,713
def uint64_tag(name, value):
    """Create a DMAP tag with uint64 data.

    Layout: 4-byte tag name, 4-byte big-endian length (always 8),
    then the value as 8 big-endian bytes."""
    length_prefix = b'\x00\x00\x00\x08'
    payload = value.to_bytes(8, byteorder='big')
    return b''.join((name.encode('utf-8'), length_prefix, payload))
dc5ea3d37f9864f318b48e179f201388614d7c33
94,803
from typing import Callable
import re


def remove_link_ids() -> Callable[[str], str]:
    """Create a function that normalizes link ids in rendered hyperlinks."""
    link_id_pattern = re.compile(r"id=[\d\.\-]*?;")

    def _remove_link_ids(render: str) -> str:
        """Replace every rendered link id with the stable value `id=0;`."""
        return link_id_pattern.sub("id=0;", render)

    return _remove_link_ids
cbddaca54c205d54f8c2e1baba504c14d31e7676
677,000
import torch


def calc_output_to_cum_min_cov(
    cross_covariance: torch.Tensor,
    prob_is_min: torch.Tensor,
) -> torch.Tensor:
    """
    Approximate the covariance between min(Y_1, ..., Y_d) and a second set
    of jointly Gaussian variables [Z_1, ..., Z_m].

    The approximation weights each row of the cross-covariance by the
    probability that the corresponding Y is the minimum, then sums over d.

    Args:
        cross_covariance: Covariance between [Y_1, ..., Y_d] and
            [Z_1, ..., Z_m], shape [batch_dim..., d, m].
        prob_is_min: (approximate) probability that each of
            [Y_1, ..., Y_d] is the minimum, shape [batch_dim..., d].

    Returns:
        torch.Tensor: Approximate covariance between min(Y_1, ..., Y_d)
        and [Z_1, ..., Z_m], shape [batch_dim..., m].
    """
    weights = prob_is_min.unsqueeze(-1)
    weighted_cov = weights * cross_covariance
    return weighted_cov.sum(dim=-2)
affd8180523e4dcc475caeeb03c0635819eec88c
371,563
def _2DprintInxRow(inxRow, lSpacesIndR): """ Function prints one index of a row of a 2D array Input: - 1 **inxCol** (*int*) Index of the row to be printed - 2 **lSpacesIndR** (*list*) A list with spaces which should be added to indices of rows Output: - 1 **strArray** (*string*) The string with printed requested index of a row """ # Print index of the row strRowInx = ('%d:') % inxRow # Pick up a correct space which is added before the index strSpaceBefore = lSpacesIndR[len(strRowInx) - 1] # Connect the above together strArray = strSpaceBefore + strRowInx strArray = strArray + ' ' return strArray
f108007a9fcb25a6aa32e22297a654d6d262e247
39,312
def _split_tag_path_pattern(tag_path_pattern): """ Returns a list of tag patterns and the prefix for a given tag path pattern. Args: tag_path_pattern (str) """ if tag_path_pattern.startswith('+/') or tag_path_pattern.startswith('*/'): prefix = tag_path_pattern[:2] tag_path_pattern = tag_path_pattern[2:] else: prefix = '' return tag_path_pattern.split('.'), prefix
39252b07ce2c63bcb7c75a6a48dc7cab5b311c83
327,174
from typing import List


def define_foot_vertices(robot: str) -> List:
    """Define the robot-specific positions of the feet vertices in the foot frame."""
    if robot != "iCubV2_5":
        raise Exception("Feet vertices positions only defined for iCubV2_5.")

    # For iCubV2_5, the feet vertices are not symmetrically placed wrt the
    # foot frame origin. The foot frame has z pointing down, x pointing
    # forward and y pointing right.

    # Origin and size of the box which represents the foot (foot frame).
    box_origin = [0.03, 0.005, 0.014]
    box_size = [0.16, 0.072, 0.001]

    half_x = box_size[0] / 2
    half_y = box_size[1] / 2

    # Vertex order: front-left, front-right, back-left, back-right.
    # Signs select front(+x)/back(-x) and left(-y)/right(+y).
    sign_pairs = ((1, -1), (1, 1), (-1, -1), (-1, 1))
    return [
        [box_origin[0] + sx * half_x, box_origin[1] + sy * half_y, box_origin[2]]
        for sx, sy in sign_pairs
    ]
b03971a978e15539f0e84dd0e672c7d2c4200026
277,860
def _get_exception_key(exc_info): """ Returns unique string key for exception. Key is filename + lineno of exception raise. """ exc_tb = exc_info[2] if exc_tb is None: return repr(exc_info[1]) return "{}:{}".format(exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno)
3d6423946b2512440569523889e189f0718ef866
494,050
def nodot(item):
    """Return True unless *item* names a hidden file (starts with '.').

    Uses startswith so an empty string is handled safely (the previous
    `item[0]` raised IndexError on "").
    """
    return not item.startswith('.')
f0b053ae024016454e457ef183e540b354d25463
482,793
def _get_element_subnodes(tree_element): """ Get all of the sublements of the tree_element with a 'node' tag :param tree_element: Element object from the ElementTree package :return: From ElementTree documentation: "Returns a list of all matching elements in document order." """ return tree_element.findall('node')
6b304151f86a398e9bc24f9705795ef67453fbf3
500,710
import difflib


def lines_diff(lines1, lines2):
    """Diff two lists of lines.

    Returns a tuple ``(is_diff, diffs)`` where *is_diff* is True when any
    line was added or removed, and *diffs* is the full ndiff output
    (including unchanged and '?' hint lines)."""
    diffs = list(difflib.ndiff(lines1, lines2))
    is_diff = any(entry[:1] in ('+', '-') for entry in diffs)
    return is_diff, diffs
50916d46871980fadfd854dc698481a4b0f35834
1,150
def if_none(value, default):
    """
    Return *value*, falling back to *default* when *value* is None.
    """
    return default if value is None else value
7c7e83c2c633ae809a2b1a75a6e16fc7fee3eb21
47,377
import torch


def compute_accuracy(logits, labels, mask):
    """Compute classification accuracy over the masked entries.

    *logits* are class scores of shape (N, C); *labels* the integer
    targets of shape (N,); *mask* selects which rows count."""
    selected_logits = logits[mask]
    selected_labels = labels[mask]
    predictions = selected_logits.argmax(dim=1)
    num_correct = (predictions == selected_labels).sum()
    return num_correct.item() * 1.0 / len(selected_labels)
a7ee234837024598fc95fa9c54c55802ea411577
701,770
def encode_data(data):
    """
    Coerce :class:`str` or :class:`bytes` input to :class:`bytes`.

    Strings are encoded as UTF-8; bytes pass through unchanged; anything
    else raises TypeError.
    """
    if isinstance(data, bytes):
        return data
    if isinstance(data, str):
        return data.encode('utf-8')
    raise TypeError("data must be bytes or str")
3cd54389719439e8f18cf02b110af07799c946b5
707,158
import json


def load_params(file):
    """
    Load the train config parameters from a JSON file.

    Args:
        file (str): path to config file

    Returns:
        out (dict): config dictionary
    """
    with open(file, "r") as handle:
        return json.load(handle)
d108ea11759299b70eec65eea976466fd164e5f6
541,677
def is_response(event):
    """Check whether an event is a response indicating a message was
    successfully sent (has a reply_to and a truthy ok flag)."""
    if 'reply_to' not in event:
        return False
    return 'ok' in event and event['ok']
6906e8364862f6ed88faa2f2f249db8901df4a51
61,593
def basic_python_loop(n):
    """Run a simple native python loop which is memory light and involves
    no data input or output.

    Args:
        n (int): count of number of loops
    """
    # The dict never holds more than 10 keys (i % 10), keeping memory flat.
    cache = {}
    for counter in range(n):
        cache[counter % 10] = counter
    return None
73d2762e487fc47dbea397e22e1a7ae0db79a2d6
467,528
def plural_of(noun):
    """Return the plural form of the (english) `noun`.

    >>> print(plural_of("house"))
    houses
    >>> print(plural_of("address"))
    addresses
    >>> print(plural_of("enemy"))
    enemies
    >>> print(plural_of("day"))
    days
    >>> print(plural_of("box"))
    boxes
    """
    # Sibilant endings take "es" (the old code only handled "s", turning
    # "box" into "boxs").
    if noun.endswith(("s", "x", "z", "ch", "sh")):
        return noun + "es"
    # Only consonant + "y" becomes "ies"; vowel + "y" just appends "s"
    # (the old code produced "daies" for "day").
    if noun.endswith("y") and not noun.endswith(("ay", "ey", "iy", "oy", "uy")):
        return noun[:-1] + "ies"
    return noun + "s"
9bd1a0dc16d06245df8c42de3a8ce76717dff812
205,353
import glob


def filebrowser(ext='', directory=''):
    """Recursively list files under *directory* whose names end with *ext*.

    :param ext: filename suffix to match (e.g. '.txt'); '' matches all.
    :param directory: root directory to search; '' means the CWD.
    :return: list of matching paths.
    """
    # The raw f-string pattern broke when the directory lacked a trailing
    # separator (e.g. 'data' produced the pattern 'data**/*.txt').
    if directory and not directory.endswith(('/', '\\')):
        directory += '/'
    return glob.glob(f"{directory}**/*{ext}", recursive=True)
cb7cc8935c68bd003286e55fb5d5f5dcd6d37c52
345,525
def d_hyd(width, height):
    """Calculate the hydraulic diameter of a rectangular profile
    (4 * area / wetted perimeter)."""
    area = width * height
    perimeter = 2 * (width + height)
    return 4 * area / perimeter
ceaeb1344b3183b907fcc38c8e1b617889a1381d
369,629
def calc_centroid(bolts):
    """Calculate the centroid of the bolt group.

    Args:
        bolts (data struct): list of the bolt data structure; each bolt's
            coordinates are at bolt[1] as (x, y)

    Returns:
        (x_centroid, y_centroid) (tuple of floats): coordinates of the
            bolt group centroid
    """
    num_bolts = len(bolts)
    sum_x = sum(bolt[1][0] for bolt in bolts)
    sum_y = sum(bolt[1][1] for bolt in bolts)
    return sum_x / num_bolts, sum_y / num_bolts
006c9a9c8105cf332f338817d7aab281853b081f
310,055
import torch


def make_pad_mask(lengths: torch.Tensor, le: bool = True) -> torch.Tensor:
    """Build a padding mask from a batch of sequence lengths.

    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
        le: If True, return a "valid position" mask — 1 inside each
            sequence, 0 on padding. If False, return the complementary
            mask — 1 on padded positions (the classic pad mask).

    Returns:
        torch.Tensor: int32 mask of shape (B, max_len).

    Examples:
        >>> lengths = torch.tensor([5, 3, 2])
        >>> make_pad_mask(lengths)
        tensor([[1, 1, 1, 1, 1],
                [1, 1, 1, 0, 0],
                [1, 1, 0, 0, 0]], dtype=torch.int32)
        >>> make_pad_mask(lengths, le=False)
        tensor([[0, 0, 0, 0, 0],
                [0, 0, 0, 1, 1],
                [0, 0, 1, 1, 1]], dtype=torch.int32)
    """
    batch_size = int(lengths.size(0))
    max_len = int(lengths.max().item())
    # Column indices 0..max_len-1, broadcast across the batch.
    seq_range = torch.arange(0, max_len, dtype=torch.int64,
                             device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    # Compare each position against its sequence length; int32 for
    # compatibility with downstream consumers expecting integer masks.
    if le:
        return (seq_range_expand < seq_length_expand).type(torch.int32)
    return (seq_range_expand >= seq_length_expand).type(torch.int32)
43b32a4dc7b1053ad80a8d6c47ea39d1835d5a71
32,886