content : string (lengths 39 – 9.28k)
sha1 : string (length 40)
id : int64 (8 – 710k)
import mimetypes


def bundle_media_description(key, filename):
    """Bundle the media description necessary for uploading.

    :param key: form-data key name
    :param filename: Local file name or path.
    :return: tuple of ('key name', ('file name', 'file object', 'MIME content-type'))
    :rtype: tuple
    """
    content_type, _ = mimetypes.guess_type(filename)
    media_description = (key, (filename, open(filename, 'rb'), content_type))
    return media_description
8c160a9c767d86a1c1867d22f018d6342239e68d
701,537
from typing import List


def list_to_str(x: List[float]) -> str:
    """
    Produces a string of formatted floating point numbers.

    Args:
        x: List of floating point numbers

    Returns:
        s: The formatted string.
    """
    strs = ''
    for x_i in x:
        strs += '{:.2e}, '.format(x_i)
    return strs[0:-2]
bd578f8be50dbeae43df1be5d21d3f4b56d56f63
321,773
def _keep_only_summer(df, summer_months):
    """
    Keep only the summer period.

    Parameters
    ----------
    df : DataFrame
        It should have a DateTime Index.
    summer_months : tuple(int, int)

    Returns
    -------
    DataFrame
    """
    return df.loc[df.index.month.isin(summer_months)].copy()
c61243fde9ed46f3c18cbea23ade8fe2da78aa25
186,321
def findBiggestRecord(vendorRDD):
    """Find and return the record with the largest number of tokens

    Args:
        vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens

    Returns:
        list: a list of 1 Pair Tuple of record ID and tokens
    """
    return vendorRDD.takeOrdered(1, lambda x: -1 * len(x[1]))
8de5ca9814924f48de1c7f5b265f74da4882eeaf
538,823
import torch


def make_weights(diag_val, offdiag_val, n_units):
    """Get a connection weight matrix with "diag-offdiag structure"

    e.g.
        | x, y, y |
        | y, x, y |
        | y, y, x |
    where x = diag_val, and y = offdiag_val

    Parameters
    ----------
    diag_val : float
        the value of the diag entries
    offdiag_val : float
        the value of the off-diag entries
    n_units : int
        the number of LCA nodes

    Returns
    -------
    2d array
        the weight matrix with "diag-offdiag structure"
    """
    diag_mask = torch.eye(n_units)
    offdiag_mask = torch.ones((n_units, n_units)) - torch.eye(n_units)
    weight_matrix = diag_mask * diag_val + offdiag_mask * offdiag_val
    return weight_matrix.float()
290627086012f025e84541befba7c2c6c49e2cc0
536,912
def fisbHexErrsToStr(hexErrs):
    """Given a list containing error entries for each FIS-B block, return a
    string representing the errors.

    This will appear as a comment in either the result string, or the failed
    error message.

    Args:
        hexErrs (list): List of 6 items, one for each FIS-B block. Each entry
            will be the number of errors in the message (0-10), or 98 for a
            packet that failed, and 99 for a packet that wasn't tried.

    Returns:
        str: String containing display string for error messages.
    """
    return f'{hexErrs[0]:02}:{hexErrs[1]:02}:{hexErrs[2]:02}:{hexErrs[3]:02}:' + \
        f'{hexErrs[4]:02}:{hexErrs[5]:02}'
7f81dc250b6e780f384720d98bb53574b1da86f9
646,553
def get_sheets_name(calcObject):
    """Get sheets names in a tuple."""
    return calcObject.Sheets.ElementNames
23500fbb712d7fbed78b7e4870becac7167500be
277,937
def hass_to_hue_brightness(value):
    """Convert hass brightness 0..255 to hue 1..254 scale."""
    return max(1, round((value / 255) * 254))
fcadea06d13968069983091bc2660810b948e13e
505,342
def splitmessage(message):
    """Returns a tuple containing the command and arguments from a message.

    Returns None if no first word is found.
    """
    assert isinstance(message, str)
    words = message.split()
    if words:
        return (words[0], words[1:])
d8db56ef55097f9f8858de95ee3d7799c0dc127e
700,315
def get_enabled_bodies(env):
    """
    Returns a C{set} with the names of the bodies enabled in the given environment

    @type env: orpy.Environment
    @param env: The OpenRAVE environment

    @rtype: set
    @return: The names of the enabled bodies
    """
    enabled_bodies = []
    with env:
        for body in env.GetBodies():
            if body.IsEnabled():
                enabled_bodies.append(body.GetName())
    return set(enabled_bodies)
ea5a86538edefaacf5b47ea22b9abc3ee87bba81
81,774
def _isCharEnclosed(charIndex, string):
    """Return true if the character at charIndex is enclosed in double quotes (a string)"""
    numQuotes = 0
    # If the number of quotes past this character is odd, then this character lies inside a string.
    for i in range(charIndex, len(string)):
        if string[i] == '"':
            numQuotes += 1
    return numQuotes % 2 == 1
70d70617fa869cc7b28abd3a7def28ea77b6a896
680,624
def is_upvoted(submission):
    """
    If a submission is upvoted, we assume the question is welcomed, and that
    there's no need for a template answer.
    """
    min_score = 3
    min_comment_count = 1
    return (
        submission.score > min_score and
        len(submission.comments) > min_comment_count
    )
90fe43e6cd681a15daa97dba039e7fa94ac617ca
24,572
def format_elapsed_seconds(elapsed_seconds):
    """
    Helper function to convert number of seconds to a string of hours, minutes, and seconds

    :param elapsed_seconds: float or int of the number of elapsed seconds to format into a string
    :return: formatted time string
    """
    hours = int(elapsed_seconds / 3600.0)
    minutes = int(elapsed_seconds / 60.0 % 60)
    seconds = elapsed_seconds % 60.0
    time_string = '{:0.0f} hour{}, '.format(hours, 's' if hours != 1 else '') if hours >= 1 else ''
    time_string += '{:0.0f} minute{}, and '.format(minutes, 's' if minutes != 1 else '') if minutes >= 1 else ''
    time_string += '{:0.0f} second{}'.format(seconds, 's' if int(seconds) != 1 else '')
    return time_string
c865645920f1c32f22022b156b9ed709ad38a717
531,435
import re


def is_project_issue(text):
    """
    Issues/pull requests from Apache projects in Jira.

    See: https://issues.apache.org/jira/secure/BrowseProjects.jspa#all

    >>> is_project_issue('thrift-3615')
    True
    >>> is_project_issue('sling-5511')
    True
    >>> is_project_issue('sling')
    False
    >>> is_project_issue('project-8.1')
    False

    Special cases:

    >>> is_project_issue('utf-8')
    False
    >>> is_project_issue('latin-1')
    False
    >>> is_project_issue('iso-8858')
    False
    """
    return bool(re.match(r'''
        (?!utf-)    # Some special cases...
        (?!latin-)
        (?!iso-)
        \w+-\d+$
    ''', text, re.VERBOSE | re.UNICODE))
f96f63488c97311cdc79f3fa2dd526023e300969
683,893
def _validate_inputs_outputs_var_format(value: str) -> str:
    """Validate inputs/outputs variables

    Arguments:
        value {str} -- A '.' separated string to be checked for inputs/outputs variable formatting

    Returns:
        str -- A string with validation error messages
    """
    add_info = ''
    parts = value.split('.')
    if len(parts) > 0 and parts[0] != 'inputs':
        add_info = f'Inputs and outputs variables can only refer to an input value' \
            f' not: {parts[0]}'
    elif len(parts) > 1 and parts[1] != 'parameters':
        add_info = f'Inputs and outputs variables can only refer to an input parameter' \
            f' not: {parts[1]}'
    elif len(parts) != 3:
        add_info = 'Inputs and outputs variables must have 3 segments.'
    return add_info
e6d6cb0cf77a4b75ae5c6915f28d7f3ed16142b8
265,886
def _slice(tensor, dim, start, end):
    """Slices the tensor along given dimension."""
    # Performs a slice along the dimension dim. E.g. for tensor t of rank 3,
    # _slice(t, 1, 3, 5) is same as t[:, 3:5].
    # For a slice unbounded to the right, set end=0: _slice(t, 1, -3, 0) is same
    # as t[:, -3:].
    rank = tensor.shape.rank
    slices = rank * [slice(None)]
    if end == 0:
        end = None
    slices[dim] = slice(start, end)
    return tensor[slices]
c2897621afb2adefe8da78e56763cc3f0822f9bc
399,546
from typing import Callable
from typing import Union


def trapezoidal_area(
    fnc: Callable[[Union[int, float]], Union[int, float]],
    x_start: Union[int, float],
    x_end: Union[int, float],
    steps: int = 100,
) -> float:
    """
    Treats curve as a collection of linear lines and sums the area of the
    trapezium shape they form

    :param fnc: a function which defines a curve
    :param x_start: left end point to indicate the start of line segment
    :param x_end: right end point to indicate end of line segment
    :param steps: an accuracy gauge; more steps increases the accuracy
    :return: a float representing the area under the curve

    >>> def f(x):
    ...     return 5
    >>> '%.3f' % trapezoidal_area(f, 12.0, 14.0, 1000)
    '10.000'
    >>> def f(x):
    ...     return 9*x**2
    >>> '%.4f' % trapezoidal_area(f, -4.0, 0, 10000)
    '192.0000'
    >>> '%.4f' % trapezoidal_area(f, -4.0, 4.0, 10000)
    '384.0000'
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
3ac16c64b77180aff453c71f8495d9a7dc4dd599
270,838
def coco_annfile(dir, subset, year=2014):
    """Construct coco annotation file."""
    annfile = '{}/annotations/instances_{}{}.json'.format(dir, subset, year)
    print(annfile)
    return annfile
e63bdfc40318190cc22b095455dee4a57f7f5325
121,099
def get_list_registry(from_space, capacity=None, initializer=0, flatten=True, add_batch_rank=False):
    """
    Creates a list storage for a space by providing an ordered dict mapping space names
    to empty lists.

    Args:
        from_space: Space to create registry from.
        capacity (Optional[int]): Optional capacity to initialize list.
        initializer (Optional(any)): Optional initializer for list if capacity is not None.
        flatten (bool): Whether to produce a FlattenedDataOp with auto-keys.
        add_batch_rank (Optional[bool,int]): If from_space is given and is True, will add a 0th rank (None)
            to the created variable. If it is an int, will add that int instead of None.
            Default: False.

    Returns:
        dict: Container dict mapping core to empty lists.
    """
    if flatten:
        if capacity is not None:
            var = from_space.flatten(
                custom_scope_separator="-", scope_separator_at_start=False,
                mapping=lambda k, primitive: [initializer for _ in range(capacity)]
            )
        else:
            var = from_space.flatten(
                custom_scope_separator="-", scope_separator_at_start=False,
                mapping=lambda k, primitive: []
            )
    else:
        if capacity is not None:
            var = [initializer for _ in range(capacity)]
        else:
            var = []
    return var
080277fac9493d9588d084ad0cfe25e711cfb61b
416,476
import re


def clean(str, include='', alpha='a-zA-Z', numeric='0-9'):
    """Filter unwanted characters. Returns the filtered string.

    Examples:
        >>> clean('1.20€')
        '120'
        >>> clean('1.20€', '.')
        '1.20'
        >>> clean('1.20€', '.€')
        '1.20€'
        >>> clean('Hello', alpha='a-z')
        'ello'

    Args:
        str (str): input string
        include (str): Regular-expression raw string. Defaults to ''.
            This parameter defines which additional characters to include in the result.
        alpha (str): Regular-expression raw string. Defaults to 'a-zA-Z'.
            This parameter defines which alpha characters should be in the result.
        numeric (str): Regular-expression raw string. Defaults to '0-9'.
            This parameter defines which numeric characters should be in the result.

    Returns:
        str: string which only contains accepted characters.
    """
    return re.sub('[^{}{}{}]+'.format(alpha, numeric, include), '', str)
31e5652d0dd863017daa4dbcb7de06efeffd9f60
405,758
def to_bool(val):
    """Convert a value to a bool."""
    if isinstance(val, bool):
        return val
    if isinstance(val, str):
        return val.lower() not in ("", "0", "false")
    return bool(val)
8050ebdc58a9774f1b023b2ee1a5f76897cb6f13
518,332
def format_seconds(s):
    """
    Format a seconds value into a human-readable form
    """
    years, s = divmod(s, 31556952)
    min, s = divmod(s, 60)
    h, min = divmod(min, 60)
    d, h = divmod(h, 24)
    return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, min, s)
f004d9f2cef8b3b9eee967ebd9d4811cbc80ae6c
27,113
import re


def parse_map_file(path):
    """
    Parse libgccjit.map, returning the symbols in the API as a list of str.
    """
    syms = []
    with open(path) as f:
        for line in f:
            m = re.match(r'^\s+([a-z_]+);$', line)
            if m:
                syms.append(m.group(1))
    return syms
b0f78cf1a7ebe45ae845fbacef3b7712a9d53fdc
695,155
def return_tag_action(row, action_dict):
    """
    Default action function for tables. This function returns the tag for the row of data.
    Used by the **TABLE_RETURN_TAG** action.

    :param List row: the data associated with the selected row
    :param Dict action_dict: the dictionary of values associated with the action - ignored in this function
    :return: The tag for the selected row of the table.
    """
    return row.tag
0dea8c52e45041f7741717453887b75009c10367
296,136
def get_charge(lines):
    """
    Searches through file and finds the charge of the molecule.
    """
    for line in lines:
        if 'ICHRGE' in line:
            return line.split()[2]
    return None
913ec1403cdf1dba5ad4b549b61cec23d3546dc7
654,993
def make_array(dims):
    """
    Creates an array (dictionary with tuple coordinates as keys) of the given dimensions
    (the 3 dims together indicate how many values are to be inserted into outarr),
    initialized with None (value=None).
    For example, if dims given = (1, 2, 1), returned arr = {(0,0,0): None, (0,1,0): None}.
    If dims = (1,2,2), 4 values are inserted.
    """
    arr = {}
    dim1, dim2, dim3 = dims
    for i in range(dim1):
        for j in range(dim2):
            for k in range(dim3):
                arr[i, j, k] = None
    return arr
aa3b0a482ad804a77a1d263b32b9f47d600f5e0f
504,851
def duplicates(list, item):
    """Returns index locations of item in list"""
    return [i for i, x in enumerate(list) if x == item]
b6aa59d4bc7cc869544b18a5322c05cdaa1f4d49
157,520
def populate_frames(num_frames, animation_data):
    """
    Takes a dictionary of frame_id: data pairs and produces a list of length num_frames
    with data inserted at indices specified by frame_id, and None everywhere else.
    """
    frame_data = {}
    for bone_id, keyframes in animation_data.items():
        frame_data[bone_id] = [None] * num_frames
        for frame_id, frame_value in keyframes.items():
            frame_data[bone_id][frame_id] = frame_value
    return frame_data
cc5e20cc04d6e5b2798497ee9d3053be62ec9d7e
487,910
def IsInstalled(vm):
    """Checks whether docker is installed on the VM."""
    resp, _ = vm.RemoteCommand('command -v docker',
                               ignore_failure=True,
                               suppress_warning=True)
    return bool(resp.rstrip())
7d020d23b6e4ec242169e8ef0602f7ff601cef34
487,531
def van_der_corput(n_sample, base=2, start_index=0):
    """Van der Corput sequence.

    Pseudo-random number generator based on a b-adic expansion.

    Parameters
    ----------
    n_sample : int
        Number of elements of the sequence.
    base : int
        Base of the sequence.
    start_index : int
        Index to start the sequence from.

    Returns
    -------
    sequence : list (n_samples,)
        Sequence of Van der Corput.
    """
    sequence = []
    for i in range(start_index, start_index + n_sample):
        n_th_number, denom = 0., 1.
        quotient = i
        while quotient > 0:
            quotient, remainder = divmod(quotient, base)
            denom *= base
            n_th_number += remainder / denom
        sequence.append(n_th_number)
    return sequence
fd29d56dea836df616fc42b894a25961ab42ebb1
230,921
def downright(i, j, table):
    """Return the product of the down-right diagonal"""
    product = 1
    for num in range(4):
        if i + num > 19 or j + num > 9:
            product *= 1
        else:
            product *= int(table[i + num][j + num])
    return product
efa618d32892b3fb68217fd41907f5d874172ecf
59,833
import random


def create_networkcopy_with_missing_nodes(graph, list_missing_percentages):
    """
    Function to generate copies of a graph with varying proportion p of missing nodes given by a list P.
    The function returns a list of graphs, where each graph is a network copy with p% of the nodes missing.

    graph = true network
    list_missing_percentages = list that contains different values for p
    """
    lst = []
    for p in list_missing_percentages:
        # Generate copy of the graph, randomly remove p% of the nodes from the set and append to list
        G = graph.copy()
        G.remove_nodes_from(random.sample(list(G.nodes()), int(len(graph.nodes()) * p)))
        lst.append((G, p))
    return lst
4830bcf73e5b8bbc6fec2b59d093435f86c3884b
484,390
def cut_lines_to_n(string, n=200):
    """
    Takes a string and breaks it into lines with <= n characters per line.
    Useful because psql queries have to have < 212 chars per line.
    """
    out = []
    lines = string.split("\n")
    for line in lines:
        newline = ""
        words = line.split(" ")
        for word in words:
            if len(newline + word) + 1 > n:
                out.append(newline.strip())
                newline = word
            else:
                newline += " " + word
        out.append(newline)
    return "\n".join(out)
2ecad131d994838fae0cc8db5381541736e4dbd3
212,381
import re


def wktfmt(wkt: str) -> str:
    """
    Round numbers in WKT str to 4 decimal places of accuracy.
    """
    return re.sub(r"([+-]*\d*\.\d\d\d\d)(\d*)", r"\1", wkt)
3e4fe65a5dd8f972fbb5bc8f13c0a0475f65fa85
260,282
def limit_results(numResults, results):
    """
    Limits results to numResults
    """
    if numResults < results.count():
        return list(results[0:numResults])
    else:
        return list(results)
11773e17aa9e4eefdf675f1b20f7310bdb09e76d
226,363
def mean_pool(data):
    """Simple mean pool function for transforming 3D features of shape
    [T]imesteps x [B]atch_size x [F]eature_size into 2D BxF features.
    (author: @klmulligan)

    Arguments:
        data (tuple): Encoder result of form (data: Tensor(TxBxF), mask: Tensor(TxB))
    Returns:
        pooled_data (Tensor): Mean pooled data of shape BxF.
    """
    # Unpack
    x, mask = data

    if mask is not None:
        return x.sum(0) / mask.sum(0).unsqueeze(1)
    else:
        return x.mean(0)
8eb0904d549be7345fecdeace2c7c4169bcc78c2
178,213
def create_compile_command(file_name):
    """
    Creates the bash command for compiling a JUnit test.

    Params:
        file_name (str): The file name of the test to compile.

    Return:
        str: The bash command for compiling.
    """
    return f"javac -d classes -cp classes/:junit-jupiter-api-5.7.0.jar:apiguardian-api-1.1.1.jar {file_name}"
eda1d873d5a35e6f294070a294cc428d083ec200
330,212
import time


def thread_worker_example(item):
    """Example worker function for ThreadPoolExecutor."""
    print(item)
    time.sleep(1)
    return item
acc23102bfe05199f3b073cc28006389b1d6343d
284,570
def readSEEDTree(treeFile):
    """
    Return nested dictionary where first dict is map from levels (1,2,3)
    and next dict is map from role to name.

    This is a simply formatted file with 4 columns:
    "role\tsubsystem\tlevel 2\t level 1"
    """
    seedTree = {'1': {}, '2': {}, '3': {}}
    with open(treeFile) as f:
        for line in f:
            (role, l3, l2, l1) = line.rstrip().split('\t')
            seedTree['1'][role] = l1
            seedTree['3'][role] = l3
            seedTree['2'][role] = l2
    return seedTree
5ae053078d990d4b9eb92bd5a5fc35d64f771b9f
460,468
import re
from typing import Tuple


async def match_splitter(match: re.Match) -> Tuple[str, str, str, str]:
    """Splits an :obj:`re.Match` to get the required attributes for substitution.
    Unescapes the slashes as well because this is Python.

    Args:
        match (:obj:`Match<re.match>`): Match object to split.

    Returns:
        (``str``, ``str``, ``str``, ``str``): A tuple of strings containing
        line, regexp, replacement and flags respectively.
    """
    li = match.group(1)
    fr = match.group(3)
    to = match.group(4) if match.group(4) else ''
    to = re.sub(r'\\/', '/', to)
    to = re.sub(r'(?<!\\)\\0', r'\g<0>', to)
    fl = match.group(5) if match.group(5) else ''
    return li, fr, to, fl
c5e72c9b34364f8e5dbd34c8e3129b442b6e6514
358,227
from typing import Counter


def countKmers(seq):
    """Returns frequencies of kmers"""
    counts = Counter(seq)
    return counts
5569c8c8a7b2104734964889a46dc4b30ba7501c
176,495
import re


def WinEventSearch(regexp, event_string, remove=True):
    """Searches inside an event string for a matching regular expression.

    Note: it assumes the regexp has one group search.

    Args:
        regexp: the regular expression string to be used.
        event_string: the search target string.
        remove: flag, if True causes the matching substring to be removed

    Returns:
        The matched group and the source string, stripped if remove is True.
    """
    match_group1 = u''
    re_match = re.search(regexp, event_string)
    if re_match:
        match_group1 = re_match.group(1)
        if remove:
            event_string = event_string.replace(re_match.group(0), u'')
    return match_group1, event_string
51a154b544be50a0f83722e7453e0e386870b9b6
270,586
def eq(max_x, x):
    """Returns equally separated decreasing float values from 1 to 0,
    depending on the maximum value of x

    Parameters
    ----------
    max_x : int
        maximum x value (maximum number of steps)
    x : int
        current x value (step)

    Returns
    -------
    float
        y value

    Example
    -------
    >>> eq(3, 0)
    1.0
    >>> eq(3, 1)
    0.6666666666666667
    >>> eq(3, 3)
    0.0
    """
    return ((-1/max_x) * x) + 1
c0076295dadf280db472f32d664eeca3a49a1780
99,024
import logging


def read_n_bytes(s, n):
    """Reads n bytes from socket s.

    Returns the bytearray of the data read.
    """
    bytes_read = 0
    _buffer = []
    while bytes_read < n:
        data = s.recv(n - bytes_read)
        if data == b'':
            break
        bytes_read += len(data)
        _buffer.append(data)
    result = b''.join(_buffer)
    if len(result) != n:
        logging.warning("expected {} bytes but read {}".format(n, len(result)))
    return b''.join(_buffer)
c0957afbd7595eb2e841fe0690e1f5b67d9a7945
395,151
import torch


def hot_to_indices(hot):
    """Convert an element of one-hot encoding to indices"""
    hot = torch.tensor(hot)
    _, max_index = hot.max(1)
    return max_index.data.cpu().numpy().tolist()
71a911d1ecb809c439bf42e8e8503e968f3d5cf8
575,605
import base64


def base64_string_decode(data):
    """
    Decodes a base64 encoded string into a string

    :param data: str: string to decode
    :return: str
    """
    return base64.b64decode(data).decode('utf-8')
03194e11c13e740064bc42142442df59dc1d4bc7
197,498
def pot_for_column(cls, column, summary=False):
    """Translatable texts get categorized into different POT files to help
    translators prioritize. The pots are:

    - flavor: Flavor texts: here, strings from multiple versions are summarized
    - ripped: Strings ripped from the games; translators for "official"
      languages don't need to bother with these
    - effects: Fanon descriptions of things; they usually use technical language
    - misc: Everything else; usually small texts

    Set summary to True if this is a flavor summary column. Others are
    determined by the column itself.
    """
    if summary:
        return 'flavor'
    elif column.info.get('ripped'):
        return 'ripped'
    elif column.name.endswith('effect'):
        return 'effects'
    else:
        return 'misc'
4c6ee265b4330a1b020a3a2902d4ffa7a3932392
270,399
def make_adder(n):
    """Return a function that takes an argument K and returns N + K.

    >>> add_three = make_adder(3)
    >>> add_three(1) + add_three(2)
    9
    >>> make_adder(1)(2)
    3
    """
    return lambda x: x + n
e14170199d4503ebe5f91eca6c8fe3bc19637089
506,435
def auto_cmap(labels):
    """
    Find an appropriate color map based on the provided labels.
    """
    assert len(labels) <= 20, "Too many labels to support"
    cmap = "Category10_10" if len(labels) <= 10 else "Category20_20"
    return cmap
99bdf74197b17d5908237e6a45b0882330c96024
39,639
def parse_str_to_types(string):
    """Converts string to different object types they represent.

    Supported formats: True, False, None, int, float, list, tuple
    """
    if string == 'True':
        return True
    elif string == 'False':
        return False
    elif string == 'None':
        return None
    elif string.lstrip('-+ ').isdigit():
        return int(string)
    elif string == '':
        return ''
    elif (string[0] in '[(') and (string[-1] in ')]'):
        # Recursively parse a list/tuple into a list
        if len(string.strip('()[]')) == 0:
            return []
        else:
            return [parse_str_to_types(s) for s in string.strip('()[]').split(',')]
    else:
        try:
            return float(string)
        except ValueError:
            return string
7b9287d6febf15d1111ddd7f2a217978c646ac3f
261,114
def mk_str(mk):
    """Replace class path for backwards compatibility of matches keys."""
    return str(mk).replace('indra.statements.statements', 'indra.statements')
c6bf41071e3533489c763a178331a913c115e1e8
426,881
def calc_an_sparsity(L, q, K):
    """Calculates sparsity of Adjacent Neighborhood scheme"""
    return 1 + L*(q-1)*q**(K-1)
0fc6f63a1f417c46a26721fcf82e9dc0c827cc6c
646,391
def spline_grid_from_range(spline_size, spline_range, round_to=1e-6):
    """
    Compute spline grid spacing from desired one-sided range
    and the number of activation coefficients.

    Args:
        spline_size (odd int): number of spline coefficients
        spline_range (float): one-sided range of spline expansion.
        round_to (float): round grid to this value
    """
    if int(spline_size) % 2 == 0:
        raise TypeError('size should be an odd number.')
    if float(spline_range) <= 0:
        raise TypeError('spline_range needs to be a positive float...')

    spline_grid = ((float(spline_range) / (int(spline_size) // 2)) // round_to) * round_to

    return spline_grid
0e5a100e644786a33ba92172d2b0ccaf6ef4971c
592,789
from typing import Dict
from typing import Any


def strip_empty_params(params: Dict[str, Any]) -> Dict[str, Any]:
    """Remove any request parameters with empty or ``None`` values."""
    return {k: v for k, v in params.items() if v or v is False}
aa8e320b93524ef13d25d522464ab22b48226e79
83,623
import torch


def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    """
    Compute ``torch.equal`` with the optional mask parameter.

    Args:
        ignore_index (int, optional): Specifies a ``tensor`` index that is ignored.

    Returns:
        (bool) Returns ``True`` if target and prediction are equal.
    """
    if ignore_index is not None:
        assert tensor.size() == tensor_other.size()
        mask_arr = tensor.ne(ignore_index)
        tensor = tensor.masked_select(mask_arr)
        tensor_other = tensor_other.masked_select(mask_arr)
    return torch.equal(tensor, tensor_other)
1da45ac80a373c55b453fcc6327af0b5287640b2
127,844
def decode_line(s, l, func=float):
    """
    split a string and convert each token with <func> and append the result to list <l>
    """
    x = s.split()
    for v in x:
        l.append(func(v))
    return l
d4b602b4c44d60916c56a1ca1c04b102e4b186fb
239,039
from typing import List


def simulate_day(school: List[int]) -> List[int]:
    """Simulates a school of fish for one day and returns the school."""
    return [*school[1:7], school[0] + school[7], school[8], school[0]]
126f430920474d54651bf7a19a18006f66792ff1
643,171
from typing import Callable


def finite_difference_derivative(func: Callable, x: float, h: float = 1e-6) -> float:
    """Estimate the derivative of a function at a given value using the finite difference method.

    Parameters
    ----------
    func : Callable
        The function for which the derivative will be estimated.
    x : float
        The value where func's derivative will be estimated.
    h : float
        The step size for the finite difference.

    Returns
    -------
    estimated derivative : float
    """
    return (func(x + h) - func(x - h)) / (2 * h)
cf9674140be80342f63624297fcc1d3d4b9412e6
417,823
from typing import Dict
from typing import Any
from typing import Optional


def b(field: str, kwargs: Dict[str, Any], present: Optional[Any] = None, missing: Any = '') -> str:
    """
    Return `present` value (default to `field`) if `field` in `kwargs` and Truthy,
    otherwise return `missing` value
    """
    if kwargs.get(field):
        return field if present is None else str(present)
    return str(missing)
1733631d6a8bc7790f7ec4cb63a4648ee34a4dd4
431,383
def hex2rgb(hex_: str) -> tuple:
    """
    hex2rgb from https://stackoverflow.com/a/29643643/8608146
    """
    hex_ = hex_.lstrip('#')
    return tuple(int(hex_[i:i + 2], 16) for i in (0, 2, 4))
47e7591c5ec2f21de7edfb39aa91fbc9a6b94bec
199,159
def dotted_name(cls):
    """Dotted name for a class. Example: ``my.module.MyClass``.

    :param cls: the class to generate a dotted name for.
    :return: a dotted name to the class.
    """
    return f"{cls.__module__}.{cls.__name__}"
2d37d2b8e3f42946923ddd1f621b2aeaa289c266
557,721
def parallel_mean(mean_a, count_a, mean_b, count_b):
    """Compute the mean based on stats from two partitions of the data.

    See "Parallel Algorithm" in
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance

    Args:
        mean_a: the mean of partition a
        count_a: the number of elements in partition a
        mean_b: the mean of partition b
        count_b: the number of elements in partition b

    Return:
        the mean of the two partitions if they were combined
    """
    mean = (count_a * mean_a + count_b * mean_b) / (count_a + count_b)
    return mean
7f09e422431cb34b3cbdf72baf1ba4a69ecdd050
507,327
import pickle


def load_object(filename):
    """
    Load a python object stored in a file using pickle

    :param filename: File from which to load the object
    :return:
    """
    with open(filename, 'rb') as input_file:
        obj = pickle.load(input_file)
    return obj
e9b8c147ede279d7aa61f0d584b5dfa82728c8de
206,511
def fft_suitable(N: int) -> bool:
    """Check whether `N` has only small prime factors.

    Return True if the prime factorization of `N` is suitable for efficient FFTs,
    that is contains only 2, 3, 5 and 7."""
    for p in [2, 3, 5, 7]:
        while N % p == 0:
            N //= p
    # All suitable prime factors taken out
    # --- a suitable N should be left with just 1
    return N == 1
aa6d1fd3b51a3fafb9e05f0203d0649e4a2080e8
189,376
import random


def SplitGaps(zs):
    """Splits zs into xs and ys.

    zs: sequence of gaps

    Returns: tuple of sequences (xs, ys)
    """
    xs = [random.uniform(0, z) for z in zs]
    ys = [z - x for z, x in zip(zs, xs)]
    return xs, ys
087e95faa10c8b519aed6c018a215fa5620d8382
114,764
def millis_offset_between_epochs(reference_epoch, target_epoch):
    """
    Calculates the signed milliseconds delta between the reference unix epoch
    and the provided target unix epoch

    :param reference_epoch: the unix epoch that the millis offset has to be calculated with respect to
    :type reference_epoch: int
    :param target_epoch: the unix epoch for which the millis offset has to be calculated
    :type target_epoch: int
    :return: int
    """
    assert isinstance(reference_epoch, int)
    assert isinstance(target_epoch, int)
    return (target_epoch - reference_epoch) * 1000
3cb20d46ccf860dc21a2327d8520711c1720ef83
145,496
import re


def count_words_in_markdown(markdown: str) -> int:
    """
    Count the words in a block of Markdown text.

    Strips off the markup before doing the word count.
    From https://github.com/gandreadis/markdown-word-count/blob/master/mwc.py
    and simplified just a bit.
    """
    text = markdown

    text = re.sub(r'<!--(.*?)-->', '', text, flags=re.MULTILINE)
    text = text.replace('\t', ' ')
    text = re.sub(r'[ ]{2,}', ' ', text)
    text = re.sub(r'^\[[^]]*\][^(].*', '', text, flags=re.MULTILINE)
    text = re.sub(r'^( {4,}[^-*]).*', '', text, flags=re.MULTILINE)
    text = re.sub(r'{#.*}', '', text)
    text = text.replace('\n', ' ')
    text = re.sub(r'!\[[^\]]*\]\([^)]*\)', '', text)
    text = re.sub(r'</?[^>]*>', '', text)
    text = re.sub(r'[#*`~\-–^=<>+|/:]', '', text)
    text = re.sub(r'\[[0-9]*\]', '', text)
    text = re.sub(r'[0-9#]*\.', '', text)

    return len(text.split())
f15d84c93975607b7c9ceb66946b688901ec6f6b
297,947
def parse_ref(fxy) -> tuple:
    """Returns a tuple of the FXXYYYY string parsed out into integers."""
    f = int(fxy[0])
    x = int(fxy[1:3])
    y = int(fxy[3:6])
    return f, x, y
2b191e8205cf7943dd6c962a7f172457f78208d5
164,195
def get_item_number(soup):
    """
    Returns the product's unique item_number
    """
    item_number = soup.find('div', attrs={'id': 'descItemNumber'})
    if not item_number:
        return "N/A"
    item_number = item_number.get_text()
    return item_number
5460610ea099c61574ee1988695e116c5b4a3b51
483,489
def has_resource(cobj, resource_label):
    """
    Check to see if a CachedImageObject has a specified resource

    :param cobj: CachedImageObject object from XnatUtils
    :param resource_label: label of the resource to check
    :return: True if cobj has the resource and there is at least one file, False if not.
    """
    has_it = False
    res_list = [r for r in cobj.get_resources() if r['label'] == resource_label]
    if len(res_list) > 0:
        # We have resources, so grab the first one
        res = res_list[0]
        # If the resource is empty, the file_count will be blank, not 0
        if res['file_count'] != '':
            # Convert the file count to an integer
            file_count = int(res_list[0]['file_count'])
            if file_count > 0:
                # at least one file, so yep
                has_it = True
    return has_it
016a26e6660208bb92daebafb8c1e54f74dc5fc5
463,605
def check(move, occupied, width, height, n=1):
    """
    _Purpose:
        takes a move coord and returns a score based on how many free spaces
        are around the move of distance n
    _Parameters:
        move (tuple): xy coord of the move
        occupied (list): xy coords all occupied spaces on board
        width (int): width of board
        height (int): height of board
        n (int): how many spaces ahead to look (default 1)
    _Returns:
        score (int): score of move
    """
    if (n == 0):
        # base case
        return 0

    up = (move[0], move[1]-1)
    down = (move[0], move[1]+1)
    left = (move[0]-1, move[1])
    right = (move[0]+1, move[1])
    score = 0

    occupied.append(move)

    if not ((left in occupied) or (left[0] < 0)):  # left
        score += 1 + check(left, occupied, width, height, n-1)
    if not ((right in occupied) or (right[0] > width-1)):  # right
        score += 1 + check(right, occupied, width, height, n-1)
    if not ((up in occupied) or (up[1] < 0)):  # up
        score += 1 + check(up, occupied, width, height, n-1)
    if not ((down in occupied) or (down[1] > height-1)):  # down
        score += 1 + check(down, occupied, width, height, n-1)

    return score
1d2034e28a940fae4d67495d30f83046f363c8f5
579,838
from typing import List
from pathlib import Path


def remove_external_imports(imports: List[str], root: str) -> List[str]:
    """
    Removes imports from external libraries. This is achieved by checking if the
    given import name contains the stem of the root directory (main directory of
    the repo to analyze).

    :param imports: list of imports as strings.
    :param root: main directory to look for imports
    :return: list of imports (as strings) without external libraries.
    """
    return [match for match in imports if Path(root).stem in match]
683ce8591966daa15bf838bc33c9b64c11c0276c
506,843
def find_change(plan, category, unit):
    """
    finds the change in the given category and unit in the given plan

    :return: Change object
    """
    for change in plan.area_changes:
        if change.category == category and change.unit == unit:
            return change
    return None
b1adbbb34e2f1f41578d47e16eebd51c4980b14b
577,294
def msvcrt_rand(s=0, size=1):
    """
    Emulate interplay of srand() and rand()

    :param int s: The seed.
    :param int size: Desired length of returned data.
    :return: A sequence of bytes computed using the supplied arguments.
    :rtype: bytearray
    """
    result = bytearray()
    for i in range(0, size):
        s = (214013*s + 2531011) & 0x7fffffff
        result.append(s >> 16 & 0xff)
    return result
0de2527a86aefd6dfb5a40e07e294844cf2d111f
622,175
def CalculateRollingMax(raw_scores, total_samples, window_samples):
    """Calculates a rolling maximum across the array, in windows of the same size.

    The scores returned from open-cv matchTemplate are calculated as if each value
    in the array matches with the first position in the window. Here, for each
    position in the original array, scores across the window are aggregated, to
    take into account the match with the first, second, third, ... position in the
    window. The aggregation method is max(), it performed better than avg().

    Args:
        raw_scores: array of scores calculated with open-cv matchTemplate function.
            It should have length total_samples - window_samples + 1.
        total_samples: total number of samples of the original data.
        window_samples: number of samples in the window.

    Returns:
        Array of scores calculated aggregating with the maximum.
    """
    scores = []
    left = 0
    current_max = 0
    for right in range(total_samples):
        if right < len(raw_scores):
            if raw_scores[right] > current_max:
                current_max = raw_scores[right]
        current_size = right - left + 1
        if current_size > window_samples:
            pop_value = raw_scores[left]
            left += 1
            if pop_value >= current_max:
                current_max = 0
                for c in range(left, min(right + 1, len(raw_scores))):
                    if raw_scores[c] > current_max:
                        current_max = raw_scores[c]
        score = current_max
        scores.append(score)
    return scores
e5fc81a57d6b51c983c798a51d552af76e30a8fb
61,613
from typing import List
from typing import Any


def flatten(deep_list: List[Any]) -> List[Any]:
    """Recursively flatten the list into 1D list containing all nested elements"""
    if len(deep_list) == 0:
        return deep_list
    if isinstance(deep_list[0], list):
        return flatten(deep_list[0]) + flatten(deep_list[1:])
    return deep_list[:1] + flatten(deep_list[1:])
bdcfd66f360468cf9a683e7078aa28be0e5a39ca
210,538
def license(_):
    """Return the contents of the LICENSE.txt file."""
    with open('LICENSE.txt') as flicense:
        return flicense.read()
50e9572537dee41a36b0859cee5596f136d14fbb
625,191
def _find_valid_path(options):
    """Find valid path from *options*, which is a list of 2-tuple of (name, path).

    Return first pair where *path* is not None.
    If no valid path is found, return ('<unknown>', None)
    """
    for by, data in options:
        if data is not None:
            return by, data
    else:
        return '<unknown>', None
2f156fd1d592fb3a44c5280a53180b4066fe7d18
68,875
def getRankAttribute(attribute, reverse=False):
    """
    Takes as input an attribute (node or edge) and returns an attribute where
    each node is assigned its rank among all others according to the attribute
    values. The node/edge with lowest input value is assigned 0, the one with
    second-lowest value 1, and so on.

    Keyword arguments:
    attribute -- the input node/edge attribute
    reverse -- reverses the ranking, if set to True
    """
    # Example input: [0.1, 0.05, 0.9, 0.2], ascending
    # Example output: [1, 0, 3, 2]
    _attribute = zip([x for x in range(0, len(attribute))], attribute)
    _attribute = sorted(_attribute, key=lambda x: x[1], reverse=reverse)

    _index = 0
    result = [0] * len(attribute)
    for (i, v) in _attribute:
        result[i] = _index
        _index = _index + 1

    return result
b11f79dea4cb0497a9505571f299b1e73bc47429
258,600
def obj(request):
    """Default values for the statdyn analysis command line."""
    return {
        "keyframe_interval": 1_000_000,
        "keyframe_max": 500,
        "wave_number": request.param,
    }
10c05bd99c0aa687bf6c279b2ed31c94dfc49542
260,015
from typing import Union
from typing import Tuple
from typing import List


def increase_version_number(version_buffer: Union[Tuple[int, int, int], List[int]],
                            semantic_version: str = "patch") -> List[int]:
    """
    Increases the number of the version based on the 'release_type' value in the release_type.yaml

    Args:
        version_buffer: (Union[Tuple[int, int, int], List[int]]) the version to be increased
        semantic_version: (str) the semantic version/release type e.g. patch, minor, major,
            defaults to patch if not recognised

    Returns: (List[int]) the updated version
    """
    first: int = version_buffer[0]
    second: int = version_buffer[1]
    third: int = version_buffer[2]

    if semantic_version == "patch":
        third += 1
    elif semantic_version == "minor":
        second += 1
        third = 0
    elif semantic_version == "major":
        first += 1
        second = 0
        third = 0
    else:
        third += 1

    return [first, second, third]
98171df679ad907145b82f393f19f739cd6f0782
359,710
def stringify(item):
    """
    Returns a quoted string item if passed argument is a string, else returns
    a string representation of that argument. If passed argument is None,
    returns None.

    Parameters
    ----------
    item : Any type
        Item to be parsed.

    Returns
    -------
    str or None
    """
    if item is None:
        # THIS IS ACTUALLY NOT NEEDED? HMMMMM
        return 'NULL'
    elif isinstance(item, str):
        item = item.replace("'", "''")
        return f"'{item}'"
    else:
        return str(item)
6ab8bd9cca99b50c8de84b790aa0a7a3b70a9ecc
140,466
def _create_weather_key(lat, lng):
    """
    Creates a properly formatted key for storing in a JSON database.

    Args:
        lat (string or float) -- latitude component of coordinate
        lng (string or float) -- longitude component of coordinate

    Returns:
        string -- key value
    """
    tmp = "%s,%s" % (lat, lng)
    return tmp.replace(".", "")
0e75358d4319d3e1919bda0a46d82a84bcf6a3da
374,996
from typing import Callable


def get_values_by_keys(k: list, default=None) -> Callable[[dict], list]:
    """
    Filter dictionary by list of keys.

    Parameters
    ----------
    k: list
    default: any, optional
        Set as default value for key not in dict. Default value is None
    """
    return lambda d: list(map(lambda key: d.get(key, default), k))
2306493ef30753cac90d16529a4f128c45877fba
650,643
def _ensure_width(inp: str, width: int):
    """
    Ensure that string `inp` is exactly `width` characters long.
    """
    return inp[:width].ljust(width)
ed8ef2e184d2ea00027ed2648adc33927580e694
606,920
import re


def read_csv(filename):
    """Reads data from a space or comma delimited file.

    :param filename: path of the file
    :type filename: str
    :return: data from file
    :rtype: List[List[str]]
    """
    data = []
    regex = re.compile(r'(\s+|(\s*,\s*))')
    with open(filename, encoding='utf-8-sig') as csv_file:
        for line in csv_file:
            line = regex.sub(' ', line)
            row = line.split()
            if not row:
                continue
            data.append(row)

    if not data:
        raise ValueError('The file is empty')

    return data
9835e3887e8e3599e94ebe5e633a329fcec03d35
114,986
import math

import torch


def cp_init(weights, factors, std=0.02):
    """Initializes directly the weights and factors of a CP decomposition
    so the reconstruction has the specified std and 0 mean

    Parameters
    ----------
    weights : 1D tensor
    factors : list of 2D factors of size (dim_i, rank)
    std : float, default is 0.02
        the desired standard deviation of the full (reconstructed) tensor

    Notes
    -----
    We assume the given (weights, factors) form a correct CP decomposition,
    no checks are done here.
    """
    rank = factors[0].shape[1]  # We assume we are given a valid CP
    order = len(factors)
    std_factors = (std/math.sqrt(rank))**(1/order)

    with torch.no_grad():
        weights.fill_(1)
        for i in range(len(factors)):
            factors[i].normal_(0, std_factors)
    return weights, factors
387608d192911a8d89d95f7bf53b79cf5b2a0773
348,732
def cmmdc(x, y):
    """Computes CMMDC (greatest common divisor) for two numbers."""
    if y == 0:
        return x
    return cmmdc(y, x % y)
f1d731ca0e1942e33b4fdf2d792e16c8ccddd13c
58,891
import pwd


def get_uid(name):
    """Returns an uid, given a user name."""
    try:
        result = pwd.getpwnam(name)
    except KeyError:
        result = None
    if result is not None:
        return result[2]
    return None
cdbbe8ee3cd49a1f19762aad981cafb07a088bf1
436,954
def min_delta(delta):
    """
    Minimum delta criteria

    Parameters
    ----------
    delta : float
        The minimum height of a leaf above its merger level
    """
    def result(structure, index=None, value=None):
        if value is None:
            if structure.parent is not None:
                return (structure.height - structure.parent.height) >= delta
            return (structure.vmax - structure.vmin) >= delta
        return (structure.vmax - value) >= delta
    return result
05bd53883234e1a7df5120b80c670c928697000b
194,882
def _joinregexes(regexps):
    """gather multiple regular expressions into a single one"""
    return b'|'.join(regexps)
de3b51de059659b6870e2fadfcdf8e4787bc2e20
307,460
from typing import List
import traceback


def catch_errors_as_message(function):
    """Catches errors as a list of messages

    Parameters
    ----------
    function : Coroutine[Any, Any, List[str]]
        Function to wrap. In case of an error the message is returned.
    """
    async def wrapper(*args, **kwargs) -> List[str]:
        try:
            return await function(*args, **kwargs)
        except Exception:
            trace = traceback.format_exc()
            return [trace]
    return wrapper
d187e2a060d0a62697c91149640d6480bd5a89a9
174,867
def Sub(a1, a2, ctx=None):
    """Subtract two numbers"""
    return a1 - a2
e563c81eb0f8b01f5f93b49b259954df6efd486f
582,540
def _cut_if_too_long(text: str, max_length: int) -> str:
    """Cut a string down to the maximum length.

    Args:
        text: Text to check.
        max_length: The maximum length of the resulting string.

    Returns:
        Cut string with ... added at the end if it was too long.
    """
    if len(text) <= max_length:
        return text
    return text[: max_length - 3] + "..."
72745efb8a4fd7d6b2af7356d00e1e5bc554ff62
679,860
import operator


def genderdecode(genderTag):
    """
    One-hot decoding for the gender tag predicted by the classifier.
    Dimension = 2.
    """
    index, value = max(enumerate(genderTag), key=operator.itemgetter(1))
    if index == 0:
        return 'm'
    if index == 1:
        return 'f'
    if index == 2:
        return 'any'
ba1ed2ea3e8504d601692ff764d871d4508b1a36
292,923
import math


def _radians_to_angle(rad):
    """
    Convert radians into angle

    :param rad:
    :return: angle
    """
    return rad * 180 / math.pi
f88aa0a2da9f0dd31a0576afcd1ffd80ba9eafc8
63,320
import gzip


def load_twitter_dict(path):
    """
    Loading archived cPickled dict

    Note: this function here is for the reference

    Args:
        path - str: path to cPickle

    Returns:
        tweet_list - list: list of json files
    """
    with gzip.open(path, 'rb') as f:
        tweet_list = f.readlines()
    return tweet_list
7e2ffa9de54af8aa370f2f0e8b86b66689214a1c
561,111
import base64


def _b64(text):
    """Encodes text as base64 as specified in ACME RFC."""
    return base64.urlsafe_b64encode(text).decode("utf8").rstrip("=")
6385bdb0d3129157409f4e0a9717778728d7f63f
236,086
from datetime import datetime


def get_datetime(date, time, *, microseconds=True):
    """
    Combine date and time from dicom to isoformat.

    Parameters
    ----------
    date : str
        Date in YYYYMMDD format.
    time : str
        Time in either HHMMSS.ffffff format or HHMMSS format.
    microseconds: bool, optional
        Whether to include microseconds in the output

    Returns
    -------
    datetime_str : str
        Combined date and time in ISO format, with microseconds as
        if fraction was provided in 'time', and 'microseconds' was True.
    """
    if '.' not in time:
        # add dummy microseconds if not available for strptime to parse
        time += '.000000'
    td = time + ':' + date
    datetime_str = datetime.strptime(td, '%H%M%S.%f:%Y%m%d').isoformat()
    if not microseconds:
        datetime_str = datetime_str.split('.', 1)[0]
    return datetime_str
641f91292da33de6d516bfa06749c98bee1dc028
645,437
import json


def load_dictionary_from_file(file_path):
    """Load a dictionary from a JSON file.

    Parameters
    ----------
    file_path : string
        The JSON file path to load the dictionary from.

    Returns
    -------
    dictionary : dict
        The dictionary loaded from a JSON file.
    """
    with open(file_path, 'r') as f:
        return json.load(f)
f273b13e43f0df1b3cab4c0f8097bec401b83c23
602,074