Columns: content (string, 39 to 9.28k chars); sha1 (string, 40 chars); id (int64, 8 to 710k)
def gather(x, indices): """ Return the sequence [x[i] for i in indices]. >>> gather([8, 16, 32, 64, 128], [3, 0, 2]) [64, 8, 32] >>> gather([8, 16, 32, 64, 128], []) [] """ return [x[i] for i in indices]
821a28ddae859fb60e0228eef16622691a83dbd0
198,336
from typing import Sequence from typing import Union from typing import List from typing import Tuple def scale_with_abs_strides(seq: Sequence[float], strides: Sequence[Union[Sequence[Union[int, float]], Union[int, float]]], dim_idx: int, ) -> List[Tuple[float]]: """ Scale values with absolute stride between feature maps Args: seq: sequence to scale strides: strides to scale with. dim_idx: dimension index for stride """ scaled = [] for stride in strides: if not isinstance(stride, (float, int)): _stride = stride[dim_idx] else: _stride = stride _scaled = [i * _stride for i in seq] scaled.append(tuple(_scaled)) return scaled
a191ef66508e81681cc90c98f60d4e7f2a12445f
512,425
from typing import List def load_words(file: str) -> List[str]: """ Loads words from an input file where there is a word in every line. :param file: path to file :return: List of words (without trailing newlines) """ with open(file) as f: words: List[str] = f.read().splitlines() return words
ced5398f3232f1a55d8c44d2f362fbb06366df3b
570,585
import torch def AbsDepthError_metrics(depth_est: torch.Tensor, depth_gt: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: """Calculate average absolute depth error Args: depth_est: estimated depth map depth_gt: ground truth depth map mask: mask """ depth_est, depth_gt = depth_est[mask], depth_gt[mask] return torch.mean((depth_est - depth_gt).abs())
9a65c5c089ad1c43035ac01c1ca91885328c22a7
296,442
import random def SimpleRandomSample(sample_set, sample_size): """ A simple random sample is a sample in which every member of the population has an equal chance of being chosen. :param sample_set: set of values to select from :param sample_size: number of values to select :return: result sample """ return random.sample(sample_set, sample_size)
946da14fbeec5fb79ab005e9a2ecbd0f1fbbb89b
325,878
def clean_xml_attribute(element, attribute, default=None): """ Get an XML attribute value and ensure it is legal in XML :param element: :param attribute: :param default: :return: """ value = element.attrib.get(attribute, default) if value: value = value.encode("utf-8", errors="replace").decode("utf-8", errors="backslashreplace") value = value.replace(u"\ufffd", "?") # strip out the unicode replacement char return value
c236a33b124b697cad35a17977b711b2f799737c
311,984
def bisection_search(arr, x, lo=0, hi=None): """ Assumes arr is sorted in *ascending* order. Returns the index where x should be inserted (moving everything from that index onward to the right) to keep arr sorted. This is the same as bisect.bisect_right(arr, x) """ if hi is None: hi = len(arr) while lo < hi: mid = (lo + hi) // 2 #// is integer division if x < arr[mid]: hi = mid else: lo = mid + 1 return lo
6fe894920f24734b69fad4186f9d89c817a1ab88
482,905
from pathlib import Path import secrets def _get_download_token() -> str: """Return a download token from ~/.deeppavlov/token file. If the token file does not exist, creates the file and writes to it a random URL-safe text string containing 32 random bytes. Returns: 32-byte URL-safe text string from ~/.deeppavlov/token. """ token_file = Path.home() / '.deeppavlov' / 'token' if not token_file.exists(): if token_file.parent.is_file(): token_file.parent.unlink() token_file.parent.mkdir(parents=True, exist_ok=True) token_file.write_text(secrets.token_urlsafe(32), encoding='utf8') return token_file.read_text(encoding='utf8').strip()
357edba8297b5aae8ee9f176911fdd55455320f0
553,361
import socket import struct def int_to_ip_address(i): """ Convert an integer to an IP address :param i: The number to convert :return: The IP address in octet notation """ return socket.inet_ntoa(struct.pack('!L', i))
6647cadbc2c17d093abf6be68975947a37417b16
374,293
def get_module_names(thread): """ Returns a sequence of module names from the stack frames of this thread. """ def GetModuleName(i): return thread.GetFrameAtIndex( i).GetModule().GetFileSpec().GetFilename() return list(map(GetModuleName, list(range(thread.GetNumFrames()))))
101470cb441acf445c1d084d13698564d7d858a3
582,159
import random def probability(p): """Return true with probability p.""" return p > random.uniform(0.0, 1.0)
010f68e2aed25e24bdf9d24f737c1bd5d1da3da0
461,220
import torch def split(value, num_or_size_splits, axis=0): """ Splits a tensor into sub-tensors. Parameters ---------- value : tensor The Tensor to split. num_or_size_splits : list Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or Python list containing the sizes of each output tensor along split_dim. axis : int The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0. Returns ------- Tensor objects resulting from splitting value. """ return torch.split(value, num_or_size_splits, dim=axis)
b997285da46db1e20ca92916f46ebc51b8840786
31,366
def get_len_of_range(start, stop, step): """Get the length of a (start, stop, step) range.""" if start < stop: return ((stop - start - 1) // step + 1) return 0
7335a322a2c4d7d56e5e0cec2264f57235c0c6a9
156,035
def flatten_path(path): """Returns the file name in the path without the directory before""" return path.split("/")[-1]
05863693d1bc0dc46671de55d7d9d0842d282fae
173,011
from pathlib import Path import tempfile def tmp_file(**kwargs) -> Path: """ Creates NamedTemporaryFile and returns associated pathlib.Path :param kwargs: NamedTemporaryFile arguments :return: pathlib.Path(temp_file) """ kwargs['delete'] = kwargs.get('delete', False) return Path(tempfile.NamedTemporaryFile(**kwargs).name)
9be1934f68e6c5510e7e1b0591149ec3e83d5cb5
460,581
import re def find_values_in_quote(question): """ We try to find all values in quotes, as these are almost always values we need later. We support a variety of different utf-8 quote characters. """ matches = re.findall(r"\s[\"'‘“’](.+?)[\"'’”]", question) return matches
80052f0b8f85cd432d73146f3a9a680b87abc8e7
390,591
from typing import Callable from typing import Any from typing import Sequence def foldl(fun: Callable[[Any, Any], Any], acc: Any, seq: Sequence[Any]) -> Any: """Implementation of foldl in Python3. This is an implementation of the left-handed fold function from functional programming. If the list is empty, we return the accumulator value. Otherwise, we recurse with a new accumulator value which is a result of the function application on the old accumulator value and the head of the list. Below are the implementations of the len and sum functions using foldl to demonstrate how foldl function works. >>> foldl((lambda x, _: x + 1), 0, [0, 1, 2, 3, 4]) 5 >>> foldl((lambda x, y: x + y), 0, [0, 1, 2, 3, 4]) 10 foldl takes the second argument and the first item of the list and applies the function to them, then feeds the function with this result and the second argument and so on. """ return acc if not seq else foldl(fun, fun(acc, seq[0]), seq[1:])
b21d0dd4ee203f913987809eb438fad668260ef1
420,629
def calculate_pos_deviation_square(error, sl): """ Calculate the square deviation between a given error value and a significance level if the deviation is positive (>0) Parameters ---------- error : error sl : significance level Returns ------- square deviation or 0 """ if error > sl: return (error-sl)**2 else: return 0
cfa327207202e23db855daa17fd48ee29648ce50
312,473
def mpi_isend_object(comm, data, dest_rank): """ Send Python object to another MPI rank in a non-blocking way. Parameters ---------- comm : object MPI communicator object. data : object Data to send. dest_rank : int Target MPI process to transfer data. Returns ------- object A handler to MPI_Isend communication result. """ return comm.isend(data, dest=dest_rank)
9831fceb8164bd6591ab01f6891f965272a3bbf9
429,976
def _diff_dict(orig, new): """Return a dict describing how to change orig to new. The keys correspond to values that have changed; the value will be a list of one or two elements. The first element of the list will be either '+' or '-', indicating whether the key was updated or deleted; if the key was updated, the list will contain a second element, giving the updated value. """ # Figure out what keys went away result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())} # Compute the updates for key, value in new.items(): if key not in orig or value != orig[key]: result[key] = ['+', value] return result
72626d30b81333486d919045604262c9e6c7fb11
543,982
def datetime_format(datetime_obj, fmtstring = '%Y-%m-%d'): """ A function to string format a datetime.datetime object. """ return datetime_obj.strftime(fmtstring)
a64ce81cb28c59489bff3d57751feebb818e04c5
694,658
def create_html_link(url, text): """Wrap a text into an html anchor element with a specified url.""" line = f"<a href='{url}'>{text}</a>" return line
43d0085a826b23b8441bb3e6ccf8035268be9ae8
320,696
from datetime import datetime def format_t(t): """ Formats the date strings of a Wikidata entry """ if t is not None: return datetime.strptime(t[1:11].replace("-00", "-01"), "%Y-%m-%d").date() else: return t
e230aee8999edf63f88ca64e2cac03a709a6e931
433,488
def union_probability(p_a, p_b, p_intersection): """Compute the probability of P(A or B) given P(A), P(B) and P(A and B)""" return p_a + p_b - p_intersection
a280e62008a2bf3dfeb4b7df5d31f5eb531f9341
574,219
import re def regstrip(string, char_to_remove=None): """Does the same thing as the strip() method using regex: - without 2nd argument it removes leading and trailing whitespace characters - 2nd argument makes it possible to remove custom characters instead; must be given as a string""" if char_to_remove: pattern = '^[' + char_to_remove + ']+|[' + char_to_remove + ']+$' repl = re.compile(pattern) else: repl = re.compile(r'^\s+|\s+$') done = repl.sub('', string) return done
77f86d650325710c06b126eff482632a898092f7
607,364
def calc_suma_recursiva(n: int = 10) -> int: """ Recursively computes the sum of the first n numbers Default value: 10 :param n: how many numbers to sum :type n: int :return: sum of the first n numbers :rtype: int """ if n == 0: return 0 else: return n + calc_suma_recursiva(n-1)
d17b92531270160601981f0c8683a8e324fa8a3d
49,800
import platform def get_command(command): """ @brief Ensures command can be executed on each platform @param command The commands name @return A platform appropriate command """ if platform.system() == "Windows": command += ".exe" return command
e761078dcf70216283fcab4967fde42393d3d699
611,143
import re def parse_python_func_imports(func_str): """Get a list of import statement lines from a (string) Python function.""" import_lines = func_str.split('def ')[0].strip() match = re.search(r'((?:import|from)[\S\s]*)', import_lines) out = [] if match: out = match.group(1).splitlines() return out
bfe6966cf090642a057f3a9e135c3bdca30adf1e
405,473
def get_pk_column(self): """ Gets the primary key column. """ return self.query.get_meta().pk.column
143d0cd7383186631f615eeaf4d9c67579c7067e
61,112
def fromname(self,name): """Return the particle data given the PDG name. Returns None if not found.""" for id in self.ids(): if self[id].name == name: return self.get(id) return None
266631bd84ad0b59c56f5aa5032ec3bfcb5c0fbe
103,520
def _filter_chain(accumulated, additional): """ Given two functions that take a block_key and return a boolean, yield a function that takes a block key, and 'ands' the functions together """ return lambda block_key: accumulated(block_key) and additional(block_key)
70479841e85eaf2cee40a5e48232eafb8592324c
365,965
def is_same_week( date1, date2 ): """Checks whether two dates are within the same week""" return date1.year == date2.year and date1.strftime( '%W' ) == date2.strftime( '%W' )
96706989bc6912c1e1d079013af28f5f2828499f
494,278
def hr_bytes(n): """ Human readable bytes value Notes: http://code.activestate.com/recipes/578019 Args: n (int): bytes Returns: string: human readable bytes value """ symbols = (u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y') prefix = {} for i, s in enumerate(symbols): prefix[s] = 1 << (i + 1) * 10 for s in reversed(symbols): if n >= prefix[s]: value = float(n) / prefix[s] return u'%.1f%s' % (value, s) return u'%sB' % n
3f84e4b19b153cf40da7893e1bb80a89621606d6
84,217
import re def FillParagraph(text,width=70,indentLinesBy=0): """ >>> txt = "123 567 90 123\\n456 89\\n" >>> print FillParagraph(txt,width=10) 123 567 90 123 456 89 >>> print FillParagraph(txt,width=10,indentLinesBy=3) 123 567 90 123 456 89 >>> txt = '123 567 \\n' >>> print FillParagraph(txt,width=10) 123 567 """ expr = re.compile('[\ \t\n\r]+') words = expr.split(text) res = [] line = '' for word in words: if word: if len(line)+len(word)>=width: res.append(line) line = ' '*indentLinesBy line += word else: if line: line += ' %s'%word else: line = word if line: res.append(line) return '\n'.join(res)
d5cbc05c090ecca5f64fa519e2772b2154b029bd
671,646
def num_periods(period_duration, last_fixed_date, current_date): """Return the number of periods in a date range. Arguments: period_duration - the timedelta of a period. last_fixed_date - the date that the daystate was last specified. current_date - the date in question. """ return (current_date - last_fixed_date) / period_duration
4a6233d66a4928383ceeaf91c0066959188de409
327,498
def min_shape(args): """Returns the minimum shape that encompasses the shape of all *args*.""" args = tuple(args) if len(args) <= 0: raise ValueError("args must not be empty.") elif not all(len(a) == len(args[0]) for a in args): raise ValueError("All shapes must have the same number of dimensions.") return tuple(max(x) for x in zip(*args))
d1494a6a8f00f8dab84c57fea44bb91dac7df805
609,560
def _int(data): """Convert byte data to big-endian int.""" return int.from_bytes(data, byteorder="big")
cba161951138486a8bbe8ef21a7de4a15cc9cc5b
484,113
def is_start_of_new_sequence_item(line): """ Checks whether line is the first line of sequence item data """ return line[0] == '>'
f9b3a43bac52b4333b5d4bba24e266351fa807a9
163,463
import operator def get_operator_function(op_sign): """ Get operator function from sign Args: op_sign (str): operator sign Returns: operator function """ return { '>': operator.gt, '<': operator.lt, '>=': operator.ge, '<=': operator.le, '==': operator.eq, '!=': operator.ne, }[op_sign]
6b3a3f23335d499400e12071b421a4f34b1acffc
673,595
def hex_rotate_60(x, y, z, n=1): """Rotates the given hex n * 60 degrees counter clockwise around the origin, and returns the co-ordinates of the new hex.""" n = n % 6 if n == 0: return x, y, z if n == 1: return -y, -z, -x if n == 2: return z, x, y if n == 3: return -x, -y, -z if n == 4: return y, z, x if n == 5: return -z, -x, -y
247e3d47877003e4ad260e74f8cbc7451f5ac168
172,765
def exp_lr_scheduler(optimizer, epoch, init_lr=0.001, lr_decay_epoch=7): """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.""" lr = init_lr * (0.1**(epoch // lr_decay_epoch)) #if epoch % lr_decay_epoch == 0: #print('LR is set to {}'.format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr return optimizer
ba61d1cef53971a5cad0b9187e7e405c6338e43d
353,187
from typing import Union def count_occurrences(lst: list, value: Union[bool, str, int, float]) -> int: """Function to count occurrences of value in a list""" return len([x for x in lst if x == value and type(x) == type(value)])
498df555356bb08d3c01eac29892e54334bb22ef
226,598
def income1(households): """ Dummy for income group 1 """ return (households['income_category'] == 'income group 1').astype(int)
26f4c5f9f1cb6c56bfc4d5bb8410d4b31516d628
658,288
def fibonacci(i): """ This function finds the ith number in the Fibonacci series, or returns -1 for i <= 0. """ if i <= 0: return -1 if i <= 2: return 1 prev = 1 curr = 1 for k in range(i-2): temp = curr curr += prev prev = temp return curr
223b5cdeea2e94623317304ae1fafd8921772e13
618,718
def same_base_index(a, b): """Check if the base parts of two index names are the same.""" return a.split("_")[:-1] == b.split("_")[:-1]
6186230a9cb982be4cd113c2e8098e8ab472159b
38,776
def format_hex(row_hash, col_hash, size=8): """Format dhash integers as hex string of size*size//2 total hex digits (row_hash and col_hash concatenated). >>> format_hex(19409, 14959, size=4) '4bd13a6f' >>> format_hex(1, 2, size=4) '00010002' """ hex_length = size * size // 4 return '{0:0{2}x}{1:0{2}x}'.format(row_hash, col_hash, hex_length)
c0b6ad3ba0967e13f48eb2a7d7cfd82be6574834
363,139
import torch def adj_to_seq(adj, device='cpu'): """ Convert a dense adjacency matrix into a sequence. Parameters ---------- adj : torch.Tensor The dense adjacency tensor. device : str, optional The device onto which to put the data. The default is 'cpu'. Returns ------- adj_seq : torch.Tensor The sequence representing the input adjacency tensor. """ B, N = adj.shape[0], adj.shape[1] adj_seq = torch.zeros(B,int(((N-1)*N)/2)).to(device) for b in range(B): for i in range(1,N): for j in range(i): adj_seq[b, i*(i-1)//2 + j] = adj[b,i,j] # triangular index so each lower-triangle entry gets a unique slot return adj_seq
6b967962d5ba61a0ad45d5197ca23a7278fccca9
17,145
def is_comp_node(graph, node): """Returns True for Component or PseudoComponent nodes.""" return 'comp' in graph.node.get(node, '')
2f6abcb41a54b07a4a6cb1de4637db3d0ac7fa07
581,315
from typing import Tuple def new_location(location: Tuple[int, int], offset: Tuple[int, int]) -> Tuple[int, int]: """ Performs the offset move on the given location and returns the new position. :param location: the location of the piece :param offset: the direction to move piece in :return: the new location after performing the move """ x = location[0] + offset[0] y = location[1] + offset[1] return x, y
8f89d32db7b1162c39d5ddfac4ded3c784b0ab2f
486,499
def get_sig(pval): """Returns asterisk depending on the magnitude of significance""" if pval < 0.001: sig = '***' elif pval < 0.01: sig = '**' elif pval < 0.05: sig = '*' else: sig = 'ns' # non-significant return sig
99ccf5b4d4c78e21d8b0d7179e0143eee4bd5736
399,414
import json def to_json_string(my_obj): """Returns the JSON representation of an object (string). Arguments: my_obj {obj} -- object Returns: str -- Serialize obj to a JSON formatted str """ return json.dumps(my_obj)
17aa3155454c0a5e957bd0ad9cd3a14752fc0378
460,156
def create_document1(args): """ Creates document 1 -- an html document""" return f""" <!DOCTYPE html> <html> <head> <meta charset="UTF-8"> </head> <body style="font-family:sans-serif;margin-left:2em;"> <h1 style="font-family: 'Trebuchet MS', Helvetica, sans-serif; color: darkblue;margin-bottom: 0;">World Wide Corp</h1> <h2 style="font-family: 'Trebuchet MS', Helvetica, sans-serif; margin-top: 0px;margin-bottom: 3.5em;font-size: 1em; color: darkblue;">Order Processing Division</h2> <h4>Ordered by {args['signer_name']}</h4> <p style="margin-top:0em; margin-bottom:0em;">Email: {args['signer_email']}</p> <p style="margin-top:0em; margin-bottom:0em;">Copy to: {args['cc_name']}, {args['cc_email']}</p> <p style="margin-top:3em;"> Candy bonbon pastry jujubes lollipop wafer biscuit biscuit. Topping brownie sesame snaps sweet roll pie. Croissant danish biscuit soufflé caramels jujubes jelly. Dragée danish caramels lemon drops dragée. Gummi bears cupcake biscuit tiramisu sugar plum pastry. Dragée gummies applicake pudding liquorice. Donut jujubes oat cake jelly-o. Dessert bear claw chocolate cake gummies lollipop sugar plum ice cream gummies cheesecake. </p> <!-- Note the anchor tag for the signature field is in white. --> <h3 style="margin-top:3em;">Agreed: <span style="color:white;">**signature_1**/</span></h3> </body> </html> """
9f75267e13440cfbf98b0bdc94bdc86f50ffda92
647,841
def find_parent_fnames(var): """ Find full names of parents, recursively. These names will be forbidden in the subsequent resolution to make sure there's no back branch in the constructed DAG. """ names = [var.fname] # No self-referential if var.parent: names.append(var.parent.fname) names += find_parent_fnames(var.parent) return names
e0b32984275a80b14851bba9ecb357a4628d1d54
665,206
import re def ParseErrorDetail(e): """Parse <Message> and/or <Details> text from XML content. Args: e: The GSResponseError that includes XML to be parsed. Returns: (exception_name, m, d), where m is <Message> text or None, and d is <Details> text or None. """ exc_name_parts = re.split(r"[.']", str(type(e))) if len(exc_name_parts) < 2: # Shouldn't happen, but have fallback in case. exc_name = str(type(e)) else: exc_name = exc_name_parts[-2] if not hasattr(e, 'body') or e.body is None: return (exc_name, None, None) match = re.search(r'<Message>(?P<message>.*)</Message>', e.body) m = match.group('message') if match else None match = re.search(r'<Details>(?P<details>.*)</Details>', e.body) d = match.group('details') if match else None return (exc_name, m, d)
071e7d2388b7d4538103556e4badaa4902b65a2e
429,591
from collections import Counter def most_common(words, n=10): """ Returns the most common words in a document Args: words (list): list of words in a document n (int, optional): Top n common words. Defaults to 10. Returns: list: list of Top n common terms """ bow = Counter(words) ncommon = bow.most_common(n) return ncommon
c4f0adecdec09cb3b2a83e9edbd39eb1789ad592
688,948
def option_name(path, delimiter="--"): """ Returns a cli option name from attribute path **Arguments** - path (`list`): attribute path - delimiter (`str`): delimiter for nested attributes **Returns** cli option name (`str`) """ return "--{}".format(delimiter.join(path).replace("_", "-"))
0beab1b60f5ec479e70a4e12af799ca5efe0230a
679,656
def get_scale_at_res(base_scale, res, iso=False): """Returns voxel resolution at a given resolution of a precomputed volume; res=0 is the base resolution Raises: ValueError if any element in base_scale < 1 """ try: assert all([s >= 1 for s in base_scale]) except AssertionError: raise ValueError("voxel resolution is < 1") if iso: factor = (2, 2, 2) else: factor = (2, 2, 1) scale = [s * f ** res for s, f in zip(base_scale, factor)] return [int(s) if s == int(s) else s for s in scale]
bf1fd19e6ef4ef542a7388a548485c084fbafa33
382,387
def text_to_string(filename): """Read a text file and return a string.""" with open(filename) as infile: return infile.read()
dbd79e78c84c3374c0252544086885b909ae9bd9
4,590
def ptfhost(testbed_devices): """ Shortcut fixture for getting PTF host """ return testbed_devices["ptf"]
25fdbc9520c4cda9719887ff213ccb2b88918dcc
11,397
def convert(dataset: str) -> str: """Translates the CLI argument name into the Metadata value for the ISIC archive.""" return { "bcn_20000": "BCN_20000", "bcn_2020_challenge": "BCN_2020_Challenge", "brisbane_isic_challenge_2020": "Brisbane ISIC Challenge 2020", "dermoscopedia_cc_by": "Dermoscopedia (CC-BY)", "ham10000": "HAM10000", "isic_2020_challenge_mskcc_contribution": "ISIC 2020 Challenge - MSKCC contribution", "isic_2020_vienna_part_1": "ISIC_2020_Vienna_part_1", "isic_2020_vienna_part_2": "ISIC_2020_Vienna_part2", "jid_editorial_images_2018": "2018 JID Editorial Images", "msk_1": "MSK-1", "msk_2": "MSK-2", "msk_3": "MSK-3", "msk_4": "MSK-4", "msk_5": "MSK-5", "sonic": "SONIC", "sydney_mia_smdc_2020_isic_challenge_contribution": "Sydney (MIA / SMDC) 2020 ISIC challenge contribution", "uda_1": "UDA-1", "uda_2": "UDA-2" }.get(dataset, dataset).upper()
cb5114817d1fa7a195156b75c699865cabac7432
257,737
def __get_target(flag_iterator, details): """ This function consumes --target or -target flag which is followed by the compilation target architecture. This target might be different from the default compilation target collected from the compiler if cross compilation is done for another target. This is then collected to the buildaction object. """ if flag_iterator.item in ['--target', '-target']: next(flag_iterator) details['compilation_target'] = flag_iterator.item return True return False
f71fef6ab0987c371a217f7702bf097233f32034
162,575
def all_same(items): """Quick check if all items are the same. Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since it returns False as soon as any difference is detected, thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if the iterable was empty return not first
c78007ade57a1e1cdec42a41e76cd9b1b4830287
526,942
def create_stop_poll_text() -> str: """Returns message for stopping a poll.""" return 'Вы прервали прохождение опроса.'
d3bedcdd77801f507aade01ecac42c739ddb3e15
377,940
import unicodedata def remove_diacritics(input_str: str) -> str: """Remove diacritics and typographical ligatures from the string. - All diacritics (i.e. accents) will be removed. - Typographical ligatures (e.g. ffi) are broken into separated characters. - True linguistic ligatures (e.g. œ) will remain. - Non-latin scripts will remain. Args: input_str (str): The original string with diacritics and ligatures. Returns: str: The string without diacritics and typographical ligatures. """ nfkd_form = unicodedata.normalize('NFKD', input_str) return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
23c3e9ce0029704f0012a825460f10f370e3c681
5,099
import torch def _jitable_shape(tensor): """Gets shape of ``tensor`` as ``torch.Tensor`` type for jit compiler .. note:: Returning ``tensor.shape`` or ``tensor.size()`` directly is not torchscript compatible as return type would not be supported. Args: tensor (torch.Tensor): Tensor Returns: torch.Tensor: Shape of ``tensor`` """ return torch.tensor(tensor.shape)
1e639002da90c1890a52db99947e728aaa6d184e
375,729
def parse_software_uuid(s): """parses '/software/lalala/' or '/software/lalala' into 'lalala' if input is already 'lalala', returns 'lalala' as well. if something else, returns None """ ss = s.split('/') if len(ss) == 4 or len(ss) == 3: return ss[2] elif len(ss) == 1: return ss[0] else: return None
56e7a06daededa4294a8181992cef1562dbda069
526,452
def split_feature_targets(train_df, test_df): """ Splits the training dataframe and test dataframe into X_train, y_train & X_test, y_test Parameters: train_df (pandas DataFrame object): the training dataframe test_df (pandas DataFrame object): the test dataframe Returns: X_train (pandas DataFrame object): the training dataframe. Just features and no target. y_train (pandas DataFrame object): the training target values. X_test (pandas DataFrame object): the test dataframe. Just features and no target y_test (pandas DataFrame object): the test target values. """ X_train = train_df.drop(columns=["Churn"]) X_test = test_df.drop(columns=["Churn"]) y_train = train_df["Churn"] y_test = test_df["Churn"] return X_train, y_train, X_test, y_test
b6a609414c481a66b65f7c6ac26b7363c59bab85
466,685
def n_fib(n): """ Find the nth Fibonacci Number. :param n: Nth element in the fibonacci series. :return: returns None if n is 0 otherwise returns the nth Fibonacci Number. """ if n == 0: return None if n == 1 or n == 2: return n - 1 a, b = 0, 1 for i in range(2, n): a, b = b, a+b return b
542cb2dec18f4bd418a412c3255900e784b99f31
144,721
import time import requests import uuid import webbrowser def _check_for_api(port: int = 10000, launch_browser: bool = False, timeout: int = 5): """Check for the API to be live for up to `timeout` seconds, then optionally launch a browser window Args: port(int): port the server is running on launch_browser(bool): flag indicating if the browser should be launched on success == True timeout(int): Number of seconds to wait for the API before returning Returns: bool: flag indicating if the API is ready """ protocol = "http" if port == 443: protocol = "https" success = False for _ in range(timeout): time.sleep(1) try: resp = requests.get("{}://localhost:{}/api/ping?v={}".format(protocol, port, uuid.uuid4().hex), verify=False) if resp.status_code == 200: success = True break except requests.exceptions.ConnectionError: # allow connection errors, which mean the API isn't up yet. pass if success is True and launch_browser is True: time.sleep(1) # If here, things look OK. Start browser webbrowser.open_new("{}://localhost:{}".format(protocol, port)) return success
e63a499b556d4b67331b4bb6fa4c8078a6f59b11
426,259
def rename_datetimes(df): """Renaming tpep_pickup_datetime and tpep_dropoff_datetime into pickup_datetime, dropoff_datetime""" print("renaming datetimes") df = df.rename(columns={'tpep_pickup_datetime': 'pickup_datetime', 'tpep_dropoff_datetime': 'dropoff_datetime'}) return df
40a42343aa6cb915b7f35ac8b11d237702941e0e
155,384
def human_string(text: str) -> str: """ Transform text to human string Args: text: string to be converted Returns: converted string Examples: >>> human_string('test_str') 'Test Str' >>> human_string("") '' """ if not text: return "" return ' '.join(word.title() for word in text.split('_'))
5bc851e88228345ad67334b7405c765d34a01aff
370,438
def nearest(geom, gdf): """Find the element of a GeoDataFrame nearest a shapely geometry""" matches_idx = gdf.sindex.nearest(geom.bounds) nearest_geom = min( [gdf.iloc[match_idx] for match_idx in matches_idx], key=lambda match: geom.distance(match.geometry), ) return nearest_geom
f7354c81b2e4f6e46e4841bc906bd36be7910397
395,288
def set_tr_te(nifti_image, repetition_time, echo_time): """ Set the tr and te in the nifti headers :param echo_time: echo time :param repetition_time: repetition time :param nifti_image: nifti image to set the info to """ # set the repetition time in pixdim nifti_image.header.structarr['pixdim'][4] = repetition_time / 1000.0 # set tr and te in db_name field nifti_image.header.structarr['db_name'] = '?TR:%.3f TE:%d' % (repetition_time, echo_time) return nifti_image
78ea174f9b234fbe248b5205bab604fefbb944fc
376,331
def _request_cache(request, key): """ Return (or create) a dictionary attached to the request, usable as a cache whose visibility and lifetime are equal to those of the request. """ try: return request._conf_cache[key] except KeyError: request._conf_cache[key] = {} except AttributeError: request._conf_cache = {key: {}} return request._conf_cache[key]
e2a12ebff2e3d4d7ff9326581def48a042df1df0
364,719
def return_period_from_string(arg): """ Takes a string such as "days=1,seconds=30" and strips the quotes and returns a dictionary with the key/value pairs """ period = {} if arg[0] == '"' and arg[-1] == '"': opt = arg[1:-1] # remove quotes else: opt = arg for o in opt.split(","): key, value = o.split("=") period[str(key)] = int(value) return period
f3f70df82c1567d0b2f329c5add15d64b9b8b115
120,440
def aic(k,lnL): """ Akaike Information Criterion -- the lowest value is preferred. INPUT: k - number of parameters in the model lnL - log likelihood NOTES: The relative likelihood is defined as exp(Delta AIC/2) Grunblatt et al. 2015 https://en.wikipedia.org/wiki/Akaike_information_criterion """ return 2.*(k-lnL)
8720aeebdd0827533a3c523a19cca93e30abc945
354,114
def error_tex_in_output(output: str) -> bool: """parses tex output looking for fatal errors""" return ("Fatal error" in output) or ("no output PDF" in output)
a853257ec2f47b467ab486b7838e0ed6ec7803d7
392,790
import glob def get_unprocessed_cves(directory: str) -> list: """Returns a list of CSV files to upload and process. Args: directory (str): Directory to look in for CSVs. Returns: cve_files (list): List of files to be processed and uploaded. """ cve_files = [] paths = glob.glob(f"{directory}/*.csv") for path in paths: try: cve_file = open(path, "rb") cve_files.append(cve_file) except (OSError, IOError) as error: print(f"Error: Could not open a CSV. {error}") print(f"Found {len(cve_files)} file(s) to upload.") return cve_files
bc026f21456d15a20b7fab9df5f7bc47f648b4ae
303,006
def top_nucs(df, top_n): """ loops through the rows of a dataframe and keeps a list of the top_n nuclides (by concentration) from each row Parameters ---------- df : dataframe of nuclide concentrations top_n : number of nuclides to sort and filter by Returns ------- nuc_set : set of the top_n nucs as determined """ # Get a set of top n nucs from each row (instance) nuc_set = set() for case, conc in df.iterrows(): top_n_series = conc.sort_values(ascending=False)[:top_n] nuc_list = list(top_n_series.index.values) nuc_set.update(nuc_list) return nuc_set
fd30d66be6dc93db28f5c92ade6fcfff254f0331
500,912
def string(value: str = ''): """ A constant string. Parameters ---------- value : str The string value. """ return value
67237062f7f13a978aadb6256504ff45db40fbd0
419,844
import math def translate_random_number(number, start, stop): """ Helper function to translate a random number 0 <= x <= 1 to integer between start <= x <= stop """ return int(math.floor(number * (stop - start) + start))
0302bb9c02cbc1a51b4b3411ded9e3c51c44055c
489,265
def db2lin(value): """Convert logarithimic units to linear >>> round(db2lin(10.0), 2) 10.0 >>> round(db2lin(20.0), 2) 100.0 >>> round(db2lin(1.0), 2) 1.26 >>> round(db2lin(0.0), 2) 1.0 >>> round(db2lin(-10.0), 2) 0.1 """ return 10**(value / 10)
8840989dc365eee46e7836749d3fc8fff14dc048
627,850
def login_to_dict(login): """Creates a Python dict for a Login database entity. This is not a method of the Login model because it is not normally needed by the Liblio server. It's admin-only, so it can be considered private to this controller. Note: This does expose user email addresses, so use with care. Passwords are not returned, but they're stored as hashes anyway. """ return dict( id=login.id, username=login.username, email=login.email, last_login=login.last_login, last_action=login.last_action, role=login.role.name, userdata=login.user.to_dict() )
f623f1a8724140c7be0b11d613ae422dc94d63d1
261,698
import time def yearmonthday(t): """ Returns a tuple (year, month, day) with 1 <= month <= 12 and 1 <= day <= 31 """ return tuple(int(x) for x in time.strftime("%Y %m %d", time.gmtime(t)).split())
18df233c98ba7c530e8d73b8f61752432cc5cc66
548,594
def count_lines_exception(filename): """ Count the number of lines in a file. If the file can't be opened, it is treated as if it were empty. """ try: with open(filename, 'r') as f: return len(f.readlines()) except (EnvironmentError, TypeError): print('Exception error') return 0
cce821eacd535235165301aa23126bf93b8b49ff
488,316
def rreplace(s, old, new, occurrence=-1): """ Replace old with new starting from end of the string :param s: The string to be transformed :param old: Search string :param new: Replacement string :param occurrence: Number of replacements to do :return: """ li = s.rsplit(old, occurrence) return new.join(li)
132a2b376f623726bb06dad48c8598a208b1d5c1
264,182
def _fetch_json(sess, url): """\ Fetches the provided url and returns the JSON result as dict. :param sess: requests.Session :param url: The URL to fetch. :rtype: dict """ res = sess.get(url) dct = res.json() if dct['status'] != 'OK': raise Exception('Unexpected result "%s"' % dct['status']) return dct
fa4bcb4ed052031bb37611db46c722015c3f4685
532,454
def list_index(ls, indices): """numpy-style creation of new list based on a list of elements and another list of indices Parameters ---------- ls: list List of elements indices: list List of indices Returns ------- list """ return [ls[i] for i in indices]
7e5e35674f48208ae3e0befbf05b2a2e608bcdf0
703,770
def _merge_sequences(inp): """Merge a list of input sequence patterns for use in a regular expression. Order by length (a full sequence takes precedence over a subset), and exclude any empty (u'') sequences. """ return sorted(list(filter(None, inp)), key=len, reverse=True)
681adecf257c08d1080915832d79d4cbd46cdadb
403,961
def convert_kg_to_target_units(data_kg, target_units, kg_to_kgC): """ Converts a data array from kg to one of several types of target units. Args: data_kg: numpy ndarray Input data array, in units of kg. target_units: str String containing the name of the units to which the "data_kg" argument will be converted. Examples: 'Tg', 'Tg C', 'Mg', 'Mg C', 'kg', 'kg C', etc. kg_to_kgC: float Conversion factor from kg to kg carbon. Returns: data: numpy ndarray Output data array, converted to the units specified by the 'target_units' argument. Remarks: At present, only those unit conversions corresponding to the GEOS-Chem benchmarks have been implemented. This is an internal routine, which is meant to be called directly from convert_units. """ # Convert to target unit if target_units == "Tg": data = data_kg * 1e-9 elif target_units == "Tg C": data = data_kg * kg_to_kgC * 1.0e-9 elif target_units == "Gg": data = data_kg * 1e-6 elif target_units == "Gg C": data = data_kg * kg_to_kgC * 1.0e-6 elif target_units == "Mg": data = data_kg * 1e-3 elif target_units == "Mg C": data = data_kg * kg_to_kgC * 1.0e-3 elif target_units == "kg": data = data_kg elif target_units == "kg C": data = data_kg * kg_to_kgC elif target_units == "g": data = data_kg * 1e3 elif target_units == "g C": data = data_kg * kg_to_kgC * 1.0e3 else: msg = "Target units {} are not yet supported!".format(target_units) raise ValueError(msg) # Return converted data return data
c39022c862b48cd8f8bd47da2b9e004828fd7994
183,831
import torch def get_last_conv(m): """ Get the last conv layer in an Module. """ convs = filter(lambda k: isinstance(k, torch.nn.Conv2d), m.modules()) # print('convs:', convs) # print('list(convs)[-1]:', list(convs)[-1]) return list(convs)[-1]
1a8c7fa6f37fd7dabd766f7ad9f78eab2f7401a2
575,387
def format_timestamp(dt): """ Format the given timestamp. :param datetime.datetime dt: A datetime.datetime object to be formatted. """ return dt.strftime('%Y-%m-%dT%H:%M:%S.999-05:00')
62fb92acb57d7a2c4c1b89630a42859f52d44784
507,506
def parse_rank_specification(s): """ Parses info about rank specification, used to filter games by player's ranks. Returns None (all ranks allowed), or a set of possible values (None as a possible value in the set means that we should include games without rank info) # returns None, all ranks possible parse_rank_specification('') # returns set([1, 2, 3, None])), 1, 2, 3 allowed, as well as missing rank info parse_rank_specification('1..3,') # returns set([None])), only games WITHOUT rank info are allowed parse_rank_specification(',') See test for more examples. """ if not s: return None ret = [] s = s.replace(' ','') categories = s.split(',') for cat in categories: cs = cat.split('..') try: if len(cs) == 1: if not cs[0]: ret.append(None) else: ret.append(int(cs[0])) elif len(cs) == 2: fr, to = map(int,cs) if to < fr: raise RuntimeError('Empty range %s'%(cat)) ret.extend(range(fr, to+1)) else: raise ValueError() except ValueError: raise RuntimeError('Could not parse rank info on token "%s"'%(cat)) return set(ret)
ff7ce2b9940b6cd55bf0fab7bd2f633637fd7cc8
651,409
def _find_child_node(node, name): """Return child with given name from parent node""" for c in node.children: if c.frame.get("name") == name: return c return None
455148bb38225fe3e83c182503c48b7189fb3eed
572,401
def get_ext_info(hdu): """ Returns name, type and size of a specified HDU extension :param hdu: a single extension of a multiextension HDU item :type hdu: astropy.io.fits.hdu.image class :return: 3-element dictionary with text values for extension name, extension type, and extension size. """ outDict={} outDict["name"]=hdu.name outDict["type"] = str(hdu).split(" object")[0].split(".")[-1] if ((outDict["type"] == 'ImageHDU') and (hdu.shape)): outDict["size"] = "{} x {}".format(hdu.shape[0],hdu.shape[1]) elif "table" in outDict["type"].lower(): nRows = len(hdu.data) nCols = len(hdu.data[0]) outDict["size"] = "{}R x {}C".format(nRows,nCols) else: outDict["size"] = "()" return(outDict)
67b98d488ee4b2fabc05856c60049356d8586df1
113,726
def split_by_whitespace(text): """Runs basic whitespace cleaning and splitting on a piece of text. Examples: >>> _text = '我爱python,我爱编程;I love python, I like programming.' >>> split_by_whitespace(_text) ['我爱python,我爱编程;I', 'love', 'python,', 'I', 'like', 'programming.'] """ text = text.strip() if not text: return [] tokens = text.split() return tokens
4ede92215545aab7a55eac5b0733b91497340c60
614,624
def convert_metadata_1_0_to_1_1(metadata): """ Convert 1.0 to 1.1 metadata format :arg metadata: The old metadata :returns: The new metadata Changes from 1.0 to 1.1: * ``supported_by`` field value ``curated`` has been removed * ``supported_by`` field value ``certified`` has been added * ``supported_by`` field value ``network`` has been added """ new_metadata = {'metadata_version': '1.1', 'supported_by': metadata['supported_by'], 'status': metadata['status'] } if new_metadata['supported_by'] == 'unmaintained': new_metadata['supported_by'] = 'community' elif new_metadata['supported_by'] == 'curated': new_metadata['supported_by'] = 'certified' return new_metadata
16fa60107c36897141d1db490869a20807cbedc2
437,585
def ReadBlackList(path): """Read a blacklist of forbidden directories and files. Ignore lines starting with a # so we can comment the datafile. Args: path: file to load the blacklist from. Returns: list of blacklist entries (stripped lines). """ blacklist_file = open(path, 'r') catalog = [] for entry in blacklist_file: if not entry or entry[:1] == '#': pass # ignore comment and empty lines in blacklist file else: catalog.append(entry.strip()) return catalog
694b9bd8c09385677d49e8563ac8f08b923cadb0
7,018
def unit(aClass, value): """ Calls the 'unit' method of `aClass` with `value`. """ return aClass.unit(value)
35189da86beb1fc584ff41a5ead63aec6351633f
592,037
import click def get_help_msg(command): """ Return the full help message of a click-decorated <function> Ex: get_help_msg(testcmd) :param command: function that has the click command decorator and help option :return: a string containing the @click.command() <function> help message """ with click.Context(command) as ctx: # click.echo(command.get_help(ctx)) return command.get_help(ctx)
a477d3641c52657df18880d2490ff94ff2ebd422
238,260