Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import collections


def _merge_compiler_kwds(list_of_kwds):
    """
    Merges a list of keyword dictionaries. Values in these dictionaries are
    lists of values, the merged dictionaries will contain the concatenations
    of lists specified for the same key.

    Parameters
    ----------
    list_of_kwds : list of dict
        A list of compiler keyword dictionaries that should be merged.

    Returns
    -------
    merged_kwds : dict
        The merged dictionary
    """
    merged_kwds = collections.defaultdict(list)
    for kwds in list_of_kwds:
        for key, values in kwds.items():
            if not isinstance(values, list):
                raise TypeError(
                    "Compiler keyword argument '{}' requires a "
                    "list of values.".format(key)
                )
            merged_kwds[key].extend(values)
    return merged_kwds
0538be77849a061d04a2678f15c77fff9b07c5dd
468,710
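A minimal usage sketch (the inputs are hypothetical; the key names are illustrative only):

kwds = _merge_compiler_kwds([
    {"libraries": ["m"]},
    {"libraries": ["gsl"], "include_dirs": ["/usr/local/include"]},
])
# Lists for the same key are concatenated in order:
assert kwds == {"libraries": ["m", "gsl"],
                "include_dirs": ["/usr/local/include"]}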
def largest_odd_times(L):
    """
    Assumes L is a non-empty list of ints
    Returns the largest element of L that occurs an odd number
    of times in L. If no such element exists, returns None
    """
    l = L.copy()
    l.sort()
    l.reverse()
    for x in l:
        if l.count(x) % 2:
            return x
    return None
280d071bebeec911efbcbbae2eb28090e2e7870e
45,960
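A quick check of the documented behavior (a hedged sketch; values worked out by hand):

# 4 occurs three times (odd) and is the largest such element;
# with only even counts the result is None.
assert largest_odd_times([2, 2, 4, 4, 4]) == 4
assert largest_odd_times([2, 2]) is None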
def cut_out(s, phrase):
    """
    Returns the input string <s> but with all occurrences of <phrase> deleted

    <phrase> should be one or more words, separated by whitespace. Effort is
    made to preserve one space between words, which makes it better than
    s.replace(phrase, '')

    >>> s = 'the quick brown fox, which is the brownest ever, jumped over the lazy dog'
    >>> cut_out(s, 'the')
    'quick brown fox, which is brownest ever, jumped over lazy dog'
    >>> s.replace('the', '')
    ' quick brown fox, which is brownest ever, jumped over lazy dog'

    Note the extra spaces in the s.replace version
    """
    return ' '.join(map(str.strip, s.split(phrase))).strip()
4f7cc45f6a384087665120e821ca569059273150
435,869
def read_converged(content):
    """Check if calculation terminated correctly"""
    if "Normal termination of Gaussian" in content.strip().split("\n")[-1]:
        return True
    return False
f6e02a8be9f774636b5c5f211a967afa3541e6ac
397,925
import re


def is_valid_hostname(hostname):
    """
    Takes a hostname and tries to determine if it is valid or not

    Args:
        hostname: String of the hostname to check

    Returns:
        Boolean if it is valid or not
    """
    # Borrowed from https://stackoverflow.com/questions/2532053/validate-a-hostname-string
    if len(hostname) > 255:
        return False
    if hostname == 'localhost':
        return True
    if hostname.endswith("."):  # A single trailing dot is legal
        hostname = hostname[:-1]  # strip exactly one dot from the right, if present
    disallowed = re.compile(r"[^A-Z\d-]", re.IGNORECASE)
    return all(  # Split by labels and verify individually
        (label and len(label) <= 63  # length is within proper range
         and not label.startswith("-") and not label.endswith("-")  # no bordering hyphens
         and not disallowed.search(label))  # contains only legal characters
        for label in hostname.split("."))
fd56f664cbd41483b006f832c83f4e0d15e35098
593,796
def propagate_options(options):
    """Propagate child option values depending on selected parent options being specified"""
    if options.all:
        options.implemented = True
        options.missing = True
    if options.implemented:
        options.implemented_ovals = True
        options.implemented_fixes = True
        options.assigned_cces = True
    if options.missing:
        options.missing_ovals = True
        options.missing_fixes = True
        options.missing_cces = True
    return options
e7170c6fe385f264535ac57274d47a1eca9859a2
364,856
import logging


def greenlet_exception_logger(logger, level=logging.CRITICAL):
    """
    Return a function that can be used as argument to Greenlet.link_exception()
    that will log the unhandled exception to the given logger.
    """
    def exception_handler(greenlet):
        logger.log(level, "Unhandled exception in greenlet: %s", greenlet,
                   exc_info=greenlet.exc_info)
    return exception_handler
98f413f5f8432214d051f306490014e6927562f2
16,577
import pickle


def extract_features_from_file(file):
    """Extracts features from a file using `pickle.load`."""
    return pickle.load(file)
7fd0befa2551f8c282f76435ae08d2cea68a50e7
274,049
import math


def quantile(x, q):
    """
    Compute quantile/percentile of the data

    Parameters
    ----------
    x : list of float
        Data set
    q : float
        Quantile to compute, 0 <= q <= 1
    """
    if not 0 <= q <= 1:
        raise ValueError("Invalid quantile")
    y = sorted(x)
    n = len(y)
    z = (n - 1) * q
    j = int(math.floor(z))
    z -= j
    if j == n - 1:
        m = y[-1]
    else:
        m = (1 - z) * y[j] + z * y[j + 1]
    return m
a19a6cfa97e5e55fc095c032cf86d51d1f2e2098
399,764
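A quick sanity check of the linear-interpolation behavior (a hedged sketch; values computed by hand):

data = [1.0, 1.0, 3.0, 4.0, 5.0]
assert quantile(data, 0.5) == 3.0    # median: middle of the sorted data
assert quantile(data, 0.25) == 1.0   # lands exactly on the second element
assert quantile(data, 1.0) == 5.0    # maximum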
from typing import Tuple


def _get_func_expr(s: str) -> Tuple[str, str]:
    """ Get the function name and then the expression inside """
    start = s.index("(")
    end = s.rindex(")")
    return s[0:start], s[start + 1:end]
8ac212727859fc2df01cc12164118d9cfcf66d9f
654,209
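A small usage sketch, splitting a call-like string into its name and argument expression (the outermost parentheses delimit the expression, so nested calls stay intact):

assert _get_func_expr("sqrt(x + 1)") == ("sqrt", "x + 1")
assert _get_func_expr("max(f(a), b)") == ("max", "f(a), b")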
def A070939(i: int = 0) -> int:
    """Length of the binary representation of i."""
    return len(f"{i:b}")
31b12e493645c3bdf7e636a48ceccff5d9ecc492
1,350
def _read_file(path, mode='r'):
    """Return the contents of a file as a string.

    Returns '' if anything goes wrong, instead of throwing an IOError.
    """
    contents = ''
    try:
        with open(path, mode) as f:
            contents = f.read()
    except IOError:
        pass
    return contents
050760a1c583cd8a07ded4437fc53ad461b22f98
254,830
from typing import List
from typing import Optional


def peek(tokens: List[str]) -> Optional[str]:
    """Return the next token without consuming it, if tokens is non-empty."""
    if tokens:
        return tokens[-1]
    return None
89163cfb252977a52bb6ad0a1ba5cc6153bfcd08
505,596
def getManagedObjectTypeName(mo):
    """
    Returns the short type name of the passed managed object
    e.g. VirtualMachine

    Args:
        mo (vim.ManagedEntity)
    """
    return mo.__class__.__name__.rpartition(".")[2]
85d846402d2ce6baf6af2f4c6b006a1926c41b8d
562,850
def telescopes_unicity(tel_list):
    """
    Check if all the telescopes in a list are unique

    Parameters
    ----------
    tel_list: list of telescopes classes

    Returns
    -------
    Boolean: True if all telescopes in the list are unique
    """
    are_unique = True
    # Compare each unordered pair of telescopes exactly once.
    for i, tel1 in enumerate(tel_list):
        for tel2 in tel_list[i + 1:]:
            if ((tel1.mirror_center == tel2.mirror_center).all()
                    and tel1.id != tel2.id):
                print("Telescopes {0} and {1} are both at the same position {2}"
                      .format(tel1.id, tel2.id, tel1.mirror_center))
                are_unique = False
    return are_unique
5f8be803ca23541595d1e341957594d77ecfbd65
184,149
import math


def get_camera_side_radius(angle: float, camera_distance: float) -> float:
    """Gets the length of the sides of the triangle FOV when rendering the triangle vertices

    :param angle: The angle of the camera in radians in our own coordinate space,
        where 0 is East and PI / 2 is North
    :param camera_distance: The distance that the camera can see on average in meters
        (this is not an exact measurement, this is just a general statement about
        how far the camera can see)
    :return: Returns the length of the side of the triangle FOV
    """
    return camera_distance / math.cos(angle)
abc954573bcc3200d36f9e1ee952116e31598f75
522,041
def _SNR(coef):
    """
    Return the signal-to-noise ratio for each constituent.
    """
    if "Lsmaj" in coef:
        SNR = (coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) / (
            (coef["Lsmaj_ci"] / 1.96) ** 2 + (coef["Lsmin_ci"] / 1.96) ** 2
        )
    else:
        SNR = (coef["A"] ** 2) / (coef["A_ci"] / 1.96) ** 2
    return SNR
ac3bdd752d062a72ffc64fb9171e3f636d0e26e8
687,444
def config_parser_to_dict(config):
    """
    Translates the items in the config parser object to dict.

    :param config: RawConfigParser
    :return: Dictionary with the contents
    :rtype: dict
    """
    contents = {}
    for section in config.sections():
        contents.update({section: {item[0]: item[1] for item in config.items(section)}})
    return contents
b093af16a4b775b1b0eb8e7d8f2ff603f2257827
606,514
from pathlib import Path


def seed_root_fixture() -> Path:
    """
    This fixture returns a path to the seed data defined for tests.

    :return: the path to the seed data root directory
    """
    return Path(__file__).resolve().parent / 'data' / 'seed'
0876cbce1bea77c8a9f0c435391556238197ca49
140,670
def abr_step(N, E, Q):
    """Run the expectation (abr) step of the EM algorithm.

    This variant assumes that the true-positive and false-positive rates are
    global, namely not node-specific.

    Parameters
    ----------
    N : numpy.array
        The number of times that each edge has been measured (i.e., the number
        of trials or experiments in which each edge could have been observed).
    E : numpy.array
        The actual observed number of edges between every pair of nodes.
    Q : numpy.array
        The current (algorithmically generated) estimate that each edge is
        actually present. Output of q_step.

    Returns
    -------
    alpha : float
        The estimate of the true-positive rate.
    beta : float
        The estimate of the false-positive rate.
    rho : float
        The estimate of network density, i.e. the probability of an edge
        existing between any two nodes chosen at random.
    """
    # Step 0: establish variables
    n = E.shape[0]  # the number of nodes in the network, following Newman's notation

    # Temporary variables to hold the numerator and denominator sums of the
    # alpha, beta, and rho estimates.
    anum = 0
    aden = 0
    bnum = 0
    bden = 0
    rnum = 0

    # Step 1: loop through the upper triangle of matrix Q to calculate the sums
    for j in range(1, n):
        for i in range(j):
            anum += E[i, j] * Q[i, j]
            bnum += E[i, j] * (1 - Q[i, j])
            rnum += Q[i, j]
            aden += N[i, j] * Q[i, j]
            bden += N[i, j] * (1 - Q[i, j])

    # Step 2: calculate alpha, beta, and rho
    alpha = anum * 1. / aden
    beta = bnum * 1. / bden
    rho = 2. / (n * (n - 1)) * rnum

    # Step 3: return alpha, beta, and rho
    return (alpha, beta, rho)
5793c8be4f0d4b1cb345c73e57b60d2f71d46bc1
366,222
def CheckStructuralModelsValid(rootGroup, xyzGridSize=None, verbose=False):
    """
    **CheckStructuralModelsValid** - Checks for valid structural model group data
    given a netCDF root node

    Parameters
    ----------
    rootGroup: netCDF4.Group
        The root group node of a Loop Project File
    xyzGridSize: [int,int,int] or None
        The 3D grid shape to test data in this node to adhere to
    verbose: bool
        A flag to indicate a higher level of console logging (more if True)

    Returns
    -------
    bool
        True if valid structural model data in project file, False otherwise.
    """
    valid = True
    if "StructuralModels" in rootGroup.groups:
        if verbose:
            print(" Structural Models Group Present")
        smGroup = rootGroup.groups.get("StructuralModels")
        # if verbose: print(smGroup)
        if "easting" in smGroup.ncattrs() and "northing" in smGroup.ncattrs() \
                and "depth" in smGroup.ncattrs():
            if xyzGridSize is not None:
                # Check gridSize from extents matches models sizes
                smGridSize = [smGroup.dimensions["easting"].size,
                              smGroup.dimensions["northing"].size,
                              smGroup.dimensions["depth"].size]
                if smGridSize != xyzGridSize:
                    print("(INVALID) Extents grid size and Structural Models Grid Size do NOT match")
                    print("(INVALID) Extents Grid Size : ", xyzGridSize)
                    print("(INVALID) Structural Models Grid Size : ", smGridSize)
                    valid = False
                else:
                    if verbose:
                        print(" Structural Models grid size adheres to extents")
        else:
            if verbose:
                print("No structural models extents in project file")
    else:
        if verbose:
            print("No Structural Models Group Present")
    return valid
d11ce42b041b8be7516f827883a37b40f6f98477
708,475
def quick_loc_df(loc_list, target_df, attribute=None):
    """
    faster replacement for target_df.loc[loc_list] or target_df.loc[loc_list][attribute]

    pandas DataFrame.loc[] indexing doesn't scale for large arrays (e.g. > 1,000,000 elements)

    Parameters
    ----------
    loc_list : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)
    target_df : pandas.DataFrame containing column named attribute
    attribute : name of column from loc_list to return (or none for all columns)

    Returns
    -------
    pandas.DataFrame or, if attribute specified, pandas.Series
    """
    if attribute:
        target_df = target_df[[attribute]]

    df = target_df.reindex(loc_list)

    df.index.name = target_df.index.name

    if attribute:
        # return series
        return df[attribute]
    else:
        # return df
        return df
a4991757e58982d5fa256d6a1ac3a6b96e17ac00
418,494
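A small usage sketch on a hypothetical frame (note that loc_list may repeat labels, since reindex simply looks each one up):

import pandas as pd

df = pd.DataFrame({"zone": [10, 20, 30]}, index=[100, 200, 300])
s = quick_loc_df([300, 100, 300], df, attribute="zone")
print(s.tolist())  # [30, 10, 30]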
def cleanBin(s, fixspacing=False):
    """
    Cleans binary data to make it safe to display. If fixspacing is True,
    tabs and newlines will be replaced with a placeholder; if not, they
    will be maintained.
    """
    parts = []
    for i in s:
        o = ord(i)
        if (o > 31 and o < 127):
            parts.append(i)
        elif i in "\n\t" and not fixspacing:
            parts.append(i)
        else:
            parts.append(".")
    return "".join(parts)
92f53bb1fa966bf18c107f37be5f9158c00f52f5
471,943
import ast


def _get_long_name(node):
    """
    Return a name (possibly dotted) corresponding to the given node, or None.

    If the node is a Name node or an Attribute node that is composed only of
    other Attribute or Name nodes, then return the full dotted name for this
    node. Otherwise, i.e., if this node contains other expressions, return
    None.

    Parameters
    ----------
    node : ASTnode
        A node of an abstract syntax tree.

    Returns
    -------
    str or None
        Name corresponding to the given node.
    """
    if isinstance(node, ast.Name):
        return node.id
    elif not isinstance(node, ast.Attribute):
        return None
    val = node.value
    parts = [node.attr]
    while True:
        if isinstance(val, ast.Attribute):
            parts.append(val.attr)
            val = val.value
        elif isinstance(val, ast.Name):
            parts.append(val.id)
            break
        else:  # it's more than just a simple dotted name
            return None
    return '.'.join(parts[::-1])
24e3b6e33e63e7a71a046f33b47243a69982e1f6
238,247
import torch


def get_all_pairs_indices(labels):
    """
    Given a tensor of labels, this will return 4 tensors.
    The first 2 tensors are the indices which form all positive pairs
    The second 2 tensors are the indices which form all negative pairs
    """
    labels1 = labels.unsqueeze(1)
    labels2 = labels.unsqueeze(0)
    matches = (labels1 == labels2).byte()
    diffs = matches ^ 1
    matches -= torch.eye(matches.size(0)).byte().to(labels.device)
    a1_idx = matches.nonzero()[:, 0].flatten()
    p_idx = matches.nonzero()[:, 1].flatten()
    a2_idx = diffs.nonzero()[:, 0].flatten()
    n_idx = diffs.nonzero()[:, 1].flatten()
    return a1_idx, p_idx, a2_idx, n_idx
b388c79990267bf52519fd583b822ade988abe97
255,544
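A quick usage sketch on a toy label tensor (recent PyTorch versions may warn about nonzero() without as_tuple, but the call still works):

labels = torch.tensor([0, 0, 1])
a1, p, a2, n = get_all_pairs_indices(labels)
# positive (anchor, positive) pairs: (0, 1) and (1, 0)
print(list(zip(a1.tolist(), p.tolist())))   # [(0, 1), (1, 0)]
# negative (anchor, negative) pairs: every cross-label combination
print(list(zip(a2.tolist(), n.tolist())))   # [(0, 2), (1, 2), (2, 0), (2, 1)]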
def intt(tup):
    """Returns the first two components of a tuple as ints"""
    return (int(tup[0]), int(tup[1]))
4a596a707d175a7ef1caeca5b1bf8d48ed760b75
285,505
def step_name_str(yaml_stem: str, i: int, step_key: str) -> str:
    """Returns a string which uniquely and hierarchically identifies a step in a workflow

    Args:
        yaml_stem (str): The name of the workflow (filepath stem)
        i (int): The (zero-based) step number
        step_key (str): The name of the step (used as a dict key)

    Returns:
        str: The parameters (and the word 'step') joined together with double underscores
    """
    # Use double underscores so we can split on '__' later.
    # (This should work as long as yaml_stem and step_key do not contain __)
    return f'{yaml_stem}__step__{i+1}__{step_key}'
2b4a3abd0334f4834c18b9e99bce070983667e0a
203,736
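A one-line usage sketch (note the zero-based step number becomes one-based in the identifier):

assert step_name_str("my_workflow", 0, "download") == "my_workflow__step__1__download"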
def check_special_non_rotor_cases(mol, top1, top2):
    """
    Check whether one of the tops correspond to a special case which could not be rotated
    `mol` is the RMG Molecule to diagnose
    `top1` and `top2` are indices of atoms on each side of the pivots, the first index
    corresponds to one of the pivots

    Special cases considered are:

    - cyano groups: ``R-C#N``
    - azide groups: ``N-N#N``

    These cases have a 180 degree angle and torsion is meaningless, but they are
    identified by our methods since they have a single bond

    Returns `True` if this is indeed a special case which should not be treated as a rotor
    """
    for top in [top1, top2]:
        # check cyano group
        if len(top) == 2 and mol.atoms[top[0] - 1].isCarbon() \
                and mol.atoms[top[1] - 1].isNitrogen() \
                and mol.atoms[top[1] - 1].atomType.label == 'N3t':
            return True
    for tp1, tp2 in [(top1, top2), (top2, top1)]:
        # check azide group
        if len(tp1) == 2 and mol.atoms[tp1[0] - 1].atomType.label == 'N5tc' \
                and mol.atoms[tp1[1] - 1].atomType.label == 'N3t' \
                and mol.atoms[tp2[0] - 1].atomType.label == 'N1sc':
            return True
    return False
8b2dc235f6b75591e532ee579b977b3899decce5
579,809
from datetime import datetime


def itl_to_datetime(itltime):
    """
    convert EPS ITL time format to python datetime object

    Args:
        itltime (string): EPS ITL time string format

    Returns:
        datetime: python datetime
    """
    return datetime.strptime(itltime, '%d-%b-%Y_%H:%M:%S')
693c9bea0743c62f017e8a086c862b2dfbf34ede
498,553
import re


def tokenize(library):
    """
    Function that takes a string of text and tokenizes it.
    The text is returned tokenized.
    """
    # Make sure all words are written with lower cases
    library = library.lower()

    # Remove non-alphabetic characters
    tokenizer = re.compile(r"\W+")

    # tokenize
    tokenized_library = tokenizer.split(library)

    return tokenized_library
b638cda4945aee3ed377614968998314121735cf
31,605
import re


def extract_answer_text(options_text: str, answer_tag: str):
    """Extracts correct answer's text from all options.

    Args:
        options_text: all options as text in various format.
        answer_tag: correct answers tag a, b, c, ...

    Returns:
        parsed option text corresponding to the correct answer.
    """
    if options_text.startswith('[') and options_text.endswith(']'):
        options = eval(options_text)  # pylint: disable = eval-used
        options = [re.sub('[abcde] \\)', '', x).strip() for x in options]
    else:
        options = re.split('[abcde] \\)', options_text)
        if options[0]:
            raise ValueError(f'Expects first segment to be empty in {options}.')
        options = [x.strip().rstrip(',').strip() for x in options[1:]]
    correct_id = ord(answer_tag) - ord('a')
    if correct_id >= len(options):
        raise ValueError(f'Ill parsed dictionary {options} from {options_text}.')
    return options[correct_id]
63c2027087d405c99831b2e6ea922d1989f51c20
41,415
def find_argument_target(xmrs, nodeid, rargname):
    """
    Return the target of an argument (rather than just the variable).

    Args:
        xmrs: The [Xmrs] object to use.
        nodeid: The nodeid of the argument.
        rargname: The role-argument name of the argument.

    Returns:
        The object that is the target of the argument. Possible values
        include:

        | Arg value          | e.g.  | Target                        |
        | ------------------ | ----- | ----------------------------- |
        | intrinsic variable | x4    | nodeid; of the EP with the IV |
        | hole variable      | h0    | nodeid; HCONS's labelset head |
        | label              | h1    | nodeid; label's labelset head |
        | unbound variable   | i3    | the variable itself           |
        | constant           | "IBM" | the constant itself           |

    Note:
        If the argument value is an intrinsic variable whose target is an
        EP that has a quantifier, the non-quantifier EP's nodeid will be
        returned. With this nodeid, one can then use find_quantifier() to
        get its quantifier's nodeid.
    """
    tgt = xmrs.args(nodeid)[rargname]
    if tgt in xmrs.variables():
        try:
            return xmrs.nodeid(tgt)
        except KeyError:
            pass
        try:
            tgt = xmrs.hcon(tgt).lo
            return next(iter(xmrs.labelset_heads(tgt)), None)
        except KeyError:
            pass
        try:
            return next(iter(xmrs.labelset_heads(tgt)))
        except (KeyError, StopIteration):
            pass
    return tgt
741fd56df510cb16f1e34baa0545a6307554b1cd
482,214
from typing import Mapping


def is_dict(x):
    """Checks if argument is a dictionary."""
    return isinstance(x, Mapping)
25a675a868083d787f35e17a60cff02fee72cbc8
95,466
def test_sequence(N):
    """
    Compute the infinite sum of 2^{-n} starting from n = 0, truncating
    at n = N, returning the value of 2^{-n} and the truncated sum.

    Parameters
    ----------
    N : int
        Positive integer, giving the number of terms in the sum

    Returns
    -------
    limit : float
        The value of 2^{-N}
    partial_sum : float
        The value of the truncated sum

    Notes
    -----
    The limiting value should be zero, and the value of the sum should
    converge to 2.
    """
    # Start sum from zero, so give zeroth term
    limit = 1.0
    partial_sum = 1.0
    # At each step, change the summand and then increment the sum
    # (halving before adding so term n contributes 2^{-n} exactly once)
    for n in range(1, N + 1):
        limit /= 2.0
        partial_sum += limit
    return limit, partial_sum
ccd026374e353a55e23cbb9866c5c7a4c31a6f71
607,522
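A quick check of the documented behavior, worked by hand: 2^{-N} shrinks toward 0 while the partial sum approaches 2.

limit, partial_sum = test_sequence(3)
assert limit == 0.125         # 2**-3
assert partial_sum == 1.875   # 1 + 1/2 + 1/4 + 1/8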
from datetime import datetime


def today_yyyymmdd() -> str:
    """
    Get the current date in YYYYMMDD format.

    Args:
        None.

    Returns:
        (str): Today's date in YYYYMMDD format.
    """
    return datetime.now().strftime("%Y%m%d")
ce3db969773606aa0c2fee1aa99b08826e3db6cb
358,160
from typing import Union
from typing import Dict
from typing import Any
from typing import List
from typing import Tuple
import math


def computeWaveDataMaxTime(data: Union[Dict[str, Any], List[Dict[str, Any]]],
                           dt: float) -> Tuple[float, int]:
    """
    Compute the pulse duration time of all the wave data input.

    :param data: waveData or waveData list
    :param dt: sampling time step
    :return: a tuple of duration time in Nano-second and dt (AWG sampling interval)
    """
    if isinstance(data, dict):
        maxNs = data["insert_ns"] + data["duration_ns"]
        maxDt = math.floor(maxNs / dt)
    else:
        # Find the max time
        maxNs = 0
        for waveform in data:
            finalNs = waveform["insert_ns"] + waveform["duration_ns"]
            if maxNs < finalNs:
                maxNs = finalNs
        maxDt = math.floor(maxNs / dt)
    return maxNs, maxDt
be65c6c6199847a0f2cca9ceedb74152675c506e
443,355
def range_to_level_window(min_value, max_value):
    """Convert min/max value range to level/window parameters."""
    window = max_value - min_value
    level = min_value + .5 * window
    return (level, window)
0a388ff48a29f0daff20a7cdfd200f8330a32a15
701,209
def obj_frequencies(df):
    """Total number of occurrences of individual objects."""
    return df.OId.value_counts()
96f2805e8f35fd549527751413fb8f84d93fbf6f
463,718
from typing import Union


def gb_to_gib(gb: Union[int, float]) -> float:
    """
    Gigabyte (GB) to Gibibyte (GiB)

    :param gb: Gigabytes (GB)
    :return: Gibibyte (GiB)
    """
    # 1 GB = 10**9 bytes and 1 GiB = 2**30 bytes, so GiB = GB * 10**9 / 2**30
    return gb * (10 ** 9 / 2 ** 30)
9a5060e9e40d9fb462a9f720d377a0fa18e3d7f1
170,090
def no_parenthesis_usage(decorator_function, decorated):
    """
    called with no arg NOR parenthesis: @foo_decorator
    we have to directly apply the decorator

    :param decorated:
    :param decorator_function:
    :return:
    """
    return decorator_function()(decorated)
b18f5bcf7036584c3f9f64ddec377a37c0ed096b
549,547
def neuter_name(name):
    """
    Makes a string suitable to be file name + id (no equality signs)

    Args:
        name: string to process

    Returns:
        A string without special characters
    """
    reserved_chars = '\\/:*?"<>|='
    for char in reserved_chars:
        name = name.replace(char, '')
    return name
cb0351b09656c3d9475408362f9acfd42905ccfc
476,654
def wraps_class(orig, suffix):
    """
    Rename the decorated class to match the ``orig`` with a suffix added.

    :see: `functools.wraps`
    """
    def decorator(cls):
        cls.__name__ = orig.__name__ + suffix
        cls.__qualname__ = orig.__qualname__ + suffix
        cls.__module__ = orig.__module__
        return cls
    return decorator
3293c90db7cf066b43a841a0139ef146ed326697
588,912
import random


def guess_number(start: int, end: int) -> int:
    """Return a number in specified range."""
    result = random.choice(range(start, end + 1))
    return result
f1545fe7264c3a14484450884b47b256756957b5
660,779
def get_kafka_topics(consumer):
    """Return a set with the name of topics found in kafka backend

    Args:
        consumer (obj): Kafka consumer object

    Returns:
        set(str): A set of strings (topic names)
    """
    return consumer.topics()
9a55a2241c78f46a3577513a92c0b1f0965b669d
385,546
import re


def get_edu_text(text_subtree):
    """return the text of the given EDU subtree, with '_!'-delimiters removed."""
    assert text_subtree.label() == 'text', "text_subtree: {}".format(text_subtree)
    edu_str = ' '.join(word for word in text_subtree.leaves())
    return re.sub('_!(.*?)_!', r'\g<1>', edu_str)
4add5d6697f5bede2636fb7d2704c5b72827ed97
650,526
def groom_model(model):
    """Reset the feature indicators."""
    model.commits, model.ticket = 0, None
    return model
257ff18f50e320179018be8b886bc9f17df2c76e
169,963
def parse_grid(grid):
    """
    converts a grid like

        K F A
        L G B
        M H C
        N I D
        O J E

    to ABCDEFGHIJKLMNO
    """
    rows = [row.strip().split(" ") for row in grid]
    return "".join(rows[row][col]
                   for col in range(2, -1, -1)
                   for row in range(0, 5))
f1cb825e3d20edd2db92fee4104204e9bcb1f54a
11,018
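A usage sketch with the grid from the docstring, where each element of grid is one row string (columns are read right to left, top to bottom):

grid = ["K F A", "L G B", "M H C", "N I D", "O J E"]
assert parse_grid(grid) == "ABCDEFGHIJKLMNO"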
from typing import Dict


def _get_credentials(credentials_name: str, credentials: Dict) -> Dict:
    """Return a set of credentials from the provided credentials dict.

    Args:
        credentials_name: Credentials name.
        credentials: A dictionary with all credentials.

    Returns:
        The set of requested credentials.

    Raises:
        KeyError: When a data set with the given name has not yet been
            registered.
    """
    try:
        return credentials[credentials_name]
    except KeyError:
        raise KeyError(
            "Unable to find credentials '{}': check your data "
            "catalog and credentials configuration. See "
            "https://kedro.readthedocs.io/en/latest/kedro.io.DataCatalog.html "
            "for an example.".format(credentials_name)
        )
ef5dfe295022a549a040af264993cbf742e69db0
517,237
import re


def multi_replace(txt, find, repl, ignore_case=False, whole_word_only=False):
    """
    caastools.utils.multi_replace(text, repl, ignore_case=False, whole_word_only=False) -> str
    Performs simultaneous multi-replacement of substrings within a string

    :param txt: string in which replacements are to be performed
    :param find: sequence of strings for which replacements are to be made
    :param repl: sequence of strings with which to replace corresponding
        entries in find, or a function which achieves the same result
    :param ignore_case: specifies whether to ignore case in search/replacement. Default False
    :param whole_word_only: specifies whether to replace only on whole word matches. Default False
    :return: string with replacements made
    """
    repl_str = "{0}{{0}}{0}".format("\\b" if whole_word_only else '')

    # The problem is that there is the risk of having one replacement be
    # the substring of another. Deal with this issue by sorting long to short
    replacements = sorted(find, key=len, reverse=True)
    replace_re = re.compile(
        "|".join(map(lambda x: repl_str.format(re.escape(x)), replacements)),
        re.IGNORECASE if ignore_case else 0)

    if callable(repl):
        result = replace_re.sub(repl, txt)
    else:
        if len(repl) < len(find):
            raise ValueError("each entry in 'find' must have a corresponding entry in 'repl'")
        repl_dict = {f: r for f, r in zip(find, repl)}
        result = replace_re.sub(lambda match: repl_dict[match.group(0)], txt)

    return result
cdeb5d7668b03d11e0c5a1c1a3759cdcc245bfa1
208,714
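A quick usage sketch showing why the long-to-short sort matters: with overlapping search strings, the longer one wins, and replacements never feed into each other the way chained str.replace calls can.

text = "the cat chased the catfish"
print(multi_replace(text, ["cat", "catfish"], ["dog", "shark"]))
# -> 'the dog chased the shark'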
def idle_time(boutlist, idle_threshold=15):
    """Takes list of times of bouts in seconds, returns idle time in seconds,
    i.e. time spent without bout for longer than idle_threshold in seconds.
    """
    idle_time = 0
    # Examine every consecutive pair of bouts, including the final pair.
    for i in range(0, len(boutlist) - 1):
        inter_bout_time = boutlist[i + 1] - boutlist[i]
        if inter_bout_time > idle_threshold:
            idle_time += inter_bout_time
    return idle_time
8d13872860433752b5171032d60299f380fd2557
355,077
def read_file(filename):
    """
    Read input file and save the locations into a list.

    :param filename: input file
    :return: list of locations
    """
    locations = []
    with open(filename, 'r', encoding='UTF-8') as file:
        for line in file:
            locations.append([[int(location), False, False] for location in line.strip()])
    return locations
29134cf716c599fe94dde6b656143c0ab845e29e
292,788
import torch


def gce(logits, target, q=0.8):
    """
    Generalized cross entropy.

    Reference: https://arxiv.org/abs/1805.07836
    """
    probs = torch.nn.functional.softmax(logits, dim=1)
    probs_with_correct_idx = probs.index_select(-1, target).diag()
    loss = (1. - probs_with_correct_idx ** q) / q
    return loss.mean()
baa9f8c4984328a2c9702fdfa9f338afd57b7ce4
364,964
import re


def is_string_valid_tarball_unpack_directory_name(dirname: str, package_name: str,
                                                  version_number: str) -> bool:
    """
    Check if the folder obtained by unpacking the source tarball is compliant
    with debian packaging

    :param dirname: directory to check
    :param package_name: name of the source package name
    :param version_number: name of the version
    :return: True if the directory obtained by unpacking the tarball has a
        valid name, false otherwise
    """
    m = re.match(r"^(?P<name>[a-z0-9\-]+)-(?P<version>[a-z0-9\.\-]+)$", dirname)
    if m is None:
        return False
    if m.group("name") != package_name:
        return False
    if m.group("version") != version_number:
        return False
    return True
df86b85bb47edbd0eebf5e96fab2b329983289f1
80,470
def parse_int(string):
    """
    Like int(), but just returns None if it doesn't parse properly
    """
    try:
        return int(string)
    except ValueError:
        pass
3438121f722c3d9541a20735b38c758cfce2a1c6
400,484
def en_wiki_url(name):
    """ Create an english wikipedia page url from a person's name """
    name = name.replace(' ', '_')
    return 'https://en.wikipedia.org/wiki/' + name
8bcae19352fc0795647fafe6fee8c41050f93f26
328,183
def basicFitness(individual, env):
    """
    The trivial case, where fitness is just the result of passing through
    the environment.
    """
    return individual.result
7d108bac92ce390699b66e1ead5b08080856c5be
10,344
from typing import Optional
from typing import Dict
from typing import Any


def define_by_run_func(trial) -> Optional[Dict[str, Any]]:
    """Define-by-run function to create the search space.

    Ensure no actual computation takes place here. That should go into
    the trainable passed to ``tune.run`` (in this example, that's
    ``easy_objective``).

    For more information, see https://optuna.readthedocs.io/en/stable\
/tutorial/10_key_features/002_configurations.html

    This function should either return None or a dict with constant values.
    """
    # This param is not used in the objective function.
    activation = trial.suggest_categorical("activation", ["relu", "tanh"])
    trial.suggest_float("width", 0, 20)
    trial.suggest_float("height", -100, 100)
    # Define-by-run allows for conditional search spaces.
    if activation == "relu":
        trial.suggest_float("mult", 1, 2)
    # Return all constants in a dictionary.
    return {"steps": 100}
f8237b135e3a6467bdefd397f1da0ebaec0f4380
29,608
def sum_non_reds(s):
    """
    Sum all numbers except for those contained in a dict where a value is "red".

    Parameters
    ----------
    s : int, list, or dict
        An item to extract numbers from.

    Returns
    -------
    int
        The total of all valid numbers.
    """
    if isinstance(s, int):
        return s
    elif isinstance(s, list):
        return sum(sum_non_reds(i) for i in s)
    elif isinstance(s, dict):
        if "red" in s.values():
            return 0
        else:
            return sum(sum_non_reds(i) for i in s.values())
    return 0
8d5ffb05b3575d9d3b34d3836b5ef7e728471ddd
652,536
import torch


def inverse_transformation(trans_12):
    """
    Function that inverts a 4x4 homogeneous transformation
    """
    if not (trans_12.dim() in (2, 3) and trans_12.shape[-2:] == (4, 4)):
        raise ValueError("Input size must be a Nx4x4 or 4x4. Got {}"
                         .format(trans_12.shape))
    # unpack input tensor
    rmat_12 = trans_12[..., :3, 0:3]  # Nx3x3
    tvec_12 = trans_12[..., :3, 3:4]  # Nx3x1

    # compute the actual inverse
    rmat_21 = torch.transpose(rmat_12, -1, -2)
    tvec_21 = torch.matmul(-rmat_21, tvec_12)

    # pack to output tensor
    trans_21 = torch.zeros_like(trans_12)
    trans_21[..., :3, 0:3] += rmat_21
    trans_21[..., :3, -1:] += tvec_21
    trans_21[..., -1, -1:] += 1.0
    return trans_21
324f8f387197df0bf30963297b28aa7af727e305
417,064
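A quick sanity check (a hedged sketch): inverting a pure translation and composing it with the original yields the identity transform.

T = torch.eye(4)
T[0, 3] = 2.0  # translate +2 along x
T_inv = inverse_transformation(T)
assert torch.allclose(T @ T_inv, torch.eye(4))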
import inspect


def getargspec(function):
    """
    Extends inspect.getfullargspec to allow us to decorate functions with a
    signature.

    :Parameters:
    function : callable
        function for which we want to know argspec.

    :Returns:
    inspect.FullArgSpec
    """
    if hasattr(function, 'fullargspec'):
        return function.fullargspec
    else:
        return inspect.getfullargspec(function)
9b5633d4fa3c7d9fdc9e41ad7e77f147d3bec9de
213,925
def divisors(n):
    """
    Count the number of divisors of a positive integer n.

    :param n: an integer input.
    :return: the number of divisors of an integer.
    """
    return sum(1 for x in range(1, n + 1) if n % x == 0)
b0367d30f21f8184a73170a6e768ae2954e72afd
303,960
import textwrap


def wrap_paragraphs(text, width=72):
    """Wrap all paragraphs in a text string to the specified width.

    ``width`` may be an int or a ``textwrap.TextWrapper`` instance.
    The latter allows you to set other options besides the width, and is more
    efficient when wrapping many texts.
    """
    if isinstance(width, textwrap.TextWrapper):
        wrapper = width
    else:
        wrapper = textwrap.TextWrapper(width=width)
    result = []
    lines = text.splitlines(True)
    lines_len = len(lines)
    start = 0
    end = None
    while start < lines_len:
        # Leave short lines as-is. (Compare against the wrapper's width so
        # this also works when ``width`` is a TextWrapper instance.)
        if len(lines[start]) <= wrapper.width:
            result.append(lines[start])
            start += 1
            continue
        # Found a long line, peek forward to end of paragraph.
        end = start + 1
        while end < lines_len and not lines[end].isspace():
            end += 1
        # 'end' is one higher than the last long line.
        paragraph = ''.join(lines[start:end])
        paragraph = wrapper.fill(paragraph) + "\n"
        result.append(paragraph)
        start = end
        end = None
    return "".join(result)
a1c3fb77530e9247ad2e3568eb0d3ea917520e71
150,833
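A short usage sketch: the overlong paragraph is rewrapped while the short line passes through untouched.

text = "short line\n\n" + "lorem " * 20 + "\n"
print(wrap_paragraphs(text, width=30))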
import re


def find_line(line: str, pattern: str, i_case: bool, is_regexp=False) -> bool:
    """
    Check if line contains matches for pattern, with case and regexp flags.
    Returns True if pattern is found in line.
    """
    found = False
    if i_case:
        line = line.lower()
        pattern = pattern.lower()
    if is_regexp:
        matches = re.findall(pattern, line)
        FIND_COND = len(matches) > 0
    else:
        FIND_COND = line.find(pattern) != -1
    if FIND_COND:
        line = line.strip()
        found = True
    return found
7c1e1f98ec618f37479fe36e48304850ca1741c2
581,609
def scenario_base(data, vertex, edge):
    """Base scenario: change nothing."""
    return data, vertex, edge
46ba07863d793294c47f240b0840c4023516ed49
453,961
import torch


def l1_loss(pre, gt):
    """ L1 loss """
    return torch.nn.functional.l1_loss(pre, gt)
c552224b3a48f9cde201db9d0b2ee08cd6335861
6,358
def set_tags(template: str, analysisname: str, era: str, sample_group: str) -> str:
    """
    Function used to set the tags in the template.

    Args:
        template: The template to be modified.
        analysisname: The name of the analysis.
        era: The era of the analysis.
        sample_group: The sample group of the analysis.

    Returns:
        The modified template.
    """
    return (
        template.replace("{ANALYSISTAG}", '"Analysis=%s"' % analysisname)
        .replace("{ERATAG}", '"Era=%s"' % era)
        .replace("{SAMPLETAG}", '"Samplegroup=%s"' % sample_group)
    )
2d3e5ee0f321e5c7a5d6e8446936519c65f12bda
483,178
def get_useful_tags(xlsform):
    """Return the ODK tags from 'survey' to retrieve the value for.

    This function compares each ODK 'name' to a pre-approved list and keeps
    that name if there is a match.

    Args:
        xlsform (pmix.Xlsform): An xlsform (workbook) object

    Returns:
        A list of ODK tags to track in analytics.
    """
    useful_tags = (
        "your_name", "name_typed", "level1", "level2", "level3", "level4",
        "EA", "structure", "household", "level1_unlinked", "level2_unlinked",
        "level3_unlinked", "level4_unlinked", "EA_unlinked", "facility_type",
        "deviceid", "start", "end", "HHQ_result", "FRS_result", "SDP_result",
    )
    odk_name = set(str(c) for c in xlsform["survey"].column("name"))
    keepers = [t for t in useful_tags if t in odk_name]
    return keepers
b1aea6e27ef07e69d0c58d13a12ae9e6bf565751
433,736
def flatten(iterable, tr_func=None, results=None):
    """Flatten a list of list with any level.

    If tr_func is not None, it should be a one argument function that'll be
    called on each final element.

    :rtype: list

    >>> flatten([1, [2, 3]])
    [1, 2, 3]
    """
    if results is None:
        results = []
    for val in iterable:
        if isinstance(val, (list, tuple)):
            flatten(val, tr_func, results)
        elif tr_func is None:
            results.append(val)
        else:
            results.append(tr_func(val))
    return results
a2c54e77632ecba09cc57fbd5c3b059f28ce8eb3
397,374
def generator_to_list(generator, max_count=None):
    """
    Convert generator to list

    :param max_count|int: the maximum element count to be joined.
    """
    datas = []
    count = 0
    for r in generator:
        count += 1
        datas.append(r)
        if max_count is not None and count >= max_count:
            break
    return datas
30c26d4787279a8e3f77c6c20997ba89e26b64dc
285,890
def pagination_page_numbers(current_page):
    """
    Return a list of page numbers which should be shown for page navigation.

    Example: When there are seven pages in total, the progression of page
    numbers which should be shown (where the current page is highlighted)
    are as follows:

        [1] 2 3 4 7
        1 [2] 3 4 7
        1 [3] 4 5 7
        1 [4] 5 6 7
        1 4 [5] 6 7
        1 4 5 [6] 7
        1 4 5 6 [7]
    """
    num_pages = current_page.paginator.num_pages
    # always show the first and last page number
    page_numbers = [1]
    if num_pages > 1:
        page_numbers.append(num_pages)
    # begin is the first of the incrementing sequence of numbers in the middle
    # which shouldn't be less than 2
    if current_page.number == 1:
        begin = 2
    elif current_page.number <= num_pages - 3:
        begin = current_page.number
    else:
        begin = max(num_pages - 3, 2)
    # end is the last of the incrementing sequence which shouldn't be greater
    # than num_pages - 1
    end = min(begin + 2, num_pages - 1)
    page_numbers.extend(range(begin, end + 1))
    return sorted(page_numbers)
d3867609111d4766af3969355eefc093321e3b94
531,484
from typing import Tuple
import math


def reduce_to_coprime(number1: int, number2: int) -> Tuple[int, int]:
    """Divides given numbers by their greatest common divisor, thus making them coprime."""
    gcd = math.gcd(number1, number2)
    return number1 // gcd, number2 // gcd
3ba0296ed9f62fda33a7d8dfd735692fb76e6317
604,959
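A one-line usage sketch: 12 and 18 share gcd 6, leaving the coprime pair (2, 3).

assert reduce_to_coprime(12, 18) == (2, 3)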
def to_tensor_item(value):
    """
    Transform from None to -1 or retain the initial value.

    :param value: a value to be changed to an element/item in a tensor
    :return: a number representing the value, tensor with value -1 represents
        the None input
    """
    if value is None:
        value = -1.0
    return value
55eccdd964cb7dda9449f69080f36f1f6f9cddf4
363,727
def format_line_obj(line_obj, ordered_attributes, delimiter):
    """Formats an object into delimited string

    If attribute not found, a ':(' will be inserted instead
    """
    return str(delimiter).join((str(line_obj.__dict__.get(attr, ':('))
                                for attr in ordered_attributes))
be7da1fa6bd080047bdd235d332b72e770dcfa48
13,073
def cities_in_EU(cities_data, countries_data):
    """Returns a dictionary whose key:value pair is "name of city":"EU membership",
    e.g., "Graz":"yes" or 'Aalborg':'no'; the size of the dictionary must equal
    the number of cities represented in cities_data
    """
    # Hint:
    # Use nested for in loops to generate the dictionary:
    #
    # for city in cities_data:
    #     for country in countries_data:
    city = []
    country = []
    eu = []
    for i in range(len(cities_data)):
        city.append(cities_data[i]["city"])
        country.append(cities_data[i]["country"])
    for i in range(len(country)):
        for k in range(len(countries_data)):
            if country[i] == countries_data[k]["country"]:
                eu.append(countries_data[k]["EU"])
    answer_dict = {}
    for i in range(len(city)):
        answer_dict.update({city[i]: eu[i]})
    return answer_dict
57d0b5f58dd2218fb06d358c7262edf7af9724bd
168,702
def filter_out_nonred(list_of_pixels):
    """
    Takes a 1-d list of pixels and filters out the pixels that aren't
    "red-colored". Returns the list of red pixels.
    """
    return [pixel for pixel in list_of_pixels
            if pixel[0] > 160. / 255. and max(pixel[1], pixel[2]) < 60. / 255.]
8e4bd91fbf1ffbb4896abc2ce6726e52fbcbced3
526,015
def train_test_split(df, frac):
    """Create a Train/Test split for a dataframe and return both the Training
    and Testing sets. Frac refers to the percent of data you would like to set
    aside for training."""
    train_split = df[:int(len(df) * frac)]
    test_split = df[int(len(df) * frac):]
    return train_split, test_split
3f92e3c0da5d58238e87eb2d39b33473fdc8682f
208,597
def get_region_for_chip(x, y, level=3):
    """Get the region word for the given chip co-ordinates.

    Parameters
    ----------
    x : int
        x co-ordinate
    y : int
        y co-ordinate
    level : int
        Level of region to build. 0 is the most coarse and 3 is the finest.
        When 3 is used the specified region will ONLY select the given chip,
        for other regions surrounding chips will also be selected.

    Returns
    -------
    int
        A 32-bit value representing the co-ordinates of the chunk of
        SpiNNaker chips that should be selected and the blocks within this
        chunk that are selected. As long as bits (31:16) are the same these
        values may be OR-ed together to increase the number of sub-blocks
        selected.
    """
    shift = 6 - 2 * level

    bit = ((x >> shift) & 3) + 4 * ((y >> shift) & 3)  # bit in bits 15:0 to set

    mask = 0xffff ^ ((4 << shift) - 1)  # in {0xfffc, 0xfff0, 0xffc0, 0xff00}
    nx = x & mask  # The mask guarantees that bits 1:0 will be cleared
    ny = y & mask  # The mask guarantees that bits 1:0 will be cleared

    #        sig bits x | sig bits y | 2-bit level | region select bits
    region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)
    return region
3b018413e57fc7baa3d36e816b47b545c9d8c1e5
673,321
import math


def get_radian_angle_btwn_vecs(vec1_x, vec1_y, vec2_x, vec2_y):
    """Compute the angle between two vectors (vec1, vec2) in radians.

    :param vec1_x: the x-coordinate of vec1
    :param vec1_y: the y-coordinate of vec1
    :param vec2_x: the x-coordinate of vec2
    :param vec2_y: the y-coordinate of vec2
    :return: the angle between vec1 and vec2
    """
    test = (vec1_x * vec2_x + vec1_y * vec2_y) \
        / math.sqrt(vec1_x ** 2 + vec1_y ** 2) \
        / math.sqrt(vec2_x ** 2 + vec2_y ** 2)
    # Clamp the cosine to [-1, 1] to guard against floating-point round-off
    # before taking the arccosine.
    if test > 1:
        test = 1
    if test < -1:
        test = -1
    rad = math.acos(test)
    return rad
f8365b19a4a38eefd3e750556322a1c6522a9e2a
596,803
def g(x, y):
    """
    A multivariate function for testing on.
    """
    return -x**2 + y
a134054e8731bd81c2f91980e71ede051dfc305f
637,728
def nodesets2affiliations(communities):
    """
    Transform community format to "snapshot_affiliations"

    Representation expected in input: dictionary, key=community ID, value=node set
    Representation in output: dictionary, key=node, value=set of snapshot_affiliations ID

    :param communities: dictionary, key=community ID, value=node set
    :return: dictionary, key=node, value=set of snapshot_affiliations ID
    """
    node2com = dict()
    for id, nodes in communities.items():
        for n in nodes:
            node2com.setdefault(n, set())
            node2com[n].add(id)
    return node2com
028ed5573e079f964452249fe4abc17e2654c546
592,094
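A small usage sketch: a node shared by two communities ends up with both IDs in its affiliation set.

coms = {0: {"a", "b"}, 1: {"b", "c"}}
assert nodesets2affiliations(coms) == {"a": {0}, "b": {0, 1}, "c": {1}}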
def correct_thresholds(p):
    """
    Checks that the thresholds are ordered th_lo < th < th_hi
    """
    return ((p['th_lo'] < p['th']) or p['th'] == -1) and \
           ((p['th'] < p['th_hi']) or p['th_hi'] == -1)
e2e840f14cab1b46bb2ce5b6ad9fb45bfa0510bf
124,904
def most_prolific(dict):
    """
    Takes a dict formatted like "album -> published year"

    {"Please Please Me": 1963, "With the Beatles": 1963,
     "A Hard Day's Night": 1964, "Beatles for Sale": 1964,
     "Twist and Shout": 1964, "Help": 1965, "Rubber Soul": 1965,
     "Revolver": 1966, "Sgt. Pepper's Lonely Hearts Club Band": 1967,
     "Magical Mystery Tour": 1967, "The Beatles": 1968,
     "Yellow Submarine": 1969, "Abbey Road": 1969, "Let It Be": 1970}

    and returns the year in which the most albums were released. If you call
    the function on the Beatles_Discography it should return 1964, which saw
    more releases than any other year in the discography. If there are
    multiple years with the same maximum number of releases, the function
    should return a list of years.
    """
    # Make map: value -> count
    # For example: 1964 -> 3, 1963 -> 2
    value_counts = {}
    for key in dict:
        value = dict[key]
        current_count = value_counts.get(value, 0)
        current_count += 1
        value_counts[value] = current_count

    # Make map: count -> list of key
    # For example: 3 -> {1964}, 2 -> {1963, 1969, 1965, 1967}
    count_rankings = {}
    for key in value_counts:
        count = value_counts[key]
        ranking_bucket = count_rankings.get(count, [])
        ranking_bucket.append(key)
        count_rankings[count] = ranking_bucket

    max_count = sorted(count_rankings).pop()
    result_list = count_rankings[max_count]
    if len(result_list) > 1:
        return result_list
    else:
        return result_list[0]
3cad35f0b06dad0f68a674857be80db30e142e83
77,063
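A minimal usage sketch (the discography here is a truncated, hypothetical subset): 1964 has two releases against one in 1965, so it is returned as a single year rather than a list.

discography = {"A Hard Day's Night": 1964, "Beatles for Sale": 1964, "Help": 1965}
assert most_prolific(discography) == 1964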
import json


def dethemify(topicmsg):
    """ Inverse of themify() """
    json0 = topicmsg.find('{')
    topic = topicmsg[0:json0].strip()
    msg = json.loads(topicmsg[json0:])
    return topic, msg
527cd8c3bb5a9ae75600b19050ca388b9321c630
678,389
def str2float(str_val, err_val=None):
    """
    Convert a string to a float value, returning an error value if an error
    occurs. If no error value is provided then an exception is thrown.

    :param str_val: string variable containing float value.
    :param err_val: value to be returned if error occurs. If None then
        exception returned. Default None.
    :return: float
    """
    str_val = str(str_val).strip()
    out_flt = 0.0
    try:
        out_flt = float(str_val)
    except ValueError:
        if err_val is not None:
            out_flt = float(err_val)
        else:
            raise Exception("Could not convert string to float: '" + str_val + "'.")
    return out_flt
e4eeda0335b47fd1eeda9e3c49349975a8e1744b
658,681
from typing import List


def valid_path(n: int, edges: List[List[int]], start: int, end: int) -> bool:
    """
    Finds a valid path in a graph from start to end and returns true if a
    valid path can be found, else returns false. Uses a BFS to traverse the
    vertices one level at a time until we determine whether there is a valid
    path from start to end.

    @param n: Number of vertices in graph
    @param edges: List of edge pairs between nodes/vertices in a graph
    @param start: Start Vertex
    @param end: End Vertex
    @return: True if there is a valid path from start to end, false otherwise
    """
    if not edges:
        return True
    if [start, end] in edges or [end, start] in edges or n == 1:
        return True

    graph = [[] for _ in range(n)]
    queue = []

    for node_1, node_2 in edges:
        graph[node_1].append(node_2)
        graph[node_2].append(node_1)

    queue.append(start)
    visited = set()

    while queue:
        current_node = queue.pop(0)
        for neighbour in graph[current_node]:
            if neighbour in visited:
                continue
            visited.add(neighbour)
            if neighbour == end:
                return True
            for x in graph[neighbour]:
                queue.append(x)
    return False
5ad6dd72fc735b34ae442cc2d651b4ef0b129e50
497,328
def object_type(r_name):
    """
    Derives an object type (i.e. ``user``) from a resource name (i.e. ``users``)

    :param r_name:
        Resource name, i.e. would be ``users`` for the resource index URL
        ``https://api.pagerduty.com/users``
    :returns: The object type name; usually the ``type`` property of an
        instance of the given resource.
    :rtype: str
    """
    if r_name.endswith('ies'):  # Because English
        return r_name[:-3] + 'y'
    else:
        return r_name.rstrip('s')
b74e373691edf8a8b78c2a3ff5d7b9666504330a
707,183
def parseFraction(f):
    """Parse a fraction (a string of the form N/D) returning a float.

    Returns None if f is not in the form N/D, or if D is 0."""
    p = f.find("/")
    if p < 1:
        return None
    s1 = f[:p]
    s2 = f[p+1:]
    try:
        v1 = int(s1)
        v2 = int(s2)
    except ValueError:
        return None
    if v2:
        return 1.0 * v1 / v2
    else:
        return None
12002963d09e2096453d0b3a316934004ff7074a
290,146
def linear(x, a, b):
    """Linear function

    Parameters
    ----------
    x, a, b: float
        linear function f = ax + b

    Returns
    -------
    f: float
        linear function f = ax + b
    """
    return a * x + b
4c4db0e7020db2ef24b70133be99a0b444d7d096
252,218
def body_method(name):
    """ Wrapper to expose a method on a Block object's compressed_body """
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, '_compressed_body'):
            self._compress_body()
        wrapped_method = getattr(self._compressed_body, name)
        return wrapped_method(*args, **kwargs)
    return wrapper
c0c833635b85b2f34cc735bc4a46831b48b73fb9
387,508
def check_distance_to_permuted_principal_subrows_lb(r_distribution, s_distribution, d):
    """
    For vectors r of size m and s, a row of some n×n distance matrix D, check
    if l∞-distance from r to the set P, comprised of permutations of those
    subvectors of s that are a row of some m×m principal submatrix of D, is ≥ d.

    Parameters
    ----------
    r_distribution: np.array (max_distance)
        Frequency distribution of r, positive integer vector.
    s_distribution: np.array (max_distance)
        Frequency distribution of s, positive integer vector no smaller in
        length than r.
    d: int
        Lower bound candidate for l∞-distance from r to P; d > 0.

    Returns
    -------
    d_is_distance_to_permuted_principal_subrows_lb: bool
        Whether l∞-distance from r to P is ≥ d.
    """
    def next_i_and_j(min_i, min_j):
        # Find reversed r distribution index of smallest r entries yet
        # to be assigned. Then find index in reversed s distribution of
        # smallest s entries to which the r entries can be assigned to.
        try:
            i = next(i for i in range(min_i, len(reversed_r_distribution))
                     if reversed_r_distribution[i] > 0)
        except StopIteration:
            # All r entries are assigned.
            i = None
            j = min_j
        else:
            j = next_j(i, max(i - (d - 1), min_j))
        return i, j

    def next_j(i, min_j):
        # Find reversed s distribution index of smallest s entries to
        # which r entries, corresponding to a given reversed r
        # distribution index, can be assigned to.
        try:
            j = next(j for j in range(min_j,
                                      min(i + (d - 1), len(reversed_s_distribution) - 1) + 1)
                     if reversed_s_distribution[j] > 0)
        except StopIteration:
            # No s entries left to assign the particular r entries to.
            j = None
        return j

    # Copy to allow modifications and stay pure; reverse for the
    # frequencies of smaller entries to come first, to be compatible
    # even with distributions of different lengths.
    reversed_r_distribution = list(r_distribution[::-1])
    reversed_s_distribution = list(s_distribution[::-1])

    # Injectively assign r entries to s entries if their difference
    # is < d, going from smallest to largest entries in both r and s,
    # until all r entries are assigned or such assignment proves
    # unfeasible.
    i, j = next_i_and_j(0, 0)
    while i is not None and j is not None:
        if reversed_r_distribution[i] <= reversed_s_distribution[j]:
            reversed_s_distribution[j] -= reversed_r_distribution[i]
            reversed_r_distribution[i] = 0
            i, j = next_i_and_j(i, j)
        else:
            reversed_r_distribution[i] -= reversed_s_distribution[j]
            reversed_s_distribution[j] = 0
            j = next_j(i, j)

    # The assignment is feasible if and only if |r - p| < d for some
    # p ∈ P, and therefore infeasible if and only if l∞-distance from
    # r to P is ≥ d.
    d_is_distance_to_permuted_principal_subrows_lb = (j is None)
    return d_is_distance_to_permuted_principal_subrows_lb
2ebc23c06cd630fa4baae4763719a9b5fa07fe73
332,904
import unicodedata


def remove_accented_chars(text):
    """
    Removes accented characters from the text
    """
    new_text = unicodedata.normalize('NFKD', text) \
        .encode('ascii', 'ignore').decode('utf-8', 'ignore')
    return new_text
b3a4e5b2bed54b17ed1d406f37009cacfe029217
76,277
import json

import requests


def create_destination_connection_profile(project, location, destination_config, token):
    """
    This function will create the destination connection profile in Google Cloud DataStream

    :param project: Google Cloud project id mentioned in variables.py
    :param location: Google Cloud resource location, for example us-central1
    :param destination_config: destination config from variables.py
    :param token: Google Cloud auth token
    :return: True or False
    """
    d_profile_name = destination_config["destination_profile_name"]
    d_profile_id = destination_config["destination_profile_id"]
    bucket_name = destination_config["storage_bucket_name"]
    bucket_prefix = destination_config["storage_bucket_prefix"]

    url = f"https://datastream.clients6.google.com/v1alpha1/" \
          f"projects/{project}/locations/{location}" \
          f"/connectionProfiles?connectionProfileId={d_profile_id}"

    payload = json.dumps({
        "displayName": d_profile_name,
        "gcsProfile": {
            "bucketName": bucket_name,
            "rootPath": bucket_prefix
        }
    })
    headers = {
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    if response.status_code == 200:
        print(f"Destination connection profile {d_profile_id} created successfully")
        destination_connection_profile_stat = True
    elif response.status_code == 409:
        print(f"Destination connection profile {d_profile_id} already exists")
        destination_connection_profile_stat = True
    else:
        print(f"Issue while creating destination connection profile: {response.text}")
        destination_connection_profile_stat = False
    return destination_connection_profile_stat
8c66947e0e2164fc73be11525ac66d929292e6dd
409,254
import torch


def count_params(model: torch.nn.Module) -> int:
    """
    Count the number of parameters in a model.

    Args:
        model (torch.nn.Module): PyTorch model.

    Returns:
        int: Number of learnable parameters.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
1479868bf255ff89a4e9e43ba2783ae53f845d7b
183,401
def highlight(msg):
    """
    Highlight in yellow provided message

    :param msg: Message to highlight
    :return: Highlighted message
    """
    return "\033[1;33m{}\033[0m".format(msg)
415fa5ca6ed91b15fba847bf771d6b15c529b51b
551,158
def ROS_ANY_TO_BB_STRING(anyTypeValue):
    """
    Converts a ROS value to a BB string type.

    Receives:
        anyTypeValue: the ROS value to convert

    Returns:
        the converted BB String value
    """
    return str(anyTypeValue)
e58d9f0a9523fb0d5fc52ca6376df612f52ea4f4
563,765
def rosen_suzuki(x):
    """ Evaluate polynomial from CONMIN manual. """
    return x[0]**2 - 5.*x[0] + x[1]**2 - 5.*x[1] + \
           2.*x[2]**2 - 21.*x[2] + x[3]**2 + 7.*x[3] + 50
873ffb4ed0945ea1101f738349e93276741205bb
132,865
from typing import Iterator
from typing import List
import unicodedata
import re


def preprocess(strings: Iterator[str]) -> List[str]:
    """Preprocess a collection of strings.

    The following preprocessing steps are taken: Convert to lowercase,
    normalize unicode, and remove special characters (non-alphanumeric
    characters, except for '.', '!', and '?').

    :param strings: Iterator[str] A sequence of un-processed strings.
    :return: List[str] A list of preprocessed strings.
    """
    def preprocess_string(s: str) -> str:
        s = s.lower().strip()
        s = ''.join(c for c in unicodedata.normalize('NFD', s)
                    if unicodedata.category(c) != 'Mn')
        s = re.sub(r'[^a-zA-Z.!?]+', r' ', s)
        return s

    return [preprocess_string(s) for s in strings]
6c2f7388ff078f1c2eef6a549d5270c3c03bdecf
323,511
def get_coordinates_from_token(token, mod):
    """
    A function which takes a mod and a token, finds the token in the mod, and
    then returns the coordinate (tuple) at which the token is found. If it is
    not found, it returns None.
    """
    for coordinates, token_in_mod in mod.items():
        # No possibility of duplicate tokens. If found, return the coordinates.
        if token_in_mod == token:
            return coordinates
    return None
429e42a6439972d23bc54ac5108f196aa0ca93b7
45,780
import warnings


def resample_nanmean(data, dim='time', resample='M', nmin=15, **kwargs):
    """
    Resample Dataset and apply a minimum for resampling mean

    Args:
        data (DataArray, Dataset): Input fields
        dim (str): datetime dimension
        resample (str): upsampling frequency
        nmin (int): minimum of samples per frequency
        **kwargs:

    Returns:
        DataArray, Dataset : means on freq
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        dd = data.resample(**{dim: resample}).count(dim)
        return data.resample(**{dim: resample}).mean(dim).where(dd > nmin)
c6ace88d87664af4d5b313c866a4b25334e489fc
593,071
def bin_widths(edges):
    """Return the widths of a set of bins given the edges"""
    return edges[1:] - edges[:-1]
7563fe31731a54d7fd6e85f63ce70797f3781a24
250,011