content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
import torch


def unscaled_sign(x):
    """Plain (unscaled) sign compression.

    Empirically yields worse test accuracy than the scaled variant.

    :param x: torch Tensor
    :return: elementwise sign of the tensor
    """
    return x.sign()
738dafd410314bc6b07d20ad8ce931c59b0309fc
333,043
def _prepare_body_remove_subports(subports): """Prepare body for PUT /v2.0/trunks/TRUNK_ID/remove_subports.""" return {'sub_ports': [{'port_id': sp['port_id']} for sp in subports]}
7427a77553efef92d82218f87060a1a4d5c3375c
103,098
def default_list_format(index: int, in_str: object) -> str:
    """Number an item (1-based) and capitalize its first character.

    Fix: no longer raises IndexError when ``str(in_str)`` is empty; an
    empty item now yields just the numbered prefix.

    :param index: 0-based index of the item
    :param in_str: object to format (converted with ``str``)
    :returns: e.g. ``"1. Apple"``
    """
    text = str(in_str)
    prefix = f"{index + 1}. "
    if not text:
        return prefix
    return prefix + text[0].upper() + text[1:]
35c63b62c3a73f280466c17649c364913246c46e
530,345
def default_window(win_id):
    """Return the default configuration dict for a single window."""
    return {
        'win_id': win_id,
        'mfact': 0.55,
        'nmaster': 1,
        'layout': 'horizontal',
        'zoomed': False,
        'num_panes': 1,
        'last_master_pane_id': None,
        'last_non_master_pane_id': None,
    }
37c3a4753a8d2590676937e9355fb405ddc70dd9
659,903
async def fetch(session, url):
    """Fetch *url* with the given session and return the body decoded as UTF-8 text."""
    async with session.get(url) as resp:
        return await resp.text(encoding='utf-8')
9c3d901ccd59808461534139502d2498451bf302
473,349
import importlib.util


def is_core_installed(package_name: str):
    """Report whether the Pyntel4004 core package can be located.

    Parameters
    ----------
    package_name: str, mandatory
        Name of the Pyntel4004 core package.

    Returns
    -------
    bool
        True if an import spec for the package is found, False otherwise.
    """
    return importlib.util.find_spec(package_name) is not None
5e5241e545a91fda026c608ade5ef0baa837e2b9
433,687
def score_tup(t):
    """Score an ngram tuple from a database ngram table.

    Higher scores mark terms more deserving of inclusion in the acrostic.
    Terms containing '#' are weighted double; the score is floored at 1.

    :param t: (Term string, initials string, Corpus count, Used count)
    :return: fitness score for this term
    """
    term, inits, pop, used = t
    base = (len(term) * pop / (10 * used ** 3)) ** len(inits)
    if "#" in term:
        base *= 2
    return max(base, 1)
eea34dbf2d6a7dec37dc95cde289560a77c72f7e
14,163
import yaml


def load_yaml(filename):
    """Load a YAML file with the safe loader.

    Args:
        filename (str): Path to the YAML file.

    Returns:
        dict: Parsed document.

    Raises:
        FileNotFoundError: If the file does not exist (re-raised as-is).
        IOError: For any other failure; the original exception is chained
            (fix: ``from e`` preserves the traceback, previously lost).
    """
    try:
        with open(filename, "r") as f:
            return yaml.load(f, yaml.SafeLoader)
    except FileNotFoundError:
        # Propagate a missing file unchanged so callers can distinguish it.
        raise
    except Exception as e:
        raise IOError("load {0} error!".format(filename)) from e
702ff5ca3321c7ec2ffc3d09363b6c4e22092837
43,353
def add_mention(user_mention: str, response: str) -> str:
    """Return the response prefixed with the Slack mention of the asker.

    Fix: the old ``str.format`` implementation treated braces inside
    *response* as format placeholders (raising KeyError/IndexError on
    messages like ``"use {this}"``); plain concatenation avoids that.

    Args:
        user_mention (str): Slack mention.
        response (str): response message.

    Returns:
        str: response message with the mention prepended.
    """
    return f"{user_mention} {response}"
9696043aa14e04a0de4d87724d2bd5a750ea3ccb
521,593
import math


def length(v):
    """Return the Euclidean length of vector *v* (must support ``.dot``)."""
    squared = v.dot(v)
    return math.sqrt(squared)
f52f755772f8b5c106d49a9bee334eabe91a19c4
398,570
from typing import Iterable


def concatenate_lists(*list_of_lists: Iterable) -> list:
    """Flatten the given iterables into a single list, preserving order.

    Examples
    --------
    Normal usage::

        concatenate_lists([1, 2], [3, 4], [5])
        # returns [1, 2, 3, 4, 5]
    """
    combined = []
    for chunk in list_of_lists:
        combined.extend(chunk)
    return combined
e630fd31888753814e3f486c02b0fc1e67f269ef
15,041
def is_iterable(obj):
    """Return True if *obj* is iterable, False otherwise.

    Assumes any iterable can be passed to the builtin ``iter`` — adequate
    within the context of PySCeSToolbox.

    Parameters
    ----------
    obj : object
        Any object that might or might not be iterable.

    Returns
    -------
    bool
        Whether *obj* is iterable.
    """
    try:
        iter(obj)
    except TypeError:
        return False
    return True
ee3fc37285761f174d0bf56d0cd9a755f8ee90b4
189,978
def _skip_object(obj_type, options): """Check to see if we skip this object type obj_type[in] Type of object for the --skip_* option (e.g. "tables", "data", "views", etc.) options[in] Option dictionary containing the --skip_* options Returns (bool) True - skip the object, False - do not skip """ obj = obj_type.upper() if obj == "TABLE": return options.get("skip_tables", False) elif obj == "VIEW": return options.get("skip_views", False) elif obj == "TRIGGER": return options.get("skip_triggers", False) elif obj == "PROCEDURE": return options.get("skip_procs", False) elif obj == "FUNCTION": return options.get("skip_funcs", False) elif obj == "EVENT": return options.get("skip_events", False) elif obj == "GRANT": return options.get("skip_grants", False) elif obj == "CREATE_DB": return options.get("skip_create", False) elif obj == "DATA": return options.get("skip_data", False) elif obj == "BLOB": return options.get("skip_blobs", False) else: return False
83f949ef8d8fc5b8bdf61f003f0fcec12ba4e42a
277,809
import math


def calculateGridPositionFromRangeBearing(easting, northing, distance, bearing):
    """Compute a new grid coordinate from a start point, range and bearing.

    Fix: removed the dead first computation of ``dist_x``/``dist_y`` whose
    results were immediately overwritten by the direction-cosine version.

    :param easting: start easting
    :param northing: start northing
    :param distance: range to the new point
    :param bearing: bearing in degrees (0 = north, clockwise)
    :return: [easting, northing] of the new point
    """
    # Direction cosines: bearing is measured from north, the polar angle
    # from east, hence the 90-degree complement for the easting component.
    angle = math.radians(90 - bearing)
    bearing_rad = math.radians(bearing)
    xfinal = easting + distance * math.cos(angle)
    yfinal = northing + distance * math.cos(bearing_rad)
    return [xfinal, yfinal]
080289d6d2e8c7cf8deccf6759c3584f82fa59da
443,873
def is_odd(val):
    """Report whether a value is odd.

    :param val: Value to be tested.
    :type val: int, float
    :return: True if the number is odd, otherwise False.
    :rtype: bool

    Examples:
    --------------------------
    .. code-block:: python

        >>> print(is_odd(10))
        False
        >>> print(is_odd(-3))
        True
    """
    return bool(val % 2)
b5379333fcf80460bb3d1317d325c3eacc2c10c2
563,574
def Split_Info(info):
    """Split the needed key=value fields out of a VCF INFO column.

    Fix: fields are split on the *first* '=' only, so values that
    themselves contain '=' no longer crash ``dict(...)``.

    Parameters
    ----------
    info : str
        INFO column value from a VCF record (';'-separated key=value pairs).

    Returns
    -------
    dict
        Mapping of the selected field names to their string values.
    """
    fields = ('QD=', 'MQ=', 'MQRankSum=', 'FS=', 'ReadPosRankSum=', 'SOR=')
    parts = {}
    for part in info.split(';'):
        if any(field in part for field in fields):
            key, _, value = part.partition('=')
            parts[key] = value
    return parts
e58d2dad51d34a7644a7d5bf307449194aec9ca3
695,362
async def demo_handler_with_validation(request, *args):
    """Demo request handler used in docstring/validation tests.

    Always resolves to an empty string regardless of input.
    """
    return ""
f47bd125a38d76cdc2e340999386fcaea2933536
351,390
def is_same_website(base, url):
    """Return True when *url* stays on *base*; relative URLs count as same-site."""
    if not url.startswith('http'):
        # Relative link: by definition it stays on the current site.
        return True
    return url.startswith(base)
d5d23ab5febb7ca6b8517fb1d23f08cb48df5a18
420,734
def readfile(filename: str) -> str:
    """Return the entire contents of *filename* as a single string."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
f187fefd32938ab698f20b4473c61f412aac2d54
582,634
import torch


def flipfb(tensor):
    """Flip a tensor front-to-back (reverse along dimension 2).

    Parameters
    ----------
    tensor
        A tensor with at least three dimensions.

    Returns
    -------
    Tensor
        The tensor reversed along dim 2.
    """
    return torch.flip(tensor, dims=(2,))
b29f0838c5ffcd2d3bde985d7c3a4531d2f508f1
589,158
def removeMacChars(macAddress):
    """Return the MAC address with ':' and '-' separators removed.

    eg: 11:22:33:AA:BB:CC -> 112233AABBCC

    Fix: the old elif chain removed only one separator kind, so a
    mixed-separator address (e.g. '11-22:33') was left partially cleaned.
    """
    return macAddress.replace('-', '').replace(':', '')
8973a5ab0ff746daf8df50cffa878435307a5b35
232,148
import warnings


def get_link_to_assembly(species_list):
    """Select the FTP assembly path for the most up-to-date Complete Genome.

    Keeps only entries whose status is 'Complete Genome' with a usable
    assembly field; with several candidates, the entry carrying the most
    recent date wins (first match in *species_list* on ties).

    Argument:
      species_list -- list of dicts from get_entries_for_species
    Return:
      str -- assembly path of the selected entry, or '' if none qualifies
    """
    complete = [entry for entry in species_list
                if entry['status'] == 'Complete Genome'
                and entry['assembly'] not in ('', '-')]
    link_assembly = ''
    dates = []
    if complete:
        if len(complete) == 1:
            link_assembly += complete[0]['assembly']
        else:
            dates = [entry['date'] for entry in complete]
    else:
        # No genomic information means no cds_from_genomic file either.
        warnings.warn('There is no complete genome for this species')
    if dates:
        latest = max(dates)
        for entry in species_list:
            if entry['date'] == latest:
                link_assembly += entry['assembly']
                break  # first match wins when several entries share the date
    return link_assembly
50c086d714786c67335715f4005ecd3eb1338e55
16,640
def ansible_stdout_to_str(ansible_stdout):
    """Join an Ansible stdout character sequence into a single string.

    Fix: the old loop appended ``x.encode('UTF8')`` (bytes) to a str,
    which raises TypeError under Python 3; joining the characters
    directly is correct and linear-time.

    Args:
        ansible_stdout: stdout of Ansible (iterable of characters/strings)

    Returns:
        The concatenated string.
    """
    return "".join(ansible_stdout)
d74e95981d2f3ba6c896103b0bffb8372ada5f08
340,482
def determine_axes(f, *vars):
    """Determine the axes along which the FT should be performed.

    One variable per dimension of *f*; pass ``None`` for a dimension that
    should not be transformed.
    """
    if len(vars) != len(f.shape):
        raise TypeError('The number of variables has to match the dimension of '
                        '`f`. Use `None` for axis with respect to which no '
                        'transform should be performed.')
    axes = []
    for axis, var in enumerate(vars):
        if var is not None:
            axes.append(axis)
    return axes
324605cebf0550824eacb048e6ea6e94d5c293a6
58,033
def jd_to_sec(jd):
    """Convert a Julian date to seconds elapsed since the J2000 epoch."""
    J2000_EPOCH_JD = 2451545.0
    SECONDS_PER_DAY = 86400.0
    return (jd - J2000_EPOCH_JD) * SECONDS_PER_DAY
d827597aae833f4648fb4c12d571f1fb2dd9bae5
138,914
def to12hrs(militarytime, pad=False):
    """Convert a military 'HHMM' string to 12-hour am/pm format.

    Example: '2130' -> '9:30 pm'

    Fixes: noon previously produced an empty meridiem ('12:30 ') and
    midnight displayed hour 0 ('0:30 am'); both now render correctly
    as 12 pm / 12 am.

    :param militarytime: four-digit 'HHMM' string
    :param pad: zero-pad the hour to two digits when True
    :return: formatted 12-hour time string
    """
    hours = int(militarytime[0:2])
    minutes = int(militarytime[2:])
    meridiem = 'am' if hours < 12 else 'pm'
    hours %= 12
    if hours == 0:
        hours = 12  # 00:xx -> 12 am, 12:xx -> 12 pm
    formater = '%02d:%02d %s' if pad else '%d:%02d %s'
    return formater % (hours, minutes, meridiem)
80993faf85adde8616324427be93db210054b31a
575,580
def area_squa(l):
    """Calculate the area of a square with side length *l*.

    :Input: Side length of the square l (float, >= 0)
    :Returns: Area of the square A (float)
    :Raises: ValueError if *l* is negative
    """
    if l < 0:
        raise ValueError("The side length must be >= 0.")
    return l * l
11f0c93ca7f276c1ad2fb06c7959f802d0788282
80,523
def hello_user(name=""):
    """Return a greeting for *name*.

    With no (or empty) name the greeting defaults to "Hello Everyone!".

    Args:
        name (str): The user name typed in the Console.

    Returns:
        str: The greeting message.
    """
    who = name if name else 'Everyone'
    return f"Hello {who}!"
11a06ea5aad7d5649e862deac6ee5ef5a1f2cb31
514,239
def pattern_const32(context, tree):
    """Instruction-selection pattern: a small constant node is selected as its literal value."""
    return tree.value
dfe05b0beec449842841ca799c545a5121f90c6e
214,236
from typing import Dict from typing import Any def _format_order_item(item: Dict[str, Any]) -> str: """Return ORDER BY item with sort direction.""" if "sort" in item: return f"{item['value']} {item['sort'].lower()}" return f"{item['value']}"
33dec9d1e783ffaed53d868b29a000148e9a3b16
387,037
def polyeval(a, x):
    """Evaluate a polynomial at *x* using Horner's rule.

    p(x) = a[0] + a[1]x + a[2]x^2 + ... + a[n]x^n

    Fix: the old implementation reversed *a* in place and reversed it
    back afterwards — mutating the caller's list (and corrupting it if
    interrupted), and failing entirely on immutable sequences. Iterating
    ``reversed(a)`` leaves the input untouched and accepts tuples.

    :param a: sequence of coefficients, constant term first
    :param x: evaluation point
    :return: p(x)
    """
    p = 0
    for coef in reversed(a):
        p = p * x + coef
    return p
2e6f476db0dd02d30d2275bb5d4156ac877e95f4
417,704
def clear_dict_empty_lists(to_clear_dict):
    """Recursively remove entries with empty/falsy values from a nested dict.

    Fix: replaced ``six.iteritems`` with native ``dict.items`` — this
    snippet targets Python 3, so the ``six`` compatibility dependency
    (and its import) is no longer needed.

    param to_clear_dict dict: dictionary to 'compress'
    return dict: compressed version of to_clear_dict (non-dict inputs are
        returned unchanged; falsy inputs yield an empty dict)

    Hints: recursive
    """
    if not to_clear_dict:
        return {}
    if not isinstance(to_clear_dict, dict):
        return to_clear_dict
    new_dict = {}
    for key, value in to_clear_dict.items():
        if value:
            cleaned = clear_dict_empty_lists(value)
            if cleaned:
                new_dict[key] = cleaned
    return new_dict
4f2e7461fab27487dcd493e4af886d4ca7332b26
641,014
def units(self, label="", lenfact="", massfact="", timefact="", tempfact="",
          toffset="", chargefact="", forcefact="", heatfact="", **kwargs):
    """Annotate the database with the system of units used (APDL /UNITS).

    Parameters
    ----------
    label : str
        Unit-system label: ``SI``, ``MKS``, ``uMKS``, ``CGS``, ``MPA``,
        ``BFT``, ``BIN``, or ``USER`` (default) for a user-defined system.
    lenfact, massfact, timefact, tempfact, toffset, chargefact, forcefact, heatfact
        Conversion factors, used only with a USER-defined system.

    Notes
    -----
    The label and factors are informational markers only — ANSYS does not
    convert database items between unit systems; consistent units remain
    the user's responsibility. The setting is written to IGES output and
    to the ADAMS input file Jobname.MNF when exporting via the ADAMS
    interface. Valid in any processor.
    """
    factors = (label, lenfact, massfact, timefact, tempfact, toffset,
               chargefact, forcefact, heatfact)
    command = "/UNITS," + ",".join(str(factor) for factor in factors)
    return self.run(command, **kwargs)
a7f98325ab1d192dc4903e417a3813f17d515824
446,530
def _should_ignore(class_reference, property_name): """Check if a property should be ignored.""" if not hasattr(class_reference, "__deserialize_ignore_map__"): return False return class_reference.__deserialize_ignore_map__.get(property_name, False)
91868a403427779da1f803ae08e273f04e30f8e4
638,875
import csv


def read_project_list(filename):
    """Read a CSV project list into a list of dicts; the first row is the header.

    Fix: the file is now opened with ``with`` so the handle is closed
    deterministically (the old version leaked it).

    :param filename: path to the CSV file
    :return: list of dicts, one per data row
    """
    with open(filename) as csv_file:
        return list(csv.DictReader(csv_file))
95c381022e63ae15e38d9e8106e006552539ec25
456,869
def _get_num_reads(ngs_alignment): """ Returns the number of reads in the template of an NCBI/NGS alignment Args: ngs_alignment (ngs.Alignment): aligned read Returns: int: the number of reads in the template """ if ngs_alignment.hasMate(): return(2) else: return(1)
93a3e9076a758c0e0943b8d6b9426e9de463b52f
636,015
def GetExtent(ds):
    """Return the four corner coordinates of a gdal dataset.

    Corners come back as (UL, UR, LR, LL) (x, y) tuples derived from the
    geotransform and the raster size.
    """
    xmin, xpixel, _, ymax, _, ypixel = ds.GetGeoTransform()
    # ypixel is normally negative (north-up rasters), so ymin < ymax.
    xmax = xmin + ds.RasterXSize * xpixel
    ymin = ymax + ds.RasterYSize * ypixel
    return (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)
5dfcb2074b2fd5efb4019cf403bd026296079efc
673,564
import math


def interval95(acc: float, n_data: int) -> float:
    """Half-width of the 95% confidence interval for an accuracy estimate.

    Args:
        acc (float): The accuracy.
        n_data (int): The number of samples.

    Returns:
        float: The confidence interval half-width.
    """
    Z_95 = 1.96  # normal-approximation z value for 95% confidence
    return Z_95 * math.sqrt(acc * (1 - acc) / n_data)
bf1c3dcd1ab40a5597c78c824b2a621593d5f7ae
302,471
def is_int_like(val):
    """Check whether *val* round-trips through ``int`` unchanged (i.e. looks like an int)."""
    try:
        return str(int(val)) == str(val)
    except Exception:
        # Anything that can't be converted (or compared) is not int-like.
        return False
d06f096b3324fdf2189c5ec3b8027e4ec422520d
571,232
def _format_error(error: list) -> dict: """ Convert the error type list to a dict. Args: error (list): a two element list with the error type and description Returns: dict: explicit names for the list elements """ return {'error_type': error[0], 'description': error[1]}
842dd6b456ac25e90c79f720ea59794188001234
681,095
def invcdf_uniform(val: float, lb: float, ub: float) -> float:
    """Inverse CDF of a uniform distribution on [lb, ub]; constant time.

    Values outside [0, 1] map to -inf / +inf respectively.

    Examples:
        >>> invcdf_uniform(1, 5, 10)
        10
        >>> invcdf_uniform(0, 5, 10)
        5
        >>> invcdf_uniform(0.5, 5, 10)
        7.5
    """
    if 0 <= val <= 1:
        return val * (ub - lb) + lb
    return -float("inf") if val < 0 else float("inf")
a113171c9d4ced58d06e41c1289e5e30200b8d09
641,209
def _parse_vw_output(text): """Get dict of useful fields from Vowpal Wabbit's output. Currently returns field 'average_loss', which is a lower bound on mean per-word log-perplexity (i.e. same as the value LdaModel.bound() returns). """ data = {} for line in text.splitlines(): if line.startswith('average loss'): data['average_loss'] = float(line.split('=')[1]) break return data
9b1a625884636190cb3186cbbbea722ff3882715
489,570
def sanitize(value):
    """Make a filename valid by replacing '/' with '_'.

    :param value: string to sanitize
    """
    return value.replace('/', '_')
94f72e2bf1d1cf08dde9ae6c192ed1b68a875900
52,478
def normalize_Import(node):
    """Render an AST Import node as a list of one-per-line 'import X' strings.

    Each returned string ends with a newline; 'as' aliases are preserved.
    """
    statements = []
    for name, asname in node.getChildren()[0]:
        stmt = 'import %s' % name
        if asname is not None:
            stmt += ' as %s' % asname
        statements.append(stmt + '\n')
    return statements
f2757deca48f2aeda90da2b667984190d9e3416d
144,579
def normalize_cpu_arch(arch_specifier: str) -> str:
    """Map a reported CPU architecture string to its common specifier.

    Different systems report the architecture differently (e.g. `uname -p`);
    this provides a single place to translate between the variants.

    :param arch_specifier: architecture string as reported by the system
    :type arch_specifier: str
    :returns: the common specifier for that architecture
    :rtype: str
    """
    canonical = {
        "amd64": "amd64",
        "x86_64": "amd64",
        "i386": "386",
        "i686": "386",
    }
    # Unknown architectures raise KeyError, matching the original contract.
    return canonical[arch_specifier]
9d71af9efd9d0925954e0df2af8ed1760d319dd1
407,778
def fit_model(model, X, y):
    """Fit a model to data and return the fitted estimator.

    Args:
        model (sklearn obj): estimator exposing ``fit``
        X (pd dataframe): predictors, shape (n_obs, n_predictors)
        y (series): target, shape (n_obs, 1)

    Returns:
        the fitted sklearn model
    """
    fitted = model.fit(X, y)
    return fitted
3f662f0f6c1898a11a6451312c34d49f49b8488b
548,133
def get_clean_text(html):
    """Collapse runs of whitespace (including nbsp) in the element's text to single spaces."""
    words = html.text_content().split()
    return " ".join(words)
df0f789ddbbaee51ea807465bf7e286b2a2a4096
76,375
import math


def to_clock(time_secs):
    """Format match time in seconds as an m:ss clock string.

    Args:
        time_secs: Number of seconds (float).

    Returns:
        Clock time in format m:ss (str).
    """
    whole_seconds = math.floor(time_secs)
    mins, secs = whole_seconds // 60, whole_seconds % 60
    return "{}:{:02d}".format(mins, secs)
803c57913e8301cbad1ee2ba5515ce78ed47c250
562,393
def Find_IPV6_In_Text(text):
    """Find and return all IPv6-looking addresses in the given string.

    Heuristic: a whitespace-separated word with at least two ':' and
    exactly one '/' (i.e. CIDR notation) is treated as an IPv6 address.

    :param text: string to search.
    :type text: str
    :return: IPv6 addresses found in string.
    :rtype: list of str
    """
    return [word for word in text.split()
            if word.count(':') >= 2 and word.count('/') == 1]
73c1b5a151b809aa9cb3f79d67d933db87a7cdfa
486,591
def _encode_features(features: dict) -> str: """Encodes features from a dictionary/JSON to CONLLU format.""" return '|'.join(map(lambda kv: f'{kv[0]}={kv[1]}', features.items()))
a9cea4cdf7a0b46c66d981c3389cc6c8821bf399
498,950
def is_weekend(time):
    """Return True when the given datetime-like value falls on a weekend.

    Expects a ``dayofweek`` attribute (Monday=0), so Saturday=5 and Sunday=6.
    """
    return time.dayofweek in (5, 6)
7553ea63622668a7d356c0a55334f4e897440f82
532,672
def _totuple(size, dim): """ Converts size to tuple. """ if not isinstance(size, tuple): if dim == 2: size = (size, size) elif dim == 3: size = (size, size, size) return size
2f6c514838bed8508d9cac951796c5e00bf65ef3
570,356
import math


def exponential_growth(p, r, t):
    """Continuous exponential growth: population after *t* generations.

    :param p: initial population
    :param r: growth rate
    :param t: number of generations
    :returns: p * e^(r*t)
    :rtype: numeric
    """
    growth_factor = math.exp(r * t)
    return p * growth_factor
faff6f818ff60e7f1567320c2da8d1633e5a315e
191,859
def clean_sentence(sentence: str):
    """Replace OpenIE bracket tokens (-LRB-/-RRB-/-LSB-/-RSB-) with real parentheses.

    :param sentence: a sentence
    :return: cleaned sentence
    """
    replacements = (
        ('-LRB- ', '('),
        ('-LSB- ', '('),
        (' -RRB-', ')'),
        (' -RSB-', ')'),
    )
    cleaned = sentence
    for token, bracket in replacements:
        cleaned = cleaned.replace(token, bracket)
    return cleaned
e6800c757d6a485d1b386ff1ecc055a1e191614d
56,578
import curses


def create_hline_window(y_start, x_start, length):
    """Create a 1-row curses window containing a horizontal '#' line.

    The window is noutrefresh'd (staged, not yet drawn) and returned.
    """
    line_win = curses.newwin(1, length + 1, y_start, x_start)
    line_win.hline(0, 0, "#", length)
    line_win.noutrefresh()
    return line_win
809eb5fd0182c2678b427133a5363867d00bcdf7
295,045
def __getitem__(self, key): """Gets the item of the dictionary according to the specified key. Parameters ---------- self : ee.Dictionary Dictionary to get the items from. key : str Key used to get the specified item. It gets the value of the specified key. Returns ------- ee.Element Selected value. Examples -------- >>> import ee, eemont >>> ee.Authenticate() >>> ee.Initialize() >>> eeDict = ee.Dictionary({"a":1,"b":2,"h":4}) >>> eeDict["a"].getInfo() 1 >>> eeDict["h"].getInfo() 4 """ return self.get(key)
181e6f59066d1fbe2b8ddcdf7bfd63c528d1459d
384,009
from typing import cast
import toml


def open_toml(fname: str) -> dict:
    """Load *fname* as TOML and return the parsed mapping."""
    with open(fname) as handle:
        return cast(dict, toml.load(handle))
8e4acd43871402c69794a1bba7b560ed9ce0b211
236,471
import copy


def correct_Q(action, state, reward, old_Q, next_Q):
    """Bellman-corrected Q(s,a) estimate: Q(s,a) = R + gamma * Q(s+1,a+1).

    Only the entry for *action* (index 0/1/2) is updated; a shallow copy
    of *old_Q* is returned so the input is left untouched.
    """
    gamma = 0.5  # weights importance of future reward
    updated_Q = copy.copy(old_Q)
    updated_Q[action] = reward + gamma * next_Q[action]
    return updated_Q
10cf7bedb4d8bdd67b63eb9bc199ca60b9fd119e
63,607
import torch


def replicate_z_samples(t, n_z_samples):
    """Replicate *t* along a new leading dimension, n_z_samples times.

    Uses ``expand``, so the result is a broadcast view — no data copy.
    """
    return t.unsqueeze(0).expand(n_z_samples, *t.shape)
9285dddb7029e05382a46613ce4cf0d3b7244393
686,005
import json


def parseZones(s):
    """Parse a Zones JSON payload and return its 'Response' field.

    Fix: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to the failures this function actually
    expects: invalid JSON (ValueError), a missing 'Response' key
    (KeyError), or a non-mapping payload (TypeError).

    :param s: JSON string with the responses
    :return: the 'Response' value, or None on any expected parse failure
    """
    try:
        return json.loads(s)['Response']
    except (ValueError, KeyError, TypeError):
        return None
95bcc30aa29bb1f3f4e4b2032121a15a2cee7b38
537,857
def get_derived_unit(registry, key):
    """Get the unit of a physical quantity in a provided unit system.

    Parameters
    ----------
    registry: dict (str: unit)
        Maps 'length', 'mass', 'time', 'current', 'temperature',
        'luminous_intensity', 'amount' to units. If ``None``, the
        function returns 1.0 unconditionally.
    key: str
        One of the registry keys or a derived quantity such as
        'diffusivity', 'electrical_mobility', 'permittivity', 'charge',
        'energy', 'concentration', 'density', 'radiolytic_yield'.

    Examples
    --------
    >>> m, s = default_units.meter, default_units.second
    >>> get_derived_unit(SI_base_registry, 'diffusivity') == m**2/s
    True
    """
    if registry is None:
        return 1.0
    length = registry["length"]
    mass = registry["mass"]
    time = registry["time"]
    current = registry["current"]
    amount = registry["amount"]
    energy = mass * length ** 2 / time ** 2
    derived = {
        "diffusivity": length ** 2 / time,
        "electrical_mobility": current * time ** 2 / mass,
        "permittivity": current ** 2 * time ** 4 / (length ** 3 * mass),
        "charge": current * time,
        "energy": energy,
        "concentration": amount / length ** 3,
        "density": mass / length ** 3,
        "radiolytic_yield": amount / energy,
        "doserate": energy / mass / time,
        "linear_energy_transfer": energy / length,
    }
    derived["diffusion"] = derived["diffusivity"]  # 'diffusion' is deprecated
    try:
        return derived[key]
    except KeyError:
        # Fall back to the base registry for plain dimension keys.
        return registry[key]
14e6db8b4fb304a5c270450fceb01f56edbe68e2
280,894
from typing import List


def is_hat_shaped(learning_rates: List[float]):
    """Check whether the learning-rate list is "hat" shaped.

    A hat has both a rising and a falling segment, the fall comes after
    the rise, and the rates never rise again once they start to fall.
    Changes within +-1e-8 count as flat and are ignored.
    """
    EPS = 1e-8
    rising = False
    falling = False
    for prev, curr in zip(learning_rates, learning_rates[1:]):
        delta = curr - prev
        if delta > EPS:
            if falling:
                # Can't rise again after the peak.
                return False
            rising = True
        elif delta < -EPS:
            if not rising:
                # Can't fall before any rise.
                return False
            falling = True
        # otherwise: flat, no state change
    return rising and falling
5da66f5a367ccc75c82972d89dc5a92e875c409d
649,511
def ispandigital0(m, n):
    """Check digit distinctness of *m* in base *n*.

    Returns (True, z) when no base-n digit of m repeats, where z is True
    iff m has a zero digit; returns (False, False) on any repeat.
    """
    seen = set()
    has_zero = False
    while m > 0:
        m, digit = divmod(m, n)
        if digit in seen:
            return False, False
        if digit == 0:
            has_zero = True
        seen.add(digit)
    return True, has_zero
6df42bd55a930cf36f11088ea26b1c78c952d69c
366,465
def get_formatted_authors(result, characters_remaining):
    """Build a comma-separated author list that fits the remaining line width.

    Authors are taken in order but an author that does not fit is simply
    skipped (later, shorter names may still be added). '...' is appended
    whenever at least one author was dropped.

    Args:
        result: (dict) entry with an optional 'authors' list of
            {'name': ...} or {'Name': ...} dicts
        characters_remaining: (int) characters left on the line

    Returns:
        (str), (int): formatted author list and updated remaining count
    """
    authors_obj = result.get('authors', [])
    if authors_obj is None:
        authors_obj = []
    names = []
    for author in authors_obj:
        for key in ("name", "Name"):
            if key in author:
                names.append(author[key])
                break
    total_authors = len(names)
    kept = []
    while names:
        candidate = names.pop(0)
        if characters_remaining > len(candidate):
            kept.append(candidate)
            # +2 accounts for the ", " separator.
            characters_remaining -= len(candidate) + 2
    authors = ", ".join(kept)
    if len(kept) < total_authors:
        authors += "..."
    return authors, characters_remaining
7fb5ca5aed81e54ef60dd7fec80ff9859ec7f263
399,859
import torch def _signed_area(path: torch.Tensor) -> torch.Tensor: """ Calculates the signed area / Lévy area of a 2D path. If the path is closed, i.e. ends where it starts, this is the integral of the winding number over the whole plane. If not, consider a closed path made by adding a straight line from the end to the start; the signed area is the integral of the winding number (also over the plane) with respect to that closed path. If this number is positive, it indicates in some sense that the path turns anticlockwise more than clockwise, and vice versa. Args: path: N x 2 tensor of points. Returns: signed area, shape () """ # This calculation is a sum of areas of triangles of the form # (path[0], path[i], path[i+1]), where each triangle is half a # parallelogram. x, y = (path[1:] - path[:1]).unbind(1) return (y[1:] * x[:-1] - x[1:] * y[:-1]).sum() * 0.5
47716d51f981613add08d04ed8d01d5794c89fd6
638,701
import inspect


def find_people(person_module):
    """Call every function in *person_module* whose name starts with 'get'
    and return their results (in name order, as given by getmembers)."""
    members = inspect.getmembers(person_module, predicate=inspect.isfunction)
    return [fn() for name, fn in members if name.startswith('get')]
c3b957051fc75acc5bd208a36d2882164e8564cd
81,954
def pad_pkcs5(data, block_size):
    """Return *data* padded with PKCS5.

    For block size B and N bytes in the last block, PKCS5 appends B-N
    bytes each equal to chr(B-N); a full last block gains a whole block
    of padding.

    :param data: Data to be padded.
    :type data: :class:`str`
    :param block_size: Size of the block.
    :type block_size: :class:`int`
    :return: :class:`str` -- PKCS5 padded string.
    """
    pad_len = block_size - (len(data) % block_size)
    return data + chr(pad_len) * pad_len
5b28ef53a2fba394741a040509b5f82b2d947021
95,702
import math


def find_roots_quadratic(a: float, b: float, c: float) -> set:
    """Return the set of real solutions of ax^2 + bx + c = 0 (as floats).

    You may ASSUME that:
        - a != 0
        - (b * b) - (4 * a * c) >= 0

    >>> find_roots_quadratic(1, -15, 56) == {8.0, 7.0}
    True
    >>> find_roots_quadratic(1, 8, 15) == {-3.0, -5.0}
    True
    """
    assert a != 0
    discriminant = b * b - 4 * a * c
    assert discriminant >= 0
    root_term = math.sqrt(discriminant)
    denom = 2 * a
    return {(-b - root_term) / denom, (-b + root_term) / denom}
664f3ec213200ac2ed3a1cc4f8001da4331938bc
703,918
import torch


def quat_mul(q1: torch.Tensor, q2: torch.Tensor):
    """Compute the Hamilton product qout = q1 * q2.

    Quaternions are stored as (w, x, y, z) in the last dimension.  Note
    that the Hamilton product is not commutative.

    Args:
        q1: Quaternions of shape ([*, ], 4)
        q2: Quaternions of shape ([*, ], 4)

    Returns:
        qout = q1*q2, same shape as the inputs.
    """
    assert q1.shape[-1] == 4 and q2.shape[-1] == 4
    w1, x1, y1, z1 = q1.unbind(-1)
    w2, x2, y2, z2 = q2.unbind(-1)
    return torch.stack([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ], dim=-1)
5b52307cfd3a11479dc31c1a4e68683558edd7bf
376,153
import torch


def collate_fn(data):
    """Build a mini-batch from (x, m, ivec, jvec, demo) samples.

    A custom collate is needed because ivec and jvec have varying
    lengths per sample: they are concatenated into single long vectors
    (row form) rather than stacked.

    Args:
        data: list of (x, m, ivec, jvec, demo) tuples.

    Returns:
        Tuple (x, m, ivec, jvec, demo): x, m and demo stacked along
        dim=1; ivec and jvec concatenated along dim=0.
    """
    xs, ms, ivecs, jvecs, demos = zip(*data)
    return (
        torch.stack(xs, dim=1),
        torch.stack(ms, dim=1),
        torch.cat(ivecs, dim=0),
        torch.cat(jvecs, dim=0),
        torch.stack(demos, dim=1),
    )
06c25feda297aba9d000e1ffb1e2f648887ae89a
670,605
async def get_n(aiter, n=0):
    """Collect up to ``n`` items from an async iterator.

    Args:
        aiter: async iterable to consume.
        n: maximum number of items to take; 0 means take everything.

    Returns:
        List of the collected items.
    """
    items = []
    async for item in aiter:
        items.append(item)
        # n == 0 disables the cap entirely.
        if n and len(items) >= n:
            break
    return items
b1a7f409ccf1588ffd855c3bede90a903f93af04
277,596
def norm_img(img):
    """Min-max normalize an image to the range [0, 1].

    :param img: array-like image (e.g. numpy array or torch tensor)
        supporting ``min``/``max`` and elementwise arithmetic.
    :return: image rescaled so its minimum maps to 0 and maximum to 1.
        A constant image is returned as all zeros; the previous version
        divided by zero in that case, producing NaNs/Infs.
    """
    lo = img.min()
    span = img.max() - lo
    if span == 0:
        # Degenerate case: every pixel equal — avoid 0/0.
        return img - lo
    return (img - lo) / span
1783bab8f27116084d8b9deca7aa219c6743f4a2
310,983
def to_timestamp(ts: int) -> str:
    """Return a ``[(h*):mm:]ss`` timestamp string from ``ts`` seconds.

    Examples: 0 -> "0", 59 -> "59", 61 -> "1:01", 3661 -> "1:01:01".

    Fixes a bug in the previous version where any zero-valued unit was
    filtered out, so the minutes field vanished when minutes were zero
    but hours were not (3600 rendered as "1:00" — one minute — instead
    of "1:00:00").
    """
    if ts == 0:
        return "0"
    hh, rem = divmod(ts, 3600)
    mm, ss = divmod(rem, 60)
    # The leading unit is unpadded; trailing units are zero-padded.
    if hh:
        return f"{hh}:{mm:02d}:{ss:02d}"
    if mm:
        return f"{mm}:{ss:02d}"
    return str(ss)
98996cdbca762013bc88ddc5ba04e237484c233d
287,134
import unicodedata
import re


def preprocess(rule: str) -> str:
    """Clean and normalize a rule string prior to parsing.

    @param rule: The rule to be preprocessed.
    @return: The cleaned, preprocessed rule.
    """
    # Normalize to NFD first, as per maniphono's convention.
    normalized = unicodedata.normalize("NFD", rule)
    # Trim the ends, then collapse internal whitespace runs to a
    # single space.
    return re.sub(r"\s+", " ", normalized.strip())
649840f609b08fe42ce0fb50016eed0997aa6c03
509,627
def merge_repository_changeset_revisions(repository_list):
    """Merge per-revision repository entries into one entry per repository.

    Each installed changeset revision of a tool is listed individually
    in ``repository_list``; entries for the same repository (same shed
    URL, name, owner and tool-panel section) are merged so all of their
    revisions appear in a single ``revisions`` list.

    Fixes over the previous version:
      - duplicate revisions are removed with an order-preserving,
        deterministic dedupe (``dict.fromkeys``) instead of
        ``list(set(...))``, whose order varied under hash randomization;
      - the first entry's revision list is copied rather than aliased,
        so the caller's input dicts are no longer mutated.

    Args:
        repository_list: list of dicts with keys ``tool_shed_url``,
            ``name``, ``owner``, ``tool_panel_section_id``,
            ``tool_panel_section_label`` and ``revisions``.

    Returns:
        List of merged repository dicts, one per unique repository.
    """
    repo_key_template = "{tool_shed_url}|{name}|{owner}|{tool_panel_section_id}|{tool_panel_section_label}"
    merged = {}
    for repo in repository_list:
        repo_key = repo_key_template.format(**repo)
        # setdefault with a fresh list avoids aliasing repo['revisions'].
        merged.setdefault(repo_key, []).extend(repo['revisions'])
    new_repository_list = []
    for repo_key, changeset_revisions in merged.items():
        tool_shed_url, name, owner, tool_panel_section_id, tool_panel_section_label = repo_key.split('|')
        new_repository_list.append(
            {'tool_shed_url': tool_shed_url,
             'name': name,
             'owner': owner,
             'tool_panel_section_id': tool_panel_section_id,
             'tool_panel_section_label': tool_panel_section_label,
             # Order-preserving dedupe, deterministic across runs.
             'revisions': list(dict.fromkeys(changeset_revisions))}
        )
    return new_repository_list
03694a2f32d4eba4fb5cdfed17cdbe918daeaf48
137,641
def to_dict(qresults, key_function=lambda rec: rec.id):
    """Turn a QueryResult iterator or list into a dictionary.

    - qresults - Iterable returning QueryResult objects.
    - key_function - Optional callback which, given a QueryResult,
      returns the unique dictionary key to use for it; defaults to the
      QueryResult's string ID.

    Raises ValueError when two results map to the same key.  Note the
    callback does not change the QueryResult's ID value — it only
    changes the key used to retrieve the associated QueryResult.

    As this function loads all QueryResult objects into memory, it may
    be unsuitable for files containing many queries; in that case,
    prefer ``index`` or ``index_db``.
    """
    lookup = {}
    for result in qresults:
        key = key_function(result)
        if key in lookup:
            raise ValueError("Duplicate key %r" % key)
        lookup[key] = result
    return lookup
71cde6a3e4ee5dad3a0761f6424d42d2e8f70ab0
340,668
def get_spaceweather_imagefile_name(if_path, if_date, if_filename,
                                    if_extension, verbose):
    """Build a complete image filename for the spaceweather site.

    Concatenates the path, date, filename root and extension as
    ``<path><date>_<filename><extension>``.

    If ``verbose`` is truthy, the resulting filename is printed.
    """
    sw_imagefile = "{}{}_{}{}".format(if_path, if_date, if_filename,
                                      if_extension)
    if verbose:
        print("Input image file full path: \n{}\n".format(sw_imagefile))
    return sw_imagefile
208b3b1410e0534dd77d588b557220d6b732d833
272,326
def convert_old_description(old_description: str) -> str:
    """Format an old course description closer to the new parser.

    Drops a leading "Cancelled." marker if present and unescapes HTML
    double-quote entities.

    Parameters
    ----------
    old_description: input course description

    Returns
    -------
    description: formatted description
    """
    if old_description.startswith("Cancelled."):
        old_description = old_description[len("Cancelled."):]
    return old_description.replace("&quot;", '"')
73a4091726d01e51356b999d3a5cbd2ff7e9fabf
216,433
def pad_str(s: str, pad: str = '0', length: int = 10) -> str: """ Prepends a string to a desired length @pad - the charater to prepend the string with @length - the total length desired """ while len(s) < length: s = pad + s return s
273c77d7b76f4db8a3bc6d43e2d558b1df00c1be
309,286
from pathlib import Path


def default_dataset_location(dataset_name: str) -> Path:
    """Return the default on-disk location of a dataset.

    This currently returns "~/.avalanche/data/<dataset_name>"; an
    environment variable may be introduced in the future to change the
    root path.

    :param dataset_name: The name of the dataset.  Consider using a
        string that is valid as a directory name on most filesystems!
    :return: The default path for the dataset.
    """
    return Path.home() / ".avalanche" / "data" / dataset_name
296a5022d7e4f1542265937c104a883b1f075abc
644,373
def is_number(val):
    """Check whether a value can be cast to ``float``.

    Args:
        val: Value to check.

    Returns:
        True if the value was successfully cast to ``float``;
        False otherwise.
    """
    try:
        float(val)
    except (ValueError, TypeError):
        return False
    return True
89728d3d199c3ef5885529da5a2d13dd94fa590f
11,329
def is_image(file_path):
    """Check whether the specified file path looks like an image.

    :param file_path: Path to the candidate file
    :return: True for .png/.jpg/.jpeg paths, False otherwise
    """
    # str.endswith accepts a tuple of suffixes — one C-level call.
    return file_path.endswith((".png", ".jpg", ".jpeg"))
c793affc6ddedff7b0cf741f18e5cae2c42d5458
202,620
def prepare_index_response_data(response):
    """Serialize a response from an index request.

    Works for any pydantic-style object exposing a ``json()`` method;
    simply delegates to it.
    """
    return response.json()
0dfd51c27457cf61a339b2c7a1832957bcf9ea48
563,561
def dimshuffle(value, ndim, axes):
    """Shuffle dimensions based on the target ``ndim`` and ``axes``.

    Builds a dimshuffle pattern of length ``ndim`` where every slot is
    'x' (broadcastable) except the positions listed in ``axes``, which
    receive the input's existing dimensions in order.

    Parameters
    ----------
    value : Theano variable
    ndim : int
    axes : tuple, list

    Returns
    -------
    Theano variable
    """
    pattern = ['x' for _ in range(ndim)]
    for position, axis in enumerate(axes):
        pattern[axis] = position
    return value.dimshuffle(pattern)
30ba0acc7ff19b3b2a10e77324f2820a6754f307
178,092
import torch


def sample_flexible(positive, negative, batch_size, pos_percent):
    """Sample positives up to ``pos_percent`` of the batch.

    If there are not enough positives, extra negatives are sampled to
    fill the batch up to ``batch_size``.

    Args:
        positive: 1-D tensor of positive indices.
        negative: 1-D tensor of negative indices.
        batch_size: desired total number of samples.
        pos_percent: maximum fraction of the batch that may be positive.

    Returns:
        Tuple (positive, negative) of possibly subsampled tensors.
    """
    max_pos = int(batch_size * pos_percent)
    if positive.numel() > max_pos:
        keep = torch.randperm(positive.numel(), device=positive.device)[:max_pos]
        positive = positive[keep]
    # Fill whatever the positives did not cover with negatives.
    max_neg = batch_size - positive.numel()
    if negative.numel() > max_neg:
        keep = torch.randperm(negative.numel(), device=negative.device)[:max_neg]
        negative = negative[keep]
    return positive, negative
c65492fe8667c05ded90e13cfd38356b61307eea
141,402
def convert_to_float(float_value):
    """Try to convert to float, otherwise return 0.

    Now also catches TypeError (e.g. for ``None`` or a list), which the
    previous version let propagate despite the documented contract of
    returning 0 on failure — matching the behavior of ``is_number``
    elsewhere in this file.
    """
    try:
        return float(float_value)
    except (ValueError, TypeError):
        return 0
0858adefee7d7438801be4b9c3b0dbe4128b75c9
239,297
import re


def cigarToLen(cigar):
    """Calculate the query sequence length from a CIGAR string.

    Sums the spans of operations that consume query bases (M, I, S);
    other operations (D, N, H, ...) are ignored.
    """
    total = 0
    # Each CIGAR element is a run length followed by one upper-case
    # operation code, e.g. "10M".
    for span, op in re.findall(r'([0-9]+)([A-Z])', cigar):
        if op in ('M', 'I', 'S'):
            total += int(span)
    return total
fa7c2b0633a349cc3295519bffcff9965c6ae704
45,466
import colorsys


def hsl_to_rgb(hsl):
    """Convert HSL colorspace values to RGB.

    Args:
        hsl: (hue, saturation, lightness) with hue in [0, 359] and
            saturation/lightness in [0, 100].

    Returns:
        (r, g, b) tuple of ints in [0, 255].

    Bug fix: ``colorsys.hls_to_rgb`` takes its arguments in HLS order
    (hue, LIGHTNESS, saturation).  The previous version passed
    (h, s, l), swapping the last two, so e.g. pure red (0, 100, 50)
    came out white.
    """
    # Convert hsl to 0-1 ranges.
    h = hsl[0] / 359.
    s = hsl[1] / 100.
    l = hsl[2] / 100.
    # colorsys returns numbers between 0 and 1, in HLS argument order.
    r, g, b = colorsys.hls_to_rgb(h, l, s)
    # Scale back up to 0-255 ints.
    return (int(round(r * 255)), int(round(g * 255)), int(round(b * 255)))
4417ce8468e71b7139b57fe270809c7030b2c3df
707,151
def convert_frac(ratio):
    """Convert a ratio string into a float, e.g. "1.0/2.0" -> 0.5.

    Plain numeric strings are converted directly; on failure the string
    is parsed as "numerator/denominator".
    """
    try:
        return float(ratio)
    except ValueError:
        numerator, denominator = ratio.split('/')
        return float(numerator) / float(denominator)
5503f0335a9371d4fb008b005fc0b268a86209e8
684,333
def calc_theta_int_inc(theta_int_ini, delta_theta_int_inc):
    """Sum an initial temperature and its increment.

    Eq. (1) in [prEN 15316-2:2014].

    :param theta_int_ini: temperature [C]
    :type theta_int_ini: double
    :param delta_theta_int_inc: temperature increment [C]
    :type delta_theta_int_inc: double
    :return: sum of temperatures [C]
    :rtype: double
    """
    return theta_int_ini + delta_theta_int_inc
35ee8827750bb4e74b534c9577d216bc0dc79150
655,261
def _generateSpecial_VectorOpts_orientFlip( overlayList, displayCtx, source, longArg): """Generates the ``VectorOpts.orientFlip`` option. """ flip = source.overlay.isNeurological() != source.orientFlip if flip: return [longArg] else: return []
dc1e8c2f45380e1e3ebb0cb0e61d01dd637760e0
375,658
def dependency_extracting(list_gate_qubits, count_program_qubit: int):
    """Extract dependency relations between the gates.

    If two gates g_1 and g_2 both act on a qubit *and no gate between
    them acts on that qubit*, then g_2 depends on g_1 and the pair
    (g_1, g_2) is recorded.

    Args:
        list_gate_qubits: a list of gates in OLSQ IR; each gate is a
            tuple of the one or two program qubits it acts on.
        count_program_qubit: the number of logical/program qubits.

    Returns:
        Tuple of (earlier, later) gate-index dependency pairs.
    """
    dependencies = []
    # For each qubit, the index of the latest gate that acted on it;
    # -1 while no gate has touched it yet.  Sweeping through the gates
    # in order, every update of this table induces one dependency.
    latest = [-1] * count_program_qubit
    for gate_index, qubits in enumerate(list_gate_qubits):
        for qubit in qubits:
            if latest[qubit] >= 0:
                dependencies.append((latest[qubit], gate_index))
            latest[qubit] = gate_index
    return tuple(dependencies)
8e0cc44fec8c1b767868c9af24b7354f5842c981
609,341
def lon_to_180(ds):
    """Convert a dataset's longitude coordinate from (0, 360) to (-180, 180).

    ref: https://gis.stackexchange.com/questions/201789/verifying-formula-that-will-convert-longitude-0-360-to-180-to-180/201793

    Parameters
    ----------
    ds : xarray.Dataset
        a dataset with the longitude ranging from 0 to 360

    Returns
    -------
    xarray.Dataset
        a dataset with the longitude ranging from -180 to 180
    """
    # Wrap longitudes into [-180, 180), then re-sort the coordinate so
    # it is monotonically increasing again.
    wrapped = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))
    return wrapped.reindex(lon=sorted(wrapped.lon))
e1ced9ac016aee469832bf928edd7482a8e9f0d3
435,818
def response_map(fetch_map):
    """Create an expected FETCH response map from the given request map.

    Most keys returned in a FETCH response are unmodified from the
    request.  The exceptions are BODY.PEEK / BINARY.PEEK, which are
    answered without the .PEEK suffix, and partial ranges
    (e.g. BODY[]<0.1000>), whose octet count (1000) is removed since
    that information is provided in the literal size (and may differ if
    the data was truncated).
    """
    if not isinstance(fetch_map, dict):
        fetch_map = {item: item for item in fetch_map}
    expected = {}
    for key, value in fetch_map.items():
        for prefix in ('BODY', 'BINARY'):
            if key.startswith(prefix):
                # Answered without the .PEEK suffix.
                key = key.replace(prefix + '.PEEK', prefix, 1)
                if key.endswith('>'):
                    # Drop the octet count from the partial range.
                    key = key.rsplit('.', 1)[0] + '>'
        expected[key] = value
    return expected
42d992662e5bba62046c2fc1a50f0f8275798ef8
3,107
import re def _get_file_index(filename): """ Extract the numbered index in filename for sorting """ mobj = re.match(r'.+\-(?P<idx>[0-9]+)\.[a-z]+', filename) if mobj: return int(mobj.group('idx')) return -1
e9d694eb1f0b776469f510833cf6b70168c1cec3
263,171
def job_config(job):
    """Extract a config dictionary from a GraphQL job result.

    ``job["config"]`` is a list of {"key": ..., "value": ...} entries;
    they are folded into a single key -> value mapping.
    """
    return dict((entry["key"], entry["value"]) for entry in job["config"])
d9500a460c661b4714f0f4b744b4ee1a2c58701d
106,157
def _price_dish(dish): """ Computes the final price for ordering the given dish, taking into account the requested quantity and options. Args: dish (dict): KB entry for a dish, augmented with quantity and options information. Returns: float: Total price for ordering the requested quantity of this dish with options included. """ total_price = dish['price'] if 'options' in dish: total_price += sum([option.get('price', 0) for option in dish['options']]) return total_price * dish['quantity']
1ab29262540a8f875d60efa087ff9bf179bd74f5
681,404
from random import choice def temp_password(length=10, allowed_chars=("abcdefghjkpqrstuvwxyz" "3456789ACDEFGHJKLMNPQRSTUVWXY")): """ Generates a temporary password with the given length and given allowed_chars. """ return ''.join([choice(allowed_chars) for i in range(length)])
d6f6d0635342c77c3fddf330fdb084704e535b53
267,391
import shutil


def check_disk_usage(disk, min_absolute, min_percent):
    """Return True if there is enough free disk space, False otherwise.

    Args:
        disk: path of the mount/directory to check.
        min_absolute: minimum free space required, in gigabytes.
        min_percent: minimum free space required, as a percentage.
    """
    usage = shutil.disk_usage(disk)
    percent_free = 100 * usage.free / usage.total
    gigabytes_free = usage.free / 2**30
    # Both thresholds must be satisfied.
    return percent_free >= min_percent and gigabytes_free >= min_absolute
77ec190ec09b8cdb41e5096e2780ace08ee87ab4
162,028
def get_class(obj, lower=False):
    """Return the object's class name as a string.

    Args:
        obj: any object.
        lower: when True, the name is lowercased.
    """
    name = obj.__class__.__name__
    if lower:
        return name.lower()
    return name
208853e1bd5c12cdb082ab668d784a687ab5bcbd
203,783