Dataset schema: content (string, length 39 to 9.28k), sha1 (string, length 40), id (int64, 8 to 710k). Each record below lists content, then sha1, then id.
import requests


def get_rates(url):
    """Get currency rates; returns the request status (boolean) and the
    response in JSON format.

    :param str url: Exchange site url
    :return boolean: True for successful response, else False.
    :return dict: Response in case of success, else empty dict.
    """
    response = requests.get(url)
    if response.status_code == 200:
        return True, response.json()
    return False, dict()
5706d6dd6fd96d9f364c57b69a29e6c7dde791af
647,263
def append_zeros_to_filtname(filtname):
    """Adds zeros to standardize the size of all filter names

    Parameters:
        filtname (str) - name of the filter file

    Returns:
        filtname (str) - name of the filter file, possibly now with zeroes inserted
    """
    while len(filtname) < 15:
        filtname = filtname[:6] + '0' + filtname[6:]
    return filtname
3f90ed243ab8c538a976ebfe48c183726e14f624
476,501
def class_to_path(cls):
    """Given a class, returns the class path"""
    return ':'.join([cls.__module__, cls.__name__])
25476e1e76bcc566c03dedf980be58929c85d213
163,522
def apply_mask(image, mask, fill=0):
    """
    Fills pixels outside the mask with a constant value.

    :param image: image to apply the mask to.
    :param mask: binary mask with True values for pixels that are to be preserved.
    :param fill: fill value.
    :return: Masked image
    """
    masked = image.copy()
    masked[~mask] = fill
    return masked
85b65ed3be477e9faaacfd7470870ace2d41ff0e
483,764
import math


def hypergeometric_dist(x, N, n, k):
    """
    Evaluate the hypergeometric distribution h(x; N, n, k) as defined in the
    textbook. Equivalent to the probability of getting x blue socks when
    picking n socks from a total of N socks where a total of k socks are blue.

    Found with the equation kCx * (N-k)C(n-x) / NCn
    """
    return math.comb(k, x) * math.comb(N - k, n - x) / math.comb(N, n)
817b754e3871d715420e71246fc67b6183517c84
257,152
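A worked usage sketch for hypergeometric_dist above (the sock counts are illustrative, not from the source):

>>> # P(2 blue) when drawing 4 socks from 10, of which 5 are blue:
>>> # C(5,2) * C(5,2) / C(10,4) = 10 * 10 / 210
>>> round(hypergeometric_dist(2, 10, 4, 5), 4)
0.4762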
def num_existing_reports(ir_json_v6):
    """
    Returns the number of existing clinical reports (i.e. summary of findings)
    for a case.

    This can be used to check that there aren't any existing clinical reports
    before submitting a new one, or to check that there is only one clinical
    report when submitting an exit questionnaire.
    """
    # Default to an empty list so a missing key counts as zero reports
    # instead of raising TypeError on len(None).
    return len(ir_json_v6.get("clinical_report", []))
178a97d591bee222ba094035c187c5cce498d060
596,890
def split_suffix(symbol):
    """
    Splits a symbol such as `__gttf2@GCC_3.0` into a triple representing its
    function name (__gttf2), version name (GCC_3.0), and version number (300).

    The version number acts as a priority. Since earlier versions are more
    accessible and are likely to be used more, the lower the number is, the
    higher its priority. A symbol that has a '@@' instead of '@' has been
    designated by the linker as the default symbol, and is awarded a
    priority of -1.
    """
    if '@' not in symbol:
        return None
    data = [i for i in filter(lambda s: s, symbol.split('@'))]
    _, version = data[-1].split('_')
    version = version.replace('.', '')
    priority = -1 if '@@' in symbol else int(version + '0' * (3 - len(version)))
    return data[0], data[1], priority
b71e96bb2ff9bf694bec541ca3478c696eaee029
62,693
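A usage sketch for split_suffix above, reusing the docstring's example symbol; the second call shows the '@@' default-symbol case:

>>> split_suffix('__gttf2@GCC_3.0')
('__gttf2', 'GCC_3.0', 300)
>>> split_suffix('__gttf2@@GCC_3.0')
('__gttf2', 'GCC_3.0', -1)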
def preprocess_data_for_clustering(df):
    """Prepare data in order to apply a clustering algorithm

    Parameters
    ----------
    df : pandas.DataFrame
        Input data, *i.e.* city-related timeseries, supposed to have
        `station_id`, `ts` and `nb_bikes` columns

    Returns
    -------
    pandas.DataFrame
        Simplified version of `df`, ready to be used for clustering
    """
    # Filter out inactive stations
    max_bikes = df.groupby("station_id")["nb_bikes"].max()
    unactive_stations = max_bikes[max_bikes == 0].index.tolist()
    df = df[~df['station_id'].isin(unactive_stations)]
    # Set timestamps as the DataFrame index
    # and resample it with 5-minute periods
    df = (df.set_index("ts")
          .groupby("station_id")["nb_bikes"]
          .resample("5T")
          .mean()
          .bfill())
    df = df.unstack(0)
    # Drop week-end records
    df = df[df.index.weekday < 5]
    # Gather data regarding hour of the day
    df['hour'] = df.index.hour
    df = df.groupby("hour").mean()
    return df / df.max()
deabc1feb34f1e8bf1fd4b4575964dc666552cfa
72,899
from typing import Iterable

import torch


def _get_input_sizes(iterable: Iterable):
    """
    Gets the sizes of all torch tensors in an iterable. If an element of the
    iterable is a non-torch tensor iterable, it recurses into that iterable
    to continue calculating sizes. Any non-iterable is given a size of None.
    The output consists of nested lists with the same nesting structure as
    the input iterables.
    """
    out_list = []
    for i in iterable:
        if isinstance(i, torch.Tensor):
            out_list.append(list(i.size()))
        elif isinstance(i, Iterable) and not isinstance(i, (str, bytes)):
            # Treat strings as leaves: iterating a string yields strings,
            # which would otherwise recurse forever.
            out_list.append(_get_input_sizes(i))
        else:
            out_list.append(None)
    return out_list
60a89aeecc7c44912e71b911ae60c6d3894608fb
588,110
def _OptionsOrNone(descriptor_proto):
    """Returns the value of the field `options`, or None if it is not set."""
    if descriptor_proto.HasField('options'):
        return descriptor_proto.options
    else:
        return None
e3d968ac6103c6d8ec54700ed8cc41a8563b5b37
191,782
import requests


def push_transaction(transaction, testnet=False):
    """
    Publish a transaction to the bitcoin blockchain

    :param transaction: hex string transaction code
    :param testnet: flag to set publish to mainnet vs testnet
    :return: the `https://chain.so/api/v2/send_tx/` json response
    """
    data = {'tx_hex': transaction}
    network = 'BTCTEST' if testnet else 'BTC'
    response = requests.post(f'https://chain.so/api/v2/send_tx/{network}', data=data)
    return response.json()
542b7248da4283f2ce73a79f79e3f3ab4623e0b5
502,693
def substrings(a, b, n):
    """Return substrings of length n in both a and b"""
    def substring(s, n):
        # Slide a window of width n across s; the range is empty when
        # n > len(s). This also covers the n == 1 case, which the original
        # loop bound (len(s) - 1) silently truncated.
        return {s[i:i + n] for i in range(len(s) - n + 1)}

    subs_a = substring(a, n)
    subs_b = substring(b, n)
    matches = subs_a.intersection(subs_b)
    return matches
35ebddcab08ed915e04a7d276fcd72af91d4fa92
176,279
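A usage sketch for substrings above (the words are illustrative):

>>> sorted(substrings('hello', 'jello', 3))
['ell', 'llo']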
def listify(list_or_value):
    """
    Given a list or a value, transforms it to a list

    Arguments:
        list_or_value (str or list): List or a value

    Returns:
        A list
    """
    if isinstance(list_or_value, list):
        return list_or_value
    else:
        return [list_or_value]
a3d28b4d858e391987885dae3b733df6001745e2
116,117
def compute_cell_index(species_list, cell_barcodes):
    """Produces a cell index dictionary that is keyed on barcode and links
    cell barcodes to per-species indices
    """
    cell_index = {}
    for species in species_list:
        # dict views have no .sort() method; build a sorted list instead
        bc_list = sorted(cell_barcodes.get(species, {}).keys())
        for index, barcode in enumerate(bc_list):
            label = "{species}_cell_{index}".format(**locals())
            if barcode not in cell_index:
                cell_index[barcode] = label
            else:
                cell_index[barcode] = "_".join([cell_index[barcode], label])
    return cell_index
c55be3895a2926bb5b37bb65d76a7b5f232c59d1
242,841
def make_text(rel_links):
    """
    rel_links: list of str with the relevant links for a document.
        Should be what is returned by DocSim.graph[label].

    >> make_text(['../../a.md', '../b.md'])
    "link: [a.md](a.md)\nlink: [b.md](b.md)\n"

    As I have a flat hierarchy, I don't need the full path.
    """
    text = ""
    for link in rel_links:
        filename = link.split("/")[-1]
        text += f"link: [{filename}]({filename})\n"
    return text
518e864dfa3ec81594d9dcbb2c8a7e2c6241f8ce
296,254
def number_of_neighbours_alive(board_array, cell_neighbours):
    """
    Returns the number of live cell neighbours of the given cell.

    Parameters:
        board_array (list(list(int, int...))): The 2D list representing the
            grid of cells
        cell_neighbours (list(int, int...)): A list of integers representing
            the indexes of cells surrounding a given cell.

    Returns:
        int: The total number of neighbours that are alive.
    """
    return sum(1 for cell in cell_neighbours
               if board_array[cell[0]][cell[1]] == 1)
16ac4d48bd7bb93166f5aa31f0166d5fd2042494
286,850
def get_relpron_hyper(testset=False):
    """
    Get mapping from terms to hypernyms, from the RelPron data

    :param testset: use testset (default, use devset)
    :return: dict of {term: hypernym}
    """
    if testset:
        subset = 'testset'
    else:
        subset = 'devset'
    hyper = {}
    with open('../data/relpron/' + subset, 'r') as f:
        for line in f:
            # e.g.
            # OBJ garrison_N: organization_N that army_N install_V
            _, noun, head, _, _, _ = line.strip().split()
            hyper[noun[:-3]] = head[:-2]
    return hyper
e96d69c1375ed4bc25d7a55957993f8eb8b5b96d
330,791
import re


def clean(doc: str) -> str:
    """
    Cleans a string for processing. Removes non-alphanumeric characters
    and extra whitespace. Converts to lowercase.

    Parameters:
        doc (str): String to be cleaned

    Returns:
        str: Cleaned string
    """
    # remove non-alphanumeric or space characters
    doc = re.sub(r'[^a-zA-Z0-9 ]', '', doc)
    # trim continuous whitespace down to 1 space
    doc = re.sub(r'\s+', ' ', doc)
    doc = doc.lower().strip()
    return doc
847b3d5e4281fadeb88132f0431ec9e4b2308493
271,987
import random


def any_int(min_value=0, max_value=100, **kwargs):
    """
    Return random integer from the selected range

    >>> result = any_int(min_value=0, max_value=100)
    >>> type(result)
    <class 'int'>
    >>> result in range(0, 101)
    True
    """
    return random.randint(min_value, max_value)
2253de803c8b577fc8318673805ac85304f20320
658,070
def nth_fibonacci(number: int) -> int:
    """Returns value of nth fibonacci sequence.

    Args:
        number (int): sequential number

    Examples:
        >>> assert nth_fibonacci(4) == 2
    """
    number -= 1
    counter, helper = 0, 1
    while number:
        counter, helper = helper, counter + helper
        number -= 1
    return counter
0479b4e6499c70b9a1905707eb6ea189ed1aaf65
103,351
def gaussian_neg_log_likelihood_diag(parameters, target):
    """Negative log likelihood loss for the Gaussian distribution

    B = batch size, D = dimension of target (num classes), N = ensemble size

    Args:
        parameters (torch.tensor((B, D)), torch.tensor((B, D))):
            mean values and variances of y|x for every x in batch.
        target (torch.tensor((B, N, D))): sample from the normal
            distribution; if not an ensemble prediction, N=1.
    """
    B, N, _ = target.size()
    mu, sigma_sq = parameters
    prec = sigma_sq.pow(-1)
    nll = 0.0
    for mu_b, prec_b, target_b in zip(mu, prec, target):
        sample_var = (target_b - mu_b).var(dim=0)
        trace_term = (prec_b * sample_var).sum() * N / 2
        nll += trace_term - N / 2 * prec_b.prod()
    return nll / B
7af1354c2a9e7a96bc79dd088fb60f7a493a4c5c
304,091
def kilometers_to_miles(kilo):
    """Convert kilometers to miles

    PARAMETERS
    ----------
    kilo : float
        A distance in kilometers

    RETURNS
    -------
    distance : float
    """
    # apply the conversion factor: 1 km = 0.621371 miles
    return kilo * 0.621371
94b7acb4632daf179a180ca83f93ae997e14c87c
607,490
def custom_args(pytestconfig):
    """Parse custom arguments."""
    incoming_args = {
        option: pytestconfig.getoption(option) for option in ["level", "log"]
    }
    return incoming_args
6eb68b360852dc8b2517bd11fc8c9f71b3f1232f
407,585
def paths_concordance_fna(path_fixtures_base):
    """Paths to FASTA inputs for concordance analysis."""
    return [
        _ for _ in (path_fixtures_base / "concordance").iterdir()
        if _.is_file() and _.suffix == ".fna"
    ]
fc4d4c7a988db3602a245b57df1727c27ebbf867
79,851
def strictly_decreasing(py_list):
    """Check if elements of a list are strictly decreasing."""
    return all(x > y for x, y in zip(py_list, py_list[1:]))
9ad54291938b046d2d1912125c186dcd6420bfb1
533,377
def has_flag(flag, content):
    """Determines if a command's content contains a flag.

    Arguments:
        flag -- the flag for which to search
        content -- the content of the command
    """
    return "-" + flag in content.split()
c0fba18fa6a9da448228f3e441b31247dd705720
585,693
def validate_boxes(boxes, width=0, height=0):
    """Check that a set of boxes are valid."""
    assert boxes.shape[1] == 4, 'Func does not support tubes yet'
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    return (
        (x1 >= 0).all() and
        (y1 >= 0).all() and
        (x2 >= x1).all() and
        (y2 >= y1).all() and
        (x2 < width).all() and
        (y2 < height).all())
5f141608ba398b0f4a9f033903d076eb65a0490c
206,179
def boundaries_to_knots(boundaries, degree):
    """Construct the knot sequences used at the boundaries of a B-spline.

    # Arguments
        boundaries : A tuple containing two floats (low, high).
        degree : The degree of the B-spline pieces.

    # Returns
        A 2-tuple containing the lower and upper knot sequences as lists.
    """
    d = int(degree)
    lo, hi = boundaries
    return d * [lo], d * [hi]
4c5e7c4ecbe50b1cbddcec2545003165601f7a8f
109,756
import ast
import copy


def _uniqueify_distinct_ast(tree):
    """Returns a version of tree where all nodes have a unique identity.

    i.e. if a given node is in a different position in the tree, it will be
    given a different identity, even if it is equal on every attribute.

    Args:
        tree: An AST node. This is potentially mutated.

    Returns:
        A uniqueified version of tree.
    """
    # Only copy when a duplicate is found, to avoid worst-case O(n^2) behavior.
    seen_ids = set()

    class CopyTransformer(ast.NodeTransformer):

        def visit(self, node):
            node = self.generic_visit(node)
            if id(node) in seen_ids:
                return copy.copy(node)
            else:
                seen_ids.add(id(node))
                return node

    return CopyTransformer().visit(tree)
d3c5d3cd239d7262d84b5705c7fc1372701a3e0a
447,000
def readVectorRaw(text):
    """Reads a vector from raw text 'v1 ... vn'"""
    items = text.split()
    return [float(v) for v in items]
10f0142227f36b8151da386fd5a72f6e54d4c022
613,531
from pathlib import Path


def find_files_of_type(file_or_directory, file_extensions):
    """Return a list of pathlib.Path of files with chosen extensions

    Parameters
    ----------
    file_or_directory : str
        filepath or a directory
    file_extensions : list
        A list of lowercase file extensions including '.' e.g. ['.txt', '.paf']

    Returns
    -------
    list
        If files with extension are found return list of pathlib.Path objects,
        otherwise return empty list
    """
    file_or_directory = Path(file_or_directory).expanduser()
    if (
        file_or_directory.is_file()
        and "".join(file_or_directory.suffixes).lower() in file_extensions
    ):
        return [file_or_directory]
    elif file_or_directory.is_dir():
        return [
            x for x in file_or_directory.iterdir()
            if "".join(x.suffixes).lower() in file_extensions
        ]
    else:
        return []
c6a20ef66ba198d1971fe0e0f8cc2f18acc107d4
522,378
def to_batch_atom_3_shape(positions):
    """Reshape a tensor or numpy array to have shape (batch_size, n_atoms, 3).

    This allows converting positions in flattened format, as yielded from the
    ``TrajectoryDataset`` object, into the standard shape used by MDAnalysis
    trajectories.

    Parameters
    ----------
    positions : torch.Tensor, numpy.ndarray, or pint.Quantity
        The input can have the following shapes: (batch_size, n_atoms, 3),
        (batch_size, n_atoms * 3), (n_atoms, 3).

    Returns
    -------
    reshaped_positions : torch.Tensor, numpy.ndarray, or pint.Quantity
        A view of the original tensor or array with shape
        (batch_size, n_atoms, 3).
    """
    if (len(positions.shape) == 2 and positions.shape[-1] != 3
            or len(positions.shape) == 3):
        # (batch_size, n_atoms * 3) or (batch_size, n_atoms, 3).
        batch_size = positions.shape[0]
    else:
        batch_size = 1

    if positions.shape[-1] != 3:
        n_atoms = positions.shape[-1] // 3
    else:
        n_atoms = positions.shape[-2]

    standard_shape = (batch_size, n_atoms, 3)
    if positions.shape != standard_shape:
        positions = positions.reshape(standard_shape)
    return positions
1075210da6ac95b765687440cd5f059c5640cc78
560,984
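A minimal sketch of the three accepted layouts for to_batch_atom_3_shape above, using NumPy arrays (the shapes are illustrative):

>>> import numpy as np
>>> to_batch_atom_3_shape(np.zeros((5, 12))).shape   # (batch, atoms*3)
(5, 4, 3)
>>> to_batch_atom_3_shape(np.zeros((4, 3))).shape    # (atoms, 3)
(1, 4, 3)
>>> to_batch_atom_3_shape(np.zeros((5, 4, 3))).shape # already standard
(5, 4, 3)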
def create_cagr(equity, periods=252):
    """
    Calculates the Compound Annual Growth Rate (CAGR) for the portfolio,
    by determining the number of years and then creating a compound
    annualised rate based on the total return.

    Parameters:
        equity - A pandas Series representing the equity curve.
        periods - Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    """
    years = len(equity) / float(periods)
    # Use positional indexing for the final equity value.
    return (equity.iloc[-1] ** (1.0 / years)) - 1.0
77459332f559f4ee14bd1748644dcc35ab077d63
197,146
import time


def formatTime(timeData):
    """Format a time string.

    :param timeData: time string
    :return: formatted time string, e.g. "2022-03-01"
    """
    try:
        res = time.strptime(timeData, "%Y-%m-%d")
    except ValueError:
        raise ValueError("Please enter a valid date, e.g. 2022-02-01")
    return time.strftime("%Y-%m-%d", res)
0157fe33b8b4738bc5021f5c4eac45decf874dac
67,430
def count_characters(file):
    """Count the number of characters in a text file.

    Args:
        file (str): Path of a text file.

    Returns:
        int: Number of characters in the text file.
    """
    with open(file) as f:
        lines = f.readlines()
    return len(''.join(lines))
2df34244087e603b69afe5993f2a21f171491104
568,192
import io


def open_wt(path):
    """Open a file in text mode for writing utf-8."""
    return io.open(path, 'w', encoding='utf-8')
3306ebeb322178cc9aa704110d41afcd9f07deaa
172,373
def convert_proxy_to_requests(proxy):
    """
    Convert a string "ip:port" to the requests package proxy format.
    """
    return {'http': 'http://{}'.format(proxy),
            'https': 'http://{}'.format(proxy)}
b275793c5021cf3e42ed9797ed16c28a13a1fd94
219,745
from typing import List
from typing import Any
import random


def pop_random_element(somelist: List[Any]) -> Any:
    """Pop off a random element from the list."""
    if not somelist:
        raise IndexError('pop_random_element: list is empty')
    idx = random.randrange(0, len(somelist))
    return somelist.pop(idx)
00625f6b58a0fad264fb2e8467273e1ccd9b8761
377,847
from tkinter import Tk, filedialog


def askopenfilename(**kwargs):
    """Return file name(s) from Tkinter's file open dialog."""
    root = Tk()
    root.withdraw()
    root.update()
    filenames = filedialog.askopenfilename(**kwargs)
    root.destroy()
    return filenames
e16b58187ffe517ddb02057233309b2e9d075d0d
456,459
import uuid


def create_runner(hc, type, datacenter, servertype, image, userdata):
    """
    The create_runner function takes a hetzner cloud handle (session) and some
    runner definitions to create a new cloud server to be used as a Gitlab CI
    runner.

    Parameters:
        datacenter: The datacenter where the new Cloud VM should be created
        name: The name of the new Cloud VM (default: type and a UUID,
            e.g. docker-2288393hd92dh9hd23d8h92d)
        server_type: The Cloud VM type. This defines the amount of CPU, RAM
            and disk of the VM
        image: The Operating System base image to be used for spawning the
            new Cloud VM (e.g. Debian)
        ssh_keys: A set of SSH public keys to be installed into the new
            Cloud VM for remote management
        user_data: Takes a cloud-init config for further configuration of the
            new Cloud VM after spawning (e.g. install additional tools)
        labels: Adds a label depending on the "type" parameter that is true.
            This is used to find corresponding Cloud VMs more easily for
            later removal
    """
    response = hc.servers.create(
        datacenter=hc.datacenters.get_by_name(name=datacenter),
        name='{}-{}'.format(type, str(uuid.uuid4())),
        server_type=hc.server_types.get_by_name(name=servertype),
        image=hc.images.get_by_name(name=image),
        ssh_keys=hc.ssh_keys.get_all(),
        user_data=userdata,
        labels={type: "true"}
    )
    return response.action.status
724e474b6af2888cb507a3f4582629e30f96323e
242,270
import re


def _normalize_keyword(
        query_string,
        findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
        normspace=re.compile(r'\s{2,}').sub
):
    """
    Breaks the string into a list based on spaces and double quotes

    :param query_string: String to be normalized
    :param findterms: Return all non-overlapping matches of regex pattern
        in string, as a list of strings
    :param normspace: Return the string obtained by replacing the leftmost
        non-overlapping occurrences of regex pattern in string by replacement
    :return: Normalized keywords in a list
    """
    return [normspace('', (t[0] or t[1]).strip()) for t in findterms(query_string)]
18a674e694a6092222c072e074c40ab673256f63
64,080
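A usage sketch for _normalize_keyword above (the query string is illustrative): double-quoted phrases survive as single terms, bare words are split on whitespace.

>>> _normalize_keyword('red "big apple" pie')
['red', 'big apple', 'pie']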
def get_naive(dt):
    """Gets a naive datetime from a datetime.

    datetime_tz objects can't just have tzinfo replaced with None; you need
    to call asdatetime.

    Args:
        dt: datetime object.

    Returns:
        datetime object without any timezone information.
    """
    if not dt.tzinfo:
        return dt
    if hasattr(dt, "asdatetime"):
        return dt.asdatetime()
    return dt.replace(tzinfo=None)
8ea182436b9bf47ff887f47aaee4a1889b1dbbcf
66,892
def strToDna(seqStr):
    """Returns str having been reduced to capital ACTG."""
    return "".join([c for c in seqStr if c in 'ACGTacgt']).upper()
055064e5f4ce6af91fb54cb5eaec0d246845c343
73,011
import unicodedata


def extract_diacritic(*diacritics):
    """
    Given a collection of Unicode diacritics, return a function that takes a
    Unicode character and returns the member of the collection the character
    has (or None).
    """
    def _(ch):
        decomposed_form = unicodedata.normalize("NFD", ch)
        for diacritic in diacritics:
            if diacritic in decomposed_form:
                return diacritic
    return _
3c9c597c99f23a361c8f7f46ec331af2dcc9563c
341,202
import base64


def check_basic_auth(request, expected_auth_string):
    """
    Returns True if request has a Basic Authorization header matching
    expected_auth_string.

    Given a request and an expected auth string (user:pass) in an HTTP
    Authorization header.
    """
    if 'HTTP_AUTHORIZATION' not in request.META:
        return False
    parts = request.META['HTTP_AUTHORIZATION'].split(' ')
    if len(parts) != 2:
        return False
    (auth_type, auth_string) = parts
    if auth_type.lower() != 'basic':
        return False
    try:
        # b64decode returns bytes on Python 3, so decode before comparing
        # with the expected "user:pass" string. Invalid base64 raises
        # binascii.Error, a subclass of ValueError.
        return base64.b64decode(auth_string).decode('utf-8') == expected_auth_string
    except (TypeError, ValueError):
        return False
5e96b37666be39171380707ed03626ae7fa5610f
283,217
def seconds_to_hhmmss(seconds):
    """Parses the number of seconds after midnight and returns the
    corresponding HH:MM:SS-string.

    Args:
        seconds (float): number of seconds after midnight.

    Returns:
        str: the corresponding HH:MM:SS-string.
    """
    if seconds is None:
        return None
    int_seconds = int(seconds)
    m, s = divmod(int_seconds, 60)
    h, m = divmod(m, 60)
    return "{:02d}:{:02d}:{:02d}".format(h, m, s)
1ec7e0c8af43054be7d32b3c4330adb144b44002
117,416
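A usage sketch for seconds_to_hhmmss above (inputs are illustrative):

>>> seconds_to_hhmmss(3661.5)
'01:01:01'
>>> seconds_to_hhmmss(None) is None
True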
import torch


def th_rand_rng(low, high, n=1):
    """Pull uniform random sample(s) from [low, high)."""
    if n == 1:
        return (high - low) * float(torch.rand(n)[0]) + low
    else:
        return (high - low) * torch.rand(n).numpy() + low
a88f4684e8de915254d31622b372912c5a734f3a
567,482
def t2n(torch_tensor):
    """Convert torch.tensor to np.ndarray."""
    return torch_tensor.cpu().detach().numpy()
c48d8a715acfd832bfbf5a86518d33e20c30cd15
685,212
def is_consistent(x, e):
    """
    Checks for consistency (e.g. if e[n] is v, then x[n] must also be v)
    """
    consistent = True
    for n in x:
        if n in e and x[n] != e[n]:
            consistent = False
    return consistent
f0622ac66d9ad1871ed08e068074a1104acba48b
110,968
def get_query_string(params) -> str:
    """
    Gets the query string of a URL parameter dictionary.

    :param params: URL params.
    :return: Query string.
    """
    if not params:
        return ''
    return '?' + '&'.join([str(k) + '=' + str(v) for k, v in params.items()])
7bc08e0f68333c2f42d31e8879d3978dd94850ac
651,174
def remove_first_last_sec(wav, sr):
    """Removes the first and last second of an audio clip.

    :param wav: The audio time series data points
    :param sr: The sampling rate of the audio
    :return: np.array
    """
    return wav[sr:-sr]
67803c1f037e8e50a0f0c85c5ff1f01a47be1b1d
578,015
def div_tag(div_cls, data) -> str:
    """
    div_cls: name of div class
    data: data to wrap in div

    Returns data wrapped in div class
    """
    return "<div class=\"{}\">\n{}\n</div>\n".format(div_cls, data)
f7b235c4f77362b6266859bc24bb6e451201f3a7
498,689
import struct


def parse_boolean(s):
    """Parses a single boolean value from a single byte"""
    return struct.unpack('<?', s)[0]
09322cbca1ab9cb04fce03ee3ca976bbef2fa53b
103,734
def detectencoding_unicode(input, final=False):
    """
    Detect the encoding of the unicode string ``input``, which contains the
    beginning of a CSS file. The encoding is detected from the charset rule
    at the beginning of ``input``. If there is no charset rule, ``"utf-8"``
    will be returned as the encoding.

    If the encoding can't be detected yet, ``None`` is returned as the
    encoding. ``final`` specifies whether more data will be available in
    later calls or not. If ``final`` is true, ``detectencoding_unicode()``
    will never return ``None`` as the encoding.
    """
    prefix = '@charset "'
    if input.startswith(prefix):
        pos = input.find('"', len(prefix))
        if pos >= 0:
            return (input[len(prefix):pos], True)
    elif final or not prefix.startswith(input):
        # If this is the last call and we haven't determined an encoding yet
        # (or the string definitely doesn't start with prefix), default to UTF-8.
        return ("utf-8", False)
    return (None, False)
23201258eee7e696b8cf937f1e904207b7b19f83
254,106
def assign_staging_jobs_for_missing_clusters(
    support_and_staging_matrix_jobs, prod_hub_matrix_jobs
):
    """Ensure that for each cluster listed in prod_hub_matrix_jobs, there is
    an associated job in support_and_staging_matrix_jobs. This is our
    last-hope catch-all to ensure there are no prod hub jobs trying to run
    without an associated support/staging job.

    Args:
        support_and_staging_matrix_jobs (list[dict]): A list of dictionaries
            representing jobs to upgrade the support chart and staging hub on
            clusters that require it.
        prod_hub_matrix_jobs (list[dict]): A list of dictionaries representing
            jobs to upgrade production hubs that require it.

    Returns:
        support_and_staging_matrix_jobs (list[dict]): Updated to ensure any
            clusters present in prod_hub_matrix_jobs but missing from
            support_and_staging_matrix_jobs now have an associated
            support/staging job.
    """
    prod_hub_clusters = {job["cluster_name"] for job in prod_hub_matrix_jobs}
    support_staging_clusters = {
        job["cluster_name"] for job in support_and_staging_matrix_jobs
    }
    missing_clusters = prod_hub_clusters.difference(support_staging_clusters)

    if missing_clusters:
        # Generate support/staging jobs for clusters that don't have them but
        # do have prod hub jobs. We assume they are missing because neither
        # the support chart nor staging hub needed an upgrade. We set
        # upgrade_support to False. However, if prod hubs need upgrading,
        # then we should upgrade staging, so set that to True.
        for missing_cluster in missing_clusters:
            provider = next(
                (
                    hub["provider"]
                    for hub in prod_hub_matrix_jobs
                    if hub["cluster_name"] == missing_cluster
                ),
                None,
            )
            prod_hubs = [
                hub["hub_name"]
                for hub in prod_hub_matrix_jobs
                if hub["cluster_name"] == missing_cluster
            ]

            new_job = {
                "cluster_name": missing_cluster,
                "provider": provider,
                "upgrade_support": False,
                "reason_for_support_redeploy": "",
                "upgrade_staging": True,
                "reason_for_staging_redeploy": (
                    "Following prod hubs require redeploy: " + ", ".join(prod_hubs)
                ),
            }
            support_and_staging_matrix_jobs.append(new_job)

    return support_and_staging_matrix_jobs
3e8809272046dde50cf148dd6dd9ada3e63df9d8
692,721
import copy
import logging


def transfer_verification(model_state_dict, partial_state_dict, modules):
    """Verify tuples (key, shape) for input model modules match specified modules.

    Args:
        model_state_dict (OrderedDict): the initial model state_dict
        partial_state_dict (OrderedDict): the trained model state_dict
        modules (list): specified module list for transfer

    Return:
        (boolean, OrderedDict): whether transfer is allowed, and the filtered
            partial state_dict
    """
    partial_modules = []
    partial_state_dict_org = copy.deepcopy(partial_state_dict)
    for key_p, value_p in partial_state_dict_org.items():
        if any(key_p.startswith(m) for m in modules):
            if key_p in list(model_state_dict.keys()):
                if value_p.shape == model_state_dict[key_p].shape:
                    partial_modules += [(key_p, value_p.shape)]
                else:
                    logging.info(f'Removing {key_p}...')
                    del partial_state_dict[key_p]
    return len(partial_modules) > 0, partial_state_dict
ef2733cdaa05f7bac71e4797215b07e1a84d7556
252,304
from typing import List
from typing import Union
from typing import Set
import re


def filter(items: List[str], query: Union[str, List[str]]) -> Set[str]:
    """Filter items in list.

    Filters items in list using full match, substring match and regex match.

    Args:
        items (List[str]): Input list.
        query (Union[str, List[str]]): Filter expression.

    Returns:
        Set[str]: Filtered items.
    """
    matches: Set[str] = set()
    if isinstance(query, str):
        query = [query]
    for query_item in query:
        # Full match
        matches = matches.union({item for item in items if query_item == item})
        # Substring match
        matches = matches.union({item for item in items if query_item in item})
        # Regular expression match
        regex = re.compile(query_item, re.IGNORECASE | re.ASCII)
        matches = matches.union({item for item in items if regex.search(item)})
    return matches
5115c6d19a198ae8a1abfe80c0e7669a773af12d
512,293
def maplookup(l, lookup):
    """Look up all elements of a sequence in a dict-valued variable."""
    return [lookup[i] for i in l]
0a6cb898caff0e3b4d69f00f80bae73aa07c0d38
262,825
import math


def calc_linear_distance(x1, y1, x2, y2):
    """
    Calculates the distance between 2 given points

    :param x1: x coordinate at point 1
    :param y1: y coordinate at point 1
    :param x2: x coordinate at point 2
    :param y2: y coordinate at point 2
    :return: (float) Linear distance from point to point
    """
    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
e78a5d70fa7f5db6d0ffecc03b6183398d7b559f
191,041
def get_output_node(node_tree):
    """Retrieve the output node."""
    output = [node for node in node_tree.nodes
              if node.type == 'OUTPUT_MATERIAL'][0]
    return output
7f61583022f377a746cd0f33e2d5cf7aeacdaf75
501,802
def sweep(name, values):
    """Sweeps the hyperparameter across different values."""
    return [{name: value} for value in values]
f5395dbd8472d1e32a96eaf6e6790094aaba2dbd
464,821
import string


def is_curie(value):
    """Return True iff the value is an OBO-id CURIE like EFO:000001"""
    return (
        value.count(":")
        and all(len(part) > 0 for part in value.split(":"))
        and all(c in string.digits for c in value.split(":")[1])
    )
5fc4b1c681b0be9e2a36cdc8814cbff35d67a602
411,020
import json


def readConfig(config_file_name="./config.json"):
    """Read the configuration file (json)."""
    with open(config_file_name) as json_file:
        config = json.load(json_file)
    return config
2c60dd475084844b93d6bdf66a2511672aabb13f
534,794
def read_file(datafile):
    """Reads the content of a file and splits the line content into a list"""
    content = []
    # The with-block closes the file automatically; no explicit close needed.
    with open(datafile, encoding='utf8') as f:
        for line in f:
            content.append(line.split())
    return content
cb409410937d599e9527cf8b9bc367703201a5ee
463,169
def strip_hidden(key_tuples, visibilities):
    """Filter each tuple according to visibility.

    Args:
        key_tuples: A sequence of tuples of equal length (i.e. rectangular)
        visibilities: A sequence of booleans equal in length to the tuples
            contained in key_tuples.

    Returns:
        A sequence equal in length to key_tuples where the items are tuples
        with a length corresponding to the number of items in visibilities
        which are True.
    """
    result = []
    for key_tuple in key_tuples:
        if len(key_tuple) != len(visibilities):
            raise ValueError(
                "length of key tuple {} is not equal to length of visibilities {}".format(
                    key_tuple, visibilities
                )
            )
        filtered_tuple = tuple(
            item for item, visible in zip(key_tuple, visibilities) if visible
        )
        result.append(filtered_tuple)
    return result
bd4ee5d1305299743256c16d9f8a1c9ec3e6715e
597,706
def deduplicate(data):
    """Adds the values from any duplicated genes.

    Args:
        data (pandas.DataFrame ~ (num_samples, num_genes))

    Returns:
        pandas.DataFrame
    """
    return data.groupby(data.columns, axis=1).sum()
f5d4054d11d772e28b53adbdf1a2070eb9c5c873
454,356
import re


def parse_line(line):
    """
    Parse one line of output from the controller and extract the
    information we need.

    :param line: one line of output from the "show ap database long" command
    :return: dict representing the AP and its key values, like MAC address
        or given name
    """
    name, group, apType, ipAddr, status, flags, switchAddr, \
        standbyAddr, mac, serNum, *other = re.split(r' {2,}', line)
    ap = {
        "invNum": "",
        "serNum": serNum,
        "mac": mac,
        "name": name
    }
    return ap
d0193154d2306c305a4a5f058ec388c16b84dced
629,412
import re


def _extract_failed_ps_instances(err_msg):
    """Return a set of potentially failing ps instances from error message."""
    tasks = re.findall("/job:ps/replica:0/task:[0-9]+", err_msg)
    return set(int(t.split(":")[-1]) for t in tasks)
b7cf036584dde71bfdb0bf0ead9017b94b7280e3
648,330
from textwrap import dedent


def merged_run_summary(base_test_dir):
    """Mock merged run summary file for testing."""
    summary_content = dedent(
        """\
        # %ECSV 1.0
        # ---
        # datatype:
        # - {name: date, datatype: string}
        # - {name: run_id, datatype: int64}
        # - {name: run_type, datatype: string}
        # - {name: n_subruns, datatype: int64}
        # - {name: run_start, datatype: string}
        # - {name: ra, unit: deg, datatype: float64}
        # - {name: dec, unit: deg, datatype: float64}
        # - {name: alt, unit: rad, datatype: float64}
        # - {name: az, unit: rad, datatype: float64}
        # meta: !!omap
        # - __serialized_columns__:
        #     run_start:
        #       __class__: astropy.time.core.Time
        #       format: isot
        #       in_subfmt: '*'
        #       out_subfmt: '*'
        #       precision: 3
        #       scale: utc
        #       value: !astropy.table.SerializedColumn {name: run_start}
        # schema: astropy-2.0
        date run_id run_type n_subruns run_start ra dec alt az
        2019-11-23 1611 DRS4 5 2019-11-23T22:14:09.000 22.0 3.1 0.96 2.3
        2019-11-23 1614 PEDCALIB 10 2019-11-23T23:33:59.000 4.6 2.1 1.1 4.5
        2019-11-23 1615 DATA 61 2019-11-23T23:41:13.000 8.1 45.1 1.5 4.6
        2019-11-23 1616 DATA 62 2019-11-24T00:11:52.000 3.2 4.2 0.9 1.6
        2020-01-17 1804 DRS4 35 2020-01-18T00:44:06.000 2.1 42.9 11.3 4.7
        2020-01-17 1805 PEDCALIB 62 2020-01-18T00:11:52.000 13.9 21.9 17.9 1.6
        2020-01-17 1806 PEDCALIB 35 2020-01-18T00:44:06.000 8.6 29.1 45.5 6.9
        2020-01-17 1807 DATA 35 2020-01-18T00:44:06.000 6.6 2.8 70.4 10.1
        2020-01-17 1808 DATA 35 2020-01-18T00:44:06.000 8.6 9.2 60.8 3.2
        2020-01-17 1809 PEDCALIB 4 2020-01-18T00:44:06.000 6.9 4.2 16.8 11.2"""
    )
    merged_summary_dir = base_test_dir / "OSA/Catalog"
    merged_summary_dir.mkdir(parents=True, exist_ok=True)
    file = merged_summary_dir / 'merged_RunSummary.ecsv'
    file.touch()
    file.write_text(summary_content)
    return file
4cce20d706ea085fee9f24aad5b126fc176c0baa
335,900
def stringify_datetime_types(data: dict):
    """Stringify date and datetime types."""
    for key in ("date", "timestamp"):
        if key in data:
            data[key] = data[key].isoformat()
    return data
e23b2a4e0921d3e50f40f01d5912d80141496180
406,509
from typing import IO
from typing import List


def read_expenses(input_io: IO) -> List[int]:
    """
    Read expenses from a file and return them as a list of integers.

    Parameters
    ----------
    input_io: IO
        Stream of expenses.

    Return
    ------
    list[int]
        a list of integers (expenses).
    """
    expenses = []
    while line := input_io.readline():
        expenses.append(int(line))
    return expenses
406aefa8e247cc7603c7398cb9a527db0d061658
453,977
import shutil


def is_disk_full(path):
    """Check if the disk is full (less than 50 MB free)."""
    total, used, free = shutil.disk_usage(path)
    mb = 1000 * 1000  # bytes per (decimal) megabyte
    return free < 50 * mb
699e410f01753c71bfa6cd88c1a16ed96e31101f
515,718
def get_padding(kernel_size, stride: int, dilation: int) -> int:
    """Return size of the padding needed

    Args:
        kernel_size ([type]): kernel size of the convolutional layer
        stride (int): stride of the convolutional layer
        dilation (int): dilation of the convolutional layer

    Returns:
        int: padding
    """
    full_kernel = (kernel_size - 1) * dilation + 1
    return full_kernel // 2
1a2711d68f738f3f0d1b11620bc29f1e1bcf0680
477,274
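A quick check of the padding arithmetic in get_padding above (values are illustrative):

>>> get_padding(3, 1, 1)  # full kernel (3-1)*1+1 = 3 -> pad 1
1
>>> get_padding(3, 1, 2)  # dilation 2 widens the kernel to 5 -> pad 2
2
>>> get_padding(7, 1, 1)  # full kernel 7 -> pad 3
3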
def scaled_images(image):
    """Scales an image to the range [-1, 1]."""
    return (image / 255.0) * 2.0 - 1.0
85885438d501930e7f38c9e981d2c09b0a3b05c3
274,633
def _getText(node):
    """
    Arguments:
        node: Node to extract the text from.

    Returns:
        Text from the node provided.
    """
    return " ".join(child.data.strip() for child in node.childNodes
                    if child.nodeType == child.TEXT_NODE)
2cb28c5d428ca3b24d48c2d58653606ce018fd17
284,063
import math


def getXY120(i, j):
    """Get xy coords for i,j indices in fig 2.4 of the DIF3D manual.

    Assumes triangle side length 1.0.
    """
    height = math.sqrt(3) / 2.0
    y = j * height
    x = -0.5 * j + 0.5 * i
    return x, y
f39040cd2c2eef19f316a22209b3048b9cf44639
442,919
import torch


def aggregate(gradients, **kwargs):
    """NaN-resilient median coordinate-per-coordinate rule.

    Args:
        gradients: Non-empty list of gradients to aggregate
        ...: Ignored keyword-arguments

    Returns:
        NaN-resilient, coordinate-wise median of the gradients
    """
    return torch.stack(gradients).median(dim=0)[0]
6528e3b9afab40e2b68e22d9a093acae977c6f5e
289,387
def gettext(text):
    """Dummy `gettext` replacement to make string extraction tools scrape
    strings marked for translation.
    """
    return text
e2c19411c3c990b76b55cf37d87d7f11ad6c4186
587,679
def standoff(sin_rec, pdyn, a0, a1, a2):
    """Returns standoff position (s/w) given IMF sine rectifier and dynamic
    pressure. a0/1/2 are tuning parameters (s0/1/2, w0/1/2).
    """
    d = (a0 + a1 * sin_rec) * (pdyn ** (-1 / a2))
    return d
8fcce4447384be95e7b046b6182e1f03dec101c9
218,884
def invert(f):
    """Simple function call: invert(val)"""
    return 1.0 - f
47b47c57f07cdadcc112b57ce22feae7e13d9d18
572,170
def format_pvalue(p_value, alpha=0.05, include_equal=True):
    """
    If the p-value is lower than alpha, replace it with e.g. "<0.05";
    otherwise, round it to three decimals.

    :param p_value: input p-value as a float
    :param alpha: significance level
    :param include_equal: include equal sign ('=') in the p-value
        (e.g., '=0.06') or not (e.g., '0.06')
    :return: p_value: processed p-value (replaced by "<alpha" or rounded
        to three decimals) as a str
    """
    if p_value < alpha:
        p_value = "<" + str(alpha)
    else:
        if include_equal:
            p_value = '=' + str(round(p_value, 3))
        else:
            p_value = str(round(p_value, 3))
    return p_value
aa6506b14b68746f4fa58d951f246321e8b5a627
709,329
import re


def _camel_to_python(name):
    """Converts camelcase to Python case."""
    return re.sub(r'([a-z]+)([A-Z])', r'\1_\2', name).lower()
228f31ed3a6d736e28dfab35733dcc78e12a0634
67,651
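A usage sketch for _camel_to_python above (the names are illustrative); note that capital runs with no preceding lowercase letter are simply lowercased:

>>> _camel_to_python('camelCaseName')
'camel_case_name'
>>> _camel_to_python('HTTPServer')
'httpserver'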
from datetime import datetime


def get_unix(str_ts, date_format):
    """Get unix timestamp from a string timestamp in date_format.

    Args:
        str_ts: string timestamp in `date_format`
        date_format: datetime time stamp format

    Returns:
        float: unix timestamp
    """
    return datetime.strptime(str_ts, date_format).timestamp()
6d0c591734fa78defed11cd5ed8c66da63ad3b5b
13,457
def _get_youtube_y(insta_y, fa_insta_height, font_size):
    """Get YouTube icon's y position given Instagram y for centre-alignment."""
    return insta_y + fa_insta_height + font_size // 10
7ccd77a17091c93a3db3fdd05f943d1b8427741b
550,333
def function_beginning(graphparser):
    """Writes the very beginning of the create function, i.e. prototype and
    vxCreateGraph
    """
    num_input_imgs = len(graphparser.image_nodes.input_nodes_indexed_names)
    num_output_imgs = len(graphparser.image_nodes.output_nodes_indexed_names)
    parsed_string = """\
/* Quick hack to avoid graph_manager dependency while avoiding
   hacking up the graph_parser too much internally */
#define graphmanager_get_context(graph_manager) context

bool %s_create(vx_context context, vx_graph graph_skeleton,
               vx_image input_images[%s], vx_image output_images[%s],
               void *userdata)
{
""" % (graphparser.graphname, num_input_imgs, num_output_imgs)
    return parsed_string
51761ba425136ab67a2d84c0523b77b4945e373f
410,117
def perc_range(n, min_val, max_val, rounding=2):
    """
    Return percentage of `n` within `min_val` to `max_val` range. The
    ``rounding`` argument is used to specify the number of decimal places
    to include after the floating point.

    Example::

        >>> perc_range(40, 20, 60)
        50.0
    """
    return round(
        min([1, max([0, n - min_val]) / (max_val - min_val)]) * 100, rounding)
379515f6c0483b4bfed93d0c1012bb2ca111e410
703,032
async def root():
    """Weather Advisor Welcome"""
    return {
        "message": (
            "Hello, welcome to Weather advisor! Enter a temp, whether there "
            "is a chance of rain, and whether there is a chance of snow in "
            "the format temp/rain/snow."
        )
    }
27267348373de0a5d5b72b117de3b9b32351fcd0
407,525
def solution(string):
    """Returns a reversed string using the built-in slice syntax."""
    return string[::-1]
c2bf30453cab27be56133aa62fa7cc5de960ec9f
196,007
def build_proc_info(release, config_name, hostname, proc, port):
    """
    Return a dictionary with exhaustive metadata about a proc. This is saved
    as the proc.yaml file that is given to the runner.
    """
    build = release.build
    app = build.app
    proc_info = {
        'release_hash': release.hash,
        'config_name': config_name,
        'settings': release.config_yaml or {},
        'env': release.env_yaml or {},
        'version': build.tag,
        'build_md5': build.file_md5,
        'build_url': build.file.url,
        'buildpack_url': build.buildpack_url,
        'buildpack_version': build.buildpack_version,
        'app_name': app.name,
        'app_repo_url': app.repo_url,
        'app_repo_type': app.repo_type,
        'host': hostname,
        'proc_name': proc,
        'port': port,
        'user': release.run_as or 'nobody',
        'group': 'nogroup',
        'volumes': release.volumes or [],
        'mem_limit': release.mem_limit,
        'memsw_limit': release.memsw_limit,
    }
    if build.os_image is not None:
        proc_info.update({
            'image_name': build.os_image.name,
            'image_url': build.os_image.file.url,
            'image_md5': build.os_image.file_md5,
        })
    return proc_info
337470d895db43300ec012143b322ef55f2036e5
212,566
def prod_minus_index(arr, i):
    """Product of a list excluding the index i"""
    total = 1
    counter = 0
    while counter < len(arr):
        if counter != i:
            total *= arr[counter]
        counter += 1
    return total
7c75a69ad89ab8389900038bdc91be1aa768a838
448,849
def lib_ext(shared):
    """Returns the appropriate library extension based on the shared flag"""
    return '.so' if shared else '.a'
9a836922ec3b6572efd2a6e11670ff091a1deb87
474,230
from typing import Callable
from typing import Any
from typing import Dict
import inspect


def _get_default_args(func: Callable[..., Any]) -> Dict[str, Any]:
    """
    Get default arguments of the given function.
    """
    return {
        k: v.default
        for k, v in inspect.signature(func).parameters.items()
        if v.default is not inspect.Parameter.empty
    }
d2ffa3ac2babc1aa21ef8737ac8ed1d11c3af034
53,946
import hashlib


def get_hash(s):
    """Get the SHA256 hash hex digest of a string input"""
    return hashlib.sha256(s.encode('utf-8')).hexdigest()
b7463ec6f766b45c8703588b2dbd65858ab1cc69
332,646
def comp_volume(self):
    """Compute the volume of the Frame

    Parameters
    ----------
    self : Frame
        A Frame object

    Returns
    -------
    Vfra: float
        Volume of the Frame [m**3]
    """
    Sfra = self.comp_surface()
    return Sfra * self.Lfra
17b1222c19a4a2a874f474023eb121763fb34359
555,917
import pkg_resources


def parse_version(version_string):
    """
    Parse string as a version object for comparison

    Example: parse_version('1.9.2') > parse_version('1.9.alpha')

    See docs for pkg_resources.parse_version as this is just a wrapper
    """
    return pkg_resources.parse_version(version_string)
678554ac2095bd2939f634c7c45bddbac86ec3d4
40,086
def algebraic_equasion_function(x):
    """
    This function makes a calculation for an algebraic equation.
    It calculates f(x) with the given equation and x as a parameter.
    """
    formula = x**2 + 6*x + 9
    return formula
72762b4c015c3ffed8b7c5896aa0badaaf9518b4
367,950
def drop_constant_column(dataframe):
    """Drops constant value columns of pandas dataframe."""
    return dataframe.loc[:, (dataframe != dataframe.iloc[0]).any()]
cae8cc18e191a9373425ca679b8e152a5dd6e857
630,609
from pathlib import Path


def get_label(iemocap_dir):
    """Get labels for every utterance under the form "utt_name:label".

    Parameters
    ----------
    iemocap_dir : str
        Path to the `IEMOCAP_full_release` directory.

    Returns
    -------
    dict
        Dictionary of pairs utt_name:label (e.g., 'Ses01F_impro01_F000': 'neu').
    """
    emo_dict = dict()

    def get_info(line):
        line = line.split()
        name = line[3]
        label = line[4]
        # Convert "exc" (excited) to "hap" (happy)
        label = "hap" if label == "exc" else label
        return name, label

    for filepath in Path(iemocap_dir).rglob("EmoEvaluation/*.txt"):
        with open(filepath, "r") as fin:
            lines = fin.readlines()
            # filter lines with label
            lines = filter(lambda x: x[0] == "[", lines)
            emo_dict.update(dict(map(get_info, lines)))
    return emo_dict
4295342b60376a9d02bc1baba39bc78215f05a67
249,257
def find_field_key(fieldset_fields, field_source, key):
    """Find a field by source and key"""
    for field in fieldset_fields:
        if field.Source == field_source and any(key in s for s in field.Keys):
            return field
    return None
2552e856ca8912f47f07ab50ea99c083a363b335
369,763
def has_triple_string_quotes(string_contents: str) -> bool:
    """Tells whether string token is written as inside triple quotes."""
    if string_contents.startswith('"""') and string_contents.endswith('"""'):
        return True
    elif string_contents.startswith("'''") and string_contents.endswith("'''"):
        return True
    return False
708d6848c8e7f58f8f93818f7f3a5095bf4e2fa3
695,239