Dataset schema: content (string, 39 to 9.28k characters), sha1 (string, 40 characters), id (int64, 8 to 710k).
def lists(iters):
    """Create a sequence of lists from a mapping / iterator / generator."""
    return list(map(list, iters))
7bfb7e5eb974d1afce81c34ed494a922d191c595
363,352
def get_leaf(path):
    """Returns the last element of a path."""
    return str(path).split('/')[-1]
4c311f98a8225146845e7596e39aad0b15a93252
273,060
import torch


def denormalize(tensor, mean, std, inplace=False):
    """Denormalize a tensor image with mean and standard deviation.

    Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels,
    this transform will denormalize each channel of the input
    ``torch.*Tensor`` i.e.
    ``input[channel] = (input[channel] * std[channel]) + mean[channel]``

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be denormalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool): Perform operation on same tensor.

    Returns:
        Tensor: Denormalized Tensor image.
    """
    assert isinstance(tensor, torch.Tensor), f"Input tensor should be a torch tensor. Got {type(tensor)}"
    assert tensor.is_floating_point(), f"Input tensor should be a float tensor. Got {tensor.dtype}"
    assert tensor.ndim >= 3, f"Expected tensor to be a tensor image of size (..., C, H, W), got " \
                             f"tensor.size()={tensor.size()}"

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    tensor.mul_(std).add_(mean)
    return tensor
a5480f9244ece4e8db9a562ef18333d1ba492201
511,876
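A minimal round-trip sketch of denormalize (the three-channel mean/std values below are made-up illustration values, not from the source):

import torch

mean = [0.485, 0.456, 0.406]   # hypothetical per-channel statistics
std = [0.229, 0.224, 0.225]

img = torch.rand(3, 8, 8)      # stands in for a normalized image
restored = denormalize(img, mean, std)

# Re-normalizing by hand recovers the original tensor.
renorm = (restored - torch.tensor(mean).view(-1, 1, 1)) / torch.tensor(std).view(-1, 1, 1)
assert torch.allclose(renorm, img, atol=1e-6)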
from collections import Counter


def count_elements(data_lst):
    """Count how often each element occurs in a list.

    Parameters
    ----------
    data_lst : list
        List of items to count.

    Returns
    -------
    counts : collections.Counter
        Counts for how often each item occurs in the input list.
    """
    counts = Counter(data_lst)

    try:
        counts.pop(None)
    except KeyError:
        pass

    return counts
a12f0a35a228e8a8627a8fcfc703d3231984e3f4
23,467
def support(pauli: str):
    """Returns indices where the Pauli string is non-identity."""
    has_support = lambda c: c != "I"
    inds = []
    for (i, p) in enumerate(pauli):
        if has_support(p):
            inds.append(i)
    return inds
2ef93bcc394b8c845106f4c198a9fb7d2b8db776
318,781
from string import ascii_letters, digits, punctuation


def valid_blockname(name):
    """Tests if a 5-character string is a valid blockname.

    Allows names with the first three characters either letters, numbers,
    spaces or punctuation, the fourth character a digit or a space, and the
    last character a digit.
    """
    digit_space = digits + ' '
    letter_digit_space_punct = ascii_letters + digit_space + punctuation
    return all([s in letter_digit_space_punct for s in name[0:3]]) and \
        (name[3] in digit_space) and (name[4] in digits)
f91748b310a6503e6e139f5aaec7da49b09cebb7
547,925
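A brief sanity check of the rule (example names chosen for illustration):

print(valid_blockname("ABC12"))  # True: three letters, then digit, then digit
print(valid_blockname("ABCX2"))  # False: fourth character must be a digit or space
print(valid_blockname("#+! 7"))  # True: punctuation allowed in the first three slots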
def encode_bytearray(byte_array):
    """Encode a QByteArray in a string."""
    return str(byte_array.toBase64())
728ed5897946636ebb5daff6c96218fc05f66ed3
416,105
def is_metoffice(sheet_cell):
    """Is this variable produced by the Met Office?"""
    if not sheet_cell:
        return False

    not_producing_values = ['CHECK', 'FALSE']
    for value in not_producing_values:
        if value == sheet_cell.upper():
            return False

    return True
64c18e6b8f5f5105fb5f6df9340d2ff03a48c4d0
373,436
def count_sequences(fasta_file: str) -> int:
    """Count sequences in a fasta file."""
    n = 0
    with open(fasta_file, "r") as file:
        for line in file:
            if line.startswith(">"):
                n += 1
    return n
7ae6446e711fadb79a626589ddb4cf0837c98a19
303,744
import torch


def cg(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    """
    Conjugate gradient descent. The algorithm is from page 312 of the book
    "Applied Numerical Linear Algebra" by James W. Demmel.

    Approximately solve x = A^{-1}b, or Ax = b. Here A is F and b is g.
    A good thing about this algorithm is that, to compute x, we only need
    access to a function f: d -> Ad, where f takes an arbitrary vector d and
    outputs the Fisher-vector product Ad, where A is the Fisher information
    matrix, i.e. the Hessian of the KL divergence.

    This method uses a Krylov-space solver. See "A Brief Introduction to
    Krylov Space Methods for Solving Linear Systems"
    http://www.sam.math.ethz.ch/~mhg/pub/biksm.pdf
    Specifically, it creates a sequence of Krylov subspaces K_n for Ax = b,
    and finds approximate solutions x_n in K_n.

    Parameters
    ----------
    f_Ax (function): A function that takes a vector x as input and outputs
        Ax, where A is the Fisher information matrix.
    b (torch.Tensor): The right-hand side of Ax = b.
    cg_iters (int): An integer, the number of conjugate gradient iterations.
    residual_tol (float): A float, the termination condition for the
        algorithm.

    Returns
    -------
    x (torch.Tensor): result of conjugate gradient
    """
    p = b.clone()
    r = b.clone()
    x = torch.zeros_like(b)
    rdotr = r.dot(r)

    for i in range(cg_iters):
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / rdotr
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break

    return x
9d4ef8de8900230867fb37d0683385463ac8be52
303,700
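A quick check of cg on a small symmetric positive-definite system (a sketch: f_Ax here is an explicit matrix-vector product rather than a Fisher-vector product; torch.linalg.solve requires a recent PyTorch):

import torch

A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
b = torch.tensor([1.0, 2.0])

x = cg(lambda v: A @ v, b, cg_iters=10)
print(x)                        # close to the direct solution
print(torch.linalg.solve(A, b))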
def clean_stock_selection(uncleaned_stocks):
    """
    Function to perform some cleaning before using urllib.request and
    finance.yahoo.com to obtain all the stock info.

    Inputs:
        - uncleaned_stocks (pd.DataFrame): pd.DataFrame containing the stocks
          (name, wkn, exchange, ticker, ISIN, ...)

    Returns:
        - pd.DataFrame cleaned s.t. every stock has an industry.
    """
    return uncleaned_stocks[uncleaned_stocks.industry != "not_found"]
faf8e870ce594417c3aae7b73c66d245f49af00f
427,935
def footer() -> str:
    """
    Return the footer of the LaTeX document.

    :return: LaTeX document footer.
    """
    return "\\end{tikzcd}\n\\end{document}"
f3d6615d089e489a88e6eba3c87a0fa9a655c20c
672,859
def read_file(in_file):
    """
    Read Input File

    Args:
        in_file: input file path

    Returns:
        no_of_caches: Number of cache servers
        cache_size: Each cache server's size
        video_sizes: List of video sizes
        ep_dc_latencies: List of latency between Endpoint and Data Center
        ep_no_of_caches: List of Number of connected Cache Servers in each Endpoint
        ep_cache_latencies: List of each endpoint's latency to a connected cache server
        video_ep_requests: Dict of requests of each video from each Endpoint
    """
    no_of_videos = 0
    no_of_endpoints = 0
    no_of_request_desc = 0
    no_of_caches = 0
    cache_size = 0        # Each cache server's size
    video_sizes = []      # List of video sizes
    ep_dc_latencies = []  # List of latency between Endpoint and Data Center
    ep_no_of_caches = []  # List of Number of connected Cache Servers in each Endpoint

    # List of each endpoint's latency to a connected cache server
    # [{cache_idx: latency, 0: 100, 1: 150, ...}, ...]
    ep_cache_latencies = []

    # Dict of requests of each video from each endpoint
    # {(video_idx, endpoint_idx): no_of_requests, (3, 0): 1500, (0, 1): 1200, ...}
    video_ep_requests = {}

    # Read the file into variables
    with open(in_file, 'r') as infile:
        # Process lines and save data into variables
        no_of_videos, no_of_endpoints, no_of_request_desc, no_of_caches, cache_size = \
            [int(x) for x in infile.readline().strip().split(' ')]
        video_sizes = [int(x) for x in infile.readline().strip().split(' ')]

        for ep_idx in range(0, no_of_endpoints):
            ep_dc_latency, no_of_connect_caches = \
                [int(x) for x in infile.readline().strip().split(' ')]
            ep_dc_latencies.append(ep_dc_latency)
            ep_cache_latency = {}
            for cache_cnt in range(0, no_of_connect_caches):
                cache_idx, cache_latency = \
                    [int(x) for x in infile.readline().strip().split(' ')]
                ep_cache_latency[cache_idx] = cache_latency
            ep_no_of_caches.append(no_of_connect_caches)
            ep_cache_latencies.append(ep_cache_latency)

        for request_cnt in range(0, no_of_request_desc):
            [video_idx, ep_idx, request_size] = \
                [int(x) for x in infile.readline().strip().split(' ')]
            video_ep_requests[(video_idx, ep_idx)] = request_size

    return (no_of_caches, cache_size, video_sizes, ep_dc_latencies,
            ep_no_of_caches, ep_cache_latencies, video_ep_requests)
01569660b2349d2460ac51d001087dedc846bf03
135,628
def dec_to_bin(dec_val, num_bits):
    """
    Helper function to convert a decimal value to a signed 2's complement
    binary value.
    """
    mod_fac = (1 << num_bits)
    format_str = '0{}b'.format(num_bits)
    return format((dec_val + mod_fac) % mod_fac, format_str)
520f25f8f3f2f14e497cb12eabe72dec2fd486da
524,651
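For example, with 4 bits:

print(dec_to_bin(5, 4))   # '0101'
print(dec_to_bin(-3, 4))  # '1101'  (two's complement: 16 - 3 = 13)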
from typing import Optional
from typing import Union


def type_union(*args: type) -> Optional[type]:
    """Finds the union of types, eliminating `None` where possible."""
    union_t = None
    for t in args:
        if union_t is None:
            union_t = t
        elif t is not None:
            union_t = Union[union_t, t]
    return union_t
1de257308d037460a96d66df8f1daaeb19128fff
600,478
def add_dividers(pitches: list, interval: int) -> list:
    """
    Adds dividing sounds to a list of pitches at regular intervals. This is
    useful for when the data is periodic (e.g. temperature by month for
    multiple years), or when it is so long that rests may help with easier
    listening.

    :param pitches: prepared list of pitches
    :param interval: integer index (if 5, a divider will be added after every
        fifth note, etc.)
    :return: altered list of pitches
    """
    new_pitches = []
    for i, f in enumerate(pitches):
        new_pitches.append(f)
        if i % interval == interval - 1:
            new_pitches.append('div')
    return new_pitches
ca767febb8be4a3fc21f1387103a89e6adc4ef50
347,679
def key_by_tollbooth_month(element):
    """
    Beam works with (key, value) tuples as rows. The tuple's first element is
    treated as the key, while the second element is the value. Both key and
    value can be any type, but typically they are constructed of tuples
    themselves if multiple keys or values are needed, for example:
    ((key1, key2, ...), (value1, value2, ...))
    """
    # We're creating a multiple key (tollbooth, month) with a single value (total)
    return (element['tollbooth'], element['month']), element['total']
746738ef1249bc1dd13ad7f571182b8eb8c80468
147,395
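A minimal pipeline sketch using key_by_tollbooth_month, assuming dict-shaped input rows with tollbooth, month, and total fields (the sample rows are made up for illustration):

import apache_beam as beam

rows = [
    {'tollbooth': 'A', 'month': 1, 'total': 10},
    {'tollbooth': 'A', 'month': 1, 'total': 5},
    {'tollbooth': 'B', 'month': 2, 'total': 7},
]

with beam.Pipeline() as p:
    (p
     | beam.Create(rows)
     | beam.Map(key_by_tollbooth_month)  # ((tollbooth, month), total)
     | beam.CombinePerKey(sum)           # sum totals per (tollbooth, month)
     | beam.Map(print))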
def impl_ret_untracked(ctx, builder, retty, ret):
    """
    The return type is not a NRT object.
    """
    return ret
af6088d5a6a3051456a0ae7171ba4cf7fd9cc592
159,489
def usable(record):
    """
    Helper function to determine if we can use a record.

    We are only interested in protein coding exons.
    """
    return (record['feature'] == 'exon' and
            record['attribute']['transcript_biotype'] == 'protein_coding')
833f72bea6ab6968987d58c8cef318c82a496759
146,505
import itertools


def basis_labels(n):
    """
    Generate a list of basis labels for `n` qubits, ordered from least to
    greatest, in big-endian format:

        ['00..00', '00..01', ..., '11..11']

    :param n: The number of qubits.
    :return: A list of strings of length n that enumerate the n-qubit bitstrings
    :rtype: list
    """
    return ["".join(labels) for labels in itertools.product('01', repeat=n)]
a677c74f36cf89089a34ecb8b731c2b4a76f77fe
623,478
from typing import Dict


def get_aqi_class_pcts(aqi_cl_exps: Dict[int, float], length: float) -> dict:
    """Returns the percentages of exposures to different AQI classes as a
    dictionary (e.g. { 1: 75.0, 2: 25.0 }).

    Args:
        aqi_cl_exps: A dictionary of exposures to different AQI classes
            (1, 2, 3...) as distances (m).
        length: The length of the path.
    """
    return {
        aqi_class: round(aqi_class_length * 100 / length, 3)
        for aqi_class, aqi_class_length in aqi_cl_exps.items()
    }
1f2150624031a7d9738410aafafae9c79d7cbcf3
350,189
def indent_docstr(s, indent, n=1, trim=True):
    """Add common indentation to all lines except first

    Args:
        s (str): docstring starting at indentation level 0
        indent (str): text used for indentation, in practice
            this will be the level of the declaration + 1
        n (int): don't indent first n lines
        trim (bool): trim whitespace (' \t') out of blank lines

    Returns:
        s with common indentation applied
    """
    lines = s.splitlines(keepends=True)
    for i in range(n, len(lines)):
        if lines[i].strip() or not trim:
            lines[i] = "{0}{1}".format(indent, lines[i])
        else:
            lines[i] = lines[i].strip(' \t')
    return "".join(lines)
fb99e0c0608153acc289a317f782c2ec85d5eefa
611,592
def get_min_value(solution, field=0):
    """
    Get the minimum value in a field in a solution.
    """
    min_val = 1e38
    for state in solution.states:
        min_temp = state.q[field, :, :].min()
        if min_temp < min_val:
            min_val = min_temp
    return min_val
719d426022b3f60520cb81f69918d113501fb21a
27,705
import requests
from bs4 import BeautifulSoup


def get_and_parse_url(param):
    """
    Fetch and parse a URL.

    :param param: URL to fetch
    :type param: str
    :rtype: object
    :return: soup: parsed html object
    """
    result = requests.get(param)
    soup = BeautifulSoup(result.text, 'html.parser')
    return soup
a18a74ecac14a276c2f127a70b44b4769d66d56a
438,364
import unicodedata


def normalize_str(text):
    """
    Normalizes unicode input text (for example removes national characters).

    :param text: text to normalize
    :type text: unicode
    """
    # unicodedata NFKD doesn't properly convert the Polish ł
    trans_dict = {
        u'ł': u'l',
        u'Ł': u'L'
    }
    trans = dict((ord(k), ord(v)) for k, v in trans_dict.items())
    text = text.translate(trans)
    return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
40c8f77cdbf08b12a3867cd4a9d9bb91b323b50b
704,309
def cost_function(theta, state, circuit, shots=1000):
    """Cost function encoding the difference between a state and its
    up-to-phases canonical form.

    Args:
        theta (array): parameters of the unitary rotations.
        state (cplx array): three-qubit random state.
        circuit (models.Circuit): Qibo variational circuit.
        shots (int): Shots used for measuring every circuit.

    Returns:
        float, cost function
    """
    circuit.set_parameters(theta)
    measurements = circuit(state, nshots=shots).frequencies(binary=False)
    return (measurements[1] + measurements[2] + measurements[3]) / shots
d4a491f6661134fdc827b739c1a4e9f37f812bbc
437,200
def generate_op_run_log(op_data):
    """Returns operator execution log."""
    return 'device id={} op={}, ready={}, start={}, end={}\n'.format(
        op_data['p'], op_data['name'], op_data['ready_ts'],
        op_data['start_ts'], op_data['end_ts'])
ec1ca1568de433dd9bfb8ff134d68c913436a9c5
374,347
import torch
from typing import Dict
from typing import Tuple


def ngram(
    spikes: torch.Tensor,
    ngram_scores: Dict[Tuple[int, ...], torch.Tensor],
    n_labels: int,
    n: int,
) -> torch.Tensor:
    # language=rst
    """
    Predicts between ``n_labels`` using ``ngram_scores``.

    :param spikes: Spikes of shape ``(n_examples, time, n_neurons)``.
    :param ngram_scores: Previously recorded scores to update.
    :param n_labels: The number of target labels in the data.
    :param n: The max size of n-gram to use.
    :return: Predictions per example.
    """
    predictions = []
    for activity in spikes:
        score = torch.zeros(n_labels, device=spikes.device)

        # Aggregate all of the firing neurons' indices
        fire_order = []
        for t in range(activity.size()[0]):
            ordering = torch.nonzero(activity[t].view(-1))
            if ordering.numel() > 0:
                fire_order += ordering[:, 0].tolist()

        # Consider all n-gram sequences.
        for j in range(len(fire_order) - n):
            if tuple(fire_order[j : j + n]) in ngram_scores:
                score += ngram_scores[tuple(fire_order[j : j + n])]

        predictions.append(torch.argmax(score))

    return torch.tensor(predictions, device=spikes.device).long()
1b7ddad13bf0da0b5c438aa139e5773ddff0128a
110,285
def add(x, y):
    """The sum of two numbers.

    Parameters
    ----------
    x : (int, float)
        The first number to be added.
    y : (int, float)
        The second number to be added.

    Returns
    -------
    ret : (int, float)
        The sum of the inputs x and y.

    Notes
    -----
    Python will often convert the types of the input values. For example, if
    the inputs x and y are integers, the result will be an integer. However,
    if the input is an integer and a float, a float will be returned.

    Examples
    --------
    Adding two integers together:

    >>> add(5, 3)
    8

    An example of mixed input type:

    >>> add(5.0, 3)
    8.0
    """
    return x + y
2bfca456840782bf5d14c7c5863370d152a86542
344,241
from datetime import datetime


def format_datetime(dt: datetime) -> str:
    """
    Format a datetime object as string.

    :param dt: Date and time to format.
    :return: String representation.
    """
    return dt.strftime("%Y-%m-%d %H:%M:%S%z")
2e62f4f15ab3f7940bbb616fbc69da29e7aa4944
144,213
from pathlib import Path


def get_importfilelist(args):
    """Convert the optional list of import files into a consistent list form.

    Specifying one or more import files to load at startup is optional. If
    the -i/--import switch isn't provided, then None is passed on. If
    -i/--import is specified without a filename, then we will place
    Path("import_<filename>") as the only item in the list. Otherwise, the
    list of files will be passed on as-is.
    """
    if args.importfilelist is not None and len(args.importfilelist) == 0:
        filename = Path(args.filename).resolve()
        return [str(Path().joinpath(filename.parent, f'import_{filename.name}'))]
    return args.importfilelist
9aecea8b65a716c4f5c42bc4caaebbd2def060c8
213,209
import math


def TxyzRxyz_2_Pose(xyzrpw):
    """Returns the pose given the position (mm) and Euler angles (rad) as an
    array [x,y,z,rx,ry,rz]. The result is the same as calling:
    H = transl(x,y,z)*rotx(rx)*roty(ry)*rotz(rz)

    :param xyzrpw: [x,y,z,rx,ry,rz] in mm and radians
    :type xyzrpw: list of float

    .. seealso:: :class:`.Mat`, :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose_2_ABB`, :func:`~robodk.Pose_2_Adept`, :func:`~robodk.Pose_2_Comau`, :func:`~robodk.Pose_2_Fanuc`, :func:`~robodk.Pose_2_KUKA`, :func:`~robodk.Pose_2_Motoman`, :func:`~robodk.Pose_2_Nachi`, :func:`~robodk.Pose_2_Staubli`, :func:`~robodk.Pose_2_UR`, :func:`~robodk.quaternion_2_pose`
    """
    [x, y, z, rx, ry, rz] = xyzrpw
    srx = math.sin(rx)
    crx = math.cos(rx)
    sry = math.sin(ry)
    cry = math.cos(ry)
    srz = math.sin(rz)
    crz = math.cos(rz)
    return [[cry * crz, -cry * srz, sry, x],
            [crx * srz + crz * srx * sry, crx * crz - srx * sry * srz, -cry * srx, y],
            [srx * srz - crx * crz * sry, crz * srx + crx * sry * srz, crx * cry, z],
            [0, 0, 0, 1]]
d26befb8474c0a24ddd8451a5182c2315851fdd9
272,876
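A quick sanity check of TxyzRxyz_2_Pose: with zero Euler angles the rotation block reduces to the identity (up to float signs like -0.0), leaving a pure translation in the last column.

pose = TxyzRxyz_2_Pose([1, 2, 3, 0, 0, 0])
# [[1.0, 0.0, 0.0, 1],
#  [0.0, 1.0, 0.0, 2],
#  [0.0, 0.0, 1.0, 3],
#  [0,   0,   0,   1]]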
from datetime import datetime


def iso_datetime_str_to_datetime(string):
    """Convert an ISO datetime string to a datetime object."""
    try:
        return datetime.strptime(string, "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        return datetime.strptime(string, "%Y-%m-%dT%H:%M:%S")
21213be4bf940cf7c7432ea7ff23d0b0dc861e7f
331,013
def get_reviewers(commit_infos):
    """Get a set of authors and reviewers from 'recipes.py autoroll' commit
    infos.
    """
    reviewers = set()
    for commits in commit_infos.values():
        for commit in commits:
            reviewers.add(commit['author'])
    return reviewers
f99920095c6f5e159a1de2fe626d7aecb054d984
245,355
from typing import List


def find_new_name(
    img_name: str, same_date_file_list: List[str], ext: str, max_n_imgs: int = 10000
) -> str:
    """Chooses a new filename that is not in the list and contains the
    img_name in the beginning, by appending an int to the filename separated
    by an underscore.
    """
    for k in range(max_n_imgs):
        new_name = img_name + "_" + str(k) + ext
        if new_name not in same_date_file_list:
            return img_name + "_" + str(k)
    # If there are already more than `max_n_imgs` images, give up.
    raise ValueError("ERROR: Way too many images.")
6beac30fa512ea278246a68a47e3af918da35b34
303,425
import re


def regex_strip_legalname(raw_names):
    """Removes the legal entity, technical description or firm type from a
    firm name.

    Input
        raw_names: list of strings with firm names
    Return
        list of strings: firm names without legal description
    """
    pattern = r"(\s|\.|\,|\&)*(\.com|Enterprise|Worldwide|Int\'l|N\.V\.|LLC|Co\b|Inc\b|Corp\w*|Group\sInc|Group|Company|Holdings\sInc|\WCo(\s|\.)|plc|Ltd|Int'l\.|Holdings|\(?Class\s\w+\)?)\.?\W?"
    stripped_names = [re.sub(pattern, '', n) for n in raw_names]
    return stripped_names
5388324e916b4de25e563383335ec8fbea876d63
169,874
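For illustration (hypothetical firm names; output per my reading of the pattern):

print(regex_strip_legalname(["Apple Inc.", "Acme Holdings", "Foo Group Inc"]))
# ['Apple', 'Acme', 'Foo']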
def _avatar_url_from_info(oauth_type, info):
    """Returns a URL for the user avatar, depending on oauth_type."""
    if oauth_type == 'facebook':
        return 'https://graph.facebook.com/{}/picture?type=square'.format(
            info['id'])
    elif oauth_type == 'google':
        return info.get('picture')
    return None
16b63875f105da6e65601ff59bbb382640186ad9
298,433
from typing import Union
from typing import Tuple
from typing import cast


def convert_ipv6_sockaddr(
    sockaddr: Union[Tuple[str, int, int, int], Tuple[str, int]]
) -> Tuple[str, int]:
    """
    Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.

    If the scope ID is nonzero, it is added to the address, separated with ``%``.
    Otherwise the flow id and scope id are simply cut off from the tuple.
    Any other kinds of socket addresses are returned as-is.

    :param sockaddr: the result of :meth:`~socket.socket.getsockname`
    :return: the converted socket address
    """
    # This is more complicated than it should be because of MyPy
    if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
        host, port, flowinfo, scope_id = cast(Tuple[str, int, int, int], sockaddr)
        if scope_id:
            # Add scope_id to the address
            return f"{host}%{scope_id}", port
        else:
            return host, port
    else:
        return cast(Tuple[str, int], sockaddr)
0bc72d4d2fc343911a83b81aeb41e699cafb1ece
152,913
def get_first_line(file: str) -> str:
    """Get first line of file.

    Parameters
    ----------
    file : str
        Path to the file.

    Returns
    -------
    str
        The first line, without its trailing newline.
    """
    with open(file) as f:
        return f.readline().split('\n')[0]
07066726499705def66714386253eac268c93f69
549,581
def is_triangle(a, b, c):
    """
    Check whether three sticks of the given lengths can form a triangle.

    a, b, c : int -> the length of every side
    """
    if a < (b + c) and b < (a + c) and c < (a + b):
        print('Yes')
        return True
    else:
        print('No')
        return False
b7189639b014175ba6a7285d6dd586baca7c1181
554,785
def user_cleaner(user):
    """Converts "_" or an empty string to "library"; makes the username
    lowercase.

    Args:
        user (str):

    Returns:
        str: cleaned username
    """
    if user == "_" or user == "":
        return "library"
    try:
        return user.lower()
    except AttributeError:
        return user
d6a6e6d1c18f83dcd769bf48a2b08251374bfd0b
157,877
def remove_prefix(text: str, prefix: str) -> str:
    """Removes a prefix from a string, if present at its beginning.

    Args:
        text: string potentially containing a prefix.
        prefix: string to remove at the beginning of text.
    """
    # Note: Python 3.9+ offers str.removeprefix with the same semantics.
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
bee742d06b61e0872ea6384340c20b3be272abfe
612,653
from typing import Union
from typing import List
from typing import Tuple


def flatten(xs: Union[List, Tuple]) -> List:
    """
    Flatten a nested list or tuple.
    """
    return (
        sum(map(flatten, xs), [])
        if isinstance(xs, (list, tuple))
        else [xs]
    )
e520fb245e5a681ca446f50c7bb4fd55cfaedc18
688,543
def media_markers(project):
    """Get all media markers in a project.

    Args:
        project: The Project to fetch data from.

    Returns:
        An iterable of `(Marker, Media, Track)` tuples.
    """
    return (
        (marker, media, track)
        for track in project.timeline.tracks
        for media in track.medias
        for marker in media.markers
    )
b31ee8e53e76bbcae5805f3f62394d4e47d64ce4
269,868
def getGroupInputDataLength(hg):
    """ Return the length of a HDF5 group

    Parameters
    ----------
    hg : `h5py.Group` or `h5py.File`
        The input data group

    Returns
    -------
    length : `int`
        The length of the data

    Notes
    -----
    For a multi-D array this returns the length of the first axis and not
    the total size of the array. Normally that is what you want to be
    iterating over.

    The group is meant to represent a table, hence all child datasets
    should be the same length.
    """
    firstkey = list(hg.keys())[0]
    nrows = len(hg[firstkey])
    firstname = hg[firstkey].name
    for value in hg.values():
        if len(value) != nrows:
            raise ValueError(
                f"Group does not represent a table. Length ({len(value)}) "
                f"of column {value.name} does not match length ({nrows}) "
                f"of first column {firstname}")
    return nrows
f049faff66573b9dfeec304f33143ca915a692c6
682,397
import yaml


def parse_yaml(path):
    """From path, parse file as yaml, return data"""
    with open(path, "r") as stream:
        data = yaml.safe_load(stream)
    return data
25d78ae23af9852197dfe42dd1481cb866568d07
313,204
def _verify_signing_id_commands(identity, provisioning_profile):
    """Returns commands that verify that the given identity is valid.

    Args:
        identity: The signing identity to verify.
        provisioning_profile: The provisioning profile, if the signing
            identity was extracted from it. If provided, this is included in
            the error message that is printed if the identity is not valid.

    Returns:
        A string containing Bash commands that verify the signing identity
        and assign it to the environment variable `VERIFIED_ID` if it is
        valid.
    """
    verified_id = ("VERIFIED_ID=" +
                   "$( " +
                   "security find-identity -v -p codesigning | " +
                   "grep -F \"" + identity + "\" | " +
                   "xargs | " +
                   "cut -d' ' -f2 " +
                   ")\n")

    # If the identity was extracted from the provisioning profile (as opposed
    # to being passed on the command line), include that as part of the error
    # message to point the user at the source of the identity being used.
    if provisioning_profile:
        found_in_prov_profile_msg = (" found in provisioning profile " +
                                     provisioning_profile.path)
    else:
        found_in_prov_profile_msg = ""

    # Exit and report an Xcode-visible error if no matched identifiers were
    # found.
    error_handling = ("if [[ -z \"$VERIFIED_ID\" ]]; then\n" +
                      " " + "echo " +
                      "error: Could not find a valid identity in the " +
                      "keychain matching \"" + identity + "\"" +
                      found_in_prov_profile_msg + "." + "\n" +
                      " " + "exit 1\n" +
                      "fi\n")

    return verified_id + error_handling
46dc26cc1e6f6b69595cf51d8a564acfa40e4e22
430,831
def note2ratio(note, cents=0):
    """ Converts semitones to a frequency ratio. """
    ratio = 2 ** ((note + cents / 100) / 12)
    return ratio
1c78697d3978d122d8be39e406bc8be8b7684f9d
13,023
def get_users(cluster_config):
    """Get users from config object"""
    return list(cluster_config['users'])
b52e5adbad71bef820ce792546c9fb010fbdaf84
216,404
def _trim(string):
    """Remove one leading and trailing ' or " used in values with whitespace."""
    if string[0] in "'\"":
        return string[1:-1]
    return string
23e5816e2ad7365d26619718e3b2958a6771e5e8
252,061
def make_iam_policy(role, members):
    """Create sample IAM policy."""
    return {'bindings': [{'role': role, 'members': members}]}
cf1caf896421cd17acd296c353bbbd4136188e66
208,937
from typing import Tuple
import math


def get_bicubic_params(cubic_filter: str) -> Tuple[float, float]:
    """Return the parameters b and c for the bicubic filter.

    Source: https://www.imagemagick.org/discourse-server/viewtopic.php?f=22&t=19823
            https://www.imagemagick.org/Usage/filter/#mitchell

    Args:
        cubic_filter (str): Can be: Spline, B-Spline, Hermite,
            Mitchell-Netravali, Mitchell, Catmull-Rom, Catrom, Sharp Bicubic,
            Robidoux soft, Robidoux, Robidoux Sharp.

    Returns:
        Tuple: b/c combo
    """
    sqrt = math.sqrt

    def _get_robidoux_soft() -> Tuple[float, float]:
        b = (9 - 3 * sqrt(2)) / 7
        c = (1 - b) / 2
        return b, c

    def _get_robidoux() -> Tuple[float, float]:
        sqrt2 = sqrt(2)
        b = 12 / (19 + 9 * sqrt2)
        c = 113 / (58 + 216 * sqrt2)
        return b, c

    def _get_robidoux_sharp() -> Tuple[float, float]:
        sqrt2 = sqrt(2)
        b = 6 / (13 + 7 * sqrt2)
        c = 7 / (2 + 12 * sqrt2)
        return b, c

    cubic_filter = cubic_filter.lower().replace(' ', '_').replace('-', '_')
    cubic_filters = {
        'spline': (1.0, 0.0),
        'b_spline': (1.0, 0.0),
        'hermite': (0.0, 0.0),
        'mitchell_netravali': (1 / 3, 1 / 3),
        'mitchell': (1 / 3, 1 / 3),
        'catmull_rom': (0.0, 0.5),
        'catrom': (0.0, 0.5),
        'bicubic_sharp': (0.0, 1.0),
        'sharp_bicubic': (0.0, 1.0),
        'robidoux_soft': _get_robidoux_soft(),
        'robidoux': _get_robidoux(),
        'robidoux_sharp': _get_robidoux_sharp()
    }
    return cubic_filters[cubic_filter]
8bd8161f536096d029071255e68cf79c58a74a70
581,374
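For example:

print(get_bicubic_params('Mitchell'))        # (0.333..., 0.333...)
print(get_bicubic_params('Robidoux Sharp'))  # approximately (0.2620, 0.3690)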
def find_latest_iteration(directories):
    """
    Selects the latest iteration of a workflow based on the numbering of
    directories. Expects a list of directories of the form
    <path>/<name>_<number> and returns the highest number found. Will
    return 0 if no directories are given.
    """
    numbers = [0]
    for d in directories:
        number_string = d.split('_')[-1].replace('/', '')
        number = int(number_string)
        numbers.append(number)
    return max(numbers)
502f033b7bdc6e6649f4364ca5936b356bc24252
213,109
def isunauthenticated(func):
    """Checks if the function does not require authentication.

    Mark such functions with the `@unauthenticated` decorator.

    :returns: bool
    """
    return getattr(func, 'unauthenticated', False)
1a952eafd06ae567669bb2524e24f908830d6f56
376,690
def _http_unauthorized(start_response):
    """Responds with HTTP 401."""
    start_response('401 Unauthorized', [('Content-Length', '0')])
    return []
8d0b1a8e80a2c6b5a73a2a0a714015c843e27e11
399,666
def NativeStr(obj):
    """A compatibility wrapper for returning native string representation.

    Args:
        obj: An object for which we want a string representation.

    Returns:
        A native string representation of given object.
    """
    return str(obj)
b02fa69a83e40ab5b7b1d067562345dfc9134479
345,557
def get_content(document: dict) -> str:
    """
    Get the content of a document with the same structure as the json in the
    COVID-19 dataset.

    Parameters
    ----------
    document: :class:`dict`
        A `dict` representing a document.

    Returns
    -------
    :class:`str`
        Content of the document.

    Examples
    --------
    >>> get_content(toy_covid_article)
    "introduction. There is no-one in the trees. Is there? conclusion. Predators don't like to lose."
    """
    return " ".join(
        [". ".join([chapter["section"], chapter["text"]])
         for chapter in document["body_text"]]
    )
ce1d1a875b5e343b36c0e69af18bcad801eb3033
417,934
from datetime import datetime
import pytz


def set_last_update(details, *args, **kwargs):  # pylint: disable=unused-argument
    """
    Pipeline function to add extra information about when the social auth
    profile has been updated.

    Args:
        details (dict): dictionary of information about the user

    Returns:
        dict: updated details dictionary
    """
    details["updated_at"] = datetime.now(tz=pytz.UTC).timestamp()
    return details
fb1856f368aa5e87daace63d906cca975ee2c1f1
79,909
import re


def validate_cnpj(cnpj):
    """
    Validates CNPJs, returning only the valid digit string.

    # Invalid CNPJs
    >>> validate_cnpj('abcdefghijklmn')
    False
    >>> validate_cnpj('123')
    False
    >>> validate_cnpj('')
    False
    >>> validate_cnpj(None)
    False
    >>> validate_cnpj('12345678901234')
    False
    >>> validate_cnpj('11222333000100')
    False

    # Valid CNPJs
    >>> validate_cnpj('11222333000181')
    '11222333000181'
    >>> validate_cnpj('11.222.333/0001-81')
    '11222333000181'
    >>> validate_cnpj(' 11 222 333 0001 81 ')
    '11222333000181'
    """
    cnpj = ''.join(re.findall(r'\d', str(cnpj)))

    if (not cnpj) or (len(cnpj) < 14):
        return False

    # Take only the first 12 digits of the CNPJ and generate the 2 missing
    # check digits
    inteiros = list(map(int, cnpj))
    novo = inteiros[:12]

    prod = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
    while len(novo) < 14:
        r = sum([x * y for (x, y) in zip(novo, prod)]) % 11
        if r > 1:
            f = 11 - r
        else:
            f = 0
        novo.append(f)
        prod.insert(0, 6)

    # If the generated number matches the original number, it is valid
    if novo == inteiros:
        return cnpj
    return False
847527cc3abbbf07a2d938f861e0c436e729a8f6
142,293
def improve_humidity_measurement(raw_humidity, dig_h, t_fine):
    """Refine the humidity measurement.

    Adapts the humidity measurement by using the available temperature
    information, along with the humidity readout details.

    Args:
        raw_humidity (int): raw humidity
        dig_h (list): raw data blocks pertaining to humidity measurement
        t_fine (float): temperature measurement

    Returns:
        float: refined humidity measurement

    Reference:
        Bosch data sheet, Appendix A, "BME280_compensate_H_double"
    """
    base_value = t_fine - 76800.0
    term1 = raw_humidity - (dig_h[3] * 64.0 + dig_h[4] / 16384.0 * base_value)
    term2a = base_value * (1.0 + dig_h[2] / 67108864.0 * base_value)
    term2 = dig_h[1] / 65536.0 * (1.0 + dig_h[5] / 67108864.0 * term2a)
    humidity = term1 * term2
    humidity = humidity * (1.0 - dig_h[0] * humidity / 524288.0)
    humidity = max(0, min(humidity, 100))
    return humidity
bad63a7247b7a067dcc703a8de6c1b1d67b8c66d
186,773
import re


def convertd2e(numberstring=" "):
    """
    Takes a string and replaces all d's or D's with e's. Useful for reading
    data files with doubles output by Fortran, which uses d or D instead of e
    for the exponent.
    """
    Dd = re.compile("[Dd]")
    newstring = Dd.sub("e", numberstring)
    return newstring
# end convertd2e ==================================================
20c85ee0bdcf7c16a666a559f1d966e2e6fd26e7
561,023
def reformat_element_symbol(element_string):
    """
    Reformat the string so the first letter is uppercase and all subsequent
    letters lowercase.

    Parameters
    ----------
    element_string : str
        Inputted element symbol

    Returns
    -------
    str
        Returned reformatted element symbol
    """
    return element_string[0].upper() + element_string[1:].lower()
603b6e221b9bfce9ed053f55e79f610ed87a711e
580,624
import itertools


def get_copasi_model_obj_sbml_ids(model):
    """ Get the SBML id of each object of a COPASI model

    Args:
        model (:obj:`COPASI.CModel`): model

    Returns:
        :obj:`list` of :obj:`str`: SBML id of each object of the model
    """
    ids = []
    for obj in itertools.chain(
        model.getMetabolites(),
        model.getModelValues(),
        model.getCompartments(),
        model.getReactions()
    ):
        ids.append(obj.getSBMLId())
    return ids
7d26f956e65e28943e17c461de27b860b787f788
584,894
def get_row_indices(S, feature_pairs):
    """Get row indices of given feature pairs."""
    return list(map(lambda elem: feature_pairs.index(elem), S))
5d897887b0cbd391cb596aba153e3e97fa78f463
501,153
def _service_account_email(project_id, service_account_id):
    """Return full service account email."""
    return '%s@%s.iam.gserviceaccount.com' % (service_account_id, project_id)
403023294f9f316a784b7184b4822d147dab4f4a
426,263
def humanize_name(name):
    """ Return a canonical representation of a name in First Last format."""
    if not isinstance(name, str):
        return name
    elif name.upper() == name:
        return " ".join([part.strip().title() for part in name.split(",")][::-1])
    else:
        return " ".join([part.strip() for part in name.split(",")][::-1])
31bedd13c9b1903b99a91eaf0e573b89823d23ee
505,020
def get_vep_dict(vep_string, vep_header, allele=None):
    """Make the vep annotation into a dictionary.

    This dictionary will have the alleles as keys and a list of dictionaries
    with vep annotations as values.

    Args:
        vep_string (string): A string with the CSQ annotation
        vep_header (list): A list with the vep header
        allele (str): The allele that is annotated

    Return:
        vep_dict (dict): A vep dict as described above
    """
    vep_dict = {}
    for vep_annotation in vep_string.split(','):
        inner_dict = dict(zip(vep_header, vep_annotation.split('|')))
        # If the allele is annotated by vep, we use that allele
        if 'Allele' in inner_dict:
            allele = inner_dict['Allele']

        if allele in vep_dict:
            vep_dict[allele].append(inner_dict)
        else:
            vep_dict[allele] = [inner_dict]

    return vep_dict
1e355880b0e53fb8e8911aa04d908003b51f3171
532,510
import six
import logging


def BooleanShellValue(sval, default, msg=None):
    """See if the string value is a value users typically consider as boolean

    Often people set shell variables to different values to mean "true" or
    "false". For example, they can do:

        export FOO=yes
        export BLAH=1
        export MOO=true

    Handle all that user ugliness here.

    If the user picks an invalid value, you can use |msg| to display a
    non-fatal warning rather than raising an exception.

    Args:
        sval: The string value we got from the user.
        default: If we can't figure out if the value is true or false, use
            this.
        msg: If |sval| is an unknown value, use |msg| to warn the user that
            we could not decode the input. Otherwise, raise ValueError().

    Returns:
        The interpreted boolean value of |sval|.

    Raises:
        ValueError() if |sval| is an unknown value and |msg| is not set.
    """
    if sval is None:
        return default

    if isinstance(sval, six.string_types):
        s = sval.lower()
        if s in ('yes', 'y', '1', 'true'):
            return True
        elif s in ('no', 'n', '0', 'false'):
            return False

    if msg is not None:
        logging.warning('%s: %r', msg, sval)
        return default
    else:
        raise ValueError('Could not decode as a boolean value: %r' % sval)
c7091a1ea4f2ac43b753f66b0f5f0396ba93ab86
290,166
def gerling(rho, C0=6.0, C1=4.6):
    """
    Compute Young's modulus from density according to Gerling et al. 2017.

    Arguments
    ---------
    rho : float or ndarray
        Density (kg/m^3).
    C0 : float, optional
        Multiplicative constant of Young modulus parametrization
        according to Gerling et al. (2017). Default is 6.0.
    C1 : float, optional
        Exponent of Young modulus parameterization according to
        Gerling et al. (2017). Default is 4.6.

    Returns
    -------
    E : float or ndarray
        Young's modulus (MPa).
    """
    return C0 * 1e-10 * rho ** C1
78fea7ff01e05ccadfd5ee9968a7c0fd2eab49b9
247,474
def getXofMax(data):
    """
    Locates the index of the maximum value found in a list or an array.

    @param data the list or array that should be analyzed
    @return the index position (zero-based) of the maximum
    """
    valMax = data[0]
    xOfMax = 0
    for i in range(len(data)):
        if data[i] > valMax:
            valMax = data[i]
            xOfMax = i
    return xOfMax
57f954dfd5f8b0234fc66579e23e390f5e1ffc1e
522,755
import torch


def generate_noisy(x, nstd, random_seed=20180102):
    """
    This function generates noisy images from clean ones.

    Input:
        x: (N, C, H, W)
        nstd: noise sigma added to the clean image
        random_seed: seed for reproducible noise
    """
    shape = x.shape
    dtype = x.dtype
    sigma = nstd
    torch.manual_seed(random_seed)
    noise = sigma * torch.randn(*shape, dtype=dtype)
    x_noise = x + noise
    return x_noise.clamp(0., 1.)
c8310f2eaf63eba07c197348747935d6f6a21817
382,915
import functools


def memoize(f):
    """Decorator that can be applied to a method to memoize the results.

    Args:
        f - The function to decorate with memoization. The first argument of
            the function should be self - the instance of the class. The
            function can take an arbitrary amount of additional positional
            arguments. All arguments must be hashable.

    Returns:
        A function wrapping `f`. `f` will be executed only once for a given
        set of input arguments.
    """
    cache = {}

    @functools.wraps(f)
    def cached(self, *args):
        if args in cache:
            return cache[args]
        ret = f(self, *args)
        cache[args] = ret
        return ret

    return cached
53033e66e5e9aa0f9817118b54bb83ab3b982b45
606,200
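A minimal usage sketch of memoize (hypothetical class; note the cache is keyed on the positional arguments only and lives in the decorator's closure, so it is shared across instances):

class Fib:
    @memoize
    def fib(self, n):
        # Recursive calls go through the memoized wrapper too.
        return n if n < 2 else self.fib(n - 1) + self.fib(n - 2)

f = Fib()
print(f.fib(30))  # 832040, with each sub-result computed only once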
def checksum(line):
    """Compute the TLE checksum."""
    total = sum((int(c) if c.isdigit() else c == '-') for c in line[0:-1])
    return (total % 10) == int(line[-1])
1154d10a9c285c1a6c30a9930f97773973b26d9e
110,919
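In a TLE line, digits count at face value, each minus sign counts as 1, and everything else counts as 0; the sum modulo 10 must equal the final digit. A tiny synthetic example (not a real TLE):

# '1 2-3' sums to 1 + 2 + 1 + 3 = 7, so a trailing '7' verifies.
print(checksum("1 2-37"))  # True
print(checksum("1 2-38"))  # False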
def deg2hr(hr):
    """Convert hours into degrees (1 hour of right ascension = 15 degrees)."""
    # Note: despite its name, this function converts hours to degrees.
    return hr * 15.0
3d36cead1e8fe9593660c4ea3270828d886819d1
176,540
def _calculate_key(name):
    """Generate a Redis key with the given name.

    Args:
        name: The name of the named actor.

    Returns:
        The key to use for storing a named actor in Redis.
    """
    return b"Actor:" + name.encode("ascii")
2272bd2c6acb23e8d44c3aa2ba43815d0dc9398d
87,967
def row(ctx):
    """Get this cell's row."""
    return ctx["cell"].row
4cfc89daa3ca771359acd762d716316209ca0eb4
702,829
def prod(m, p):
    """ Computes the product of matrix m and vector p """
    return (p[0] * m[0] + p[1] * m[1],
            p[0] * m[2] + p[1] * m[3])
f6d8219d238524f3d1cf90695767bae899bfdbfe
271,028
from datetime import datetime


def __log_dt_converter(dt_object):  # pragma: no cover
    """Used to convert datetime objects to strings automatically for the logger."""
    return str(dt_object) if isinstance(dt_object, datetime) else dt_object
6d9128347181e8f7e2e3ee2f3ef790a54134b0ae
100,094
def get_device_running_config(net_connect):
    """
    GET network device running configuration

    Args:
        net_connect (object): Netmiko ConnectHandler object

    Returns:
        str: Network device running-config
    """
    return net_connect.send_command('show running-config')
4a07cadcfa81ecaec5b0f0a1c5d7867c8c02cd81
215,146
def get_uuid_list(conn):
    """Grab list of Ironic nodes."""
    return [node.id for node in conn.bare_metal.nodes()]
0cfbc72af407bb657303f2cdf7f5668a825e44a6
441,969
def w(q2: float, m_parent: float, m_daughter: float) -> float:
    """
    Calculates the recoil variable w, which runs from 1 (zero recoil) to a
    maximum value (depends on the daughter).

    :param q2: Momentum transfer to the lepton-neutrino system.
    :param m_parent: Mass of the parent meson, e.g. the B meson.
    :param m_daughter: Mass of the daughter meson, e.g. the D or D* meson.
    :return: The recoil variable w.
    """
    return (m_parent ** 2 + m_daughter ** 2 - q2) / (2 * m_parent * m_daughter)
c7b20722df49224db567681667664e687f506743
638,073
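A worked example of w (masses in GeV, so q2 in GeV²; the B and D masses below are approximate PDG-rounded values I supply for illustration):

m_B, m_D = 5.279, 1.870  # parent and daughter masses in GeV

print(w(q2=(m_B - m_D) ** 2, m_parent=m_B, m_daughter=m_D))  # q2_max gives w = 1 (zero recoil)
print(w(q2=0.0, m_parent=m_B, m_daughter=m_D))               # w_max, roughly 1.59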
import math


def crop_center_square(img):
    """
    Function to crop an image from the center into a square of smaller
    dimension.
    """
    height, width = img.shape[:2]

    if height < width:
        start = math.floor((width - height) / 2)
        end = start + height
        img_crp = img[:, start:end, :]
    else:
        start = math.floor((height - width) / 2)
        end = start + width
        img_crp = img[start:end, :, :]

    return img_crp
44c73aadba2b686259d16be0979b9176047eef05
494,127
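A quick check of crop_center_square with a NumPy array standing in for an image (the function assumes a channel axis, i.e. shape (H, W, C)):

import numpy as np

img = np.zeros((480, 640, 3))
print(crop_center_square(img).shape)  # (480, 480, 3): 80 columns trimmed from each side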
def split_string(text, chars_per_string):
    """
    Splits one string into multiple strings, with a maximum amount of
    `chars_per_string` characters per string. This is very useful for
    splitting one giant message into multiples.

    :param text: The text to split
    :param chars_per_string: The number of characters per line the text is
        split into.
    :return: The split text as a list of strings.
    """
    return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
0846e54ea0a2825c482c7d9147c88c75ce15d3f4
524,037
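For example:

print(split_string("abcdefgh", 3))  # ['abc', 'def', 'gh']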
def find_max_item_support(pattern, supports):
    """
    Returns support of item with maximum support among items in pattern.

    pattern: List. list of items in pattern.
    supports: Dict. item -> count dict
    """
    max_support = -1
    for item in pattern:
        max_support = max(max_support, supports[item])
    return max_support
c925ec553ab1214e9ce95167885b749c6238b67b
60,508
import typing


def _load_credentials(filepath: str) -> typing.Tuple[str, str]:
    """
    Open the provided filepath and read the credentials from it.

    Expected format is: "EMAIL\nPASSWORD"
    """
    with open(filepath) as file:
        email = file.readline().strip()
        password = file.readline().strip()
    return email, password
143335efe1c81d27d3209e38f736668076cfbf7f
185,335
def extract_app_version(lines):
    """
    Extracts version string from array of lines (content of version.txt file)

    :param lines:
    :return: version string
    """
    for line in lines:
        parts = [elem.strip() for elem in line.split('=')]
        if len(parts) == 2 and parts[0].lower() == 'version_str':
            return parts[1].strip("'")
    return ''
a35b35906b7d0cec7fb9f8232d793f4911947138
330,329
def pnorm(x, p):
    """
    Returns the L_p norm of vector 'x'.

    :param x: The vector.
    :param p: The order of the norm.
    :return: The L_p norm of the vector.
    """
    result = 0
    for index in x:
        result += abs(index) ** p
    result = result ** (1 / p)
    return result
110fea5cbe552f022c163e9dcdeacddd920dbc65
5,172
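A worked check with the Euclidean case:

print(pnorm([3, 4], 2))      # 5.0, since sqrt(3² + 4²) = 5
print(pnorm([1, -2, 2], 1))  # 5.0: the sum of absolute values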
import csv


def get_header(file):
    """
    Captures the header line of a given .csv file.

    Args:
        file: file name/path for the given .csv file

    Returns:
        header: the header line of the given .csv file.
    """
    # Use a context manager so the file handle is closed after reading.
    with open(file, 'r') as read_file:
        reader = csv.DictReader(read_file)
        return reader.fieldnames
6a5b72e614369f5717d8efacc7775f7b3d19cd9c
195,551
def check_result(func):
    """Decorator that checks if the cursor has results from `execute`."""

    def decorator(self, *args, **kwargs):
        if self._results is None:  # pylint: disable=protected-access
            raise Exception("Called before `execute`")
        return func(self, *args, **kwargs)

    return decorator
26fd42a9e5f3645b4ccae071f5c9d204c3b4ac16
370,399
import torch


def where(condition, x, y):
    """
    Return a tree of tensors of elements selected from either ``x`` or ``y``,
    depending on ``condition``.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.where(
        ...     torch.tensor([[True, False], [False, True]]),
        ...     torch.tensor([[2, 8], [16, 4]]),
        ...     torch.tensor([[3, 11], [5, 7]]),
        ... )
        tensor([[ 2, 11],
                [ 5,  4]])

        >>> tt1 = ttorch.randint(1, 99, {'a': (2, 3), 'b': {'x': (3, 2, 4)}})
        >>> tt1
        <Tensor 0x7f6760ad9908>
        ├── a --> tensor([[27, 90, 80],
        │                 [12, 59,  5]])
        └── b --> <Tensor 0x7f6760ad9860>
            └── x --> tensor([[[71, 52, 92, 79],
                               [48,  4, 13, 96]],
                              [[72, 89, 44, 62],
                               [32,  4, 29, 76]],
                              [[ 6,  3, 93, 89],
                               [44, 89, 85, 90]]])
        >>> ttorch.where(tt1 % 2 == 1, tt1, 0)
        <Tensor 0x7f6760ad9d30>
        ├── a --> tensor([[27,  0,  0],
        │                 [ 0, 59,  5]])
        └── b --> <Tensor 0x7f6760ad9f98>
            └── x --> tensor([[[71,  0,  0, 79],
                               [ 0,  0, 13,  0]],
                              [[ 0, 89,  0,  0],
                               [ 0,  0, 29,  0]],
                              [[ 0,  3, 93, 89],
                               [ 0, 89, 85,  0]]])
    """
    return torch.where(condition, x, y)
54b5addcd6853e6e550745e5fa2c25e16e5c900f
508,089
def sliding_window_regions(start, end, window_size, step_size):
    """
    sliding_window_regions
    ======================
    This method will split a gene into different regions based on a sliding
    window and step size. Each region is based on the window size. The window
    is slid down the gene using the step size. Each step size results in a
    new window. For example, if the gene is ~67000 bp long, the window size
    is 1.5kb and the step size is 375 bp, then you would get ~180 overlapping
    regions, with each region having a size of 1.5kb. The first region will
    start at the start site of the gene and the last region will end at the
    end site of the gene.

    Parameters:
    -----------
    1) start: (int) The genomic start position of a gene
    2) end: (int) The genomic end position of a gene
    3) window_size: (int) The size of the window/region in bp to use for the
       sliding window
    4) step_size: (int) The sliding step size in bp to slide the window
       across a gene.

    Returns:
    ++++++++
    (list) A 2d list of regions created by sliding a window across the gene
    positions. Each inner list has the region start pos at index 0 and the
    end pos at index 1
    """
    start = int(start)
    end = int(end)
    window_size = int(window_size)
    step_size = int(step_size)

    # Start and end of the first region.
    # The first region will start at the start of the gene.
    window_start = start
    # The region spans the start position through the end position
    # inclusively, which accounts for an off-by-one error.
    window_end = start + (window_size - 1)

    gene_regions = []

    # Iterate over the gene range and collect all regions
    while window_end < end:
        # Add region
        gene_regions.append([window_start, window_end])

        # Slide the window by the step size
        window_start += step_size
        window_end += step_size

    # Start and end of the last region.
    # The last region will end at the end of the gene.
    window_start = end - (window_size - 1) if end - (window_size - 1) > start else start
    window_end = end

    # Add region
    gene_regions.append([window_start, window_end])

    return gene_regions
775e7a8cfe79239465608133d4ad62d147502fab
691,717
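A small worked example of sliding_window_regions (note the final region is anchored to the gene end, so it may overlap the previous window more than step_size would suggest):

print(sliding_window_regions(100, 120, window_size=10, step_size=5))
# [[100, 109], [105, 114], [110, 119], [111, 120]]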
import click


def common_mqtt_options(func):
    """Supply common mqtt gateway options."""
    func = click.option(
        "-r",
        "--retain",
        is_flag=True,
        default=False,
        help="Turn on retain on published messages at broker.",
    )(func)
    func = click.option(
        "--out-prefix",
        default="mygateway1-in",
        show_default=True,
        help="Topic prefix for outgoing messages.",
    )(func)
    func = click.option(
        "--in-prefix",
        default="mygateway1-out",
        show_default=True,
        help="Topic prefix for incoming messages.",
    )(func)
    func = click.option(
        "-p",
        "--port",
        default=1883,
        show_default=True,
        type=int,
        help="MQTT port of the connection.",
    )(func)
    func = click.option("-b", "--broker", required=True, help="MQTT broker address.")(
        func
    )
    return func
13fd8d74add94fa3f0f5cf58f4ebedab965d9e84
594,662
def symsplit(item):
    """
    Splits a symmetric molecule atom mapping expression into its component
    parts. Returns a list of atom mappings in the symmetric molecule.
    For example:

        symsplit('(abc;cba)') = ['abc', 'cba']
    """
    # Removing white space
    tmp = item.replace(' ', '')

    # Checking format
    if (item[0] != '(') or (item[-1] != ')'):
        print("error in", item, "!\n")
    else:
        tmp = tmp[1:-1].split(';')

        # Checking sizes to make sure they are all the same
        sizet = len(tmp[0])
        for t in tmp[1:]:
            if len(t) != sizet:
                print("Sizes of the symmetric molecules are not the same!\n")
                print(tmp)

    return tmp
2dfb508da8642ee765f43ed4bff09ae6fec6f011
419,755
def inr(value):
    """Formats value as INR."""
    return f"₹{value:,.2f}"
cef7e68eedf508fd1990d6f9d315a032aabbfaf3
683,126
def _truncate(words, cutlength):
    """Group words by stems defined by truncating them at given length.

    :param words: Set of words used for analysis
    :param cutlength: Words are stemmed by cutting at this length.
    :type words: set(str) or list(str)
    :type cutlength: int
    :return: Dictionary where keys are stems and values are sets of words
        corresponding to that stem.
    :rtype: dict(str): set(str)
    """
    stems = {}
    for word in words:
        stem = word[:cutlength]
        try:
            stems[stem].update([word])
        except KeyError:
            stems[stem] = {word}
    return stems
b1b9726116781547d531265ce874de366ccd15ab
194,900
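For example (set ordering within each value may vary):

print(_truncate(["running", "runner", "walked"], 3))
# {'run': {'running', 'runner'}, 'wal': {'walked'}}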
def iteration_layers(model, speedup, session, indepth_layer=None):
    """
    Define the layers whose activations are enhanced and visualized.

    Parameters:
        model: Inception5h model
        speedup: selects a subset of layers to give results faster

    Returns:
        layer tensors to iterate through
    """
    if speedup is True:
        layer_names_reduced = ['conv2d1',
                               'conv2d2',
                               'mixed3b',
                               'mixed4b',
                               'mixed5b']
        layer_tensors = [session.graph.get_tensor_by_name(name + ":0")
                         for name in layer_names_reduced]
    else:
        layer_tensors = model.layer_tensors

    return layer_tensors
b20fc67f54d3c14cedfaeda006236cda61a9e181
447,604
def set_auth_header(auth_token: str) -> dict:
    """
    Creates the auth header for requests.

    :param auth_token: Bearer token to embed in the header.
    :return: dict
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + auth_token,
    }
    return headers
77ed0943c90cdcbe2d69b2fc81758f0f3da36d57
431,795
def _has_callable(type, name) -> bool:
    """Determine if `type` has a callable attribute with the given name."""
    return hasattr(type, name) and callable(getattr(type, name))
fc02bf0ff7e46bb9cae9a55ac7a79ebe114e784b
265,879
import requests


def get_full_iri(obo_id):
    """
    Given an ontology id of the form X:01234, look up the full iri using the
    ebi ols.

    :param obo_id: ontology identifier, e.g. HsapDv:0000087
    :type obo_id: string
    :return: full iri for the term, e.g. http://purl.obolibrary.org/obo/HsapDv_0000087
    :rtype: string
    """
    try:
        obo_id = obo_id.strip()
        ols_response = requests.get('http://www.ebi.ac.uk/ols/api/terms?obo_id={}'.format(obo_id))
        ols_json = ols_response.json()
        return ols_json['_embedded']['terms'][0]['iri']
    except KeyError:
        print('http://www.ebi.ac.uk/ols/api/terms?obo_id={}'.format(obo_id))
        print("Could not find {}.".format(obo_id))
        return None
    except requests.exceptions.ConnectionError as e:
        # The built-in ConnectionError would not catch requests' exception.
        print(e)
        print("Something went wrong while trying to fetch from the api.")
350c68a40288fbc14c1ddfb452a18a4caa461147
278,712
def get_tables(client, dataset):
    """
    Get the names of all tables in a bigquery dataset.

    client: bigquery connection
    dataset: a connected bigquery dataset

    Returns a list of all tables in the dataset
    """
    return [table.table_id for table in client.list_tables(dataset)]
bc4baca1a5f2609996b6458348fcf19746c13db4
649,335