import math
def correct_bits_length(bits_expected, number):
"""
Checks if the number has sufficient bits
Keyword arguments:
bits_expected -- Number of bits expected.
number -- Decimal number
Returns:
True if equal False otherwise
"""
length_expected = int(math.log10(2 ** bits_expected))
actual_length = int(math.log10(number))
return length_expected == actual_length | 129cbfd25052fa1f0227573c6a7ffd0a7b150bfe | 229,060 |
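A minimal usage sketch for the snippet above; the expected values follow from the int(log10(...)) comparison:

print(correct_bits_length(10, 5000))  # True: int(log10(1024)) == int(log10(5000)) == 3
print(correct_bits_length(10, 999))   # False: int(log10(999)) == 2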
import re
def remove_zero_padded_dates(date_string):
"""Mimic %-d on unsupported platforms by trimming zero-padding
For example:
January 01 2018
Becomes:
January 1 2018
The regex finds a space or a hyphen followed by a zero-padded
digit, and replaces with the symbol (space or hyphen) and digit.
"""
return re.sub(r'([ -])0(\d)', r'\1\2', date_string) | 100b06eee8c756f289be7e457be67d2334160a6c | 612,353 |
from datetime import datetime
def parse_date_parts(month, year):
"""
Given a month string and a year string from publisher data, parse apart the month, day and year and create
a standard date string that can be used as input to VIVO
:param month: string from publisher data. May be text such as 'JUN' or 'Jun 15' with day number included
:param year: string of year such as '2015'
:return: date string in isoformat
"""
month_numbers = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12,
'SUM': 6, 'FAL': 9, 'WIN': 12, 'SPR': 3, '': 1}
if ' ' in month:
k = month.find(' ')
month_name = month[0:k]
month_day = month[k + 1:]
elif '-' in month:
k = month.find('-')
month_name = month[0:k]
month_day = '1'
else:
month_name = month
month_day = '1'
month_number = month_numbers[month_name.upper()]
date_value = datetime(int(year), month_number, int(month_day))
return date_value.isoformat() | 29b5a162b851b51d1bd9c63c9996c5d3577263f2 | 411,266 |
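A few illustrative calls for parse_date_parts, covering the day-included, month-range, and season cases:

print(parse_date_parts('JUN 15', '2015'))   # '2015-06-15T00:00:00'
print(parse_date_parts('JAN-FEB', '2015'))  # '2015-01-01T00:00:00' (first month of the range, day 1)
print(parse_date_parts('SPR', '2015'))      # '2015-03-01T00:00:00' (seasons map to a representative month)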
def score_1(game, player): # 82.14%
"""
Heuristics computing score using #player moves - k * #opponent moves
:param game: game
:param player: player
:return: score
"""
if game.is_winner(player) or game.is_loser(player):
return game.utility(player)
opponent = game.get_opponent(player)
player_moves = game.get_legal_moves(player)
opponent_moves = game.get_legal_moves(opponent)
# return float(len(player_moves) - len(opponent_moves)) # 72.86%
# return float(len(player_moves) - 2 * len(opponent_moves)) # 79.29%
# return float(len(player_moves) - 3 * len(opponent_moves)) # 79.29%
# return float(len(player_moves) - 4 * len(opponent_moves)) # 79.29%
# return float(len(player_moves) - 5 * len(opponent_moves)) # 80.71%
# return float(len(player_moves) - 6 * len(opponent_moves)) # 80.71%
return float(len(player_moves) - 7 * len(opponent_moves)) | 3995237f5d5474660c752c308e1077aad1743d06 | 700,898 |
from typing import List
def whozit_loop(n: int) -> List[int]:
"""This function takes an integer, loop from 0 through to the integer, and returns a list of the numbers containing 3 using for-loop."""
result = []
for i in range(n):
if '3' in str(i):
result.append(i)
return result | 985dc074f6aac5379b3626877d20a4b996347965 | 453,976 |
import re
def is_canonical(version):
"""Return True if `version` is a PEP440 conformant version."""
    match = re.match(
        r'^([1-9]\d*!)?(0|[1-9]\d*)'
        r'(\.(0|[1-9]\d*))*'
        r'((a|b|rc)(0|[1-9]\d*))?'
        r'(\.post(0|[1-9]\d*))?'
        r'(\.dev(0|[1-9]\d*))?$', version)
return match is not None | df3f693aed625c9e1edbf76085571f8f022330d7 | 351,759 |
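A quick sanity check of is_canonical against a few PEP 440 version strings (local version labels such as '1.0+abc' are not matched by this pattern):

for v in ('1.0', '0.10.2', '2!1.0.post1', '1.0a1.dev3'):
    print(v, is_canonical(v))  # all True
for v in ('v1.0', '1.01', '1.0+abc'):
    print(v, is_canonical(v))  # all False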
def validateInputs(input_dict):
    """
    Checks that all necessary inputs to post request are present
    ----------
    Parameters
    ----------
    input_dict: dictionary received from post request
    Returns
    -------
    1 if there are missing keys
    0 if all keys are present
    """
    required_keys = ['username', 'processing', 'filename']
    for key in required_keys:
        if key not in input_dict:
            return 1
    return 0 | 91d50c7a55132a60861da9ecaba0f0e3da4a8efd | 360,592 |
def _mangle_name(internal_name, class_name):
"""Transform *name* (which is assumed to be an "__internal" name)
into a "_ClassName__internal" name.
:param str internal_name: the assumed-to-be-"__internal" member name
:param str class_name: the name of the class where *name* is defined
:return: the transformed "_ClassName__internal" name
:rtype: str
"""
return "_%s%s" % (class_name.lstrip('_'), internal_name) | 6a97a729437e08f510f2eefc8210c8063d1648a5 | 398,238 |
import torch
def sample_noises(size):
"""
Sample noise vectors (z).
"""
return torch.randn(size) | edce1ff6d618c4e569d725fff4dce81eb9c643d5 | 617,561 |
def pairs_generator(rng):
"""
Creates Generator of ordered pairs
:param rng : Range of the interval, e.g. 3 means pairs (0,1),(0,2),(1,2)
:return:
"""
return ((i, j) for i in range(rng) for j in range(rng) if i < j) | 1d3eb5f1a0a06aeae6df32c222c5321539620da4 | 606,876 |
def run_wsgi(app, environ):
"""Execute a wsgi application, returning (body, status, headers)."""
output = {}
def start_response(status, headers, exc_info=None):
output['status'] = status
output['headers'] = headers
body = app(environ, start_response)
return body, output['status'], output['headers'] | 4f4e2797ebd8f869797458a79df4239096144ff3 | 232,582 |
import pytz
from datetime import datetime
def get_utctimestamp(thedate=None, fmt='%Y-%b-%d %H:%M:%S'):
"""
Returns a UTC timestamp for a given ``datetime.datetime`` in the
specified string format - the default format is::
YYYY-MMM-DD HH:MM:SS
"""
d = thedate.astimezone(pytz.utc) if thedate else datetime.utcnow()
return d.strftime(fmt) | 5cc3650a466a00b542762e8521a984609cfa1b1a | 328,775 |
def get_security_group_id(event):
"""
Pulls the security group ID from the event object passed to the lambda from EventBridge.
Args:
event: Event object passed to the lambda from EventBridge
Returns:
string: Security group ID
"""
return event.get("detail").get("requestParameters").get("groupId") | 8f19704efb2c2b2f2dd99d0c07c95c88948a47a2 | 189,441 |
import tempfile
def downloadDataFromBucket(bucket, fileName):
""" Download a single NEXRAD radar datum.
Args:
bucket: A NOAA NEXRAD level 2 radar data bucket.
fileName: A filename formatted as follows :
'YYYY/MM/DD/KLMN/KLMNYYYYMMDD_HHMMSS_V06.gz' or
'YYYY/MM/DD/KLMN/KLMNYYYYMMDD_HHMMSS_V03.gz'
Returns:
Temporary version of NEXRAD file.
"""
s3key = bucket.get_key(fileName)
localfile = tempfile.NamedTemporaryFile()
s3key.get_contents_to_filename(localfile.name)
return localfile | 6cc214615f896af7bec2833796dc2671dd6b61c7 | 498,438 |
def mergeHeight(feet, inches):
"""
Takes two integer values, feet and inches, and calculates
the total height in inches.
"""
return (feet * 12) + inches | f35c850454b8a458d3c9e33038003aff2dc8ec7d | 422,704 |
def basename(path):
"""Return the file name portion of a file path."""
return path.split("/")[-1] | 15bc5f9471478300dc946621bc525077216230fa | 610,234 |
def unpack(value, number=2, default=None):
"""Unpack given `value` (item/tuple/list) to `number` of elements.
Elements from `value` goes first, then the rest is set to `default`."""
if not isinstance(value, list):
if isinstance(value, tuple):
value = list(value)
else:
value = [value]
assert len(value) <= number
for _ in range(number - len(value)):
value.append(default)
return value | 9f5ec4f64bd81d1a656cdaf8b3e9cfe5ce4f8f6e | 569,767 |
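A minimal usage sketch for unpack, showing scalar, tuple, and explicit-default inputs:

print(unpack('a'))                # ['a', None]
print(unpack((1, 2, 3), 4))       # [1, 2, 3, None]
print(unpack([1], 3, default=0))  # [1, 0, 0]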
def normalize_column_to_length(col, desired_count):
"""Given the value(s) for a column, normalize to a desired length.
If `col` is a scalar, it's duplicated in a list the desired number of
times. If `col` is a list, it must have 0, 1, or the desired number of
elements, in which cases `None` or the single element is duplicated, or
the original list is returned.
"""
desired_count = max(desired_count, 1)
if isinstance(col, list) and len(col) == desired_count:
return col
elif isinstance(col, list):
assert len(col) in (0, 1), (
'Unexpectedly got a row with the incorrect number of '
'repeated values.')
return (col or [None]) * desired_count
else:
return [col] * desired_count | befb5bffb7f9bdc5c8bb0a69978184eee3f2dff2 | 239,222 |
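A minimal usage sketch for normalize_column_to_length, covering the scalar, empty-list, single-element, and already-normalized cases:

print(normalize_column_to_length('x', 3))     # ['x', 'x', 'x']
print(normalize_column_to_length([], 2))      # [None, None]
print(normalize_column_to_length([5], 2))     # [5, 5]
print(normalize_column_to_length([1, 2], 2))  # [1, 2]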
def format_bids_name(*args):
"""
write BIDS format name (may change this later)
:param args: items to join
:return: name
"""
return ("_").join(args) | 1f8e2ecbf3609093668ad153e75663fe6e3f3f76 | 513,146 |
import torch
def bluebert_load_model(bluebert_model_path: str):
"""
Load fine-tuned Bluebert model.
:param bluebert_model_path: directory with saved model
:return: fine-tuned Bluebert Transformer model
:return device: CPU vs GPU definition for torch
"""
if torch.cuda.is_available():
device = torch.device("cuda")
print('There are ', torch.cuda.device_count(), ' GPU(s) available.') # noqa: T001
print('We will use the GPU:', torch.cuda.get_device_name(0)) # noqa: T001
else:
print('No GPU available, using the CPU instead.') # noqa: T001
device = torch.device("cpu")
model = torch.load(bluebert_model_path, map_location=device)
model.to(device)
return model, device | 2f783a4ade34e67f68bf7a175736c2d7a600fb9c | 153,884 |
def _get_az_function_def(full_command, command_verb, arguments, command_doc):
"""Given a function name, arguments, and doc,returns a formatted string function def."""
if command_doc:
function_def = f"""
def {command_verb}({arguments}):
'''
{command_doc}
'''
return _call_az("az {full_command}", locals())
"""
else:
function_def = f"""
def {command_verb}({arguments}):
return _call_az("az {full_command}", locals())
"""
return function_def | b7ff28aa3336c62f2a87fa0a7d9207c739d15817 | 597,360 |
def check_is_right(name):
"""
Checks if the name belongs to a 'right' sequence (/2). Returns True or
False.
Handles both Casava formats: seq/2 and 'seq::... 2::...'
"""
if ' ' in name: # handle '@name 2:rst'
name, rest = name.split(' ', 1)
if rest.startswith('2:'):
return True
elif name.endswith('/2'): # handle name/2
return True
return False | 1dd9a8edd79fbb3e83038ec15fdb0f02d547acb1 | 383,083 |
def add_one(series, config):
""" Add a count of 1 to every count in the series """
return [ ct+1 for ct in series ] | 3f764718af04b93cd2d5b50ce6a78207bea9e387 | 563,276 |
import re
def remove_tag_and_contents(s, tag=None, tags=None):
"""
>>> remove_tag_and_contents('hi there')
'hi there'
>>> remove_tag_and_contents('<p>hi</p> <style>p {font-weight: 400;}</style><p>there</p>', tag='style')
'<p>hi</p> <p>there</p>'
>>> remove_tag_and_contents('<span class="foo">hi there</span>', tag='span')
''
>>> remove_tag_and_contents('<p>hi</p> <style>p {font-weight: 400;}</style><p>there</p>', tags=('p', 'style'))
' '
>>> remove_tag_and_contents('<p>hi <span>there</span></p> <style>p {font-weight: 400;}</style><p>cat</p>', tags=('span', 'style'))
'<p>hi </p> <p>cat</p>'
>>> remove_tag_and_contents('<p>hi <span class="woot">there</span></p> <style>p {font-weight: 400;}</style><p>cat</p>', tags=('span', 'style'))
'<p>hi </p> <p>cat</p>'
>>> remove_tag_and_contents('<p>Hi There<object classid="clsid:38481807-CA0E-42D2-BF39-B33AF135CC4D" id=ieooui></object></p>', tag='object')
'<p>Hi There</p>'
>>> remove_tag_and_contents('<p>Hi </object>there</p>', tag='object')
'<p>Hi there</p>'
>>> remove_tag_and_contents('<p>Hi <br/>there</p>', tag='br')
'<p>Hi there</p>'
"""
if tag:
tags = [tag]
if isinstance(tags, (list, tuple)):
for t in tags:
# Tries to match a normal tag structure
s = re.sub(pattern=r'<{tag}.*?>.*?</{tag}>'.format(tag=t), repl='', string=s)
# Match any hanging opening or closing versions
s = re.sub(pattern=r'</{tag}[^>]*>'.format(tag=t), repl='', string=s)
s = re.sub(pattern=r'<{tag}[^>]*/ *>'.format(tag=t), repl='', string=s)
return s | 9c6506b39ff6f926cf9b03f691bd1b4ecbed6c4a | 39,805 |
def parse_suppress_errors(params):
"""Returns a list with suppressed error codes."""
val = params.get('suppressErrors', None)
if not val:
return []
return val.split('|') | 10161322d820f009b2b62e8164d22671fcea665f | 283,331 |
import json
def uglify(data):
""" return a string of compressed json text """
return json.dumps(data, separators=(',', ':')) | b704ddc7ce82da15453ab54e3dd58e68649da0f6 | 131,458 |
def armstrong(some_int: int) -> bool:
"""
Accepts an int
Returns whether or not int is an armstrong number
:param some_int:
:return:
"""
string_rep = str(some_int)
sum_val = 0
    for digit in string_rep:
        sum_val += int(digit) ** len(string_rep)  # exponent is the digit count
return some_int == sum_val | dd0c2b3533e77c29330d750826b328c2b33b2460 | 44,153 |
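A quick check of armstrong; these expected values assume the digit-count exponent used in the loop above:

for n in (153, 370, 9474, 9475):
    print(n, armstrong(n))  # 153 True, 370 True, 9474 True, 9475 False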
def add_id_to_dict(doc):
""" Adds the document's id to the document's fields dictionary.
"""
full_dict = doc.to_dict()
full_dict['id'] = doc.id
return full_dict | 3626ee822817fde648e46fcd6862dc689cc20c5a | 679,346 |
def calculate_sum_dice(die1_value, die2_value):
"""
    Add up the values of the first two dice
    die1_value: The first random value
    die2_value: The second random value
    Returns: The sum of the two dice values (int)
"""
sum_dice = int(die1_value) + int(die2_value)
return sum_dice | 77589800f74744dc4119e5d32d9c9a9e7aa4e2f3 | 227,848 |
import logging
def validate_config(config):
"""
Validates given *OpenColorIO* config.
Parameters
----------
config : Config
*OpenColorIO* config to validate.
Returns
-------
bool
Whether the *OpenColorIO* config is valid.
"""
try:
config.validate()
return True
except Exception as error:
logging.critical(error)
return False | 8248d3e94ead22672533908bb7e91a78decaef41 | 287,575 |
import random
def make_id(stringLength=10):
"""Create an id with given length."""
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
return "".join(random.choice(letters) for i in range(stringLength)) | f2b75e9bedd706b8a7901ffb8c8bae5e4f2cbdbb | 414,605 |
def make_nofdel(name):
"""Make a deleter for `property()`'s `fdel` param that raises
`AttributeError`
Args:
name (str): property name
Raises:
AttributeError: always raises exception
Returns:
function: A deleter function
"""
def fdel(self):
raise AttributeError("Property '%s' is not deletable by %r"
% (name, self,))
return fdel | 2e44f43f48a6a22211bc7e268fafa7ff84cc3d66 | 77,375 |
def read_sheet(spreadsheet_id, google, row=0):
"""
Returns the content of the entire sheet. If a row number above 0 is
specified, only returns the contents of that row.
"""
if row > 0:
return google.values().get(
spreadsheetId=spreadsheet_id,
range="Synced-Action-Network-data!A{}".format(row),
).execute().get('values', [])
    return google.values().get(
        spreadsheetId=spreadsheet_id,
        range="Synced-Action-Network-data",
    ).execute().get('values', []) | a293db85f3b2ccefa5ee211b363867edac353b8e | 266,312 |
import requests
def ct_get_links(link, platforms='facebook', count=100, start_date= None,
end_date=None, include_history=None, include_summary='false',
offset = 0, sortBy = 'date', api_token=None):
""" Retrieve a set of posts matching a certain link.
Args:
link (str): The link to query by. Required.
platforms (str, optional): The platforms from which to retrieve links. This value can be comma-separated.
options: facebook, instagram, reddit. Defaults to 'facebook'.
count (int, optional): The number of posts to return. Defaults to 100. options [1-100]
start_date (str, optional): The earliest date at which a post could be posted. Time zone is UTC.
Format is “yyyy-mm-ddThh:mm:ss” or “yyyy-mm-dd”
(defaults to time 00:00:00).
end_date (str, optional): The latest date at which a post could be posted.
Time zone is UTC. Format is “yyyy-mm-ddThh:mm:ss”
or “yyyy-mm-dd” (defaults to time 00:00:00).
Defaults to "now".
include_history (str, optional): Includes timestep data for growth of each post returned.
Defaults to null (not included). options: 'true'
include_summary (str, optional): Adds a "summary" section with AccountStatistics for
each platform that has posted this link. It will look beyond
the count requested to summarize across the time searched.
Requires a value for startDate.
Defaults to false. options: 'true' , 'false'
offset (int, optional): The number of posts to offset (generally used for pagination).
Pagination links will also be provided in the response.
Defaults to 0. options >= 0
sortBy (str, optional): The method by which to order posts (defaults to the most recent).
If subscriber_count, a startDate is required.
Defaults to 'date'. options: 'subscriber_count' , 'total_interactions'
api_token (str, optional): you can locate your API token via your crowdtangle dashboard
under Settings > API Access.
Raises:
ValueError: "link was empty"
ValueError: "api_token variable is empty"
Returns:
[dict]: The Response contains both a status code and a result. The status will always
be 200 if there is no error. The result contains an array of post objects and
a pagination object with URLs for both the next and previous page, if they exist
Example:
ct_get_links(link= 'http://www.queenonline.com/', platforms='facebook',
start_date='2019-01-01', include_history = 'true',
sortBy = 'date', api_token="AKJHXDFYTGEBKRJ6535")
"""
# api-endpoint
URL_BASE = "https://api.crowdtangle.com/links"
# defining a params dict for the parameters to be sent to the API
PARAMS = {'link': link, 'count': count,
'token': api_token, 'platforms': platforms,
'includeSummary': include_summary , 'offset': offset,
'sortBy': sortBy
}
# add params parameters
if start_date:
PARAMS['startDate'] = start_date
if end_date:
PARAMS['endDate'] = end_date
if include_history == 'true':
PARAMS['includeHistory'] = 'true'
# sending get request and saving the response as response object
r = requests.get(url=URL_BASE, params=PARAMS)
if r.status_code != 200:
out = r.json()
print(f"status: {out['status']}")
print(f"Code error: {out['code']}")
print(f"Message: {out['message']}")
return r.json() | ad718c9768617eb2f81731aeb0cd0fa8ebf7e9da | 502,727 |
def extract_some_key_val(dct, keys):
"""
Gets a sub-set of a :py:obj:`dict`.
:param dct: Source dictionary.
:type dct: :py:obj:`dict`
:param keys: List of subset keys, which to extract from ``dct``.
:type keys: :py:obj:`list` or any iterable.
:rtype: :py:obj:`dict`
"""
edct = {}
for k in keys:
v = dct.get(k, None)
if v is not None:
edct[k] = v
return edct | 80dff136ada8cfd754e1a02423e7eef364223a48 | 28,013 |
def bdev_compress_delete(client, name):
"""Delete compress virtual block device.
Args:
name: name of compress vbdev to delete
"""
params = {'name': name}
return client.call('bdev_compress_delete', params) | 0ccf183c0f6d97b8e6fdcfa37b7f3c3692967a8a | 596,526 |
def rectangle_dot_count(vertices):
""" Count rectangle dot count include edge """
assert len(vertices) == 2
width = abs(vertices[0][0] - vertices[1][0])
height = abs(vertices[0][1] - vertices[1][1])
dot_count = (width + 1) * (height + 1)
return dot_count | 2e330a5137482e7bf1eb2396936c61a0497bbe62 | 640,267 |
import math
def categorize(distance: float) -> int:
"""Distance binning method to be referenced across data analysis files and classifiers.
Args:
distance (float): The premeasured distance, in meters.
Returns:
The floor of the given distance amount.
"""
return math.floor(distance) | 0014f2096131f03e6c0619e7d8a3ad9526d44fd1 | 42,606 |
import torch
def dot(A, B, dim=-1):
"""
Computes the dot product of the input tensors along the specified dimension
Parameters
----------
A : Tensor
first input tensor
B : Tensor
second input tensor
    dim : int (optional)
        dimension along which the dot product is computed (default is -1, the last dimension)
Returns
-------
Tensor
the tensor containing the dot products
"""
return torch.sum(A*B, dim, keepdim=True) | b8859730dec5987cf566f6292b66d5087b230e97 | 681,186 |
import socket
def is_connectable(port):
"""Trys to connect to the server to see if it is running."""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("localhost", port))
socket_.close()
return True
except socket.error:
return False | b5d9e6bd1287a4af497d1124681e6be9533993a5 | 329,604 |
def _setup_residual_graph(G, weight):
"""Setup residual graph as a copy of G with unique edges weights.
The node set of the residual graph corresponds to the set V' from
the Baswana-Sen paper and the edge set corresponds to the set E'
from the paper.
This function associates distinct weights to the edges of the
residual graph (even for unweighted input graphs), as required by
the algorithm.
Parameters
----------
G : NetworkX graph
An undirected simple graph.
weight : object
The edge attribute to use as distance.
Returns
-------
NetworkX graph
The residual graph used for the Baswana-Sen algorithm.
"""
residual_graph = G.copy()
# establish unique edge weights, even for unweighted graphs
for u, v in G.edges():
if not weight:
residual_graph[u][v]['weight'] = (id(u), id(v))
else:
residual_graph[u][v]['weight'] = (G[u][v][weight], id(u), id(v))
return residual_graph | 49c68ad977beb9edc695993cc1fe7fc235f1d69b | 464,774 |
def named_crs(epsg):
"""Construct a named CRS dict given an EPSG code
according to the GeoJSON spec.
"""
return {
'type': 'name',
'properties': {
'name': 'urn:ogc:def:crs:EPSG::{}'.format(epsg)
}
} | 552147bb7a5646f91913deb2bc33181c82d579d4 | 413,681 |
def object_from_args(args):
"""
Turns argparser's namespace into something manageable by an external library.
Args:
args: The result from parse.parse_args
Returns:
A tuple of a dict representing the kwargs and a list of the positional arguments.
"""
return dict(args._get_kwargs()), args._get_args() | e48c0a8a49e4fe106d30d37f03c11a07e479cfc3 | 630,876 |
def read_maze(maze_file):
""" (file open for reading) -> list of list of str
Return the contents of maze_file in a list of list of str,
where each character is a separate entry in the list.
"""
res = []
for line in maze_file:
maze_row = [ch for ch in line.strip()]
res.append(maze_row)
return res | 2084ac891012932774d46d507f550e8070e3cc47 | 42,814 |
def combinedJunctionDist(dist_0, dist_1):
"""Computes the combined genomic distance of two splice junction ends
from the closest annotated junctions. In essence, it is finding the
size indel that could have created the discrepancy between the
reference and transcript junctions.
Examples ('|' character respresents end of exon):
Reference: ----->| |<-----
Transcript: ----->| |<-----
dist_0 = -2, dist_1 = +2, combined dist = 4
Reference: ----->| |<-----
Transcript: ----->| |<-----
dist_0 = 0, dist_1 = +2, combined dist = 2
Reference: ----->| |<-----
Transcript: ----->| |<-----
dist_0 = +1, dist_1 = +4, combined dist = 3
"""
# If dist_0 and dist_1 have different signs, the combined distance is
# the sum of their absolute values
if dist_0*dist_1 <= 0:
combined_dist = abs(dist_0) + abs(dist_1)
else:
combined_dist = abs(abs(dist_0) - abs(dist_1))
return combined_dist | af2a0b0d880f29afb097215a55dfef8426c57556 | 81,005 |
def _create_youtube_video_asset(
client, customer_id, youtube_video_id, youtube_video_name
):
"""Creates a asset with the given YouTube video ID and name.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
youtube_video_id: a str of a YouTube video ID.
youtube_video_name: a str to use for the name of the video asset.
    Returns:
        the resource name (str) of the newly created video asset.
"""
asset_operation = client.get_type("AssetOperation")
asset = asset_operation.create
asset.name = youtube_video_name
asset.type_ = client.get_type("AssetTypeEnum").AssetType.YOUTUBE_VIDEO
asset.youtube_video_asset.youtube_video_id = youtube_video_id
asset_service = client.get_service("AssetService")
response = asset_service.mutate_assets(
customer_id=customer_id, operations=[asset_operation]
)
resource_name = response.results[0].resource_name
print(
"A new YouTube video asset has been added with resource name: "
f"'{resource_name}'"
)
return resource_name
# [END add_local_campaign_5] | 459555ea8e67a5a9c8a568e108ec14411ed80692 | 594,061 |
def list_resources(client, resource_group_name=None):
"""
List all Azure Cognitive Services accounts.
"""
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list() | cf013768ed6ed613ef5010b3d7ad8229168cd774 | 462,526 |
def valid_gain(gain):
"""
    Use this to parse an argument as if it were a float and get back a valid gain as a float.
Raises ValueError if passed an un-parse-able value.
"""
try:
my_gain = round(float(gain), 1)
    except TypeError:
        raise ValueError("Could not parse value into a valid float argument (invalid type).")
    except ValueError:
        raise ValueError("Could not parse value into a valid float argument.")
if my_gain > 63.5 or my_gain < 0:
raise ValueError("Gain outside allowed range of 0 to 63.5")
return my_gain | 2dd48252812a408e8cc55cd0cbfc768096ca6d81 | 617,067 |
def verify_filetype(filename: str, valid_filetypes=["jpeg", "png", "jpg"]) -> bool:
"""
Helper function which determines the filetype of
a file based on it's filename
valid filetypes = ["jpeg", "png", "jpg"]
Parameters:
filename (str)
Returns:
True if filetype is valid
"""
if filename.split(".")[-1] in valid_filetypes:
return True
else:
return False | 115b5b1c77a5cf5a116fa86aa53c9ac147a56e42 | 547,806 |
import requests
import logging
def post_peripheral(api_url: str, body: dict) -> dict:
""" Posts a new peripheral into Nuvla, via the Agent API
:param body: content of the peripheral
:param api_url: URL of the Agent API for peripherals
:return: Nuvla resource
"""
try:
r = requests.post(api_url, json=body)
r.raise_for_status()
return r.json()
    except (requests.exceptions.RequestException, ValueError):
logging.error(f'Cannot create new peripheral in Nuvla. See agent logs for more details on the problem')
# this will be caught by the calling block
raise | 7da79de1a59973c5325a93ce2e316a6a45239c00 | 253,517 |
from typing import Any
from typing import Union
def check_is_union(data_type: Any) -> bool:
"""Check if `data_type` is based on a `typing.Union`."""
return hasattr(data_type, '__origin__') and \
data_type.__origin__ is Union | ea6da23d864cde40a3862bc12fd0ab20a7ae3ea0 | 219,611 |
def arrival_to_str(arrival):
"""
:param arrival: list (minutes, seconds, stops)
:return: arrival string
example : '4m(3)' for 4 minutes and 3 stops left
"""
arrival_str = "----"
if arrival is not None:
# omit seconds
arrival_str = '{}m({})'.format(arrival[0], arrival[2])
return arrival_str | c897b74d0646e8987ae55758f5a2d77de29972c4 | 278,495 |
import re
def cleanYear(media):
"""
Takes a Show/Movie object and returns the title of it with the year
properly appended. Prevents media with the year already in the title
from having duplicate years. (e.g., avoids situations like
"The Flash (2014) (2014)").
Arguments:
media -- a Show/Movie object
"""
title = ""
# year_regex matches any string ending with a year between 1000-2999 in
# parentheses. e.g. "The Flash (2014)"
    year_regex = re.compile(r".*\([12][0-9]{3}\)$")
title += media.title
if not year_regex.match(media.title):
title += " (" + str(media.year) + ")"
return title | 75bb401f8e09f3ae37473131fc761ea7d56b8f9d | 567,906 |
def versiontuple(v, version_index=None):
    """ convert a version string to a tuple of integers
    argument <v> is the version string; <version_index> gives how many '.'-separated components to return
    example:
    versiontuple("1.7.0") -> tuple([1,7,0])
    versiontuple("1.7.0", 2) -> tuple([1,7])
    versiontuple("1.7.0", 1) -> tuple([1])
    """
    temp_list = map(int, (v.split(".")))
    return tuple(temp_list)[:version_index] | e85d18e9005b0ffa9adb3e7dbb5c01c4fdb4a333 | 88,416 |
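A minimal usage sketch matching the docstring examples above:

print(versiontuple("1.7.0"))     # (1, 7, 0)
print(versiontuple("1.7.0", 2))  # (1, 7)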
from datetime import datetime
from dateutil import tz
def max_time() -> datetime:
"""Returns max time datetime object
Returns
-------
datetime
max time datetime object
"""
return datetime.max.replace(tzinfo=tz.tzutc()) | 7bedef28756a5d4454ae82fda8e250ec74caf303 | 437,302 |
def from_camel_case(name):
"""Convert camel case to snake case.
Function and variable names are usually written in camel case in C++ and
in snake case in Python.
"""
new_name = str(name)
i = 0
while i < len(new_name):
if new_name[i].isupper() and i > 0:
new_name = new_name[:i] + "_" + new_name[i:]
i += 1
i += 1
return new_name.lower() | c6e7184598252a6db1bcaee5d5375969c5c9bd39 | 698,053 |
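A minimal usage sketch for from_camel_case; note that every uppercase letter gets its own underscore, so acronyms are split letter by letter:

print(from_camel_case("parseDateParts"))  # 'parse_date_parts'
print(from_camel_case("HTTPServer"))      # 'h_t_t_p_server'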
import requests
def query_url(url):
""" - Performs HTTP GET to the URL passed in params, then:
* Returns response container if HTTP 200 or 404
* Raise exception for other HTTP response codes. i.e >500 """
server_response = requests.get(url)
if server_response.status_code == 200:
return server_response
elif server_response.status_code == 404:
return server_response
else:
server_response.raise_for_status() | 07ffd112251cf92834de6dbbd65bc0b4ec9bf3ea | 534,579 |
def drop_rows(rows):
"""Drop rows in a DataFrame and reset the index."""
def dropper(data):
return data.drop(rows).reset_index(drop=True)
return dropper | 9edb8eab028c15355f9883fcab31e47018e4da57 | 574,142 |
def extr_hotdays_calc(data, thr_p95):
"""
    Calculate the number of extreme hot days.
    Returns the count of days with daily mean temperature above the
    95th percentile of climatology.
Parameters
----------
data: array
1D-array of temperature input timeseries
thr_p95: float
95th percentile daily mean value from climatology
"""
    xtr_hotdays = (data > thr_p95).sum()
return xtr_hotdays | 3a082ffc5ef62089f8de25747272f3c32ad2409a | 117,352 |
def generate_bounds_for_fragments(x_size, y_size, move_size, image_dimension):
"""
Generate bounds for fragments, for an image of arbitrary size
    Inputs:
    x_size - width of the image
    y_size - height of the image
    move_size - pixels to move (horizontally and vertically) between each step
    image_dimension - width/height in pixels of each square fragment
    Returns:
    a list of 4-tuples, of the format (x_start, y_start, x_end, y_end)
"""
bounds = []
moves_x = (x_size - image_dimension) // move_size
moves_y = (y_size - image_dimension) // move_size
for y in range(moves_y):
for x in range(moves_x):
y_start = y * move_size
x_start = x * move_size
x_end = x_start + image_dimension
y_end = y_start + image_dimension
bounds.append((x_start, y_start, x_end, y_end))
return bounds | 622bfdfb676dc1ded33e5449c8b591a10d8ee2d1 | 252,678 |
def elapsed(sec):
"""
Formatting elapsed time display
"""
mins, rem = int(sec / 60), sec % 60
text = "%.1f" %(rem)
ending = "s"
if mins > 0:
text = "%d:" %(mins) + text
ending = "m"
return text+ending | 3cb61df21df5343473dadfbce80989407b3cdab4 | 80,661 |
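A minimal usage sketch for elapsed:

print(elapsed(32.47))  # '32.5s'
print(elapsed(272.5))  # '4:32.5m'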
def gen_binding(role, members=None, condition=None):
"""Generate the "bindings" portion of an IAM Policy dictionary.
Generates list of dicts which each represent a
storage_v1_messages.Policy.BindingsValueListEntry object. The list will
contain a single dict which has attributes corresponding to arguments passed
to this method.
Args:
role: (str) An IAM policy role (e.g. "roles/storage.objectViewer"). Fully
specified in BindingsValueListEntry.
members: (List[str]) A list of members (e.g. ["user:[email protected]"]). If None,
bind to ["allUsers"]. Fully specified in BindingsValueListEntry.
condition: (Dict) A dictionary representing the JSON used to define a
binding condition, containing the keys "description", "expression", and
"title".
Returns:
(List[Dict[str, Any]]) A Python representation of the "bindings" portion of
an IAM Policy.
"""
binding = {
'members': ['allUsers'] if members is None else members,
'role': role,
}
if condition:
binding['condition'] = condition
return [binding] | 54de43c06a5d0886f1b71bd3a3777c5c843e9838 | 241,178 |
import copy
def copy_exc_info(exc_info):
"""Make copy of exception info tuple, as deep as possible."""
if exc_info is None:
return None
exc_type, exc_value, tb = exc_info
# NOTE(imelnikov): there is no need to copy type, and
# we can't copy traceback.
return (exc_type, copy.deepcopy(exc_value), tb) | 645259c64714053d8c5c9abfe3742352b54b6c68 | 255,389 |
def convert_to_jiant_ep_format(input_path: str):
"""
Converts the TREC-10 Question Classification file into samples for the Question Type Probing task in Jiant format
:return: A list of samples in jiant edge probing format.
"""
DOC_ID = "trec-qt"
samples = []
sample_id = 0
    with open(input_path, encoding="latin-1") as input_file:
        lines = input_file.readlines()
        for line in lines:
            info = {"doc_id": DOC_ID, "q_id": str(sample_id)}
            sample_id += 1
            line = line[:-1]  # strip the trailing newline
            line_split = line.split(" ")  # split the line into whitespace-separated tokens
            label = line_split[0]  # first token is the class label
            text = line_split[1:-1]  # question text, minus the trailing question mark
# create an entry and add to sample list
entry = {"info": info,
"text": " ".join(text),
"targets": [{"span1": [0, len(text)], "label": label}]}
samples.append(entry)
return samples | 3a46cbbc4963175a27acf54f4c6e38f165c9f5b9 | 482,613 |
import logging
def get_recording_delay(distance=1.6, sample_rate=48828, play_from=None, rec_from=None):
"""
Calculate the delay it takes for played sound to be recorded. Depends
on the distance of the microphone from the speaker and on the processors
digital-to-analog and analog-to-digital conversion delays.
Args:
distance (float): distance between listener and speaker array in meters
sample_rate (int): sample rate under which the system is running
play_from (str): processor used for digital to analog conversion
rec_from (str): processor used for analog to digital conversion
"""
n_sound_traveling = int(distance / 343 * sample_rate)
if play_from:
if play_from == "RX8":
n_da = 24
elif play_from == "RP2":
n_da = 30
else:
logging.warning(f"dont know D/A-delay for processor type {play_from}...")
n_da = 0
else:
n_da = 0
if rec_from:
if rec_from == "RX8":
n_ad = 47
elif rec_from == "RP2":
n_ad = 65
else:
logging.warning(f"dont know A/D-delay for processor type {rec_from}...")
n_ad = 0
else:
n_ad = 0
return n_sound_traveling + n_da + n_ad | 7716794d36bc34856866c0ac6b6c811b3713b515 | 192,940 |
def filter_keras_submodules(kwargs):
"""Selects only arguments that define keras_application submodules. """
submodule_keys = kwargs.keys() & {'backend', 'layers', 'models', 'utils'}
return {key: kwargs[key] for key in submodule_keys} | 4f1ed71131b27dfe9e6c424d2dbbebd1a372d920 | 86,933 |
import time
def mongod_wait_for_primary(mongo, timeout=60, sleep_interval=3):
"""Return True if mongod primary is available in replica set, within the specified timeout."""
start = time.time()
while not mongo.admin.command("isMaster")["ismaster"]:
time.sleep(sleep_interval)
if time.time() - start >= timeout:
return False
return True | 926a40611b0202573f1a9dfe07a515f17245eaf7 | 267,105 |
import json
def pretty_print_dictionary(dictionary):
"""Convert an input dictionary to a pretty-printed string
Parameters
----------
dictionary: dict
Input dictionary
Returns
-------
pp: str
The dictionary pretty-printed as a string
"""
dict_as_str = {key: str(val) for key, val in dictionary.items()}
return json.dumps(dict_as_str, indent=2) | 4a30f069f6ae87d968f5865de6df15acb6520c18 | 376,282 |
def train_test_split(tokenized):
"""Returns a train, test tuple of dataframes"""
train = tokenized.query("category != 'title'")
test = tokenized.query("category == 'title'")
return train, test | 749d4c57087422fb2b610bf74ba2909229ef4e12 | 638,079 |
def get_hashtags(tokens):
"""Extract hashtags from a set of tokens"""
hashtags = [x for x in tokens if x.startswith("#")]
return hashtags | ae75465950411e447515a919ed73e303519ed9a6 | 531,876 |
import torch
def reverse_bound_from_rel_bound(batch, rel, order=2):
"""From a relative eps bound, reconstruct the absolute bound for the given batch"""
wavs, wav_lens = batch.sig
wav_lens = [int(wavs.size(1) * r) for r in wav_lens]
epss = []
for i in range(len(wavs)):
eps = torch.norm(wavs[i, : wav_lens[i]], p=order) / rel
epss.append(eps)
return torch.tensor(epss).to(wavs.device) | a8b87a73f4a862b037960baf32ad9565e70cfe84 | 263,425 |
def obb_text2listxy(text):
"""
Purpose: parse x, y coordinates from text of obb(rotbox) annotation in xml
Args:
text: text of obb(rotbox) annotation in xml, "[[x0,y0], [x1,y1], [x2,y2], [x3,y3]]"
Returns: lists of storing x y coordinates,
x: [x0, x1, x2, x3]
y: [y0, y1, y2, y3]
"""
    strList = list(text)[1:-1]  # strip the leading '[' and trailing ']'
# list of storing x y coordinates
x = []
y = []
# double pointers
i = 0
j = 0
while i != len(strList):
if strList[i] == '[':
x_ = ''
j = i+1
while strList[j] != ',':
x_ += strList[j]
j += 1
x.append(int(x_))
i = j
if strList[i] == ' ' and strList[i+1] != '[':
y_ = ''
j = i+1
while strList[j] != ']':
y_ += strList[j]
j += 1
y.append(int(y_))
i = j
i += 1
return x, y | 916a9be1265ee3e1e922703e8ecb1d6c20188bc7 | 435,263 |
def displayhtml (public_key):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key"""
return """<script src='https://www.google.com/recaptcha/api.js'></script>
<div class="g-recaptcha" data-sitekey="%(PublicKey)s"></div>""" % {
'PublicKey' : public_key
} | 84e4bf8c5ef36c1c4213e9eefa35882638578fc4 | 671,671 |
from typing import OrderedDict
def filter_resource_dict_sort(d):
"""
Used to sort a dictionary of resources, tuple-of-strings key and int value,
sorted reverse by value and alphabetically by key within each value set.
"""
items = list(d.items())
keyfunc = lambda x: tuple([-x[1]] + list(x[0]))
return OrderedDict(sorted(items, key=keyfunc)) | 2f0fcf9692fb8be460b972bced8440788033cce7 | 350,338 |
def IsInstanceV2(instance):
"""Returns a boolean indicating if the database instance is second gen."""
return instance.backendType == 'SECOND_GEN' | ca922402314fa4725d7eb03c713e819a08e4d0b9 | 156,741 |
def complement_residues(ligand_list_full, ligand_list_selected):
"""
Given a list of ligands and a list of ligands selected from the previous list, return the ligands included in
the first but not the second list.
Args:
ligand_list_full: A list of ``List[AnalysisActorClass]`` containing the full set of ligands
ligand_list_selected: A list of ``List[AnalysisActorClass]`` containing a subset of the ``ligand_list_full``
Returns:
A list of ``List[AnalysisActorClass]`` containing the complement of the two sets above
"""
drug_names_selected = set([ligand.drug_name for ligand in ligand_list_selected])
returned_list = [ligand for ligand in ligand_list_full if ligand.drug_name not in drug_names_selected]
return returned_list | 3ab914516421cb3d8e44cf7cb66a31a763aaa3e4 | 417,806 |
def match(first_list, second_list, attribute_name):
"""Compares two lists and returns true if in both there is at least one element which
has the same value for the attribute 'attribute_name' """
for i in first_list:
for j in second_list:
if i[attribute_name] == j[attribute_name]:
return True
return False | 2b7c38ef3132c5cb9e693be2995691600ac76ec7 | 14,302 |
import unicodedata
def asciify(string):
"""Convert unicode string to ascii, normalizing with NFKD
Strips away all non-ascii symbols"""
string = unicodedata.normalize('NFKD', string)
return string.encode('ascii', 'ignore').decode('ascii') | e6f61d09e865b3f240a17b66158de68c2dbfc378 | 594,572 |
def get_lomb_lambda(lomb_model):
"""Get the regularization parameter of a fitted Lomb-Scargle model."""
return lomb_model['freq_fits'][0]['lambda'] | 8e4188bf7b4099148a9b10096dc608aa14e3e1af | 126,055 |
def trp(doc,n,pad=['@PAD@']):
"""
Truncates and pads input doc
    Arguments:
    doc: iterable of tokens (each must expose `has_vector`, e.g. spaCy tokens).
    n: target length of the output sequence.
    pad: pad sequence, e.g. ['@PAD@']
Returns:
truncated and padded document
"""
l=[token for token in doc if token.has_vector]
pad_trunc=list(l[:n])+list(pad)*(n-len(l))
    return pad_trunc | f5595ab06828aca44d14fb7793fc9b3707c5b7a9 | 362,936 |
def get_locale_and_nickname(account, message):
"""Returns a locale and nickname for the given account and email message."""
if account:
locale = account.locale or 'en'
nickname = account.nickname or account.email
else:
locale = 'en'
nickname = message.sender
return (locale, nickname) | 2e71ed528a58f483937688559a5da487d288d4d3 | 602,638 |
def int32_to_octets(value):
""" Given an int or long, return a 4-byte array of 8-bit ints."""
return [int(value >> 24 & 0xFF), int(value >> 16 & 0xFF),
int(value >> 8 & 0xFF), int(value & 0xFF)] | 06ea4d1c47b4a99ef9a8ff530b4e5b2d00abd54d | 61,822 |
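A minimal usage sketch, unpacking a 32-bit value into its big-endian bytes:

print(int32_to_octets(0xDEADBEEF))  # [222, 173, 190, 239]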
def ensure_all_alternatives_are_chosen(alt_id_col, choice_col, dataframe):
"""
Ensures that all of the available alternatives in the dataset are chosen at
least once (for model identification). Raises a ValueError otherwise.
Parameters
----------
alt_id_col : str.
Should denote the column in `dataframe` that contains the alternative
identifiers for each row.
choice_col : str.
Should denote the column in `dataframe` that contains the ones and
zeros that denote whether or not the given row corresponds to the
chosen alternative for the given individual.
dataframe : pandas dataframe.
Should contain the data being used to estimate the model, as well as
the headers denoted by `alt_id_col` and `choice_col`.
Returns
-------
None.
"""
all_ids = set(dataframe[alt_id_col].unique())
chosen_ids = set(dataframe.loc[dataframe[choice_col] == 1,
alt_id_col].unique())
non_chosen_ids = all_ids.difference(chosen_ids)
if len(non_chosen_ids) != 0:
msg = ("The following alternative ID's were not chosen in any choice "
"situation: \n{}")
raise ValueError(msg.format(non_chosen_ids))
return None | 52ad5254951a1ac09a8dc20136494587338063cb | 112,245 |
def _generate_end_sequence(leds: int) -> bytes:
"""
Generate a byte sequence, that, when sent to the APA102 leds, ends a
led update message.
:param leds: number of chained LEDs.
:return: terminating byte sequence.
"""
edges_required = ((leds - 1) if leds else 0)
bytes_required = 0
output = bytearray()
# Each byte provides 16 clock edges, each LED except the first requires
# one clock edge to latch in the newly sent data.
if edges_required:
bytes_required = (((edges_required // 16) + 1) if (edges_required % 16)
else edges_required // 16)
for i in range(bytes_required):
output.append(0x00)
return bytes(output) | e5054bfb928a281c660ecdd14450ce189e3b520d | 123,701 |
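A few boundary checks for _generate_end_sequence; each zero byte supplies 16 clock edges, and n chained LEDs need n - 1 latch edges:

print(len(_generate_end_sequence(1)))   # 0: a single LED needs no extra edges
print(len(_generate_end_sequence(2)))   # 1: one byte covers the single required edge
print(len(_generate_end_sequence(33)))  # 2: two bytes cover the 32 required edges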
def sort_feature_value_pairs_list(feature_value_pairs_list):
""" Sorts feature value pairs list.
Args:
feature_value_pairs_list: List of feature value pairs (list in itself)
Returns:
A Feature value pairs list sorted by score descending.
"""
sorted_list = [
sorted(x, key=lambda x: abs(x[1]), reverse=True)
for x in feature_value_pairs_list
]
return sorted_list | b11f27c27ba7cd1195adcde0f6c2cdc1cf4a35dd | 367,719 |
def get_total_negative(fp, tn):
"""
This functions returns the number of total negatives.
:param fp: Number of false positives
:type fp: int
:param tn: Number of true negatives
:type tn: int
:return: Number of total negative
:rtype: int
"""
return fp + tn | 296613c0c6c8a8d8a00b9b38abcb9c005a25d702 | 390,165 |
def ly_to_m(ly):
"""
Converts the input distance (or velocity) of the input from light years to meters.
"""
return ly * 9.4607 * 10**15 | 1f2a4e53c9936601901c03ceaab0b258a0059200 | 209,669 |
def valid_index(index, list_dim):
"""
Returns if a given index is valid
considering a list of dimensions list_dim.
"""
for i, ind in enumerate(index):
if not (0<= ind < list_dim[i]):
return False
return True | d54f01fc5e51d618ba8f15c2c9b93b9182b95fce | 354,059 |
def relative_url(value, field_name, url_encode=None):
"""
    This snippet comes from the website simpleisbetterthancomplex.com and was written by Vitor Freitas as part of his
    article “Dealing with querystring parameters” (published 22/08/2016, accessed 12/05/2016).
:param value: the parameter to add to the existing query string
:param field_name: the name of the parameter
:param url_encode: the existing query string
:return: a new querystring with the new parameter.
"""
url = '?{}={}'.format(field_name, value)
if url_encode:
querystring = url_encode.split('&')
filtered_querystring = filter(lambda p: p.split('=')[0] != field_name, querystring)
encoded_querystring = '&'.join(filtered_querystring)
url = '{}&{}'.format(url, encoded_querystring)
return url | 81eb2212afbea3e95b9e9e63ac324ec421673f25 | 196,931 |
def snake_to_camel(field_name: str) -> str:
"""Convert snake_case to camelCase"""
words = field_name.split('_')
if len(words) < 2:
return field_name
first_word = words[0]
other_words = words[1:]
return first_word + ''.join(word.capitalize() for word in other_words) | 0adf78aa4a10d15c17038eb4a2f58eead2d28020 | 176,670 |
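A minimal usage sketch for snake_to_camel:

print(snake_to_camel("field_name"))       # 'fieldName'
print(snake_to_camel("a_long_field_id"))  # 'aLongFieldId'
print(snake_to_camel("single"))           # 'single' (single words pass through unchanged)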
import math
def num_tiles_not_in_position(state):
"""
Calculates and returns the number of tiles which are not in their
final positions.
"""
n = len(state)
total = 0
for row in state:
for tile in row:
try:
y = int(math.floor(float(tile)/n - (float(1)/n)))
x = (tile - 1) % n
except ValueError: #blank tile
continue
if row.index(tile) - x != 0 or state.index(row) - y != 0:
total += 1
return total | 102d2eb616f8b459fc31e68788f355440bac97e0 | 9,912 |
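A worked 8-puzzle example for num_tiles_not_in_position (the blank may be any non-numeric entry):

state = [[1, 2, 3],
         [4, 5, 6],
         [8, 7, 'b']]  # 'b' is the blank tile
print(num_tiles_not_in_position(state))  # 2: tiles 7 and 8 are swapped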
def normalize_images(fixed_image, moving_image):
"""
    Normalize image intensities by subtracting the joint minimum and dividing by the joint maximum
Note: the function is inplace
fixed_image (Image): fixed image
moving_image (Image): moving image
return (Image, Image): normalized images
"""
fixed_min = fixed_image.image.min()
moving_min = moving_image.image.min()
min_val = min(fixed_min, moving_min)
fixed_image.image -= min_val
moving_image.image -= min_val
moving_max = moving_image.image.max()
fixed_max = fixed_image.image.max()
max_val = max(fixed_max, moving_max)
fixed_image.image /= max_val
moving_image.image /= max_val
return (fixed_image, moving_image) | 48bf06a138bb21dacb1708b92819ce415ad0ab3d | 302,576 |
def extract_appendix_from_fname(fname):
"""
Isolate only the appendix, starting from a full fname
:param fname:
:return:
"""
idx_start_pattern = fname.find('q_')
idx_end_pattern = fname.find('.csv')
appendix = fname[idx_start_pattern:idx_end_pattern]
return appendix | 0299c9ce22f089a85f82d1ed2633e08f0d159d9b | 511,893 |
def getInterestedRange(message_context):
"""Return a (start, end) pair of character index for the match in a MessageContext."""
if not message_context.match:
# whole line
return (0, len(message_context.line))
return (message_context.match.start(), message_context.match.end()) | f173a09a7281bb79f20e7658932d1c7f4e5ddd43 | 50,232 |
def peak_list_blank_annotate(dataframe):
"""Blank annotation function for peak_list_annotate"""
    return bool(dataframe.iloc[0]["analyte_blank_match"]) | 48499e3e26113fe6c996f65aa0ee5d5a28bc5311 | 510,842 |
def p_correct_given_pos(sens, fpr, b):
"""Returns a simple Bayesian probability for the probability
that a prediction is correct, given that the prediction
was positive, for the prevailing sensitivity (sens),
false positive rate (fpr) and base rate of positive
examples.
"""
assert 0 <= sens <= 1, "Sensitivity must be in range [0,1]"
assert 0 <= fpr <= 1, "FPR must be in range [0,1]"
return sens * b / (sens * b + fpr * (1 - b)) | d4402ddb431dc3fe118468eda37576e730465406 | 517,013 |
import hmac
import hashlib
def validate_signature(key, body, signature):
""" Validate the received signature against the secret key
:param str key: secret key
    :param bytes body: raw message body
:param str signature: received signature
:return:
:rtype: bool
"""
signature_parts = signature.split('=')
if signature_parts[0] != "sha1":
return False
generated_sig = hmac.new(str.encode(key), msg=body, digestmod=hashlib.sha1)
return hmac.compare_digest(generated_sig.hexdigest(), signature_parts[1]) | f3522abb59c4ab07bfbeea51cffb68d2b786e714 | 597,837 |
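A hedged usage sketch for validate_signature; the secret and payload are hypothetical, and the signature is computed the same way GitHub-style sha1 webhook signatures are:

import hashlib
import hmac

secret = "my-webhook-secret"       # hypothetical shared secret
payload = b'{"action": "opened"}'  # raw request body bytes
sig = "sha1=" + hmac.new(secret.encode(), payload, hashlib.sha1).hexdigest()
print(validate_signature(secret, payload, sig))             # True
print(validate_signature(secret, payload, "md5=deadbeef"))  # False (wrong scheme)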
def is_layer_inside_image(image, layer):
"""
Return True if the layer is inside the image canvas (partially or completely).
Return False if the layer is completely outside the image canvas.
"""
return ((-image.width < layer.offsets[0] < image.width) and
(-image.height < layer.offsets[1] < image.height)) | 87987e2e8baf4b2e374fc6cbecc2e2196eb3d0f5 | 155,213 |
def feature_types(df):
"""
Get feature types
Args:
df: pd.DataFrame, input data
Returns:
        ftypes: dict mapping feature-type names to lists of column names
"""
ftypes = {}
ftypes['num_cols'] = [str(x) for x in df.select_dtypes(include=['int32', 'int64', 'float32', 'float64']).columns]
ftypes['cat_cols'] = [str(x) for x in df.select_dtypes(include=['object', 'category']).columns]
ftypes['date_cols'] = [str(x) for x in df.select_dtypes(include=['datetime']).columns]
ftypes['bool_cols'] = [str(x) for x in df.select_dtypes(include=['bool']).columns]
return ftypes | 72f6dfebd159a274e9a8eed5c7ccb6da25c6d8c6 | 576,003 |
def create_yaml_workflow_schema_with_workspace(create_yaml_workflow_schema: str) -> str:
"""Return dummy YAML workflow schema with `/var/reana` workspace."""
reana_yaml_schema = f"""
{create_yaml_workflow_schema}
workspace:
root_path: /var/reana
"""
return reana_yaml_schema | e54c9254d489dc694e33a0dd5b6fbe3255c3e660 | 173,832 |