content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
import time
def StringToEpochTime(string):
    """Convert a 'YYYYMMDDhhmm' timestamp string to seconds since the Unix epoch.

    The result is interpreted in the local timezone, as ``time.mktime`` does.
    """
    parsed = time.strptime(string, '%Y%m%d%H%M')
    return time.mktime(parsed)
def npieces(request):
    """Fixture-style helper: the number of collocation pieces is carried in
    ``request.param``."""
    return getattr(request, "param")
def split_region(region):
    """Split a "municipality, province" string.

    Args:
        region: either "<municipality>, <province>" or just "<province>".

    Returns:
        A 2-tuple ``(municipality, province)``; ``municipality`` is ``None``
        when there is no ", " separator (or it sits at position 0).
    """
    if region.find(", ") > 0:
        # Split only on the *last* ", ": the original unbounded rsplit could
        # return more than two items for inputs with several separators,
        # breaking two-element tuple unpacking in callers.
        return tuple(region.rsplit(", ", 1))
    else:
        return None, region
import math
def bucketize(point, bucket_size):
    """Snap ``point`` down to the nearest lower multiple of ``bucket_size``."""
    n_buckets = math.floor(point / bucket_size)
    return n_buckets * bucket_size
import random
def getRandomSeeds(seeds, num):
    """Pick ``num`` distinct seeds at random from ``seeds``.

    Args:
        seeds: sequence of candidate seeds (may contain duplicates).
        num: number of distinct seeds to return.

    Returns:
        A list of ``num`` distinct seed values.

    Raises:
        ValueError: if fewer than ``num`` distinct values are available.
            (The original rejection loop would spin forever in that case.)
    """
    unique_seeds = list(dict.fromkeys(seeds))  # dedupe, keep first-seen order
    if num > len(unique_seeds):
        raise ValueError(
            "requested %d seeds but only %d distinct values available"
            % (num, len(unique_seeds))
        )
    return random.sample(unique_seeds, num)
def fibonacci_tabulation(n):
    """DP (bottom-up tabulation) Fibonacci. O(n) runtime, O(n) space.

    Args:
        n: non-negative Fibonacci index (fib(0) == 0, fib(1) == 1).

    Returns:
        The n-th Fibonacci number.
    """
    # Guard the small cases: the original unconditionally wrote d[1] and
    # d[2], which raised IndexError for n < 2.
    if n < 2:
        return n
    d = [0] * (n + 1)
    d[1] = 1
    d[2] = 1
    for i in range(3, n + 1):
        d[i] = d[i - 1] + d[i - 2]
    return d[n]
def ensembl_genes(request, gene_bulk):
    """Build a lookup from ensembl id to gene object.

    Args:
        request: unused here (kept for fixture-style signature compatibility).
        gene_bulk: iterable of gene dicts, each carrying an 'ensembl_id' key.

    Returns:
        dict mapping ensembl id -> gene dict.
    """
    return {gene['ensembl_id']: gene for gene in gene_bulk}
def file_info_equal(file_info_1, file_info_2):
    """Return true if the two file-infos indicate the file hasn't changed.

    From the slices and inline comments below, each file-info reads as a
    3-tuple of (mtime, size, crc), where the crc may be None and a
    non-existent file is represented by (None, None, None).
    NOTE(review): field order inferred from the comparisons — confirm
    against the code that builds these tuples.
    """
    # Negative matches are never equal to each other: a file not
    # existing is not equal to another file not existing.
    if (None, None, None) in (file_info_1, file_info_2):
        return False
    # Equal if the size and the mtimes match.
    if file_info_1[:2] == file_info_2[:2]:
        return True
    # Even if mtimes don't match, they're equal if the size and the
    # crcs match.  But we have to be careful, since crcs are optional,
    # so we don't do this test if the crcs are None.
    if file_info_1[2] is not None and file_info_1[1:] == file_info_2[1:]:
        return True
    return False
from datetime import datetime
def dt_is_naive(value: datetime) -> bool:
    """Tell whether datetime ``value`` is "naive" (carries no UTC offset).

    Raises:
        TypeError: if ``value`` is not a ``datetime`` instance.
    """
    if isinstance(value, datetime):
        # Same check as 'django.utils.timezone.is_naive' @ Django 2.1.7:
        # a naive datetime has no computable UTC offset.
        return value.utcoffset() is None
    raise TypeError
import functools
def tolist_and_strip_outlist(func):
    """
    Decorator for functions that return a pandas Series: the Series is
    converted to a plain list and whitespace is stripped from each entry.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        series = func(*args, **kwargs)
        return [item.strip() for item in series.tolist()]
    return wrapper
def _format_input_label(input_label):
""" Formats the input label into a valid configuration. """
return {
'name': input_label['name'],
'color': (input_label['color'][1:] if input_label['color'].startswith('#') else input_label['color']),
'description': input_label['description'] if 'description' in input_label else ''
} | 5bd63c54e1cb290be7a208c76ddc86375cc0a646 | 109,626 |
def remove_out_of_range(df, range_list):
    """Set any values out of range to NAN.

    Args:
        df: a wide-format DataFrame (e.g. the result of data_loader).
        range_list: a list of dicts, each with keys 'variable', 'high' and
            'low', e.g. {'variable': 'aao2', 'high': 700., 'low': 10.};
            may be None, in which case df is returned untouched.

    Returns:
        df: dataset with out-of-range values masked to NaN.
    """
    if range_list is None:
        return df
    for bounds in range_list:
        name = bounds["variable"]
        if name not in df.columns:
            continue
        values = df[name]
        out_of_range = (values < bounds["low"]) | (values > bounds["high"])
        df[name] = values.mask(out_of_range)
    return df
def timestamp_to_time(t):
    """Convert a Cassandra timestamp (milliseconds) into seconds since the
    Epoch — the same units Python's time.time() returns."""
    milliseconds = float(t)
    return milliseconds / 1000
def filter_words(text_list, dictionary):
    """Filter a sentence, dropping words that are absent from our dictionary.

    Args:
        text_list: list of words in a sentence.
        dictionary: dictionary of words in the training set. An entry whose
            value is None is treated the same as a missing word (this
            preserves the original ``.get(t) != None`` behavior).

    Returns:
        Filtered list of words in the sentence.
    """
    # 'is not None' replaces the unidiomatic '!= None' comparison; we keep
    # .get() (rather than 'in') so that None-valued entries stay excluded.
    return [word for word in text_list if dictionary.get(word) is not None]
import requests
import json
def post(url, payload):
    """Make an HTTP POST of ``payload`` serialized as JSON and return the
    response object."""
    body = json.dumps(payload)
    headers = {'content-type': 'application/json'}
    return requests.post(url=url, headers=headers, data=body)
def _fixed_length_strings(strings, length=0):
"""Return list of fixed length strings, left-justified and right-padded
with spaces.
:param strings: List of variable length strings
:param length: Length of strings in returned list, defaults to the maximum
length in the original list if set to 0.
:type length: int or None
"""
if length == 0 and strings:
length = max(len(s) for s in strings)
return [s.ljust(length) for s in strings] | 4251e9e5fe8a8a6decb0925bd8a63be4ddd4ade6 | 164,312 |
def deep_flatten(nested_object):
    """Recursively flatten nested lists/tuples/sets into one flat list."""
    flat = []
    for item in nested_object:
        if isinstance(item, (list, tuple, set)):
            flat.extend(deep_flatten(item))
        else:
            flat.append(item)
    return flat
def loadCNFFormula(name):
    """Load a CNF formula in the DIMACS ascii format from
    the file "name" and return it as a list of clauses.

    Returns (V, F)
      V -- highest variable number (taken from the "p" header line)
      F -- list of clauses, each a list of signed variable numbers
    """
    V = 0
    clauses = []
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original leaked the file object on any exception), and
    # iterating the file avoids reading everything into memory first.
    with open(name, "r") as f:
        for line in f:
            s = line.split()
            if not s:
                continue
            if s[0] == "c":
                # comment line: echo it, as before
                print(s)
                continue
            if s[0] == "p":
                V = int(s[2])
            else:
                # clause lines end with a terminating 0, which we drop
                clauses.append([int(v) for v in s[:-1]])
    return (V, clauses)
def stochastic_string(reactant_name, number):
    """Build the stochastic mass-action factor for one reactant.

    For instance, the reaction 2A -> 3A implies the factor A*(A-1)/2; this
    builds that falling-factorial expression for a single reactant, so it
    must be called once per reactant in the reaction.

    Parameters:
        reactant_name (str) = species string involved in the reaction
        number (int) = stoichiometry (number of times it appears)

    Returns:
        str: the mass-action kinetics expression for this species only
        ('' when ``number`` is 0).
    """
    if number <= 0:
        return ''
    parts = [reactant_name]
    for i in range(1, number):
        parts.append(f' * ({reactant_name} - {i})/{i + 1}')
    return ''.join(parts)
def _get_help_lines(content):
"""
Return the help text split into lines, but replacing the
progname part of the usage line.
"""
lines = content.splitlines()
lines[0] = 'Usage: <progname> ' + ' '.join(lines[0].split()[2:])
return lines | 71dec5dec57f257a3fd9ec844b7d9fa727bdafa6 | 650,527 |
def _combine(lhs, rhs):
    """
    Combines a list, tuple, or dict, with another list or tuple or dict
    Build mode struct attributes are tuples by default, but many consumers
    use the 'create_build_mode' function to initialize them to lists, or
    even dictionaries. When combining build modes using the 'extend_build_mode'
    function, this helper function handles the case in which a list is
    extended with a tuple argument, or vice versa
    Args:
        lhs: A tuple, list, or dict
        rhs: A tuple, list, or dict to add to the left hand argument
    Returns:
        A value of the lhs argument's type, with the elements from the rhs
        added to it
    """
    # Empty/None rhs: nothing to merge, hand back lhs unchanged.
    if not rhs:
        return lhs
    # Note: 'isinstance' and 'issubclass' are not available in this environment,
    # so we check here for the exact type. Subclasses will not pass this check.
    if type(lhs) == type({}):
        # dict + dict: rhs entries win on key collisions (applied last).
        result = {}
        result.update(lhs)
        result.update(rhs)
        return result
    elif type(lhs) == type([]):
        # list + (list|tuple): coerce rhs so '+' concatenates.
        return lhs + list(rhs)
    elif type(lhs) == type(()):
        # tuple + (tuple|list): likewise, preserving lhs's type.
        return lhs + tuple(rhs)
    # Fallback: rely on whatever '+' means for lhs's type.
    return lhs + rhs
def make_pair_table(ss, base=0, chars=['.']):
    """Return a secondary struture in form of pair table.
    Args:
      ss (str): secondary structure in dot-bracket format
      base (int, optional): choose between a pair-table with base 0 or 1
      chars (list, optional): a list of characters to be are ignored, default:
        ['.']
    **Example:**
       base=0: ((..)). => [5,4,-1,-1,1,0,-1]
        i.e. start counting from 0, unpaired = -1
       base=1: ((..)). => [7,6,5,0,0,2,1,0]
        i.e. start counting from 1, unpaired = 0, pt[0]=len(ss)
    Returns:
      [list]: A pair-table
    """
    stack = []
    # 'base is 0' (identity test on an int) replaced with '==': identity of
    # small ints is a CPython implementation detail, and 'is' with literals
    # raises a SyntaxWarning on modern Python.
    if base == 0:
        pt = [-1] * len(ss)
    elif base == 1:
        pt = [0] * (len(ss) + base)
        pt[0] = len(ss)
    else:
        raise ValueError("unexpected value in make_pair_table: \
                (base = " + str(base) + ")")
    for i, char in enumerate(ss, base):
        if (char == '('):
            stack.append(i)
        elif (char == ')'):
            try:
                j = stack.pop()
            except IndexError:
                # 'as e' dropped: the caught exception was never used.
                raise RuntimeError(
                    "Too many closing brackets in secondary structure")
            pt[i] = j
            pt[j] = i
        elif (char not in set(chars)):
            raise ValueError(
                "unexpected character in sequence: '" + char + "'")
    if stack != []:
        raise RuntimeError("Too many opening brackets in secondary structure")
    return pt
import re
def _re_match(pattern, s, flags=0):
"""
Helper function for tests. Return either the full match via
re.MatchObject.group() or None if there was no match.
'flags' is passed to re.search().
"""
match = re.search(pattern, s, flags)
if match is None:
return None
else:
return match.group() | 2cbf6c3b161deb3171a5085d354a5f472e3a6f9b | 145,518 |
def update_gender_count(gender, movie_id, movie_map):
    """Increment one gender counter for a movie, in place.

    Parameters:
        gender: a string that is either 'female_count' or 'male_count'.
        movie_id: identification code for a movie in the dataset.
        movie_map: dict mapping movie id -> per-movie gender counters.

    Returns:
        movie_map: the same dict, with the requested counter bumped by one.
    """
    movie_map[movie_id][gender] += 1
    return movie_map
def clean_slice(
    dim: int,
    sl: slice,
) -> slice:
    """return a cleaned slice for a given length, converting start, stop and step
    to non negative integer values. Only unit step are supported.
    Args:
        dim (int): the length of the axis
        sl (slice): the slice regarding the axis
    Raises:
        ValueError: if any parameter of the slice is not an int or NoneType
        IndexError: if any parameter of the slice is out of bounds
    Returns:
        slice: a slice with only non negative integer values
    """
    # make sure steps are all 1 or None
    if sl.step not in [1, None]:
        raise ValueError(f"slice should have step of 1: {sl} ({sl.step=})")
    # Missing start defaults to the beginning of the axis.
    start = sl.start
    if start is None:
        start = 0
    if not isinstance(start, int):
        raise ValueError(f"invalid start for slice {sl} ({start=})")
    # Missing stop defaults to the full axis length.
    stop = sl.stop
    if stop is None:
        stop = dim
    if not isinstance(stop, int):
        raise ValueError(f"invalid stop for slice {sl} ({stop=})")
    # update slices to have >= 0 start
    # (a negative start is interpreted relative to the end, as usual)
    if start < 0:
        start += dim
    if start < 0:
        raise IndexError(f"bad start for slice {sl} ({start=} < 0)")
    if start >= dim:
        # NOTE(review): this message is missing its closing ')' — cosmetic only.
        raise IndexError(f"bad start for slice {sl} ({start=} >= {dim=}")
    # update slices to have <= len max
    if stop < 0:
        stop += dim
    if stop < 0:
        raise IndexError(f"bad stop for slice {sl} ({stop=} < 0)")
    if stop > dim:
        raise IndexError(f"bad stop for slice {sl} ({stop=} > {dim=})")
    # must not be empty
    if stop <= start:
        raise ValueError(f"empty slice {sl} ({stop=} <= {start=})")
    return slice(start, stop, 1)
def round_half(x):
    """Round a float to the nearest 0.5 mark, rather than the nearest whole
    integer.

    Implemented by shifting up by 0.5, rounding with the built-in round()
    (banker's rounding), then shifting back down.
    """
    return round(x + 0.5) - 0.5
def fix_count(count):
    """Format a count with thousands separators (commas)."""
    return format(int(count), ',')
import json
def json_dumps(content):
    """Dump a Python object as nicely formatted JSON: sorted keys, 4-space
    indent, and a trailing newline. The ' \\n' cleanup strips trailing
    spaces that some json versions emitted after commas."""
    text = json.dumps(content, indent=4, sort_keys=True)
    return text.replace(' \n', '\n') + '\n'
def compare_list_of_committees(list1, list2):
    """Check whether two lists of committees are equal when order (and
    multiplicities) are ignored: every committee in list1 must be contained
    in list2 and vice versa.

    Committees are, as usual, sets of positive integers.

    Parameters
    ----------
    list1, list2 : iterable of sets
    """
    for committee in list1 + list2:
        assert isinstance(committee, set)
    found_in_two = all(committee in list2 for committee in list1)
    found_in_one = all(committee in list1 for committee in list2)
    return found_in_one and found_in_two
def mean(x1, x2):
    """Average of two numbers.

    NOTE(review): the original docstring said "Integer average", but '/'
    performs true division here, so the result is a float even for int
    inputs (e.g. mean(1, 2) == 1.5). Documented as the code behaves, to
    avoid changing callers.
    """
    return (x1 + x2)/2
def clean(s):
    """Clean up a string: collapse whitespace runs (including newlines)
    into single spaces and trim both ends.

    Returns None when given None.
    """
    if s is None:
        return None
    s = s.replace("\n", " ")
    # Repeat until fixed point: a single pass of replace("  ", " ") only
    # halves runs of spaces, so e.g. "a    b" used to come out as "a  b".
    while "  " in s:
        s = s.replace("  ", " ")
    return s.strip()
import math
def calculate_DewP(AirT, RelH):
    """Calculate the dew point from air temperature and relative humidity,
    using the same formula the Met Office uses.

    Args:
        AirT: air temperature; None propagates to a None result.
        RelH: relative humidity in percent; None propagates too.

    Returns:
        The dew point value, or None when either input is missing.
    """
    # 'is None' instead of '== None': identity is the correct (and
    # idiomatic) missing-value test.
    if AirT is None or RelH is None:
        return None
    ea = (8.082 - AirT / 556.0) * AirT
    e = 0.4343 * math.log(RelH / 100) + ea / (256.1 + AirT)
    sr = math.sqrt(((8.0813 - e) ** 2) - (1.842 * e))
    return 278.04 * ((8.0813 - e) - sr)
def compress(speakers, talks):
    """Reduce consecutive dialogue by the same person into single records.

    Args:
        speakers: sequence of speaker names, parallel to ``talks``.
        talks: sequence of utterance strings.

    Returns:
        (compressed_speakers, compressed_talks): parallel lists in which
        runs of consecutive identical speakers are merged into one record,
        their texts joined with spaces and double quotes removed.

    NOTE(review): because current_speaker starts as "", the first emitted
    record is always ("", "") — and it is the only record for empty input.
    Callers appear to rely on skipping index 0; confirm before changing.
    """
    i = 0
    current_text = ""
    current_speaker = ""
    compressed_speakers = []
    compressed_talks = []
    for speaker, text in zip(speakers, talks):
        if speaker == current_speaker:
            # Same speaker as the previous line: accumulate the text.
            current_text = current_text + " " + text
        else:
            # Speaker changed: flush the accumulated record and restart.
            compressed_speakers.append(current_speaker)
            compressed_talks.append(current_text.replace('"', ''))
            current_speaker = speaker
            current_text = text
    # Flush the final in-progress record.
    compressed_speakers.append(current_speaker)
    compressed_talks.append(current_text.replace('"', ''))
    return compressed_speakers, compressed_talks
def uppercase(text):
    """
    Uppercase given text.

    :param text: string to convert
    :return: uppercase text
    """
    result = text.upper()
    return result
def remove_extra_space_from_args(args):
    """
    Strip leading and trailing whitespace from all argument values and drop
    arguments that are empty (falsy or whitespace-only).

    :param args: Dictionary of arguments
    :return: Dictionary of arguments
    :rtype: ``Dict``
    """
    cleaned = {}
    for key, value in args.items():
        if not value:
            continue
        stripped = value.strip()
        if stripped:
            cleaned[key] = stripped
    return cleaned
def csv_beolvas(fajlnev):
    """
    Read a CSV file in which each line holds a NEPTUN code and a score,
    separated by comma, semicolon or tab.

    Parameters:
        fajlnev (str): file name
    Returns:
        (dict): neptun -> score (int); a missing score counts as 0
    """
    eredmenyek = {}
    # 'with' closes the file even on a parse error (the original handle
    # was never closed at all).
    with open(fajlnev, "r") as file:
        for line in file:
            # Normalize every accepted separator to a tab, then split.
            split = line.strip().replace(",", "\t").replace(";", "\t").split("\t")
            neptun = split[0]
            eredmeny = split[1]
            if eredmeny == "":
                eredmeny = "0"
            eredmenyek[neptun] = int(eredmeny)
    return eredmenyek
from typing import Callable
import time
def wait_for_it(condition: Callable[..., bool], max_wait=5) -> bool:
    """Poll ``condition`` every 0.1s for up to ``max_wait`` seconds.

    :returns: ``True`` if condition, else ``False``
    """
    deadline = time.time() + max_wait
    while time.time() <= deadline:
        if condition():
            break
        time.sleep(0.1)
    return bool(condition())
def _validate_query(query: dict) -> dict:
"""Validate a query dictionary."""
if not isinstance(query, dict):
raise TypeError("Query must be a dictionary.")
return query | df03d567a1d2b696cb5b507f4c7a5f88cba13b4c | 529,265 |
def euclide_gcd_algo(a, b):
    """Extended Euclid: return (r, u, v) so that u * a + v * b = r = GCD(a, b)."""
    prev = (a, 1, 0)
    curr = (b, 0, 1)
    while curr[0] > 0:
        q = prev[0] // curr[0]
        nxt = tuple(p - q * c for p, c in zip(prev, curr))
        prev, curr = curr, nxt
    return prev
def handle_most_used_outputs(most_used_x):
    """
    :param most_used_x: Element or list (e.g. from SQL-query output) which should only be one element
    :return: most_used_x unchanged when it is not a list; otherwise the
        smallest element of the (sorted in place) list, or None when empty.
    """
    if not isinstance(most_used_x, list):
        return most_used_x
    if not most_used_x:
        return None
    most_used_x.sort()  # note: sorts the caller's list in place, as before
    return most_used_x[0]
def average_above_zero(table):
    """Compute the average of the non-negative values in a list.

    Arg:
        table: a list of numeric values.
    Returns:
        The computed average (float).
    Raises:
        ValueError: if table is not a list, or if it contains no
            value >= 0.
    """
    if not isinstance(table, list):
        raise ValueError('expected a list as input')
    total = 0.0
    count = 0
    for val in table:
        if val >= 0:  # zero is included, matching the original behavior
            total += val
            count += 1
    if count <= 0:
        raise ValueError('no positive number in tab in function average_above_zero')
    return total / count
import re
def parse_direct_mention(message_text):
    """
    Find a direct mention (a mention at the very beginning of the message)
    and return the mentioned user ID together with the remaining text;
    (None, None) when there is no direct mention.
    """
    matches = re.search("^<@(|[WU].+?)>(.*)", message_text)
    if not matches:
        return (None, None)
    # group 1 = mentioned user id, group 2 = remainder of the message
    return (matches.group(1), matches.group(2).strip())
import inspect
def get_mro(cls):
    """
    Wrapper on top of :func:`inspect.getmro` that also accepts ``None``
    (treated like ``type(None)``).
    """
    if cls is None or cls is type(None):
        return (type(None), object)
    assert isinstance(cls, type)
    return inspect.getmro(cls)
from typing import List
from typing import Tuple
def pythagorean_triple(n: int) -> List[Tuple[int, int, int]]:
    """Find every Pythagorean triple (a, b, c) satisfying
    a^2 + b^2 = c^2 with 1 <= a <= b <= c <= n (natural numbers).

    >>> pythagorean_triple(5)
    [(3, 4, 5)]
    >>> pythagorean_triple(14)
    [(3, 4, 5), (5, 12, 13), (6, 8, 10)]

    Raises:
        ValueError: when n < 2.
    """
    if n < 2:
        raise ValueError('n must be >= 2')
    triples = []
    for a in range(1, n + 1):
        for b in range(a, n + 1):
            for c in range(b, n + 1):
                if a * a + b * b == c * c:
                    triples.append((a, b, c))
    return triples
def args2options(args):
    """Parse docopt-style command line args: keep only '--' options, strip
    the leading dashes and turn remaining '-' into '_'.

    NOTE: the original used str.strip('--'), which treats its argument as a
    *set of characters* and therefore also stripped trailing '-' characters;
    lstrip('-') keeps the documented intent (remove the leading dashes).
    """
    return {k.lstrip('-').replace('-', '_'): args[k] for k in args if '--' in k}
def strokes_to_lines(strokes):
    """Convert stroke-3 format to polyline format.

    Each row of ``strokes`` is (dx, dy, pen_lifted); absolute positions are
    the running sums of the offsets. A row whose third value is 1 ends the
    current polyline. Any trailing points after the final pen-up row are
    discarded, exactly as in the original implementation.
    """
    x, y = 0, 0
    lines = []
    current = []
    for dx, dy, lift in strokes:
        x += float(dx)
        y += float(dy)
        current.append([x, y])
        if lift == 1:
            lines.append(current)
            current = []
    return lines
def echo(s):
    """Return the input embedded in an echo message."""
    message = "We are echoing: %s" % (s,)
    return message
import json
def dictify(response):
    """Decode the JSON body of ``response``; None when there is no body."""
    payload = response.data
    if payload is None:
        return None
    return json.loads(payload.decode('utf8'))
def prepend(base, prefix):
    """Return ``base`` with ``prefix`` attached to the front."""
    return "{}{}".format(prefix, base)
def build_concentartion(species_id, number):
    """
    Build the concentration component for one species.

    Parameters
    ----------
    species_id : int
        species id from the species_indices dictionary
    number : float
        stoichiometric coefficient of the species for the specific reaction

    Returns
    ----------
    concentration : string
        '* y[id] ' for a unit coefficient, otherwise '* y[id] ** coeff '
        using the absolute value of the coefficient
    """
    coeff = abs(float(number))
    if coeff == 1:
        return '* y[%s] ' % species_id
    return '* y[%s] ** %s ' % (species_id, coeff)
import torch
def random_joints2D_deviation(joints2D,
                              delta_j2d_dev_range=[-5, 5],
                              delta_j2d_hip_dev_range=[-15, 15]):
    """
    Deviate 2D joint locations with uniform random noise — note this
    modifies ``joints2D`` IN PLACE (and also returns it).

    :param joints2D: tensor of 2D joints, indexed as [batch, joint, coord];
        the docstring originally claimed (bs, num joints, num joints), but
        the last axis holds 2 coordinates judging by torch.rand(..., 2)
        below — TODO confirm with callers.
    :param delta_j2d_dev_range: uniform noise range.
    :param delta_j2d_hip_dev_range: uniform noise range for hip joints. You may wish to make
    this bigger than for other joints since hip joints are semantically hard to localise and
    can be predicted inaccurately by joint detectors.

    NOTE(review): the mutable-list default arguments are shared across
    calls; harmless while they are only read, but worth confirming.
    """
    # Joint index split: 11/12 are treated as hips; everything else gets
    # the regular noise range.
    hip_joints = [11, 12]
    other_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 14,15, 16]
    batch_size = joints2D.shape[0]
    device = joints2D.device
    # Scale U[0,1) samples into [l, h) for the non-hip joints.
    l, h = delta_j2d_dev_range
    delta_j2d_dev = (h - l) * torch.rand(batch_size, len(other_joints), 2, device=device) + l
    joints2D[:, other_joints, :] = joints2D[:, other_joints, :] + delta_j2d_dev
    # Same for hips with their own (typically wider) range. NOTE(review):
    # this local reuses/shadows the parameter name delta_j2d_hip_dev_range.
    l, h = delta_j2d_hip_dev_range
    delta_j2d_hip_dev_range = (h - l) * torch.rand(batch_size, len(hip_joints), 2, device=device) + l
    joints2D[:, hip_joints, :] = joints2D[:, hip_joints, :] + delta_j2d_hip_dev_range
    return joints2D
def callable_transformation(transformation, kwargs):
    """Bind ``kwargs`` to ``transformation`` ahead of time.

    Returns a one-argument callable mapping f -> transformation(f, **kwargs).
    """
    # A def instead of an assigned lambda (PEP 8 E731); behavior unchanged.
    def apply_transformation(f):
        return transformation(f, **kwargs)
    return apply_transformation
import re
def clean_string(s: str) -> str:
    """
    Replace common symbols with column-name safe alternatives.

    :param s: incoming string
    :return: string
    """
    # Ordered replacements: brackets/angles get spelled-out tokens,
    # dots and spaces collapse to underscores.
    replacements = (
        ("<", "_lt_"),
        (">", "_gt_"),
        ("[", "_osq_"),
        ("]", "_csq_"),
        ("(", "_op_"),
        (")", "_cp_"),
        (".", "_"),
        (" ", "_"),
    )
    s = re.sub(r"\s+", " ", s.strip())
    for symbol, safe in replacements:
        s = s.replace(symbol, safe)
    return s
def logger_to_ofp(port_stats):
    """Translate logger stat names to the corresponding OpenFlow port stats."""
    mapping = {
        'packets_out': 'tx_packets',
        'packets_in': 'rx_packets',
        'bytes_out': 'tx_bytes',
        'bytes_in': 'rx_bytes',
        'dropped_out': 'tx_dropped',
        'dropped_in': 'rx_dropped',
        'errors_in': 'rx_errors',
    }
    return {name: getattr(port_stats, attr) for name, attr in mapping.items()}
import re
def sanitize_name(name):
    """Sanitize a name to something that twitter allows: lowercase,
    hyphen-separated runs of [a-z0-9_-], truncated to 25 characters."""
    hyphenated = re.sub("[^a-z0-9_-]+", '-', name.lower())
    parts = [piece for piece in hyphenated.split('-') if piece]
    return '-'.join(parts)[:25]
def lookup(stmt, embeddings, dictionary):
    """
    Look up a statement's embedded vector in the embedding matrix.

    :param stmt: statement to be looked up (string)
    :param embeddings: embedding matrix
    :param dictionary: maps statement -> row index into the matrix
    :return: the embedding row for ``stmt``
    """
    row = dictionary[stmt]
    return embeddings[row, :]
import re
def parse_option_contents(content):
    """
    Parse an option's contents into a list of parameters.

    Args:
        content (str): the option content, optionally wrapped in ( )

    Returns:
        content_list (list): the stripped comma-separated parameters
    """
    # Drop a single pair of surrounding brackets, if present.
    if re.search(r'^\((.*?)\)$', content):
        content = content[1:-1]
    return [item.strip() for item in content.split(',')]
def run_q(db, q, *args, **kwargs):
    """
    Run query ``q`` and return its results as a list of python dicts
    (column name -> value).

    The original dispatched on the presence of args/kwargs through four
    branches that all amount to the same call; splatting empty *args and
    **kwargs is identical to omitting them, so a single call suffices.
    """
    cur = db.execute(q, *args, **kwargs)
    keys = list(cur.keys())
    return [dict(zip(keys, row)) for row in cur]
def filter_linksearchtotals(queryset, filter_dict):
    """
    Apply user-filter-form conditions to a LinkSearchTotal queryset.

    queryset -- a LinkSearchTotal queryset
    filter_dict -- a dictionary of data from the user filter form

    Returns a queryset, narrowed by start/end date whenever those entries
    are present and truthy.
    """
    start_date = filter_dict.get("start_date")
    if start_date:
        queryset = queryset.filter(date__gte=start_date)
    end_date = filter_dict.get("end_date")
    if end_date:
        queryset = queryset.filter(date__lte=end_date)
    return queryset
def filer_elements(elements, element_filter):
    """Drop every element that appears in ``element_filter``.

    Ex.: filtering [' '] out of ['a', ' ', 'c'] gives ['a', 'c'].
    """
    kept = []
    for element in elements:
        if element not in element_filter:
            kept.append(element)
    return kept
def hs_bounds(K1, K2, M1, M2, f1, f2):
    """
    Hashin Shtrickman bounds for a 2 mineral mixture
    Usage:
        (K_hsu, K_hsl, M_hsu, M_hsl) = hs_bounds(K1, K2, M1, M2, f1, f2)
    Inputs:
        K1 = bulk modulus mineral 1
        K2 = bulk modulus mineral 2
        M1 = shear modulus mineral 1
        M2 = shear modulus mineral 2
        f1 = volume fraction mineral 1
        f2 = volume fraction mineral 2
    Outputs:
        K_hsu = Bulk Modulus Hashin-Shtrickman upper bound
        K_hsl = Bulk Modulus Hashin-Shtrickman lower bound
        M_hsu = Shear Modulus Hashin-Shtrickman upper bound
        M_hsl = Shear Modulus Hashin-Shtrickman lower bound
    """
    # Bulk modulus bounds: mineral 1 is the reference phase for the upper
    # bound, mineral 2 for the lower (note the swapped K/M/f subscripts).
    K_hsu = K1 + f2 * ( (K2-K1)**-1 + f1*(K1+4*M1/3)**-1 )**-1
    K_hsl = K2 + f1 * ( (K1-K2)**-1 + f2*(K2+4*M2/3)**-1 )**-1
    # Shear modulus bounds, with the same phase-1/phase-2 symmetry.
    M_hsu = M1 + f2/( (M2-M1)**-1 + (2*f1*(K1+2*M1))/(5*M1*(K1+4*M1/3)) )
    M_hsl = M2 + f1/( (M1-M2)**-1 + (2*f2*(K2+2*M2))/(5*M2*(K2+4*M2/3)) )
    return K_hsu, K_hsl, M_hsu, M_hsl
def strip(table, col):
    """Remove column ``col`` from ``table``, in place.

    Preconditions: table is a (non-ragged) 2d List,
    col valid column.

    Returns the same (mutated) table, for convenience.
    """
    n_col = len(table[0])
    # Assert message translated to English (was Chinese); the exception
    # type callers see (AssertionError) is unchanged.
    assert col < n_col, repr(col) + " column to delete exceeds the column count!"
    for row_idx in range(len(table)):
        row = table[row_idx]
        table[row_idx] = row[:col] + row[col + 1:]
    return table
import torch
def h_function(x, epsilon=1.0):
    """h-function to normalize target Qs, described in the paper [1]:

        h(x) = sign(x) * [sqrt(abs(x) + 1) - 1] + epsilon * x

    Used in [1] in combination with h_inverse:
        targets = h(r + gamma * h_inverse(Q^))
    """
    sqrt_term = torch.sqrt(torch.abs(x) + 1.0) - 1.0
    return torch.sign(x) * sqrt_term + epsilon * x
from typing import List
from typing import Dict
def get_largest_num_objects(x: List[Dict]):
    """
    Get the largest number of objects over all samples in a batch.

    :param x: batch data — an iterable of (sample, label) pairs, where
        sample["others"] holds that sample's objects
    :return: (int) the maximum object count (0 for an empty batch)
    """
    counts = [len(sample["others"]) for sample, _ in x]
    return max(counts, default=0)
def validate_args(numargs, args):
    """
    Check that ``args`` holds at least ``numargs`` elements and return it.

    Raises ValueError if not.

    NOTE(review): the original docstring promised to "truncate
    accordingly", but the list is returned unmodified — documented here
    as the code actually behaves.
    """
    if len(args) < numargs:
        raise ValueError("Not enough elements in list {}, need "
                         "{}.".format(args, numargs))
    return args
def GetLiteral(parser, name):
    """Return the literal with the given name, dropping surrounding single
    quotes when present."""
    value = parser.literalNames[name]
    if value.startswith("'"):
        value = value[1:]
    return value[:-1] if value.endswith("'") else value
def get_word_from_tok_el(el):
    """
    Get word from an element of tokens (instead morphs).

    Example token elements (index-POS-word-index):
        '4-N-AlEmlyp-3'
        '1-V->DAf--1'
        '4-PNX--RRB--2'
        '6-PNX---0'

    Drops the two leading '-'-separated fields, then keeps the part of the
    remainder before its first '-'.
    NOTE(review): rsplit('-') here takes no maxsplit, so indexing [0] keeps
    everything *before the first* '-'; a word that itself starts with '-'
    (e.g. '-RRB-') therefore comes back as '' — confirm whether
    rsplit('-', 1)[0] was intended.
    """
    return el.split('-', 2)[2].rsplit('-')[0]
import base64
def decode_password(base64_string: str) -> str:
    """
    Decode a base64 encoded string.

    Args:
        base64_string: the base64 encoded string (ASCII).

    Returns:
        The decoded string (ASCII).
    """
    raw = base64.b64decode(base64_string.encode("ascii"))
    return raw.decode("ascii")
import six
def is_start_piece(piece):
    """Check if the current word piece is the starting piece (sentence piece).

    SentencePiece marks word-initial pieces with the '▁' metasymbol; the
    '[MASK]' token also counts as a start piece.

    (A large commented-out variant that special-cased punctuation and
    foreign characters was dead code and has been removed.)
    """
    return six.ensure_str(piece).startswith("▁") or piece == "[MASK]"
def transcribe(seq: str) -> str:
    """
    Transcribe DNA to RNA by generating the complement sequence with
    T -> U replacement.

    Raises:
        KeyError: on any character outside 'ATGC' (same as the original
            dict lookup).
    """
    # Complement base mapping.
    nuc_dict = {
        'A': 'U',
        'T': 'A',
        'G': 'C',
        'C': 'G',
    }
    # ''.join over a generator replaces the original O(n^2)
    # string-concatenation loop with a single O(n) pass.
    return ''.join(nuc_dict[nuc] for nuc in seq)
def create_compose(ctx):
    """
    Create a docker-compose file for this project, delegating to the
    docker helper stored on the context object.
    """
    docker = ctx.obj['docker']
    project = ctx.obj['project_name']
    return docker.create_compose(project=project)
def get_tfp_model_obj(tfp_code):
    """Executes the generated python program, and returns an
    un-instantiated model object

    Parameters
    ----------
    tfp_code : string
        the program to execute

    Returns
    -------
    tfd.Distribution
        the model class

    SECURITY NOTE(review): exec() runs arbitrary code — this must only
    ever be fed trusted, internally generated source, never external input.
    """
    # Run the generated module in a fresh namespace and pull out the
    # 'model' symbol it is expected to define.
    exec_dict = {}
    exec(tfp_code, exec_dict)
    return exec_dict["model"]
def error_label(error):
    """Map a lint error code to the alert-box/label color class.

    Unknown codes fall back to 'warning'.
    """
    levels = {
        'wrong-import-position': 'info',
        'fatal': 'danger',
        'MC0001': 'warning',  # McCabe's cyclomatic complexity warning.
    }
    # dict.get with a default replaces the build-then-membership-test dance.
    return levels.get(error, 'warning')
from typing import Optional
def get_scheduler_state(code: int) -> Optional[str]:
    """Translate a scheduler state code to a human-readable state.
    Official mapping is available
    `here <https://github.com/BOINC/boinc/blob/master/py/Boinc/boinc_db.py>`.
    Args:
        code:
            The code of scheduler state.
    Returns:
        The human-readable state related to the code, or ``None`` for
        codes outside the known mapping.
    """
    states = {
        0: "UNINITIALIZED",
        1: "PREEMPTED",
        2: "SCHEDULED",
    }
    # int() also accepts numeric strings, so "1" resolves like 1.
    return states.get(int(code))
import json
def read_json(json_filename):
    """
    Read JSON file and return the decoded object.
    :param str json_filename: json file name
    :return: dict json_object: JSON object
    :raise FileNotFoundError: if file can't be read
    :raise JSONDecodeError: if file is not in json format
    """
    try:
        # 'with' guarantees the handle is closed even if decoding fails.
        with open(json_filename, "r") as read_file:
            try:
                return json.load(read_file)
            except json.JSONDecodeError as decode_error:
                # Log before re-raising so callers see the parse location.
                print(decode_error)
                raise
    except FileNotFoundError as fnf_error:
        print(fnf_error)
        raise
def get_min_key_for_max_value(d):
    """
    For a dictionary d, get the key for the largest value.
    If the largest value is attained several times, get the
    smallest respective key.

    Keys must be mutually orderable; raises ValueError for an empty dict.
    """
    largest = max(d.values())
    # min() over the matching keys replaces the sort-then-scan approach.
    return min(key for key, value in d.items() if value == largest)
import collections
def split_class_instances(classnames):
    """Split sounds from the same class into separate classes.
    After splitting, we add a count number after instance number 1.
    For example original class names of ['cat', 'dog', 'cat'] will be returned
    as ['cat', 'dog', 'cat_2'].
    Args:
      classnames: list of classes.
    Returns:
      list of modified classnames.
    """
    occurrences = collections.Counter()
    renamed = []
    for name in classnames:
        occurrences[name] += 1
        count = occurrences[name]
        # First occurrence keeps its name; repeats get a '_<count>' suffix.
        renamed.append(name if count == 1 else '{}_{}'.format(name, count))
    return renamed
def add_special_tags(word):
    """Returns the word wrapped in angle brackets, the special tags used
    to distinguish between fasttext words and fasttext ngrams.
    """
    return '<{}>'.format(word)
def get_government_factions(electoral_term):
    """Get the government factions for the given electoral_term.

    :param electoral_term: Bundestag electoral term number (1-19).
    :return: List of faction names forming the government coalition.
    :raises KeyError: If ``electoral_term`` is not between 1 and 19.
    """
    government_electoral_term = {
        1: ["CDU/CSU", "FDP", "DP"],
        2: ["CDU/CSU", "FDP", "DP"],
        3: ["CDU/CSU", "DP"],
        4: ["CDU/CSU", "FDP"],
        5: ["CDU/CSU", "SPD"],
        6: ["SPD", "FDP"],
        7: ["SPD", "FDP"],
        8: ["SPD", "FDP"],
        9: ["SPD", "FDP"],
        10: ["CDU/CSU", "FDP"],
        11: ["CDU/CSU", "FDP"],
        12: ["CDU/CSU", "FDP"],
        13: ["CDU/CSU", "FDP"],
        14: ["SPD", "BÜNDNIS 90/DIE GRÜNEN"],
        15: ["SPD", "BÜNDNIS 90/DIE GRÜNEN"],
        16: ["CDU/CSU", "SPD"],
        17: ["CDU/CSU", "FDP"],
        18: ["CDU/CSU", "SPD"],
        19: ["CDU/CSU", "SPD"],
    }
    return government_electoral_term[electoral_term]
import csv
def read_list(csvFile):
    """Returns a list which has been stored as a csv file.

    All rows are flattened into a single list of cell strings.
    Note: '|' is the quote character used when the list was written.
    """
    items = []
    # Use a distinct name for the handle; the original shadowed the
    # csvFile parameter with the open file object.
    with open(csvFile) as handle:
        for row in csv.reader(handle, quotechar='|'):
            items.extend(row)
    return items
def get_rel_target_field(field):
    """Return the target field for a field's relation.
    Args:
        field (django.db.models.Field):
            The relation field.
    Returns:
        django.db.models.Field:
            The field on the other end of the relation.
    """
    if hasattr(field, 'target_field'):
        # Django >= 1.7
        return field.target_field
    else:
        # Django < 1.7
        return field.related_field
import json
import io
import csv
def csv_file_str_from_json(json_s, fieldnames):
    """Return a csv file contents string from a json string that must be
    an array of objects.

    :param json_s: JSON text encoding a list of objects.
    :param fieldnames: Column names to emit; extra object keys are ignored.
    :return: CSV text (header row plus one row per object), or ``None``
        for an empty array.

    NOTE(review): relies on a csv dialect named 'editor' being
    registered elsewhere in the application — confirm at the call site.
    """
    instances = json.loads(json_s)
    if not instances:
        return None
    buffer = io.StringIO()
    writer = csv.DictWriter(buffer, fieldnames=fieldnames,
                            extrasaction='ignore', dialect='editor')
    writer.writeheader()
    writer.writerows(instances)
    return buffer.getvalue()
def parameter_of_point_on_line(a, b, point):
    """
    Get the parameter of a point on a line. For this function to give a
    correct result it is important that the provided point already lies on the
    line. The :func:`closest_point_on_line` can be used to get that point on the
    line.
    :param OpenMaya.MVector a:
    :param OpenMaya.MVector b:
    :param OpenMaya.MVector point:
    :return: Parameter of the point on the line
    :rtype: float
    """
    # The parameter is the ratio of the distance a->point over the full
    # segment length a->b.
    return (point - a).length() / (b - a).length()
def two_sum(nums, target):
    """Return a list with the indices of the two numbers whose sum equals
    ``target``, or ``None`` when no such pair exists.

    :param nums: sequence of numbers to search.
    :param target: the desired pair sum.
    :return: ``[i, j]`` with ``i < j`` and ``nums[i] + nums[j] == target``,
        or ``None``.
    """
    seen = {}  # maps value -> index of its first occurrence
    for index, num in enumerate(nums):
        complement = target - num
        if complement in seen:
            return [seen[complement], index]
        seen[num] = index
    # Fixed: previously returned an ad-hoc string here even though the
    # docstring promised None for the no-match case.
    return None
import torch
def filter_scores_and_topk(scores, score_thr, topk, results=None):
    """Filter results using score threshold and topk candidates.
    Args:
        scores (Tensor): The scores, shape (num_bboxes, K).
        score_thr (float): The score filter threshold.
        topk (int): The number of topk candidates.
        results (dict or list or Tensor, Optional): The results to
           which the filtering rule is to be applied. The shape
           of each item is (num_bboxes, N).
    Returns:
        tuple: Filtered results
            - scores (Tensor): The scores after being filtered, \
                shape (num_bboxes_filtered, ).
            - labels (Tensor): The class labels, shape \
                (num_bboxes_filtered, ).
            - anchor_idxs (Tensor): The anchor indexes, shape \
                (num_bboxes_filtered, ).
            - filtered_results (dict or list or Tensor, Optional): \
                The filtered results. The shape of each item is \
                (num_bboxes_filtered, N).
    """
    valid_mask = scores > score_thr
    scores = scores[valid_mask]
    # Each row of valid_idxs is a (bbox_index, class_index) pair.
    valid_idxs = torch.nonzero(valid_mask)

    num_topk = min(topk, valid_idxs.size(0))
    # torch.sort is actually faster than .topk (at least on GPUs)
    scores, idxs = scores.sort(descending=True)
    scores = scores[:num_topk]
    topk_idxs = valid_idxs[idxs[:num_topk]]
    # Split the (index, label) pairs into separate 1-D tensors.
    keep_idxs, labels = topk_idxs.unbind(dim=1)

    filtered_results = None
    if results is not None:
        if isinstance(results, dict):
            filtered_results = {k: v[keep_idxs] for k, v in results.items()}
        elif isinstance(results, list):
            filtered_results = [result[keep_idxs] for result in results]
        elif isinstance(results, torch.Tensor):
            filtered_results = results[keep_idxs]
        else:
            raise NotImplementedError(f'Only supports dict or list or Tensor, '
                                      f'but get {type(results)}.')
    return scores, labels, keep_idxs, filtered_results
def calc_prob_class_given_sensitive(predicted, sensitive, predicted_goal, sensitive_goal):
    """
    Returns P(predicted = predicted_goal | sensitive = sensitive_goal). Assumes that predicted
    and sensitive have the same length. If there are no attributes matching the given
    sensitive_goal, this will error with a ZeroDivisionError.
    """
    match_count = 0.0
    total = 0.0
    # Values are compared as strings so numeric and string labels mix freely.
    for sens, pred in zip(sensitive, predicted):
        if str(sens) != str(sensitive_goal):
            continue
        total += 1
        if str(pred) == str(predicted_goal):
            match_count += 1
    return match_count / total
def is_prime(n):
    """Returns if the given number is a prime number or not
    >>> is_prime(10)
    False
    >>> is_prime(7)
    True
    >>> is_prime(9)
    False
    """
    if n in [2, 3]:  # shortcut for if n equals 2 or 3
        return True
    if n % 2 == 0 or n < 2:  # skip even numbers, since they are not prime anyways
        return False
    # Trial-divide by odd numbers up to AND INCLUDING floor(sqrt(n)).
    # Fixed: the previous exclusive bound (round(n ** 0.5)) wrongly
    # reported perfect squares of odd primes (9, 25, 49, ...) as prime.
    for i in range(3, int(n ** 0.5) + 1, 2):
        if n % i == 0:  # if this conditional is true, n isn't prime
            return False
    return True
from typing import List
def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
    """
    Get subs based on a file of subreddits and a file of blacklisted subreddits.
    :param subs_file: List of subreddits. Each sub in a new line.
    :param blacklist_file: List of blacklisted subreddits. Each sub in a new line.
    :return: Sorted list of lowercased subreddits with blacklisted subs removed.
    **Example files**::
        sub0
        sub1
        sub2
        ...
    """
    # 'with' ensures the files are closed even if reading raises;
    # entries are lowercased and stripped of their trailing newline.
    with open(subs_file) as handle:
        subs = [line.lower().replace('\n', '') for line in handle]
    with open(blacklist_file) as handle:
        blacklisted = [line.lower().replace('\n', '') for line in handle]
    # Filter blacklisted
    return list(sorted(set(subs).difference(set(blacklisted))))
def NormalizeGoogleStorageUri(uri):
    """Converts gs:// to http:// if uri begins with gs:// else returns uri.

    Falsy inputs (None, '') are returned unchanged.
    """
    prefix = 'gs://'
    if uri and uri.startswith(prefix):
        return 'http://storage.googleapis.com/' + uri[len(prefix):]
    return uri
def seq_mult_scalar(a, s):
    """Returns a list of the products of s with the values in list a."""
    return [element * s for element in a]
import calendar
def getNumberOfDaysInMonth(month):
    """
    Return the maximum number of days in a given month, 1 being January, etc.
    For February always 29 will be given, even it is not a leap year.
    """
    # monthrange returns (weekday_of_first_day, number_of_days);
    # year 2000 is a leap year, so February reports 29 days.
    _, days = calendar.monthrange(2000, month)
    return days
def extension_duplicates(regexp_pair_list):
    r""" Return a new list with the pair items of `regexp_pair_list`
    have all the '.nii' replaced by '.nii.gz'.
    This is useful for the Datasink regexp_substitutions when
    you don't know/care what extension the output image will have,
    then you put all of them like:
    "(r"/rc1[\w]+_corrected\.nii$", "/coreg_gm.nii")"
    and then call this function to add the duplicates with modified
    extension of these same pairs.
    Parameters
    ----------
    regexp_pair_list: list of 2-tuple of str
    Returns
    -------
    mod_regexp_pair_list: list of 2-tuple of str
    """
    def _gz(filepath):
        # Append the gzip suffix to every '.nii' occurrence.
        return filepath.replace('.nii', '.nii.gz')

    # Only patterns explicitly ending in '.nii$' get a gzip duplicate.
    return [(_gz(pattern), _gz(replacement))
            for pattern, replacement in regexp_pair_list
            if '.nii$' in pattern]
from typing import List
def get_setup_fixtures() -> List:
    """
    This is a global list that contains the functions that should be executed
    before fuzz-lightyear begins executing tests.
    :rtype: list(function)
    """
    # A fresh list is returned on every call so callers can append safely.
    return []
from typing import Union
from typing import Dict
from typing import List
def build_credentials_fetch(credentials: Union[Dict, List]) -> Union[Dict, List]:
    """Formats the API response to Demisto context.
    Args:
        credentials: The raw response from the API call. Can be a List or Dict.
    Returns:
        The formatted Dict or List.
    Examples:
        >>> build_credentials_fetch([{'username': 'user1', 'name': 'name1', 'password': 'password'}])
        [{'user': 'user1', 'name': 'name1', 'password': 'password'}]
    """
    # Lists are handled by formatting every entry recursively.
    if isinstance(credentials, list):
        return [build_credentials_fetch(entry) for entry in credentials]
    # Map API field names onto the context field names; missing keys
    # become None via dict.get.
    field_map = {'user': 'username', 'name': 'name', 'password': 'password'}
    return {target: credentials.get(source) for target, source in field_map.items()}
def sanitize_column_list(input_column_list):
    """Remove empty elements (Nones, '') from input columns list.

    Any falsy entry is dropped; renamed the loop variable, which
    previously shadowed the builtin ``input``.
    """
    return [column for column in input_column_list if column]
def interpolate(col1, col2, ni):
    """
    Linear interpolation between two adjacent colors in color palette.

    :param col1: first color as an [r, g, b] sequence
    :param col2: second color as an [r, g, b] sequence
    :param ni: palette position; only its fractional part is used
    :return: interpolated [r, g, b] list
    """
    fraction = ni % 1  # fractional part of ni selects the blend weight
    return [(c2 - c1) * fraction + c1 for c1, c2 in zip(col1, col2)]
def extract_year(datestring):
    """
    Return year part of date string as integer.

    Assumes the year occupies characters 6-9, e.g. 'DD/MM/YYYY' or
    'MM/DD/YYYY' style strings — TODO confirm the exact format at
    the call sites.
    """
    return int(datestring[6:10])
def all_subsets(s, target):
    """Return all subsets of `s` that sum `target`, as a set of tuples.

    TC: O(2^N), where N is number of elements in `s`.
    """
    # Fixed: the old empty-input guard returned {} (an empty dict, not a
    # set) and missed the empty subset when target == 0; the recursion
    # below handles an empty `s` correctly on its own.
    solutions = set()

    def _all_subsets(current_subset, index, current_sum):
        # Record any prefix whose running sum already hits the target.
        if current_sum == target:
            solutions.add(current_subset)
        if index == len(s):
            return
        # Branch 1: skip s[index].
        _all_subsets(current_subset, index + 1, current_sum)
        # Branch 2: include s[index].
        _all_subsets(current_subset + (s[index],), index + 1, current_sum + s[index])

    _all_subsets((), 0, 0)
    return solutions
from datetime import datetime
def timestamp_seconds() -> str:
    """
    Return a timestamp in 15-char string format: {YYYYMMDD}'T'{HHMMSS}
    """
    iso = datetime.now().isoformat(sep="T", timespec="seconds")
    # Strip the date/time separators, leaving digits and the central 'T'.
    return "".join(ch for ch in iso if ch not in (" ", "-", ":"))
def human_time(time_s):
    """
    Converts a time in seconds to a string using days, hours, minutes and seconds.

    :param time_s: non-negative duration in seconds (floats are truncated).
    :return: e.g. "1 day 1 hr 1 min 1 sec"; an empty string for 0.
    """
    remaining = int(time_s)  # Ensure int
    parts = []

    days = remaining // 86400
    if days >= 1:
        # Singular/plural handled explicitly for days only, matching the
        # original output ("1 day" vs "2 days"; "hr"/"min"/"sec" are fixed).
        parts.append("1 day" if days == 1 else "%i days" % days)
        remaining -= days * 86400

    hours = remaining // 3600
    if hours >= 1:
        parts.append("%i hr" % hours)
        remaining -= hours * 3600

    minutes = remaining // 60
    if minutes >= 1:
        parts.append("%i min" % minutes)
        remaining -= minutes * 60

    if remaining >= 1:
        parts.append("%i sec" % remaining)

    return " ".join(parts)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.