content (stringlengths 39–9.28k) | sha1 (stringlengths 40) | id (int64 8–710k)
---|---|---|
import torch
import math
def attack(grad_honests, f_real, **kwargs):
""" Generate non-finite gradients.
Args:
grad_honests Non-empty list of honest gradients
f_real Number of Byzantine gradients to generate
... Ignored keyword-arguments
Returns:
Generated Byzantine gradients
"""
# Fast path
if f_real == 0:
return list()
# Generate the non-finite Byzantine gradient
byz_grad = torch.empty_like(grad_honests[0])
byz_grad.copy_(torch.tensor((math.nan,), dtype=byz_grad.dtype))
# Return this Byzantine gradient 'f_real' times
return [byz_grad] * f_real
|
af45b1e463beab738d293a249f1eea643bc253ff
| 431,196 |
def _ljust(input, width, fillchar=None):
"""Either ljust on a string or a list of string. Extend with fillchar."""
if fillchar is None:
fillchar = ' '
if isinstance(input, str):
return input.ljust(width, fillchar)
else:
delta_len = width - len(input)
if delta_len <= 0:
return input
else:
return input + [fillchar for _ in range(delta_len)]
|
49af82ed78f1b0a7e75cc135e911d3d0c733ce1e
| 447,411 |
from typing import List
import re
def getLinks(text: str) -> List[str]:
""" Get a list of all URLs starting with https? in the given text """
return [x[0] for x in
re.findall(r"((http|https):\/\/[\S-]+(\.[\S-]+)*(\/[\S]+)*(\.[\S-]*)?(\?[\S-]+=[\S-]+(\&[\S-]+=[\S-]+)*)?)",
text,
re.IGNORECASE)]
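# Illustrative usage sketch (the text and URLs below are assumed, not part of the
# original snippet): both http and https links are extracted as whole non-whitespace runs.
example_text = "Docs at https://example.com/docs?page=1 and mirror http://mirror.example.org/files"
print(getLinks(example_text))
# ['https://example.com/docs?page=1', 'http://mirror.example.org/files']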
|
e01c2dd776239d57d2f4e000d5ddab95b87345c1
| 143,400 |
def _alternating_sequence(token1, token2, length):
"""Make alternating sequence of token1 and token2 with specified length."""
return [(token2 if i % 2 else token1) for i in range(length)]
|
7bbdd408014d7f61b05c655a29184ef33bb8a01d
| 553,678 |
import math
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
"""Create and return sliding window lattice as list of sliding windows
Args:
img (numpy.array): image to search
x_start_stop (list): horizontal limits, if [None, None] then [0, image width] will be used
y_start_stop (list): vertical limits, if [None, None] then [0, image height] will be used
xy_window (tuple): sliding window size, default is (64, 64)
xy_overlap (tuple): sliding window overlap factor, default is (0.5, 0.5)
Returns:
list of windows
"""
    # Work on copies so the mutable default arguments are never modified in place
    x_start_stop = list(x_start_stop)
    y_start_stop = list(y_start_stop)
    # If x and/or y start/stop positions not defined, set to image size
    imgsizey = img.shape[0]
    imgsizex = img.shape[1]
    x_start_stop[0] = 0 if x_start_stop[0] is None else x_start_stop[0]
    x_start_stop[1] = imgsizex if x_start_stop[1] is None else x_start_stop[1]
    y_start_stop[0] = 0 if y_start_stop[0] is None else y_start_stop[0]
    y_start_stop[1] = imgsizey if y_start_stop[1] is None else y_start_stop[1]
    # Compute the span of the region to be searched
    sizex = x_start_stop[1] - x_start_stop[0]
    sizey = y_start_stop[1] - y_start_stop[0]
    # Compute the number of pixels per step in x/y
    stepx = int(xy_window[0] * xy_overlap[0])
    stepy = int(xy_window[1] * xy_overlap[1])
    # Compute the number of steps in x/y
    step_count_x = int(math.floor(1.0 * sizex / stepx)) - 1
    step_count_y = int(math.floor(1.0 * sizey / stepy)) - 1
    # Build the list of window positions
    window_list = []
    for i in range(step_count_y):
        for j in range(step_count_x):
            # Each window is ((top-left x, top-left y), (bottom-right x, bottom-right y))
            window_list.append((
                (x_start_stop[0] + j * stepx, y_start_stop[0] + i * stepy),
                (x_start_stop[0] + j * stepx + xy_window[0], y_start_stop[0] + i * stepy + xy_window[1])
            ))
    # Return the list of windows
    return window_list
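# Illustrative usage sketch (the image stand-in below is assumed): only img.shape is read,
# so any object exposing a (height, width, ...) shape works. A 256x128 image with 64x64
# windows at 50% overlap yields a 7x3 lattice of windows.
class _FakeImage:
    shape = (128, 256, 3)  # height=128, width=256
windows = slide_window(_FakeImage(), xy_window=(64, 64), xy_overlap=(0.5, 0.5))
print(len(windows), windows[0])  # 21 ((0, 0), (64, 64))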
|
aa248aa96a4832eb2aeb404cfaf2c2bcf7d0dc8e
| 149,253 |
def _do_decoding(s, encoding):
"""A function to decode a bytes string, or return the object as-is."""
try:
return s.decode(encoding)
except UnicodeError:
raise
except (AttributeError, TypeError):
return s
|
4bda03779164567078eac6bdfcb750a4cf689205
| 668,916 |
def rotate_counter_clockwise(shape):
"""Given a shape, rotates it counter clockwise"""
return [
[shape[y][x] for y in range(len(shape))]
for x in range(len(shape[0]) - 1, -1, -1)
]
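# Quick sanity check (assumed example): rotating a 2x3 grid 90 degrees counter-clockwise
# turns rows into columns, reading from the rightmost column upward.
grid = [[1, 2, 3],
        [4, 5, 6]]
print(rotate_counter_clockwise(grid))  # [[3, 6], [2, 5], [1, 4]]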
|
3991bfed91b2876b3b04520cd573d2c737a435cd
| 623,090 |
def detxy2kxy(xdet, ydet, xstart, ystart, x0, y0, fx, fy, xstep, ystep):
"""
Conversion from detector coordinates (xd, yd) to momentum coordinates (kx, ky).
**Parameters**\n
xdet, ydet: numeric, numeric
Pixel coordinates in the detector coordinate system.
xstart, ystart: numeric, numeric
The starting pixel number in the detector coordinate system
along the x and y axes used in the binning.
x0, y0: numeric, numeric
The center pixel position in binned image used in calibration.
fx, fy: numeric, numeric
Scaling factor along the x and y axes (in binned image).
xstep, ystep: numeric, numeric
Binning step size along x and y directions.
"""
xdet0 = xstart + xstep * x0
ydet0 = ystart + ystep * y0
kx = fx * ((xdet - xdet0) / xstep)
ky = fy * ((ydet - ydet0) / ystep)
return (kx, ky)
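# Worked example with assumed calibration numbers (not from the original source):
# a detector pixel offset of (10, 5) from the calibrated center maps to small k values.
print(detxy2kxy(xdet=310, ydet=215, xstart=100, ystart=50, x0=100, y0=80,
                fx=0.01, fy=0.01, xstep=2, ystep=2))
# (0.05, 0.025)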
|
44d353a5c5b5cabeb5a4b9aba8b4a07fc6a3ac2c
| 33,254 |
def namify(idx):
"""
Helper function that pads a given file number and return it as per the dataset image name format.
"""
len_data = 6 #Ilsvr images are in the form of 000000.JPEG
len_ = len(str(idx))
need = len_data - len_
assert len_data >= len_, "Error! Image idx being fetched is incorrect. Invalid value."
pad = '0'*need
return pad+str(idx)
|
069ff7a297f944e9e0e51e5e100276a54fa51618
| 707,508 |
def compile(self):
"""Compile PrintNode."""
return "\tsay %s" % self.children[0].tok + "\n\n"
|
305a8f71cff38c0ee507d88be74fbf72895b64f1
| 485,029 |
def __is_utf8(rule_string):
"""
Takes the string of the rule and parses it to check if there are only utf-8 characters present.
:param rule_string: the string representation of the yara rule
:return: true if there are only utf-8 characters in the string
"""
try:
rule_string.encode('utf-8')
except UnicodeEncodeError:
return False
else:
return True
|
90d9842334b0e989d152577c840807380a0e4a31
| 108,602 |
def substitute(sequence, offset, ref, alt):
"""Mutate a sequence by substituting given `alt` at instead of `ref` at the
given `position`.
Parameters
----------
sequence : sequence
String of amino acids or DNA bases
offset : int
Base 0 offset from start of `sequence`
ref : sequence or str
What do we expect to find at the position?
alt : sequence or str
Alternate sequence to insert
"""
n_ref = len(ref)
sequence_ref = sequence[offset:offset + n_ref]
assert str(sequence_ref) == str(ref), \
"Reference %s at offset %d != expected reference %s" % \
(sequence_ref, offset, ref)
prefix = sequence[:offset]
suffix = sequence[offset + n_ref:]
return prefix + alt + suffix
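# Illustrative call with an assumed sequence: replace the reference base "C" at
# 0-based offset 2 with "G".
print(substitute("ATCGA", 2, "C", "G"))  # ATGGA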
|
5834da04d1c3565a8500336dab7edcacc1f2e462
| 84,623 |
import six
def longest_common_prefix(str1, str2):
"""Returns the longest common prefix length of two strings."""
limit = min(len(str1), len(str2))
for i in six.moves.range(limit):
if str1[i] != str2[i]:
return i
return limit
|
5748102df6fba2053c0ef5c4de429eaa46b21d91
| 62,404 |
def gradient_colors(nb_colors, color_start=None, color_end=None):
"""Produce a color gradient."""
if color_start is None:
color_start = [1, 0, 0]
if color_end is None:
color_end = [0, 0, 1]
    # Start the gradient at color_start (default red) and finish at color_end (default blue)
    gradient = [color_start]
    # If only one color is requested, return just the start color
    if nb_colors == 1:
        return gradient
    # Calculate a color at each evenly spaced value of
    # t = i / (nb_colors - 1) for i from 1 to nb_colors - 1
for t in range(1, nb_colors):
gradient.append(
[
color_start[j]
+ (float(t) / (nb_colors - 1)) * (color_end[j] - color_start[j])
for j in range(3)
]
)
return gradient
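# Small illustration with the default endpoints (assumed call, not from the original
# source): three colors interpolated from red to blue.
print(gradient_colors(3))
# [[1, 0, 0], [0.5, 0.0, 0.5], [0.0, 0.0, 1.0]]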
|
5173a34e53a8a22c8648177994337824b4070cea
| 165,736 |
def _next_legen_der(n, x, p0, p01, p0d, p0dd):
"""Compute the next Legendre polynomial and its derivatives."""
# only good for n > 1 !
old_p0 = p0
old_p0d = p0d
p0 = ((2 * n - 1) * x * old_p0 - (n - 1) * p01) / n
p0d = n * old_p0 + x * old_p0d
p0dd = (n + 1) * old_p0d + x * p0dd
return p0, p0d, p0dd
|
13c0afaaca1f7e2c24bc38c45c1649eb8183b6f0
| 423,208 |
def get_client_ip(environ):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return environ["HTTP_X_FORWARDED_FOR"].split(",")[0].strip()
except (KeyError, IndexError):
return environ.get("REMOTE_ADDR")
|
92b6e45b37b2dd52e326a9e85589211745fd5c2d
| 583,951 |
def end_ord(l) :
"""Recherche l'indice du premier élément non trié.
Cette fonction recherche le premier élément d'une liste d'entiers
qui n'est pas dans l'ordre numérique croissant et retourne son indice. Si la
valeur retounée est égale à zéro alors les deux premiers éléments
ne sont pas dans le bonne ordre, la suite des éléments étant possiblement
triés. Si la valeur retournée est égale à la longueur de la liste,
alors cele-ci est triée.
Parameters
----------
l : list
`l` est une liste d'entiers.
Returns
-------
int
`int` est l'indice du premier élément mal positionné de `l`.
See Also
--------
permutation : Recherche itérative de toutes les inversions permettant d'améliorer le score.
nb_inversion : Recherche itérative du nombre minimal d'inversion.
Examples
--------
>>> L1 = [3,4,1,2]
>>> end_ord(L1)
2
>>> L2 = [1,2,3,4]
>>> end_ord(L2)
4
>>> L3 = [2,4,1,3]
>>> end_ord(L3)
0
"""
assert(type(l[0])== int)
ind=0
cond = True
while cond :
if ind < len(l) -1 :
if l[ind+1]==l[ind]+1 : # Si l'élément courrant +1 est équivalent au suivant.
ind+=1
cond = True
else :
cond = False
else : # La liste est entièrement triée.
cond = False
if ind ==0 :
return 0
else :
return ind+1
|
a129fc1148b09b148b67d6eeb46a23388d7b1347
| 231,156 |
def get_instance(compute, resource_group, instance_name):
"""Get Azure instance information
:param compute: Azure object using ComputeManagementClient
:param resource_group: string, name of Azure resource group
:param instance_name: string, name of Azure instance
"""
return compute.virtual_machines.get(
resource_group, instance_name, expand='instanceView')
|
045835391fe7febb31d131e4a40ffc823774c19a
| 531,240 |
def int_or_float(x):
"""Convert `x` to either `int` or `float`, preferring `int`.
Raises:
ValueError : If `x` is not convertible to either `int` or `float`
"""
try:
return int(x)
except ValueError:
return float(x)
|
d0a4def320f88655e494f89b7239e47e1ee70d0d
| 705,606 |
import re
def sysinfo(info):
"""
Parses information returned from drac getsysinfo command
and returns it as a dictionary
"""
out={}
parsers = [
r'(Firmware Version\s+=\s(?P<firmware>[.\d]+))',
r'(System BIOS Version\s+=\s(?P<bios>[.\d]+))',
r'(System Model\s+=\s(?P<model>[ \w]+))',
r'(Service Tag\s+=\s(?P<hw_tag>\w+))',
r'(MAC Address\s+=\s(?P<drac>[a-f:0-9]+))',
r'(?P<hwname>NIC\d+)\sEthernet\s+=\s(?P<hwaddr>[a-f:0-9]+)',
]
for line in info.split('\n'):
for pattern in parsers:
match = re.search(pattern, line)
if match:
if 'hwname' in match.groupdict().keys():
num = match.group('hwname').replace('NIC','')
out['eth%d' % (int(num)-1)] = match.group('hwaddr')
else:
for key in match.groupdict().keys():
out[key] = match.group(key)
if key == 'model' and re.match('PowerEdge', match.group(key)):
out['manufacturer'] = 'Dell'
return out
|
ecf7428ab92f7720b661045fbc5221da4b351fb2
| 223,980 |
def pressure_pseudocritical(ro_st):
"""
Natural gas pseudocritical pressure (CH4 > 85%), MPa
:param ro_st: (float) Natural gas density at standard parameters, kg/m3
:return: (float) Pseudocritical pressure of the natural gas, MPa
"""
return 0.1737 * (26.831 - ro_st)
|
8a1c8678936274443a675616d5d42b3dc4097b28
| 237,128 |
def mul_x_2(n):
"""
This function return the n number multiplied by 2
The n number is the number that will be rmultiplied
This function return the value after the operation
Example: mult-x_2(2) -> 4
"""
return n*2
|
912d5be8f4c11a139c7fb00f0f7ce43f39d9cddb
| 370,700 |
import yaml
from collections import OrderedDict
def _ordered_dump(data, stream=None, dumper_klass=yaml.Dumper, **kwargs):
    """Helper function for ordered dumping of structs as YAML"""
    class OrderedDumper(dumper_klass):
        """Helper class"""
    def _dict_representer(dumper, data):
        # Represent an OrderedDict as a plain YAML mapping, preserving key order
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()
        )
    # Register the representer on collections.OrderedDict so instances match at dump time
    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwargs)
|
a69f3fb97eeade342c8a82dcac97f11b465a94b8
| 325,812 |
def remove_prefix(path_prefix: str, path: str) -> str:
"""Removes a prefix and everything that comes before it from a path.
>>> remove_prefix("and", "here and there")
' there'
>>> remove_prefix("missing", "some other string")
'some other string'
"""
before, _, after = path.partition(path_prefix)
return after or before
|
926ee169bf2a7649344a7cf872a9fbc596d44e8e
| 375,314 |
from typing import MutableMapping
def flatten_dict(unflattend_dict, parent_key='', sep='.', keep_unflattend=False):
"""Flattens an nested dictionary to a one-level dictionary.
Parameters
----------
unflattend_dict: dict or MutableMapping
A dictionary with nested dictionaries
parent_key: str
A key to which the flattened keys are added. Mainly present to support flatting of subdicts, should be '' in
most primary calls.. Defaults to ''.
sep: str
The separator used for the flat items. Defaults to '.'
keep_unflattend: bool
Whether the keys, value pairs of nested dicts or MutableMappings should remain present in the output or not.
Defaults to False
Returns
-------
dict
A dictionary with one (flattened) key for a value.
"""
items = []
for key, value in unflattend_dict.items():
new_key = parent_key + sep + key if parent_key else key
if isinstance(value, MutableMapping):
if keep_unflattend:
items.append((new_key, value))
items.extend(flatten_dict(value, new_key, sep=sep).items())
else:
items.append((new_key, value))
return dict(items)
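# Minimal usage sketch (assumed input): nested keys are joined with the separator.
nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten_dict(nested))
# {'a': 1, 'b.c': 2, 'b.d.e': 3}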
|
0dac305e3d718740007f8db117f88e55826f67db
| 496,672 |
def get_data_struct_name(sub):
###############################################################################
"""
>>> get_data_struct_name("my_sub_name")
'MySubNameData'
>>> get_data_struct_name("sub")
'SubData'
"""
return "".join([item.capitalize() for item in sub.split("_")]) + "Data"
|
66d081d822bd378fed0b54300fc7f78285a3c5eb
| 463,655 |
def binary_search_iterative(arr, val, start, end):
"""searches arr for val.
Parameters:
arr (array): array to search.
val (type used in array): value to search for in arr.
Returns:
(int) : index of val in arr if val is found.
Otherwise returns -1 if val cannot be found in arr.
"""
#while the size of the search space is greater than 0
while start <= end:
mid = ((end - start) // 2) + start
if arr[mid] == val:
#we found what we want. Yay!
return mid
elif arr[mid] > val:
#search bottom half of search space
end = mid - 1
elif arr[mid] < val:
#search top half of search space
start = mid + 1
#couldn't find the value
return -1
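# Assumed usage sketch: the caller supplies the initial inclusive bounds of the search space.
data = [1, 3, 5, 7, 9, 11]
print(binary_search_iterative(data, 7, 0, len(data) - 1))  # 3
print(binary_search_iterative(data, 4, 0, len(data) - 1))  # -1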
|
9814156514b8c11f3ed75490caad0ee15a762f06
| 490,387 |
def _grid(x):
"""Access the underlying ndarray of a Grid object or return the object itself"""
try:
return x.grid
except AttributeError:
return x
|
ce57badeb8dd2fbb3571df7291dac781008ba9d4
| 135,185 |
def groups(user):
"""
Returns all groups the user is a member of.
:param user: the logged-in user
:return: Array of groups
"""
try:
my_groups = user.groups.all()
return my_groups
except Exception as e:
print(e)
|
5a6978c69cb4e8c9a5a60961d598d6ce8f22aff7
| 567,412 |
def calculate_dates_and_durations(start=None, end=None, duration=None):
"""
    Calculates Start Date, End Date or Duration from the other two knowns.
    Rule is:
    If all given data is not None, then calculate end date from start and duration;
    if two are known, then calculate the third from them;
    if fewer than two are known, then do nothing.
    """
if start is not None \
and duration is not None:
end = start + duration
if start is not None \
and end is not None:
duration = end - start
if end is not None \
and duration is not None:
start = end - duration
return start, end, duration
|
0cf890fea5b84ed13c5680c6b5fdea5f4107fa34
| 593,007 |
from typing import Dict
from typing import Counter
def top(data, n: int = 5) -> Dict:
"""
Get a dictionary of top-n items from a list.
Args:
data: Python collection
n: Number of top-values
Returns:
Dictionary of top-n items and count
"""
return dict(Counter(data).most_common(n))
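# Brief usage sketch (assumed data): counts are returned for the n most common items.
print(top(["a", "b", "a", "c", "a", "b"], n=2))  # {'a': 3, 'b': 2}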
|
848201d0a3c367ebf7b9fe4f9cd92f9c88bef65d
| 390,833 |
def _mpl_cmdata_to_bokeh_palette(cm_data):
"""Given the data from a Matplotlib colormap as a list of three-item lists in the range 0,1.0,
convert colors into the range 0,256 and return as a list of tuples"""
return [(int(r*256),int(g*256),int(b*256)) for r,g,b in cm_data]
|
931227e35279f1e52954f12827f72411b69ce0bc
| 162,452 |
import struct
def getTransactionId(packet):
"""Pulls out the transaction id of the packet"""
if isinstance(packet, list):
return struct.unpack(">H", struct.pack("BB", *packet[:2]) )[0]
else:
return struct.unpack(">H", packet[:2])[0]
|
2e99a39e17315dcef77ed3e58d9c275d034137a9
| 551,805 |
def health_status() -> tuple[dict[str, str], int]:
"""Ping-able server endpoint."""
return {"status": "ok"}, 200
|
533d15d6231c743750b453081791fa2dc4c499c7
| 597,851 |
def total_seconds(val):
"""
This is an alternative to the total_seconds method for python2.6.
:type val: datetime.timedelta
:param val: Instance of timedelta
"""
return (
(val.days * 86400 + val.seconds) * 10**6 + val.microseconds
) * 0.1**6
|
9110f2fd3c5e7e4e63ff51f2f4056164f0f1211f
| 271,114 |
import socket
def is_inet_4_or_6(gai):
"""Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
return gai[0] in (socket.AF_INET, socket.AF_INET6)
|
eb8b315290a0a5b3b46659cfc9fad934f48de360
| 359,715 |
def clear_rightmost_set_bit(n):
"""Clear rightmost set bit of n and return it."""
return n & (n-1)
|
250e9bd42ec23d24443084fe7f603fdd7271692b
| 77,750 |
def SignalStrength(value):
"""
Returns a string indicating the signal strength of the WiFi signal
in human-readable terms.
Based on the table from:
https://www.metageek.com/training/resources/understanding-rssi.html
    :param value: WiFi signal strength RSSI value
:return: String represent signal strength
"""
if value > -30:
return 'Excellent'
if value > -67:
return 'Very Good'
if value > -70:
return 'Good'
if value > -80:
return 'Poor'
if value > -90:
return 'Weak'
return 'N/A'
|
4a2915b907136f40fa2b0fde6116dea9d16d4357
| 457,178 |
def count_letters(word,find):
"""
Example function with types documented in the docstring.
Ce code doit retourner le nombre d'occurences d'un caractère passé en paramètre dans un mot donné également
Parameters
----------
param1 : str
Le 1er paramètre est une chaine de caractères
param2 : char
Le 2ème paramètre est un caractère
Returns
-------
int
Nombre d'occurences du caractère
Exemples
--------
>>> count_letters(abracadabra,a)
5
>>> count_letters(momomotus,u)
1
"""
count=0
for i in range(len(word)):
if word.find(find,i)!=0:
count+=1
return count
|
327e6b4fd99d03b27473b9620d6147909d0cf6e4
| 16,357 |
def scapy_layers_dot11_Dot11_essid(self):
"""Return the payload of the SSID Dot11Elt if it exists"""
elt = self.find_elt_by_id(0)
return elt.info if elt else None
|
c1634d01def034df8bbbff8cf02c207d279c8baa
| 70,255 |
def Location(child):
""" Return x,y offset of Region
See comment in Region.Child()
Args:
child: '0','1','2','3'
Returns:
(x,y): lower-left origin
"""
childnum = int(child)
x = childnum & 1 # mask the y bit
y = childnum >> 1 # shift over the y bit
y = not y
return (x,y)
|
ad27314cf34ecc1515507de129507ca14bcf7732
| 115,903 |
import string
def _start_alphabet(size_alphabet):
"""
Create an alphabet of a given size
:param size_alphabet
:type size_alphabet: int
:return: an alphabet of a given size
:rtype: list
"""
return list(string.ascii_lowercase)[0:size_alphabet]
|
68c0275fb6108d1109859d6134ffd734224eb7a0
| 227,218 |
def base36encode(num):
"""Converts a positive integer into a base36 string."""
if not isinstance(num, int):
raise TypeError("Positive integer must be provided for base36encode. " + repr(num) + " provided instead.")
if not num >= 0:
raise ValueError('Negative integers are not permitted for base36encode.')
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
res = ''
while not res or num > 0:
num, i = divmod(num, 36)
res = digits[i] + res
return res
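# A few assumed spot checks of the encoding:
print(base36encode(0))       # 0
print(base36encode(36))      # 10
print(base36encode(123456))  # 2N9C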
|
b60e2aa5daaaadf1f8da98afa0a33686fbc20909
| 150,504 |
def next_enum_variation(enums, enum_indices):
"""Loop through indices from [0, 0, ...] to [L0-1, L1-1, ...]
where Li is len(enums[i]). The list can be thought of as a number with many
digits, where each digit is in [0, Li), and this function effectively implements
the increment operation, with the least-significant digit being the first item."""
for i in range(len(enums)):
current = enum_indices[i]
# if current digit has room, increment it.
if current + 1 < len(enums[i][1]):
enum_indices[i] = current + 1
return True
# otherwise reset it to 0 and carry to the next digit.
enum_indices[i] = 0
# if this is reached, the number has overflowed and the loop is finished.
return False
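# Usage sketch with an assumed data layout: each entry of `enums` is a (name, values)
# pair, and `enum_indices` is advanced in place until every combination has been visited.
enums = [("color", ["red", "green"]), ("size", ["S", "M", "L"])]
indices = [0, 0]
combos = [tuple(indices)]
while next_enum_variation(enums, indices):
    combos.append(tuple(indices))
print(combos)
# [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]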
|
ab0893087eee7d64742902e5aa1000fa1247cc49
| 231,493 |
def load_balance_list(L, n):
"""Given a list of arbitrary items, split it to n roughly equal-sized parts.
This is useful for dividing a list of work items in MPI parallelization.
It is assumed that each work item takes the same amount of time; hence the
initial distribution is generated by naive integer division.
If len(L) does not divide evenly with n, the remaining items are distributed
on an item-by-item basis to the first (len(L) mod n) parts.
If n > len(L), the items will be distributed on an item-by-item basis to the
first len(L) parts, and the rest of the parts will get an empty list.
Examples:
load_balance_list(range(14), 2)
=> [[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12, 13]]
In this case, the division is even (no remainder).
load_balance_list(range(15), 2)
=> [[0, 1, 2, 3, 4, 5, 6, 14], [7, 8, 9, 10, 11, 12, 13]]
In this case, the item "14" is left over after the integer division.
The leftover item is placed in the first part.
load_balance_list(range(4), 8)
=> [[0], [1], [2], [3], [], [], [], []]
In this case, n is so large that there are not enough items to place
even one item in each part. The empty list is generated for those parts
for which no item is available.
    Parameters:
        L = any Python list.
        n = number of parts to split L into; must be >= 1.
Return value:
List of lists: L_out = [L1, L2, L3, ..., Ln]
where L1, L2, ... are sublists of L.
It always holds that len(L_out) == n. Note that the lengths of the
Lj (individual lists inside L_out) may differ by one item, depending on
whether the division was even.
If n == 1, the return value is [L] for compatibility
of the output format with the n > 1 case.
"""
if n < 1:
raise ValueError("n must be >= 1; got %d" % (n))
# If we're splitting L to one part, this is a no-op.
# But wrap it - the caller is expecting a list of lists.
#
if n == 1:
return [L]
nitems = len(L) # number of items to be distributed
out = []
if n <= nitems:
# Items per process.
blocklen = nitems // n # integer division!
# Leftover items.
remainder = nitems - blocklen*n # this is always < n
# Distribute the items that divided evenly.
for m in range(n):
offs = m*blocklen
out.append( L[offs:offs+blocklen] )
# Distribute the leftovers (if any).
if remainder > 0:
offs = nitems - remainder
for m in range(remainder):
out[m].append( L[offs] )
offs += 1
else:
# Distribute one item to each part as long as items are available.
for m in range(nitems):
out.append( [L[m]] ) # wrap the item to make a single-item list.
# Give an empty list to the rest of the parts.
nempties = n - nitems # this many empty lists are needed
for m in range(nempties):
out.append( [] )
assert( len(out) == n )
return out
|
f5d8c5927df0dfe3766137c0ec315151ddf25fbf
| 508,877 |
from typing import Any
from typing import Type
def _optimizerArgparse_encoder(arg: Any) -> Type:
"""
Transforms arguments passed to `optimizer_argparse.__call__`
at runtime to construct the key used for method lookup as
`tuple(map(arg_transform, args))`.
This custom arg_transform allow type variables to be passed
at runtime.
"""
# Allow type variables to be passed as arguments at runtime
return arg if isinstance(arg, type) else type(arg)
|
b0bb451e731feb25ed67995c4d98b8a19abd4806
| 159,867 |
def chi2_expval(nu):
"""
Expected value of Chi-squared distribution.
"""
return nu
|
54ef8557d45ed2359f91934ffbadd54b83f99d0b
| 375,483 |
def lm2idx(l, m):
""" Spherical Harmonics (l, m) to linear index
:param l: 0-based
:param m:
:return: 0-based index
"""
return l ** 2 + (l + m)
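# Worked example: the standard l**2 + l + m linear ordering of spherical harmonics.
print(lm2idx(0, 0), lm2idx(1, -1), lm2idx(1, 0), lm2idx(1, 1), lm2idx(2, -2))  # 0 1 2 3 4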
|
44c81ac7426939942232c2e2407c0f3a1627fa64
| 185,441 |
def cer(e):
"""
Canonicalize the representation of an undirected edge.
Works by returning a sorted tuple.
"""
return tuple(sorted(e))
|
799a7765c2b77d50828c5b3a365713305a84531e
| 633,751 |
def is_type(string):
"""Returns whether the given string is a type or not."""
return (string.startswith("[") and string.endswith("]")
and len(string) > 2)
|
22904b61e3a5c6af736be92be18cfb112969a6ed
| 177,516 |
def recall(cm):
"""
recall or sensitivity or true positive rate (TPR), hit rate
TPR = TP / P = TP / (TP+FN)
"""
return cm[1][1] / float(cm[1][0] + cm[1][1])
|
1594222c17864bcd773981cc87a1971bfca6539f
| 260,994 |
def inspect_data(df, n=5):
"""
Get first 'n' rows from dataframe, types of each feature,
and sum of na values for each feature
Parameters
- df: (pandas dataframe) dataframe
- n (default n=5): (int) num of rows to inspect
Return
- df_sample, df_types, df_na: (tuple)
df_sample: (pandas dataframe) dataframe with first n rows
df_types: (pandas Series) series with features and respective types
df_na: (pandas Series) series with number of na values in each feature
"""
# Getting first n rows of dataframe
df_sample = df.head(n)
# Getting type of each feature
df_types = df.dtypes
# Getting na values
df_na = df.isna().sum()
return df_sample, df_types, df_na
|
37b4972e3f88ae858199a6a8a5d0aae474203a6c
| 595,448 |
def query(entries, keyWord):
"""
Filters out entries that match the given substring in the DESCRIPTION
column.
:param entries: A pandas Dataframe containing transaction entries.
:param keyWord: The substring that is searched in the description column.
:return: A pandas Dataframe containing matched transaction entries only.
"""
return entries[entries["DESCRIPTION"].str.contains(keyWord, na=False)]
|
f44dffaa88280eeb41d6033e22f770f2fb82ca8d
| 293,924 |
def get_depth(string):
"""Calculates amount of whitespaces leading the given string and returns int"""
# expected input (example):
# string = [' 09140 Cellular Processes']
#
# expected output (example):
# depth = 13
depth = len(string) - len(string.lstrip(' '))
return depth
|
c2e8c2b8c480c2c990d3e7a3f06578e26b9440bb
| 57,293 |
import logging
def logged_class(cls):
"""
Class Decorator to add a class level logger to the class with module and
name.
"""
cls.logger = logging.getLogger(
"{0}.{1}".format(cls.__module__, cls.__name__))
return cls
|
7602ccde53c9060e440309239e678e4453416ec4
| 404,928 |
def list_blobs(storage_client, bucket_name, prefix=None):
# Helper functions for GCP from https://cloud.google.com/storage/docs/listing-objects#code-samples
"""Lists all the blobs in the bucket."""
blobs = storage_client.list_blobs(bucket_name, prefix=prefix)
return blobs
|
8c8d0a64da5a823baad1b0a9bd355e4130a60ae5
| 566,392 |
from typing import Iterable
from typing import Hashable
def _hash_triples(triples: Iterable[Hashable]) -> int:
"""Hash a list of triples."""
return hash(tuple(triples))
|
60d8dd7edf1cbc20e8928c8a1d3919853c1c633d
| 446,182 |
def _is_finite(constraints):
"""
Return ``True`` if the dictionary ``constraints`` corresponds to
a finite collection of ordered multiset partitions into sets.
If either ``weight`` or ``size`` is among the constraints, then
the constraints represent a finite collection of ordered multiset
partitions into sets. If both are absent, one needs ``alphabet`` to be
present (plus a bound on length or order) in order to have a
finite collection of ordered multiset partitions into sets.
EXAMPLES::
sage: from sage.combinat.multiset_partition_into_sets_ordered import _is_finite
sage: W = {"weight": {1:3, 2:3, 4:1}, "length": 5}
sage: S = {"size": 44, "min_length": 5}
sage: AO = {"alphabet": range(44), "max_order": 5}
sage: all(_is_finite(constr) for constr in (W, S, AO))
True
sage: AL = {"alphabet": range(44), "min_order": 5}
sage: _is_finite(AL)
False
"""
if "weight" in constraints or "size" in constraints:
return True
elif "alphabet" in constraints:
# Assume the alphabet is finite
Bounds = set(["length", "max_length", "order", "max_order"])
return Bounds.intersection(set(constraints)) != set()
|
5802604f8a338b8e0c7b5a99e63661637350371f
| 29,277 |
import _queue
def wait(jobs, notify=True, queue=None):
"""Wait for jobs to finish.
Only works on user jobs by default. To work on jobs so someone else,
initialize a fyrd.queue.Queue class with their user info and pass as an
argument to queue.
Parameters
----------
jobs : fyrd.job.Job or str or list of either (mixed list fine)
A single job or list of jobs, either Job objects or job numbers
notify : str, True, or False, optional
If True, both notification address and wait_time must be set in
the [notify] section of the config. A notification email will be
sent if the time exceeds this time. This is the default.
If a string is passed, notification is forced and the string must
be the to address.
False means no notification
queue : fyrd.queue.Queue, optional
An already initiated Queue class to use.
Returns
-------
success : bool
True if all jobs successful, false otherwise
"""
q = queue if queue else _queue.default_queue()
return q.wait(jobs, notify=notify)
|
4b0d08dbe89d0074e4ca9c350dc5aad593e234dd
| 309,009 |
def plot_sequence_content(read_composition, title,
nucleotide_dict, ax, nucleotides='ACGT'):
"""Plots per base composition.
Parameters
----------
read_composition : DataFrame
A DataFrame of per base content. Index is read coordinate. Columns are
nucleotides in the order of ACGTN.
title : str
The title for the generated plot.
nucleotide_dict : dict
Color for each base.
ax : Axes
Axes for plotting.
nucleotides : str, optional
Selected nucleotides to visualize.
Returns
-------
Axes
Distribution of sequence content per base.
"""
p_handles = list()
for i in list(nucleotides):
p = ax.plot(read_composition.index.values,
read_composition[i],
c=nucleotide_dict[i],
linewidth=1)
p_handles.append(p[0])
ax.legend(handles=p_handles,
labels=list(nucleotides),
loc='upper left',
fontsize=6,
frameon=True,
shadow=False,
framealpha=0)
ax.set_title(label=title, fontsize=7)
ax.tick_params(labelsize=6, labelcolor='black', direction='out')
ax.xaxis.set_ticks(range(0, read_composition.shape[0], 2))
ax.set_yticks(ax.get_yticks())
limits_y = list(ax.get_ylim())
if limits_y[0] < 0:
limits_y[0] = 0
if limits_y[1] > 1:
limits_y[1] = 1
ax.set_ylim(limits_y[0], limits_y[1])
ax.set_yticklabels(labels=[f'{i:3,.1%}' for i in ax.get_yticks()])
for i in ['top', 'bottom', 'left', 'right']:
ax.spines[i].set_linewidth(w=0.5)
ax.spines[i].set_color(c='#333333')
ax.set_xbound(lower=-1, upper=read_composition.shape[0] + 1)
a = (limits_y[1] - limits_y[0]) * 0.025
ax.set_ybound(
lower=limits_y[0] - a,
upper=limits_y[1] + a)
return ax
|
c4f658bc3862571bd65d5e4a756368f5f3a367aa
| 214,614 |
def BorueErukhimovich(q, C, r0, s, t):
"""Borue-Erukhimovich model of microphase separation in polyelectrolytes
Inputs:
-------
``q``: independent variable
``C``: scaling factor
``r0``: typical el.stat. screening length
``s``: dimensionless charge concentration
``t``: dimensionless temperature
Formula:
--------
``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0``
Literature:
-----------
o Borue and Erukhimovich. Macromolecules (1988) 21 (11) 3240-3249
o Shibayama and Tanaka. J. Chem. Phys (1995) 102 (23) 9392
o Moussaid et. al. J. Phys II (France) (1993) 3 (4) 573-594
o Ermi and Amis. Macromolecules (1997) 30 (22) 6937-6942
"""
x = q * r0
return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1)
|
86784d78691fc1e1d71224dbfb52f0aea0aaa2f7
| 339,389 |
def hash_map_key_sort_key(item):
"""
Used to sort hash maps based on their key.
Parameters
----------
item : `tuple` (`str`, `Any`)
An item of a hash map.
Returns
-------
key : `str`
"""
return item[0]
|
9d56cb37cf21dd3caf15128e3aba5c28c699e068
| 337,803 |
def get_number_of_bills(budget, denomination):
"""
:param budget: float - the amount of money you are planning to exchange.
:param denomination: int - the value of a single bill.
:return: int - number of bills after exchanging all your money
"""
return int(budget / denomination)
|
0fe48bd3ba32e528339c9e80ca6fda6a13150836
| 374,266 |
def clean_alphanumeric(text):
"""
If text has any non-alphanumeric characters, replace them with a hyphen.
"""
text_clean = ''
for charc in text:
text_clean += charc if charc.isalnum() else '-'
return text_clean
|
bb1e06a1d4e167218d733cf0bd9686d6a8abb836
| 188,128 |
def removesuffix(s: str, suffix: str) -> str:
"""Re-implementation of Python 3.9's str.removesuffix
https://docs.python.org/3/library/stdtypes.html#str.removesuffix
Args:
s (str): The string from which to remove the suffix
suffix (str): suffix to remove
Returns:
str: copy of `s`, with `suffix` removed if `s` ends with `suffix`
"""
if s.endswith(suffix) and len(suffix) > 0:
return s[: -len(suffix)]
else:
return s
|
8a9c6e85c39646272538e873899c38fe100f65f8
| 387,973 |
import time
def datetimeToUnixSec(date):
"""
Transform a datetime in unix format
"""
return int(time.mktime(date.timetuple()))
|
fc312a213ce7c9c5ae56e37adc787301e9a6ffba
| 336,268 |
def v0_is_perfect_square(number):
"""Return True if given number is the square of an integer."""
return int(number**0.5)**2 == number
|
0e2950429a84d622c7a386e9f54e1289689b9367
| 672,926 |
def parse_credentials(creds):
"""
Parses a credentials string. The first part is the password which
is separated by a forward-slash to the host-name. The host-name is
separated by a double-colon to the port-name.
Returns a tuple of ``(password, host, port)``. A *ValueError* is
raised if the format is invalid. The password is optional and
None is returned if it is not specified.
"""
password, _, creds = creds.rpartition('/')
if not password:
password = None
host, _, creds = creds.rpartition(':')
if not host:
raise ValueError('no host')
if not creds:
raise ValueError('no port')
try:
port = int(creds)
except ValueError:
raise ValueError('invalid port')
return (password, host, port)
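# Assumed credential strings: the password is optional, host and port are required.
print(parse_credentials("s3cret/localhost:6379"))  # ('s3cret', 'localhost', 6379)
print(parse_credentials("localhost:6379"))         # (None, 'localhost', 6379)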
|
c270f008b8a15a5c52947b207873fff2820c2b48
| 559,161 |
def calc_max_length(tensor):
"""Find the maximum length of any tensor"""
return max(len(t) for t in tensor)
|
21ad43f14d8952261a45b8efcd927b82eadc83bd
| 694,540 |
def capitalize(text):
"""
Returns a capitalized string. Note that this differs
from python's str.capitalize()/title() methods for
cases like "fooBarBaz"
"""
if text == '':
return ''
return text[0].upper() + text[1:]
|
5bedc2be5fa3aa7659d8bc9521c0082db2cccc43
| 96,317 |
def amplify_noise(noise):
""" Amplifly the noise to make it more apparent in the GUI display """
min, max = noise.min(), noise.max()
amplification = 255.0 / (max - min)
return (noise * amplification).astype("uint8")
|
ebcd6bae3721e6b4c296181d7eb2d445021d2017
| 250,064 |
def sanitize_conf_values(conf_values):
"""Sanitize the forseti_conf values not to be zero-length strings.
Args:
conf_values (dict): The conf values to replace in the
forseti_conf_server.yaml.
Returns:
dict: The sanitized values.
"""
for key in conf_values.keys():
if not conf_values[key]:
conf_values[key] = '""'
return conf_values
|
d09cfb513aa0dcb378ac238f0f3b7675c15ae79a
| 433,773 |
def truncate(text, max_length=1024):
"""Limit huge texts in number of characters"""
text = str(text)
if text and len(text) >= max_length:
return text[:max_length//2-3] + " ... " + text[-max_length//2+3:]
return text
|
fa69b875a1921bd6b0f2754a061ef6f8f4f77d2c
| 631,435 |
def keybase_lookup_url(username):
"""Returns the URL for looking up a user in Keybase"""
return "https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s" \
% username
|
7c44d47ae2dfef9cdc699f7d7e9ee4c07adbfda3
| 77,643 |
import random
def random_color(max=230):
"""
Returns a randomly generated tuple of RGB values
@param: max: int
@return: tuple: (R, G, B)
"""
return (
random.randint(0, max),
random.randint(0, max),
random.randint(0, max)
)
|
a3ce0b76c0c11d3607d3ae40c12eacd0d811cf96
| 170,569 |
def month_delta(date, delta):
"""Function: month_delta
Description: Produces a month delta based on date passed to function.
Arguments:
(input) date -> Date time.
(input) delta -> Delta on date time (i.e. -n...0...n).
(output) month = Numeric month of the year.
    (output) year = Numeric year in 4-digit format.
"""
month, year = \
(date.month + delta) % 12, date.year + ((date.month) + delta - 1) // 12
if not month:
month = 12
return month, year
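# A couple of assumed sanity checks: deltas can cross year boundaries in both directions.
import datetime
print(month_delta(datetime.date(2021, 1, 15), -1))  # (12, 2020)
print(month_delta(datetime.date(2021, 11, 5), 3))   # (2, 2022)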
|
97f3e73e9b5b1679836a8aba8739fc9940a6f3c1
| 582,482 |
def read_file_str(filename):
"""Read the file and return as a string."""
with open(filename, "r") as f:
file_str = f.read()
return file_str
|
973939c2173ae374c12ca4dd8e325ef0adb03b95
| 631,536 |
def parse_int_value(value):
""" Parses string that is expected to be an integer.
"""
return int(value)
|
9888e129031a83f269c067d2697606fd17a6eb17
| 74,991 |
import torch
def rotation_matrix(angle):
"""Gets a transformation matrix for 2D rotation."""
angle = torch.as_tensor(angle).to(torch.float32)
mat = torch.eye(3, device=angle.device)
mat[0, 0] = angle.cos()
mat[0, 1] = angle.sin()
mat[1, 0] = -angle.sin()
mat[1, 1] = angle.cos()
return mat
|
d5884e835d3ccd773f0989284ba612e9c36ea4b7
| 611,887 |
def _get_paths(base_path):
"""
A service endpoints base path is typically something like /preview/mlflow/experiment.
We should register paths like /api/2.0/preview/mlflow/experiment and
/ajax-api/2.0/preview/mlflow/experiment in the Flask router.
"""
return ['/api/2.0{}'.format(base_path), '/ajax-api/2.0{}'.format(base_path)]
|
ab4c4cffd424ab87a155a414e6b9fe79563fdc33
| 190,617 |
import ipaddress
def netmask_to_prefixlen(netmask: str) -> int:
""" Takes an IP netmask and returns the corresponding prefix length
:param str netmask: IP netmask (e.g. 255.255.0.0)
:return: prefix length
"""
return ipaddress.ip_network('0.0.0.0/{}'.format(netmask)).prefixlen
|
890e5f5f480ad22605853a4d8fa40ce76368312d
| 626,012 |
def findUsername(data):
"""Find a username in a Element
Args:
data (xml.etree.ElementTree.Element): XML from PMS as a Element
Returns:
username or None
"""
elem = data.find('User')
if elem is not None:
return elem.attrib.get('title')
return None
|
f7b6bb816b9eeeca7e865582935a157cdf276928
| 707,252 |
def subtract(value, entries):
"""Subtract the entry off the value and return another set of remainders"""
return {(value - entry) for entry in entries}
|
c6e56663c4ee5b36f2d105250f1d0c52addb3c02
| 555,461 |
def get_extension(filename):
"""
Get the extension of a file (What comes right after the last dot).
"""
return filename.split(".")[-1]
|
840aaff72fd6741f5f3ce79196a56daf2d8d7fc9
| 224,329 |
def _qualname(obj):
"""Get the fully-qualified name of an object (including module)."""
return obj.__module__ + '.' + obj.__qualname__
|
34c251612104afff79b2b6cd3580a4a939cd01d2
| 25,531 |
from typing import Dict
def define_foot_name_to_index_mapping(robot: str) -> Dict:
"""Define the robot-specific mapping between feet frame names and indexes."""
if robot != "iCubV2_5":
raise Exception("Mapping between feet frame names and indexes only defined for iCubV2_5.")
foot_name_to_index = {"l_sole": 53, "r_sole": 147}
return foot_name_to_index
|
63f04e73343fe8cef9011612c528e0497ec78190
| 311,244 |
def set_alarm_email_delay(
self,
alarm_email_delay: int,
) -> bool:
"""Set alarm email delay configuration
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - alarm
- POST
- /alarm/delayEmail
:param alarm_email_delay: Alarm email delay duration in seconds
:type alarm_email_delay: int
:return: Returns True/False based on successful call
:rtype: bool
"""
data = {"duration": alarm_email_delay}
return self._post("/alarm/delayEmail", data=data, return_type="bool")
|
5d0d5c85af9ce51d16555a73cd04016e518d20b8
| 454,411 |
def graphviz_attrs(**attrs):
"""
>>> graphviz_attrs(resolution=23, label="My name")
'[resolution=23, label="My name"]'
"""
def gen():
for k, v in attrs.items():
if isinstance(v, str):
yield f'{k}="{v}"'
else:
yield f'{k}={v}'
return '[' + ', '.join(gen()) + ']'
|
0147558e14f937366a12a2fbf8d49cf373b5b773
| 346,160 |
def _generic_handler(line,num_for_key=None):
"""
Handle a gromacs itp-style line.
num_for_key: number of fields to use for the key, starting from the first
field. This means an atom field (1,) or bond field (1,2), or
angle field (1,2,3), etc. can use the same parsing function
"""
# Get rid of everything after comment
line = line.split(";")[0].strip()
# If line only has comments or is blank, return nothing
if line == "":
return None
else:
# If no num_for_key is specified, use the whole line (less the comment)
# as a key
if num_for_key is None:
key = line
# If num_for_key is specified, grab fields 0-num_for_key and use as key
else:
key = tuple(line.split()[0:num_for_key])
value = line
return key, value
|
5bb85a86b3d9c696ccf2ee8975cd92b73c0b3ade
| 210,931 |
def add_ground_truths(classifications_df, cazy_dict):
"""Retrieve ground truth CAZyme/non-CAZyme classifications and add to the df of predictions.
:param classifications_df: pandas dataframe of prediction tool CAZyme/non-CAZyme classifications
for each protein. Each unqiue protein is one row, each prediction tool is one column.
Return df containing the prediction tool predictions and CAZy ground truths added as an
additional column, called 'CAZy'.
"""
cazy_c_nc_classification = []
# tool_predictions = clasifications_df
protein_accessions = classifications_df.index
for protein_accession in protein_accessions:
try:
cazy_dict[protein_accession]
cazyme_classification = 1
except KeyError:
cazyme_classification = 0
cazy_c_nc_classification.append(cazyme_classification)
classifications_df["CAZy"] = cazy_c_nc_classification
return classifications_df
|
c2f48f77ad693f4168265b6a7107a4d93539f952
| 61,159 |
def parse_command_line_parameters(parser):
"""
@brief Parses the command line parameters provided by the user and makes
sure that mandatory parameters are present.
@param[in] parser argparse.ArgumentParser
@returns an object with the parsed arguments.
"""
msg = {
'--input-dir': 'Path to the input folder.',
'--output-dir': 'Path to the output folder.',
'--im-ext': """Extension of the image files inside the input
folder. Typically '.jpg'""",
'--seg-suffix': """Suffix of the segmentation files. For example, if
an input image is called image.jpg, and the
corresponding segmentation is image_seg.png,
then the suffix is '_seg'.""",
'--seg-ext': """Extension of the segmentation mask files.
Typically '.png'""",
'--max-inst': 'Maximum number of instruments present in the image.',
        '--max-tips': 'Maximum number of instrument tips present in the image.',
}
parser.add_argument('--input-dir', required=True, help=msg['--input-dir'])
parser.add_argument('--output-dir', required=True, help=msg['--output-dir'])
parser.add_argument('--im-ext', required=False, default='.jpg', help=msg['--im-ext'])
parser.add_argument('--seg-suffix', required=False, default='_seg',
help=msg['--seg-suffix'])
parser.add_argument('--seg-ext', required=False, default='.png', help=msg['--seg-ext'])
parser.add_argument('--max-inst', required=True, help=msg['--max-inst'])
parser.add_argument('--max-tips', required=True, help=msg['--max-tips'])
args = parser.parse_args()
args.max_inst = int(args.max_inst)
args.max_tips = int(args.max_tips)
return args
|
f79b016118cb818893e484c6902c45f817a3d58a
| 73,777 |
def escape_quotes(text:str)->str:
"""Escape both single and double quotes in strings"""
return text.replace('"', '\\"').replace("'", "\\'")
|
c888be00ed0c941c80d33aca269d3860a43c0a14
| 361,406 |
def make_box(poly):
"""Generate a bounding box from a polygon"""
x = []
y = []
for p in poly:
for point in p:
x.append(point[0])
y.append(point[1])
return (min(x), min(y), max(x), max(y))
|
35d7de1c6bedf2341f83246c932b493b13b3db1b
| 554,219 |
def prod_nadate_process(prod_df, prod_col_dict, pnadrop=False):
"""
Processes rows of production data frame for missing time-stamp
info (NAN).
Parameters
----------
prod_df: DataFrame
A data frame corresponding to production data.
    prod_col_dict: dict of {str : str}
A dictionary that contains the column names associated with
the production data, which consist of at least:
- **timestamp** (*string*), should be assigned to
associated time-stamp column name in prod_df
pnadrop: bool
Boolean flag that determines what to do with rows where
time-stamp is missing. A value of `True` will drop these
rows. Leaving the default value of `False` will identify
rows with missing time-stamps for the user, but the function
will output the same input data frame with no modifications.
Returns
-------
prod_df: DataFrame
        The output data frame. If pnadrop is True, an updated version
of the input data frame is output, but rows with missing
time-stamps are removed. If default value is maintained, the
input data frame is output with no modifications.
addressed: DataFrame
A data frame showing rows from the input that were addressed
or identified by this function.
"""
    # Create a local copy so the original dataframe is not modified
    prod_df = prod_df.copy()
prod_ts = prod_col_dict["timestamp"]
# Dropping rows
mask = prod_df.loc[:, prod_ts].isna()
addressed = prod_df[mask]
if pnadrop:
prod_df.dropna(subset=[prod_ts], inplace=True)
return prod_df, addressed
|
20381ef0cbeabae67d738f17ecd349513006e81e
| 672,752 |
def replace_check(c):
"""
Replace non-ASCII chars with their code point
"""
if ord(c) <= ord('~'):
return c
return '<%(orig)c:U+%(point)04X>' % {
'orig': c,
'point': ord(c)
}
|
ed0966f16426b2fc697a33dce93117bd60f39d0d
| 145,690 |
def custom_print(verbose=False):
"""return a print function that does nothing if the verbose parameter is set to false and everything if true"""
if verbose:
# print the message
def v_print(msg):
print(msg)
else:
# do nothing function
v_print = lambda msg: None
return v_print
|
5f3c4382218646357bd74f004c69321ca82be09c
| 347,995 |
import math
def percentageFloor(x):
""" Returns a float which is the input rounded down to the neared 0.01.
e.g. precentageFloor(0.941354) = 0.94
"""
return math.floor(x*100) / 100.0
|
77fbf5df97c94ac22a7b1243962788d581199fa7
| 94,678 |
def rgb_2_cmyk(color: tuple) -> tuple:
"""
Convert RGB color vales to CMYK color values.
:param color: tuple of RGB values for color eg. (255, 255, 255)
:returns: CMYK values eg. (C, M, Y, K)
"""
# if RGB color is black return CMYK black
if color == (0, 0, 0):
return (0, 0, 0, 100)
# convert the RGB values
r, g, b = color
k = 1 - max((r, g, b)) / 255
c = int(((1 - (r / 255) - k) / (1 - k)) * 100)
m = int(((1 - (g / 255) - k) / (1 - k)) * 100)
y = int(((1 - (b / 255) - k) / (1 - k)) * 100)
return (c, m, y, int(k * 100))
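# Assumed spot checks: pure red and pure white.
print(rgb_2_cmyk((255, 0, 0)))      # (0, 100, 100, 0)
print(rgb_2_cmyk((255, 255, 255)))  # (0, 0, 0, 0)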
|
74276e217c4e63210e39bba371c1db9095ea422c
| 289,728 |
import torch
def split_stack(x, split_sizes, split_dim, stack_dim):
"""Split x along dimension split_dim and stack again at dimension stack_dim"""
t = torch.stack(torch.split(x, split_sizes, dim=split_dim), dim=stack_dim)
return t
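# Quick shape check (assumed tensor): splitting a (2, 6) tensor into chunks of 3 along
# dim 1 and stacking them at dim 0 yields shape (2, 2, 3).
import torch
x = torch.arange(12).reshape(2, 6)
print(split_stack(x, 3, split_dim=1, stack_dim=0).shape)  # torch.Size([2, 2, 3])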
|
63f0fdc7588baa392bf14e7b53f6881bbd16386e
| 674,815 |
import imp
def find_module(module_name, path=None):
""" Returns the filename for the specified module. """
components = module_name.split('.')
try:
# Look up the first component of the module name (of course it could be
# the *only* component).
f, filename, description = imp.find_module(components[0], path)
# If the module is in a package then go down each level in the package
# hierarchy in turn.
if len(components) > 0:
for component in components[1:]:
f, filename, description = imp.find_module(component,
[filename])
except ImportError:
filename = None
return filename
|
f36542717e0aa10000174ec827a4b2ee2c5a3850
| 483,308 |