content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def dms_deg(degree_tuple):
    """
    Convert a (degree, minute, second) angle to a decimal angle.
    :param degree_tuple: (degree, minute, second) tuple
    :return: Decimal angle in degrees
    Example:
    >>> import units as u
    >>>
    >>> u.dms_deg((45, 23, 34))
    45.39277777777778
    """
    d, m, s = degree_tuple
    # 60 minutes per degree, 3600 seconds per degree.
    return d + m / 60.0 + s / 3600.0
|
e62402462c2c809df94f6844721efbdbd37acd5d
| 214,780 |
import re
def camel_to_snake(camel):
    """
    Convert a camelCase string to snake_case.

    Also strips spaces and question marks first - we had a case where we
    were parsing a spreadsheet and using the headings as keys in an object,
    and one of the headings was "Who Uploads?".

    :param camel: camelCase string, possibly containing spaces / '?'
    :return: snake_case version of the string
    """
    camel = camel.strip()
    camel = re.sub(' ', '', camel)
    # BUG FIX: a bare '?' is an invalid regular expression ("nothing to
    # repeat") and re.sub('?', ...) raised re.error; it must be escaped.
    camel = re.sub(r'\?', '', camel)
    return re.sub(r'(?<!^)(?=[A-Z])', '_', camel).lower()
|
9d96a3597fcf9fd73248c315e224e55d5fd1f96c
| 680,221 |
def variance(values, mean):
    """Calculate the variance of *values* around a precomputed *mean*.

    NOTE: this divides by ``len(values)`` (population variance), not by
    ``n - 1``, despite the original "sample variance" label; behavior is
    kept unchanged so existing callers are unaffected.

    :param values: non-empty sequence of numbers
    :param mean: precomputed mean of ``values``
    :return: variance as a float
    """
    return sum((v - mean) ** 2 for v in values) / len(values)
|
6b65dc8014b588ad6748b5ecd9097bd95d40ccbf
| 96,042 |
def validate_license_model(license_model):
    """
    Validate LicenseModel for DBInstance
    Property: DBInstance.LicenseModel
    """
    valid_models = (
        "license-included",
        "bring-your-own-license",
        "general-public-license",
        "postgresql-license",
    )
    # Accept the value unchanged when valid; otherwise reject loudly.
    if license_model in valid_models:
        return license_model
    raise ValueError(
        "DBInstance LicenseModel must be one of: %s" % ", ".join(valid_models)
    )
|
d68d62e726f729688229b142e391d90c02922c12
| 425,841 |
def richclub(graph, fraction=0.1, highest=True, scores=None, indices_only=False):
    # from http://igraph.wikidot.com/python-recipes#toc6
    """Extract the "rich club" of the given graph, i.e. the subgraph spanned
    between vertices having the top X% of some score.
    Scores are given by the vertex degrees by default.
    @param graph: the graph to work on
    @param fraction: the fraction of vertices to extract; must be between 0 and 1.
    @param highest: whether to extract the subgraph spanned by the highest or
      lowest scores.
    @param scores: the scores themselves. C{None} uses the vertex degrees.
    @param indices_only: whether to return the vertex indices only (and not the
      subgraph)
    """
    if scores is None:
        scores = graph.degree()
    count = int(round(graph.vcount() * fraction))
    # Rank every vertex index by its score, ascending.
    ranked = sorted(range(graph.vcount()), key=scores.__getitem__)
    chosen = ranked[-count:] if highest else ranked[:count]
    if indices_only:
        return chosen
    return graph.subgraph(chosen)
|
ebbab04068709858fc3b88d2517e648a6a979c57
| 639,237 |
def getCommonAncestor(rng):
    """
    Return the lowest common ancestor element of the given js range.
    @param rng: js range to get the ancestor element of
    @return: the lowest element that completely encompasses the range
    """
    ancestor = rng.commonAncestorContainer
    return ancestor
|
c6bfc510aacd7a44b492254c3450f77780916612
| 143,170 |
from typing import Dict
def find_coord_extrema(coords: tuple, extrema: Dict) -> Dict:
    """
    Grow *extrema* in place so it covers the given solution coordinate.
    :param coords: (x, y) coordinate tuple
    :param extrema: dict with running maxima under keys 'x' and 'y'
    :return: the (mutated) extrema dict
    """
    extrema['x'] = max(extrema['x'], coords[0])
    extrema['y'] = max(extrema['y'], coords[1])
    return extrema
|
e0e72b01306d51396a036ac3b1247191f4a01568
| 518,093 |
def clipped(piece_idx, move_idx):
    """
    Return True when ``move_idx`` is off the 8x8 board or too far from
    ``piece_idx`` to be a valid (unclipped) knight move.
    """
    # Off-board destination is always clipped.
    if move_idx not in range(64):
        return True
    dx = piece_idx % 8 - move_idx % 8
    dy = piece_idx // 8 - move_idx // 8
    # |delta| > 3 in either axis means the move wrapped around the board.
    return abs(dx) > 3 or abs(dy) > 3
|
913cdccab5ca994b3366deb22b135a15cb612786
| 401,467 |
def cumsum(arr):
    """Return the cumulative sums of ``arr`` as a list.

    :param arr: sequence of addable values
    :return: list whose i-th element is the sum of ``arr[0..i]``.
        An empty input yields an empty list (the original raised
        IndexError on ``[]``).
    """
    totals = []
    for a in arr:
        totals.append(totals[-1] + a if totals else a)
    return totals
|
802ffe79771e7aad85f9851f06913ff581b3d447
| 577,556 |
import inspect
from typing import Union
from typing import get_args
def get_annotation_class(parameter: inspect.Parameter) -> Union[type, None]:
    """
    Get the class of the annotation.
    # Parameters
    parameter: `inspect.Parameter`
        The parameter to get the class of.
    # Returns
    `Union[type, None]` The type or None.
    """
    # `get_args` yields the inner arguments of a `_GenericAlias`; it is
    # empty for a plain annotation, which can be returned directly.
    args = get_args(parameter.annotation)
    if not args:
        return parameter.annotation
    if len(args) == 1:
        # A single inner argument is the type we are after.
        return args[0]
    # Several arguments are ambiguous - returning None is easier to handle.
    return None
|
f130a72b4cf2d6eac4e49689248af350bb76cf80
| 466,185 |
def gini(p):
    """Calculate the Gini impurity for a binary split.
    :param p: The probability or proportion of the samples belonging to a class
    :returns: Gini Index
    :rtype: float
    """
    q = 1 - p
    # p*q + q*(1-q) with q = 1-p, i.e. 2*p*(1-p) for the two-class case.
    return p * q + q * (1 - q)
|
1f5975854e5442f1dd97a576695da138f3764949
| 347,121 |
def make_safe_star_id(star_id):
    """ Make a star id that is safe to include in a URL.
    :param star_id: star id that may contain spaces or plus signs [string].
    :return: star_id safe to include in a URL [string].
    """
    # Encode literal '+' first so the '+' substituted for spaces survives.
    encoded = star_id.replace("+", "%2B")
    return encoded.replace(" ", "+")
|
904c6919b5c52ccd472f08955057d254e58b5ea0
| 213,155 |
def subtract_vect(a, b):
    """
    Subtract 3-vector b from 3-vector a, componentwise.
    Deprecated, use mpmath instead!!!
    :param a: [float, float, float]
    :param b: [float, float, float]
    >>> subtract_vect([1, 2, 3], [3, 2, 2])
    (-2, 0, 1)
    """
    return tuple(a[i] - b[i] for i in range(3))
|
3465a670158a0ae34879a7d21599a9b098733f4d
| 696,304 |
def checklist_data(address_book, PersonFactory):
    """Create the data for the `Checklist` tests."""
    # Two fixed persons; creation order matters to factory side effects.
    people = [
        PersonFactory(address_book, u'Vrozzek', first_name=u'Paul'),
        PersonFactory(address_book, u'Vranzz', first_name=u'Peter'),
    ]
    return people
|
d5b14e1f589429de0144d00fb8923b8689dcff14
| 370,780 |
def dot_v2(vec1, vec2):
    """Return the dot product of two 2-D vectors (objects with .x and .y)."""
    x_term = vec1.x * vec2.x
    y_term = vec1.y * vec2.y
    return x_term + y_term
|
afb309f1e6260f224c74cdc02f91a697e6225856
| 459,901 |
def col_list(nn):
    """
    Get names for the columns containing Trace value and Signal intensity value
    for a given nearest number of bases, from the dataframe returned by trace_df
    Parameters
    ----------
    nn : int for number of nearest neighbours
    Returns
    -------
    list : list containing column names
    """
    # The original shadowed the builtin `list` and built the names with
    # index arithmetic; iterate the offsets -nn..nn directly instead.
    offsets = range(-nn, nn + 1)
    names = ["SI_%d" % off for off in offsets]
    names += ["TR_%d" % off for off in offsets]
    return names
|
58c7407bc362e4a72d4ea1b24664ac6dfef56e4c
| 200,489 |
def _vpd(es, ea):
"""Vapor pressure deficit
Parameters
----------
es : ee.Image or ee.Number
Saturated vapor pressure [kPa].
ea : ee.Image or ee.Number
Actual vapor pressure [kPa].
Returns
-------
ee.Image or ee.Number
Vapor pressure deficit [kPa].
"""
return es.subtract(ea).max(0)
|
c8942af990fc105886389cbbadedd9ae664d5a87
| 58,140 |
import re
def wrap_line(line, breakable_regex, line_cont,
              base_indent=0, max_width=80, rewrap=False):
    """Wrap the given line within the given width.
    This function is going to be exported to be used by template writers in
    Jinja as a filter.
    Parameters
    ----------
    line
        The line to be wrapped.
    breakable_regex
        The regular expression giving the places where the line can be broke.
        The parts in the regular expression that needs to be kept can be put in
        a capturing parentheses.
    line_cont
        The string to be put by the end of line to indicate line continuation.
    base_indent
        The base indentation for the lines.
    max_width
        The maximum width of the lines to wrap the given line within.
    rewrap
        if the line is going to be rewrapped.
    Return
    ------
    A single string: the broken lines joined with newline characters (note:
    not a list, despite the original wording).
    """
    # First compute the width that is available for actual content.
    avail_width = max_width - base_indent - len(line_cont)
    # Remove all the new lines and old line-continuation and indentation for
    # rewrapping.
    # NOTE(review): line_cont is interpolated unescaped into the pattern, so
    # regex metacharacters in it (e.g. '\\') are treated as syntax - confirm
    # callers only pass literal-safe continuations.
    if rewrap:
        line = re.sub(
            line_cont + '\\s*\n\\s*', '', line
        )
    # Break the given line according to the given regular expression.
    trunks = re.split(breakable_regex, line)
    # Have a shallow check and issue warning when any single trunk can never
    # fit in the available width; the trunk is still emitted as-is.
    for i in trunks:
        if len(i) > avail_width:
            print('WARNING')
            print(
                'Trunk {} is longer than the given width of {}'.format(
                    i, max_width
                )
            )
            print('Longer width or finer partition can be given.')
        continue
    # Actually break the list of trunks into lines.
    lines = []
    curr_line = ''
    for trunk in trunks:
        if len(curr_line) == 0 or len(curr_line) + len(trunk) <= avail_width:
            # When we are able to add the trunk to the current line. Note that
            # when the current line is empty, the next trunk will be forced to
            # be added.
            curr_line += trunk
        else:
            # When the current line is already filled up.
            #
            # First dump the current line.
            lines.append(curr_line)
            # Then add the current trunk at the beginning of the next line. The
            # left spaces could be striped.
            curr_line = trunk.lstrip()
        # Go on to the next trunk.
        continue
    else:
        # The loop never breaks, so this for/else always appends the trailing
        # current line after the loop finishes.
        lines.append(curr_line)
    # Before returning, we need to decorate the lines with indentation and
    # continuation suffix (the last line gets no continuation).
    decorated = [
        ''.join([
            ' ' * base_indent, v, line_cont if i != len(lines) - 1 else ''
        ])
        for i, v in enumerate(lines)
    ]
    return '\n'.join(decorated)
|
70e076188dbea29b7d8e5e8dea7a31897ad3e9a6
| 673,014 |
def xstr(s):
    """
    Return str(s), or the empty string when s is None.

    BUG FIX: the original tested truthiness (`if s`), so falsy values such
    as 0, False and '' also collapsed to '' - contradicting the documented
    "if None" contract. Identity comparison with None matches the docs:
    xstr(0) == '0'.
    """
    return "" if s is None else str(s)
|
c1796e421dcafa98a0c6cdaa0ccd4aec63a0b641
| 653,098 |
def find_in_line(view, character, forward=True):
    """Find a character in the current line.
    :param view:
        The view where we are performing the search.
    :param character:
        The sought character.
    :param forward:
        If `True`, search forward. If `False`, search backwards.
    Returns 0 or a positive integer (the character's position in the view)
    if the character was found; returns a negative integer otherwise.
    """
    pt = view.sel()[0].b
    current_line = view.line(pt)
    if forward:
        step = 1
        in_bounds = lambda p: p < current_line.end()
    else:
        step = -1
        in_bounds = lambda p: p >= current_line.begin()
    # Scan point by point until the character appears or the line ends.
    while in_bounds(pt):
        if view.substr(pt) == character:
            return pt
        pt += step
    return -1
|
e46444ae0f3d13c504805ff8b4941ff351d23e97
| 273,235 |
def sample_line_from_center(polyline, length_to_sample):
    """Takes a polyline and samples it a target length, centered on the
    midpoint, using the segmentAlongLine method."""
    total_length = float(polyline.length)
    midpoint = total_length / 2
    begin = midpoint - length_to_sample / 2
    finish = midpoint + length_to_sample / 2
    # NOTE(review): this guard compares against HALF the sample length;
    # confirm it should not be `total_length <= length_to_sample`.
    if total_length <= length_to_sample / 2:
        begin, finish = 0, total_length
    return polyline.segmentAlongLine(begin, finish)
|
7aedb254f894165fa01c84baac9306a596d64e0f
| 223,704 |
def recursive_config_join(config1: dict, config2: dict) -> dict:
    """Recursively join 2 config objects, where config1 values override config2 values"""
    for key in config2:
        if key not in config1:
            config1[key] = config2[key]
            continue
        existing, incoming = config1[key], config2[key]
        # Only descend when BOTH sides are dicts; otherwise config1 wins.
        if isinstance(existing, dict) and isinstance(incoming, dict):
            config1[key] = recursive_config_join(existing, incoming)
    return config1
|
64dc0bbcebcf20ba913828d05e2004f461909b8f
| 689,939 |
from typing import List
import json
def read_jsonl_file(file_path: str, single_line: bool = False) -> List:
    """
    Loads a jsonl file and returns a list corresponding to each line in the file.
    Parameters
    ----------
    file_path : str
        The file to load the data from
    single_line : bool, default=False
        Indicates all of the json objects are on a single line (i.e., not separated
        by a newline character, but separated by "")
    Returns
    -------
    List
        The items, where each item corresponds to one line
    """
    items = []
    if single_line:
        decoder = json.JSONDecoder()
        # BUG FIX: the original `open(file_path).read()` leaked the file
        # handle; a context manager closes it deterministically.
        with open(file_path, "r") as f:
            contents = f.read()
        offset = 0
        while offset < len(contents):
            # raw_decode returns the parsed object and how many characters
            # it consumed, letting us walk back-to-back JSON values.
            item, length = decoder.raw_decode(contents[offset:])
            items.append(item)
            offset += length
    else:
        with open(file_path, "r") as f:
            for line in f:
                items.append(json.loads(line))
    return items
|
38b9c5ba5226179e283f3f1dbbdc0ac69706bc0e
| 552,998 |
def get_suiting(string):
    """
    Parses a string representing the suiting (suited/offsuit) of the player's
    two hole cards. Returns True if suited, False if offsuit.
    Returns None if the input could not be parsed.
    """
    # "offsuit"/"unsuited" must be tested before the generic "suit" check,
    # since they contain that substring too.
    if "of" in string or "un" in string:
        return False
    return True if "suit" in string else None
|
07a93c5a87c3961243570df02a03d0c826d447ba
| 372,868 |
def delete_segment(seq, start, end):
    """Return the sequence with deleted segment from ``start`` to ``end``."""
    head = seq[:start]
    tail = seq[end:]
    return head + tail
|
1eba39d373ac2ab28ea1ea414f708a508bdf48d2
| 9,966 |
def pv_units(pv, default=''):
    """get units for pv object, with optional default value
    Parameters
    ----------
    pv: pv object (created by PV())
    default: string value for default units
    Returns
    -------
    string with units
    """
    try:
        units = pv.units
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer silently swallowed.
        units = ''
    if units in (None, ''):
        units = default
    return units
|
ba1c1e918a6dd4eade32f7daf58e1c0c0546b8bb
| 301,666 |
def make_permission_config_key(view):
    """
    Generates the key to be stored in configuration for a given view.
    :type view: rest_framework.views.APIView
    """
    view_class_name = view.__class__.__name__
    return "api_permission_{}".format(view_class_name)
|
cbb84f80250b72a6e433c606633bcd53cb19821a
| 84,762 |
def _extract_first_field(data):
"""Extract first field from a list of fields."""
return list(next(iter(zip(*data))))
|
1d9ab5619caef894963cc4a00626274731f792af
| 110,138 |
import struct
from io import BytesIO
def get_jpeg_size(jpeg_bytes):
    """Get the size of a JPEG image without reading and decompressing the entire
    file. Based upon:
    * https://github.com/shibukawa/imagesize_py/blob/master/imagesize.py#L87

    Parameters
    ----------
    jpeg_bytes : bytes
        Raw JPEG file contents; must begin with the SOI marker 0xFFD8.

    Returns
    -------
    (width, height) tuple of ints.

    Raises
    ------
    ValueError
        If the bytes do not start with a JPEG SOI header.
    """
    buf = BytesIO(jpeg_bytes)
    head = buf.read(24)
    # b'\377\330' == 0xFF 0xD8, the JPEG start-of-image (SOI) marker.
    if not head.startswith(b'\377\330'):
        raise ValueError("Invalid JPEG header")
    buf.seek(0)
    size = 2
    ftype = 0
    # Walk the marker segments until a SOFn (start-of-frame, 0xC0-0xCF)
    # marker is found, skipping 0xC4/0xC8/0xCC which are not frame headers.
    while not 0xc0 <= ftype <= 0xcf or ftype in [0xc4, 0xc8, 0xcc]:
        buf.seek(size, 1)
        byte = buf.read(1)
        # Markers may be preceded by any number of 0xFF fill bytes.
        while ord(byte) == 0xff:
            byte = buf.read(1)
        ftype = ord(byte)
        # The big-endian segment length includes its own two bytes.
        size = struct.unpack('>H', buf.read(2))[0] - 2
    # Now we're at a SOFn block
    buf.seek(1, 1)  # Skip `precision' byte.
    height, width = struct.unpack('>HH', buf.read(4))
    return width, height
|
b43fe9bc71a14ed69a3093fa909b79122b378a4f
| 169,181 |
import math
def is_prime(n):
    """
    What comes in: An integer.
    What goes out: Returns True if the given integer is prime.
      Returns False if the given integer is NOT prime.
    Side effects: None.
    Examples:
      Since the smallest prime is 2, this function returns False on
      all integers < 2. It returns True on 2, 3, 5, 7, and other primes.
    Note: The algorithm used here is simple and clear but slow.
    Type hints:
      :type n: int
    """
    if n < 2:
        return False
    # Trial division up to sqrt(n); the +0.1 guards against a slightly
    # underestimated floating-point square root.
    limit = int(math.sqrt(n) + 0.1) + 1
    return all(n % k != 0 for k in range(2, limit))
# ------------------------------------------------------------------
# Students:
#   Do NOT touch the above is_prime function - it has no TO DO.
#   Do NOT copy code from this function.
#
#   Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
|
03b255f0fd7695635a5b59ef4849e112601f3bd9
| 302,130 |
def get_package_url(data):
    """Get the url from the extension data"""
    # homepage, repository are optional
    if "homepage" in data:
        return data["homepage"]
    repository = data.get("repository")
    if isinstance(repository, dict):
        return repository.get("url", "")
    return ""
|
a100ac87cd28a2bf0fcd9e6491ba915377e827c8
| 643,626 |
def dict_deep_update(target, update):
    """Recursively update a dict. Subdict's won't be overwritten but also updated.
    Args:
        target: Target dictionary to update.
        update: Parameters to update.
    Returns:
        dict: Updated dictionary.
    """
    for key, value in update.items():
        if key not in target:
            target[key] = value
            continue
        if isinstance(value, dict):
            # NOTE(review): the recursion swaps target/update, so conflict
            # precedence differs between the top level and nested dicts -
            # confirm this asymmetry is intended before relying on it.
            target[key] = dict_deep_update(value, target[key])
    return target
|
d19bb77711ffee670ba21266dbdb20c1708edcbd
| 186,698 |
def _get(redis, key):
""" Get the current hits per rolling time window.
:param redis: Redis client
:param key: Redis key name we use to keep counter
:return: int, how many hits we have within the current rolling time window
"""
return redis.zcard(key)
|
288d17e0ef4c0d667d984c7f462a2c07d6c66147
| 19,286 |
def bytes_pls(some_val):
    """Convenience function that returns bytes when given str or bytes
    :param str or bytes some_val: thing desired as bytes
    :rtype: bytes
    """
    already_bytes = isinstance(some_val, bytes)
    return some_val if already_bytes else some_val.encode()
|
5a1dbf1728fd6fb97c5e5f23d03760a698fc922e
| 188,693 |
import six
import tokenize
def extract_comments(source):
    """
    Yields the text of each comment in a block of python code.
    Uses tokenize to account for quotations.
    CommandLine:
        python -m xdoctest.static_analysis extract_comments
    Example:
        >>> from xdoctest import utils
        >>> source = utils.codeblock(
        >>>     '''
               # comment 1
               a = '# not a comment'  # comment 2
               c = 3
               ''')
        >>> comments = list(extract_comments(source))
        >>> assert comments == ['# comment 1', '# comment 2']
        >>> comments = list(extract_comments(source.splitlines()))
        >>> assert comments == ['# comment 1', '# comment 2']
    """
    lines = source.splitlines() if isinstance(source, six.string_types) else source
    # Only feed non-empty lines to tokenize, otherwise it stops short.
    nonempty = (line for line in lines if line)
    try:
        for tok in tokenize.generate_tokens(lambda: next(nonempty)):
            if tok[0] == tokenize.COMMENT:
                yield tok[1]
    except tokenize.TokenError:
        # Best effort: tolerate unterminated or partial source.
        pass
|
4371541815dd39ecf48852dad713eff855415da7
| 513,082 |
def email_address_str(name, email):
    """ Create a "Name <email>" address string from a name and email.
    """
    # !s forces str() conversion, matching the old %s formatting exactly.
    return f"{name!s} <{email!s}>"
|
22366d0e8ff8397564646ccd9d55e3ee64efdc27
| 414,168 |
def get_term_description(term, go_dag, ipr_map, pfam_map):
    """Takes an `InterPro`, `Gene Ontology` or `Pfam` annotation
    and returns it's description.
    Returns
    -------
    str
        String description or None if one could not be found.
    """
    term = term.upper()
    if 'IPR' in term:
        return ipr_map[term]
    if 'PF' in term:
        return pfam_map[term]
    if "GO" in term:
        # Normalise bare 'GO1234' ids to 'GO:1234'; the '::' collapse
        # undoes the doubling when the id already had a colon.
        key = term.replace('GO', 'GO:').replace('::', ':')
        return go_dag[key].name
    return None
|
c307aecd7eab1f8bcf0cafbda509f18b7865b260
| 570,857 |
def HHMMSS_to_seconds(string):
    """Converts a colon-separated time string (HH:MM:SS) to seconds since
    midnight"""
    hours, minutes, seconds = (int(part) for part in string.split(':'))
    return hours * 3600 + minutes * 60 + seconds
|
f7a49ad5d14eb1e26acba34946830710384780f7
| 3,812 |
def get_color_commands(indices, colors):
    """Returns postscript coloring string given indices and colors.
    - indices: base 1 index of color. NOT BASE 0.
    - colors: color name corresponding to color name used to generate
    color_map.
    - indices and colors must be lists of the same length.
    """
    return ' '.join(
        '%s %s Colormark' % (str(idx), color)
        for idx, color in zip(indices, colors)
    )
|
3b111463146cf3e2d7025074dbd86e4c576a9013
| 662,572 |
def cq_blitz_mock(url, request):
    """
    Mock for carrier update checking, blitz.

    :param url: requested URL (ignored; the response is canned).
    :param request: request object (ignored).
    :return: dict with `status_code` 200 and a fixed updateDetailResponse
        XML payload under `content`, in the shape expected by the
        HTTP-mocking decorator.
    """
    thebody = b'<?xml version="1.0" encoding="UTF-8"?><updateDetailResponse version="2.2.1" sessionId="6158fdd7-4ac5-41ad-9849-b4ba9f18a3b5"><data authEchoTS="1366644680359"><status code="0"><friendlyMessage>Success</friendlyMessage><technicalMessage>Success</technicalMessage></status><content><updateDirectives><downloadCapOverCellular unit="MB">1035</downloadCapOverCellular><updateRequired>true</updateRequired><directive type="allowOSDowngrades" value="true"/></updateDirectives><transports><leastCostRouting>true</leastCostRouting><transport ordinal="0">serialbypass</transport><transport ordinal="1">wifigan</transport><transport ordinal="2">wifi</transport><transport ordinal="3">wan</transport><transport ordinal="4">wanroam</transport><transport ordinal="5">wanintlroam</transport></transports><softwareReleaseMetadata softwareReleaseVersion="10.3.1.1877" isSecurity="false" filterSetVersion="10.3.1.45" verbiageVersion="10.3.1.6"><cellularChargesMessage>Warning,this could be really expensive.</cellularChargesMessage></softwareReleaseMetadata><fileSets><fileSet url="http://cdn.fs.sl.blackberry.com/fs/qnx/production/f6832b88958f1c4c3f9bbfd44762e0c516760d8a"><package id="gYABgJBzlFCWITrWvadisQkRdpg" name="com.qnx.qcfm.radio.qc8960.wtr5" path="com.qnx.qcfm.radio.qc8960.wtr5/10.3.1.2727/qc8960.wtr5-10.3.1.2727-nto+armle-v7+signed.bar" downloadSize="53283856" operation="add" version="10.3.1.2727" checksum="swnw5y03_MNK3MqWF9227FynZSyIgiW3Nj42Zv96fmgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" type="system:package"/><package id="gYABgEd1yw2Ezd7gd-uX5-coqaE" name="com.qnx.coreos.qcfm.os.qc8960.factory_sfi" path="com.qnx.coreos.qcfm.os.qc8960.factory_sfi/10.3.1.2726/qc8960.factory_sfi-10.3.1.2726-nto+armle-v7+signed.bar" downloadSize="1909111199" operation="add" version="10.3.1.2726" checksum="eb7KMyZxajwgTkamg3VPHr8mEPT4CxjKF3TbmaoGJjMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" 
type="system:os"/></fileSet></fileSets></content></data><signature><root><cipher>EC521R1</cipher><shaType>SHA512</shaType><sigR>AOm43LzpCmSwglrzvup+oWjb0gRnlmz1DWZnFLTcmfqQ4zPY4w/KmyWXQD9vg6aUQPsfB4Sl7Ejdw/F9G41jNCva</sigR><sigS>AWYQGvQ9JwIDepdt+usc1lX6N3Sk9yElF4ezZNS1w6uhEfjBpRm06rtGA+CWJEAB9tVqvfwE1ByibMz18c6ANOmM</sigS></root><chain ordinal="1"><cipher>EC521R1</cipher><shaType>SHA512</shaType><publicKey notValidUntil="1434256882020" notValidAfter="1434688882020">BAF+BsRg/iDhyw7S3QsKBhc0hvv7xQ5+/QCsxHhzUzjjrQGuY9npBdHxN3hu2dA6NZdCzR+h35T+YNka9bZTe1tjMgB4txezGIuqh3nVmk+Gze69YCZ+22BANs3DNo8q3bYD7K3/kulm2zbZESLq9YnQcCoi336JkSrGNEEPaa1yU27D7Q==</publicKey><sigR>AJSk+Z4JLIyBy3aeSireNR+9Kx+69nLLRublGIq/Y/MrHatkmvKharH48SMZZl3v19p08H8PUfps4f7NgewHOHei</sigR><sigS>AJeRkTgkhkCtQsBi2+oBElFgcbua97vEXco0x5Xs/onMDAvSL0dlbsFXKOtblX6I2pYkUTajAFEZ2MLuCTe5s/l0</sigS></chain></signature></updateDetailResponse>'
    return {'status_code': 200, 'content': thebody}
|
6894f4309d775a039cba00605fb3cabca34996b6
| 318,109 |
def get_root_val(root):
    """
    :param root: current tree root
    :return: current tree root value
    """
    # The value lives in the first slot of the root node.
    root_value = root[0]
    return root_value
|
b834004fa48a9e1186d1fd1af697735700a11622
| 160,474 |
def _is_recipe_fitted(recipe):
"""Check if a recipe is ready to be used.
Fitting a recipe consists in wrapping every values of `fov`, `r`, `c` and
`z` in a list (an empty one if necessary). Values for `ext` and `opt` are
also initialized.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Can only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
Returns
-------
_ : bool
Indicates if the recipe is fitted or not
"""
# all keys should be initialized in the new recipe, with a list or a string
for key in ['fov', 'r', 'c', 'z']:
if key not in recipe or not isinstance(recipe[key], list):
return False
for key in ['ext', 'opt']:
if key not in recipe or not isinstance(recipe[key], str):
return False
if 'pattern' not in recipe or not isinstance(recipe['pattern'], str):
return False
return True
|
77e438dd00ac5606c52c88518c6932a09dff75df
| 700,281 |
def nest(dict_in, delim="__"):
    """Nests the input dict by splitting keys (opposite of flatten above)"""
    output = {}
    to_renest = []
    for key, value in dict_in.items():
        if delim not in key:
            output[key] = value
            continue
        outer, inner = key.split(delim, 1)
        if outer not in output:
            output[outer] = {}
        # Remember which first-level keys need a recursive nest pass.
        to_renest.append(outer)
        output[outer][inner] = value
    for outer in to_renest:
        output[outer] = nest(output[outer])
    return output
|
097cdf142f6f763424117ff6fc926464cee0b3bf
| 609,384 |
def prepare_features(dataframe):
    """Prepares the features for provided dataset.
    Args:
        dataframe: A Pandas DataFrame expected to contain data from the
            desired data set.
    Returns:
        A new DataFrame that contains the features to be used for the model.
    """
    # Currently a pass-through: return a copy so callers may mutate safely.
    return dataframe.copy()
|
a3772b46c0ad0a0331a303580b8c753aa9adb863
| 251,546 |
def processCheckboxes(checkboxes, options):
    """
    Process the GUI checkboxes and return a list containing strings denoting which boxes were checked
    :param checkboxes: list - A list of Checkbar objects
    :param options: list - A list of all options represented by this group of checkboxes
    :return: tag_selection: list - A list containing the type of tags to tag the track for
    """
    # Flatten each checkbox group's states into one boolean list that is
    # positionally aligned with `options`.
    states = []
    for box in checkboxes:
        states.extend(box.state())
    return [options[i] for i, checked in enumerate(states) if checked]
|
44e870120c093901a1efd885f7ece08f8ebd1b21
| 391,322 |
from typing import List
from typing import Tuple
from typing import Dict
def convert(day_input: List[str]) -> Tuple[Dict[str, List[range]], List[int], List[List[int]]]:
    """Converts the input into a tuple with:
    1. A dictionary with the fields, where for each field the value is a list
    of the valid ranges, each represented as a range covering min..max
    2. The values for your ticket, a list of ints
    3. A list with the values for the other tickets"""
    lines = iter(day_input)
    # Field section runs until the first blank line.
    fields: Dict[str, List[range]] = {}
    for line in lines:
        if not line:
            break
        name, spec = line.split(': ')
        ranges = []
        for chunk in spec.split(' or '):
            low, high = chunk.split('-')
            ranges.append(range(int(low), int(high) + 1))
        fields[name] = ranges
    # Skip ahead to our own ticket.
    while next(lines) != 'your ticket:':
        continue
    our = [int(v) for v in next(lines).split(',')]
    # Skip ahead to the nearby tickets.
    while next(lines) != 'nearby tickets:':
        continue
    tickets = [[int(v) for v in line.split(',')] for line in lines]
    return (fields, our, tickets)
|
7ef01443251595891c4adcd147dd0487b8b2fedf
| 31,913 |
def indent(block_of_text, indentation):
    """
    Helper function to indent a block of text.
    Take a block of text, an indentation string and return the indented block.
    """
    prefixed = [indentation + line for line in block_of_text.split("\n")]
    return "\n".join(prefixed)
|
55644a9692ad9ecc7b97808b4196820160d125e0
| 116,490 |
def clustercenter(samples):
    """
    Computes the geometric center of a set of vectors.
    samples = [ [v11,...,v1N], ... [vn1,...,vnN] ]
    """
    count, dim = len(samples), len(samples[0])
    if count == 1:
        # Singleton cluster: the sample IS the center.
        return samples[0]
    # Average each dimension over all samples.
    center = []
    for d in range(dim):
        center.append(sum(samples[i][d] for i in range(count)) / count)
    return center
|
7d9cba5290cdbdce6da5fcf71943eaf14f75cad6
| 133,315 |
import random
def pick(*args):
    """
    Randomly picks one of its arguments to return.
    Parameters
    ----------
    *args : variadic
        A series of values from which one will be randomly selected.
    Returns
    -------
    One value from the argument list.
    """
    chosen_index = random.randrange(len(args))
    return args[chosen_index]
|
0e20c1fd7d27826943804098181aae6572fe9af3
| 260,534 |
import re
def insert_table_name(
    query: str,
    table_prefix: str,
    table_suffix: str,
    placeholder: str = "@table_name",
) -> str:
    """
    Replace @table_name with the actual table name
    BigQuery doesn't support parameterized table names :(
    Sanitizes the suffix, lol
    Args:
        query (str): The query to insert the table name into
        table_prefix (str): The table prefix
        table_suffix (str): The table suffix
        placeholder (str): The placeholder to replace
    """
    # Strip anything but [a-zA-Z0-9_] from the suffix to block injection.
    safe_suffix = re.sub(r"[^a-zA-Z0-9_]", "", table_suffix)
    full_name = table_prefix + safe_suffix
    return query.replace(placeholder, full_name)
|
493255d24cbb0241c4aa29ee558f1890cfd3ea5a
| 141,483 |
def resolve_absolute_path(filename):
    """Fetch filename from absolute path"""
    # Everything after the last '/', or the whole string if there is none.
    return filename.rpartition('/')[2]
|
4d8a22b78fbc3a3b4963c69907d57289caac88cc
| 502,937 |
def remove_id_version(s, force=False):
    """
    Remove the optional '.VERSION' from an id if it's an Ensembl id or if
    `force` is True.
    """
    strip_version = force or s.startswith('ENS')
    return s.split('.')[0] if strip_version else s
|
ba3eef05ddeb8f41e1fcd083fe78d3047276ed4d
| 318,396 |
import string
def remove_punctuation(text):
    """ Removes all punctuation from the given string
    :param str text: Removes all punctuation from the given text
    :return: Punctuation-removed string
    :rtype: str
    """
    # Map every punctuation character to None in one C-level pass.
    drop_punctuation = str.maketrans('', '', string.punctuation)
    return text.translate(drop_punctuation)
|
2659a780375990329925b64f0b8c6f49e436017e
| 64,944 |
def remove_protected_attrs(item: dict) -> dict:
    """
    Remove protected (AWS-only) attributes from a DynamoDB item.
    """
    # Snapshot the keys first: deleting while iterating a dict is an error.
    for attr in list(item):
        if attr.startswith('aws:'):
            del item[attr]
    return item
|
55ed4fe710fb78423ef002a2b8d655bdb33897a6
| 616,520 |
def dot_to_point(dot):
    """ (str) -> tuple
    Maps a <dot> to it's location on the board. used to render a dot in it's
    proper place.
    """
    locations = {
        'Northwest': (100, 500),
        'North': (300, 500),
        'Northeast': (500, 500),
        'West': (100, 300),
        'Center': (300, 300),
        'East': (500, 300),
        'Southwest': (100, 100),
        'South': (300, 100),
        'Southeast': (500, 100),
    }
    # Unknown names yield None, matching the original's fall-through.
    return locations.get(dot)
|
cc7fa3b732e9c8cc667132785c2a59246c207ddf
| 284,832 |
def create_cursor(connection):
    """
    Create a cursor for executing SQL in the database
    on the provided connection.
    """
    return connection.cursor()
|
e68a564c22bc91490febaf8a0112670a177121ac
| 219,592 |
def bitmap_to_bytes(bitmap):
    """Turn bitmap into string of bytes
    The returned value is representation of bitmap suitable for store
    in the DataFrame, and ultimately in the HDF5 file.
    Parameters
    ----------
    bitmap : BitMap
        Bitmap / bitset, which representation we want to get.
    Returns
    -------
    bytes (str in Python 2.x)
        [Compact] representation of bitmap as bytes. Note that it may
        contain NUL ('\x00') characters.
    """
    # NOTE(review): `.tostring()` is the deprecated alias of `.tobytes()`
    # on most array-like APIs - confirm what type `bitmap.bitmap` is
    # before modernizing this call.
    return bitmap.bitmap.tostring()
|
577faeadb6cc6f117308111f18282e4f6d6ebd0e
| 572,794 |
def convert_string(string, chars=None):
    """Replace certain characters in a string with spaces."""
    if chars is None:
        chars = [',', '.', '-', '/', ':', ' ']
    # replace() is a no-op for absent characters, so no membership
    # pre-check is needed.
    for unwanted in chars:
        string = string.replace(unwanted, ' ')
    return string
|
131a5c472f96709da98e042a988e026edc41cd2e
| 654,698 |
import requests
import json
def put(url, data=None):
    """
    Submit an HTTP PUT to a URL and parse the JSON that's returned.

    :param url: The URL to submit the PUT to
    :param data: Optional request payload. The default was changed from a
        shared mutable ``{}`` (a Python anti-pattern) to ``None``, which
        requests treats the same as an empty body.
    :return: The JSON response
    """
    r = requests.put(url, data)
    return json.loads(r.text)
|
f29bff5a0cc976fa7612d7e6bdd6f5efd59cb2d9
| 223,890 |
def toList(given):
    """
    This will take what is given and wrap it in a list if it is not already
    a list, otherwise it will simply return what it has been given.
    :return: list()
    """
    # Tuples are also passed through unwrapped, matching the original.
    if isinstance(given, (tuple, list)):
        return given
    return [given]
|
5a9e799332340a26b686b35b06d2e246dc7ebbfd
| 544,884 |
from typing import List
import random
def generate_multiplications(multiplier: int, shuffle_order: bool = False
                             ) -> List[str]:
    """Returns a list of multiplication questions for the given multiplier."""
    # Multiplicands are the numbers 1-12.
    multiplicands = list(range(1, 13))
    if shuffle_order:
        random.shuffle(multiplicands)
    # Right-align single digits to width 2 so questions line up.
    return [f"{m: >2} x {multiplier} = " for m in multiplicands]
|
c04c3bb8a4fd1dfdc723dd2522294a5ac82bd4e2
| 209,123 |
def input_param(name, value):
    """
    Display input name and value for checking.
    :param name: parameter name.
    :param value: parameter value.
    :return: value.
    """
    print(f'{name} = {value}')
    return value
|
0ac4759a1b69742f29697bb67c97ef90759d5d2f
| 435,525 |
import collections
import pickle
def load_autofolio(fn: str):
    """ Read a pickled autofolio model.

    Parameters
    ----------
    fn: string
        The path to the file

    Returns
    -------
    A namedtuple with the fields scenario, preprocessing, pre_solver,
    selector and config — the five pickled model components, in that
    order.
    """
    af = collections.namedtuple(
        "af", "scenario,preprocessing,pre_solver,selector,config")
    with open(fn, "br") as fp:
        components = pickle.load(fp)
    # The pickle stores the five components as a sequence in field order.
    return af(*components[:5])
|
82f5a0d3a7ec31d8e97cb08fd864df1e6ec32018
| 80,647 |
def map_qubit_to_x(qubit_no, num_variables):
    """This reverses map_x_to_qubit: returns (quotient, remainder)."""
    quotient, remainder = divmod(qubit_no, num_variables)
    return quotient, remainder
|
9755fffba0d1790f910c23fde1845dd473bd4548
| 329,253 |
def get_class_name_with_new_suffix(klass, existing_suffix, new_suffix):
    """
    Generate a new name by replacing the existing suffix with a new one.

    Args:
        klass (type): original class from which the new name is generated
        existing_suffix (str): the suffix which needs to remain where it is
        new_suffix (str): the new suffix desired

    Example:
        >>> get_class_name_with_new_suffix(FooForm, 'Form', 'NewForm')
        'FooNewForm'

    Returns:
        new_name (str): the name with the new suffix
    """
    name = klass.__name__
    if existing_suffix in name:
        # Split on the LAST occurrence so only the trailing suffix moves.
        head, tail = name.rsplit(existing_suffix, 1)
    else:
        head, tail = name, ''
    return f'{head}{new_suffix}{tail}'
|
fe3a758f845faf165884e1ee1586fb3c2c4297f3
| 329,962 |
def copy_column(column, schema):
    """
    Safely create a copy of a column, bound to the given schema.
    """
    duplicate = column.copy(schema=schema)
    return duplicate
|
1842db07939a3d2aee7923e72230819afacd7751
| 34,238 |
def strike_volume(grain_mass, ratio):
    """
    Calculate the amount of liquor to strike the grain with.

    :param grain_mass: [float] in pounds
    :param ratio: [float] desired ratio in quarts / pound
    :return: [float] quarts of H2O
    """
    quarts = ratio * grain_mass
    return quarts
|
748dc1fbaf5833b6969c2f9a4793e1a1750c8d33
| 270,992 |
import re
def regex_boolean_detect_creating(text_item):
    """
    Detect the ready-state of a new/being-made DynamoDB table by checking
    whether the text contains the "'TableStatus': 'CREATING'" marker.
    """
    matches = re.findall(r"'TableStatus': 'CREATING'", text_item)
    # True only when the marker occurs exactly once in the text.
    return len(matches) == 1
|
ec344b15206733b522b136c97e5909489ef2a509
| 576,791 |
def distance_cumulative(a, b):
    """
    Manhattan distance between two 2D vectors
    (the sum of the absolute difference along all parts).
    """
    total = 0
    for left, right in zip(a, b):
        total += abs(left - right)
    return total
|
95bd2f6fc3cac4de6b8f126a5ce086a67103aa6b
| 426,437 |
def getSignedVal(num: int, bitSize: int):
    """
    Return the signed (two's-complement) value of the number.

    :param num: unsigned integer value
    :param bitSize: length of the value in bits
    """
    mask = (1 << bitSize) - 1
    sign_bit = 1 << (bitSize - 1)
    # Sign-extend when the top bit is set; otherwise truncate to the mask.
    return num | ~mask if num & sign_bit else num & mask
|
95666388e235955d7922a33def6b1a1d1abf661d
| 532,666 |
def curate_urls(start_date, end_date):
    """
    Curate the openligadb URLs for the data in the given time range.

    :param list [int] start_date: [matchday, year]
    :param list [int] end_date: [matchday, year]
    :return: List of urls of matches from each game day in the given
        time period
    """
    base = 'https://api.openligadb.de/getmatchdata/bl1/'
    start_day, start_season = start_date
    end_day, end_season = end_date
    urls = []
    if start_season == end_season:
        # Both dates fall into the same season: one URL per matchday.
        urls.extend(base + str(start_season) + '/' + str(day)
                    for day in range(start_day, end_day + 1))
    else:
        if start_day != 1:
            # Partial first season: enumerate its remaining matchdays,
            # then whole seasons strictly between the two dates.
            urls.extend(base + str(start_season) + '/' + str(day)
                        for day in range(start_day, 35))
            urls.extend(base + str(season)
                        for season in range(start_season + 1, end_season))
        else:
            # First season starts at matchday 1, so take it whole along
            # with every season between the dates.
            urls.extend(base + str(season)
                        for season in range(start_season, end_season))
        if end_day != 34:
            # Partial final season: one URL per matchday up to end_day.
            urls.extend(base + str(end_season) + '/' + str(day)
                        for day in range(1, end_day + 1))
        else:
            urls.append(base + str(end_season))
    return urls
|
8c17db3d2b6455291c13271c8c12630607015f1c
| 309,876 |
import math
def norm_entropy(probs):
    """Get the normalized entropy from a list of probabilities.

    Parameters
    ----------
    probs: list
        list of probabilities

    Returns
    -------
    normalized entropy of the probabilities
    """
    # Zero (or negative) probabilities contribute nothing to the sum.
    weighted = sum(p * math.log(p, math.e) for p in probs if p > 0)
    return -weighted / len(probs)
|
cfbcd504670d9eda7e47f2301309271b916b1d11
| 111,334 |
def flatten_tokens(dom):
    """Flattens <token> nodes in DOM by joining their text with tabs."""
    # './/*token' does not find <token>s that are children of <doc>,
    # hence the './/token' expression.
    for token in dom.findall('.//token'):
        flattened = "\t".join(token.itertext())
        # Drop the now-redundant children, keeping only the flat text.
        for child in list(token):
            token.remove(child)
        token.text = flattened + '\n'
    return dom
|
061229966b3d823c19ff0997528977e4179b750c
| 428,538 |
def _safe_divide(x, y):
"""This returns zero if the denominator is zero
"""
if y == 0:
return 0
return x / y
|
05d91cbb0a56fa38dd0f544961c1e1c209c42da8
| 250,378 |
def aggregate_video_cluster_metrics(responses, metrics, info=None):
    """Aggregates the video cluster metrics with one step responses.

    Args:
      responses: a dictionary of names, observed responses.
      metrics: A dictionary mapping from metric_name to its value in float.
      info: Additional info for computing metrics (ignored here)

    Returns:
      A dictionary storing metrics after aggregation.
    """
    del info  # Unused.
    metrics['impression'] += 1
    clicked_any = False
    for response in responses:
        if not response['click']:
            continue
        clicked_any = True
        metrics['click'] += 1
        metrics['quality'] += response['quality']
        metrics['cluster_watch_count_cluster_%d' % response['cluster_id']] += 1
    if not clicked_any:
        # No click in any response this step.
        metrics['cluster_watch_count_no_click'] += 1
    return metrics
|
c5fbba0fe686f8edb7e92cdbf4d155331fd5c46c
| 321,716 |
import time
def epoch_milliseconds(d):
    """Convert a datetime to milliseconds since the epoch.

    NOTE(review): time.mktime interprets the timetuple in LOCAL time —
    confirm callers expect local-time semantics, not UTC.
    """
    seconds = time.mktime(d.timetuple())
    return seconds * 1000
|
4f66faf9daf425af56c64c875e294c9ef02c7752
| 649,597 |
def concatenate(s1: str, s2: str) -> str:
    """ Return the concatenation of `s1` with `s2`. """
    return "".join((s1, s2))
|
56b1481ce4779404a1ab1018645ae2aa2e02c72d
| 370,261 |
def tribonacci(n: int) -> int:
    """Calculate the Nth Tribonacci number (seeds 0, 0, 1)."""
    # Slide a three-element window along the sequence n times.
    window = [0, 0, 1]
    for _ in range(n):
        window = [window[1], window[2], sum(window)]
    return window[0]
|
8e407cee4988c4b419c273c77d5e40680f631918
| 473,267 |
def get_forces(system):
    """
    Get forces from a system as a dictionary.

    arguments
        system : simtk.openmm.System
            system to query
    returns
        _dict : dict
            {force_name: (force, force_idx)} where force_name is a string,
            force is a simtk.openmm.Force, and force_idx is the index of
            the force
    """
    return {
        force.__class__.__name__: (force, idx)
        for idx, force in enumerate(system.getForces())
    }
|
d905b3d691ed61c787bd9b2a6ae62e9d64656a2d
| 439,343 |
def mirror_search_terms(terms):
    """
    Interchange the sidedness of a query.

    :param terms: List of strings matching (H|L)[0-9]+(l|r)?
                  <has/lacks><ringid>[<left/right>]
    :return: The same terms with occurrences of 'l' and 'r' interchanged
    """
    def swap_sides(term):
        # Three-step swap via a placeholder so 'l' and 'r' trade places.
        return term.replace('l', 'Q').replace('r', 'l').replace('Q', 'r')
    return [swap_sides(term) for term in terms]
|
4d44eae7d3ca1301ce7d1aba42b4ef8737ae852f
| 67,577 |
import math
def vdist(vect1, vect2):
    """
    Return the euclidean distance (the straight-line distance) between two
    vector coordinates — the length of the vector joining them.

    Parameters
    ----------
    vect1:
        *float list*. The first vector, in the format [x,y,z]
        (or [x,y,z,0] for affine transformations in an homogeneous space).
    vect2:
        *float list*. The second vector, in the format [x,y,z]
        (or [x,y,z,0] for affine transformations in an homogeneous space).
    """
    dx = vect1[0] - vect2[0]
    dy = vect1[1] - vect2[1]
    dz = vect1[2] - vect2[2]
    return math.sqrt(dx * dx + dy * dy + dz * dz)
|
f03d9469b52a7037b5754d1add55d68d6aa691a9
| 282,491 |
def constraint3(lowest_right_hand_keypoint: int, lowest_left_hand_keypoint: int) -> bool:
    """ Is lowest right hand keypoint higher than lowest left hand keypoint? """
    is_higher = lowest_right_hand_keypoint < lowest_left_hand_keypoint
    return is_higher
|
9718ecf1fde499152817f926dc15b779d553e263
| 424,921 |
def union_intervals(intervals):
    """Size of the union of a set of intervals.

    :param intervals: list or set of integer pairs (left, right)
    :returns: size of the union of those intervals
    :complexity: :math:`O(n \\log n)`
    """
    covered_up_to = float('-inf')  # furthest interval endpoint seen so far
    total = 0                      # size of union
    for left, right in sorted(intervals):
        if right > covered_up_to:
            # Count only the part not already covered by earlier intervals.
            total += right - max(left, covered_up_to)
            covered_up_to = right
    return total
|
8e3fd8440289c36aa540393396bbb409d5c78847
| 225,439 |
def hidden_thankers(num_hidden, all_hidden=False):
    """
    Description of hidden thankers.

    Parameters
    ----------
    num_hidden : int
        Number of hidden users who liked this post.
    all_hidden : bool, optional
        Indicates if all users who liked this post are hidden.

    Returns
    -------
    str
        Information about hidden users who liked this post.
    """
    plural = "" if num_hidden == 1 else "s"
    if all_hidden:
        return f"{num_hidden} user{plural}"
    return f"and {num_hidden} other user{plural}"
|
d8ad2e8f9f30ac7bb01067758f450b8ce6e3cb86
| 519,747 |
def rotated_binary_search(sorted_collection: list[int], item: int, low: int, high: int) -> int:
    """
    Binary search for *item* in a rotated sorted list.

    :param sorted_collection: the rotated list we would like to find the item within
    :param item: the item to be found within the sorted_collection
    :param low: for the binary search, the lowest index on the current search - initially 0
    :param high: for the binary search, the highest index on the current search - initially the length of sorted_collection - 1
    :return: index i that item appears in sequence, or -1 if absent
    Examples:
    >>> rotated_binary_search([5, 6, 7, 1, 2, 3], 8, 0, 5)
    -1
    >>> rotated_binary_search([11, 12, 1, 2, 3, 4, 5, 6], 5, 0, 7)
    6
    >>> rotated_binary_search([11, 12, 13, 16, 20, 2], 2, 0, 5)
    5
    >>> rotated_binary_search([20, 21, 1, 2, 3], 20, 0, 4)
    0
    """
    # Floor division, not int((high + low) / 2): the intermediate float
    # loses precision for very large indices.
    mid = (high + low) // 2
    if sorted_collection[mid] == item:  # found the item
        return mid
    if low >= high:  # exhausted the range without finding the item
        return -1
    if sorted_collection[low] < sorted_collection[mid]:
        # Left half [low, mid] is sorted; recurse into whichever half
        # could contain the item.
        if sorted_collection[low] <= item <= sorted_collection[mid]:
            return rotated_binary_search(sorted_collection, item, low, mid)
        return rotated_binary_search(sorted_collection, item, mid + 1, high)
    # Otherwise the right half [mid + 1, high] is sorted.
    if sorted_collection[mid + 1] <= item <= sorted_collection[high]:
        return rotated_binary_search(sorted_collection, item, mid + 1, high)
    return rotated_binary_search(sorted_collection, item, low, mid)
|
229a1da0ae096cdd5ad550a2dd1fbf08fdf202d5
| 605,134 |
import re
def longest_matching(lines, regex):
    """Return the index of the first ':' in the longest matching line.

    :param lines: list of strings to find the longest parameter in
    :param regex: regex to identify the lines to compare
    :returns: The highest index of the first ':' in the matching lines,
        or None if no line matches the given regex
    """
    longest = None
    for line in lines:
        if re.match(regex, line):
            index = line.index(":")
            # `longest` starts as None; in Python 3 `int > None` raises
            # TypeError, so guard the comparison explicitly.
            if longest is None or index > longest:
                longest = index
    return longest
|
009171898a11a8a745a92d4c2cb76a5888046ce1
| 92,752 |
def count_parameters(model) -> int:
    """Count trainable parameters in a torch model.

    Parameters
    ----------
    model : torch.nn.module
        The model from which you want to count parameters.

    Returns
    -------
    int
        Total number of trainable (requires_grad) parameters in the model.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
|
a3f398bb5969cd4d81c1702089698a2ed9d79d31
| 28,727 |
def fast_forward(lottery_info, top_pick_list,
                 teams_selected, current_slot):
    """ Automatically advance the site to the top 4 when all 4 teams are known.

        @param lottery_info (dict): Dictionary keyed by reverse standings
            order, with dictionary values containing 'name' and 'id' keys
            for the team
        @param top_pick_list (list): Keys of lottery_info for the teams
            "skipped" as the back of the lottery is revealed — i.e. teams
            holding a top-4 pick
        @param teams_selected (list): Keys of lottery_info in reverse
            standings order, filled in as teams are revealed
        @param current_slot (int): Current lottery slot (ignored on entry;
            reset to 5)

        Returns:
            - teams_selected (list): every team not in the top 4 appended
            - current_slot (int): advanced to 5
    """
    current_slot = 5
    # Walk from the worst lottery slot toward the best, appending every
    # team that is neither already revealed nor holding a top-4 pick.
    for pick in range(len(lottery_info), 0, -1):
        if pick not in teams_selected and pick not in top_pick_list:
            teams_selected.append(pick)
    return teams_selected, current_slot
|
dc083e1765f796e9248be305481bd688c833b629
| 403,600 |
def parse_number(f):
    """Parse numbers like 123.456 or 1,234M."""
    if f.endswith('M'):
        # Millions suffix: the comma acts as the decimal separator.
        return int(float(f[:-1].replace(',', '.')) * 1e6)
    # Plain number: dots act as thousands separators.
    return int(f.replace('.', ''))
|
9c6152a12b58a898187f28009fa785bf0b16841c
| 596,717 |
def add_area(vector):
    """
    Add area as a property to a GEE feature collection representing a
    polygon or multipolygon.

    :param vector: GEE feature collection representing a polygon or
        multipolygon feature.
    :return: GEE feature collection with an 'area' property set
    """
    computed_area = vector.geometry().area(maxError=1)
    return vector.set('area', computed_area)
|
f414dd56c375eebaf422f2e02b50135bea13e5f8
| 257,912 |
def remove_error_codes(df, dependent_variable='components.cont.conditions.logic.errorCode', skip=True):
    """
    Remove errorCode columns from features except the target errorCode column.
    Usually, this is not necessary.

    :param df: dask (or pandas) dataframe
    :param dependent_variable: target errorCode column to keep (String)
    :param skip: skip function (Binary)
    :return: dataframe without the non-target errorCode columns
    """
    if skip:
        return df
    # Filter instead of list.remove(): remove() raises ValueError when the
    # target column is absent from df (e.g. already dropped upstream).
    drop_cols = [col for col in df.columns
                 if 'errorCode' in col and col != dependent_variable]
    return df.drop(columns=drop_cols)
|
8384b0c76205840e5220e5abd2cee70dbbcae647
| 491,863 |
def cif2float(cifnum):
    """
    Convert a cif-floating point number that may include an uncertainty
    indication to a proper floating point number.

    In a .cif file the value "0.4254(4)" is a floating point number where
    the digit in brackets gives the uncertainty. To convert this number to
    a regular Python floating point number the uncertainty needs to be
    eliminated and the resulting string converted.
    """
    # Everything before "(" (if any) is the numeric part.
    numeric_part, _, _ = cifnum.partition("(")
    return float(numeric_part)
|
ecac9f0aeff17da6b38355cf87c9fcc35a6a025f
| 637,649 |
def get_file_name(path):
    """
    Extract the name of the file from the given '/'-separated path.

    :param path: location of the file the name will be extracted from
    :return: the final path component
    """
    return path.rsplit("/", 1)[-1]
|
41b3ed5726db86c037b0e7b85a99f6f8cd37a72b
| 550,566 |
def get_living_neighbors(i, j, generation):
    """
    Count the living (== 1) cells among the 8 neighbors of cell (i, j),
    ignoring positions outside the grid.
    """
    rows, cols = len(generation), len(generation[0])
    offsets = [(-1, 0), (1, 0), (0, -1), (0, 1),
               (-1, 1), (-1, -1), (1, 1), (1, -1)]
    return sum(1 for di, dj in offsets
               if 0 <= i + di < rows and 0 <= j + dj < cols
               and generation[i + di][j + dj] == 1)
|
437229b8152c3b2ce5b90ef6ddef83daa5c24a85
| 6,979 |
def calculate(input_):
    """
    Given a string with numbers separated by comma like "+1, -2",
    calculate the sum of the numbers.
    """
    tokens = input_.split(",")
    # Skip empty tokens (e.g. from an empty input string).
    return sum(int(token.strip()) for token in tokens if token)
|
ef25cd0aa02cd0c470bb103ebade09bcf141b70f
| 415,046 |
import math
def get_neuron_distance_periodic(grid_w, grid_h, pre_pos, post_pos):
    """
    Compute distance between two neurons on a grid with periodic boundaries.

    :param grid_w: width of grid
    :param grid_h: height of grid
    :param pre_pos: presynaptic neuron position
    :param post_pos: postsynaptic neuron position
    :return: Euclidean distance
    """
    x1, y1 = pre_pos
    x2, y2 = post_pos
    # On a torus the shortest route along each axis may wrap the boundary.
    dx = min(abs(x1 - x2), grid_w - abs(x1 - x2))
    dy = min(abs(y1 - y2), grid_h - abs(y1 - y2))
    return math.sqrt(dx ** 2 + dy ** 2)
|
46d2ec2568f912d7afac650c2fa7d3164be3e063
| 486,497 |
def qs(q):
    """
    Helper method for quantile list: calculates quantile steps for q
    number of quantiles.

    Parameters:
    --------------
    q: int, number of quantiles

    Returns:
    --------------
    quantile_list: list of quantile steps
    """
    step = 100 / q
    return [i * step / 100 for i in range(1, q + 1)]
|
5ea4791636d6cb36ad4a924b7d66b0aba8378d8d
| 335,092 |
def dict_to_yaml_list(pdict):
    """Convert a python dict into a yaml list (volumes, configs etc).

    :pdict: python dict
    :return: list of colon-separated 'key:value' strings
    """
    return ['{}:{}'.format(key, value) for key, value in pdict.items()]
|
43a6a9b5b42df64c5a9217a629a98af98b2ff371
| 490,486 |
def user_required(handler):
    """
    Decorator that checks if there's a user associated with the current
    session. Will also fail if there's no session present.
    """
    def check_login(self, *args, **kwargs):
        if self.auth.get_user_by_session():
            return handler(self, *args, **kwargs)
        # No session user: bounce to the login page and abort the request.
        self.redirect(self.uri_for('login'), abort=True)
    return check_login
|
a748f7944043b2ced96927e8231e52ccb627f507
| 482,436 |
def on_segment(p, q, r):
    """Given three colinear points p, q, r, check whether point q lies on
    line segment 'pr'."""
    within_x = min(p.x, r.x) <= q.x <= max(p.x, r.x)
    within_y = min(p.y, r.y) <= q.y <= max(p.y, r.y)
    return within_x and within_y
|
b76e319669a76c7a7a314c69649b3f2c556a5815
| 521,343 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.