content (string, lengths 39 to 9.28k) | sha1 (string, length 40) | id (int64, 8 to 710k) |
---|---|---|
def layer_type(flags):
"""
Returns the layer type from the feature classification flag
0 = invalid (bad or missing data)
1 = "clear air"
2 = cloud
3 = aerosol
4 = stratospheric feature
5 = surface
6 = subsurface
7 = no signal (totally attenuated)
"""
# type flag : bits 1 to 3
return flags & 7
|
01924018d5d4524c61ba02351ce20c9f91799d8b
| 291,867 |
def get_image_data(image):
"""Return a list of pixels from image
"""
return list(image.getdata())
|
115e09543995d9cd17458cfadf6d2335f610e3d8
| 332,879 |
def mirror(f):
"""
    Creates a new interpolating function by taking an interpolating function, flipping it and placing the reflected
    version beside itself. This does not just flip the function; rather, it combines the original and the flipped version.
.. plot::
import animator as am
x = np.linspace(0, 1, 100)
plt.plot(x, np.vectorize(am.interp.cubic_inout)(x), label="original")
plt.plot(x, np.vectorize(am.interp.mirror(am.interp.cubic_inout))(x), label="mirrored")
plt.legend()
plt.tight_layout()
plt.show()
:param f:
The function to mirror.
:return:
The mirrored interpolating function.
"""
return lambda t: f(2 * (t if t < .5 else 1 - t))
|
77cd04dc1d088a33f5e1eb972064f139e107017d
| 458,871 |
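A quick numeric sketch of mirror (illustrative only; the quadratic easing function below is my own assumption, not part of the entry above):
ease = lambda t: t * t            # simple ease-in curve on [0, 1]
half = mirror(ease)
print(half(0.25))                 # ease(0.5) -> 0.25
print(half(0.75))                 # ease(2 * (1 - 0.75)) = ease(0.5) -> 0.25
print(half(0.1), half(0.9))       # symmetric about t = 0.5: 0.04 0.04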
def left_shift(number, n):
"""
Left shift on 10 base number.
Parameters
----------
number : integer
        the number to be shifted
    n : integer
        the number of digits to shift
    Returns
    -------
    shifted number : integer
        the number left shifted by n digits
Examples
--------
>>> left_shift(152, 1)
15
>>> left_shift(14589, 3)
14
"""
return number // 10 ** n
|
e1d088fbfc2c64d8a976a15c26ce33b89824ad79
| 683,425 |
def filter_cellular_barcodes_manual(matrix, cell_barcodes):
""" Take take all barcodes that were given as cell barcodes """
barcodes = list(set(matrix.bcs) & set(cell_barcodes))
metrics = {
'filtered_bcs': len(barcodes),
'filtered_bcs_lb': len(barcodes),
'filtered_bcs_ub': len(barcodes),
'max_filtered_bcs': 0,
'filtered_bcs_var': 0,
'filtered_bcs_cv': 0,
}
return barcodes, metrics, None
|
58a07034139fac1d189303203d6e849d5637e3fe
| 696,726 |
def calculateAverage(list):
""" Calculate the average value from a list """
    size = len(list)
    if size > 0:
        return sum(list) / size
    return 0
|
cdcb8db816348ec3a2eae04ddc22719dc41a12a4
| 664,279 |
import sympy
def is_quad_residue(n, p):
"""
Returns True if n is a quadratic residue mod p.
"""
return sympy.ntheory.residue_ntheory.is_quad_residue(n, p)
|
e4f9e820c88fc3c696d6157581d1392a30eda449
| 652,412 |
from importlib import import_module
def importFrom(modname, objname, asTuple=False):
"""
Import `modname` and return reference to `objname` within the module.
:param modname: (str) the name of a Python module
:param objname: (str) the name of an object in module `modname`
:param asTuple: (bool) if True a tuple is returned, otherwise just the object
:return: (object or (module, object)) depending on `asTuple`
"""
module = import_module(modname, package=None)
obj = getattr(module, objname)
return ((module, obj) if asTuple else obj)
|
54d93306a76912cb2e57751932731359b10450e3
| 209,845 |
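A usage sketch for importFrom (module and object names here are arbitrary examples):
dumps = importFrom('json', 'dumps')
print(dumps({'a': 1}))                        # {"a": 1}
module, obj = importFrom('json', 'dumps', asTuple=True)
print(module.__name__, obj is module.dumps)   # json True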
def build_cluster_info(config):
"""
    Construct our cluster settings. Copies the global settings config into each
    cluster's settings but does not overwrite local cluster settings. Cluster config
takes precedence over global. Use global config for generic info to be
applied by default.
Note:
However, as in the docstring for :func:`send_to_notifiers`, the local
settings are not used for anything other than user/pass for getting
snapshot information. There's not a need to send notifications to
multiple areas at the time of writing. I doubt this will change in the
future.
Args:
config: global settings config
Returns:
Global settings config, but with each cluster config is updated with
global settings.
"""
global_settings = config['settings']
cluster_settings = config['clusters']
for cluster in cluster_settings:
# Set global as settings if not present
if 'settings' not in cluster:
cluster['settings'] = global_settings
continue
# Only add/update keys not present in cluster
for k, v in global_settings.items():
if k not in cluster['settings']:
cluster['settings'][k] = v
return config
|
02dec2a6ec659c0a579fbc77b5a9677fff9a6871
| 631,216 |
def unique(lst):
    """
    Identify the indices of the unique elements of a sorted list (the
    elements that do not appear anywhere else in the list)
    """
    un_lst = []
    for i, v in enumerate(lst):
        prev_differs = (i == 0) or (v != lst[i - 1])
        next_differs = (i == len(lst) - 1) or (v != lst[i + 1])
        if prev_differs and next_differs:
            un_lst.append(i)
    return un_lst
|
1401ffe3f1f899c0fa1227b11222da6b8459222a
| 447,120 |
def _get_title_info(html, key):
"""Pulls title information for the regulation.
Parameters
----------
    html : bs4.BeautifulSoup
        The bs4 representation of the site
    key : str
        The last element of the id of the div to target
Returns
-------
title_info : str
The relevant title information
"""
div = html.find("div", {"id": f"ContentPlaceHolder1_div{key}"})
return div.text if div else None
|
8ccbdb3dd557588d012911f800ccd1046f055307
| 244,569 |
def get_capillary_diameter(line1, line2):
"""
Defines capillary diameter in pixel length.
    line1 = (x, y) - point on the left edge of the capillary
    line2 = (x, y) - point on the right edge of the capillary
    """
    #express the two points on opposite sides of the capillary as x,y coordinates
L1x,L1y = line1
L2x,L2y = line2
#find straight line distance between points
dist = ((L2x-L1x)**2+(L2y-L1y)**2)**0.5
#Assumption: rotation of image is very small such that the scaled straight
#line distance is very close to true diameter of the capillary
return dist
|
5b11b6025c17b373d2f014dbb0519397561b0f30
| 40,654 |
def getMediaName(prefix, slideNumber, frmt='png'):
"""Returns the relative name of the media file."""
return prefix + '-' + str(slideNumber) + '.' + frmt
|
af54045ef1b4914af903e1c183f2f79e598c7d4b
| 263,576 |
def meter2centimeter(dist):
""" Function that converts m to cm. """
return dist * 100
|
b5db785532e4a677a2726b4502b5192cb73ba7f8
| 590,697 |
def adjust_val_to_360(val):
"""
Take in a single numeric (or null) argument.
Return argument adjusted to be between
0 and 360 degrees.
"""
if not val and (val != 0):
return None
else:
try:
return float(val) % 360
except ValueError:
return val
|
6f40929dc1bb3295736a3b2e198680fcb65b10cd
| 153,800 |
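Illustrative behaviour of adjust_val_to_360 for a few representative inputs (values chosen by me):
print(adjust_val_to_360(370))     # 10.0
print(adjust_val_to_360(-30))     # 330.0
print(adjust_val_to_360(None))    # None (null-ish input)
print(adjust_val_to_360("n/a"))   # 'n/a' (unparseable value is returned unchanged)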
def import_dfg_from_rows(rows, parameters=None):
"""
Import a DFG (along with the start and end activities) from the rows of a .dfg file
Parameters
--------------
rows
Rows the DFG file
parameters
Possible parameters of the algorithm
Returns
--------------
dfg
DFG
start_activities
Start activities
end_activities
End activities
"""
if parameters is None:
parameters = {}
activities = []
start_activities = {}
end_activities = {}
dfg = {}
num_activities = int(rows[0])
i = 1
while i <= num_activities:
activities.append(rows[i].strip())
i = i + 1
num_sa = int(rows[i])
target = i + num_sa
i = i + 1
while i <= target:
act, count = rows[i].strip().split("x")
act = activities[int(act)]
count = int(count)
start_activities[act] = count
i = i + 1
num_ea = int(rows[i])
target = i + num_ea
i = i + 1
while i <= target:
act, count = rows[i].strip().split("x")
act = activities[int(act)]
count = int(count)
end_activities[act] = count
i = i + 1
while i < len(rows):
acts, count = rows[i].strip().split("x")
count = int(count)
a1, a2 = acts.split(">")
a1 = activities[int(a1)]
a2 = activities[int(a2)]
dfg[(a1, a2)] = count
i = i + 1
return dfg, start_activities, end_activities
|
62216287a866ce4426cb1ba6b64ddbeab860d3d0
| 101,274 |
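A minimal sketch of the .dfg row layout this parser assumes: an activity count, the activities, a start-activity count followed by "<index>x<count>" lines, an end-activity count followed by the same, then "<i>><j>x<count>" edge lines. The two-activity example is made up for illustration:
rows = [
    "2",        # number of activities
    "A",
    "B",
    "1",        # number of start activities
    "0x5",      # activity index 0 ("A") starts 5 traces
    "1",        # number of end activities
    "1x5",      # activity index 1 ("B") ends 5 traces
    "0>1x5",    # edge A -> B observed 5 times
]
dfg, sa, ea = import_dfg_from_rows(rows)
print(dfg)      # {('A', 'B'): 5}
print(sa, ea)   # {'A': 5} {'B': 5}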
from datetime import datetime
def _get_filename(from_date, to_date, time_format='%Y%m%dT%H%M%SZ'):
"""Generates filename (without file extension) as time interval."""
from_date_str = datetime.strftime(from_date, time_format)
to_date_str = datetime.strftime(to_date, time_format)
return f'{from_date_str}--{to_date_str}'
|
eaa099dca13b99dd4a33c5286a4a769eb5cea26b
| 334,024 |
def data_to_dict(these_keys, these_values) -> dict:
"""Zip up two lists to make a dict
:param these_keys: keys for new dict
:param these_values: values for new dict
:return: dictionary
"""
return dict(zip(these_keys, these_values))
|
cba0977851d42dbc68911886d638b9382d51d0e5
| 151,395 |
from typing import List
import re
def find_urls(string: str) -> List[str]:
"""
Search for URL inside given `string` and return list of found urls.
"""
regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
url = re.findall(regex, string)
return [x[0] for x in url]
|
b2ee0a88984c9fe12f6207ad62c4dddb23182e7d
| 515,245 |
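A quick sketch of the kind of matches the regex is expected to return (the example string is mine):
text = "docs at https://example.com and also www.example.org/start today"
print(find_urls(text))   # expected: ['https://example.com', 'www.example.org/start']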
def pytest_make_parametrize_id(config, val, argname):
"""
Prettify output for parametrized tests
"""
if isinstance(val, dict):
return "{}({})".format(
argname, ", ".join("{}={}".format(k, v) for k, v in val.items())
)
|
cca37b4365a9144eec1e077e86b555eb36dfc019
| 356,092 |
def _compressKerningPhase1(kerning):
"""
>>> kerning = {
... ("A", "A") : 100,
... ("A", "Aacute") : 100,
... ("Aacute", "A") : 100,
... ("A", "Agrave") : 200,
... ("Agrave", "A") : 200,
... ("A", "Adieresis") : 300,
... }
>>> expected = {
... ("A", 100) : set(["A", "Aacute"]),
... ("Aacute", 100) : set(["A"]),
... ("A", 200) : set(["Agrave"]),
... ("Agrave", 200) : set(["A"]),
... ("A", 300) : set(["Adieresis"]),
... }
>>> result = _compressKerningPhase1(kerning)
>>> result == expected
True
"""
# create a dict of form {(glyph1, value) : set(glyph2s)}
compressed = {}
for (glyph1, glyph2), value in kerning.items():
k = (glyph1, value)
if k not in compressed:
compressed[k] = set()
compressed[k].add(glyph2)
return compressed
|
40006086b3cd00ba96019535bd79e44983bf4b7b
| 556,289 |
def _parse_options_from_cmd_args(args):
"""
Parse out build options from the command line.
Args:
args (Argparse obj): The output of 'ArgumentParser.parse_args()'
Returns:
dict of options.
"""
opts = {}
if args.name is not None:
opts['app_name'] = args.name
if args.entry_point is not None:
opts['entry_point'] = args.entry_point
if args.requirements is not None:
# Check if we're given a file.
if args.requirements.startswith('@'):
opts['requirements_txt'] = args.requirements
else:
# Try and break it apart.
opts['requirements'] = args.requirements.split(',')
if args.ignore is not None:
opts['ignore'] = args.ignore.split(',')
if args.leave_pyc:
opts['clean_pyc'] = False
if args.python_shebang:
opts['python_shebang'] = args.python_shebang
return opts
|
d421066945a2eee174f9a18e80a0697d33175d41
| 633,442 |
import asyncio
def spin(task):
"""Run a task asynchronously in the event loop, shortcut"""
return asyncio.get_event_loop().create_task(task)
|
a54ce103cd0eeac5a859ce3ec384c94d63aebbd6
| 499,503 |
def hex2rgb(hex_str: str) -> tuple:
"""
Converts HEX format to RGB
:param hex_str:
:return: tuple(r, g, b)
"""
hex_str = hex_str.lstrip('#')
return tuple(int(hex_str[i:i + 2], 16) for i in (0, 2, 4))
|
d9c08c8d2bbf940df9dcc9e8fa8e7f74d5827235
| 404,827 |
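A couple of worked examples (values are mine):
print(hex2rgb('#ff8000'))   # (255, 128, 0)
print(hex2rgb('00ff00'))    # (0, 255, 0) -- the leading '#' is optional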
from typing import List
from typing import Any
from typing import Optional
def get_(arr: List[Any], value: Any, key: str) -> Optional[Any]:
"""Get object with requested attrubute value
from list
:param arr: List to search elements in
:param value: Value to search
:param key: Key to search value of
:return: First object if exists, else None
"""
try:
search_results = [obj for obj in arr if obj[key] == value]
except KeyError:
raise KeyError('Key "%s" does not exist in list element(s)' % key)
if search_results:
        return search_results[0]
return None
|
9c8816a385a2cdbcdb7b0918bcb8cbb4590e9220
| 490,216 |
def _parse_selections(dpkgselection):
"""
Parses the format from ``dpkg --get-selections`` and return a format that
pkg.get_selections and pkg.set_selections work with.
"""
ret = {}
if isinstance(dpkgselection, str):
dpkgselection = dpkgselection.split("\n")
for line in dpkgselection:
if line:
_pkg, _state = line.split()
if _state in ret:
ret[_state].append(_pkg)
else:
ret[_state] = [_pkg]
return ret
|
a2ef463626e80f5dad7b3a81d634c07e59a51930
| 376,003 |
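A sketch of the expected round-trip with dpkg-style input (the package names are illustrative):
raw = "vim\t\t\t\tinstall\ncurl\t\t\t\tinstall\nold-pkg\t\t\tdeinstall"
print(_parse_selections(raw))   # {'install': ['vim', 'curl'], 'deinstall': ['old-pkg']}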
import re
def is_time(time_str):
"""Checks if the supplied string is formatted as a time value for Pymesync
A string is formatted correctly if it matches the pattern
<value>h<value>m
where the first value is the number of hours and the second is the number
of minutes.
"""
return True if re.match(r"\A[\d]+h[\d]+m\Z", time_str) else False
|
0ed77688e9e84f0780eb30eb4f422bdf003f4db8
| 201,317 |
from typing import List
def ensure_substring_free(strings: List[str]) -> List[str]:
"""
Remove strings that are substrings of some other strings in the given list
"""
strings = list(set(strings))
substrings = set()
for i in range(len(strings)):
for j in range(len(strings)):
if i == j:
continue
if strings[i] in strings[j]:
substrings.add(i)
return [string for i, string in enumerate(strings) if i not in substrings]
|
47db828a34815d622ba74b4de23a6303e9aae4c9
| 319,951 |
def cs_water_T(T):
"""
    Calculates chemical shift of water based on Temperature.
"""
# according to Gottlieb, H. E., Kotlyar, V. & Nudelman, A.
# NMR Chemical Shifts of Common Laboratory Solvents as Trace Impurities.
# J. Org. Chem. 62, 7512-7515 (1997).
cs = 5.060 - 0.0122*T + (2.11e-5)*T**2
return(cs)
|
a93aa1334c69b1bf042b5fdda9fdfb045b073974
| 209,927 |
import requests
import copy
def get_dimensions_citations_web(headers, altmetric_id):
"""
Use the dimensions web URL and requests API to obtain the citations
for a given altmetric id associated with a scholarly article
Parameters
----------
    arg1 | headers: dict
The login headers from dimensions to query
arg2 | altmetric_id: int
The altmetric id of a scholarly article
Returns
-------
Dictionary
dict
"""
# create the query
query = """search publications where altmetric_id in""" + \
str([altmetric_id]) + \
"""return publications[id+doi+altmetric_id+title+times_cited+authors]"""
# Execute DSL query.
resp = requests.post(
'https://app.dimensions.ai/api/dsl.json',
data=query.encode(),
headers=headers)
# check for 200 status
if resp.status_code == 200 and 'publications' in resp.json() \
and len(resp.json()['publications']) > 0:
# just obtain the first author
response = copy.deepcopy(resp.json()['publications'][0])
if 'authors' in response.keys():
# set the first name
response['first_name'] = response['authors'][0]['first_name'] + \
' ' + response['authors'][0]['last_name']
# remove the authors key
del response['authors']
# return the final dict
return response
else:
# return the json
return dict()
|
b31a8ef784f9f5bc014085d735ce36cc5a351976
| 127,961 |
def compute_tower_height(TS, N_stages: int, top=True, bot=True):
"""
Return the height of a tower [H; in meter].
Parameters
----------
TS : float
Tray spacing [mm].
N_stages : float
Number of stages.
Notes
-----
The tower height is given by [3]_. See source code for details.
"""
# 3 m bottoms surge capacity, 1.25 m above top tray to remove entrained liquid
H = TS*N_stages/1000
if top:
H += 1.2672
if bot:
H += 3
return H
|
a151c3002fc31a95aa508e9d5feb18b9bee1eccc
| 337,316 |
def norm1(m):
"""
Return the L1-norm of the point m
"""
s = 0.0
for (name, value) in m.items():
s += abs(value)
return s
|
a6a74882cc86a8ca1b1a0e6dfbcdf5b9c3b11569
| 76,471 |
from typing import List
import logging
def cols_to_keep(numeric_cols: List, encoder_cols: List):
"""
    Combines numeric columns with encoder columns and removes the columns that are not needed (Churn and CLIENTNUM)
input:
numeric_cols: list of dataframe numeric columns
encoder_cols: list of encoded categorical columns
output:
keep_cols: list of columns to keep for training X array
"""
keep_cols = list(set(numeric_cols + encoder_cols) - {"Churn"} - {"CLIENTNUM"})
logging.info("Defined columns to keep for X train dataframe")
return keep_cols
|
326a1603e8da20c413cedf3a97495921fda014ab
| 322,087 |
def calc_company_check_digit(number):
"""Calculate the check digit for the 10-digit ИНН for organisations."""
weights = (2, 4, 10, 3, 5, 9, 4, 6, 8)
return str(sum(weights[i] * int(n)
for i, n in enumerate(number[:9])) % 11 % 10)
|
33c1ca3209fbe819dcbbd80a7b33df1aa6051216
| 50,020 |
def local_name(name):
"""Return a name with all namespace prefixes stripped."""
return name[name.rindex("::") + 2 :] if "::" in name else name
|
b0f5370fe19517c7f323975019d9db664c042c1d
| 404,441 |
import sympy
def _validate_expression(expression):
"""Check to see that an expression is a valid sympy expression"""
if expression is None or isinstance(expression, sympy.Expr):
pass
elif isinstance(expression, str):
expression = sympy.sympify(expression)
else:
raise ValueError("Please enter a string, sympy expression, "
"or None for expression")
return expression
|
9b5b317e3d2816502ce9ed617f91bc41c4f68aa3
| 348,430 |
def format_remap(remap):
"""Format remap value for DT.
Args:
remap: Remap definition.
Returns:
DT remap definition.
"""
if remap == 0 or remap is None:
return "NO_REMAP"
else:
return remap
|
bc9e72d6165f438ef8c387aeaa078900cf2cd76f
| 530,186 |
def PySequence_Concat(space, w_o1, w_o2):
"""Return the concatenation of o1 and o2 on success, and NULL on failure.
This is the equivalent of the Python expression o1 + o2."""
return space.add(w_o1, w_o2)
|
0e36fefce384de1c11ea91aff72df364af5b86b4
| 196,076 |
def euclidean_gcd_recursive(first, second):
"""
Calculates GCD of two numbers using the recursive Euclidean Algorithm
:param first: First number
:param second: Second number
"""
if not second:
return first
return euclidean_gcd_recursive(second, first % second)
|
c846cdf18b524fa632a18ef6edc24f5d29dbbabd
| 549,634 |
def int_to_bin_string(x, bits_for_element):
"""
Convert an integer to a binary string and put initial padding
to make it long bits_for_element
x: integer
    bits_for_element: bit length of machine words
Returns:
string
"""
encoded_text = "{0:b}".format(x)
len_bin = len(encoded_text)
    if len_bin < bits_for_element:  # add back the leading zeros lost when converting to int
encoded_text = "0"*(bits_for_element-len_bin)+encoded_text
return encoded_text
|
e8590223d0581985f7dcd256fa0290878ba95f68
| 668,157 |
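For example, padding to an 8-bit word:
print(int_to_bin_string(5, 8))     # '00000101'
print(int_to_bin_string(255, 8))   # '11111111'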
def make_strategy_id(strategy):
"""
Return a string that will identify a strategy - just concatenate
the numbers of transfers per gameweek.
"""
strat_id = ",".join([str(nt) for nt in strategy[0].values()])
return strat_id
|
50dea9aa8724a974ba22790e191c88d9eb6e0bae
| 626,760 |
def ConvertImageVersionToNamespacePrefix(image_version):
"""Converts an image version string to a kubernetes namespace string."""
return image_version.replace('.', '-')
|
564fa4f34ad9d24554828c87c9be35efa85ea4b8
| 199,993 |
from collections import OrderedDict
def _template(inlist):
"""
Return an OrderedDict with templates
>>> list(_template(['MNI152NLin2009c']).keys())
['MNI152NLin2009c']
>>> _template(['MNI152NLin2009c', 'MNI152NLin2009c:res-2'])
OrderedDict([('MNI152NLin2009c', {})])
>>> _template(['MNI152NLin2009c', 'MNI152NLin2009c:res-2',
... 'MNI152NLin6Asym:res-2', 'MNI152NLin6Asym'])
OrderedDict([('MNI152NLin2009c', {}), ('MNI152NLin6Asym', {'res': '2'})])
"""
if isinstance(inlist, str):
inlist = [inlist]
templates = []
for item in reversed(inlist):
item = item.split(':')
tpl_arg = (item[0], {})
for i in item[1:]:
modifier = i.split('-', 1)
tpl_arg[1][modifier[0]] = modifier[1] if len(modifier) == 2 else None
templates.append(tpl_arg)
return OrderedDict(reversed(OrderedDict(templates).items()))
|
493e20f03391c8f3fee722d803f9335c7bcf2c9b
| 275,360 |
def graph_labeling_to_list(t, keys):
"""Creates a list that contains the tree's labels (according to the pre-order traversal).
Args:
t: a dictionary that contains the nodes of a labeled tree
keys: sorted keys of the given t
Returns:
        tuple: contains the tree's labels
"""
label_list = []
for i in keys:
label_list.append(t[i].label)
return tuple(label_list)
|
bd2357a1c9dced894529d1d953124a62fc2af70c
| 526,144 |
def get_arrival(event, pickid):
"""Find the arrival object in a Catalog Event corresponding to input pick id.
Args:
event (Event): Obspy Catalog Event object.
pickid (str): Pick ID string.
Returns:
Arrival: Obspy Catalog arrival object.
"""
    if pickid is None:
        return None
    for origin in event.origins:
        idlist = [arr.pick_id for arr in origin.arrivals]
        if pickid not in idlist:
            continue
        idx = idlist.index(pickid)
        arrival = origin.arrivals[idx]
        return arrival
    return None
|
c4f8e47b0b6483fc372f374b68984f433304875b
| 60,230 |
def list_to_map(item_list, key_name):
"""
    Given a list of dicts, return a dict mapping item[key_name] -> item
    (note that key_name is popped, i.e. removed, from each dict)
:param item_list:
:param key_name:
:return:
"""
return {x.pop(key_name): x for x in item_list}
|
bca888c0d3c0a62fb591de9c110f18857adbf268
| 406,353 |
def num_desc_prime_seq_given_total_and_head(total, head, list_of_primes, set_of_primes):
"""
Subproblem in dynamic programming.
Using a pre-computed list & set of primes, count the number of descending prime sequences given a total and the head.
Note that a one-term sequence is also considered a sequence.
"""
# sanity check
assert head in set_of_primes, f"total: {total}, head: {head}"
assert total >= head, f"total: {total}, head: {head}"
# base case: sequence has only one term
if total == head:
return 1
# recursive case: sequence has more than one term
# the second term cannot exceed the head; take advantage of transitivity
num_seq = 0
for _second in list_of_primes:
if _second > head or _second > total - head:
break
else:
num_seq += num_desc_prime_seq_given_total_and_head(
total - head, _second, list_of_primes, set_of_primes
)
return num_seq
|
131cb6334c798d7384a81f2c96d1c08f425046b7
| 370,635 |
def trim(string, length=70): # type: (str, int) -> str
"""Trim a string to the given length."""
return (string[:length - 1] + '...') if len(string) > length else string
|
a1c5639c954825b0552d572c8b807c39c61ea525
| 321,820 |
def check_continue_and_end(action):
"""
Checks if an action should continue, and if it should not, ends the action
action:
The action to check
return:
False when the action is over
True when the action should continue
"""
if action.is_over():
action.end()
return False
else:
return True
|
d2d24b71543e81eaf37eb085be7d88285d055056
| 196,481 |
async def anext(iterable):
"""Return the next item from an async iterator."""
return await iterable.__anext__()
|
8cb73b7326a9d32160d6dbaab5c5500cf4aa15d4
| 141,754 |
def RGBStringToTuple(rgb_str, make7bit = True):
"""Takes a color string of format #ffffff and returns an RGB tuple.
By default the values of the tuple are converted to 7-bits.
Pass False as the second parameter for 8-bits."""
rgb_tuple = (0, 0, 0)
if (len(rgb_str) >= 7) and (rgb_str[0] == "#"):
red = int(rgb_str[1:3], 16)
green = int(rgb_str[3:5], 16)
blue = int(rgb_str[5:7], 16)
if make7bit:
red = red // 2
green = green // 2
blue = blue // 2
rgb_tuple = (red, green, blue)
return rgb_tuple
|
345c3dd1d6b89943497cd79950c81ad5441292f2
| 217,582 |
def clamp(val, minVal, maxVal):
"""
Clamp a value between minVal and maxVal
:param val: Incoming value
:param minVal: bottom of range
:param maxVal: top of range
:return: clamped value
"""
return max(minVal, min(val, maxVal))
|
d6cc80b790558f75bf15c9cc67b11dc9edc9d145
| 569,244 |
def remove_unwanted_texts(txt):
"""
    Removed messages are of the type:
    '<media omitted>': used by WhatsApp to mark where a file was uploaded
    "(file attached)": also used by WhatsApp to mark where a file was uploaded
    "this message was deleted": used by WhatsApp to mark where another user deleted a message
    "you deleted this message": used by WhatsApp to mark where the user deleted their own message
    Returns 1 if the text is none of these (keep it), otherwise 0.
"""
if (txt != '<media omitted>') & (txt.find("(file attached)")==-1) & (txt !="this message was deleted") & (txt!="you deleted this message"):
return 1
else:
return 0
|
62abc7a4a09002117d5d994960e288dce5297588
| 245,246 |
def get_output_filename_value(ctx, output_attr_name, default_filename):
"""Returns the filename for an output attribute within the context.
If the output_attr_name is defined in the context, the function returns
the value of the output_attr_name. Otherwise, the function returns the
default_filename.
Args:
ctx: The current rule's context object.
output_attr_name: The name of the output attribute.
default_filename: The default filename.
Returns:
The filename for an output attribute within the context.
"""
attribute_value = getattr(ctx.attr, output_attr_name)
if attribute_value:
return attribute_value.name
else:
return default_filename
|
10d680c2b9dc0abbfc9583204a87a31c6ddc4ccf
| 353,495 |
def subset_of_df(df, row_names=None, col_names=None):
"""
:param df: pd.DataFrame; DataFrame to be subsetted
:param row_names: list(); List of the rows to include in subset
    :param col_names: list(); List of the columns to include in subset
:return: df; pd.DataFrame: Subset of original DataFrame
"""
subset_df = df[col_names].loc[row_names]
return subset_df
|
d76f90147a1945f6146b98a3aaa55e8aaa558a99
| 295,188 |
def is_ptr_variable_type(type):
"""
Determines whether or not the specified type is a pointer variable type.
"""
return type.isVar() and type.isPtr()
|
5aa2ee46fb5d232920034aa9d0616530728f834c
| 86,644 |
def area_filter_gdf(pred_gdf, min_area=2000):
"""Clean polygon geodataframe so outputs greater than or equal to minimum threshold
Args:
pred_gdf (geopandas.GeoDataFrame): Geodataframe with polygon geometries in geometry column
        min_area (int): Area in metres squared below which polygons will be dropped. Defaults to 2000.
Returns:
geopandas.GeoDataFrame: Geodataframe of just polygons above minimum size
"""
if pred_gdf.crs.to_string() == "EPSG:4326":
print("prediction image not in projected crs, cannot filter by area")
return pred_gdf
else:
pred_gdf["area"] = pred_gdf["geometry"].area
pred_gdf = pred_gdf[pred_gdf["area"] >= min_area]
return pred_gdf
|
89ed920a0ac3ece5f5ea00b481e7f942ebbcc2bf
| 251,923 |
import typing
def rinex_version(s: str) -> typing.Tuple[typing.Union[float, str], bool]:
"""
Parameters
----------
s : str
first line of RINEX/CRINEX file
Results
-------
version : float
RINEX file version
is_crinex : bool
is it a Compressed RINEX CRINEX Hatanaka file
"""
if not isinstance(s, str):
raise TypeError('need first line of RINEX file as string')
if len(s) < 2:
raise ValueError(f'first line of file is corrupted {s}')
if len(s) >= 80:
if s[60:80] not in ('RINEX VERSION / TYPE', 'CRINEX VERS / TYPE'):
raise ValueError('The first line of the RINEX file header is corrupted.')
# %% .sp3 file
if s[0] == '#':
if s[1] != 'c':
raise ValueError('Georinex only handles version C of SP3 files.')
return 'sp3' + s[1], False
# %% typical RINEX files
try:
vers = float(s[:9]) # %9.2f
except ValueError as err:
raise ValueError(f'Could not determine file version from {s[:9]} {err}')
is_crinex = s[20:40] == 'COMPACT RINEX FORMAT'
return vers, is_crinex
|
e47ac25e35d97e68c1dff4448a836e52a09f3e91
| 526,249 |
def calculate_diff(user, ledgers, split_pool):
""" Calculate profit/loss for given user and ledgers """
win_amount = ledgers['winLedger'].get(user, 0)
deposited_bets = ledgers['depositedBets'].get(user, 0)
split_liquidity = ledgers['splitLedger'].get(user, 0)
share = ledgers['liquidityShares'].get(user, 0)
total_shares = sum(ledgers['liquidityShares'].values())
bets_diff = win_amount - deposited_bets
split_share = (0 if total_shares == 0 else
split_pool * share / total_shares)
liquidity_diff = split_share - split_liquidity
return int(bets_diff + liquidity_diff)
|
596f3ab4a832f66ee5fcf3528dd017aef18c9cd7
| 546,012 |
def _get_unique_child(xtag, eltname):
    """Get the unique child element under xtag with name eltname.
    Returns None if there is not exactly one matching child."""
    results = xtag.findall(eltname)
    if len(results) == 1:
        return results[0]
    return None
|
9f9de45c3d1bcfbfb49b2dc011cdf61bc7d38f69
| 525,094 |
import json
def filter_xiaomi_aqara_battery_low(topic, message):
"""Ignore messages from Xiaomi Aqara when the battery is OK."""
data = json.loads(message)
if "battery" in data:
return int(data["battery"]) > 20
return True
|
f42c57c5f33bafa7af2618590e4fffb6b3c23b75
| 547,865 |
def is_inferior_running(inferior):
"""Check if the inferior is a running process
For the purposes of this check, running does not necessarily mean currently
executing, but rather that there is a running process on the system for the
inferior. If this function is being run in the first place then the inferior
is more than likely stopped by GDB anyway, unless this function were called
from a background thread.
"""
for thread in inferior.threads():
if thread.is_running():
return True
return False
|
ea97fa4d63e4e75d7b6cf1dab8c4f9df59a2806b
| 570,950 |
def clip(value, minimum=-float("inf"), maximum=float("inf")):
"""Clips a value to a certain range
Arguments:
value {float} -- Value to clip
Keyword Arguments:
minimum {float} -- Minimum value output can take
(default: {-float("inf")})
maximum {float} -- Maximum value output can take
(default: {float("inf")})
Returns:
float -- clipped value
"""
if value < minimum:
return minimum
if value > maximum:
return maximum
return value
|
db88f1150b355e920f24ee12b98a95b4fafc2260
| 381,639 |
def need_to_rotate_log(min_size, max_size, max_time_interval, log_size, time_interval):
"""Check if log match criteria for rotation"""
return log_size >= max_size or (time_interval == max_time_interval and log_size >= min_size)
|
9aed588ea3eb198be58c46a8f25f666412d44440
| 298,575 |
import inspect
def get_mismatching_default_values(f1, f2, mapping=None):
"""Check that two functions have the same default values for shared parameters."""
# Additional mappings from f1 parameters to f2 parameters may be provided
if mapping is None:
mapping = {}
params1 = inspect.signature(f1).parameters
params2 = inspect.signature(f2).parameters
mismatch = []
for f1_param_name in params1:
# If the param is named differently in f2, rename
f2_param_name = mapping[f1_param_name] if f1_param_name in mapping else f1_param_name
        # If the parameter does not appear in the signature of f2, there's
# nothing to do
if f2_param_name not in params2:
continue
val1 = params1[f1_param_name].default
val2 = params2[f2_param_name].default
if val1 != val2:
mismatch.append((f1_param_name, val1, f2_param_name, val2))
return mismatch
|
4085c3ae14209d17b4245abf0ee24db81bbc63cc
| 55,467 |
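A small sketch (the two functions below are my own illustrations):
def f1(a, b=1, c="x"):
    pass
def f2(a, b=2, d=None):
    pass
print(get_mismatching_default_values(f1, f2))
# [('b', 1, 'b', 2)]
print(get_mismatching_default_values(f1, f2, mapping={'c': 'd'}))
# [('b', 1, 'b', 2), ('c', 'x', 'd', None)]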
def contain(x1, y1, w1, h1, x2, y2, w2, h2):
"""
check if the first rectangle fully contains the second one
"""
return x1 <= x2 and y1 <= y2 and x2 + w2 <= x1 + w1 and y2 + h2 <= y1 + h1
|
1d1baa1569cd42e72a819d9fa2029b5339952e10
| 277,545 |
def enforce_file_extension(file, extension):
"""
Returns the given string (file name or full path) with the given extension.
    If the string had no file extension, .extension will be appended. If it had another
extension it is changed to the given extension. If it had the given
extension already the unchanged string is returned.
Parameters
----------
file: File name or full path. Can include a file extension .*
extension: File extension the given file name will be returned with.
Returns
-------
The given file with the given extension.
"""
if not extension.startswith('.'):
extension = '.' + extension
split_str = file.split('.', 1)
if (len(split_str) != 2) or (split_str[-1] != extension[1:]):
file = split_str[0] + extension
return file
|
021d69bcc1d4cf3f3fc500e9c8418c60b8c99d9f
| 41,947 |
import re
def extract_floats(string):
"""Extract all real numbers from the string into a list (used to parse the CMI gateway's cgi output)."""
return [float(t) for t in re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', string)]
|
0dc26261d45bd0974e925df5ed660a6e31adf30c
| 703,957 |
def run_trial(exp, info):
"""Execute a block."""
exp.s.pattern.pos = exp.p.stim_pos[int(info.stim_pos)]
block_dur = exp.p.block_dur
update_hz = exp.p.update_hz
for i in range(block_dur * update_hz):
exp.s.pattern.randomize_phases(limits=(.2, .8))
end = info["block_time"] + (i + 1) * (1 / update_hz)
if not i:
info["block_onset"] = exp.clock.getTime()
exp.wait_until(end, draw=["pattern", "fix"])
exp.check_fixation(allow_blinks=True)
exp.check_abort()
return info
|
4a1dc403985fa529a62fec432798e0698882d723
| 445,570 |
def split_operations_by_type(operations: list) -> tuple:
"""
Split input operations into sub-lists of each transformation type
the normalization operation is placed last to apply correctly the other operations
:param operations: list of pipeline operations
:return: tuple of lists of the operations separated into color, geometry and independent
"""
color, geometry, independent = [], [], []
normalize = None
for op in operations:
if op.get_op_type() == 'color':
color.append(op)
elif op.get_op_type() == 'geometry':
geometry.append(op)
elif op.get_op_type() == 'normalize':
normalize = op
else:
independent.append(op)
if normalize is not None:
color.append(normalize) # normalization must be last color operation
return color, geometry, independent
|
6db2b3b348df4c44afeea5be0edd1ad2ceab9d14
| 309,842 |
from datetime import datetime
import pytz
def nowtz() -> datetime:
"""Fetch Datetime now object in UTC.
Returns:
Datetime object
"""
return datetime.now(tz=pytz.utc)
|
394e6786a23ddf908dc182daf6a55572b9246920
| 196,505 |
def _func_destroy_scheduler_session(sessionId, dask_scheduler):
"""
    Remove session data from _raft_comm_state, associated with sessionId
Parameters
----------
sessionId : session Id to be destroyed.
dask_scheduler : dask_scheduler object
(Note: this is supplied by DASK, not the client)
"""
if sessionId is not None and sessionId in dask_scheduler._raft_comm_state:
del dask_scheduler._raft_comm_state[sessionId]
else:
return 1
return 0
|
b819ea3240ca6dc322546027128c65d38df92967
| 95,501 |
def IssueIsInHotlist(hotlist, issue_id):
"""Returns T/F if the issue is in the hotlist."""
return any(issue_id == hotlist_issue.issue_id
for hotlist_issue in hotlist.items)
|
31f48cb2c4eeddc450239edcf8d293e554cba265
| 674,956 |
def key_func(x):
"""
Used to group data. Takes in a tuple and returns the first value. This is the key that
is used to group on (in this case a specific word).
"""
return x[0]
|
3897807a77f7c6432def1f78eb07a2107554c036
| 556,987 |
import json
def parse_message(msg):
"""
Load JSON data from Zabbix {ALERT.MESSAGE}
:param msg: JSON as string
:return: JSON obj
"""
res = json.loads(str(msg))
return res
|
61f77b2007dc960c67190cdfc8afc076977dd2c9
| 407,463 |
import random
import string
def rnd_string(n=10):
"""Generate a random string."""
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(n))
|
1869afa5e950c24d75a446c54b9f53abd942c007
| 696,291 |
def get_filenames_from_urls(urls):
""" Returns a list of filenames from a list of urls"""
return [url.split("/")[-1] for url in urls]
|
ca02bcb0efe88beb008a45c572b276cd9bd4ae9f
| 593,905 |
def calculatePostIapAggregateInterference(q_p, num_sas, iap_interfs):
"""Computes post IAP allowed aggregate interference.
Routine to calculate aggregate interference from all the CBSDs managed by the
SAS at protected entity.
Args:
q_p: Pre IAP threshold value for protection type (mW)
num_sas: Number of SASs in the peer group
iap_interfs: A list of tuple
(latitude, longitude, asas_interference, agg_interference) where:
asas_interference: a list of interference (per channel) for managing SAS.
agg_interference: a list of total interference (per channel).
The interference is in mW/IAPBW.
Returns:
ap_iap_ref: The post-IAP allowed interference, as a dict formatted as:
{latitude : {longitude : [interference(mW/IAPBW), .., interference(mW/IAPBW)]}}
where the list holds all values per channel of that protection point.
"""
if not isinstance(iap_interfs, list):
iap_interfs = [iap_interfs]
ap_iap_ref = {}
for lat, lon, asas_interfs, agg_interfs in iap_interfs:
if lat not in ap_iap_ref: ap_iap_ref[lat] = {}
ap_iap_ref[lat][lon] = [
(float(q_p - aggr_interf) / num_sas + asas_interf)
for asas_interf, aggr_interf in zip(asas_interfs, agg_interfs)]
return ap_iap_ref
|
269aa10ff5a4a22193525698248ab56e59759fb6
| 668,103 |
def key_exists(dic, key):
"""Check if key exists in dictionary and return Boolean.
Args:
        dic (dict): Dictionary which might contain key.
key (Any): Identifier.
Returns:
Boolean : Whether dictionary contains key.
"""
    return key in dic
|
0c59fdd3d904b7570b03d21c9f551666f4b2c203
| 568,976 |
def bond_length(element1, element2):
"""
Returns approximate bond-length between atoms of element1 and element2.
Bond lengths taken from Handbook of Chemistry and Physics. The
information provided there was very specific, so representative
examples were used to specify the bond lengths. Sitautions could
arise where these lengths would be incorrect, probably slight errors
(<0.06) in the hundreds.
Parameters
----------
element1: string:
Name of first element.
element2: string
Name of second element.
"""
# All distances are in Angstroms. Duplicate pairs not specified. For
# example, to find distance ("H", "C"), the lookup key is ("C", "H")
distances = {
("C", "C"): 1.53,
("N", "N"): 1.425,
("O", "O"): 1.469,
("S", "S"): 2.048,
("SI", "SI"): 2.359,
("C", "H"): 1.059,
("C", "N"): 1.469,
("C", "O"): 1.413,
("C", "S"): 1.819,
("C", "F"): 1.399,
("C", "CL"): 1.790,
("C", "BR"): 1.910,
("C", "I"): 2.162,
("N", "H"): 1.009,
("N", "O"): 1.463,
("N", "BR"): 1.843,
("N", "CL"): 1.743,
("N", "F"): 1.406,
("N", "I"): 2.2,
("O", "S"): 1.577,
("O", "H"): 0.967,
        # This one not from the source cited above. Not sure where it's from, but
# it wouldn't ever be used in the current context ("AutoGrow")
("S", "H"): 2.025/1.5,
("S", "N"): 1.633,
("S", "BR"): 2.321,
("S", "CL"): 2.283,
("S", "F"): 1.640,
("S", "I"): 2.687,
("P", "BR"): 2.366,
("P", "CL"): 2.008,
("P", "F"): 1.495,
("P", "I"): 2.490,
# estimate based on eye balling Handbook of Chemistry and Physics
("P", "O"): 1.6,
("SI", "BR"): 2.284,
("SI", "CL"): 2.072,
("SI", "F"): 1.636,
("SI", "P"): 2.264,
("SI", "S"): 2.145,
("SI", "C"): 1.888,
("SI", "N"): 1.743,
("SI", "O"): 1.631,
("H", "H"): .7414,
}
if (element1, element2) in distances:
return distances[(element1, element2)]
elif (element2, element1) in distances:
return distances[(element2, element1)]
else:
raise ValueError("Distance between %s and %s is unknown" %
(element1, element2))
|
38dd6a28b92ecc4df844d1a9ad1a5c5cb57c8ac9
| 548,106 |
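Lookups are symmetric, for example:
print(bond_length("C", "H"))   # 1.059
print(bond_length("H", "C"))   # 1.059 (falls back to the ("C", "H") key)
print(bond_length("N", "H"))   # 1.009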
import json
def load_questions(data_dir, data_type):
"""Load questions information from directory.
Args:
data_dir: str, data directory.
data_type: str, one of 'train', 'val', 'test'
Returns:
questions: list of dictionaries. Each dictionary contains information
about a question, its corresponding image and answer.
"""
filename = data_dir + '/questions/CLEVR_' + data_type + '_questions.json'
with open(filename) as data_file:
return json.load(data_file)['questions']
|
caf1939258e0637d4d366ce8ef7d66d84248ec71
| 102,535 |
def is_iterable(obj):
"""
Returns True if 'obj' is iterable and False otherwise.
We check for __iter__ and for __getitem__ here because an iterable type
must, by definition, define one of these attributes but is not required or
guarenteed to define both of them. For more information on iterable types
see:
http://docs.python.org/2/glossary.html#term-iterable
http://docs.python.org/2/library/functions.html#iter
"""
return hasattr(obj, '__iter__') or hasattr(obj, '__getitem__')
|
b981e2d802a48b1cba626317ed812954de6b2bbf
| 132,910 |
import base64
def decodeDataFromBase64(data):
"""Decode data from base 64
Keyword arguments:
data -- string, Base 64 encoded data
    return: (bytes) Decoded data as bytes
"""
return base64.b64decode(data)
|
e94e95b15163a264c3a338f57d6a7cfe60c63f28
| 537,997 |
def sumofDigits(n):
"""calculate the sum of the digits of an input integer"""
assert n >= 0 and int(n) == n, 'The number has to be positive integers only'
if n == 0:
return 0
else:
return int(n%10) + sumofDigits(int(n/10))
|
2c861029ea128e15d5ffbe77de12aed19d8f8ba4
| 386,472 |
from typing import Union
def prepare_raw_state(raw_state: Union[int, str]) -> str:
"""
Antivirus product or firewall product states given by securitycenter2 are numeric
We need to convert them to hexadecimal and pad prefix zeros so we get 0x123456 length value
we can actually use
Formatter string
{ # Format identifier
0: # first parameter
# # use "0x" prefix
0 # fill with zeroes
{1} # to a length of n characters (including 0x), defined by the second parameter
x # hexadecimal number, using lowercase letters for a-f
} # End of format identifier
"""
state = "{0:#0{1}x}".format(int(raw_state), 8)
if len(state) > 8:
raise ValueError("Given state is too long.")
return state
|
ced8aa227b301a28daa2aa57ab318e4089242a98
| 312,188 |
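For example, a raw numeric state is widened to the 0x-prefixed, 8-character form (sample values are illustrative):
print(prepare_raw_state("266240"))   # '0x041000'
print(prepare_raw_state(4096))       # '0x001000'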
def get_ds_uncommitted(datastore):
"""
Get vsphere datastore uncommitted size
:param datastore: vsphere datastore object
:return: datastore uncommitted size
"""
uncommitted_size = datastore.summary.uncommitted
return uncommitted_size
|
16b7a131867f8e26eeaf86bf0c31fd592641cd0b
| 471,933 |
from pathlib import Path
def hash_from_path(path: Path) -> str:
"""Returns hash from a given path.
Simply removes extension and folder structure leaving the hash.
Args:
path: path to get hash from
Returns:
Hash reference for file.
"""
return path.with_suffix('').name
|
89ab2383f042fb31bd27adc759241b0eae2032a0
| 88,721 |
def from_perfect_answer(text: str) -> str:
"""
Generates GIFT-ready text from a perfect answer.
Parameters
----------
text : str
The phrasing of the answer.
Returns
-------
out: str
GIFT-ready text.
"""
return f'={text}'
|
2ec06f37b4309abcd23e18508a8f4aa17d95fb73
| 362,763 |
def translate_time(x):
"""
translate time from hours to hours and minutes
Parameters
----------
x : float
Something like 6.75
Returns
-------
y : str
        Something like '06:45'
"""
y = str(int(x)).zfill(2) + ":" + str(int((x*60) % 60)).zfill(2)
return y
|
ce461402c9149641ba58b1b23822871ba81b1747
| 254,564 |
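A couple more worked examples (values are mine):
print(translate_time(23.5))    # '23:30'
print(translate_time(0.25))    # '00:15'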
def filter_on_n_datasets(counts_in_datasets, min_datasets):
""" Given a data frame with columns gene_ID, transcript_ID, dataset,
and count (in that dataset), count the number of datasets that each
transcript appears in. Then, filter the data such that only transcripts
found in at least 'min_datasets' remain. """
cols = ['gene_ID', 'transcript_ID']
dataset_count_df = counts_in_datasets[cols].groupby(cols).size()
dataset_count_df = dataset_count_df.reset_index()
dataset_count_df.columns = cols + ["n_datasets"]
filtered = dataset_count_df.loc[dataset_count_df['n_datasets'] >= min_datasets]
return filtered
|
41c0238559955a39d1d9a3d8577491ed45d9dd0e
| 338,299 |
import traceback
import textwrap
def get_stacktrace(n_skip=1, indent=None):
"""
Return a stacktrace ready for printing; usage:
# Dump the stack of the current location of this line.
'\n'.join(get_stacktrace(0))
logger.info("First line before stacktrace\n{stack}".format(
stack=get_stacktrace())
# Outputs to stdout:
ID=14751/MainProcess @ finish, profilers.py:1658 :: 2019-07-09 15:52:26,015 INFO: First line before stacktrace
File "*.py", line 375, in <module>
...
...
File "*.py", line 1658, in finish
logger.info("First line before stacktrace\n{stack}".format(
:param n_skip:
Number of stack-levels to skip in the caller.
        By default we skip the first one, since it's the call to traceback.extract_stack()
        inside the get_stacktrace() function.
        If you make n_skip=2, then it will skip your function call to get_stacktrace() as well.
:return:
"""
stack = traceback.extract_stack()
stack_list = traceback.format_list(stack)
stack_list_keep = stack_list[0:len(stack_list)-n_skip]
stack_str = ''.join(stack_list_keep)
if indent is not None:
stack_str = textwrap.indent(stack_str, prefix=" "*indent)
return stack_str
|
2063cb7527aaffdecd200c9ee82c799c8191c4bb
| 449,269 |
def rectangleSelect(x1, y1, x2, y2, ts):
"""
Returns the coordinates of a rectangle whose edges are snapped to the
divisions between tiles. The returned value is in pixel units in the form
(x, y, w, h)
@type x1: int
@param x1: left x-coordinate in tiles
@type y1: int
@param y1: top y-coordinate in tiles
@type x2: int
@param x2: right x-coordinate in tiles
@type y2: int
@param y2: bottom y-coordinate in tiles
@type ts: int
@param ts: size of tile in pixels
"""
rx = min(x1, x2) * ts
ry = min(y1, y2) * ts
rw = (abs(x2 - x1) + 1) * ts
rh = (abs(y2 - y1) + 1) * ts
return int(rx), int(ry), int(rw), int(rh)
|
c44022a11b72807a55467220ead473dddc488178
| 122,850 |
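For example, selecting the tile block from (2, 1) to (4, 3) with 16-pixel tiles:
print(rectangleSelect(2, 1, 4, 3, 16))   # (32, 16, 48, 48)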
def lcm(*args):
"""Least commom multiple of a n-uple of integers
>>> lcm(1,2,3)
6
>>> lcm(4,8)
8
>>> lcm(10,4,20)
20
"""
    multiple = args[0]
    while any(map(lambda x: multiple % x != 0, args)):
        multiple += args[0]
    return multiple
|
78cc00acef553aa5ae1b9e3cbba6fd3298e66c8f
| 326,287 |
def get_all_ips(instance):
""" Returns the public and private ip addresses of an AWS EC2 instances
"""
output = []
output.append(instance.private_ip_address)
output.append(instance.public_ip_address)
return output
|
9426b0682da2ffe3120e6826b1a1d112a6e2d63e
| 508,476 |
from typing import Set
def _negative_to_positive_state_indexes(indexes: Set[int], n_entries) -> Set[int]:
""" Convert negative indexes of an iterable to positive ones
Parameters
----------
indexes: Set[int]
indexes to check and convert
n_entries: int
total number of entries
Returns
-------
new_entries: Set[int]
the positive indexes
"""
new_entries: Set[int] = set()
for _, index in enumerate(indexes):
new_index = index + n_entries if index < 0 else index
if new_index >= n_entries:
err_msg = "State '{0}' exceeds the maximum number of states of '{1}'"
raise ValueError(err_msg.format(index, n_entries))
new_entries.add(new_index)
return new_entries
|
65a66766a1eef881393ee5b89d6785f0ebcab6a5
| 685,916 |
def process_comment(comment, colors):
"""
    Helper function that scans a text comment for any mention of a color. Returns a list of
    dicts, where each dict holds comment metadata for a color found in the comment. If no color was in the comment, an empty list is returned.
"""
color_info = []
for color in colors:
        if ' ' + color + ' ' in comment.body.lower():
            # report 'purple' as 'violet'; all other colors keep their own name
            color_name = 'violet' if color == 'purple' else color
            color_info.append({'comment_id': comment.id,
                               'created_utc': comment.created_utc,
                               'color': color_name,
                               'subreddit_display_name': comment.subreddit.display_name,
                               'body': comment.body,
                               'score': comment.score})
return color_info
|
e417adfee5d9df631c8617499c578707a73cabad
| 431,098 |
def get_angle_diff(angle1, angle2):
"""Return the angle, between 0 and 2*pi, between angle1 and angle2.
"""
diff = angle2 - angle1
while diff < -3.1415:
diff += 3.1415*2
while diff > 3.1415:
diff -= 3.1415*2
return abs(diff), diff
|
970d1444380955ca7d8ee546baffa5ed9c06d4e5
| 419,847 |
def is_mnt(path_in: str):
"""Check if the path posix and lives in '/mnt/'."""
return path_in[:5] == "/mnt/"
|
32aa743b43568a0afb45e4e7a4f1382a757bd7e8
| 366,708 |
def int_to_abbrev_str(n: int):
"""Given an integer returns an abbreviated string representing the integer, e.g., '100K' given 100000"""
if n > 0 and n % 10**6 == 0:
return f'{n // 10**6}M'
elif n > 0 and n % 10**3 == 0:
return f'{n // 10**3}K'
else:
return f'{n}'
|
862a5932954426fc0f2f923a721e53834f5067ab
| 599,911 |
def getContour(semitones):
""" Given a list of integers defining the size and direction of a series of musical intervals in semitones,
this function encodes the contour of the melody with Parsons code for musical contour where u=up, d=down, r=repeat.
"""
contour = ''
for p in semitones:
if p == 0:
contour = contour + 'r' # repeated
elif p > 0:
contour = contour + 'u' # up
elif p < 0:
contour = contour + 'd' # down
return contour
|
f0b2bce3fd082b6d368742ac38c1d7377d647894
| 286,472 |
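For example, a melody that rises, falls, repeats a pitch and rises again:
print(getContour([2, -1, 0, 3]))   # 'udru'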