content | sha1 | id |
---|---|---|
def remove_duplicate_index(df):
    """Remove all entries in a data frame that have a duplicate index.
    :param df: DataFrame containing duplicate indices.
    :returns: DataFrame with the duplicates removed.
    """
    index_name = df.index.name
    md = df.reset_index()
    md.drop_duplicates(index_name, inplace=True)
    md.set_index(index_name, inplace=True)
    return md
|
1a22c39b82c91aa00bbb7c7c71e18427cbd3058a
| 296,938 |
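A brief usage sketch for remove_duplicate_index; the frame below is illustrative, not from the dataset, and assumes pandas is available:
import pandas as pd
df = pd.DataFrame({"v": [1, 2, 3]}, index=pd.Index([0, 0, 1], name="idx"))
deduped = remove_duplicate_index(df)
# drop_duplicates keeps the first row for each repeated index value,
# so deduped has index [0, 1] and values [1, 3].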
def struct_dict(struct):
    """
    take a ctypes.Structure and return its field/value pairs
    as a dict.
    """
    # field entries may be (name, type) or (name, type, bit_width); index 0 is the name
    return {field[0]: getattr(struct, field[0]) for field in struct._fields_}
|
dcabfa370af5446255cb71faeccb2f8086b35ff6
| 324,051 |
def _get_c_string(data, position):
    """Decode a BSON 'C' string to python unicode string."""
    end = data.index(b"\x00", position)
    return data[position:end].decode('utf8'), end + 1
|
6cd01aba9c675cdef0db5df4d523e3fdbf7a2ff6
| 249,232 |
from typing import List
def as_package(names: List[str]) -> str:
    """Joins names as a package name."""
    return '.'.join(names)
|
7369de516104d94d11784e1dfef64ca9c86ddb26
| 612,463 |
def option_list(opts):
    """Convert key, value pairs into command-line options.
    Parameters
    ----------
    opts : dict-like
        Convert a dictionary into command-line options.
    Returns
    -------
    :class:`list`
        A list of command-line options.
    """
    optlist = []
    for key, val in opts.items():
        keystr = "--{}".format(key)
        if isinstance(val, bool):
            # True booleans become bare flags; False ones are dropped
            if val:
                optlist.append(keystr)
        else:
            optlist.append(keystr)
            if isinstance(val, float):
                optlist.append("{:.14e}".format(val))
            elif isinstance(val, (list, tuple)):
                optlist.extend(val)
            else:
                optlist.append("{}".format(val))
    return optlist
|
db4acd20b1509b757776916411609cfa576168ea
| 555,866 |
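A hedged usage sketch for option_list with illustrative values:
opts = {"verbose": True, "dry-run": False, "tol": 1e-6, "files": ["a.txt", "b.txt"]}
print(option_list(opts))
# ['--verbose', '--tol', '1.00000000000000e-06', '--files', 'a.txt', 'b.txt']
# True booleans become bare flags, False ones are dropped, floats are
# rendered in scientific notation, and lists are spliced in as-is.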
def list_all_equal(lst):
    """Return true if all elements in the list are equal."""
    # Note: an empty list yields False, since len(set([])) == 0.
    return len(set(lst)) == 1
|
1c01928305ada17390e9198cb25d8af2e913ad9b
| 437,092 |
import re
def configSingleEntry(lineData, configSection):
    """Searches for a section and returns the single entry line after the section heading."""
    sectionContentsString = ""
    error = 'NONE'
    sectionfound = 0
    for line in lineData:
        if sectionfound == 0:
            if re.match(configSection, line):
                sectionfound = 1
        # section found; get the single line of data that follows
        else:
            sectionContentsString = line
            break
    if sectionfound == 0:
        error = "Section " + configSection + " not found"
    return error, sectionContentsString
|
da5325d6ff525e70a9fa46b7380a2585d831a15e
| 474,887 |
def narrowpeaks_cols() -> list:
    """
    Return list of narrowpeak column names
    :return: a list of strings
    """
    return ['chr', 'start', 'stop', 'name', 'score', 'strand',
            'signalValue', 'pValue', 'qValue', 'peak']
|
3cd49c738fff19775c0ee934a1a2158dd0b06fce
| 557,953 |
import logging
def collect_element(tree, selector: str, default=""):
    """Selects the first element from the html tree by selector and returns its text_content(), or default if no element is found."""
    elements = tree.xpath(selector)
    if not elements:
        logging.info("Collecting isn't possible by selector: %s", selector)
        return default
    return elements[0].text_content()
|
d055b3c638d5c2982173c04635e8c699a038d200
| 242,818 |
def keep_expired_by(flags, mstone):
    """Filter flags to contain only flags that expire by mstone.
    Only flags with an expiration milestone <= mstone are kept; flags that
    never expire (expiry_milestone == -1) are excluded.
    >>> keep_expired_by([{'expiry_milestone': 3}], 2)
    []
    >>> keep_expired_by([{'expiry_milestone': 3}], 3)
    [{'expiry_milestone': 3}]
    >>> keep_expired_by([{'expiry_milestone': -1}], 3)
    []
    """
    return [f for f in flags if -1 != f['expiry_milestone'] <= mstone]
|
c5c6b7e7a2cc730a77e3928e78977b14d10759b4
| 452,922 |
def argmax(x):
    """
    Returns the index of the largest element of the iterable `x`.
    If two or more elements equal the maximum value, the index of the first
    such element is returned.
    >>> argmax([1, 3, 2, 0])
    1
    >>> argmax(-abs(x) for x in range(-3, 4))
    3
    """
    argmax_ = None
    max_ = None
    for (nItem, item) in enumerate(x):
        if (argmax_ is None) or (item > max_):
            argmax_ = nItem
            max_ = item
    return argmax_
|
847f7f7a599c0623d8ee6218ea85a8176fa07d79
| 220,926 |
import tempfile
import requests
def file_from_url(url: str):
    """
    file_from_url requests a file from an URL.
    Raises an exception if the request fails.
    Parameters
    ==========
    url : str
        The resource URL.
    Returns
    =======
    _TemporaryFileWrapper
        Requested file as temporary file handler.
    """
    CHUNK_SIZE = 1024 * 1024
    file = tempfile.NamedTemporaryFile()
    res = requests.get(url, stream=True)
    if not res.ok:
        raise Exception(
            'request failed with status code {0}'.format(res.status_code))
    for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
        file.write(chunk)
    file.seek(0)  # rewind so callers can read the content from the start
    return file
|
f0ae2801e49604129f359bc8d5d6cbb97d58a887
| 116,395 |
def get_unicode(text: str) -> bytes:
    """Returns the raw-unicode-escape encoding of the input text
    Parameters
    ----------
    text : str
        Text to encode
    Returns
    -------
    bytes
        Text with characters encoded in raw unicode.
    """
    return text.encode("raw_unicode_escape")
|
701e37c96e9c93921c744a16afae570fc345d02c
| 247,704 |
def fix_initial_data(initial, initial_data_keymap):
    """
    Take a dict like this as `initial`:
        { 'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
    and a dict like this as `initial_data_keymap`:
        { 'newkey1': ['key1', 'key2'], 'newkey2': ['key3']}
    and remap the initial dict to have this form:
        { 'newkey1': ['val1', 'val2'], 'newkey2': ['val3']}
    Used for rearranging initial data in fields to match declared maps.
    """
    if initial:
        for dest, sources in list(initial_data_keymap.items()):
            data = [initial.pop(s, None) for s in sources]
            initial[dest] = data
    return initial
|
5f72592e460c058c7a3ffee6f1e2d5957c5fe7b1
| 576,708 |
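A usage sketch for fix_initial_data, reusing the values from its docstring:
initial = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
keymap = {'newkey1': ['key1', 'key2'], 'newkey2': ['key3']}
print(fix_initial_data(initial, keymap))
# {'newkey1': ['val1', 'val2'], 'newkey2': ['val3']}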
def invcalcbarycentric(pointuv, element_vertices):
    """
    Convert barycentric coordinates into 3d
    https://en.wikipedia.org/wiki/Barycentric_coordinate_system
    https://math.stackexchange.com/questions/2292895/walking-on-the-surface-of-a-triangular-mesh
    :param pointuv: Point in barycentric coordinates (u, v)
    :param element_vertices: Vertices of current element
    :return: pointuv in 3d coordinates (x, y, z)
    """
    return (element_vertices[0]
            + pointuv[0] * (element_vertices[1] - element_vertices[0])
            + pointuv[1] * (element_vertices[2] - element_vertices[0]))
|
9aebf9e0579321788b242653a8c51b20dcad2fea
| 80,752 |
def readall(read_fn, sz):
    """Reads `sz` bytes using `read_fn`
    Raises `EOFError` if `read_fn` returned the empty byte array while reading
    all `sz` bytes.
    """
    buff = b''
    have = 0
    while have < sz:
        chunk = read_fn(sz - have)
        have += len(chunk)
        buff += chunk
        if len(chunk) == 0:
            raise EOFError
    return buff
|
a5f1395dd5306f7e907a598e716090fc3f19681d
| 163,504 |
def get_load_config_timestamp(pefile_object):
    """
    Retrieves the timestamp from the Load Configuration directory.
    :param pefile.PE pefile_object: pefile object.
    :return: Recovered timestamp from the PE load config (if any);
        0 if there isn't one.
    :rtype: int
    """
    timestamp = 0
    if hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
        loadconfigdata = pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG
        timestamp = getattr(loadconfigdata.struct, 'TimeDateStamp', 0)
    return timestamp
|
4b04afa7d844ce05761fa5b8f484540a1ae243a4
| 21,184 |
def append_default_extension(filename, default_extension='.png'):
    """If a filename has no extension yet, add the default extension to it"""
    if '.' in filename:
        return filename
    else:
        return filename + default_extension
|
70181b03aba9ec73836d6e33784b2123ade67406
| 118,006 |
def hexify(s):
    """
    Used to convert message to hex integers when encrypting.
    :param s: a string.
    :return: a string like '0x..', encoding each character as an ascii hex number.
    """
    if not s:  # if input is null, encrypt the message: '!'
        return '0x21'  # ascii code for '!'
    lst = []
    for ch in s:
        hv = format(ord(ch), '02x')  # zero-pad so each character is two hex digits
        lst.append(hv)
    return '0x' + ''.join(lst)
|
e457880c713cbc8d70ba3463a6fe9331a96ba5a8
| 630,709 |
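A usage sketch for hexify (character codes: 'H' is 0x48, 'i' is 0x69):
print(hexify("Hi"))  # '0x4869'
print(hexify(""))    # '0x21', the empty-input fallback for '!'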
import torch
def init_optimizerD(opt, D, train_last_layer_only=False):
    """Initialize optimizer for discriminator D
    """
    params_to_train = D.parameters()
    if train_last_layer_only and opt.DiscArch == 'concat_first':
        params_to_train = D.last_linear.parameters()
    optimizerD = torch.optim.Adam(params_to_train, lr=opt.lrD,
                                  betas=(opt.beta1, opt.beta2),
                                  weight_decay=opt.wdecay)
    return optimizerD
|
9c788aae510ab6cf64f588646450b9c285934375
| 159,934 |
def _substitute(template, fuzzer, benchmark):
    """Replaces {fuzzer} or {benchmark} with |fuzzer| or |benchmark| in
    |template| string."""
    return template.format(fuzzer=fuzzer, benchmark=benchmark)
|
84e110aa6aca334176ae1bd682472c008d0ee228
| 291,880 |
import yaml
def read_params_from_file(fname):
    """Read model parameters from a file.
    Parameters
    ----------
    fname : str
        Name of YAML-formatted parameters file.
    Returns
    -------
    dict
        A dict of parameters for the heat model.
    """
    with open(fname, "r") as fp:
        params = yaml.safe_load(fp)
    return params
|
cec5fe2af0578a6288c415823618d005144f6151
| 472,702 |
def _CustomMachineTypeString(cpu, memory_mib):
    """Creates a custom machine type from the CPU and memory specs.
    Args:
        cpu: the number of cpu desired for the custom machine type
        memory_mib: the amount of ram desired in MiB for the custom machine
            type instance
    Returns:
        The custom machine type name for the 'instance create' call
    """
    machine_type = 'db-custom-{0}-{1}'.format(cpu, memory_mib)
    return machine_type
|
bfa285d9de4a66c9b88ef89925ce5266bb8e68c7
| 479,528 |
def _strip_shape(name: str) -> str:
    """Strip the leading '/' from a dimension name."""
    if name[0] == "/":
        return name[1:]
    return name
|
ba6329bdca1cf4e7c4db7daf0fcbb3c5c5f475b7
| 579,191 |
def serialize_results(results: dict) -> str:
    """Serialize a results dict into something usable in markdown."""
    n_first_col = 20
    ans = []
    for k, v in results.items():
        s = k + " " * (n_first_col - len(k))
        s = s + f"| {v[0]*100:.1f} | {v[1]*100:.1f} |"
        ans.append(s)
    return "\n".join(ans)
|
c64ba7bbf898e3085290615d163401b7c2382ca6
| 626,979 |
def is_in_any_txt(txt, within_txts_list, case_insensitive=False):
    """is "txt" in any of the texts list ?"""
    for within_txt in within_txts_list:
        if case_insensitive:  # slower
            if txt.lower() in within_txt.lower():
                return True
        else:
            if txt in within_txt:
                return True
    return False
|
5e5316fdbaf21617ee8ae3172a0efe4a3c17ae8c
| 669,588 |
def tensor_size_bytes(tensor, unit='MB'):
    """
    Get the size of the tensor in bytes, or a unit that's a multiple of bytes
    :param tensor: the pytorch tensor
    :param unit: GigaBytes or GB (assumes GB=1e9 Bytes), MegaBytes or MB (assumes MB=1e6 Bytes),
        KiloBytes or KB (assumes KB=1e3 Bytes), Bytes or B
    :return: the size of the tensor in the desired unit
    """
    if 'G' in unit.upper():
        factor = 1e9
    elif 'M' in unit.upper():
        factor = 1e6
    elif 'K' in unit.upper():
        factor = 1e3
    else:
        factor = 1.0
    return (tensor.element_size() * tensor.nelement()) / factor
|
f8d68b99597b2bcfb90ff18f7f1d9f9b02478de1
| 110,700 |
def slicex(df, values, keep_margins=True):
    """
    Return an index-wise slice of df, keeping margins if desired.
    Assuming a Quantipy-style view result this function takes an index
    slice of df as indicated by values and returns the result.
    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe that should be sliced along the index.
    values : list-like
        A list of index values that should be sliced from df.
    keep_margins : bool, default=True
        If True and the margins index row exists, it will be kept.
    Returns
    -------
    df : pandas.DataFrame
        The sliced dataframe.
    """
    # If the index is from a frequency then the rule should be skipped
    if df.index.levels[1][0] == '@':
        return df
    name_x = df.index.levels[0][0]
    slicer = [(name_x, value) for value in values]
    if keep_margins and (name_x, 'All') in df.index:
        slicer = [(name_x, 'All')] + slicer
    df = df.loc[slicer]
    return df
|
6719eba7824b04b2e1d5735fd699412190a8811e
| 53,718 |
def _step_id(step):
    """Return the 'ID' of a deploy step.
    The ID is a string, <interface>.<step>.
    :param step: the step dictionary.
    :return: the step's ID string.
    """
    return '.'.join([step['interface'], step['step']])
|
c1ff629a09758ff5817f76ea487af467fbbffc84
| 68,298 |
def add_titlebox(ax, text, x=0.05, y=0.9, alignment='left', fontsize=12.5):
    """
    add title text box.
    Arguments:
        ax {`matplotlib.axes.Axes`} -- the `Axes` instance used for plotting.
        text {string} -- title text
    Keyword Arguments:
        x {float} -- title x position (default: {0.05})
        y {float} -- title y position (default: {0.9})
        alignment {str} -- horizontal alignment (default: {'left'})
        fontsize {float} -- text font size (default: {12.5})
    Returns:
        `matplotlib.axes.Axes` -- the `Axes` instance used for plotting.
    """
    ax.text(
        x, y, text, horizontalalignment=alignment, transform=ax.transAxes,
        bbox=dict(facecolor='white'), fontsize=fontsize)
    return ax
|
ce8804d5f25097046c54cbeb4c15936888539eae
| 554,533 |
import re
def byteform_to_num(byte_format):
    """Converts a string expressing a size of a file in bytes into the
    corresponding number of bytes. Accepts commas and decimal points in nums.
    Allows 'b', 'mb', 'gb', and variants like 'bytes', but not 'tb', 'zb', etc.
    Note that format_bytes is a lossy function (it doesn't retain all sigfigs
    by default), so byteform_to_num(format_bytes(x)) does not always equal x.
    """
    x = re.findall(r"([\d,\.]+)\s*([a-z]*)", byte_format, re.I)[0]
    num, suf = float(x[0].replace(",", "")), x[1].lower()
    if suf == "" or suf[0] == "b":
        return num
    elif suf[:2] == "kb":
        return num * 1e3
    elif suf[:2] == "mb":
        return num * 1e6
    elif suf[:2] == "gb":
        return num * 1e9
    raise ValueError(f"byteform_to_num couldn't recognize quantifier '{suf}'")
|
300ede4ef120b9e3a8db85effcb6611dd9299953
| 10,953 |
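A usage sketch for byteform_to_num with illustrative size strings:
print(byteform_to_num("1,024 bytes"))  # 1024.0
print(byteform_to_num("1.5 MB"))       # 1500000.0
print(byteform_to_num("2gb"))          # 2000000000.0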
import tempfile
def write_tmpacl(acl, process_name='_tmpacl'):
    """Write a temporary file to disk from a Trigger acl.ACL object & return the filename"""
    # Note: tempfile.mktemp() is race-prone; kept here to preserve the original API.
    tmpfile = tempfile.mktemp() + process_name
    with open(tmpfile, 'w') as f:
        for x in acl.output(acl.format, replace=True):
            f.write(x)
            f.write('\n')
    return tmpfile
|
7aa735a85dc60ce1975769045c6ee52031f7ba38
| 360,283 |
def set_config_var(from_file, from_env, default):
    """
    Set a configuration value based on the hierarchy of:
    default => file => env
    :param from_file: value from the configuration file
    :param from_env: value from the environment
    :param default: default configuration value
    :return: value to use as configuration
    """
    if from_env is not None:
        return from_env
    elif from_file is not None:
        return from_file
    else:
        return default
|
48e0fa5aa4b9e67ceb4a7cb1fea7e23529a5f3bf
| 102,741 |
def get_synsets(lemma, synset_dict):
    """Return synonym set given a word lemma.
    The function requires that the synset_dict is passed into it. In our case
    we provide downloadable models from MCR (Multilingual-Central-Repository).
    :cite:`gonzalez2012multilingual`. If the lemma is not found in the
    synset_dict, then this function returns a set with the lemma in it.
    :param lemma: Lemma to look up in the synset
    :type lemma: string
    :param synset_dict: key-value pairs, lemma to synset
    :type synset_dict: Python dict
    :return: The set of synonyms of a given lemma
    :rtype: Python set of strings
    """
    return synset_dict.get(lemma, {lemma})
|
1e0ef9669a6bcdc3f8c1a19013d1cbf109266330
| 296,981 |
import traceback
def _str_backtrace(backtrace=None):
    """ Return a string representation of an existing or new backtrace """
    if backtrace:
        return "".join(traceback.format_tb(backtrace))
    else:
        return "".join(traceback.format_stack()[:-1])
|
6f4038fde8d8e0ab350cb9a41bfabe75b4017997
| 506,266 |
def vshale_to_vclay(vshale, multiplier):
    """
    Converts a shale volume to clay volume using a multiplier.
    Parameters
    ----------
    vshale : float
        Shale volume
    multiplier : float
        Shale to clay multiplier (decimal)
    Returns
    -------
    float
        Returns a clay volume.
    References
    ----------
    Bhuyan, K. and Passey, Q. R. (1994) ‘Clay estimation from GR and neutron-density porosity logs’, SPWLA 35th Annual Logging Symposium, pp. 1–15.
    """
    return vshale * multiplier
|
f46fd70940a513d990b25d505a59d516ae525a15
| 356,192 |
def is_nonterminal(text: str):
    """Return True if string text starts with ( and doesn't end with )."""
    return text[0] == '(' and not text[-1] == ')'
|
d668d1edf541e5a381282caadb36daa974c17d8a
| 545,782 |
def read_mg_types(mgfile):
    """
    Read the metagenome types from a file where the first four columns
    are [domain, type, species, seqid]. For example, sharks_fish.distance.labelled.tsv
    which is the output from join.pl
    :param mgfile: the sharks_fish.distance.labelled.tsv file
    :return: a dict of all the metagenomes
    """
    val = {}
    with open(mgfile, 'r') as f:
        for l in f:
            p = l.strip().split("\t")
            if "Metagenome" in p[0]:
                val[p[3]] = [p[1], p[2]]
    return val
|
b7eabc1a52199951404f114e0732020a6c807334
| 133,897 |
def _parse_line(line):
    """If line is in the form Tag=value[#Comment] returns a (tag, value)
    tuple, otherwise returns None."""
    non_comment = line.split('#')[0].strip()
    if len(non_comment) > 0:
        tag_value = [x.strip() for x in non_comment.split('=')]
        if len(tag_value) != 2:
            raise Exception("Line '" + line + "' not understood")
        return (tag_value[0], tag_value[1])
    return None
|
b39f6862768fe8937acd17a9830329dc213fc31f
| 477,679 |
def quotify(comment, username):
    """
    Converts 'Foo\nbar' to:
        > @username said:
        > Foo
        > bar
    followed by two trailing newlines.
    """
    header = "@%(username)s said:" % {'username': username, }
    lines = comment.splitlines()
    quote = "\n> ".join(lines)
    quote = "> %(header)s\n> %(quote)s\n\n" % {'header': header, 'quote': quote}
    return quote
|
ad601fb15199cbf481daea26b9baecb4b0b0028d
| 459,403 |
def normalize(name):
    """
    normalizes the name of the company for comparison by lowercasing and substitution
    :param name: the name string
    :return: normalized string
    """
    if name == "-":
        return "company_name_not_available"
    name = name.lower().strip().replace("co ", "company").replace("co.", "company").replace("&", "and")
    name = name.lower().strip().replace("inc ", "incorporated").replace("inc.", "incorporated")
    name = ''.join(e for e in name if e.isalnum()).replace("ltd", "limited")
    return name
|
bfabb84b004c242a6c34b8a4efd54e265c1c711d
| 321,751 |
def get_normalized(x, xmax, xmin):
    """ Normalize a value given max and min """
    x_norm = (x - xmin) / (xmax - xmin)
    return x_norm
|
6db770cf71eb73d2a5cddbfc5bc1f88db92cf397
| 280,592 |
from datetime import datetime
def text_to_datetime(dt):
    """Convert text date and time to datetime format then put in ISO8601 format."""
    converted = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
    reformat_iso = datetime.strftime(converted, "%Y-%m-%dT%H:%M:%S")
    return reformat_iso
|
5d7e9c2a744e4b56d815987cd5110fccd0476cc8
| 114,320 |
def interaction_test(configuration, order=None):
    """
    Returns True if the configuration has an interaction
    Parameters
    ----------
    configuration : list
        Sublattice configuration to check.
    order : int, optional
        Specific order to check for. E.g. a value of 3 checks for ternary interactions
    Returns
    -------
    bool
        True if there is an interaction.
    Examples
    --------
    >>> configuration = [['A'], ['A','B']]
    >>> interaction_test(configuration)
    True
    >>> interaction_test(configuration, order=2)
    True
    >>> interaction_test(configuration, order=3)
    False
    """
    interacting_species = [len(subl) for subl in configuration
                           if isinstance(subl, (tuple, list))]
    if order is None:  # checking for any interaction
        return any([subl_occupation > 1 for subl_occupation in interacting_species])
    else:
        return any([subl_occupation == order for subl_occupation in interacting_species])
|
052d82d9c218ae54abb739e789d5789f83029dcf
| 253,465 |
def dichotomy(values, value):
    """
    values: array of integers in ascending order
    value: value to return its position in values
    return: False if value is not in values, else the position of value in the list
    """
    beginning = 0
    end = len(values) - 1
    while beginning <= end:
        # middle index is calculated at each iteration
        middle = (beginning + end) // 2
        if values[middle] == value:
            return middle
        # narrow the search to the half that can contain value
        if value > values[middle]:
            beginning = middle + 1
        else:
            end = middle - 1
    return False
|
4aa89c0b28a92cf371c1b4b688931cdf99331c70
| 304,392 |
import torch
def particle_to_quantile(predictions):
    """ Converts a particle prediction to a quantile prediction.
    Not fully differentiable because of the sorting operation involved. Should be thought of as a permutation function
    Args:
        predictions (tensor): array [batch_size, n_particles], a batch of particle predictions
    Returns:
        tensor: a batch of quantile predictions
    """
    return torch.sort(predictions, dim=1)[0]
|
11a88bee1b3f420344a6d5f8f0eab027856ec27d
| 290,176 |
def get_mu_tilda(x_i, r, n):
    r"""Calculates the conditional descendant normal distribution *expectation*
    for generation-gap n.
    Latex equation:
        \tilde{\mu}_{i+n} = r^n X_i
    (See the paper for the derivation.)"""
    return (r**n) * x_i
|
3ff119704d3c9db2fbc1b52c683948a44a0ceb89
| 243,673 |
def imageSplit(
        data,
        labelcol='Label_nr',
        imagedim=[455, 423],
        testsize=0.3):
    """
    Split test and training datasets, taking the testsize proportion from the
    top of the image as test dataset and the rest as training dataset
    Parameters
    ----------
    data: pandas.DataFrame
        dataframe with data
    labelcol: str (optional)
        name of label column
    imagedim: list (optional)
        list containing image size in width and height
    testsize: float
        proportion of image to use for test dataset (values between 0 and 1)
    Examples
    --------
    >>> x_train, x_test, y_train, y_test = imageSplit(
            data_raw,
            "Label_nr",
            [455, 423],
            0.2)
    Returns
    -------
    list
        test and training datasets
    """
    row_count = round(imagedim[1] * testsize) * imagedim[0]
    x = data.drop(labelcol, axis=1)
    y = data[labelcol]
    x_test = x.iloc[0:row_count]
    x_train = x.iloc[row_count:]  # start at row_count so no row is dropped
    y_test = y.iloc[0:row_count]
    y_train = y.iloc[row_count:]
    return x_train, x_test, y_train, y_test
|
1b3501572242e4091259970a882bd5fd950ea0ca
| 345,082 |
def _resample_params(N, samples):
    """Decide whether to do permutations or random resampling
    Parameters
    ----------
    N : int
        Number of observations.
    samples : int
        ``samples`` parameter (number of resampling iterations, or < 0 to
        sample all permutations).
    Returns
    -------
    actual_n_samples : int
        Adapted number of resamplings that will be done.
    samples_param : int
        Samples parameter for the resample function (-1 to do all permutations,
        otherwise same as n_samples).
    """
    n_perm = 2 ** N
    if n_perm - 1 <= samples:
        samples = -1
    if samples < 0:
        n_samples = n_perm - 1
    else:
        n_samples = samples
    return n_samples, samples
|
f1493aa3409cef023d05c9e74951d59f481b11bc
| 121,021 |
def is_influx_rule(rule):
    """
    A function that detects if the alert rule defined uses influx.
    Influx rules should not contain a '#', so splitting on it yields a
    single-element list.
    """
    return len(rule.check_field.split('#')) == 1
|
60f168f2f859522607127fa82cc32f75401794f6
| 424,383 |
def timedelta_to_seconds(delta):
    """
    Take a timedelta object and return the total number of seconds.
    """
    # Note: ignores microseconds; delta.total_seconds() would include them.
    return delta.seconds + (delta.days * 86400)
|
23bc2b9e36f58db68e39625e41dd4f0728d25457
| 646,039 |
def strftime(format, p_tuple=None):  # real signature unknown; restored from __doc__
    """
    strftime(format[, tuple]) -> string
    Convert a time tuple to a string according to a format specification.
    See the library reference manual for formatting codes. When the time tuple
    is not present, current time as returned by localtime() is used.
    """
    return ""
|
218d2a78bbd6bacadb2dcfd17800708dd7526038
| 539,830 |
def list_square_to_input(list_signs, list_index_signs):
    """
    Take the list of squares filled in by a sign and create a 9x1
    array (input layer for a neural network) with values -1 for a cross,
    0 for an empty square and 1 for a circle.
    """
    input_layer = [0 for k in range(9)]
    for i in range(len(list_signs)):
        if type(list_signs[i]) == list:
            input_layer[list_index_signs[i]] = -1
        else:
            input_layer[list_index_signs[i]] = 1
    return input_layer
|
532918046029493ca8e2fb43369808e46dcd8501
| 79,743 |
import re
def alphanum_key(key):
    """
    Used to generate a natural numeric sort key
    Example: ("p10" => ["p", 10]) is greater than ("p1" => ["p", 1])
    adapted from https://stackoverflow.com/a/2669120/240553
    """
    def convert(text):
        return int(text) if text.isdigit() else text
    return [convert(c) for c in re.split(r'([0-9]+)', key)]
|
16a0f085b2051dbd951b801dd6713845eae9c09c
| 88,034 |
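A usage sketch showing alphanum_key as a natural-sort key:
print(sorted(["p10", "p1", "p2"], key=alphanum_key))  # ['p1', 'p2', 'p10']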
def abs(var):
    """
    Wrapper function for __abs__
    """
    return var.__abs__()
|
c2e834da463e9e3e57933489537545d927f40e85
| 615,720 |
def to_digits_base10(n):
    """
    Return the digits of a number in base 10.
    """
    digits = []
    remaining = n
    while remaining > 0:
        digit = remaining % 10
        remaining = (remaining - digit) // 10
        digits.append(digit)
    return digits[::-1]
|
5acc6a2ef1e10bc3142371944232c7d1bcad3a32
| 24,843 |
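A usage sketch for to_digits_base10:
print(to_digits_base10(4096))  # [4, 0, 9, 6]
# Note that 0 yields [] since the loop body never runs.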
def power_spectrum(k, a):
    """Simple function for power laws
    Args:
        k (float array): wave number
        a (float): power law
    Returns:
        float array: k^-a (note minus sign is assumed!)
    """
    return k**-a
|
4caa3bf6af923d4406a939cf14cd9d8893ef2e09
| 523,731 |
def get_turn_on_descriptions(get_turn_on, initial_state, current_state):
    """
    Get all 'turn on' descriptions from the current state (if any).
    Parameters
    ----------
    get_turn_on: function
        Function that gets the color of light which is on.
    initial_state: np.ndarray
        Initial state of the environment.
    current_state: np.ndarray
        Current state of the environment.
    Returns
    -------
    descr: list of str
        List of 'turn on' descriptions satisfied by the current state.
    """
    turn_on_descriptions = []
    light_on = get_turn_on(initial_state, current_state)
    for c in light_on:
        turn_on_descriptions.append('Turn on the {} light'.format(c))
    return turn_on_descriptions.copy()
|
fe5e2bbca20be6fe3e008fd6167ec035d13d5fd7
| 279,702 |
def get_default_guess_params(model, df, pressure_key, loading_key):
    """
    Get dictionary of default parameters for starting guesses in data fitting
    routine.
    The philosophy behind the default starting guess is that (1) the saturation
    loading is close to the highest loading observed in the data, and (2) the
    default assumption is a Langmuir isotherm.
    Reminder: pass your own guess via `param_guess` in instantiation if these
    default guesses do not lead to a converged set of parameters.
    :param model: String name of analytical model
    :param df: DataFrame adsorption isotherm data
    :param pressure_key: String key for pressure column in df
    :param loading_key: String key for loading column in df
    """
    # guess saturation loading to 10% more than highest loading
    saturation_loading = 1.1 * df[loading_key].max()
    # guess Langmuir K using the guess for saturation loading and lowest
    # pressure point (but not zero)
    df_nonzero = df[df[loading_key] != 0.0]
    idx_min = df_nonzero[loading_key].idxmin()
    langmuir_k = df_nonzero[loading_key].loc[idx_min] / \
        df_nonzero[pressure_key].loc[idx_min] / (
        saturation_loading - df_nonzero[loading_key].loc[idx_min])
    if model == "Langmuir":
        return {"M": saturation_loading, "K": langmuir_k}
    if model == "Freundlich":
        return {"n_inv": 0.5, "K": 10}
    if model == "Quadratic":
        # Quadratic = Langmuir when Kb = Ka^2. This is our default assumption.
        # Also, M is half of the saturation loading in the Quadratic model.
        return {
            "M": saturation_loading / 2.0,
            "Ka": langmuir_k,
            "Kb": langmuir_k**2.0
        }
    if model == "BET":
        # BET = Langmuir when Kb = 0.0. This is our default assumption.
        return {
            "M": saturation_loading,
            "Ka": langmuir_k,
            "Kb": langmuir_k * 0.01
        }
    if model == "DSLangmuir":
        return {
            "M1": 0.5 * saturation_loading,
            "K1": 0.4 * langmuir_k,
            "M2": 0.5 * saturation_loading,
            "K2": 0.6 * langmuir_k
        }
    if model == "Henry":
        return {"KH": saturation_loading * langmuir_k}
    if model == "TemkinApprox":
        # equivalent to Langmuir model if theta = 0.0
        return {"M": saturation_loading, "K": langmuir_k, "theta": 0.0}
|
5921078cf6c566b4b4bf040941b3241ac519d431
| 410,662 |
def recall_pos_conf(conf):
    """compute recall of the positive class"""
    TN, FP, FN, TP = conf
    if (TP + FN) == 0:  # recall is undefined when there are no positives
        return float('nan')
    return TP / float(TP + FN)
|
333f0aec1a6e84e0affa340c8bfcaa44d153230c
| 574,820 |
def m3_hr2cfm(m3_hr):
    """m^3/hr -> cfm"""
    return 0.58857778 * m3_hr
|
f36cde87efb1c9a34477eee128450ea8d157add8
| 315,486 |
def get_geo_coordinates(tweet):
    """
    Get the user's geo coordinates, if they are included in the payload
    (otherwise return None)
    Args:
        tweet (Tweet or dict): A Tweet object or dictionary
    Returns:
        dict: dictionary with the keys "latitude" and "longitude"
              or, if unavailable, None
    Example:
        >>> from tweet_parser.getter_methods.tweet_geo import get_geo_coordinates
        >>> tweet_geo = {"geo": {"coordinates": [1,-1]}}
        >>> get_geo_coordinates(tweet_geo)
        {'latitude': 1, 'longitude': -1}
        >>> tweet_no_geo = {"geo": {}}
        >>> get_geo_coordinates(tweet_no_geo)  # returns None
    """
    if "geo" in tweet:
        if tweet["geo"] is not None:
            if "coordinates" in tweet["geo"]:
                [lat, lon] = tweet["geo"]["coordinates"]
                return {"latitude": lat, "longitude": lon}
    return None
|
a78af74152670cc29277a9b3c4f23dce4d778c7e
| 110,100 |
def get_file_attachment(res_client, incident_id, artifact_id=None, task_id=None, attachment_id=None):
    """
    call the Resilient REST API to get the attachment or artifact data
    :param res_client: required for communication back to resilient
    :param incident_id: required
    :param artifact_id: optional
    :param task_id: optional
    :param attachment_id: optional
    :return: byte string of attachment
    """
    if incident_id and artifact_id:
        data_uri = "/incidents/{}/artifacts/{}/contents".format(incident_id, artifact_id)
    elif attachment_id:
        if task_id:
            data_uri = "/tasks/{}/attachments/{}/contents".format(task_id, attachment_id)
        elif incident_id:
            data_uri = "/incidents/{}/attachments/{}/contents".format(incident_id, attachment_id)
        else:
            raise ValueError("task_id or incident_id must be specified with attachment")
    else:
        raise ValueError("artifact or attachment or incident id must be specified")
    # Get the data
    return res_client.get_content(data_uri)
|
8dcf887f2773addbbf8a29791157f9be833ab9e9
| 646,803 |
import json
def parseEnvLog(envLog):
    """
    Parse env log file
    Parameters
    ----------
    envLog : str
        Env log file path.
    Returns
    -------
    sysInfo : dict
        system information.
    progList : dict
        programs used.
    """
    # parse the env log
    with open(envLog) as f:
        envdata = f.read().splitlines()
    sysInfo = {}
    progList = {}
    for l in envdata:
        if not l.startswith("#"):
            if not sysInfo:
                sysInfo = json.loads(l)
            else:
                thisProgram = json.loads(l)
                if 'name' in thisProgram:
                    progList[thisProgram['name']] = thisProgram
    return sysInfo, progList
|
896e44939a9f4ec70538c7c556c93ae72f894911
| 218,462 |
def fake_task_sync(data):
    """Fake task for testing."""
    return data
|
e51214646642999147ac4f5a9fba71447f63154f
| 133,085 |
from typing import Any
from typing import Mapping
from typing import Counter
from typing import Iterable
def process_result(result) -> Any:
    """Make the result of a query JSON-serializable.
    Parameters
    ----------
    result :
        The result of a query
    Returns
    -------
    :
        The processed result
    """
    # Any fundamental type
    if isinstance(result, (int, str, bool, float)):
        return result
    # Any dict query
    elif isinstance(result, (dict, Mapping, Counter)):
        res_dict = dict(result)
        return {k: process_result(v) for k, v in res_dict.items()}
    # Any iterable query
    elif isinstance(result, (Iterable, list, set)):
        list_res = list(result)
        # Check for empty list
        if list_res and hasattr(list_res[0], "to_json"):
            list_res = [res.to_json() for res in list_res]
        return list_res
    else:
        raise TypeError(f"Don't know how to process result of type {type(result)}")
|
710e791b8aa802f0d147bf446a2db272a52033a4
| 169,541 |
def addCol(adjMatrix):
    """Adds a column to the end of adjMatrix and returns the index of the column that was added"""
    for j in range(len(adjMatrix)):
        adjMatrix[j].append(0)
    return len(adjMatrix[0]) - 1
|
29ee11953cbdb757e8ea80e897751059e42f1d90
| 6,572 |
from typing import Sequence
from typing import Dict
def _get_dict(l: Sequence[str], name_as_key: bool, starts_with: int = 1) -> Dict[str, str]:
    """
    Converts a list into a dict, where keys/values are the list indices.
    :param l: A list of categories
    :param name_as_key: Whether to return the dict with category names as key (True)
    :param starts_with: index count start
    :return: A dictionary of list indices/list elements.
    """
    if name_as_key:
        return {v: str(k) for k, v in enumerate(l, starts_with)}
    return {str(k): v for k, v in enumerate(l, starts_with)}
|
6f8c682c99d332648443f0cfeea59e02f54162fc
| 569,448 |
def tidy_source(code):
    """
    Strips and fixes the indentation of python code.
    eg.
        |    def foo():
        |        bar()
    or
        |def foo():
        |        bar()
    becomes
        |def foo():
        |    bar()
    """
    raw_lines = code.splitlines()
    lines = []
    # Remove all empty lines
    for line in raw_lines:
        if line.strip() != "":
            lines.append(line)
    if len(lines) > 1:
        if lines[0].rstrip()[-1] == ":":
            indents = 4  # If you don't use 4 spaces, you will break stuff.
        else:
            indents = 0
        lines[0] = lines[0].lstrip()
        remove = len(lines[1]) - len(lines[1].lstrip()) - indents
        code = lines[0]
        for idx in range(1, len(lines)):
            lines[idx] = lines[idx][remove:]
            code = "\n".join((code, lines[idx]))
    else:
        code = code.strip()
    return code
|
48748013b8e2ccd5d118ba661807d3ff9e6f052e
| 271,790 |
def _compare_first_n(series_1, series_2, n):
    """
    Utility function that sees if the first n rows of a Pandas series are the same
    """
    for i in range(n):
        if series_1.iloc[i] != series_2.iloc[i]:
            return False
    return True
|
a06059cebd758a12953fc31eb4b06867ea3851b0
| 52,562 |
def check_intensifiers(text, INTENSIFIER_MAP):
    """
    Utility function to check intensifiers of an emotion
    :param text: text chunk with the emotion term
    :return: boolean value and booster value for intensifiers
    """
    # BOOSTER_MAP = {"B_INCR": 2,
    #                "B_DECR": 0.5}
    intensity_word_list = INTENSIFIER_MAP
    has_intensity = False
    booster = 'NULL'
    for int_term in intensity_word_list:
        intensifier = int_term.split(':')[0].strip()
        if intensifier in text:
            has_intensity = True
            booster = float(int_term.split(':')[2].strip())
    return has_intensity, booster
|
2c35b6b66395bc7105b9fb1b9cf7f04b5686cb8d
| 14,278 |
import re
def limit(pageurl):
    """Finds the current '?start=' limit from the page"""
    b = re.findall('Τέλος.*start=([0-9]+)', pageurl)
    slimit = int(b[0])
    return slimit
|
3fcaa343aed8e48820db79e14bd1bc9fcf75f2f7
| 568,399 |
def phase_fold(times, period):
    """
    Folds the given light curve over its period to express the curve in terms
    of phase rather than time.
    Parameters
    ----------
    times : numpy.ndarray
        The light curve times.
    period : numpy.float64
        The light curve period.
    Returns
    -------
    phase_times : numpy.ndarray
        The light curve times in terms of phase.
    """
    phase_times = (times % period) / period
    return phase_times
|
949c71bab8d070e312c6a41b4d5393b0375de6d2
| 413,584 |
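A usage sketch for phase_fold with illustrative times (assumes numpy):
import numpy as np
times = np.array([0.0, 1.5, 2.5, 5.0])
print(phase_fold(times, period=2.0))  # phases: [0.0, 0.75, 0.25, 0.5]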
def widget_generator(klass, fields):
    """
    Takes in a class and a list of tuples consisting of field_name and an attribute dictionary
    :param klass: Class of the input widget
    :param fields: List of tuples mapping field names to attribute dictionaries
    :return: A dict of input widget instances
    """
    widgets = {}
    for field_name, attrs in fields:
        widgets[field_name] = klass(attrs=attrs)
    return widgets
|
5e09950148f902ace17b6f52e06ee0f16760c195
| 687,610 |
import random
def generateDistribution(maxLength, totalCount, numDice, randomness):
    """
    Returns a bell curve distribution.
    maxLength controls how wide your x-axis will be.
    totalCount is the total number of points you want to generate.
    numDice is the number of random numbers you want to add together to create
        one length. The more dice you roll, the steeper your curve will be.
    randomness is a number between 0 and 100 that represents a percentage chance
        that a number is randomly dropped from the number creation. This is
        useful for shifting a data set to the left and gives a better
        horizontal asymptote.
    """
    lengthList = list()
    for i in range(totalCount):
        # Generate numbers
        num = 0
        for d in range(numDice):
            # Pass randomness test
            randomizer = random.randint(1, 100)
            if (randomness < randomizer):
                # Add random number in range to total
                num += random.randint(0, (maxLength // numDice))
        if (num >= maxLength):
            num = maxLength - 1
        lengthList.append(num)
    countList = list(0 for i in range(maxLength))
    for l in lengthList:
        countList[l] += 1
    return countList, lengthList
|
0415a90ece99c22634f6abb2b652e93c410f9310
| 589,578 |
import math
def xy_intersect_of_polar_coords(rho1, theta1, rho2, theta2):
    """(x, y) coordinates of 2 lines defined by their parameters in polar coordinates
    @param rho1: distance from origin to line 1
    @param theta1: angle in radians between horizontal line and perpendicular to line 1
    @param rho2: distance from origin to line 2
    @param theta2: angle in radians between horizontal line and perpendicular to line 2
    @note: This function assumes that the lines cross and doesn't take special
        care to be numerically stable. Its intended usage is to straighten a
        rectangle that has only been slightly warped by the perspective of the
        camera taking its picture.
    >>> xy_intersect_of_polar_coords(0, 0, 0, math.pi / 2)
    (0.0, 0.0)
    >>> xy_intersect_of_polar_coords(0, math.pi/4, math.sqrt(2), - math.pi / 4)
    (1.0, -1.0000000000000002)
    """
    # Represent line 1 as a*x + b*y = e
    a = math.cos(theta1)
    b = math.sin(theta1)
    e = rho1
    # Represent line 2 as c*x + d*y = f
    c = math.cos(theta2)
    d = math.sin(theta2)
    f = rho2
    # These two lines cross in x, y where x, y is the solution of the 2 linear
    # equations defined above.
    det = a * d - b * c
    x = (d * e - b * f) / det
    y = (a * f - c * e) / det
    return x, y
|
144a0518878af5af2a37d1d4dfbdf8f0d39954d3
| 312,283 |
def encode_all(
        header: bytes, stream: bytes, funcs: bytes, strings: bytes, lib_mode: bool
) -> bytes:
    """
    Combine the various parts of the bytecode into a single byte string.
    Parameters
    ----------
    header: bytes
        The bytecode's header data.
    stream: bytes
        The actual bytecode instructions.
    funcs: bytes
        The function pool.
    strings: bytes
        The string pool.
    lib_mode: bool
        Whether to build a library bytecode file or an application one.
    Returns
    -------
    bytes
        The full bytecode file as it should be passed to the VM.
    """
    if lib_mode:
        return b"".join((header, b"\r\n\r\n\r\n", strings, b"\r\n\r\n", funcs))
    return b"".join(
        (header, b"\r\n\r\n\r\n", strings, b"\r\n\r\n", funcs, b"\r\n\r\n", stream)
    )
|
9325a01b6b485feb3c634b56df1da9b6c9182e04
| 578,947 |
def get_two_hospital_plot_labels(measurement_type):
    """A function to get necessary labels for the two hospitals plot"""
    if measurement_type == "w":
        title = "Waiting times of two hospitals over different distribution of patients"
        y_axis_label = "Waiting Time"
    else:
        title = (
            "Blocking times of two hospitals over different distribution of patients"
        )
        y_axis_label = "Blocking Time"
    x_axis_label = "Hospital 1 arrival proportion"
    return (x_axis_label, y_axis_label, title)
|
1be47f2a320eb55c5cb1da40c56e6d3a6fc6615d
| 166,183 |
def indented_setup(setup, indentation):
    """Indent each line of setup with given amount of indentation.
    >>> indented_setup("x = 1\\n", " ")
    ' x = 1\\n'
    >>> indented_setup("x = 1\\ny = 2\\n", " ")
    ' x = 1\\n y = 2\\n'
    """
    return ''.join([indentation + line for line in setup.splitlines(True)])
|
8ca289d78253cea6435399f0a44cdce3bd1e32a1
| 217,809 |
import requests
def lookup_mailinglists(prefix):
    """Find all mailing lists owned by `prefix`"""
    req = requests.get("https://lists.srcf.net/getlists.cgi", params={'prefix': prefix})
    req.raise_for_status()
    assert req.headers['content-type'].split(';')[0] == 'text/plain'
    return [listname for listname in req.text.split("\n") if listname]
|
293043fdead6affd667655a5a2135dd32c6f7893
| 217,735 |
from typing import List
def convertTo(data) -> List[dict]:
    """
    Converts scores from new to old format
    :param data: New format data
    :return: Old format data
    """
    return [
        {
            'Team': {
                'Number': team['teamno'],
                'DisplayName': team['teamname']
            },
            'Scores': team['scores'],
            'TotalGuess': -1
        } for team in data
    ]
|
68d4686e93bb2fb4b754643168f3b181d9ec9438
| 207,367 |
def jaccard(r_tokens: list, s_tokens: list) -> float:
    """Computes jaccard similarity.
    JAC(r, s) = |r ∩ s| / |r ∪ s|
    Parameters
    ----------
    r_tokens : list
        First token list.
    s_tokens : list
        Second token list.
    Returns
    -------
    Jaccard similarity of r and s.
    """
    r_set = set(r_tokens)
    s_set = set(s_tokens)
    return len(r_set.intersection(s_set)) / len(r_set.union(s_set))
|
427bd308e153cf6781ada6ba45f4bdd0b8f73220
| 82,120 |
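A usage sketch for jaccard:
print(jaccard(["a", "b", "c"], ["b", "c", "d"]))  # 0.5 (2 shared / 4 distinct)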
import torch
import math
def chebyshevLobatto(n: int):
    """
    Compute the chebyshev lobatto points which
    are in the range [-1.0, 1.0]
    Args :
        n : number of points
    Returns :
        A tensor of length n with x locations from
        negative to positive including -1 and 1
        [-1,...,+1]
    """
    k = torch.arange(0, n)
    ans = -torch.cos(k * math.pi / (n - 1))
    ans = torch.where(torch.abs(ans) < 1e-15, 0 * ans, ans)
    return ans
|
28ab75cad21812ed32e689b73ca535190de49e91
| 219,336 |
def filter_kwargs(kwargs, prefix):
    """Keep only kwargs starting with `prefix` and strip `prefix` from keys of kept items."""
    # Slice off the leading prefix only; str.replace would also hit later occurrences.
    return {k[len(prefix):]: v for k, v in kwargs.items() if k.startswith(prefix)}
|
3f1475a2a655f37094383491745996027d1363bb
| 532,270 |
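A usage sketch for filter_kwargs with hypothetical keyword names:
kwargs = {"opt_lr": 0.1, "opt_momentum": 0.9, "seed": 42}
print(filter_kwargs(kwargs, "opt_"))  # {'lr': 0.1, 'momentum': 0.9}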
def get_key(rule_tracker, value):
    """
    Given an event index, its corresponding key from the dictionary is returned.
    Parameters:
        rule_tracker (dict): Key-value pairs specific to a rule where key is an activity, pair is an event index
        value (int): Index of event in event log
    Returns:
        key: The key in rule_tracker that maps to value, or None if no match exists
    """
    for key in rule_tracker:
        if rule_tracker[key] == value:
            return key
|
1921e9a68d0df0867248ca83e2ba641101735fc7
| 708,421 |
def prefixify(d: dict, p: str):
    """Create dictionary with same values but with each key prefixed by `{p}_`."""
    return {f'{p}_{k}': v for (k, v) in d.items()}
|
aaad6b11640df7c5e288bdd039b039ce373dcba1
| 657,513 |
def _validate_args(user_input_args: tuple[str, ...],
                   num_allowed: int,
                   *args: str) -> list:
    """
    Ensures kwargs entered in a function call are acceptable.
    Args:
        user_input_args (tuple): The arg(s) input into a function.
        num_allowed (int): Limit, if any, on number of args allowed.
        args (iterable): The acceptable values for args.
    Raises:
        SystemExit: Raised if input doesn't match any accepted args.
    Returns:
        list: Containing user_input_args that passed the test, if any.
    """
    # Establish lists for testing input and containing inputs that passed.
    acceptable_args = [arg for arg in args]
    lowercase_args = [item.lower() for item in acceptable_args]
    accepted_args = []
    # If user didn't input an arg, skip testing.
    if not len(user_input_args) > 0:
        return []
    # Test user_input_args against acceptable_args.
    for item in user_input_args:
        # Check if non-string args are in acceptable_args.
        if not isinstance(item, str) and item in acceptable_args:
            accepted_args.append(item)
        # Test string args as case-insensitive.
        elif isinstance(item, str) and item.lower() in lowercase_args:
            # Ensure output arg is properly cased even if input arg wasn't.
            right_arg = acceptable_args[lowercase_args.index(item.lower())]
            accepted_args.append(right_arg)
        # Skip unacceptable user_input_arg, removing it from args list.
        else:
            print(f'{item} removed - not in {acceptable_args}.')
            continue
    # Return user_input_args that passed test if not more than num_allowed.
    if len(accepted_args) > 0 and len(accepted_args) <= num_allowed:
        return accepted_args
    # Exit program. user_input_args failed to validate.
    else:
        print('Keyword arg must be no more than'
              f' {num_allowed} of {", ".join(acceptable_args)}.')
        raise SystemExit
|
9e309329dd9f20799ed7c80c31af0cb5e046f3f0
| 142,212 |
from functools import reduce
from operator import mul
def prod(iterable):
    """Product of all values in an iterable, like sum() but for multiplication."""
    # NOTE: in Python 3.8 this is now available as math.prod()
    return reduce(mul, iterable, 1)
|
51a7fd605d2df46b4c81f89defbc66e474833b82
| 346,232 |
def manhattan_distance(x, y):
    """ Returns the Manhattan (City Block) distance between two lists
    """
    return sum(abs(a - b) for a, b in zip(x, y))
|
4887024603a8fe3398ec80a17d1d70fbe15fdfab
| 10,407 |
def get_resource_path(resource):
    """ Return the full path for the resource (with or without a parent).
    """
    return ("%s/%s" % (resource.parent and resource.parent.id or '',
                       resource.id.lstrip('/'))).lstrip('/')
|
2ece218a78f93050d008f5729839e68a9c1a4a06
| 122,533 |
def clean_path(path):
    """ remove index.html from end of a path, add / if not at beginning """
    path = path.split("index.html")[0]
    if not path.startswith("/"):
        path = "/" + path
    if not path.endswith("/"):
        path += "/"
    return path
|
55360f0e0729b9cb0510308f5e5be32a404f1e70
| 48,930 |
def linear_search(item, my_list):
    """
    Searching position by position
    :param item: the number to look for
    :param my_list: a list of integers
    :return: either True or False if the item is in the list or not.
    """
    for element in my_list:
        if item == element:
            return True
    return False
|
463c23c85626be396c06f56d913fca9b5972fc0e
| 19,110 |
import cgi
def check_api_v3_error(response, status_code):
    """
    Make sure that ``response`` is a valid error response from
    API v3.
    - check http status code to match ``status_code``
    :param response: a ``requests`` response
    :param status_code: http status code to be checked
    """
    assert not response.ok
    assert response.status_code == status_code
    # Should return json anyways..
    # (cgi.parse_header is deprecated since Python 3.11; kept here as written)
    content_type = cgi.parse_header(response.headers['content-type'])
    assert content_type[0] == 'application/json'
    assert content_type[1]['charset'] == 'utf-8'
    data = response.json()
    # This is an error!
    assert data['success'] is False
    assert 'error' in data
    assert 'result' not in data
    return data
|
ca1be2ac3cff99b06fc0aaba73a075ff90024c9b
| 675,264 |
import re
def Author_name_sep(value: str) -> list:
    """Splits the authors up into a list of different authors, splitting on commas and 'and'.
    :param value: string of author text
    :return: list of authors
    """
    return re.split(", +| +and +", value)
|
e0fca4494968e396b425606b25aa94cc0cc2085c
| 155,261 |
def cagr(prices):
    """
    Calculates the Compound Annual Growth Rate (CAGR) of a stock with given prices
    """
    delta = (prices.index[-1] - prices.index[0]).days / 365.25
    # use .iloc for positional access; plain [] positional indexing on a Series is deprecated
    return ((prices.iloc[-1] / prices.iloc[0]) ** (1 / delta) - 1) * 100
|
49fec9f5eb5cca70c9d9cd52f386a4f312a0671a
| 230,417 |
def _get_indentation(value: str) -> int:
    """Calculates the number of leading spaces in a string.
    Args:
        value: the string to check for leading spaces.
    Returns:
        An integer indicating the number of leading spaces in the string.
    """
    return len(value) - len(value.lstrip())
|
9e7d4923a9fde230bc4ce3d0c91685d8d7cbf78a
| 399,176 |
def unpad(string):
    # type: (str) -> str
    """
    Simple function that extracts the initial string from a padded string
    """
    return string[0:-ord(string[-1])]
|
375518cc656f8eaaf03e6626ad729a7f34ea71b3
| 255,225 |
def binary_search_iterative(array, target):
    """
    Search target element in the given array by iterative binary search
    - Worst-case space complexity: O(1)
    - Worst-case performance: O(log n)
    :param array: given array
    :type array: list
    :param target: target element to search
    :type target: Any
    :return: index of target element, -1 for not found
    :rtype: int
    """
    # check base case
    if len(array) == 0:
        return -1
    # lower bound
    lb = 0
    # upper bound
    ub = len(array) - 1
    while lb <= ub:
        mid = lb + (ub - lb) // 2
        # if element is present at the middle itself
        if target == array[mid]:
            return mid
        # if element is greater than mid, then it
        # can only be present in right subarray
        elif target > array[mid]:
            lb = mid + 1
        # else the element can only be present
        # in left subarray
        else:
            ub = mid - 1
    return -1
|
0cca6c4344e384b058408e935c663409b0f78b00
| 585,589 |
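A usage sketch for binary_search_iterative:
data = [1, 3, 5, 7, 9]
print(binary_search_iterative(data, 7))  # 3
print(binary_search_iterative(data, 4))  # -1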
from functools import reduce
def ip_string_to_num(s):
    """Convert dotted IPv4 address to integer."""
    return reduce(lambda a, b: a << 8 | b, map(int, s.split(".")))
|
3dc5e9ac103149ff12bef43728421e3d30973281
| 603,712 |
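A usage sketch for ip_string_to_num:
print(ip_string_to_num("127.0.0.1"))  # 2130706433, i.e. 127*2**24 + 1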
def get_accept(environ):
    """get_accept(environ) -> accept header
    Return the Accept header from the request, or */* if it is not present.
    environ is the WSGI environment variable, from which the Accept header is read.
    """
    if 'HTTP_ACCEPT' in environ:
        return environ['HTTP_ACCEPT']
    return '*/*'
|
c58e94a185e475c48fa2f6dd58a0719297db730a
| 109,509 |