content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
---|---|---|
def parse_bibfile(bib_path):
    """Read a bibtex file into a dictionary keyed by citation command."""
    dct = {}
    with open(bib_path, 'r') as f:
        for line in f:
            line = line.strip()
            if line and line.startswith('@'):
                entries = {}
                start = line.find('{')
                end = line.find(',')
                citekey = r'\cite{%s}' % line[start + 1:end]
                while not line.startswith('}'):
                    line = next(f).strip()
                    if ' = ' not in line:
                        continue
                    key, value = line.split(' = ', 1)
                    key = key.strip()
                    value = value.strip()[1:-1].strip('{}')
                    entries[key] = value
                dct[citekey] = entries
    return dct
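# Usage sketch (hypothetical file contents, not part of the original entry):
# Given refs.bib containing:
#     @article{smith2020,
#       title = {A Title},
#       year = {2020},
#     }
# parse_bibfile('refs.bib') returns
#     {'\\cite{smith2020}': {'title': 'A Title', 'year': '2020'}}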
|
4b1cef105c6e6ad2e650afafd5d9a0189b722f1a
| 450,615 |
def eval_assignment(observations_a, observations_b, assignment):
    """Compute the two means according to the given assignment as follows: if
    the i-th bit in the assignment is set to 0, the i-th observation will not
    be changed between observations_a and observations_b; if it is 1, the
    i-th observation will be swapped between the two.
    Examples
    --------
    >>> eval_assignment([1.0, 2.0, 3.0], [4.0, 5.0, 6.0], int("000", 2))
    (2.0, 5.0)
    >>> eval_assignment([1.0, 2.0, 3.0], [4.0, 5.0, 6.0], int("100", 2))
    (3.0, 4.0)
    >>> eval_assignment([1.0, 2.0, 3.0], [4.0, 5.0, 6.0], int("111", 2))
    (5.0, 2.0)
    >>> eval_assignment([1.0], [2.0, 3.0], int("11", 2))
    Traceback (most recent call last):
    ...
    ValueError: The observation lists must have the same length
    """
    if len(observations_a) != len(observations_b):
        raise ValueError("The observation lists must have the same length")
    n = len(observations_a)
    sum_a = 0
    sum_b = 0
    for i in range(n):
        if assignment & (1 << (n - i - 1)):  # bit i is 1: swap this observation
            sum_a += observations_b[i]
            sum_b += observations_a[i]
        else:
            sum_b += observations_b[i]
            sum_a += observations_a[i]
    return (sum_a / n, sum_b / n)
|
55cf95e7cc1bac733045c6ae92c4855cabbe27c6
| 172,390 |
def create_groups_dict(data, keys, columns):
    """
    Creates a dictionary containing different subsets of a dataset. Subsets are created using dummies.
    Args:
    ------
    data(pd.DataFrame): Dataset that should be split into subsets.
    keys(list): List of keys that should be used in the resulting dictionary.
    columns(list): List of dummy variables in the dataset that are used for creating subsets.
    Returns:
    ---------
    groups_dict(dictionary)
    """
    groups_dict = {}
    for key, column in zip(keys, columns):
        groups_dict[key] = data[data[column] == 1]
    return groups_dict
|
b5b93598ef927f0a8134900f8bee069e230a63f8
| 186,933 |
def normalized(component, type='morph_max'):
    """Normalize SED or morphology
    In order to break degeneracies, either the SED or
    morphology should have some form of normalization.
    For consistency this should normally be the same for
    all components in a blend.
    For `type='sed'` the SED matrix is normalized to
    sum to unity.
    For `type='morph'` the morphology matrix is normalized
    to sum to unity.
    For `type='morph_max'` the morphology matrix is
    normalized so that its maximum value is one.
    """
    t = type.lower()
    if t == 'sed':
        norm = component.sed.sum()
        component.sed[:] = component.sed / norm
        component.morph[:] = component.morph * norm
    elif t == 'morph':
        norm = component.morph.sum()
        component.sed[:] = component.sed * norm
        component.morph[:] = component.morph / norm
    elif t == 'morph_max':
        norm = component.morph.max()
        component.sed[:] = component.sed * norm
        component.morph[:] = component.morph / norm
    else:
        raise ValueError("Unrecognized normalization '{0}'".format(type))
    return component
|
f457ef3a147dbf68fa98c970262d496eb42cbdc9
| 260,074 |
def identity(arr):
    """Identity operation on an array."""
    return arr
|
02443ecdc94d3b43daa36b17b06174eca9f8fa1e
| 421,825 |
def format_dependency(dependency: str) -> str:
    """Format the dependency for the table."""
    return "[coverage]" if dependency == "coverage" else f"[{dependency}]"
|
981a38074dbfb1f332cc49bce2c6d408aad3e9e2
| 707,143 |
def load_vdata(filename):
    """Load vertex data into a dict keyed by vertex id."""
    vertices = {}
    with open(filename) as f:
        for line in f:
            nums = [int(n) for n in line.split()]
            vertices[nums[0]] = nums[1:]
    return vertices
|
b84e7f26efa0972329a9d8d589ba8d2259f047ef
| 575,029 |
def coord_to_int(coordstring):
    """Reads a coordinate string into a couple in the following way: the input is
    of the form '(x,y)' where x, y are integers. The output is (x, y), a tuple of
    values of type int. None of these values are strings.
    Example:
    coord_to_int('(0,1)') returns
    (0, 1)
    """
    comma = coordstring.find(',')
    first_int = int(coordstring[1:comma])
    second_int = int(coordstring[comma + 1:-1])
    return (first_int, second_int)
|
3bf79f12d46b237b36da15c5bf25dafbf6a23de9
| 375,487 |
def write_list(data, delims="[]"):
    """Writes a formatted string from a list.
    The format of the output is as for a standard python list,
    [list[0], list[1],..., list[n]]. Note the space after the commas, and the
    use of square brackets.
    Args:
        data: The list to be written out.
        delims: An optional string of two characters giving the first and last
            character to be printed. Defaults to "[]".
    Returns:
        A formatted string.
    """
    rstr = delims[0]
    for v in data:
        rstr += str(v) + ", "
    rstr = rstr.rstrip(", ")
    rstr += delims[1]
    return rstr
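# Usage sketch (illustrative, not from the original entry):
# write_list([1, 2, 3])               -> '[1, 2, 3]'
# write_list(['a', 'b'], delims='()') -> '(a, b)'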
|
321bdafd47e17bc688e8003e8bda8d882ef7378b
| 252,432 |
def get_quota(trans, id):
    """Get a Quota from the database by id."""
    # Decode the id and load the quota from the database
    id = trans.security.decode_id(id)
    quota = trans.sa_session.query(trans.model.Quota).get(id)
    return quota
|
3c989923e77a837541f414a3e54a7be8911e1faf
| 693,920 |
import math
def _round_down(x, b):
    """Round float x down to the nearest multiple of int b."""
    return int(math.floor(float(x) / b)) * b
|
93d2db2de0785546f4fabd75382ad28a450eb438
| 395,660 |
def plot_scalar_2d(dataarray, ax, **kwargs):
    """
    Plot a pcolormesh of a scalar array on a profile
    Parameters
    ----------
    dataarray : :class:`xarray.DataArray`
        Array containing values of the scalar data. It must have only two
        dimensions, given by ``x`` and ``z`` coordinates.
    ax : :class:`matplotlib.axes.Axes`
        Axes where the plot will be added.
    kwargs : dict
        Keyword arguments passed to :func:`xarray.plot.pcolormesh`.
    Returns
    -------
    artist :
        The same type of primitive artist that the :func:`matplotlib.axes.Axes.pcolormesh`
        function returns.
    """
    return dataarray.plot.pcolormesh(ax=ax, x="x", y="z", **kwargs)
|
25968c761e0d28cfe360b2f8f4b0e318032c5431
| 440,125 |
import re
def default_splitter(text):
    """Searches the given string for all integer and floating-point
    values, returning them as a list _of strings_.
    E.g., the call
    default_splitter('Give me $10.52 in exchange for 91 kitten stickers.')
    will return ['10.52', '91'].
    """
    # Raw string avoids invalid-escape warnings; the optional group also
    # matches single-digit integers, which the original r'\d+\.?\d+' missed.
    fields = re.findall(r'\d+(?:\.\d+)?', text)
    return fields
|
fe5bb5cbf34c481e65f82359c245fefbc0ddce11
| 488,838 |
def key_by(items, key, children_key=False):
    """
    Index a list of dicts by the value of the given key - forced to lowercase.
    Set children_key to e.g. 'children' to recurse down a tree with 'children' attributes on nodes.
    """
    index = {}
    def inner(items, parent_key=None):
        for item in items:
            if parent_key:
                item["parent_key"] = parent_key
            _item_key = item[key] if isinstance(item[key], int) else item[key].lower()
            index[_item_key] = item
            if item.get(children_key):
                inner(item[children_key], _item_key)
    inner(items)
    return index
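# Usage sketch (illustrative, not from the original entry):
# nodes = [{"name": "Root", "children": [{"name": "Leaf"}]}]
# key_by(nodes, "name", children_key="children")
# -> {'root': {...}, 'leaf': {..., 'parent_key': 'root'}}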
|
cba14d2a1529d79ff879180c044d9ede11edc251
| 555,675 |
from typing import Tuple
import torch
def concat_last_4_layers(sequence_outputs: Tuple[torch.Tensor]) -> torch.Tensor:
    """Concatenate the last 4 tensors of a tuple of tensors."""
    last_layers = sequence_outputs[-4:]
    return torch.cat(last_layers, dim=-1)
|
39d640afe94004529af60e87b317f864483ba406
| 443,982 |
import math
def safe_log2(x: float) -> float:
    """
    Returns the base-2 logarithm if x > 0, else returns 0.
    """
    return math.log2(x) if x > 0 else 0
|
9aad3a591dccc9631652d2200f4ecb98065234cd
| 401,886 |
def derivative_from_polycoefficients(coeff, loc):
    """
    Return the derivative of a polynomial of the form
    f(x) = coeff[0] + coeff[1]*x + coeff[2]*x**2 + ...
    at x = loc
    """
    derivative = 0.
    for n, c in enumerate(coeff):
        if n == 0:
            continue
        derivative += n * c * loc**(n - 1)
    return derivative
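# Worked example (illustrative, not from the original entry):
# f(x) = 1 + 2x + 3x**2, so f'(x) = 2 + 6x and f'(2) = 2 + 12 = 14
# derivative_from_polycoefficients([1, 2, 3], 2) -> 14.0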
|
29991fa0bccb29af85376759ce8c212606c027b2
| 687,912 |
def _resolve_oct(value: str) -> int:
    """
    Resolves an octal string to a numeric value (e.g. for umask or mode).
    Raises a ValueError if the value is malformed.
    Parameters
    ----------
    value
        The octal string value
    Returns
    -------
    int
        The numeric representation of the octal value
    """
    try:
        return int(value, 8)
    except ValueError:
        raise ValueError(f"Invalid value '{value}': Must be in octal format.")
|
7f5fe506c89e547d51ca028a1e33a7be20cddb7d
| 317,697 |
import math
def polar_to_cart(deg, amt):
    """Convert polar coordinates to cartesian."""
    x = float(amt) * math.sin(math.radians(int(deg)))
    y = float(amt) * math.cos(math.radians(int(deg)))
    return x, y
|
aa8ccfdfef1e91dba180d3fdf349d6576b71d04a
| 366,150 |
import math
def _norm_pdf(x):  # pragma: no cover
    """
    Returns the probability density function value of a standard normal
    Gaussian distribution.
    """
    return math.exp(-x**2 / 2) / math.sqrt(2 * math.pi)
|
e468e1b2ec674a062b9f4b3c8af4a9e2aa3169ef
| 60,384 |
def endot(text):
    """Terminate the string with a period unless it already ends in punctuation."""
    if text and text[-1] not in '.,:;?!':
        text += '.'
    return text
|
090a3165948c8be2431b29d71b359d1abff8e4cf
| 321,161 |
def totalup(statlist, dataindex, numbcores):
    """
    :param statlist: The collective thread statistics lists
    :param dataindex: The data from each list to add up
        0 - Player one wins
        1 - Player two wins
        2 - Draws
        3 - Total games played
    :param numbcores: The number of threads (used to iterate over each thread's list)
    :return: The total value of each thread's statistics combined
    """
    returnresult = 0
    for dive in range(numbcores):
        # dive indexes into the realtime list of lists
        returnresult += statlist[dive][dataindex]
    return returnresult
|
c240c9a3747d39d18678ac661e157e912abeefe4
| 492,004 |
def get_dict_for_attrs(obj, attrs):
    """
    Returns a dictionary for each attribute from the given ``obj``.
    """
    data = {}
    for attr in attrs:
        data[attr] = getattr(obj, attr)
    return data
|
5c05eed78a3efc2229441fddbc01ea0e503fa2e8
| 566,800 |
def fracToRainbowColour(frac):
    """
    frac is a number from 0 to 1. Map to
    a 3-tuple representing a rainbow colour.
    1 -> (0, 1, 0) # green
    0.75 -> (1, 0, 1) # yellow
    0.5 -> (1, 0, 0) # red
    0.25 -> (1, 1, 0) # violet
    0 -> (0, 0, 1) # blue
    """
    unscaledTuple = (min(1, 2 * frac) - max(0, (frac - 0.5) * 2),
                     max(0, 1 - (2 * frac)), max(0, 2 * (frac - 0.5)))
    # scale so max is 1
    theMax = max(unscaledTuple)
    scaledTuple = [x / float(theMax) for x in unscaledTuple]
    return scaledTuple
|
381250be8f1352c3d2809609ebc7239085b3947e
| 217,702 |
def slice_epitope_predictions(
        epitope_predictions,
        start_offset,
        end_offset):
    """
    Return the subset of EpitopePrediction objects which overlap the given interval,
    slice through their source sequences and adjust their offset.
    """
    return [
        p.slice_source_sequence(start_offset, end_offset)
        for p in epitope_predictions
        if p.offset >= start_offset and p.offset + p.length <= end_offset
    ]
|
707dd026a2cd004cdcd24f8379a7ae772c42f6ae
| 112,090 |
def heuristic(tile1, tile2):
    """
    Manhattan distance between two tiles.
    :param tile1: Tile
    :param tile2: Tile
    :return: int distance
    """
    (x1, y1) = (tile1.r, tile1.c)
    (x2, y2) = (tile2.r, tile2.c)
    return abs(x1 - x2) + abs(y1 - y2)
|
099c64e4289589c63cb788d88ab6053241ecf385
| 136,856 |
def string_is_true(value):
    """Check whether a string has a value that should be treated as True."""
    return bool(value) and value not in ('false', 'False', '0')
|
43ee1a50f63b10d918a8c37ccff7130a7739ced4
| 587,303 |
def apply_polarity(value, polarity):
    """
    Combine value and polarity.
    # Arguments
        value: int >= 0, Absolute value.
        polarity: int, Value indicating the sign.
            Value is considered to be negative when `polarity == 0`.
            Any non-zero polarity indicates a positive value.
    # Return
        Integer with the absolute value of `value` and sign indicated by `polarity`.
    """
    return value if polarity != 0 else -value
|
7a54966732a9223e0bb41c20d14fc11699d96e2c
| 365,520 |
import string
def isprintable(data) -> bool:
    """
    This is a convenience function to be used rather than the usual
    ``str.isprintable`` check, as that built-in **DOES NOT** consider
    newlines to be part of the printable data set (weird!)
    """
    if type(data) is str:
        data = data.encode("utf-8")
    return all(c in bytes(string.printable, "ascii") for c in data)
|
1d2a542ea1d3ebd499363f57911f36858d82e876
| 104,158 |
def add_xml_data(**property_paths_types):
    """Add properties to a class that access data using :meth:`get_data`.
    This provides an alternative usage to directly calling
    :meth:`ActivityElement._add_data_properties` below a class definition.
    Args:
        **property_paths_types: kwargs mapping property_name : tuple(path, data_type)
            - property_name will be added to the decorated class.
            - path is the tag name or path of the desired subelement of the
              contained lxml element whose text will be retrieved
            - data_type is the type that the subelement's text will be converted
              to. A python type, or datetime.datetime to read a timestamp using
              :meth:`dateutil.parser.isoparse`.
    Returns:
        callable: A class decorator.
    Examples:
        >>> @add_xml_data(time=('Time', datetime.datetime))
        ... class MyElement(ActivityElement):
        ...     [...]
    See also:
        :meth:`ActivityElement._add_data_properties`
    """
    def add_data_properties(cls):
        cls._add_data_properties(**property_paths_types)
        return cls
    return add_data_properties
|
a5359482890d79f7c4d55b9370499ad314cefb0f
| 616,427 |
def _decode(reader, filename, **kwargs):
    """Successively try different methods to open ``filename`` with
    ``reader``."""
    try:  # First check if the reader is an open ``read`` method.
        return reader(**kwargs)
    except TypeError:
        pass
    try:  # Next possibility is that the reader just needs a string reference.
        return reader(filename, **kwargs)
    except TypeError:
        pass
    try:  # Finally, check if we can open the string reference to read.
        with open(filename, 'r') as fileobj:
            return reader(fileobj, **kwargs)
    except UnicodeDecodeError:
        with open(filename, 'rb') as fileobj:
            return reader(fileobj, **kwargs)
|
5408fffd404713053df957c43ee136d1951ae64b
| 183,621 |
def pad_left(s, target_len):
    """Pads string s on the left such that it has at least length target_len."""
    return ' ' * (target_len - len(s)) + s
|
7c30df9e70ed5e6ec96cd9fa35593b6da605deb5
| 214,419 |
def _set_scope_for_type_definition(type_definition, scope):
    """Sets the current scope for an ir_pb2.TypeDefinition."""
    return {"scope": scope[type_definition.name.name.text]}
|
e97b932673d6e86b2325455ff7afc70cd655921b
| 133,012 |
import mimetypes
def guess_mimetype(fn, default="application/octet-stream"):
    """Guess a mimetype from filename *fn*.
    >>> guess_mimetype("foo.txt")
    'text/plain'
    >>> guess_mimetype("foo")
    'application/octet-stream'
    """
    if "." not in fn:
        return default
    bfn, ext = fn.lower().rsplit(".", 1)
    if ext == "jpg":
        ext = "jpeg"
    return mimetypes.guess_type(bfn + "." + ext)[0] or default
|
13944a525594fc8a1c5e20d2c861ef1e3eec7bec
| 222,219 |
from datetime import datetime
def get_timestamp(dt):
    """
    Python2-compatible way to compute the timestamp (seconds since 1/1/1970).
    """
    return (dt.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds()
|
e66e007e005460d3535db433ed723f63a46d0f9b
| 94,610 |
def strip_namespace(tag_name):
    """
    Strip all namespaces or namespace prefixes if present in an XML tag name.
    For example:
    >>> tag_name = '{http://maven.apache.org/POM/4.0.0}geronimo.osgi.export.pkg'
    >>> expected = 'geronimo.osgi.export.pkg'
    >>> assert expected == strip_namespace(tag_name)
    """
    head, brace, tail = tag_name.rpartition('}')
    return tail if brace else head
|
425b41db7a75a17122e50208b64242d1aeeea5ce
| 27,670 |
def create_system_id(os_string, architecture):
    """
    Create a system-ID by joining the OS string and the architecture with a space.
    Args:
        os_string (str):
            The operating system string.
        architecture (str):
            The architecture string.
    Returns:
        The system-ID string.
    """
    system_id_format = '{os_string} {architecture}'
    return system_id_format.format(os_string=os_string.replace('_', ' '),
                                   architecture=architecture)
|
7ae682e2d57784ca771c1e50b7e980b56f631947
| 8,217 |
def _create_shifted_copy(arrays, arrays_index, index, delta):
    """Creates a copy of the arrays with one element shifted."""
    A = arrays[arrays_index]
    A_shifted = A.copy()
    A_shifted[index] += delta
    arrays_shifted = list(arrays)
    arrays_shifted[arrays_index] = A_shifted
    return arrays_shifted
|
c30aa615a85ec3999413df9d8bb8f71c46d1c4ce
| 278,678 |
import itertools
def partitionby(func, seq):
    """ Partition a sequence according to a function
    Partition `seq` into a sequence of tuples such that, when traversing
    `seq`, every time the output of `func` changes a new group is started
    and that and subsequent items are collected into that group.
    >>> is_space = lambda c: c == " "
    >>> list(partitionby(is_space, "I have space"))
    [('I',), (' ',), ('h', 'a', 'v', 'e'), (' ',), ('s', 'p', 'a', 'c', 'e')]
    >>> is_large = lambda x: x > 10
    >>> list(partitionby(is_large, [1, 2, 1, 99, 88, 33, 99, -1, 5]))
    [(1, 2, 1), (99, 88, 33, 99), (-1, 5)]
    See also:
        partition
        groupby
        itertools.groupby
    """
    # Materializing each group (here as a tuple) is required because
    # itertools.groupby shares its underlying iterator between groups.
    return (tuple(v) for k, v in itertools.groupby(seq, key=func))
|
cd526a5bb2c276ba9e37519dba901e3c2f6fa045
| 353,894 |
import math
def jump_search(array, value):
    """
    :param array: sorted list of values
    :param value: value to search for
    :return: index of the value
    If not found, return -1
    Time complexity: O(sqrt(n))
    """
    n = len(array)
    if n == 0:
        return -1
    # Choose the step to jump
    step = int(math.sqrt(n))
    # Loop through the list with the chosen step
    idx = 0
    while True:
        # Found the value
        if array[idx] == value:
            return idx
        # If we jumped past the value, run a linear search back from the current index
        if array[idx] > value:
            for offset in range(1, step):
                if array[idx - offset] == value:
                    return idx - offset
            return -1
        # Not found through the whole list
        if idx >= n - 1:
            return -1
        # Increment by step
        idx = min(idx + step, n - 1)
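# Usage sketch (illustrative, not from the original entry; the input must be sorted):
# jump_search([1, 3, 5, 7, 9, 11], 7) -> 3
# jump_search([1, 3, 5], 4)           -> -1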
|
e98296bf6a8cb9af4571409f6e08c9e2eaa3dc72
| 284,168 |
def msg_get_hw_id(raw_json):
    """Extract the hardware ID from JSON."""
    return raw_json['hardware_serial']
|
e9a0adf13a31a1d884b741992822631b9faea61b
| 452,334 |
import json
def dumpJSON(x):
    """
    Generic function to return 'x' as formatted JSON.
    Parameters:
        x: anything JSON-serializable
    Returns:
        String containing 'x' as formatted JSON
    """
    return json.dumps(x, indent=4)
|
bfc9c46fbd770623ff422f346d67507e751ffa16
| 352,412 |
def use_installerfw(ks, feature):
    """ Check if the installer framework has to be used for a feature
    "feature". """
    features = ks.handler.installerfw.features
    if features:
        if feature in features or "all" in features:
            return True
    return False
|
32cc88d999b6dae71bb2f6257174bcec3d415a85
| 430,122 |
def kmax_perpendicular(d_comoving, sigma_beam):
    """Calculate the maximum k (across the sky) from angular resolution
    Parameters
    ----------
    d_comoving :
        comoving radial distance (pre-calculated from redshift) [Mpc]
    sigma_beam :
        arc length of beam width [radians]
    """
    kmax_perp = 1. / (d_comoving * sigma_beam)
    return kmax_perp
|
977a8e0358c2ad17caeba5f9fc20fd0fff166603
| 130,009 |
def jaccard_similarity(x, y):
    """ Returns the Jaccard Similarity Coefficient (Jaccard Index) between two
    lists.
    From http://en.wikipedia.org/wiki/Jaccard_index: The Jaccard
    coefficient measures similarity between finite sample sets, and is defined as
    the size of the intersection divided by the size of the union of the sample
    sets.
    """
    intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
    union_cardinality = len(set.union(*[set(x), set(y)]))
    return intersection_cardinality / float(union_cardinality)
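# Worked example (illustrative, not from the original entry):
# jaccard_similarity([1, 2, 3], [2, 3, 4]) -> 0.5   (|{2, 3}| / |{1, 2, 3, 4}|)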
|
81cf0c882ff4b06e79b102abb2d8f13755b68873
| 5,873 |
def detokenize_enzymatic_reaction_smiles(rxn: str) -> str:
    """Detokenize an enzymatic reaction SMILES in the form precursors|EC>>products.
    Args:
        rxn: a tokenized enzymatic reaction SMILES.
    Returns:
        the detokenized enzymatic reaction SMILES.
    """
    rxn = rxn.replace(" ", "")
    if "[v" in rxn and "|" not in rxn:
        pipe_index = rxn.index("[v")
        if pipe_index > -1:
            rxn = rxn[:pipe_index] + "|" + rxn[pipe_index:]
    if "[v" not in rxn and "|" in rxn:
        rxn = rxn.replace("|", "")
    if "|" not in rxn:
        return rxn
    precursor_split = rxn.split("|")
    if len(precursor_split) < 2:
        return ""
    reaction_split = precursor_split[1].split(">>")
    if len(reaction_split) < 2:
        return ""
    ec = (
        reaction_split[0]
        .replace("][", ".")
        .replace("[v", "")
        .replace("u", "")
        .replace("t", "")
        .replace("q", "")
        .replace("]", "")
    )
    return precursor_split[0] + "|" + ec + ">>" + reaction_split[1]
|
9d3260dceb905f5d74c19b9067de15ce73eea22d
| 365,266 |
from bs4 import BeautifulSoup
import six
def _convert_toc(wiki_html):
    """Convert a Table of Contents from mediawiki to markdown."""
    soup = BeautifulSoup(wiki_html, 'html.parser')
    for toc_div in soup.findAll('div', id='toc'):
        toc_div.replaceWith('[TOC]')
    return six.text_type(soup)
|
90e1c58f194f54004539ee4cece082988add5050
| 690,035 |
def get_doc_id(element_tree):
    """
    Returns the document ID of a SaltXML document.
    :param element_tree: an ElementTree that represents a complete SaltXML document
    :type element_tree: ``lxml.etree._ElementTree``
    """
    id_element = element_tree.xpath('labels[@name="id"]')[0]
    return id_element.attrib['valueString']
|
64e3e2abda9a0182866cc34b2f510a6c6dffe05b
| 18,168 |
from typing import List
def how_many_namefellows(queue: List[str], person_name: str) -> int:
    """
    :param queue: list - names in the queue.
    :param person_name: str - name you wish to count or track.
    :return: int - the number of times the name appears in the queue.
    """
    return queue.count(person_name)
|
8364adb4226d0f09bb5a7a90af0aee518d448a95
| 630,273 |
def make_path2loc(lgrps):
    """ Set path2loc to map each path in lgrps (location groups) to
    the id of the location group associated with that path. Input is:
    lgrps = { 'loc1': ['path1', 'path2', ...], 'loc2': ['path3', ...], ...}
    Return is a dictionary of the form:
    { 'path1': 'loc1', 'path2': 'loc1', 'path3': 'loc2', ... }"""
    path2loc = {}
    for loc in lgrps:
        for path in lgrps[loc]:
            path2loc[path] = loc
    return path2loc
|
feb6bb0d24bcf785a92981565dabb4420cae251b
| 498,373 |
import re
def check_ignore(item, ignores=[]):
    """
    Take a string (item) and check whether any of the patterns in the
    ignores list match it; if so, it should be ignored.
    """
    ignore = False
    for i in ignores:
        if i and re.search(i, str(item)):
            # print("ignoring item: %s for ignore: %s" % (item, i))
            ignore = True
    return ignore
|
0d31b2ef2ddbe48a4de7f743c412b1a72a19b774
| 700,074 |
import re
def convert_multipoint_coords(text):
    """Convert multipoint coords in a string into a list of points."""
    pattern = r'(\d+\.\d+)|(-\d+\.\d+)'
    allcords = re.findall(pattern, text)
    final = []
    for i in range(0, len(allcords) - 1, 2):
        coord0 = allcords[i]
        coord0 = coord0[0] if coord0[0] else coord0[1]
        coord1 = allcords[i + 1]
        coord1 = coord1[0] if coord1[0] else coord1[1]
        final.append([float(coord0), float(coord1)])
    return final
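# Usage sketch (illustrative, not from the original entry):
# convert_multipoint_coords("12.5,-3.25 7.0,8.75") -> [[12.5, -3.25], [7.0, 8.75]]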
|
4af8a78a25145a6065531c1a4c212c631852708c
| 343,919 |
def _copy_dict(dct, description):
    """Return a copy of `dct` after overwriting the `description`."""
    _dct = dct.copy()
    _dct['description'] = description
    return _dct
|
557bc7da87069846c088983228079d3c762af69c
| 686,842 |
import math
def area_triangle_sss(side1, side2, side3):
    """Returns the area of a triangle, given the lengths of its three sides."""
    # Use Heron's formula
    s = (side1 + side2 + side3) / 2.0
    return math.sqrt(s * (s - side1) * (s - side2) * (s - side3))
|
b4f0ffd1646cbea000f446297cf3465d7ea9cd95
| 71,993 |
def ordered_node_greedy_best_first(node, h, node_tiebreaker):
    """
    Creates an ordered search node (basically, a tuple containing the node
    itself and an ordering) for greedy best first search (the node with the
    lowest heuristic value is expanded first).
    @param node The node itself.
    @param h The heuristic value.
    @param node_tiebreaker An increasing value to prefer the value first
        inserted if the ordering is the same.
    @returns A tuple to be inserted into priority queues.
    """
    f = h
    return (f, h, node_tiebreaker, node)
|
4a48132913af41d9fa713c6d62b404e0df658e50
| 306,561 |
import select
import json
def read_next_json(process):
    """Return the next JSON-formatted output from Burp Suite as a Python object."""
    # We will wait on Burp Suite's standard output
    pollobj = select.poll()
    pollobj.register(process.stdout, select.POLLIN)
    jsonobject = None  # Default to a failure
    while True:
        # Wait for max. 30 s; on timeout, return None.
        descriptors = pollobj.poll(30000)
        if descriptors == []:
            break
        # Read a line; if not JSON, continue polling with a new timeout.
        line = process.stdout.readline()
        if line == '':  # Burp Suite has exited
            break
        try:
            jsonobject = json.loads(line)
        except ValueError:
            continue
        break
    return jsonobject
|
4bf2a2ba5de738b24982584086f902a05c944ada
| 548,056 |
def reindexstruct(structobj, startidx):
    """
    Reindexes residues per chain per model;
    directly modifies structobj.
    Parameters
    ----------
    structobj: Bio.PDB.Structure.Structure
    startidx: int
    Returns
    -------
    structobj: Bio.PDB.Structure.Structure
    """
    for model in structobj:
        for chain in model:
            count = startidx
            for resi in chain.get_residues():
                if resi.id[0] == ' ':
                    resi._id = (' ', count, ' ')
                    count += 1
    return structobj
|
0df1f2434361234b692a27648ccec3b7f9e3559c
| 273,197 |
import random
def get_best_move(board, scores):
    """
    Finds empty squares with the maximum score and randomly returns one.
    """
    # Create a list of empty coordinates and their scores
    scores_of_empties = []
    for empty in list(board.get_empty_squares()):
        scores_of_empties.append((scores[empty[0]][empty[1]], (empty[0], empty[1])))
    # Determine the highest score
    max_score = max(scores_of_empties)[0]
    # Create a list of the best options for the next move
    move_options = []
    for empty_score in scores_of_empties:
        if empty_score[0] == max_score:
            move_options.append(empty_score[1])
    # Select tuple coordinates randomly from the empty squares with the highest score
    return random.choice(move_options)
|
16bc209845652e851f5149674fd6603f07177fc1
| 507,280 |
import torch
def unsqueeze(input, dim=0, ndim=1):
    """Adds singleton dimensions to a tensor.
    This function expands `torch.unsqueeze` with additional options.
    Parameters
    ----------
    input : tensor_like
        Input tensor.
    dim : int, default=0
        Position at which to insert singleton dimensions.
    ndim : int, default=1
        Number of singleton dimensions to insert.
    Returns
    -------
    output : tensor
        Tensor with additional singleton dimensions.
    """
    for _ in range(ndim):
        input = torch.unsqueeze(input, dim)
    return input
|
d1c50f440d5bf495ba03811bdebff9f6b2d34dc4
| 548,495 |
def uri_base(uri):
    """
    Get the base URI from the supplied URI by removing any query parameters and/or fragments.
    """
    base_uri = uri.split("#", 1)[0]
    base_uri = base_uri.split("?", 1)[0]
    return base_uri
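# Usage sketch (illustrative, not from the original entry):
# uri_base("https://example.com/page?x=1#top") -> 'https://example.com/page'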
|
8655b8717261dcd419f1c3ac0153ec51d348ca57
| 11,443 |
import random
def compute_delay(interval):
    """Compute an action delay using the exponential distribution."""
    return int(random.expovariate(1 / interval)) + 1
|
8e3fa92e9872947c66b622696350d2cee4a7c149
| 258,377 |
def stripdefaults(target, removeempty=(), removeexact=None):
    """ Make output dicts smaller by removing default entries. Keys are
    removed from `target` in place.
    Args:
        target (dict): dictionary to remove entries from
        removeempty (list): Remove keys in this list from target if the key
            tests as false (None, "", [], {}, etc)
        removeexact (dict): Remove keys in this dict from target if the
            corresponding values are equal
    Returns:
        target with the keys removed
    """
    removeexact = removeexact or {}
    for key in removeempty:
        # if val is an np.array, `not` raises an exception
        try:
            if not target.get(key, True):
                del target[key]
        except Exception:
            pass
    for key, val in removeexact.items():
        if target.get(key) == val:
            del target[key]
    return target
|
120727952cbfb8e8393374dfbd1b7e37c04fd071
| 651,053 |
def _prefix_keys(results: dict, prefix: str) -> dict:
    """
    Add a prefix to existing keys
    Args:
        results (dict): The dictionary of results
        prefix (str): A string to prefix each key with
    Returns:
        dict: The result dictionary with prefixed keys.
    """
    prefixed = {}
    for key, val in results.items():
        prefixed[f'{prefix}_{key}'] = val
    return prefixed
|
fab96f54b2df8b8aea566049448c05951839c0d5
| 583,829 |
import math
def calc_entropy(data, base=2):
    """
    Calculate the entropy of data. Uses the documentation of
    scipy.stats.entropy as the basis for this code
    (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html).
    :param data: Measure the entropy of this object
    :param base: The logarithmic base to use (defaults to 2, i.e. bits)
    :return: Calculated entropy value
    """
    if not data:
        return 0
    # calculate the frequency of each distinct symbol
    chars = set(data)
    frequencies = [float(data.count(ch)) / len(data) for ch in chars]
    # calculate the Shannon entropy
    H = -sum(freq * math.log(freq) / math.log(base) for freq in frequencies)
    return H
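# Worked example (illustrative, not from the original entry):
# calc_entropy("aabb") -> 1.0   (two symbols with equal frequency = 1 bit)
# calc_entropy("abcd") -> 2.0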
|
72a9d111120415471c7d54e11c862ce8ebde4a55
| 10,424 |
def oneHotEncode_4_evtypes_tau_decay_length(x, r_vals):
    """
    This function one-hot encodes the input for the event types
    cascade, track, double-bang, starting track.
    """
    cascade = [1., 0., 0., 0.]
    track = [0., 1., 0., 0.]
    doublebang = [0., 0., 1., 0.]
    s_track = [0., 0., 0., 1.]
    cut = 5.  # should be read from the config, not hardcoded
    # map x to possible classes
    if int(x) in [5, 6]:
        if r_vals[8] >= cut:
            return doublebang
        else:
            return cascade
    else:
        mapping = {0: cascade, 1: cascade, 2: track, 3: s_track, 4: track,
                   5: doublebang, 6: doublebang, 7: cascade, 8: track, 9: cascade}
        return mapping[int(x)]
|
ba41d82b51587782d9b4e8381ce360a62999a338
| 493,832 |
def d2l(d):
    """Convenience function that converts a dict into a sorted list of tuples."""
    return sorted([(k, v) for k, v in d.items()])
|
19e883d1412e4cf40aa46f6db903ec05c2988cd2
| 254,462 |
def strip_fuzz_config_params(input_args):
    """Delete fuzz-related command line args because we have to add the seed manually."""
    ret = []
    for arg in input_args:
        if "--fuzzMongodConfigs" not in arg and "--fuzzConfigSeed" not in arg:
            ret.append(arg)
    return ret
|
1d73a5a5d70067fd6949f86c2ebbcdba3a8fc8e3
| 528,935 |
def indent(text, count=1, prefix=" "):
    """Indent the given text, line by line, by the prefix repeated count times.
    Args:
        text (str): string to indent (line by line).
        count (number): number of indents.
        prefix (str): the prefix of the indent.
    Returns:
        str. the indented text.
    """
    lines = text.split("\n")
    return "\n".join("{}{}".format(prefix * count, line)
                     for line in lines)
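# Usage sketch (illustrative, not from the original entry):
# indent("a\nb", count=2, prefix="  ") -> '    a\n    b'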
|
09590ef707b818b23bbf4d92aba43e69b925b104
| 115,318 |
def is_existing_db_server(db_server_config):
    """
    Check if an external DB server config template is referring to an existing external DB server.
    If a DB server config template has host and port defined, it is referring to an existing server.
    :param db_server_config: External DB server config template
    :return: True if the external DB server config template is referring to an existing DB server
    """
    return db_server_config.get('host') and db_server_config.get('port')
|
63542500513ffbf395d7b1b0934ee51b0bf29518
| 470,798 |
def _get_cell_range(sheet, start_row, start_col, end_row, end_col):
    """Returns the values from a range.
    https://stackoverflow.com/a/33938163
    """
    return [sheet.row_slice(row, start_colx=start_col, end_colx=end_col + 1)
            for row in range(start_row, end_row + 1)]
|
8cd0ade546d7352ca61d7b6eb71e26086fd7b44a
| 383,697 |
def add_suffix_to_filename(filename, suffix_text):
    """Append a suffix to a filename.
    The suffix is appended before the file extension (if one exists).
    Examples:
        add_suffix_to_filename('my_file.txt', '_old') -> 'my_file_old.txt'
        add_suffix_to_filename('my_file', '_old') -> 'my_file_old'
    Args:
        filename: Filename string
        suffix_text: Suffix string to append
    Returns
        Modified filename
    """
    if '.' not in filename:
        return filename + suffix_text
    pieces = filename.split('.')
    return '.'.join(pieces[:-1]) + suffix_text + '.' + pieces[-1]
|
f512fa14afbbf254773df28942d7f8fd4f2b07b8
| 228,597 |
def apply_Permutation(permutation_table, sbox_32bits):
    """Takes the S-boxes' output and a permutation table and returns a 32-bit binary string."""
    final_32bits = ""
    for index in permutation_table:
        final_32bits += sbox_32bits[index - 1]
    return final_32bits
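# Usage sketch (illustrative, not from the original entry; table entries are 1-based):
# apply_Permutation([2, 1, 4, 3], "abcd") -> 'badc'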
|
027981ca6aceb0546b4cfe3736f3f959cb9a5f68
| 463,417 |
def dup_integrate(f, m, K):
    """
    Computes the indefinite integral of ``f`` in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)
    >>> R.dup_integrate(x**2 + 2*x, 1)
    1/3*x**3 + x**2
    >>> R.dup_integrate(x**2 + 2*x, 2)
    1/12*x**4 + 1/3*x**3
    """
    if m <= 0 or not f:
        return f
    g = [K.zero] * m
    for i, c in enumerate(reversed(f)):
        n = i + 1
        for j in range(1, m):
            n *= i + j + 1
        g.insert(0, K.exquo(c, K(n)))
    return g
|
0f1981d699c4c80b61d4f0aececa1ccc4601712b
| 30,783 |
def get_warning_msgs(outcarfile):
    """
    Parse warning messages from VASP.
    Args:
        outcarfile (string): path to OUTCAR file
    Returns:
        warningmsg (list of strings): warning messages in OUTCAR
    """
    warningmsg = []
    with open(outcarfile, 'r') as rf:
        for line in rf:
            if 'You have a (more or less)' in line:
                warningmsg.append('large_supercell')
    warningmsg = list(set(warningmsg))
    return warningmsg
|
e28c14f21c18785b140d12930723e34eb1ffdfe3
| 265,218 |
import re
def process_tweets(text):
    """Exclude mentions, urls, and html reference characters in a string using regular expressions."""
    text = re.sub(r"(@|https://)\S+", "", text)  # remove mentions and urls
    text = re.sub(r"&[a-z]+;", "", text)  # exclude html reference characters
    return text
|
934d78f691767bc12bdc40701a1953c0eba1d97d
| 683,837 |
def guess_casing(s: str) -> str:
    """Guess snake case or Pascal case."""
    if "_" in s:
        return "snake"
    if any(c.isupper() for c in s):
        return "pascal"
    return "snake"
|
d7059efde571bd4e9f4b5ec8a1686fd4b5c81ab1
| 92,441 |
def health_metadata(parent_path: list, output_keys: dict):
    """
    Make a decorator that attaches metadata to the target function.
    ``output_keys`` is a dictionary that documents the output keys of the checker.
    Each key is a key-path into the ``all_results`` dictionary, relative to ``parent_path``.
    If the key is a string, it is appended to the parent path,
    otherwise it extends the parent path.
    The ``parent_path``, then, is just a list of strings that is the prefix
    of all the output keys that are generated by the checker.
    Each output-key value is a dictionary containing documentation of that key
    under the key ``'doc'``.
    """
    # Build the full path for each output key, based on the parent path.
    expanded_output_keys = {}
    for k, v in output_keys.items():
        # A string key is equivalent to a path of just one element
        key_more = [k] if isinstance(k, str) else list(k)
        key_path = tuple(parent_path + key_more)
        expanded_output_keys[key_path] = v
    def health_metadata_decorator(func):
        """Add metadata to the function documenting the output keys it generates."""
        func.__dict__['pytest_repo_health'] = {
            'output_keys': expanded_output_keys
        }
        return func
    return health_metadata_decorator
|
0aec7f827bb89986e536157ae868d765965e9c74
| 430,425 |
def test_odd(value):
    """Return true if the value is odd."""
    return value % 2 == 1
|
3ee4d6b5b66c108a09408af70e6f6a3d37ccc729
| 47,798 |
import importlib
def package_available(pkg):
    """Test whether a package is available or not.
    Parameters
    ----------
    pkg : string
        Name of the package to look for
    Returns
    -------
    pkg_available : bool
        *True* if the package is available, *False* otherwise
    """
    # importlib.import_module is safer and more idiomatic than exec('import %s' % pkg)
    try:
        importlib.import_module(pkg)
    except ImportError:
        return False
    else:
        return True
|
81af51478d24e9b4983a5ea2e0577cfadc268217
| 589,469 |
def parse_claim(line):
    """Parse a line into a tuple with the edge coordinates and size of the rectangle."""
    _, claim = line.split(' @ ')
    edge_part, rectangle_part = claim.split(': ')
    edge = [int(num) - 1 for num in edge_part.split(',')]
    rectangle = [int(num) for num in rectangle_part.split('x')]
    return edge, rectangle
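# Usage sketch (Advent-of-Code-style claim line, illustrative only):
# parse_claim("#1 @ 3,2: 5x4") -> ([2, 1], [5, 4])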
|
4d6d7a315ecbda9b7f626633dc5f7e8e25dcf3aa
| 243,394 |
import random
import string
def _generate_run_id(size=6, chars=None):
    """Generate a random ID of length `size`.
    Parameters
    ----------
    size : int
    chars : Optional[str]
        Optional list of characters to use for generating the ID.
    Returns
    -------
    str
        Returns a random identifier of length `size`.
    """
    if chars is None:
        chars = string.ascii_uppercase + string.digits
    return ''.join(random.choice(chars) for _ in range(size))
|
606f4e3a3116a14d652aa245af8fed5e85682efc
| 178,174 |
def denormalize_data(batch_data, batch_std, batch_mean):
    """
    Denormalize data given the standard deviation and mean.
    """
    batch_data = (batch_data * batch_std) + batch_mean
    return batch_data
|
cb048f93c93b578b332ec210d838f67e4d5cf3e3
| 96,005 |
import torch
def temporal_separation_loss(cfg: dict, coords: torch.Tensor) -> torch.Tensor:
    """ Encourages key-points to have different temporal trajectories.
    :param cfg: Configuration dictionary
    :param coords: Key-point coordinates tensor in (N, T, C, 3)
    :return: The separation loss
    """
    # Trajectories are centered first
    x_coordinates = coords[..., 0] - torch.mean(coords[..., 0], dim=1, keepdim=True)  # (N, T, C)
    y_coordinates = coords[..., 1] - torch.mean(coords[..., 1], dim=1, keepdim=True)
    # Compute the pair-wise distance matrix
    x_1 = x_coordinates.unsqueeze(-1)  # (N, T, C, 1)
    x_2 = x_coordinates.unsqueeze(-2)  # (N, T, 1, C)
    y_1 = y_coordinates.unsqueeze(-1)
    y_2 = y_coordinates.unsqueeze(-2)
    d = (x_1 - x_2)**2 + (y_1 - y_2)**2  # (N, T, C, C)
    # Average across time
    d = torch.mean(d, dim=1)  # (N, C, C)
    # Transform by a gaussian
    loss_matrix = torch.exp(-d / (2.0 * cfg['training']['separation_loss_sigma']**2))
    loss_matrix = torch.mean(loss_matrix, dim=0)  # Average across the batch
    loss = torch.sum(loss_matrix)
    # Subtract the values on the diagonal (1 per key-point)
    loss = loss - cfg['model']['n_feature_maps']
    # Normalize to [0, 1]
    loss = loss / (cfg['model']['n_feature_maps'] * (cfg['model']['n_feature_maps'] - 1))
    return loss
|
7de1f78311454266ba529e326e0d313a69ffb376
| 658,371 |
def davidson_to_binary(label: str) -> str:
    """
    Convert Davidson labels to binary labels.
    :param label: Raw label as string
    :returns (str): label as str.
    """
    if label in ['0', '1']:
        return 'abuse'
    else:
        return 'not-abuse'
|
fe271c0d80893f7dced22a9cffe1d6bdbf658576
| 246,947 |
def get_e_rtd(e_dash_rtd, bath_function):
    """Obtain the efficiency of the water heater from the "energy consumption
    efficiency" defined for gas water heaters in the "Criteria for Judgment by
    Manufacturers of Specified Equipment" under the Act on the Rational Use of
    Energy. (Eq. 9)
    Args:
        e_dash_rtd(float): The "energy consumption efficiency" as defined above
        bath_function(str): Type of bath function
    Returns:
        float: Converted efficiency of the water heater
    """
    # '給湯単機能' = hot-water supply only; 'ふろ給湯機(追焚なし)' = bath water heater without reheating
    if bath_function == '給湯単機能' or bath_function == 'ふろ給湯機(追焚なし)':
        return e_dash_rtd - 0.046  # (9a)
    # 'ふろ給湯機(追焚あり)' = bath water heater with reheating
    elif bath_function == 'ふろ給湯機(追焚あり)':
        return e_dash_rtd - 0.064  # (9b)
    else:
        raise ValueError()
|
91ba3840857d8f22b808a6ab74baa84a51bdcf5b
| 654,309 |
def pos_distribution(doc):
    """Get the POS distribution from a processed text.
    Let us suppose that a given sentence has the following POS tags:
    ``[NOUN, VERB, ADJ, VERB, ADJ]``. The POS distribution would be
    * ``NOUN: 1``
    * ``VERB: 2``
    * ``ADJ: 2``
    This function returns this distribution as a dict.
    :param doc: Processed text
    :type doc: Spacy Doc
    :return: POS distribution as a dict, key POS, value count
    :rtype: dict
    """
    distribution = {}
    for token in doc:
        distribution[token.pos_] = distribution.get(token.pos_, 0) + 1
    return distribution
|
8d4aae39d779f26990d1acd933e8979453cdd155
| 621,133 |
def _fetch_none(_, __):
    """Fetch no rows and return `None`."""
    return None
|
95e786f4473e10db0bfaf5af7fcae7b904b456af
| 126,498 |
def sum_price(df, column, product_id):
    """
    Computes the total revenue for the given product.
    :param df: the merged dataframe
    :param column: the column holding product ids ('ProductId')
    :param product_id: product id from the top-n selection
    :return: total revenue for the selected product
    """
    return df[df[column] == product_id]['Price'].sum()
|
048659b3eb1d02b4f411f330cb860a14550c8414
| 462,514 |
def row_as_json(sqlite_row):
    """Return a dict from a sqlite_row."""
    return {
        key: sqlite_row[key]
        for key in sqlite_row.keys()
    }
|
828e130cc88b0a020702eec95eca4ae95bf98644
| 84,376 |
import re
def parse_numbers(line):
    """Return a list of numbers from the given input."""
    numbers = []
    matched = re.finditer(r'(\d+)', line)
    for match in matched:
        numbers.append(int(match.group()))
    return numbers
|
e17c6898c5d3fc956d38c6f069ce51c8b0a0445c
| 501,254 |
def is_han(c):
    """Return true if the input is a han character, false otherwise."""
    return 0x4E00 <= ord(c) <= 0x9FFF
|
c244a1abd1a6603a3ff5fdb9e8baa17fcf6b7ca4
| 616,479 |
def save_image(args, image, media_info, output_path):
    """Save the image to `output_path`."""
    image = image.convert("RGB")
    try:
        image.save(output_path, optimize=True, quality=args.image_quality)
        return True
    except KeyError:
        return False
|
a001df6c3acdc0b121b115619296e505efe18e57
| 578,938 |
def find_cuds_object(criterion, root, rel, find_all, max_depth=float("inf"),
                     current_depth=0, visited=None):
    """Recursively finds an element inside a container.
    Uses the given relationship for traversal.
    Args:
        criterion (Callable): Function that returns True on the Cuds object
            that is searched.
        root (Cuds): Starting point of the search.
        rel (OntologyRelationship): The relationship (incl. subrelationships)
            to consider for traversal.
        find_all (bool): Whether to find all cuds_objects satisfying
            the criterion.
        max_depth (int, optional): The maximum depth for the search.
            Defaults to float("inf").
        current_depth (int, optional): The current search depth. Defaults to 0.
        visited (Set[Union[UUID, URIRef]], optional): The set of uids
            already visited. Defaults to None.
    Returns:
        Union[Cuds, List[Cuds]]: The element(s) found.
    """
    visited = visited or set()
    visited.add(root.uid)
    output = [root] if criterion(root) else []
    if output and not find_all:
        return output[0]
    if current_depth < max_depth:
        for sub in root.iter(rel=rel):
            if sub.uid not in visited:
                result = find_cuds_object(criterion=criterion,
                                          root=sub,
                                          rel=rel,
                                          find_all=find_all,
                                          max_depth=max_depth,
                                          current_depth=current_depth + 1,
                                          visited=visited)
                if not find_all and result is not None:
                    return result
                if result is not None:
                    output += result
    return output if find_all else None
|
78e227f425ba386aaa26577a1b0f9900a552cde4
| 618,689 |
def _text_of_first_tag(dom, tag):
    """Returns the text inside the first tag of the dom object.
    Args:
        dom: The dom object.
        tag: The tag name.
    Returns:
        A string.
    Raises:
        ValueError: If the dom object doesn't contain the specified tag or if the
            first tag doesn't have a text.
    """
    tags = dom.getElementsByTagName(tag)
    # Tag not found.
    if len(tags) == 0:
        raise ValueError('No tag {} found'.format(tag))
    # No text in the first tag.
    if tags[0].firstChild is None:
        raise ValueError('No text in tag {} found'.format(tag))
    return tags[0].firstChild.nodeValue
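# Usage sketch (illustrative, not from the original entry):
# from xml.dom import minidom
# dom = minidom.parseString("<root><name>hello</name></root>")
# _text_of_first_tag(dom, "name") -> 'hello'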
|
81a21dafc6db4bc14faba1b03d9ea09b91426773
| 97,848 |
def index_duplicated(index, keep="first"):
    """
    Indicate duplicate index values.
    Duplicated values are indicated as ``True`` values in the resulting
    array. Either all duplicates, all except the first, or all except the
    last occurrence of duplicates can be indicated.
    Parameters
    ----------
    keep : {'first', 'last', False}, default 'first'
        The value or values in a set of duplicates to mark as missing.
        - 'first' : Mark duplicates as ``True`` except for the first
          occurrence.
        - 'last' : Mark duplicates as ``True`` except for the last
          occurrence.
        - ``False`` : Mark all duplicates as ``True``.
    Returns
    -------
    Tensor
    See Also
    --------
    Series.duplicated : Equivalent method on pandas.Series.
    DataFrame.duplicated : Equivalent method on pandas.DataFrame.
    Index.drop_duplicates : Remove duplicate values from Index.
    Examples
    --------
    By default, for each set of duplicated values, the first occurrence is
    set to False and all others to True:
    >>> import mars.dataframe as md
    >>> idx = md.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
    >>> idx.duplicated().execute()
    array([False, False, True, False, True])
    which is equivalent to
    >>> idx.duplicated(keep='first').execute()
    array([False, False, True, False, True])
    By using 'last', the last occurrence of each set of duplicated values
    is set on False and all others on True:
    >>> idx.duplicated(keep='last').execute()
    array([ True, False, True, False, False])
    By setting keep on ``False``, all duplicates are True:
    >>> idx.duplicated(keep=False).execute()
    array([ True, False, True, False, True])
    """
    return index.to_series().duplicated(keep=keep).to_tensor()
|
438e0c3d16c4a5adfe3102b610779dcdb4df89d4
| 524,316 |
def quote_message(body: str, message):
    """Construct a body (with a signature) and a quoted reply."""
    original = body.split("\n")
    original.append("")
    original.append(message.conversation.sender_name)
    original.append("CEO, %s" % message.conversation.domain.company_name)
    reply = []
    reply.append(
        "On %s, %s wrote:"
        % (message.timestamp.strftime("%d/%m/%Y %H:%M %p"), message.sender_name)
    )
    reply.extend(["> " + line for line in message.best_body.split("\n")])
    return "\n".join(original), "\n".join(reply)
|
bbff4d670f94768aba6c5dff6a309abf9fb0c238
| 33,918 |
import time
def track_rate(*, current, last_recorded, last_recorded_time, metric_name, min_increase=1_000_000, stats):
    """Calculates the rate of change given the current value and the previously handled value and time. If there is
    a relevant change (as defined by min_increase) and some time has passed, the current rate of change
    is sent as an integer gauge using the given stats client and metric name. Returns the values to pass to the
    next invocation of this function."""
    now = time.monotonic()
    return_value, return_time = last_recorded, last_recorded_time
    if current > last_recorded + min_increase and now > last_recorded_time:
        time_elapsed = now - last_recorded_time
        diff = current - last_recorded
        per_second = int(diff / time_elapsed)
        if per_second > 0:
            stats.gauge_int(metric_name, per_second)
        return_value = current
        return_time = now
    return return_value, return_time
|
5a0ac101eb3bdb6bc1af64b666e8d5bc3a8796d5
| 399,927 |
def _parse_results(results):
    """Map each key to the sorted list of its sub-keys and the date range that its data covers.
    Args:
        results (dict): Dictionary of search results
    Returns:
        dict: Mapping of each key to its covered dates and sorted sub-keys
    """
    date_keys = {}
    for key in results.keys():
        keys = sorted(results[key])
        start_key = keys[0]
        end_key = keys[-1]
        # Get the first and last dates from the keys in the search results
        start_date = start_key.split("/")[-1].split("_")[0]
        end_date = end_key.split("/")[-1].split("_")[-1]
        dates_covered = start_date + "_" + end_date
        date_keys[key] = {"dates": dates_covered, "keys": keys}
    return date_keys
|
4da151d517b2d60445c97cb6dd1414a142d02c55
| 82,790 |
def lua_property(name):
    """Decorator for marking methods that make attributes available to Lua."""
    def decorator(meth):
        def setter(method):
            meth._setter_method = method.__name__
            return method
        meth._is_lua_property = True
        meth._name = name
        meth.lua_setter = setter
        return meth
    return decorator
|
97cd57cf21c4afdb43b6504af56139228df751cd
| 8,632 |
def test_fit(x, y, n, board_state):
    """
    Tests whether n can be placed at x, y on the current board_state.
    :param x: horizontal position on the board
    :param y: vertical position on the board
    :param n: the number to place
    :param board_state: the current board state as a numpy array
    :return: True if nothing would stop n from being placed at x, y on board_state, else False
    """
    # first test if something is already in that position
    if board_state[x, y] != 0:
        return False
    # then test if n already exists in that column or row
    for i in range(9):
        if board_state[x, i] == n:
            return False
        elif board_state[i, y] == n:
            return False
    # finally test if it fits in the 3x3 block
    x_block = x // 3
    y_block = y // 3
    for i in range(x_block * 3, x_block * 3 + 3):
        for j in range(y_block * 3, y_block * 3 + 3):
            if board_state[i, j] == n:
                return False
    return True
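# Usage sketch (illustrative, not from the original entry):
# import numpy as np
# board = np.zeros((9, 9), dtype=int)
# board[0, 0] = 5
# test_fit(0, 1, 5, board) -> False  (5 already in row 0)
# test_fit(1, 1, 5, board) -> False  (5 already in the top-left block)
# test_fit(4, 4, 5, board) -> True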
|
569b6971d9bba6fc06d44e65d827bc02dd1caacc
| 308,796 |