content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
import struct
def get_rgba_str(src):
    """Convert native-endian ARGB32 cairo surface data to big-endian RGBA.

    Args:
        src (bytes): raw surface buffer; length must be a multiple of 4.

    Returns:
        bytes: the same pixels repacked as big-endian RGBA 32-bit words.
    """
    # Integer division: `len(src) / 4` is a float in Python 3 and struct
    # rejects a float repeat count.
    lmax = len(src) // 4
    argb = struct.unpack("=%dL" % lmax, src)
    # Rotate each word left by one byte: alpha moves from the top byte
    # (ARGB) to the bottom byte (RGBA).
    rgba = [((d & 0x0FFFFFF) << 8) + ((d >> 24) & 0x0FF) for d in argb]
    return struct.pack(">%dL" % lmax, *rgba)
|
ca1c94193be349b83b9e99a1d76ccbde1e251db7
| 244,568 |
import struct
def of_slicer(remaining_data):
    """Slice a raw bytes into OpenFlow packets"""
    pkts = []
    # An OpenFlow header is at least 4 bytes; bytes 2-3 hold the total
    # message length as a big-endian unsigned short.
    while len(remaining_data) > 3:
        (msg_len,) = struct.unpack('!H', remaining_data[2:4])
        if len(remaining_data) < msg_len:
            # Partial message: keep the tail for the next read.
            break
        pkts.append(remaining_data[:msg_len])
        remaining_data = remaining_data[msg_len:]
    return pkts, remaining_data
|
77bd851e4b3fc1111c5142fab1439820c165989b
| 686,253 |
import socket
import struct
def format_ipv4(ip, port=None):
    """Format an ipv4 address into a human-readable string.

    Args:
        ip (varies): the ip address as extracted in an LTTng event.
            Either an integer or a list of integers, depending on the
            tracer version.
        port (int, optional): the port number associated with the
            address.

    Returns:
        The formatted string containing the ipv4 address and,
        optionally, the port number.
    """
    # lttng-modules < 2.6 emits an integer, >= 2.6 a sequence of octets;
    # indexing an int raises TypeError, selecting the integer branch.
    try:
        formatted = f'{ip[0]}.{ip[1]}.{ip[2]}.{ip[3]}'
    except TypeError:
        # Pack as a network-endian 32-bit unsigned word, which
        # inet_ntoa renders as a dotted quad.
        formatted = socket.inet_ntoa(struct.pack('!I', ip))
    return formatted if port is None else f'{formatted}:{port}'
|
9b34a59376962d0c095b39bc846e546fa75250a4
| 514,157 |
def range_join(numbers, to_str=False, sep=",", range_sep=":"):
    """
    Takes a sequence of positive integer numbers given either as integer or string types, and
    returns a sequence of 1- and 2-tuples, denoting either single numbers or inclusive start and
    stop values of possible ranges. When *to_str* is *True*, a string is returned in a format
    consistent to :py:func:`range_expand` with ranges constructed by *range_sep* and merged with
    *sep*.

    Example:

    .. code-block:: python

        range_join([1, 2, 3, 5])
        # -> [(1, 3), (5,)]

        range_join([1, 2, 3, 5, 7, 8, 9])
        # -> [(1, 3), (5,), (7, 9)]

        range_join([1, 2, 3, 5, 7, 8, 9], to_str=True)
        # -> "1:3,5,7:9"
    """
    if not numbers:
        return "" if to_str else []

    # check type, convert, make unique and sort
    # NOTE: the third-party `six` shim was dropped -- on Python 3,
    # six.string_types == (str,) and six.integer_types == (int,), so these
    # plain isinstance checks are behaviorally identical.
    _numbers = []
    for n in numbers:
        if isinstance(n, str):
            try:
                n = int(n)
            except ValueError:
                raise ValueError("invalid number format '{}'".format(n))
        if isinstance(n, int):
            _numbers.append(n)
        else:
            raise TypeError("cannot handle non-integer value '{}' in numbers to join".format(n))
    numbers = sorted(set(_numbers))

    # iterate through numbers, keep track of last starts and stops and fill a list of range tuples
    ranges = []
    start = stop = numbers[0]
    for n in numbers[1:]:
        if n == stop + 1:
            stop += 1
        else:
            ranges.append((start,) if start == stop else (start, stop))
            start = stop = n
    ranges.append((start,) if start == stop else (start, stop))

    # convert to string representation
    if to_str:
        ranges = sep.join(
            (str(r[0]) if len(r) == 1 else "{1}{0}{2}".format(range_sep, *r))
            for r in ranges
        )
    return ranges
|
c1b2d10ec1b47fa5c917fccead2ef8d5fc506370
| 1,981 |
def containing_block(code, idx, delimiters=['()','[]','{}'], require_delim=True):
    """
    Find the code block given by balanced delimiters that contains the position ``idx``.
    INPUT:
    - ``code`` - a string
    - ``idx`` - an integer; a starting position
    - ``delimiters`` - a list of strings (default: ['()', '[]',
      '{}']); the delimiters to balance. A delimiter must be a single
      character and no character can at the same time be opening and
      closing delimiter.
    - ``require_delim`` - a boolean (default: True); whether to raise
      a SyntaxError if delimiters are present. If the delimiters are
      unbalanced, an error will be raised in any case.
    OUTPUT:
    - a 2-tuple ``(a,b)`` of integers, such that ``code[a:b]`` is
      delimited by balanced delimiters, ``a<=idx<b``, and ``a``
      is maximal and ``b`` is minimal with that property. If that
      does not exist, a ``SyntaxError`` is raised.
    - If ``require_delim`` is false and ``a,b`` as above can not be
      found, then ``0, len(code)`` is returned.
    EXAMPLES::
        sage: from sage.repl.preparse import containing_block
        sage: s = "factor(next_prime(L[5]+1))"
        sage: s[22]
        '+'
        sage: start, end = containing_block(s, 22)
        sage: start, end
        (17, 25)
        sage: s[start:end]
        '(L[5]+1)'
        sage: s[20]
        '5'
        sage: start, end = containing_block(s, 20); s[start:end]
        '[5]'
        sage: start, end = containing_block(s, 20, delimiters=['()']); s[start:end]
        '(L[5]+1)'
        sage: start, end = containing_block(s, 10); s[start:end]
        '(next_prime(L[5]+1))'
    TESTS::
        sage: containing_block('((a{))',0)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('((a{))',1)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('((a{))',2)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('((a{))',3)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('((a{))',4)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('((a{))',5)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('(()()',1)
        (1, 3)
        sage: containing_block('(()()',3)
        (3, 5)
        sage: containing_block('(()()',4)
        (3, 5)
        sage: containing_block('(()()',0)
        Traceback (most recent call last):
        ...
        SyntaxError: Unbalanced delimiters
        sage: containing_block('(()()',0, require_delim=False)
        (0, 5)
        sage: containing_block('((})()',1, require_delim=False)
        (0, 6)
        sage: containing_block('abc',1, require_delim=False)
        (0, 3)
    """
    openings = "".join([d[0] for d in delimiters])
    closings = "".join([d[-1] for d in delimiters])
    # levels[i] tracks the running balance for delimiter pair i during a scan.
    levels = [0] * len(openings)
    p = 0
    start = idx
    # Scan leftwards from idx for the innermost opening delimiter that is not
    # matched by a closer within the scanned span.
    while start >= 0:
        if code[start] in openings:
            p = openings.index(code[start])
            levels[p] -= 1
            if levels[p] == -1:
                # Unmatched opener found: this is the block's left boundary.
                break
        elif code[start] in closings and start < idx:
            p = closings.index(code[start])
            levels[p] += 1
        start -= 1
    if start == -1:
        # Ran off the left edge without finding an unmatched opener.
        if require_delim:
            raise SyntaxError("Unbalanced or missing delimiters")
        else:
            return 0, len(code)
    # Exactly one level (the block's own opener) may be -1 at this point; any
    # other nonzero level means a different delimiter type is unbalanced.
    if levels.count(0) != len(levels)-1:
        if require_delim:
            raise SyntaxError("Unbalanced delimiters")
        else:
            return 0, len(code)
    p0 = p
    # We now have levels[p0]==-1. We go to the right hand side
    # till we find a closing delimiter of type p0 that makes
    # levels[p0]==0.
    end = idx
    while end < len(code):
        if code[end] in closings:
            p = closings.index(code[end])
            levels[p] += 1
            if p==p0 and levels[p] == 0:
                # The block's matching closer: right boundary found.
                break
        elif code[end] in openings and end > idx:
            p = openings.index(code[end])
            levels[p] -= 1
        end += 1
    if levels.count(0) != len(levels):
        # This also occurs when end==len(code) without finding a closing delimiter
        if require_delim:
            raise SyntaxError("Unbalanced delimiters")
        else:
            return 0, len(code)
    return start, end+1
|
a23d584d9890ae24b17ab0fdd48b9327b94a8f2a
| 373,527 |
def groups_balanced(arg):
    """
    Match [, {, and ( for balance
    >>> groups_balanced("(a) and (b)")
    True
    >>> groups_balanced("((a) and (b))")
    True
    >>> groups_balanced("((a) and (b)")
    False
    >>> groups_balanced(" [a] and [b] ")
    True
    >>> groups_balanced("((a) and [(b)])")
    True
    >>> groups_balanced("((a) and [(b))]")
    False
    """
    # Map each closer to its expected opener.
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in arg.strip():
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            # A closer must match the most recent unclosed opener.
            if not stack or stack.pop() != pairs[ch]:
                return False
    # Balanced only if every opener was closed.
    return not stack
|
cef90251e5dfe9f3be17af062d6042df6505bb0c
| 81,223 |
import math
def entropy(U, N, weight=None):
    """Return the information entropy of the assignment.

    Args:
        U: assignment {class_id: [members,],}
        N: number of objects
        weight: optional weight factor {object_id: weight}
    """
    H = 0
    for members in U.values():
        size = len(members)
        # Empty classes contribute nothing (and would break log2).
        if size == 0:
            continue
        p = size / N
        # Average member weight scales the class's contribution.
        w = 1 if weight is None else sum(weight[m] for m in members) / size
        H -= p * math.log2(p) * w
    return H
|
5437b3dd843fbc82b3f26852f3525f02fb1e95ae
| 613,104 |
from typing import Optional
def verify_input_date_format(date: Optional[str]) -> Optional[str]:
    """
    Ensure a user-supplied date string ends with 'Z', as the API expects.

    Args:
        date (str): Date string given by the user. Can be None.

    Returns:
        str: Fixed date in the format needed by the API (None/empty
        values are returned unchanged).
    """
    if not date:
        return date
    return date if date.endswith('Z') else date + 'Z'
|
7e8e17df937693d6cf53aaff4018df1ff95fb1fc
| 133,326 |
def apply_polynomial(coeff, x):
    """Evaluate f(x) = a0 + a1*x + ... + an*x**n.

    Args:
        coeff (list(int)): Coefficients [a0, a1, ..., an].
        x (int): Point at which to evaluate the polynomial.

    Returns:
        int: The value f(x).
    """
    result = 0
    power = 1  # running x**i, updated once per coefficient
    for coefficient in coeff:
        result += coefficient * power
        power *= x
    return result
|
08bc04ca19b9289b726fec0b9ad832470709270d
| 665,654 |
def check_has_backup_this_month(year, month, existing_backups):
    """
    Return True when any backup in existing_backups was created in the
    given year and month, else False.
    """
    return any(
        backup.createdYear == year and backup.createdMonth == month
        for backup in existing_backups
    )
|
ade68262f374f2e56ba2f60c6e3689a50b87d7a0
| 618,536 |
import math
def rule_of_thumb(m, alpha=0.92, s=0.20):
    """
    Return the number of precincts to audit for a given margin of victory.

    m is the margin of victory as a fraction of total votes cast; the
    defaults correspond to roughly a 92% confidence rate.
    """
    # Scale the confidence term by the reciprocal of the margin.
    return 1.0 / m * (-2.0 * s * math.log(alpha))
|
4acf633d7b57893ee78c74636027212703ac10fa
| 328,548 |
def match_module(names):
    """
    Given the tokens of a stripped Fortran statement (no comments or
    surrounding whitespace), return the module name as given when the
    tokens open a module definition, otherwise ''.

    'MODULE PROCEDURE' statements are not module definitions.
    """
    opens_module = (
        len(names) >= 2
        and names[0] == "MODULE"
        and names[1] != "PROCEDURE"
    )
    return names[1] if opens_module else ""
|
9793112985c15448362636dd8c4c947fc79e455e
| 159,662 |
def slugify(value):
    """
    Trim and lowercase the string, drop commas, and turn spaces into
    underscores.
    """
    return value.strip().lower().replace(',', '').replace(' ', '_')
|
ac006d287c9efaef3ee8d210a71681536ce84c17
| 271,856 |
def _get_mapping_dict(ch_names: list[str]) -> dict:
"""Create dictionary for remapping channel types.
Arguments
---------
ch_names : list
Channel names to be remapped.
Returns
-------
remapping_dict : dict
Dictionary mapping each channel name to a channel type.
"""
remapping_dict = {}
for ch_name in ch_names:
if ch_name.startswith("ECOG"):
remapping_dict[ch_name] = "ecog"
elif ch_name.startswith(("LFP", "STN")):
remapping_dict[ch_name] = "dbs"
elif ch_name.startswith("EMG"):
remapping_dict[ch_name] = "emg"
elif ch_name.startswith("EEG"):
remapping_dict[ch_name] = "eeg"
elif ch_name.startswith(
("MOV", "ANALOG", "ROT", "ACC", "AUX", "X", "Y", "Z", "MISC")
):
remapping_dict[ch_name] = "misc"
else:
remapping_dict[ch_name] = "misc"
return remapping_dict
|
782cda9c43749f71241dbef65f5654eafd7e07f4
| 43,066 |
def is_vlan(v):
    """
    Check value is valid VLAN ID
    >>> is_vlan(1)
    True
    >>> is_vlan(-1)
    False
    >>> is_vlan(4095)
    True
    >>> is_vlan(4096)
    False
    >>> is_vlan("g")
    False
    >>> is_vlan(None)
    False
    """
    try:
        v = int(v)
        return 1 <= v <= 4095
    except (TypeError, ValueError):
        # int() raises ValueError for non-numeric strings, but TypeError
        # for None/lists/etc.; both simply mean "not a valid VLAN id"
        # (the original let TypeError escape to the caller).
        return False
|
48c8fddc1e733ac3697c14b3d78641863dd84fdd
| 294,398 |
import torch
def create_random_binary_mask(features):
    """
    Create a random binary mask of a given dimension with half of its
    entries (rounded up for odd sizes) randomly set to 1s.

    :param features: Dimension of mask.
    :return: Binary mask of type torch.Tensor.
    """
    mask = torch.zeros(features).byte()
    # Half the entries, rounding up when `features` is odd.
    num_ones = (features + 1) // 2
    chosen = torch.multinomial(
        input=torch.ones(features).float(),
        num_samples=num_ones,
        replacement=False,
    )
    mask[chosen] += 1
    return mask
|
4f98743fc27d4abec219b9317dd16e9c61a0eac9
| 682,738 |
def _pep8_violations(report_file):
"""
Returns the list of all PEP 8 violations in the given report_file.
"""
with open(report_file) as f:
return f.readlines()
|
d7b399e8d7b67a1ed22feea74bed2a0755396d09
| 181,603 |
def int_def(value, default=0):
    """Parse the value into an int or return the default value.

    Parameters
    ----------
    value : `str`
        the value to parse
    default : `int`, optional
        default value to return in case of parsing errors, by default 0

    Returns
    -------
    `int`
        the parsed value
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        # TypeError covers unparseable types such as None, which the
        # original ValueError-only handler let propagate to the caller.
        return default
|
b4394805007c9dcd8503d3062f76aa6840672604
| 659,272 |
def new(num_buckets=256):
    """Initializes a Map with the given number of buckets."""
    # Independent empty lists -- one bucket per slot.
    return [[] for _ in range(num_buckets)]
|
7d4496ce082950343ee43e1ce97a196361f04e47
| 525,732 |
def list_from_i_to_n(i, n):
    """
    Return the inclusive list [i, i+1, ..., n].

    for example:
    list_from_i_to_n(3,7) => [3,4,5,6,7]
    list_from_i_to_n(4,6) => [4,5,6]
    """
    return list(range(i, n + 1))
|
308a30dde3f74726755343a34a78d9a9c949cb8e
| 106,412 |
def iob_scheme(tags, iobes=True):
    """Transform a scheme-less tag sequence into IOB (or IOBES) tags."""
    result = []
    # Pad with "O" so every position has a well-defined neighbor.
    padded = ["O"] + tags + ["O"]
    for i in range(1, len(padded) - 1):
        before, current, after = padded[i - 1:i + 2]
        if current == "O":
            result.append("O")
            continue
        starts = current != before  # first tag of its span
        ends = current != after     # last tag of its span
        if starts and ends:
            result.append(("S-" if iobes else "B-") + current)
        elif starts:
            result.append("B-" + current)
        elif ends:
            result.append(("E-" if iobes else "I-") + current)
        else:
            result.append("I-" + current)
    return result
|
8c7c39eafab90b07864e18315fb391417d3dcf81
| 250,963 |
def get_corpus(tokens):
    """Join the list of tokens into one space-separated string.

    :return: a long string that combines all the tokens together
    """
    return ' '.join(tokens)
|
5a728c231a81462d09bdab64b48e7d3a1fac76cc
| 448,747 |
import torch
def get_plan_feats(plans, scene_tensor, agent_tensor):
    """
    Returns location coordinates, map and agent features for a given batch of plans
    Inputs
    plans: Sequences of row and column values on grid. shape: (Batchsize, horizon, 2)
    scene_tensor: Tensor of scene features: (Batchsize, C_s, H, W)
    agent_tensor: Tensor of agent features: (Batchsize, C_a, H, W)
    Output
    scene_feats: Scene features along plan (Batchsize, horizon, C_s)
    agent_feats: Agent features along plan (Batchsize, horizon, C_a)
    """
    h = scene_tensor.shape[2]
    # Flatten the spatial grid: (B, C, H, W) -> (B, C, H*W) so plan cells can
    # be gathered along a single flat index dimension.
    scene_tensor = scene_tensor.reshape(scene_tensor.shape[0], scene_tensor.shape[1], -1)
    agent_tensor = agent_tensor.reshape(agent_tensor.shape[0], agent_tensor.shape[1], -1)
    # Linearize (row, col) -> row * h + col.
    # NOTE(review): h is H (shape[2]); row-major flattening of an H x W grid
    # would normally multiply by W (shape[3]) -- this is correct only when
    # H == W. Verify against the callers' grid dimensions.
    plans = plans[:, :, 0] * h + plans[:, :, 1]
    # Broadcast the flat indices across every channel so gather reads the
    # same grid cell from each feature map.
    plans_s = plans[:, None, :].repeat(1, scene_tensor.shape[1], 1).long()
    plans_a = plans[:, None, :].repeat(1, agent_tensor.shape[1], 1).long()
    scene_feats = torch.gather(scene_tensor, 2, plans_s)
    agent_feats = torch.gather(agent_tensor, 2, plans_a)
    # (B, C, horizon) -> (B, horizon, C)
    scene_feats = scene_feats.permute(0, 2, 1)
    agent_feats = agent_feats.permute(0, 2, 1)
    return scene_feats, agent_feats
|
d90b74fc96e1547ac1f1f3f6f160bf9805701738
| 316,296 |
def discouont_rewards(raw_rewards, gamma):
    """
    Discount rewards in episode
    :param raw_rewards: raw rewards
    :param gamma: discount rate
    :return: discounted rewards
    """
    # Accumulate the discounted return from the end of the episode backwards,
    # then restore chronological order.
    discounted = []
    running_return = 0
    for reward in reversed(raw_rewards):
        running_return = reward + gamma * running_return
        discounted.append(running_return)
    discounted.reverse()
    return discounted
|
564428678e9ecb923926651a46d583827d11690c
| 151,971 |
def get_missing_object(wizard, obj_dict, display_name, fallback_func):
    """Get a prerequisite object.

    The ``obj_dict`` here is typically a mapping of objects known by the
    Wizard. When empty, ``fallback_func`` creates a new object; with
    exactly one entry that entry is used implicitly; with several, the
    user selects which one to use.

    Parameters
    ----------
    wizard : :class:`.Wizard`
        the wizard for user interaction
    obj_dict : Dict[str, Any]
        mapping of object name to object
    display_name: str
        the user-facing name of this type of object
    fallback_func: Callable[:class:`.Wizard`] -> Any
        method to create a new object of this type

    Returns
    -------
    Any :
        the prerequisite object
    """
    if not obj_dict:
        return fallback_func(wizard)
    if len(obj_dict) == 1:
        return next(iter(obj_dict.values()))
    choice = wizard.ask_enumerate(
        f"Which {display_name} would you like to use?",
        options=list(obj_dict),
    )
    return obj_dict[choice]
|
739a453a42c902f2a3dcbe89360fba78a2557cf4
| 279,557 |
def strip_prefix_from_items(prefix, items):
    """Strips out the prefix from each of the items if it is present.

    Args:
        prefix: the string you wish to strip from the beginning of each
            of the items.
        items: a list of strings that may or may not start with the
            prefix.

    Returns:
        items_no_prefix: a new list (same order) with the prefix removed
        from each item where it was present.
    """
    # str.removeprefix is a no-op when the prefix is absent, matching the
    # original startswith/slice logic exactly.
    return [item.removeprefix(prefix) for item in items]
|
ce509d99b410ee07469cbdb3250cd8768cfd5aed
| 391,750 |
def import_name(name):
    """
    Import an object by its qualified name.

    Can be used to load a module, submodule, class, function, or other
    object, including in situations where the regular `import` statement
    would fail.

    Parameters
    ----------
    name : str
        The fully qualified name of the object to import

    Returns
    -------
    object
        The imported object

    Notes
    -----
    Near-exact reimplementation of
    `IPython.utils.importstring.import_item`, provided so the behavior is
    available where `IPython` may not be installed.
    """
    if '.' not in name:
        # Bare module name: a plain top-level import.
        return __import__(name)
    mod_name, _, obj_name = name.rpartition('.')
    # built-in __import__ should be faster than importlib.__import__
    module = __import__(mod_name, fromlist=[obj_name])
    try:
        return getattr(module, obj_name)
    except AttributeError as e:
        raise ImportError(
            f'No module or object "{obj_name}" found in "{mod_name}"'
        ) from e
|
ca87ea4f6e3305657a36e9864416e6e5d797ca46
| 438,830 |
def nop(stack):
    """No-operation: return the given stack unchanged."""
    return stack
|
6a5cc77693adf325e6945e6548873f4d2af7b10a
| 273,427 |
def finish_present_answer_feedback(question_id):
    """
    Feedback shown when the user tries to answer another question while
    still in the middle of answering one.

    :param question_id: Q-id of the question currently being answered
    :return: Feedback text
    """
    return f"Please complete answering present question: QID ->{question_id}"
|
49d330e2becdb85e616f0c7af8ce76e590a389e5
| 605,317 |
import json
def get_counts_from_json(year):
    """Read 'phrase_urls_and_docs_yearly.json' and return the total number
    of phrases and the total number of documents for the given year.

    The file holds a JSON array of two objects: yearly phrase totals and
    yearly document totals.
    """
    with open('phrase_urls_and_docs_yearly.json', 'r') as fh:
        totals = json.load(fh)
    phrase_totals, doc_totals = totals[0], totals[1]
    # .get returns None for a year missing from either mapping.
    return phrase_totals.get(year), doc_totals.get(year)
|
456918bd5beede3e0f51af3e10603fe05888e773
| 232,089 |
def isOnGrid(p, grid_size):
    """
    Return True when position `p` (a coordinate pair) lies inside the
    square grid of side `grid_size`.
    """
    return 0 <= p[0] < grid_size and 0 <= p[1] < grid_size
|
43ade6c302fb7f2c0b6c743a00e351eaed21a6e5
| 134,433 |
from xml.dom.minidom import parse
import typing
def parse_element(stream: typing.BinaryIO):
    """Parse PSML content to determine the element.

    :param stream: a filelike object with the binary content of the file.
    :return: the element symbol following the IUPAC naming standard.
    :raises ValueError: when the element cannot be extracted.
    """
    try:
        document = parse(stream)
        spec_node = document.getElementsByTagName('pseudo-atom-spec')[0]
        label = spec_node.attributes['atomic-label'].value
    except (AttributeError, IndexError, KeyError) as exception:
        raise ValueError(f'could not parse the element from the PSML content: {exception}') from exception
    return label.capitalize()
|
639d4770cf73955204590c516040eeefbce51af3
| 397,065 |
def upload_media(request, form_cls, up_file_callback, instance=None, **kwargs):
    """
    Upload media files and return a list with information about each
    media: name, url, thumbnail_url, width, height.

    Args:
    * request object
    * form class, used to instantiate and validate form for upload
    * callback to save the file given its content and creator
    * extra kwargs will all be passed to the callback

    Returns the callback's result on a valid POST, the form's errors when
    validation fails, and None otherwise.
    """
    form = form_cls(request.POST, request.FILES)
    valid = form.is_valid()
    if request.method == 'POST' and valid:
        return up_file_callback(request.FILES, request.user, **kwargs)
    if not valid:
        # NOTE: errors are returned even for non-POST requests, mirroring
        # the original control flow.
        return form.errors
    return None
|
3e8fa5429cfd9f19b69525a4d7d97d414fb02820
| 662,369 |
def create_expr_string(clip_level_value):
    """Create the expression arg string to run AFNI 3dcalc via Nipype.

    :type clip_level_value: int
    :param clip_level_value: The integer of the clipping threshold.
    :rtype: str
    :return: The string intended for the Nipype AFNI 3dcalc "expr" arg
        inputs.
    """
    return "step(a-{0})".format(clip_level_value)
|
37b8219a16eff829cc80cbc3da282a565ad06ded
| 159,209 |
import re
def split_url_at_version_tag(url):
    """Split a DataONE REST API URL.

    Return: BaseURL, version tag, path + query. E.g.:
    http://dataone.server.edu/dataone/mn/v1/objects/mypid ->
    'http://dataone.server.edu/dataone/mn', 'v1', 'objects/mypid'
    """
    match = re.match(r"(.*?)(/|^)(v[123])(/|$)(.*)", url)
    if match is None:
        raise ValueError('Unable to get version tag from URL. url="{}"'.format(url))
    return match.group(1), match.group(3), match.group(5)
|
42ba4ca3a1b746012e90db0f5fb046374cb5e0e1
| 195,081 |
import re
def get_glyphorder_cps_and_truncate(glyphOrder):
    """Scan glyphOrder for names using the 'u(ni)XXXXXX' single-codepoint
    syntax. Non-matching names are compacted to the front of glyphOrder
    (original order preserved) and the list is truncated in place; the
    matching names are returned as a set of codepoints."""
    single_cp_re = re.compile(r'^u(?:ni)?([0-9a-fA-F]{4,6})$')
    codepoints = set()
    keep = 0  # next write slot for names that do not match
    for name in glyphOrder:
        match = single_cp_re.match(name)
        if match:
            codepoints.add(int(match.group(1), 16))
        else:
            glyphOrder[keep] = name
            keep += 1
    # Drop the tail left over after compaction.
    del glyphOrder[keep:]
    return codepoints
|
7763b704d45d5a05a829effec3dc49c447c4a71d
| 160,292 |
def ocr_url(client, image_url):
    """
    Call the Azure Computer Vision API to read the contents of a file via
    a public url, returning the raw result object.
    """
    return client.batch_read_file(image_url, raw=True)
|
76b70d9b7ce815fa352dd54043e77253ee40a6f4
| 165,021 |
import click
def color_setup_options(f):
    """Add the common click options for handling color scales to command *f*.

    NOTE(review): the options are applied to *f* in sequence below; click
    lists options in reverse application order in --help output -- verify
    the displayed ordering is the intended one.
    """
    f = click.option('--normalized_by_size', '-nbs',
                     help='Normalize the quantity used for color function by the number of atoms in each frame.',
                     show_default=True, default=False, is_flag=True)(f)
    f = click.option('--colormap', '-cmap',
                     help='Colormap used. Common options: gnuplot, tab10, viridis, bwr, rainbow.',
                     show_default=True, default='gnuplot')(f)
    f = click.option('--color_from_zero', '-c0',
                     help='Set the minimum to zero and only plot the excess.',
                     show_default=True, default=False, is_flag=True)(f)
    f = click.option('--color_label', '-clab',
                     help='The label for the color bar.',
                     default=None)(f)
    # Column index into the color file; 0-based.
    f = click.option('--color_column', '-ccol',
                     help='The column number used in the color file. Starts from 0.',
                     default=0)(f)
    # 'none' (string) is the sentinel for "no color source", not Python None.
    f = click.option('--color', '-c',
                     help='Location of a file or name of the properties in the XYZ file. \
                     Used to color the scatter plot for all samples (N floats).',
                     default='none', type=str)(f)
    return f
|
06b1d483ecc100eb55b4483d055c26062d4dc917
| 362,623 |
import re
def alias(s):
    """
    Generate an alias for s.

    Aliases contain only numbers, lowercase letters, and underscores.
    Each run of invalid characters collapses to a single underscore.
    """
    lowered = s.lower()
    return re.sub('[^a-z0-9]+', '_', lowered)
|
1320f543950d46e8298dc4354aeaa0c48f675be9
| 162,930 |
def get_int_df(adata, lr, use_label, sig_interactions, title):
    """Retrieves the relevant interaction count matrix.

    Args:
        adata: AnnData-like object whose ``.uns`` holds the CCI matrices.
        lr: specific ligand-receptor pair, or None to use all pairs.
        use_label: label key used to build the ``.uns`` matrix names.
        sig_interactions: if True use the significant-interaction matrix,
            otherwise the raw one.
        title: plot title, or None to generate a default.

    Returns:
        tuple: (interaction dataframe, title).
    """
    # `x is None` replaces the non-idiomatic `type(x) == type(None)` checks;
    # the two are equivalent since None is a singleton.
    if lr is None:  # No LR inputted, so just use all
        int_df = (
            adata.uns[f"lr_cci_{use_label}"]
            if sig_interactions
            else adata.uns[f"lr_cci_raw_{use_label}"]
        )
        if title is None:
            title = "Cell-Cell LR Interactions"
    else:
        int_df = (
            adata.uns[f"per_lr_cci_{use_label}"][lr]
            if sig_interactions
            else adata.uns[f"per_lr_cci_raw_{use_label}"][lr]
        )
        if title is None:
            title = f"Cell-Cell {lr} interactions"
    return int_df, title
|
957d6d400189d4804406dfdbf20aa7a196f04e67
| 169,845 |
def convertDictToValueNameList(valueDict):
    """Returns a list of name/value dictionaries. Not recursive. Scrubs empties."""
    result = []
    for key, value in valueDict.items():
        # Falsy values (0, '', None, ...) are scrubbed.
        if value:
            result.append({"name": key, "value": value})
    return result
|
6e587d3b2eb88c60038128686d96890b2a02bd07
| 560,132 |
def get_importmappings(mappings):
    """Build list of importmappings.

    Args:
        mappings: dict<string,string> src: dst
    Returns
        list<string> in the form 'M{src}={dst}' (empty for falsy input).
    """
    if not mappings:
        return []
    return [f"M{src}={dst}" for src, dst in mappings.items()]
|
b0afa33ddd6a9ed3a91f7219a982713af7eae92a
| 382,385 |
def command_condition(*args):
    """
    Parameters
    ----------
    args: list
        containing boolean values for:
        if a command has been specified explicitly
        if the dbms is oracle
        if the os is posix
        if windows authentication has been specified
    Returns
    -------
    tuple of values which can be used to determine the appropriate command
    to run an sql script
    """
    # Priority goes to the first True flag; -1 selects the default case.
    try:
        most_significant_argument = args.index(True)
    except ValueError:
        # No flag set: fall through to mssql + nt + mssql authentication.
        most_significant_argument = -1
    # NOTE(review): ('command') and ('oracle') are plain strings, not
    # 1-tuples (no trailing comma), so cases 0 and 1 return a str despite
    # the docstring promising a tuple -- confirm whether ('command',) /
    # ('oracle',) was intended before changing, as callers may rely on it.
    conditions = {
        0: ('command'),
        1: ('oracle'),
        2: ('mssql', 'posix'),
        3: ('mssql', 'nt', 'windows_authentication'),
        -1: ('mssql', 'nt', 'mssql_authentication')
    }
    return conditions[most_significant_argument]
|
47755a875109896676f995a52e8da7899aff0a81
| 458,234 |
def check_if_tuple_exists(cursor, table_name, kennziffer, year):
    """
    Checks if the tuple is already in the DB.

    :param cursor: cursor for the db to check
    :param table_name: name of the table to check in; must come from
        trusted code, never from user input (it is interpolated into the
        SQL text because drivers cannot parameterize identifiers)
    :param kennziffer: this value is checked
    :param year: this value is checked
    :return: True if the tuple exists in the table, False if it does not
    """
    # SECURITY: the values are passed as query parameters instead of being
    # interpolated into the SQL string, preventing SQL injection.
    query = ("SELECT * FROM %s WHERE `KENNZIFFER` = %%s AND `YEAR` = %%s"
             % table_name)
    cursor.execute(query, (kennziffer, year))
    return bool(cursor.fetchall())
|
ae6bf3c4dab0e98ef01c0e1fcee7de351f66b0cd
| 626,026 |
def calculate_distance_between_colors(color1, color2):
    """Return the component-wise average of two RGB color tuples.

    (Despite the name, this computes the midpoint between the colors.)
    """
    return tuple((color1[i] + color2[i]) / 2 for i in range(3))
|
17143102cd8f7270009d692d707c51c1b84b4528
| 452,270 |
def Hex8_shape_function_loc(xi_vec, node_local_coords):
    """Compute the Hex8 trilinear shape function value at xi_vec for the
    node whose local coordinates are node_local_coords."""
    # One (1 + xi_i * nc_i) factor per local axis, scaled by 1/8.
    factors = [1 + xi * nc for xi, nc in zip(xi_vec, node_local_coords)]
    return 0.125 * factors[0] * factors[1] * factors[2]
|
11fe45ef2ce6d2fc1d4145875397a00fae486d98
| 515,866 |
def invert_edge_predicate(edge_predicate):
    """Build an edge predicate that is the inverse of the given edge predicate.

    :param edge_predicate: An edge predicate
    :type edge_predicate: (pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool
    :rtype: (pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool
    """
    def negated(graph, u, v, k):
        """Return the logical negation of the wrapped predicate."""
        return not edge_predicate(graph, u, v, k)
    return negated
|
d218705e7c4a5101ddde487171f9c7007cd4dfde
| 155,535 |
def get_number_of_arrays(vtk_polydata):
    """Returns the names and number of arrays for a vtkPolyData object.

    Args:
        vtk_polydata (vtkPolyData): object / line to investigate the array.

    Returns:
        count (int): Number of arrays in the line.
        names (list): A list of names of the arrays.
    """
    names = []
    point_data = vtk_polydata.GetPointData()
    # Array indices are contiguous; GetArrayName returns None past the end.
    while True:
        name = point_data.GetArrayName(len(names))
        if name is None:
            break
        names.append(name)
    return len(names), names
|
7aae492d37a8312e083161b1824a26407af15296
| 141,655 |
def to_string(obj):
    """Decode bytes input to str; any other input is returned unchanged."""
    return obj.decode() if isinstance(obj, bytes) else obj
|
023917319cde93a8b95444f1f57f588c7224404d
| 373,717 |
def triangle_area(a, b, c):
    """Returns the area of a triangle using Heron's Formula."""
    # Semiperimeter, then Heron's product under a square root.
    semi = (a + b + c) / 2
    product = semi * (semi - a) * (semi - b) * (semi - c)
    return product ** 0.5
|
50bb74f6c43ee4e23c22a28108b4d4e0ffb1b069
| 451,775 |
import binascii
def npbytearray2hexstring(npbytearray, prefix="0x"):
    """Convert a NumPy array of uint8 dtype into a hex string.

    Example:
        npbytearray2hexstring(array([15, 1], dtype=uint8)) = "0x0f01"
    """
    hex_digits = binascii.hexlify(bytearray(npbytearray)).decode("utf-8")
    return prefix + hex_digits
|
0d2dd571b25261774d7aad5156dbdb2bf9ec6357
| 664,985 |
def merge(df1, df2, monthly=False):
    """
    Merge two dataframes on their indices with an outer join.

    :param df1: (pd.DataFrame) - dataframe to merge
    :param df2: (pd.DataFrame) - another dataframe to merge
    :param monthly: (boolean) - whether to groupby '1M' and average first
    :return merged_df: (pd.DataFrame) - combined df1 and df2
    """
    if monthly:
        df1 = df1.groupby('1M').mean()
        df2 = df2.groupby('1M').mean()
    merged_df = df1.merge(
        df2,
        how='outer',
        left_index=True,
        right_index=True,
    )
    return merged_df
|
04cf5cfeaa7207be4807c96843f35cfb1fd1b0ac
| 380,576 |
def dot(v1, v2):
    """Return the dot product of two equal-length vectors.

    Raises:
        ValueError: if the vectors differ in length. (The previous
        version silently returned 0 in that case, masking bugs.)
    """
    if len(v1) != len(v2):
        raise ValueError("vectors must have the same length")
    prod = 0
    for x, y in zip(v1, v2):
        prod += x * y
    return prod
|
569ad317cf8d875a0c0592a576c3987b02b54f95
| 673,683 |
def interpret(text):
    """
    Decode text so escape sequences (e.g. \\n or \\t) typed by the user
    are processed as expected. Important for the -f and -s options.

    Returns None for empty or None input.
    """
    if not text:
        return None
    # NOTE(review): unicode_escape decodes as latin-1 -- presumably the
    # inputs are ASCII; verify if non-ASCII text must round-trip.
    return bytes(text, "utf-8").decode("unicode_escape")
|
799423e5784f20b500848b41c43f735571cb0986
| 372,809 |
def length(string):
    """Return the length of *string*."""
    return len(string)
|
8af2fa272039bad4a8fa1d0158895f537aeddf8c
| 292,826 |
def strpos(string, search_val, offset=0):
    """
    Return the position of `search_val` in `string` at or after `offset`,
    or False when it does not occur there.
    """
    try:
        found = string[offset:].index(search_val)
    except ValueError:
        return False
    # index() is relative to the slice; shift back to the full string.
    return found + offset
|
f4c1d5226ae56201d276d7cd3b4b29edd799b10e
| 285,740 |
def is_child(child, parent):
    """
    Check if the child class is inherited from the parent.

    Args:
        child: child class
        parent: parent class

    Returns:
        boolean
    """
    bases = child.__bases__
    # Direct bases first, then recurse up each branch of the hierarchy.
    if any(base is parent for base in bases):
        return True
    return any(is_child(base, parent) for base in bases)
|
df72d8e754dab6db2f0d2edc0f399ab23b2cb6d1
| 70,672 |
def build_trainset_surprise(dataset):
    """
    Build a full training set from a Surprise Dataset so that it can be
    used for model training.
    """
    return dataset.build_full_trainset()
|
0c43284c11654e4bfce7f52caf21e01652be80bd
| 600,984 |
def get_last_folder_as_id(value):
    """Use the last part of the directories as test identifier."""
    # Tuples carry the full directory path; everything else passes through.
    return value[-1] if isinstance(value, tuple) else value
|
e1e90424f1c06ccf7d4a58d89fd0a24eb668144a
| 315,702 |
def string_to_tuple(version):
    """Convert version as string to tuple

    Example:
    >>> string_to_tuple("1.0.0")
    (1, 0, 0)
    >>> string_to_tuple("2.5")
    (2, 5)
    """
    return tuple(int(part) for part in version.split("."))
|
b987fbd25ef46839b24ab32282fc3403588a473c
| 164,819 |
def get_reverse_bits(bytes_array):
    """
    Reverse all bits in an arbitrary-length bytes array.
    """
    num_bytes = len(bytes_array)
    # Render the whole array as one zero-padded binary string, reverse
    # it, and convert back.
    as_int = int.from_bytes(bytes_array, byteorder='big')
    bit_str = format(as_int, '0{}b'.format(num_bytes * 8))
    return int(bit_str[::-1], 2).to_bytes(num_bytes, byteorder='big')
|
c4c64624a9fab5d9c564b8f781885a47984f1eaf
| 23,577 |
def get_sim_name(obj):
    """
    Get an in-simulation object name: ``__sim_name__`` is returned when it
    exists and is not None, otherwise the standard ``__name__`` attribute.

    Args:
        obj: an object to get name of

    Returns: object name
    """
    sim_name = getattr(obj, '__sim_name__', None)
    return sim_name if sim_name is not None else obj.__name__
|
80c7b29047f09d5ca1f3cdb2d326d1f2e75d996b
| 39,082 |
def LIFR(conf):
    """Look up the LIFR colour code in the given config."""
    return conf.get_color("colors", "color_lifr")
|
a7ea2f4303acb28ef9a8851a7cc72d2b9eee9406
| 202,474 |
def join_positions(pos1, pos2):
    """Merge two positions pairwise and return them as a list of strings.

    pos1: iterable object containing the first positions data
    pos2: iterable object containing the second positions data

    Example:
        >>> join_positions('ABCD','1234')
        ['A1', 'B2', 'C3', 'D4']
    """
    return ["".join(pair) for pair in zip(pos1, pos2)]
|
a8b430898ec807d74974ef27ebc6fe04349caa09
| 131,802 |
import random
def randtour(n):
    """Return a random permutation of the cities 0..n-1 (a random tour)."""
    tour = list(range(n))
    random.shuffle(tour)
    return tour
|
267da3ed15924f8bb5505e3d881714e4d1917bd4
| 505,907 |
def get_functions(template):
    """
    Extracts functions and environment variables from a template.
    Returns a dict mapping each serverless function name to the list of
    its environment variable names (globals first, then locals).
    Functions with no variables at all are omitted.
    """
    def _variable_names(mapping, *path):
        # Walk nested dict keys; any missing key means "no variables".
        try:
            for key in path:
                mapping = mapping[key]
            return list(mapping.keys())
        except KeyError:
            return []

    global_variables = _variable_names(
        template, "Globals", "Function", "Environment", "Variables")
    functions = {}
    for name, resource in template.get("Resources", {}).items():
        if resource.get("Type", "") == "AWS::Serverless::Function":
            local_variables = _variable_names(
                resource, "Properties", "Environment", "Variables")
            if local_variables or global_variables:
                functions[name] = global_variables + local_variables
    return functions
|
bbfb158c62a7f764041fe78804fd523fbc4b553a
| 438,056 |
def allocated_ticket(user):
    """Return True when the user holds at least one allocated raffle ticket."""
    ticket_count = user.raffleticket_set.count()
    return ticket_count > 0
|
fda884e79500b0b34d58d4224032e06aa87a8df4
| 234,934 |
import copy
def run_hook(name, workspace, hooks):
    """Runs all hooks registered under the given name.

    Parameters
    ----------
    name - str
        Name of the hook to invoke
    workspace - dict
        Workspace that the hook functions operate on
    hooks - dict of lists
        Mapping with hook names and callback functions
    """
    # Shallow-copy so listeners cannot rebind the caller's workspace dict.
    data = copy.copy(workspace)
    for listener in hooks.get(name, []):
        # Listeners mutate `data` in place and return nothing.
        listener(data)
    return data
|
e6caa07125a0e9d52ac3047d54537e3ef8a9361d
| 284,975 |
from typing import Optional
def get_pr_number(branch: str) -> Optional[int]:
    """Retrieve the pull request number from the branch name.

    Expects a segment layout like ``refs/pull/<number>/merge`` and returns
    ``<number>`` as an int, or None when no PR number is present.

    The original indexed blindly after a substring check ('pull/' in
    branch), so branches like "refs/mypull/5" raised ValueError and
    "pull/abc" raised from int(); both now return None.
    """
    parts = branch.split('/')
    try:
        candidate = parts[parts.index('pull') + 1]
    except (ValueError, IndexError):
        # No 'pull' segment, or nothing after it.
        return None
    return int(candidate) if candidate.isdigit() else None
|
f6467a0ead306ff13474761228d83c39d4734425
| 305,149 |
from random import randint
def rand_colour() -> int:
    """Pick a uniformly random 24-bit RGB colour."""
    return randint(0x000000, 0xFFFFFF)
|
764445570f16c60bad4ba6aa17977dcb09cb02a3
| 562,820 |
import glob
def get_photos(folder_path, pics_extension):
    """Return the list of photo paths with the given extension in a folder."""
    pattern = "{}/*.{}".format(folder_path, pics_extension)
    return glob.glob(pattern)
|
3d32d5b173d74a2425f1229ee5e45784c1283053
| 687,453 |
import collections
def tail(n, seq):
    """The last n elements of a sequence.

    >>> tail(2, [10, 20, 30, 40, 50])
    [40, 50]
    >>> tail(0, [10, 20])
    []

    See Also:
        drop
        take
    """
    try:
        # ``seq[-0:]`` would return the *whole* sequence, so n == 0 must be
        # special-cased to yield an empty slice (the original had this bug).
        return seq[-n:] if n else seq[:0]
    except (TypeError, KeyError):
        # seq is not sliceable (e.g. a generator): keep only the last n
        # items with a bounded deque.
        return tuple(collections.deque(seq, n))
|
4cf7fa4301f1989ea16a9d653d083fd4edb6bc76
| 630,917 |
def get_ros_type(type_string):
    """Split a "package/type" string and return (package, type)."""
    parts = type_string.split("/")
    if len(parts) != 2:
        raise Exception("Type lookup requires two parts, split by a slash: Package and type")
    return tuple(parts)
|
f52f1bdd71bb08724d155e9ae7ec6764740e21da
| 290,779 |
def format_duration(seconds):
    """Format a duration as ``[hours:]minutes:seconds``.

    Parameters
    ----------
    seconds : int
        Duration in seconds.

    Returns
    -------
    str
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    # Hours are shown only when there is at least one full hour.
    if hours > 0:
        return "{}:{:02d}:{:02d}".format(hours, minutes, secs)
    return "{}:{:02d}".format(minutes, secs)
|
b5b612f0bf613ec429d8570265531560cb03ba7d
| 391,200 |
def normalize_sql_str(string):
    """Normalizes the format of a SQL string for string comparison.

    Lowercases, collapses runs of spaces, trims spaces inside parens and
    before semicolons, normalizes quotes, and guarantees a trailing ';'.

    BUGFIX: the original loop did ``string.replace(' ', ' ')`` -- a no-op
    -- so ``while ' ' in string`` never terminated for any string
    containing a space. The intent was to collapse double spaces.
    """
    string = string.lower()
    while '  ' in string:
        string = string.replace('  ', ' ')
    string = string.strip()
    string = string.replace('( ', '(').replace(' )', ')')
    string = string.replace(' ;', ';')
    string = string.replace('"', '\'')
    if ';' not in string:
        string += ';'
    return string
|
4696d5758a3ba20c92e7c0128a71f98cd76333f9
| 381,826 |
import io
def slurp(path, encoding='UTF-8'):
    """
    Read file `path` and return its entire contents as a unicode string.
    By default assumes the file is encoded as UTF-8.

    Parameters
    ----------
    path : str
        File path to file on disk
    encoding : str, default `UTF-8`, optional
        Encoding of the file

    Returns
    -------
    The text read from the file as a unicode string
    """
    with io.open(path, mode='r', encoding=encoding) as handle:
        return handle.read()
|
1a0697b7c2858308a2ad4844d92620d3e7206a02
| 72,098 |
def fact_imp(n: int) -> int:
    """Factorial of n: n! = 1 * 2 * ... * n (iterative version)."""
    result: int = 1
    for factor in range(n, 1, -1):
        result *= factor
    return result
|
792fc9df08f2df0a6118ac37067f2f06f8471f9d
| 448,294 |
from typing import Any
import torch
def unsqueeze_scalar_tensor(data: Any) -> Any:
    """Promote a 0-dim tensor to 1-dim; pass anything else through unchanged."""
    is_scalar_tensor = isinstance(data, torch.Tensor) and data.dim() == 0
    return data.unsqueeze(0) if is_scalar_tensor else data
|
9af53ffa3d0a100672a3060969627fb6547f6a23
| 476,812 |
def _get_all_no_id(cursor, table_name, table_id, print_string, verbose, **kwargs):
"""Gets all elements from a table as a dictionary, excluding id"""
cursor.execute("""SELECT * FROM """ + table_name)
values = []
for row in cursor:
val = dict(zip(row.keys(), row))
val.pop(table_id)
values.append(val)
return values
|
4b2160b945128d30bc4ad15ab2b3ca9543e43645
| 569,014 |
import torch
def one_hot_vector(length, index, device=torch.device('cpu')):
    """
    Create a one-hot row vector of a given length with a 1 at `index`.

    Args:
        length: Total length of the vector.
        index: Index of the 1 in the vector.
        device: Torch device (GPU or CPU) to load the vector to.

    Returns: Torch tensor of size [1 x length], all zeros except a single
    1.0 at position `index`, allocated on `device`.
    """
    vec = torch.zeros(1, length, device=device)
    vec[0, index] = 1.0
    return vec
|
88dca8d63792b5a8eb58abbf12eb2800e28b3264
| 22,686 |
import re
def construct_filename(fn_format, **kwargs):
    """
    Populate a filename format string that contains n placeholder braces
    (e.g. {0}) with the values of the n given kwargs, in order.

    Raises an Exception when the kwarg count does not match the number of
    placeholders.
    """
    placeholders = re.findall(r"\{.*?\}", fn_format)
    if len(kwargs) != len(placeholders):
        raise Exception(
            "Format error: Given {0} kwargs, but "
            "filename format has {1} sets of "
            "braces.".format(len(kwargs), len(placeholders))
        )
    return fn_format.format(*kwargs.values())
|
83edcb1703814d891af6d13577b87efdce2fd02c
| 457,713 |
def changePlayer(player):
    """Returns the opposite player given any player"""
    return "O" if player == "X" else "X"
|
118d4c7ad04c16edebb1c8b829462327cccc4235
| 628,830 |
def _header_dict(project_id, auth_token):
"""
Create a header dict from the project ID and auth token
"""
return {
"accept": "application/json",
"project": str(project_id),
"Authorization": "Bearer " + str(auth_token),
}
|
9b13f5a3472d51303fd554e35a7462ae83a7c860
| 489,492 |
def get_review_rating(soup):
    """ Analyze "soup" to extract the numeric rating review.

    Args:
        soup -- bs4.BeautifulSoup from http request of book url.

    Return:
        rating review as an int (0 when the rating word is unknown)
    """
    word_to_value = {"One": 1, "Two": 2, "Three": 3, "Four": 4, "Five": 5}
    star_tag = soup.find("p", {"class": "star-rating"})
    # The second CSS class on the tag is the spelled-out rating word.
    rating_word = star_tag["class"][1]
    return int(word_to_value.get(rating_word, 0))
|
e4fa5e0ca2c290cfba952a1c3237da3cf6aa991e
| 231,639 |
def get_ngram_universe(sequence, n):
    """
    Computes the universe of possible ngrams given a sequence. Where n is equal to the length of the sequence, the resulting number represents the sequence universe.

    Example
    --------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_universe(sequence, 3)
    64
    """
    # With recurrence allowed the universe is k^n (SSA pg 68).
    alphabet_size = len(set(sequence))
    # Guard the genuinely huge cases rather than computing them.
    if alphabet_size > 10 and n > 10:
        return 'really big'
    return alphabet_size ** n
|
3dbfe1822fdefb3e683b3f2b36926b4bb066468f
| 705,907 |
def set_fig_in_pt_set(rectangle, value, points):
    """
    Set every tile listed in *points* (as (row, col) pairs) to *value*.

    Mutates *rectangle* in place and returns it. Rewritten iteratively:
    the original recursed once per point over list slices, which copies
    the tail on every call (O(n^2)) and hits the recursion limit on long
    point lists.
    """
    for point in points:
        rectangle[point[0]][point[1]] = value
    return rectangle
|
319dac7f6d14fc1e46073cce7e574937ca0a323c
| 427,962 |
def mate_after(aln):
    """ Check if mate is after (to the right) of aln

    Alignment A is after alignment B if A appears after B in a sorted BAM
    alignment. If A and B are on different chromosomes, the reference ID is
    compared.

    Args:
        aln (:obj:`pysam.AlignedSegment`): An aligned segment

    Returns:
        bool: True if alignment's mate is after, False otherwise
    """
    same_chromosome = aln.next_reference_id == aln.reference_id
    if same_chromosome:
        # Same chromosome: compare start coordinates.
        return aln.next_reference_start > aln.reference_start
    # Different chromosomes: compare reference IDs.
    return aln.next_reference_id > aln.reference_id
|
890a069bb54340e41579d532966bb608c52bcab6
| 384,961 |
def composite_identity(f, g):
    """
    Return a function with one parameter x that returns True if f(g(x)) is
    equal to g(f(x)). You can assume the result of g(x) is a valid input for f
    and vice versa.

    >>> add_one = lambda x: x + 1 # adds one to x
    >>> square = lambda x: x**2
    >>> b1 = composite_identity(square, add_one)
    >>> b1(0) # (0 + 1)^2 == 0^2 + 1
    True
    >>> b1(4) # (4 + 1)^2 != 4^2 + 1
    False
    """
    def commutes(x):
        return f(g(x)) == g(f(x))
    return commutes
|
df89773a3a01bf7d8b3ba5e33ec4866a0f365026
| 645,843 |
from datetime import datetime
def get_date(dt):
    """Parse an ISO-like timestamp string (optionally Z-suffixed) to datetime."""
    # Drop a trailing 'Z' (UTC marker) since strptime's format lacks it.
    stamp = dt[:-1] if dt.endswith('Z') else dt
    return datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%S.%f")
|
937f40766f3b224ca9430538ff08ec14f9711788
| 269,558 |
def get_contributors(raw):
    """
    Extract contributors from a Libris edition.

    Only contributors tagged with one of the valid roles (author, editor,
    translator, illustrator) are extracted; contributors without a role
    tag are treated as authors.

    @param raw: json object of a Libris edition
    @type raw: dictionary
    @return: dict mapping each valid role to a list of
        {"name": ..., "id": ...} dicts
    """
    valid_roles = ["author", "editor",
                   "translator", "illustrator"]
    agents = {role: [] for role in valid_roles}
    # BUGFIX: .get() yields None when no contribution is recorded; the
    # original then crashed iterating None. Fall back to an empty list.
    contribution = raw["mainEntity"]["instanceOf"].get("contribution") or []
    for agent in contribution:
        raw_role = agent.get("role")
        if not raw_role:
            ag_role = "author"
        else:
            # Role URIs end in the role name, e.g. ".../author".
            ag_role = raw_role[0]["@id"].split("/")[-1]
        if ag_role in valid_roles:
            person = agent["agent"]
            ag_id = person.get("@id") or ""
            ag_first = person.get("givenName") or ""
            ag_last = person.get("familyName") or ""
            ag_full = "{} {}".format(ag_first, ag_last)
            agents[ag_role].append({"name": ag_full, "id": ag_id})
    return agents
|
18e8b3737643e825b7e94a75603883086f5a37d1
| 45,672 |
def make_revisit_location_translations(query_metadata_table):
    """Return a dict mapping location revisits to the location being revisited, for rewriting."""
    translations = {}
    for location, _ in query_metadata_table.registered_locations:
        origin = query_metadata_table.get_revisit_origin(location)
        # Only genuine revisits (origin differs from the location) are mapped.
        if origin != location:
            translations[location] = origin
    return translations
|
17b9d431c340b46b972a9e5248d354396c512bd9
| 332,995 |
def get_default_ylabel(args):
    """Compute default ylabel for commandline args"""
    if args.transform == '':
        label = args.metric
    else:
        label = '{}({})'.format(args.transform, args.metric)
    if args.relative_to is not None:
        label += ' relative to %s' % args.relative_to
    return label
|
ccf414395536a64414b973ca0f3c30b2342cfa1b
| 298,402 |
def sign(x):
    """ Returns the sign of float x: 1. when abs(x) == x, else -1. """
    return 1. if abs(x) == x else -1.
|
24d5440a869e012be7f76527dc5459a792e7c28b
| 365,930 |
def get_emissions_info(df, feature_1, feature_2):
    """Get CO2 emissions data as absolute and relative numbers as well
    as per dwelling (mean).

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe from which to retrieve emission data.
        Needs to have columns named CO2_EMISSIONS_CURRENT
        and CO2_EMISS_CURR_PER_FLOOR_AREA.

    feature_1: str
        Emission feature, ideally "CO2_EMISSIONS_CURRENT"
        or "CO2_EMISS_CURR_PER_FLOOR_AREA" (y-axis).

    feature_2: str
        Feature by which to plot emissions (x-axis).

    Return
    ----------
    emissions_dict : dict
        Dictionary holding data on emissions (absolute, relative and mean)."""
    # Totals for the two fixed emission columns.
    total_emissions = df["CO2_EMISSIONS_CURRENT"].sum()
    total_emissions_by_area = df["CO2_EMISS_CURR_PER_FLOOR_AREA"].sum()
    # BUGFIX: the original wrote ``total = total_emissions = df[feature_1].sum()``,
    # silently overwriting total_emissions with the feature_1 total.
    total = df[feature_1].sum()
    # Get absolute, relative and mean emissions grouped by feature_2.
    emissions = df.groupby(feature_2)[feature_1]
    emissions_rel = emissions.sum() / total * 100
    emissions_abs = emissions.sum()
    emissions_mean = emissions.mean()
    emissions_by_dwelling = emissions_abs / df[feature_2].value_counts()
    # NOTE: the misspelled "emisisons by dwelling" key is kept for
    # backward compatibility with existing callers.
    emissions_dict = {
        "total emissions": total_emissions,
        "total emissions by area": total_emissions_by_area,
        "total": total,
        "relative emissions": emissions_rel,
        "absolute emissions": emissions_abs,
        "mean emissions": emissions_mean,
        "emisisons by dwelling": emissions_by_dwelling,
    }
    return emissions_dict
|
7786e31db22914a1ba250d9270cd6ff066543bfd
| 224,340 |
def CalculateNewPassRate(existing_pass_rate, existing_iterations,
                         incoming_pass_rate, incoming_iterations):
  """Incorporates a new pass rate into an existing one.

  Args:
    existing_pass_rate (float): The pass rate to merge into.
    existing_iterations (int): The number of iterations used to calculate the
        existing pass rate.
    incoming_pass_rate (float): The new pass rate to incorporate.
    incoming_iterations (int): The number of iterations used to calculate the
        incoming pass rate.

  Returns:
    (float): The new combined pass rate.
  """
  total_passes = (existing_pass_rate * existing_iterations +
                  incoming_pass_rate * incoming_iterations)
  total_iterations = existing_iterations + incoming_iterations
  return float(total_passes) / total_iterations
|
551ec00d10a8ce30f4ad04cab6bf65cc17f8f55f
| 301,793 |
def AUC_analysis(AUC):
    """
    Analysis AUC with interpretation table.

    :param AUC: area under the ROC curve
    :type AUC : float
    :return: interpretation result as str
    """
    try:
        if AUC == "None":
            return "None"
        # Upper-exclusive interpretation bands; anything >= 0.9 is Excellent.
        bands = ((0.6, "Poor"), (0.7, "Fair"), (0.8, "Good"), (0.9, "Very Good"))
        for upper_bound, verdict in bands:
            if AUC < upper_bound:
                return verdict
        return "Excellent"
    except Exception:  # pragma: no cover
        return "None"
|
433583977c2dc0a077f050691d31f17582d947a2
| 343,929 |
import pathlib
def data_file(module, *comps):
    """Return Path object of file in the data directory of an app."""
    module_dir = pathlib.Path(module.__file__).parent
    return module_dir.joinpath('..', 'data', *comps)
|
1ee38b920eacb1ac90ee260c73242fdf5d7db98f
| 687,755 |
def identity(x):
    """Return the argument unchanged (transparent pass-through)."""
    return x
|
8e443dfaaf1c0f0e9b07aaf8cf631a1dd5137b19
| 693,291 |
def ok(b):
    """
    :returns: 'ok' if b is True, else, return 'error'.
    """
    return "ok" if b else "error"
|
806e0dee4d4183c57bd72c2bc9166bd067172561
| 305,380 |
def _scalars_match(scalar_name1: str, scalar_name2: str) -> bool:
"""Return whether the two input scalars are considered to be the same.
For now, two input scalars are considered to be the same if they're the same scalar, if
one is ID and the other is String, or if one is ID and the other is Int. This may be
extended in the future.
Args:
scalar_name1: name of a scalar, may be built-in or user defined.
scalar_name2: name of a scalar, may be built-in or user defined.
Returns:
True if the two scalars are considered the same, False otherwise.
"""
if scalar_name1 == scalar_name2:
return True
scalar_names = frozenset((scalar_name1, scalar_name2))
if scalar_names == frozenset(("String", "ID")) or scalar_names == frozenset(("Int", "ID")):
return True
return False
|
fd4412278f9804eb8852f8c1a831f252ac55fa9e
| 490,132 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.