content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k)
---|---|---|
import hashlib
def encrypt(password):
"""Encrypt a plain text password.
:param password [str]: plain text password
:returns [str]: SHA256-encrypted password
"""
return hashlib.sha256(password.encode('utf-8')).hexdigest()
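# Illustrative usage (editor's addition, not part of the original snippet):
# the result is a 64-character hex digest, not reversible "encryption".
assert len(encrypt("hunter2")) == 64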
|
a6bc55a5e87023fe78d98fb0bb4d1ea34151cf51
| 520,686 |
def digit_arr(n):
"""
    Given a number, this function returns a list with the digit characters of the given number
"""
return list(str(n))
|
bc298053ee713b1ac3cdae7a9cf76f10e0904782
| 115,196 |
def _find_largest_segment(segments):
"""Find the largest segment given a list of start and end
indices of segments
Args:
segments: a list of start and end indices
Returns:
longest_segment: the start and end indices of the longest segment
"""
segment_list = zip(segments[:-1], segments[1:])
return max(segment_list, key=lambda x: x[1] - x[0])
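# Illustrative usage (editor's addition): consecutive pairs (0, 5), (5, 7)
# and (7, 20) are the candidate segments; the widest one is returned.
assert _find_largest_segment([0, 5, 7, 20]) == (7, 20)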
|
10168aac3b6378e1f3d25774a63f7a358b3a0c77
| 356,116 |
def isLowerHull(dx, dy):
"""
Checks if a line is part of the lower hull
:param float dx:
:param float dy:
"""
lowerHull = (dx < 0.0) or (dx == 0.0 and dy < 0.0)
return lowerHull
|
344c1997046744a6af6e4dae8d17162dee97e8e9
| 132,953 |
def select_scan_no_qc(scans_meta):
"""Select a scan from available scans in case there is not an available QC.
Args:
scans_meta: DataFrame containing the metadata for images to choose among
Returns:
DataFrame row containing selected scan
"""
# We choose the first scan (not containing repeat in name)
selected_scan = scans_meta[
~scans_meta.Sequence.str.contains("repeat", case=False, na=False)
]
if selected_scan.empty:
selected_scan = scans_meta
scan = selected_scan.iloc[0]
return scan
|
cdeb34c727ea4976db977f349a54d6b21c86f26a
| 631,936 |
def to_ascii20(name: str) -> str:
"""
Converts the name to max 20 ascii-only characters.
"""
# ascii characters have codes from 0 to 127
# but 127 is "delete" and we don't want it
return "".join([c for c in name if ord(c) < 127])[:20]
|
0cd96fe4fdc1c80d7a10768e196bc98f8efc9435
| 496,063 |
def _hasExplicitOid(store, table):
"""
Does the given table have an explicit oid column?
"""
return any(info[1] == 'oid' for info
in store.querySchemaSQL(
'PRAGMA *DATABASE*.table_info({})'.format(table)))
|
412e8991508e64c805537c92a072622661638c45
| 249,974 |
def pdir(obj):
"""
Puts dir of an object on stdout
"""
print(dir(obj))
return obj
|
7c7ec7c30ec7ccbf31efb87329b548310eec1edf
| 476,330 |
def linux_linebreaks(source):
"""Convert Windows CRLF, Mac CR and <br>'s in the input string to \n"""
result = source.replace('\r\n', '\n').replace('\r', '\n')
# There are multiple valid variants of HTML breaks
result = result.replace('<br>', '\n').replace('<br/>', '\n').replace('<br />', '\n')
return result
|
5a82b8f16c91dae539a69398b7010545d0bea817
| 346,145 |
def process_data(data, dict_):
"""This function process the data for the optimization process and returns the
different arrays for the upcoming optimization.
Parameters
----------
data: pandas.DataFrame
Data set to perform the estimation on. Specified
under dict_["ESTIMATION"]["file"].
dict_: dict
Estimation dictionary. Returned by grmpy.read(init_file)).
Returns
------
D: numpy.array
Treatment indicator
X1: numpy.array
Outcome related regressors of the treated individuals
X0: numpy.array
Outcome related regressors of the untreated individuals
Z1: numpy.array
Choice related regressors of the treated individuals
Z0: numpy.array
Choice related regressors of the untreated individuals
Y1: numpy.array
Outcomes of the treated individuals
Y0: numpy.array
Outcomes of the untreated individuals
"""
indicator = dict_["ESTIMATION"]["indicator"]
outcome = dict_["ESTIMATION"]["dependent"]
D = data[indicator].values
data1 = data[data[indicator] == 1]
data2 = data[data[indicator] == 0]
X1 = data1[dict_["TREATED"]["order"]].values
X0 = data2[dict_["UNTREATED"]["order"]].values
Z1 = data1[dict_["CHOICE"]["order"]].values
Z0 = data2[dict_["CHOICE"]["order"]].values
Y1 = data1[outcome].values
Y0 = data2[outcome].values
return D, X1, X0, Z1, Z0, Y1, Y0
|
61763bf3ee98f345a9ec7c84b1e2c6c5240de04e
| 515,269 |
def timestr(seconds):
"""
Convert seconds into seconds, minutes or hours
"""
if seconds < 60:
return '{:.2f} seconds'.format(seconds)
minutes = seconds / 60
if minutes < 60:
return '{:.2f} minutes'.format(minutes)
hours = minutes / 60
return '{:.2f} hours'.format(hours)
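# Illustrative usage (editor's addition):
assert timestr(45) == '45.00 seconds'
assert timestr(90) == '1.50 minutes'
assert timestr(5400) == '1.50 hours'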
|
da2041210e70c7f311af50651094575d80928234
| 200,924 |
def process_single(word):
"""
Process a single word, whether it's identifier, number or symbols.
:param word: str, the word to process
:return: str, the input
"""
if word[0].isnumeric():
try:
int(word)
except ValueError:
raise ValueError("Expression {} not valid".format(word))
return word
|
0a08fd6ed7402fb4351adff8fb7f59106fbe4ca8
| 682,833 |
def convert_data_types(df):
"""
Takes a pandas Dataframe (part of the black_friday_data_hack project) as input
and convert the data types of some of its features.
Arguments: data - DataFrame
Returns: same DataFrame with converted data types
"""
# Convert categorical features into numeric type
df['Age'] = df['Age'].map({'0-17': 15, '18-25': 21, '26-35': 30, '36-45': 40, '46-50': 48, '51-55': 53, '55+': 55})
df['Stay_In_Current_City_Years'] = df['Stay_In_Current_City_Years'].map({'0': 0, '1': 1, '2': 2, '3': 3, '4+': 4})
# Convert numeric features into categorical type
df['Occupation'] = df['Occupation'].astype('category')
df['Product_Category_1'] = df['Product_Category_1'].astype('category')
df['Product_Category_2'] = df['Product_Category_2'].astype('category')
df['Product_Category_3'] = df['Product_Category_3'].astype('category')
df['Marital_Status'] = df['Marital_Status'].astype('category')
# Convert Product_ID to numerical type by discarding the 1st letter 'P'
df['Product_ID'] = df['Product_ID'].map(lambda x: x[1:])
df['Product_ID'] = df['Product_ID'].astype('int64')
# Convert Purchase to numerical type
df['Purchase'] = df['Purchase'].astype('int64')
return df
|
1a24fe16bdf270c3bad2d56722269f4355cb3b96
| 99,412 |
def prettyprint_binding(binding, indent_level=0):
"""Pretty print a binding with variable id and data."""
indent = " " * indent_level
if not binding:
return indent + "<>"
return "%s<v%d : %r>" % (indent, binding.variable.id, binding.data)
|
e1e196afd53027bbab076585dd8b6b6464b578bc
| 15,531 |
def div2D(v1,v2):
"""Elementwise division of vector v1 by v2"""
return (v1[0] / v2[0], v1[1] / v2[1])
|
59415639346e89c5aed230caea807e8662c2f1ff
| 663,492 |
from pathlib import Path
import logging
import json
def make_viridian_sample_header(new_run_uuid, sp3_sample_name):
"""
Args:
new_run_uuid (uuid): The unique id for the current run
sp3_sample_name: The name of the SP3 sample
Returns:
        dict: An object containing a subset of information from the Viridian log
            file for the run. If no log file can be found, None is returned.
"""
log = None
vn = (
Path("/work/output")
/ new_run_uuid
/ "qc"
/ f"{sp3_sample_name}.json"
)
if not vn.is_file():
logging.error(f"The {vn} file could not be found")
return None
else:
log = {}
with open(vn) as f:
log["viridian_log"] = json.load(f)
if log:
return log
else:
logging.error(f"The {vn} file is empty")
return None
|
823af746833581ed5ffb9c23cbaca42bb2350135
| 473,530 |
import json
def json_formatter(subtitles):
"""
Serialize a list of subtitles as a JSON blob.
"""
subtitle_dicts = [
{
'start': start,
'end': end,
'content': text,
}
for ((start, end), text)
in subtitles
]
return json.dumps(subtitle_dicts)
|
3e1a28486425c5573084723acf63ed2ae8096bf0
| 508,133 |
def get_beta_y_cubic(x, mu):
"""
Evaluation of cubic polynomial form of LBCCs.
Solutions are pinned at Beta(mu=1)=1 and y(mu=1)=0.
:param x: parameter values as list/list-like
:param mu: value of mu
:return: Beta and y
"""
beta = 1. - (x[0] + x[1] + x[2]) + x[0] * mu + x[1] * mu ** 2 + x[2] * mu ** 3
y = -(x[3] + x[4] + x[5]) + x[3] * mu + x[4] * mu ** 2 + x[5] * mu ** 3
return beta, y
|
09508b8904b520600b6281d416e12d5b0f62588a
| 425,561 |
def _get_master_pod(release_name, pods):
"""Given a release name and a list of pods, returns the master pod of the release
Args:
release_name (str): Release name
pods (:obj:`V1PodList`): List of pods
Returns:
(:obj:`Pod`, optional): The master pod
"""
master_pod_name = "{}-mlbench-master-".format(release_name)
for pod in pods.items:
if master_pod_name in pod.metadata.name:
return pod
return None
|
2830d070ce9b3b2e99c2fcaf0654c49205fc4c44
| 301,122 |
def get_changed_data_from_form(form):
"""Returns a dictionary with keys of all changed fields
and values that are a dict with 'before' and 'after' keys
values of 'before' and 'after' are display values
for example:
{
'First name': {
'before': 'George',
'after': 'Jorge'
},
'Date of birth': {
'before': 'February/6/1791',
'after': '2/6/1791'}
}
Expects fields prefixed with 'existing_' in order to make that comparison
"""
changes = {}
existing_data_form = form.__class__(
form.raw_input_data, prefix='existing_', validate=True,
skip_validation_parse_only=True)
for field in form.iter_fields():
after = field.get_display_value()
existing_data_field = existing_data_form.fields[field.context_key]
before = existing_data_field.get_display_value()
if before != after:
changes[field.get_display_label()] = {
'before': before,
'after': after}
return changes
|
421d9eee266baa9d02bd4034b1b3a56dfaacf5b1
| 112,734 |
def read_bst_file(filename):
"""Reads data from sequence alignment test file.
Args:
filename (str): The file containing the edge list.
Returns:
list: The frequencies for each key.
"""
with open(filename, 'r') as f:
next(f) # Skip first line
freq = [int(x) for x in next(f).strip().split(',')]
return freq
|
a307d05202485acba99cad5f4c0152c0734c7d88
| 483,584 |
def ppi2ppm(val):
"""convert pixels per inch to pixels per mm"""
return val/25.4
|
df526f71fc99426e3122c2598180def565c520c9
| 394,038 |
from typing import List
from typing import Dict
from typing import Any
import json
import importlib_resources
def get_full_execution_list() -> List[Dict[str, Any]]:
"""
Get pre-set list of execution commands for the entire Cassini UVIS auroral dataset.
:return: List of commands, each in form of a dict.
"""
execution_list_file = importlib_resources.files("uvisaurorae.resources").joinpath( # type: ignore
"full_command_list.json"
)
with open(execution_list_file, "r") as f:
execution_list: List[Dict[str, Any]] = json.loads(f.read())
return execution_list
|
a9bbc7f8a1d63c502ab9a5778e7d5854d9dc7907
| 272,828 |
def rsa_decrypt(cipher: int, d: int, n: int) -> int:
"""
decrypt ciphers with the rsa cryptosystem
:param cipher: the ciphertext
:param d: your private key
:param n: your public key (n)
:return: the plaintext
"""
return pow(cipher, d, n)
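# Worked example (editor's addition) with a toy key pair: n = 3 * 11 = 33,
# e = 3, d = 7 (since 3 * 7 = 21 ≡ 1 mod φ(33) = 20). Encrypting 2 gives
# pow(2, 3, 33) = 8, and decrypting recovers the plaintext:
assert rsa_decrypt(8, 7, 33) == 2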
|
33822a0a683eca2f86b0e2b9b319a42806ae56cc
| 6,542 |
def get_indices(plot, speckeys):
"""
Returns indices of spectra we want to take a closer look at
Parameters
----------
plot : instance of InteractivePlot class
the interactive plot
speckeys : list
a list of all the indices contained within the current block
"""
subplots = plot.subplots
check_spec = speckeys[subplots]
return check_spec
|
56d103bd17cbd146eeeb9c15329884c9f4b581b8
| 451,275 |
def translate_bbox(bbox, translation):
"""
Translate given bbox by the amount in translation.
Parameters
----------
bbox: tuple
tuple of integers defining coordinates of the form
(x1, y1, x2, y2, x3, y3, x4, y4).
translation: tuple
tuple of integers defining the translation to apply on
the x axis (translation[0]) and the y axis (translation[1]).
"""
new_bbox = (bbox[0] + translation[0],
bbox[1] + translation[1],
bbox[2] + translation[0],
bbox[3] + translation[1],
bbox[4] + translation[0],
bbox[5] + translation[1],
bbox[6] + translation[0],
bbox[7] + translation[1])
return new_bbox
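# Illustrative usage (editor's addition): shift a unit square by (2, 3).
assert translate_bbox((0, 0, 1, 0, 1, 1, 0, 1), (2, 3)) == (2, 3, 3, 3, 3, 4, 2, 4)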
|
cfa3c9c3233ea5646b7213c1c396f2555dd829e7
| 204,111 |
def determine_header_length(trf_contents: bytes) -> int:
"""Returns the header length of a TRF file
Determined through brute force reverse engineering only. Not based
upon official documentation.
Parameters
----------
trf_contents : bytes
Returns
-------
header_length : int
The length of the TRF header, prior to the table portion of the
TRF file.
"""
column_end = b"\t\xdc\x00\xe8\t\xdc\x00\xe9\t\xdc\x00\xea\t\xdc\x00\xeb\t\xdc\x00"
header_length = trf_contents.index(column_end) + len(column_end)
# test = trf_contents.split(b"\t")
# row_skips = 6
# i = next(i for i, item in enumerate(test[row_skips::]) if len(item) > 3) + row_skips
# header_length_old_method = len(b"\t".join(test[0:i])) + 3
# if header_length_old_method != header_length:
# raise ValueError("Inconsistent header length determination")
return header_length
|
e8aa5110691e877c34f208af5bd508f0f5ec4760
| 703,469 |
from typing import Tuple
def is_multi_class(output_shape: Tuple[int, ...]) -> bool:
"""Checks if output is multi-class."""
# If single value output
if len(output_shape) == 1:
return False
return True
|
66726be66cce6f837b40287b75c547d28d709a74
| 699,374 |
def perforationskineffect(s_h=0, s_v=0, s_wb=0):
"""
Calculate perforation skin effect or s_p given...
s_h == horizontal skin effect
s_v == vertical pseudoskin
s_wb == well bore blockage effect
"""
s_p = s_h + s_v + s_wb
return(s_p)
|
5fcd3f6f383ea24ae50b9789bc525be46c4b4648
| 201,739 |
def mcd(a, b):
"""Restituisce il Massimo Comune Divisore tra a e b"""
    if a * b == 0: return a + b  # gcd(n, 0) == n (and gcd(0, 0) == 0)
if a == b:
return a
elif a > b:
return mcd(a - b, b)
else:
return mcd(b - a, a)
|
44dc2a0a05c6703f4333fd6241e79220a43540c5
| 264,315 |
import random
def random_string(length=4):
"""Generate a random string."""
random_str = ""
    for i in range(length):
        # pick a random lowercase letter (ASCII codes 97-122)
        random_integer = random.randint(97, 97 + 26 - 1)
        # randomly flip to uppercase (uppercase code = lowercase code - 32)
        flip_bit = random.randint(0, 1)
        random_integer = random_integer - 32 if flip_bit == 1 else random_integer
        random_str += chr(random_integer)
return random_str
|
44b3698da7b4e88c0c1221806b3163ef24688d29
| 528,507 |
def minimise_xyz(xyz):
"""Minimise an (x, y, z) coordinate."""
x, y, z = xyz
    m = max(min(x, y), min(max(x, y), z))  # the median of x, y and z
return (x-m, y-m, z-m)
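# Illustrative usage (editor's addition): subtracting the median means one
# component of the result is always zero.
assert minimise_xyz((2, 3, 5)) == (-1, 0, 2)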
|
78005610682605beec77a4bc2e7a84de876d7e8f
| 672,092 |
def height(tree):
"""Return the height of tree."""
if tree.is_empty():
return 0
else:
        return 1 + max(height(tree.left_child()),
                       height(tree.right_child()))
|
a469216fc13ed99acfb1bab8db7e031acc759f90
| 598 |
import torch
def create_padding_mask_from_length(length, maxlen=None):
"""
    Transform a sequence length matrix to padding mask
    Args:
        length: sequence length matrix
            :math:`(N)` where N is batch size
        maxlen: maximum mask length; inferred as ``length.max()`` if None
    Returns:
        - padding mask indicating length
            :math:`(N, L)` where N is batch size and L is maximum length in `length`
"""
bsz = length.size(0)
if maxlen is None:
maxlen = length.max()
index = torch.arange(maxlen).long().unsqueeze(0).repeat(bsz, 1).to(length)
padding_mask = index.ge(length.unsqueeze(1))
return padding_mask
|
aa4e4f60c203647169de2f529a1ed3657a2c2f7c
| 482,691 |
def third_measurer_I_P(uniqueTuplesDf):
"""
third_measurer_I_P: computes the measure I_P that counts the number of problematic tuples
(tuples participating in a violation of the constraints).
Parameters
----------
uniqueTuplesDf : dataframe
        the result of the query that finds all tuples that participate in a violation.
Returns
-------
int
number of tuples participating in a violation of the constraints.
"""
return len(uniqueTuplesDf)
|
0e5f1d4802fdd7b680a77fc227c48d45ca31e2e2
| 625,961 |
def parse_awards(movie):
"""
    Convert awards information to a dictionary for dataframe.
    Keeping only Oscar, BAFTA, Golden Globe and Palme d'Or awards.
    :param movie: movie dictionary
    :return: well-formatted dictionary with awards information
"""
awards_kept = ['Oscar', 'BAFTA Film Award', 'Golden Globe', 'Palme d\'Or']
awards_category = ['won', 'nominated']
parsed_awards = {}
for category in awards_category:
for awards_type in awards_kept:
awards_cat = [award for award in movie['awards'][category] if award['category'] == awards_type]
for k, award in enumerate(awards_cat):
parsed_awards['{}_{}_{}'.format(awards_type, category, k+1)] = award["award"]
return parsed_awards
|
75d722da2778ff37897cb5a3a7efa0af3aa447e7
| 446,476 |
def mapRanges(povpct, povlow, povhigh, paylow, payhigh): # 135, (133..150), (3.0..4.0)
"""
Evenly map povpct in [povlow..povhigh] to [paylow..payhigh].
    All arguments and return value are scaled percentages (*100).
See # comments to the right for an example's expected calcs.
This scheme may or may not match the final IRS technique.
"""
povrange = povhigh - povlow # 17 = 150 - 133
payrange = payhigh - paylow # 1.0 = 4.0 - 3.0
povincr = povpct - povlow # 2 = 135 - 133
pctincr = povincr / povrange # pct = 2 / 17
paypct = paylow + (pctincr * payrange) # 3.0 + (pct * 1.0)
return round(paypct, 2) # per IRS: round to nearest 100th
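# Worked example (editor's addition), matching the inline comments: 135 sits
# 2/17 of the way through [133..150], so the payback percentage is
# 3.0 + (2 / 17) * 1.0 = 3.1176..., rounded to 3.12.
assert mapRanges(135, 133, 150, 3.0, 4.0) == 3.12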
|
4a29b9f9336c894cc7dc035471e2109eb8ec2f3e
| 359,556 |
def createYangHexStr(textString):
""" Convert plain hex string into yang:hex-string """
data = textString[0:2]
i = 2
while i < len(textString):
data = data + ':' + textString[i:i+2]
i = i + 2
return data
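# Illustrative usage (editor's addition): byte pairs are joined with colons.
assert createYangHexStr('deadbeef') == 'de:ad:be:ef'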
|
9e081e50efca5331f1bbad924c61549c6835e8e3
| 47,872 |
def lerp2d(a: tuple, b: tuple, r: float) -> tuple:
"""Returns a point interpolated from a to b, at r."""
return (
a[0] + (b[0] - a[0]) * r,
a[1] + (b[1] - a[1]) * r
)
|
03efab57d09f12a313ac8f649ea461931af0f979
| 52,770 |
from typing import List
from typing import Tuple
from pathlib import Path
import fnmatch
def get_ignore_regex(ignore_rules: List[Tuple[str, str]], regexes: List[str], logfile: Path) -> str:
"""Combine together regex for the given log file using file specific and global ignore rules."""
regex_list = regexes[:]
for record in ignore_rules:
files_glob, regex = record
if fnmatch.filter([logfile.name], files_glob):
regex_list.append(regex)
return "|".join(regex_list)
|
2deece35a191018bcbbcfe279514b7ded440dd04
| 434,188 |
def min_sec(secs):
"""
Takes an epoch timestamp and returns string of minutes:seconds.
:param secs:
Timestamp (in seconds)
>>> import time
>>> start = time.time() # Wait a few seconds
>>> finish = time.time()
>>> min_sec(finish - start)
'0:11'
"""
secs = int(secs)
    return '%d:%02d' % (secs // 60, secs % 60)
|
6c06866c0c44d7662b7b655798fd2f92357af14a
| 629,704 |
import ast
def get_function_details(functions):
"""
Create a generator for function name/docstring tuples.
:param functions: Iterable of function objects from source code
    :return: Generator of tuples of function names and docstrings
"""
return ((f.name, ast.get_docstring(f)) for f in functions)
|
d75d1f122da9dde3fbbbc42c3c51e33f298af5bb
| 319,122 |
def gc(seq):
""" Calculates GC content
Args:
seq (str): non empty
Returns:
GC content (float)
Raises:
ValueError: if the sequence is None or empty str
"""
if not seq:
raise ValueError("Input must be non-empty string")
g = seq.count("G")
c = seq.count("C")
return (g + c) / len(seq)
|
4d86ff88dc13f9321c6e3837955614bc58633a01
| 490,365 |
import tokenize
def is_comment_token(token1):
"""
Returns True if the token1 is a comment token, False otherwise.
Since there is an incompatibility between Python versions,
this function resolves it.
Some more information about incompatibility:
Python 3.6:
TokenInfo(type=59 (ENCODING), string='utf-8', start=(0, 0), end=(0, 0), line='')
https://docs.python.org/3.6/library/token.html
Python 3.7:
TokenInfo(type=57 (ENCODING), string='utf-8', start=(0, 0), end=(0, 0), line='')
https://docs.python.org/3.7/library/token.html
Changed in version 3.7: Added COMMENT, NL and ENCODING tokens.
"""
assert isinstance(token1, tokenize.TokenInfo)
result = False
if "type=54 (COMMENT)" in str(token1):
# Python 3.4.4
result = True
elif "type=57 (COMMENT)" in str(token1):
# Python 3.6.9
result = True
elif "type=55 (COMMENT)" in str(token1):
# Python 3.7.4
result = True
elif " (COMMENT)" in str(token1):
# a generic solution since Python changes it in every version.
result = True
return result
|
e312dd859ba8d9c72266e02fc30af59e3f691b6b
| 32,459 |
def device_mapper(os_type: str, proto: str = "netmiko"):
"""
map an os type to a netmiko device_type
:params os_type: type str
:params proto: type str, default "netmiko"
:returns: device_type string
"""
if proto == "netmiko":
device_types = {
"ios": "cisco_ios",
"iosxr": "cisco_xr",
"iosxe": "cisco_xe",
"nxos": "cisco_nxos",
"eos": "arista_eos",
}
try:
result = device_types[os_type]
except KeyError:
return os_type
elif proto == "netconf":
device_types = {
"csr": "csr",
"iosxr": "iosxr",
"iosxe": "iosxe",
"nxos": "nexus",
"junos": "junos",
}
try:
result = device_types[os_type]
except KeyError:
return "default"
else:
result = os_type
return result
|
81ad4c4dd86c7e6930cf0fb070681872783a5fb8
| 14,495 |
def block_summary(block):
"""
Return a summary of a block
"""
return f"{block['blockhash']} {block['timestamp']} {block['proof']}"
|
3a6499ad7a432185a459e132c0c554cd2d5944b5
| 342,777 |
def can_view_group(group, user, level="member"):
"""
See if the user is allowed to view the group, or if they own it
Params:
=======
group: EAPGroup instance, as obtained from EAPGroup.objects.get(pk)
user: EAPUser instance, as returned from request.user
level: str, either "member" or "owner", to select level of permission to view
Returns:
=======
True if user is member / owner of the group.
False otherwise.
"""
if level not in ["owner", "member"]:
raise RuntimeError("'level' parameter should be 'owner' or 'member'")
if not hasattr(user, "all_groups"):
# probably AnonymousUser
return False
if level == "owner" and group.owner and group.owner == user:
return True
elif level == "member" and group in user.all_groups.get_queryset():
return True
return False
|
8aa0fae5a654abe537e02ba06a255764ad2426fd
| 261,246 |
def imgarea(im, zeroel=None):
"""Returns the area of the image, as determined by checking against the 'zeroel'.
i.e., the number of pixels that are != zeroel.
If 'zeroel' is not given, it's determined from the image type.
"""
zeros = {'L':0, '1':0, 'I':0, 'F': 0.0, 'RGB': (0,0,0), 'RGBA': (0,0,0,0)}
if zeroel is None:
zeroel = zeros[im.mode]
nz = sum(1 for p in im.getdata() if p != zeroel)
return nz
|
37fb47f1f96c623623695952d6624b0c52cb756f
| 142,394 |
def flattenjson(obj, delim="__"):
"""
Flattens a JSON object
Arguments:
    obj -- dict, the object to be flattened (nested dicts are flattened recursively)
delim -- string, delimiter for sub-fields
Returns:
The flattened JSON object
"""
val = {}
for i in obj:
if isinstance(obj[i], dict):
ret = flattenjson(obj[i], delim)
for j in ret:
val[i + delim + j] = ret[j]
else:
val[i] = obj[i]
return val
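# Illustrative usage (editor's addition): nested keys are joined with the delimiter.
assert flattenjson({'a': {'b': 1}, 'c': 2}) == {'a__b': 1, 'c': 2}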
|
470719e9570e20ee1ddc93e4d684f04981825f21
| 59,382 |
def write_metadata_to_file(filename, cs_id, process,
units_e, units_sigma, ref='\\N',
lhs_a='\\N', lhs_b='\\N',
rhs_a='\\N', rhs_b='\\N', threshold='-1', wavelength='-1',
lhs_v=-1, rhs_v=-1, lhs_j=-1, rhs_j=-1,
background='\\N', lpu='-1', upu='-1'):
"""Given metadata for a cross section data set,
write the metadata to a file in the correct format for a NEPC database.
Parameters
----------
filename: str
Name of the file where values of ``data_array`` should be written.
cs_id: int
``cs_id`` corresponding to the cross section in the database.
process: str
The short name corresponding to the electron scattering process in the
database.
units_e: float
The units, in eV, of the electron energies.
units_sigma: float
The units, in :math:`m^2`, of the cross sections.
ref: str
The short name for the reference corresponding to the dataset.
lhs_a: str
The short name for the lhs_a state for the process.
lhs_b: str
The short name for the lhs_b state.
rhs_a: str
The short name for the rhs_a state.
rhs_b: str
The short name for the rhs_b state.
threshold: float
The threshold electron energy for the process.
wavelength: float
The wavelength of the photon associated with the process, if applicable.
lhs_v: int
The vibrational energy associated with the LHS state, if applicable.
rhs_v: int
The vibrational energy associated with the RHS state, if applicable.
lhs_j: int
The rotational energy associated with the LHS state, if applicable.
rhs_j: int
The rotational energy associated with the RHS state, if applicable.
background: str
Background information for the cross section data.
lpu: float
Lower percent uncertainty of the cross section data.
upu: float
Upper percent uncertainty of the cross section data.
Returns
-------
: int
The next cs_id to use.
"""
write_met = open(filename, "x")
write_met.write(
"\t".join(
('cs_id',
'process',
'units_e',
'units_sigma',
'ref',
'lhs_a',
'lhs_b',
'rhs_a',
'rhs_b',
'threshold',
'wavelength',
'lhs_v',
'rhs_v',
'lhs_j',
'rhs_j',
'background',
'lpu',
'upu')) + "\n")
write_met.write(
"\t".join(
(str(cs_id),
process,
str(units_e),
str(units_sigma),
ref,
lhs_a,
lhs_b,
rhs_a,
rhs_b,
str(threshold),
str(wavelength),
str(lhs_v),
str(rhs_v),
str(lhs_j),
str(rhs_j),
background,
str(lpu),
str(upu))))
write_met.close()
return cs_id + 1
|
aa43876c53664e0c8d66bfa5d195750d186695a1
| 227,765 |
def batch_size(batch):
""" Calculates the size of a batch.
Args:
batch: A list of dictionaries representing mutations.
Returns:
An integer specifying the size in bytes of the batch.
"""
size = 0
for mutation in batch:
size += len(mutation['key'])
if 'values' in mutation:
for value in mutation['values'].values():
size += len(value)
return size
|
d141914a01959da54d9e431f8ff8ba4a7fcaabf8
| 571,175 |
def _check_handler_present(logger, handler_type, *compare):
"""
Check if a handler of given and with given attributes
has been added to the logger already.
:param logging.Logger logger: logger whose handlers to check
:param type handler_type: handler class to check for
:param [(str, obj)] compare:
list of (attribute, value) items to check the handler against
(e.g., (stream, sys.stderr) will check if handler.stream == sys.stderr)
:returns: True if such a handler is configured, False otherwise
:rtype: bool
"""
for handler in logger.handlers:
if isinstance(handler, handler_type):
attrs_match = True
for attr, value in compare:
try:
if getattr(handler, attr) != value:
attrs_match = False
break
except AttributeError:
continue
if attrs_match:
return True
return False
|
326df0a397e2e5d22764702e6035ea8b1f5afbd2
| 241,105 |
def flatten_xml_string(xml_string):
"""
Flatten the xml string so that the output is in one line and without spaces/tabs etc.
This is because otherwise the request is not valid and gets rejected.
"""
return ''.join([line.strip() for line in xml_string.split('\n')])
|
8a35c03b972b6d8bd6a3a1686196703ff4b6a9ae
| 595,279 |
def _extract_prop_set(line):
"""
Extract the (key, value)-tuple from a string like:
>>> 'set foo = "bar"'
:param line:
:return: tuple (key, value)
"""
    token = ' = "'
    line = line[4:]  # strip the leading 'set '
    pos = line.find(token)
    return line[:pos], line[pos + len(token):-1]
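# Illustrative usage (editor's addition), matching the docstring example:
assert _extract_prop_set('set foo = "bar"') == ('foo', 'bar')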
|
2bdb491c14308ca1b634e7d840893af3c698fa60
| 690,946 |
import math
def get_bar_for_progress(full_width, progress):
"""get_bar_for_progress - returns string with a width of 'full_width'
which illustrates specific progress value.
"""
number_of_equal_signs = int(
math.ceil(progress * float(full_width - 2) / 100)
)
return "[{0}{1}{2}]".format(
"=" * number_of_equal_signs,
">" if number_of_equal_signs < full_width - 2 else "",
" " * (full_width - 3 - number_of_equal_signs)
)
|
41a9236f7f9ab52768d3fa465e6d8c37a68c0b3d
| 342,251 |
def obtainPredictedClasses(outputSceneLabel):
"""
    Function to obtain the indices of the 10 highest-scored scene labels
    :param outputSceneLabel: Tensor obtained from the network
:return: numpy array 1x10 with scene labels indices
"""
# Obtain the predicted class by obtaining the maximum score.
_, pred = outputSceneLabel.topk(10, 1, largest=True, sorted=True)
idx = pred.cpu().numpy()[0]
return idx
|
a23e11db4389329199981216294f3ea89e48937d
| 231,687 |
def match_value(df, col1, x, col2):
""" Match value x from col1 row to value in col2. """
return df[df[col1] == x][col2].values[0]
|
6735924eb39792f6c61ea193c87b31638ef4589b
| 575,145 |
def standardize_positions(s: str) -> str:
"""Standardizes position names
Args:
s (str): the position string
Returns:
str: the standardized string
"""
mapping = {'Def': 'DST', 'Defense': 'DST', 'DEF': 'DST', 'def': 'DST', 'dst': 'DST', 'PK': 'K', 'Kicker': 'K'}
    std = mapping.get(s, s)
return std
|
44261362e6846b3310dd0b3078cfdb9aacdb6182
| 601,242 |
def generate_test_uuid(tail_value=0):
"""Returns a blank uuid with the given value added to the end segment."""
return '00000000-0000-0000-0000-{value:0>{pad}}'.format(value=tail_value,
pad=12)
|
f113eef54eba9d8d1fb5234c87af3cb6290ea25e
| 701,487 |
def points(a, b, answer_given):
"""Check answer. Correct: 1 point, else 0"""
true_answer = a*b
if answer_given == true_answer:
print('Correct!')
return 1
else:
print('Sorry! Correct answer was: {:d}'.format(true_answer))
return 0
|
2e0ac980b6cc140dd4cd812bd59f7e25cd12d865
| 19,289 |
def character_tokenizer(text: str) -> list:
"""Tokenize by single characters, keeping whitespace.
Args:
text: The text to tokenize.
Returns:
list: A list of character tokens.
"""
return [char for char in text]
|
c6806b488e2d39e2516baf7956d09bebc948a25f
| 128,688 |
def greenshieldFlow(speed, capacity, free_flow_speed):
"""
Returns the flow of a link following the Fundamental Diagram (Greenshield's Model).
Parameters
----------
speed: miles/hr or km/hr
capacity: link's capacity in veh/hr
free_flow_speed: link's free flow speed in miles/hr or km/hr
Returns
-------
flow: resulting flow
"""
if speed > free_flow_speed or capacity < 0:
return 0
x = 4 * capacity * speed / free_flow_speed - 4 * capacity * (speed ** 2) / (free_flow_speed ** 2)
return x
|
2d753d367f7c37d3751327b5fcbb8a79aa54354d
| 605,857 |
import random
def random_location(x_upper_bound = 100, y_upper_bound = 100):
""" Returns a bounded, random Location.
"""
assert x_upper_bound > 0 and y_upper_bound > 0
x = random.randint(0, x_upper_bound - 1)
y = random.randint(0, y_upper_bound - 1)
return (x, y)
|
e9f8b3d9c6abc6461203dc77869a5467545911f7
| 458,777 |
import re
def string_remove_regex(value,ex):
""" remove sub-string of string that matches regex
"""
return re.sub(ex, '', value)
|
f834469992000df3b7bdea773ae15785c78c9ddf
| 178,023 |
def get_stats(equipment):
"""
Adds up the stats of the equipment
:param equipment: A list with multiple lists inside - the inner lists are the equipment stats
:return: Total cost, dmg, and armor points
"""
dmg = 0
armor_points = 0
cost = 0
for thing in equipment:
dmg += thing[1]
armor_points += thing[2]
cost += thing[0]
return cost, dmg, armor_points
|
457c6cf0f9168fad9bf91205f21b2c0a51f0d3c0
| 388,949 |
def _clean_up_path(path):
"""Strips an initial "./" and any trailing slashes from 'path'."""
if path.startswith("./"):
path = path[2:]
return path.rstrip("/")
|
66f2e3b2794d8ebadac0f51bd30d49aa6e7416cc
| 455,854 |
import hashlib
import struct
import base64
def volume_label_hash( s ):
"""
Linux volume labels are typically limited to 12 or 16 characters while the strings we want to
use for them are longer, usually a namespaced role name with additional data at the end. This
hash function returns a 12-character string that is reasonably representative of the input
string.
>>> volume_label_hash( 'hannes_spark-master__0' )
'i0u77fnocoo'
>>> volume_label_hash( '' )
'PZ2FQWP48Ho'
>>> volume_label_hash( ' ' )
'oIf03JUELnY'
>>> volume_label_hash( '1' )
'yQYSos_Mpxk'
"""
    h = hashlib.md5( s.encode( 'utf-8' ) )
h = h.digest( )
assert len( h ) == 16
hi, lo = struct.unpack( '!QQ', h )
h = hi ^ lo
h = struct.pack( '!Q', h )
assert len( h ) == 8
    h = base64.urlsafe_b64encode( h ).decode( 'ascii' )
assert h[ -1 ] == '='
return h[ :-1 ]
|
050ba25238c1f99fc9d0d72e435f76ea077b917d
| 375,613 |
import math
def _ln_binomial(n, k):
"""log of binomial coefficient function (n k), i.e., n choose k"""
if k > n:
raise ValueError
if k == n or k == 0:
return 0
if k * 2 > n:
k = n - k
return math.lgamma(n + 1) - math.lgamma(k + 1) - math.lgamma(n - k + 1)
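# Worked example (editor's addition): C(5, 2) = 10, so the result should be ln(10).
assert abs(math.exp(_ln_binomial(5, 2)) - 10.0) < 1e-9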
|
e4573a3223f2bcadb290b3378b70be1ef8af683b
| 591,359 |
def to_text(source):
""" Generates a text value (an instance of text_type) from an arbitrary
source.
* False and None are converted to empty strings
* text is passed through
* bytes are decoded as UTF-8
* rest is textified via the current version's relevant data model method
"""
if source is None or source is False:
return u''
if isinstance(source, bytes):
return source.decode('utf-8')
return str(source)
|
b471e03eebd4a3e9c41742d555123e5958b95013
| 611,419 |
def calculate(x: int, y: int = 1, *, subtract: bool = False) -> int:
"""Calculates the sum (or difference) of two numbers.
Parameters:
`x` : int
The first number
`y` : int, optional
The second number (default is 1)
    `subtract` : bool, optional
Whether to perform subtraction. (Default is `False`.)
Returns:
int
"""
if subtract:
return x - y
else:
return x + y
|
8f46dadf73536167c23e17d3b5da0f57fd084d7e
| 679,757 |
def trim_to_constant_numtimesteps(ds):
"""
Trims a :class:`DataSet` so that each circuit's data comprises the same number of timesteps.
Returns a new dataset that has data for the same number of time steps for
every circuit. This is achieved by discarding all time-series data for every
circuit with a time step index beyond 'min-time-step-index', where
'min-time-step-index' is the minimum number of time steps over circuits.
Parameters
----------
ds : DataSet
The dataset to trim.
Returns
-------
DataSet
The trimmed dataset, obtained by potentially discarding some of the data.
"""
trimmedds = ds.copy_nonstatic()
numtimes = []
for circuit in ds.keys():
numtimes.append(ds[circuit].number_of_times)
minnumtimes = min(numtimes)
for circuit in ds.keys():
times, series = ds[circuit].counts_as_timeseries()
trimmedtimes = times[0:minnumtimes]
trimmedseries = series[0:minnumtimes]
trimmedds.add_series_data(circuit, trimmedseries, trimmedtimes, aux=ds.auxInfo[circuit])
trimmedds.done_adding_data()
return trimmedds
|
d88c535d6d0ae34d3d136b4606ff19c8c413ef80
| 593,508 |
def create_dict_from_guard_rows(col_dict):
"""Create a dictionary of lists from a dictionary of guard values
Parameters
----------
col_dict : `dict`
The dictionary with the guard values
Returns
-------
ret_dict : `dict`
The dictionary we created
"""
ret_dict = {}
for key in col_dict.keys():
ret_dict[key] = []
return ret_dict
|
3684b1ec801456cbae7ac09f223c4616a28b7f1b
| 456,636 |
def powerlaw(x, scale, power, return_components=False):
"""
Defines a power law
Returns
-------
scale * x**power
"""
    # note: return_components is accepted but unused in this implementation
    return scale * x**power
|
04f047bbcdc34a87e788bd7d9727d9cde32d67c7
| 595,090 |
def _files_header_size_calc(files, farc_type):
"""Sums the size of the files header section for the given files and farc_type data."""
size = 0
for fname, info in files.items():
size += len(fname) + 1
size += farc_type['files_header_fields_size']
return size
|
683edc524a2f353cae8c974f5c72616bddafa27a
| 65,263 |
from typing import Mapping
def validate_schema(assert_,
data,
schema,
optional=None,
on_unexpected_cols='raise'):
"""Check if a data frame complies with a schema
Parameters
----------
data : pandas.DataFrame
Data frame to test
schema : list or dict
List with column names (will only validate names)
or dict with column names as keys, dtypes as values (will validate
names and dtypes)
optional : list, optional
List of optional column names, no warns nor errors if they appear
on_unexpected_cols : str, optional
One of 'warn', 'raise' or None. If 'warn', it will warn on extra
columns, if 'raise' it will raise an error, if None it will completely
ignore extra columns
"""
if on_unexpected_cols not in {'warn', 'raise', None}:
raise ValueError("'on_unexpected_cols' must be one of 'warn', 'raise' "
"or None")
optional = optional or {}
cols = set(data.columns)
expected = set(schema)
missing = expected - cols
unexpected = cols - expected - set(optional)
msg = '(validate_schema) Missing columns {missing}.'.format(
missing=missing)
assert_(not missing, msg)
if on_unexpected_cols is not None:
msg = ('(validate_schema) Unexpected columns {unexpected}'.format(
unexpected=unexpected))
caller = assert_ if on_unexpected_cols == 'raise' else assert_.warn
caller(not unexpected, msg)
# if passing a mapping, schema is validated (even for optional columns)
for schema_to_validate in [schema, optional]:
if isinstance(schema_to_validate, Mapping):
# validate column types (as many as you can)
dtypes = data.dtypes.astype(str).to_dict()
for name, dtype in dtypes.items():
expected = schema_to_validate.get(name)
if expected is not None:
msg = (
'(validate_schema) Wrong dtype for column "{name}". '
'Expected: "{expected}". Got: "{dtype}"'.format(
name=name, expected=expected, dtype=dtype))
assert_(dtype == expected, msg)
return assert_
|
068c5784a8ea4721fbd1fcdc4fd4328b6bfbd099
| 599,834 |
def _get_manager_log_hashes(manager, logger):
"""Get a dict mapping file paths to hashes for logs on the specified
manager.
File paths will be relative to the root of the logs (/var/log/cloudify).
journalctl.log and supervisord.log will be excluded as they can't be kept
from changing.
"""
    logger.info('Checking log hashes for %s', manager.private_ip_address)
log_hashes = {
f.split()[1][len('/var/log/cloudify'):]: f.split()[0]
for f in manager.run_command(
            'find /var/log/cloudify -type f -not -name \'journalctl.log\''
            ' -not -name \'supervisord.log\''
' -exec md5sum {} + | sort',
use_sudo=True
).stdout.splitlines()
}
logger.info('Calculated log hashes for %s are %s',
manager.private_ip_address,
log_hashes)
return log_hashes
|
bb3d3bfb85ee923eacebdcf8051b97507165d36a
| 55,815 |
def Repository_exists(Command):
"""
Tests if YUM Repo with specific Name exists and is enabled:
- **repo** - repo name to look for
**returns** - True if String is found
"""
def f(repo):
return (repo in Command.check_output("yum repolist"))
return f
|
c32ecd98156e24246ccde393db431e2559254288
| 209,801 |
def dict2cfgString(dictionary, separator="\n", assigner="="):
"""
Converts a dictionary into a string
Parameters
----------
dictionary : dict
The dictionary to be transformed.
separator : str, optional
The character to be used to separate individual
entries. The default is "\n".
assigner: str, optional
The character to represent the assignment from the key
to the value. The default is "=".
Returns
-------
str
"""
return "{}".format(separator).join([f"{k}{assigner}{v}" for k, v in dictionary.items()])
|
12c1f3b102429c22d1bb15714631908be41a63f2
| 22,010 |
from pathlib import Path
def is_file(path: Path) -> bool:
"""
Return true if the given path is a file, false otherwise.
:param path: a Path
:return: a bool
"""
return path.is_file()
|
16a110aed72917683bea00672778aba8676c0790
| 690,847 |
def RobustL2Loss(output, log_std, target) -> "torch.Tensor":
"""Robust L2 loss using a Gaussian prior with aleatoric uncertainty estimation."""
loss = 0.5 * (output - target) ** 2 / (2 * log_std).exp() + log_std
return loss.mean()
|
53a47a5c81b768271fee6bc4f45a26e047f9d438
| 99,491 |
def wordlists(*wl):
""" Input is arbitrary number of lists of strings.
Output is one dictionary where each string is a key
and the count of those strings is the value """
word_dict = {}
for i in wl:
for x in i:
if x in word_dict:
word_dict[x] += 1
else:
word_dict[x] = 1
return word_dict
|
4af789da735886447c02f089065e30b46f348b22
| 688,080 |
import io
def parse(stdin: io.TextIOWrapper) -> list:
"""
Parse the input into a list of tuples, the first element being a set of
allergens and the second a set of ingredients
"""
result = list()
for line in stdin.read().strip().splitlines():
ingredients, allergens = line.strip(")").split(" (contains ")
result.append((set(allergens.split(", ")), set(ingredients.split())))
return result
|
c65d4adaffdab2a2bd44336f58bdd316079b43d0
| 569,480 |
import json
def parse_json_data(filename) -> dict:
"""Returns the contents of a JSON file as a dict"""
with open(filename, "r") as f:
return json.loads("".join(f.readlines()))
|
d061043cf29d1ed8d790a51d135c251e2226de77
| 25,940 |
def liquid_to_dry_malt_weight(malt):
"""
LME to DME Weight
:param float malt: Weight of LME
:return: DME Weight
:rtype: float
"""
return malt / 1.25
|
fbfbef7aafcfcf5e187ef7b18467b3648e9198d9
| 487,425 |
def _get_bucket_filter_leading(bucket, len_seq):
"""Filtering function for sequence "contains" a "leading bucket".
All n-grams in the leading bucket must be of the same length. The length of
the sequence to be subject to the filtering function must be of length
len_seq, and it must be longer than the n-grams in the leading bucket.
A sequence is here defined as "containing" a "leading bucket" if the leading
n-gram of the sequence, of the same length as the n-grams in the bucket, is
present in the bucket.
E.g.
bucket = ["as", "we", "op"]
seqs = ["asdf", "qwer", "uiop"]
seqs filters to: ["asdf"]
Args:
bucket: Bucket of n-grams used to filter sequences.
len_seq: Length of sequences to be filtered.
Returns: Filter function for sequence to be filtered "containing" bucket.
"""
len_ng = len(bucket[0])
assert len_ng < len_seq
bucket_set = set(bucket)
def filter_fn(seq):
assert len(seq) == len_seq
return seq[:len_ng] in bucket_set
return filter_fn
|
cec8c3fa310bc1e49c0b30196a8b708ce4eae683
| 167,011 |
from typing import List
def _get_characters(sentence: str, whitespace: bool) -> List[str]:
"""Split sentence into individual characters.
Args:
sentence: An input sentence to split.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
Return:
A list of separated characters.
"""
if whitespace:
return list(sentence)
return list(sentence.strip().replace(" ", ""))
|
63d5c6b5a75529b58cd97cad118ce9cd8b8cf88a
| 590,376 |
import torch
def PReLU(x, a):
"""define PReLU(x)=max(x,0)+a*min(x,0)"""
constant_zero = torch.FloatTensor([0.0])
return torch.max(x, constant_zero) + (a * torch.min(x, constant_zero))
|
0a6fdfe977dc1f068550c19c37068a0a2a3a4bfe
| 303,728 |
def violation(risk_estimates, risk_limits):
"""Return false if all estimates are within limits, true otherwise"""
for i in range(len(risk_estimates)):
if risk_estimates[i] > risk_limits[i]:
return True
return False
|
de86907ba792fb50a9678742ec2c66464cd8b5be
| 560,940 |
def round_up(x: float, decimal_places: int) -> float:
"""
Round a float up to decimal_places.
Parameters
----------
x : float
decimal_places : int
Returns
-------
rounded_float : float
Examples
--------
>>> round_up(1.2344, 3)
1.235
>>> round_up(1.234, 3)
1.234
>>> round_up(1.23456, 3)
1.235
>>> round_up(1.23456, 2)
1.24
"""
return round(x + 5 * 10 ** (-1 * (decimal_places + 1)), decimal_places)
|
159e5b8bf09ee1339a7c49ac05c6926d75658145
| 443,051 |
import re
def max_num(x: str)->int:
"""
Input: String
Output: Integer
Finds the maximum integer in the string
"""
c = re.findall(r"[1-9]+",x)
maxele = 0
for i in c:
if int(i) > int(maxele):
maxele = i
return maxele
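# Illustrative usage (editor's addition): digit runs are compared as integers.
assert max_num("abc12def9") == 12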
|
640695ea55a5b2278a4382b02399d1c5672ae969
| 110,253 |
def reverse(x: int) -> int:
"""
https://leetcode.com/problems/reverse-integer
:param x: a value to revert
:return: reverse of a 32-bit signed integer
"""
# Prepare value
unsigned_value = (x // 10) if (x % 10 == 0) else x
unsigned_value = -unsigned_value if x < 0 else unsigned_value
# Do some maths
new_value = 0
while unsigned_value > 0:
new_value = (new_value * 10) + (unsigned_value % 10)
unsigned_value //= 10
# Apply sign
new_value = -new_value if x < 0 else new_value
# Check boundaries
if new_value < -(2 ** 31) or new_value >= (2 ** 31):
return 0
return new_value
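# Illustrative usage (editor's addition): the sign is preserved, and values
# that overflow a signed 32-bit integer collapse to 0 per the problem statement.
assert reverse(-123) == -321
assert reverse(1534236469) == 0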
|
0ba8c2c5695ca18e7e17252e37aa385ccf02dca0
| 406,572 |
def obtain_ECG(tensec_data):
""" obtain ECG values of ten second data
:param tensec_data: 10 seconds worth of heart rate data points
:return ECGData: ECG unmultiplexed data
"""
ECGData = tensec_data[1::2]
return ECGData
|
ce63b58435b67b6995d19e04a64bcc24d9687cd5
| 30,252 |
def get_char_mode(n):
"""Return different num according to the type of given letter:
'1': num,
'2': upper_letter,
'4': lower_letter,
'8': other symbols
"""
    if (n >= 48 and n <= 57):    # nums
        return 1
    if (n >= 65 and n <= 90):    # uppers
        return 2
    if (n >= 97 and n <= 122):   # lowers
        return 4
    else:
        return 8
|
cce56663551251b52d8a47eb6416b7ff1dee16cf
| 286,398 |
from typing import List
from typing import Union
def add_args(list_args: List[Union[str, List[str]]]) -> str:
"""Concatenate the given arguments and turn any string list in a space-separated string.
:param list_args: list of arguments for the output command.
"""
out_cmd = ''
for arg in list_args:
if isinstance(arg, str):
out_cmd += arg + ' '
else:
for inner_arg in arg:
out_cmd += inner_arg + ' '
# remove any trailing space character
return out_cmd.strip()
|
458197557c779e42f271954b2ec318acdfdfd3be
| 448,604 |
import json
def decode_transaction(raw):
"""Decode a transaction from bytes to a dict."""
return json.loads(raw.decode('utf8'))
|
24376971f21b9410b881de6e85fd3105425f89be
| 620,133 |
def addr_to_hash(addr):
"""
Creates hashable address representation
"""
return bytes(addr.spend_public_key + addr.view_public_key)
|
9a6584c8200c8c4252afb5b4a8c46eb7aa3f7cfd
| 608,811 |
def __matrix_blocks(A, m, n):
"""
Returns a list of four matrix blocks, where the first block is of size (m, n).
"""
return A[:m, :n], A[:m, n:], A[m:, :n], A[m:, n:]
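# Illustrative usage (editor's addition), assuming NumPy is available:
import numpy as np
A = np.arange(16).reshape(4, 4)
tl, tr, bl, br = __matrix_blocks(A, 2, 2)
assert tl.tolist() == [[0, 1], [4, 5]]  # top-left (2, 2) block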
|
e4060df4c85d87fc1e4d3e4c56cfbce700183e41
| 205,360 |
def get_parent_pb(xml_pbtype):
"""
Returns a parent pb_type of the given one or none if it is a top-level
complex block
"""
assert xml_pbtype.tag in ["pb_type", "mode", "interconnect"], xml_pbtype.tag
# Get immediate parent
xml_parent = xml_pbtype.getparent()
# We've hit the end
if xml_parent is None or xml_parent.tag == "complexblocklist":
return None
# The immediate parent is a mode, jump one more level up
if xml_parent is not None and xml_parent.tag == "mode":
xml_parent = xml_parent.getparent()
return xml_parent
|
135ba082dc3c1a37c171925bcddfc4bb11fc1573
| 285,144 |
def sp(dividend, divisor):
"""Returns the percentage for dividend/divisor, safely."""
if not divisor:
return 0.
return 100. * float(dividend) / float(divisor)
|
ca027d5637738f3a268e24d329d2d876f855e794
| 678,098 |
def _mjd2000_to_decimal_year_simple(mjd2k):
""" Convert Modified Julian Date 2000 to decimal year.
"""
return 2000.0 + mjd2k / 365.25
|
f0c43a7bbc275ecaf7efccdfe324434bf467d03e
| 127,944 |