content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
---|---|---
def extract_model_parameters(state_dict):
"""
Extracts model parameters from saved PyTorch state_dict
:param state_dict: Result from torch.load()
:return: Scale factor of pretrained model
"""
if state_dict['module.tail.0.0.weight'].shape[0] == 2304:
scale = 3
elif 'module.tail.0.2.weight' in state_dict.keys():
scale = 4
else:
scale = 2
return scale
|
360bb6578cc85c3c2af6bf5415226ee2ec70733d
| 452,589 |
def is_empty(value: object) -> bool:
"""
    Check if value is None or, when not None, empty (falsy).
"""
return (value is None) or (not value)
|
88b848e6a25da134a3408def4db83d258a031919
| 677,108 |
def listify(obj) -> list:
"""Converts obj to a list intelligently.
Examples
--------
Normal usage::
listify('str') # ['str']
listify((1, 2)) # [1, 2]
listify(len) # [<built-in function len>]
"""
if isinstance(obj, str):
return [obj]
try:
return list(obj)
except TypeError:
return [obj]
|
82e3110f29da0eb85c176c4ea8783ce22ff93e93
| 324,895 |
def split_train_test(X, y, test_percentage):
"""
Randomly split given dataset into training- and testing sets
:param X: Design matrix to split
:param y: Response vector to split
:param test_percentage: Percentage of samples to use as test
:return: Two tuples of: (train set X, train set y), (test set X, test set y)
"""
X = X.sample(frac=1)
y = y.reindex_like(X)
    n = round(test_percentage * len(y))
    if n == 0:  # avoid X[:-0], which would return an empty train set
        return (X, y), (X[:0], y[:0])
    return (X[:-n], y[:-n]), (X[-n:], y[-n:])
|
7f98f9bb5ef9376308da9e10518c94ee1680f71e
| 688,417 |
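A minimal usage sketch, assuming `X` is a pandas DataFrame and `y` a pandas Series (the data below is hypothetical):

import pandas as pd

X = pd.DataFrame({"feature": range(10)})
y = pd.Series(range(10), name="label")
(train_X, train_y), (test_X, test_y) = split_train_test(X, y, 0.3)
print(len(train_X), len(test_X))  # 7 3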
from textwrap import dedent
def dedent_docstring(docstring):
"""This removes initial spaces from the lines of the docstring.
After the first line of the docstring, all other lines will include some
spaces. This removes them.
Examples
--------
>>> from _pyodide.docstring import dedent_docstring
>>> dedent_docstring(dedent_docstring.__doc__).split("\\n")[2]
'After the first line of the docstring, all other lines will include some'
"""
first_newline = docstring.find("\n")
if first_newline == -1:
return docstring
docstring = docstring[:first_newline] + dedent(docstring[first_newline:])
return docstring
|
8e727f56561d715ea9de1a6e93d4f5cbc6abe764
| 471,503 |
def move_to_next_pixel(fdr, row, col):
""" Given fdr (flow direction array), row (current row index), col (current col index).
return the next downstream neighbor as row, col pair
See How Flow Direction works
http://desktop.arcgis.com/en/arcmap/latest/tools/spatial-analyst-toolbox/how-flow-direction-works.htm
D8 flow direction grid
| 32 | 64 | 128 |
| 16 | X | 1 |
| 8 | 4 | 2 |
"""
# get the fdr pixel value (x,y)
value = fdr[row, col]
# Update the row, col based on the flow direction
if value == 1:
col += 1
elif value == 2:
col += 1
row += 1
elif value == 4:
row += 1
elif value == 8:
row += 1
col -= 1
elif value == 16:
col -= 1
elif value == 32:
row -= 1
col -= 1
elif value == 64:
row -= 1
elif value == 128:
row -= 1
col += 1
else:
        # Indeterminate flow direction (sink); do not move.
        pass
return (row, col)
|
d134bb35ed4962945c86c0ac2c6af1aff5acd06b
| 707,105 |
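A short sketch tracing flow downstream on a hypothetical NumPy D8 grid; the walk stops moving once it reaches the sink cell (value 0):

import numpy as np

fdr = np.array([[2, 4, 4],
                [1, 2, 4],
                [1, 1, 0]])  # 0 marks a sink
row, col = 0, 0
for _ in range(3):
    row, col = move_to_next_pixel(fdr, row, col)
    print(row, col)  # (1, 1), then (2, 2), then stays at (2, 2)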
def roundplus(number):
    """
    Given a number, this function rounds it down to a coarse label, as in:
    87 -> 87, 100 -> 100+, 188 -> 100+, 999 -> 900+, 1001 -> 1K+, 23456 -> 23K+, ...etc
    """
    num = str(number)
    if not num.isdigit():
        return num
    digits = len(num)
    if digits < 3:
        rounded = num
    elif digits == 3:
        rounded = num[0] + '00+'
    elif digits == 4:
        rounded = num[0] + 'K+'
    elif digits == 5:
        rounded = num[:2] + 'K+'  # keep two leading digits, e.g. 23456 -> 23K+
    else:
        rounded = '100K+'
    return rounded
|
895704e849694927ac391a06a95e40f52f6fec21
| 157,424 |
def _find_config(pav_cfg, conf_type, conf_name):
"""Search all of the known configuration directories for a config of the
given type and name.
:param pav_cfg: The pavilion config data.
:param str conf_type: 'host', 'mode', or 'test'
:param str conf_name: The name of the config (without a file extension).
:return: The path to the first matching config found, or None if one wasn't
found.
"""
for conf_dir in pav_cfg.config_dirs:
path = conf_dir/conf_type/'{}.yaml'.format(conf_name)
if path.exists():
return path
return None
|
0f2dcbbdb3ac77717bd89a7f1b5ac3e3d17464f2
| 301,069 |
import copy
def get_category_obj(data, category_name):
"""Extract obj with `name` == `category_name` from `data`."""
cats = copy.copy(data)
while(cats):
cat = cats.pop()
if cat['name'] == category_name:
return cat
if 'children' in cat:
cats.extend(cat['children'])
return None
|
82782dba9ae9374abc5e3f510ba53f9e03e14a62
| 666,937 |
import torch
def DH(theta, d, r, alpha, device):
"""
Calculates the Denavit-Hartenberg Matrix
where
d: offset along previous z to the common normal
theta: angle about previous z, from old x to new x
r: length of the common normal (aka a, but if using this notation, do not confuse with alpha).
Assuming a revolute joint, this is the radius about previous z.
alpha: angle about common normal, from old z axis to new z axis
"""
T = torch.zeros([theta.shape[0], 4, 4]).to(device)
T[:, :, :] = torch.eye(4).to(device)
cTheta = torch.cos(theta)
sTheta = torch.sin(theta)
calpha = torch.cos(alpha)
salpha = torch.sin(alpha)
T[:, 0, 0] = cTheta
T[:, 0, 1] = -sTheta
T[:, 0, 2] = 0.0
T[:, 0, 3] = r
T[:, 1, 0] = sTheta * calpha
T[:, 1, 1] = cTheta * calpha
T[:, 1, 2] = -salpha
T[:, 1, 3] = - d * salpha
T[:, 2, 0] = sTheta * salpha
T[:, 2, 1] = cTheta * salpha
T[:, 2, 2] = calpha
T[:, 2, 3] = d * calpha
return T
|
9b40c28bbe93e2b3b08be4e171ec0d0aebe14810
| 329,142 |
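A minimal usage sketch on CPU with a batch of two joint angles (hypothetical values); `d` and `r` broadcast as scalars:

import math
import torch

device = torch.device("cpu")
theta = torch.tensor([0.0, math.pi / 2])
alpha = torch.zeros(2)
T = DH(theta, d=0.1, r=0.5, alpha=alpha, device=device)
print(T.shape)  # torch.Size([2, 4, 4]) - one homogeneous transform per joint angle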
import json
def to_json(raw_data):
"""
Pretty-prints JSON data
:param str raw_data: raw JSON data
"""
return json.dumps(raw_data, sort_keys=True,
indent=4, separators=(',', ': '))
|
9faf1a31e2f906e8170226d81160a5523c201566
| 673,725 |
def get_max(list_of_string):
"""Return maximum value from a list of string or integer """
return max(map(int, list_of_string))
|
cd4a3cbf67b3d2c663ff45ac294b1ac6f2678062
| 125,006 |
def replace_conditional(record: dict, field: str, value: str, replacement: str):
"""
Function to conditionally replace a value in a field.
Parameters
----------
record : dict
Input record.
field : str
Key of field to be conditionally altered.
value : str
Value to identify and replace.
replacement : str
Value to insert on replacement.
Returns
-------
    dict
        Record with the specified field altered if `record[field] == value`. Otherwise, the original record is returned.
    """
    if record[field] == value:
        record[field] = replacement
    return record
|
4f3e25c5ca174289de3154332bfeac988391979e
| 135,365 |
def retrieve_bgp_neighbor(bgp_data, neighbor):
"""
Parse the output from NAPALM's get_bgp_neighbors()
Retrieve the specified neighbor's BGP dictionary.
"""
return bgp_data['global']['peers'][neighbor]
|
c733c6518849f0e342173df6feb58b0708f7a514
| 189,045 |
from operator import and_
def e(*operands):
"""
Alias for and_().
"""
return and_(*operands)
|
360ba57648006e090dca94c83a70d46f2eaa1e79
| 492,847 |
def madd(a,b,c):
"""Return a+c*b where a and b are vectors."""
if len(a)!=len(b):
raise RuntimeError('Vector dimensions not equal')
return [ai+c*bi for ai,bi in zip(a,b)]
|
3a7c6345b8699a72bbc8ea5d2a3dd2c8523391f2
| 289,786 |
import jinja2
def generate_template(templatename, **kwargs):
"""This renders a JINJA template as a generator.
The templates exist in our lib/python/treadmill/templates directory.
:param ``str`` templatename:
The name of the template file.
:param ``dict`` kwargs:
key/value passed into the template.
"""
jinja_env = jinja2.Environment(loader=jinja2.PackageLoader('treadmill'))
template = jinja_env.get_template(templatename)
return template.generate(**kwargs)
|
cc8789e25fef2daaba783446294bb07dc68797f3
| 520,896 |
def temporal_affine_forward(x, w, b):
"""
Inputs:
- x: Input data of shape (N, T, D)
- w: Weights of shape (D, M)
- b: Biases of shape (M,)
Returns a tuple of:
- out: Output data of shape (N, T, M)
- cache: Values needed for the backward pass
"""
N, T, D = x.shape
M = b.shape[0]
out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b
cache = x, w, b, out
return out, cache
|
eae8f0dbbea596a4bcc1a7739cfd249e8898df90
| 662,301 |
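A quick shape check with hypothetical NumPy arrays; the affine transform is applied independently at every timestep:

import numpy as np

x = np.random.randn(2, 3, 4)  # N=2 sequences, T=3 steps, D=4 features
w = np.random.randn(4, 5)     # project D=4 features to M=5 outputs
b = np.random.randn(5)
out, cache = temporal_affine_forward(x, w, b)
print(out.shape)  # (2, 3, 5)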
import json
def check_file_contents_for_email_alerts( app ):
"""
See if any admin users have chosen to receive email alerts when a repository is updated.
If so, the file contents of the update must be checked for inappropriate content.
"""
sa_session = app.model.context.current
admin_users = app.config.get( "admin_users", "" ).split( "," )
for repository in sa_session.query( app.model.Repository ) \
.filter( app.model.Repository.table.c.email_alerts != None ):
email_alerts = json.loads( repository.email_alerts )
for user_email in email_alerts:
if user_email in admin_users:
return True
return False
|
5f4f7b02178bd129a695bd3330a28f974d66ea52
| 289,227 |
def load_data(path_to_dir):
"""
Loads the train and test set into four different lists.
"""
train_pos = []
train_neg = []
test_pos = []
test_neg = []
with open(path_to_dir+"train-pos.txt", "r") as f:
        for line in f:
words = [w.lower() for w in line.strip().split() if len(w)>=3]
train_pos.append(words)
with open(path_to_dir+"train-neg.txt", "r") as f:
for line in f:
words = [w.lower() for w in line.strip().split() if len(w)>=3]
train_neg.append(words)
with open(path_to_dir+"test-pos.txt", "r") as f:
for line in f:
words = [w.lower() for w in line.strip().split() if len(w)>=3]
test_pos.append(words)
with open(path_to_dir+"test-neg.txt", "r") as f:
for line in f:
words = [w.lower() for w in line.strip().split() if len(w)>=3]
test_neg.append(words)
return train_pos, train_neg, test_pos, test_neg
|
d5e0bbf8e1033c7256b44c2f61624c07dedcfff3
| 548,747 |
from operator import add
def check_ts(number):
"""
    Checks the validity of the Italian health card (tessera sanitaria) identification code
    :param number: identification code to verify
    :return: True/False
"""
if not number.isdigit():
return False
if len(number) != 20:
return False
if number[0:5] != "80380":
return False
even = [sum([int(digit) for digit in str(int(x) * 2)]) for x in number[-2::-2]]
odd = [int(x) for x in number[-1::-2]]
tot = sum(map(add, even, odd))
if tot % 10 != 0:
return False
return True
|
dc8e19cfa79ab806f2810a2cab7809e98edec642
| 298,545 |
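The check above is a Luhn-style mod-10 checksum over a 20-digit string with the fixed prefix 80380. A worked sketch with hypothetical (not real) numbers:

print(check_ts("80380000000000000009"))  # True: doubled/plain digit sums total 30
print(check_ts("80380000000000000000"))  # False: the same sums total 21
print(check_ts("1234"))                  # False: wrong length and prefix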
def format_metric(metric: float) -> str:
"""
Returns a readable string from the given Dice or loss function value, rounded to 3 digits.
"""
return "{:0.3f}".format(metric)
|
e117d483fd7f4398296cf370cd0427593c694100
| 381,185 |
def quote_literal(value):
"""Quote provided literal."""
return "'{}'".format(value
.replace("\\", "\\\\")
.replace("'", "\\'"))
|
561309d4b473d9daf0f46e55cfc6be830c805a5f
| 406,754 |
def bestAlgorithm_(sequencingChemistries):
"""
Identify the (de novo) consensus algorithm we expect to deliver
the best results, given the sequencing chemistries represented in
an alignment file.
We key off the sequencing chemistries as follows:
- Just RS chemistry data? Then use quiver (at least for now, until
we get arrow > quiver on P6-C4)
- Else (either all Sequel data, or a mix of Sequel and RS data),
use arrow.
- Unknown chemistry found? Return None; we should abort if this is found
Note that the handling/rejection of chemistry mixtures (including
mixtures of Sequel and RS data) is left to the algorithm itself.
"""
if len(sequencingChemistries) == 0:
raise ValueError("sequencingChemistries must be nonempty list or set")
chems = set(sequencingChemistries)
anyUnknown = "unknown" in chems
allRS = all(not(chem.startswith("S/")) for chem in chems) and (not anyUnknown)
if anyUnknown:
return None
elif allRS:
return "quiver"
else:
return "arrow"
|
25b73bbba48d1a58b6c31645f04f810dc025178c
| 588,599 |
import copy
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
|
2d759603ad7cf1ada5741333be138f57957677f6
| 690,606 |
def check_hypervisor_availability(available_images, fname, hyp_type):
"""
This routine checks if a given hypervisor is present in given list of images
in FVM.
Args:
available_images(str): JSON of response of enumerating hypervisors in FVM.
fname(str): Name of file to be searched in response.
hyp_type(str): Hypervisor type.
Returns:
(boolean): True if it is present, False otherwise.
"""
for i in available_images.get(hyp_type, ""):
if i["filename"] in fname:
return True
return False
|
7cb928cb86cab0c5e39eace543b67e0ca18bd66f
| 479,463 |
def get_colour_key_hitmask(image, rect, key=None):
"""
Returns a hit-mask using an image's colour_key.
image -> pygame Surface,
rect -> pygame Rect that fits image,
key -> an over-ride colour, if not None will
be used instead of the image's colour_key
"""
if not key:
colour_key = image.get_colorkey()
else:
colour_key = key
mask = []
for x in range(rect.width):
mask.append([])
for y in range(rect.height):
mask[x].append(not image.get_at((x, y)) == colour_key)
return mask
|
8de9a6f9f14fcc19e0816cac8fd922b95293aed4
| 130,497 |
def compute_sma(df, column_source, column_target_sma, time_periods):
"""
Compute Simple Moving Average (SMA).
:param df: dataframe (sorted in ascending time order)
:param column_source: name of source column in dataframe with values to compute SMA (e.g. close price)
:param column_target_sma: prefix of target column in dataframe for SMA results
:param time_periods: list of time periods (number of days for SMA)
:return: modified dataframe
"""
# compute SMA for each time period and add results back to dataframe
for time_period in time_periods:
key_sma = column_target_sma + "-{:d}".format(time_period)
df[key_sma] = df[column_source].rolling(window=time_period, min_periods=1).mean()
return df
|
861f0e8cc5a8c5321eae74166b34f2851ed1ed53
| 460,812 |
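A minimal usage sketch on a tiny pandas frame with hypothetical closing prices; the result columns follow the prefix convention above:

import pandas as pd

df = pd.DataFrame({"close": [10.0, 11.0, 12.0, 13.0]})
df = compute_sma(df, "close", "sma", [2, 3])
print(df[["sma-2", "sma-3"]])  # rolling means; early rows use min_periods=1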
import csv
def load_categories_from_csv_file(args):
"""Loads categories from a csv file.
The CSV file should have one comma delimited numeric category id and string
category name pair per line. For example:
0,"cat"
1,"dog"
2,"bird"
...
Args:
csv_path: Path to the csv file to be parsed into categories.
Returns:
categories: A list of dictionaries representing all possible categories.
The categories will contain an integer 'id' field and a string
'name' field.
Raises:
ValueError: If the csv file is incorrectly formatted.
"""
categories = []
with open(args.label_map_csv_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for i,row in enumerate(reader):
if not row:
continue
if len(row) != 2:
raise ValueError('Expected 2 fields per row in csv: %s' % ','.join(row))
category_id = int(i+1)
category_name = row[0]
category_display_name = row[1]
categories.append({'id': category_id, 'name': category_name, 'display_name': category_display_name})
return categories
|
d2352159d86ccaceaba605c844a8a6951159760e
| 574,034 |
def get_enrichment_analysis_name_from_file_name(fn):
"""
    Output from ChromHMM OverlapEnrichment usually denotes the enrichment analysis name
    by the file names in the coordinate directory, for example: mutation_occ1.bed.gz.
    We want the enrichment_analysis_name to be mutation_occ1.
"""
return (fn.split("."))[0]
|
2c13b5af94210ee68fb34f877c610542e34f2f56
| 525,335 |
from datetime import datetime
from dateutil import tz
def add_tz(dt: datetime, zone: str) -> datetime:
"""add_tz datetime doesnt assign a time zone. This function shows how to do it.
Args:
dt (datetime): original datetime
zone (str): desired zone
Returns:
datetime: a new datetime with the zone assigned
"""
return dt.replace(tzinfo=tz.gettz(zone))
|
bb10df403031e4d3f175539c35ffaa0f250958f6
| 133,879 |
def get_bin_values(base_dataset, bin_value):
"""Gets the values to be used when sorting into bins for the given dataset, from the configured options."""
values = None
if bin_value == "results":
values = base_dataset.get_output()
elif bin_value == "all":
# We set all values to 0, assuming single bin will also set its value to 0.
values = [0] * base_dataset.get_number_of_samples()
else:
raise Exception(f"Invalid bin value configured: {bin_value}")
return values
|
cf2419066d6e642e65d9a8747081ebfee417ed64
| 706,934 |
def most_expensive_menu_item(restaurant):
"""
Loops through a list of menu items and determines the most expensive item
Parameters:
restaurant (dict): A dictionary with three lists items, prices, and cals
Returns:
str: A string with the name of the most expensive item
"""
highest_price = 0
highest_price_index = None
for index, price in enumerate(restaurant['prices']):
if price > highest_price:
highest_price = price
highest_price_index = index
return restaurant['items'][highest_price_index]
|
a06e0ec925dc573a2dd6523497870dda663d051d
| 104,529 |
def checkOutfileOption(outfileOption):
"""
function to set the default value for outfileOptions
Args:
outfileOption: relative path to output location or None
Returns: relative path to output location
"""
if outfileOption is None:
return './index.rst'
else:
return outfileOption
|
788b56889304f06400bb11b27c40243a577ac106
| 249,039 |
def _get_number_of_warmup_and_kept_draws_from_fit(fit):
"""Get number of warmup draws and kept draws."""
if 'warmup2' not in fit:
n_warmup = 0
n_draws = fit['n_save'][0]
else:
n_warmup = fit['warmup2'][0]
n_draws = fit['n_save'][0] - fit['warmup2'][0]
return n_warmup, n_draws
|
e1928cd5eb6eb47d1d1347ebf05f65434fb1336c
| 661,531 |
def crop_pcm(value, sample_rate=16000, time=5):
"""Returns a cropped part of PCM."""
start = sample_rate
end = time * sample_rate + start
contain_time = int(value.shape[0] / sample_rate)
if time > contain_time or end > value.shape[0]:
return value
return value[start:end]
|
30b6307dc5a6ca782e46264a71f73dd1635e93bc
| 548,610 |
import torch
def get_state_dict(model):
"""
Gets state dict for a given tacotron2 model.
Handles parallel & non-parallel model types.
Parameters
----------
model : Tacotron2
tacotron2 model
Returns
-------
dict
Model state dict
"""
if isinstance(model, torch.nn.DataParallel):
return model.module.state_dict()
else:
return model.state_dict()
|
ea07d2bb1d025909c479242b37aebf7941dc9775
| 267,237 |
def flag_match(obj, flags):
"""
Given a component or attribute, return true if it matches all of the given flags
and False otherwise. If there are no flags, always return True.
"""
if "flags" in obj:
return all(obj['flags'][key] == value for key, value in flags.items())
return True
|
449ffa243879550e7870adbf6d02c1965053a467
| 392,455 |
def exec_string(string, language="python", decorators=None):
"""Execute a string as python code.
The languages available are ``python`` and ``mel``.
During the process, creates a new function and calls it using the
:func:`exec` builtin function.
With this process, it is possible to apply decorators to the string to be
executed. Even if the language is set to "python" or "mel", because in the
case where the string is written in "mel", a python function is still
created and called the :func:`mel.eval` command.
Also, like any python function, it can have a :obj:`return` statement.
If specified in the string to be executed, the value will be returned.
See the examples for more details.
Warning:
The :obj:`return` statement only works for the python language.
Examples:
>>> from maya import cmds
>>> _ = cmds.file(new=True, force=True)
>>> command = \"\"\"
... from maya import cmds
... return cmds.polyCube(name="pythonCube")[0]
... \"\"\"
>>> exec_string(command)
'pythonCube'
>>> cmds.objExists("pythonCube")
True
>>> command = \"\"\"
... polyCube -name "melCube";
... \"\"\"
>>> exec_string(command, language="mel")
>>> cmds.objExists("melCube")
True
Arguments:
string (str): The command to execute as string.
language (str, optional): The language in which the object provided in
the ``string`` parameter is written.
decorators (list, optional): The python decorators to apply at runtime.
Returns:
any: Anything that the string will return.
Raises:
ValueError: The specified language is not supported by the function.
"""
lines = ["def _callback():\n"]
if language == "python":
lines.extend(string.splitlines(True))
elif language == "mel":
line = "from maya import mel;mel.eval('{}')"
lines.append(line.format(string.replace("\n", "")))
else:
msg = "The language '{}' is not supported.".format(language)
raise ValueError(msg)
exec((" " * 4).join(lines)) # pylint: disable=exec-used
callback = locals()["_callback"]
for decorator in decorators or []:
try:
callback = decorator()(callback)
except TypeError:
callback = decorator(callback)
return callback()
|
850e595523453b4d28abf745290886b4587e3a8b
| 571,689 |
def decoder(obj):
"""Decodes 'bytes' object to UTF-8."""
if isinstance(obj, bytes):
return obj.decode("utf-8")
return obj
|
09ba08af32bd145470ec2a3c6c8ebb875330e22b
| 433,951 |
import typing
from typing import Counter
def alignment_conservation(alignment: typing.Dict[str, str]) -> typing.List[float]:
"""
Obtain residue conservations of each position in a sequence alignment
Parameters
----------
alignment
dict of key: aligned sequence
Returns
-------
list of conservation values
"""
conservations = []
length = len(alignment[list(alignment.keys())[0]])
for pos in range(length):
aligned_residues_count = Counter(
[alignment[key][pos] for key in alignment if alignment[key][pos] != "-"]
)
conservation = aligned_residues_count.most_common(1)[0][1] / len(alignment)
conservations.append(conservation)
return conservations
|
d82c5313538b9c7f87184a98fffc9f6c56d86182
| 443,223 |
import inspect
def is_functional_member(member):
"""
Check whether a class member from the __dict__ attribute is a method.
This can be true in two ways:
- It is literally a Python function
- It is a method descriptor (wrapping a function)
Args:
member (object): An object in the class __dict__.
Returns:
bool: `True` if the member is a function (or acts like one).
"""
return (
inspect.isfunction(member)
or (
inspect.ismethoddescriptor(member)
and isinstance(member, (classmethod, staticmethod))
)
)
|
268068600689a7935c9a8b26aa14ca09f9679228
| 12,690 |
def parse_pots(initial):
"""Parse initial pots."""
return [[pot, index]
for index, pot in enumerate(initial.split(': ')[1])]
|
bacc10b5a1e737cbd58662b360a7bf058c9abf5d
| 248,132 |
from typing import Optional
def get_fullname(namespace: Optional[str], name: str) -> str:
"""
Constructs a fullname from a namespace and a name.
"""
if namespace:
return namespace + "." + name
else:
return name
|
5bc82836186089368eeeea66c206d8f09b006ba6
| 666,198 |
def ListFeatures(font):
"""List features for specified font. Table assumed structured like GPS/GSUB.
Args:
font: a TTFont.
Returns:
List of 3-tuples of ('GPOS', tag, name) of the features in the font.
"""
results = []
for tbl in ["GPOS", "GSUB"]:
if tbl in font.keys():
results += [
(tbl,
f.FeatureTag,
"lookups: [{}]".format(", ".join(map(str, f.Feature.LookupListIndex)))
) for f in font[tbl].table.FeatureList.FeatureRecord
]
return results
|
0fb2a1ac5b367d7d711cab7b303d2ba1fd487514
| 678,446 |
def percentage(value, total_sum):
"""calculate a percentage"""
if total_sum == 0:
return 0
else:
return round(100 * float(value) / float(total_sum))
|
b6aa712b58f93fd1aa18587d7816e500a1585971
| 419,030 |
import yaml
def read_yaml_file(filename):
"""Read a YAML file."""
with open(filename, 'r') as f:
return yaml.safe_load(f.read())
|
4f13467fcf675d26f5b1118ad48dc7b03f769d47
| 97,774 |
import pickle
def read_encoded_ctx_file(encoded_ctx_file: str):
"""
Returns dictionary containing the encoded passages and their vector embeddings
:param encoded_ctx_file:
:return:
"""
with open(encoded_ctx_file, mode="rb") as f:
p_emb = pickle.load(f)
    # create a dictionary so embeddings can be looked up by passage id
p_emb_dict = {}
for passage in p_emb:
p_emb_dict.update({passage[0]: passage[1]})
return p_emb_dict
|
23bbf7e76d53c3b3ea8228b9e8b2d53d932bae33
| 66,149 |
def split_lhs_rhs(expr):
"""Split the equation into left and right hand side.
>>> split_lhs_rhs(" 12 + a ")
(None, '12 + a')
>>> split_lhs_rhs(" c = 12 + a ")
('c', '12 + a')
"""
expr = [x.strip() for x in expr.split("=")]
if len(expr) == 1:
rhs = expr[0]
output = None
else:
output, rhs = expr
return output, rhs
|
ac7fd4861ad3289365030d6eac656e021ee39e6f
| 42,664 |
def reformat_ssh_key_to_pem_bytes(ssh_key_str: str) -> bytes:
"""
reformat the ssh key string to pem format bytes for github client.
:param ssh_key_str: utf-8 string without header and footer for the github app rsa private key
:return: pem formatted private key in bytes with header and footer
"""
chunked = '\n'.join(ssh_key_str[i:i+64] for i in range(0, len(ssh_key_str), 64))
return f"-----BEGIN RSA PRIVATE KEY-----\n{chunked}\n-----END RSA PRIVATE KEY-----\n".encode("utf-8")
|
404e7198e2d5c07b8badd7bfaa4ece344303be98
| 245,407 |
def has_children(obj_json, client):
"""Checks whether an archival object has children using the tree/node endpoint."""
resource_uri = obj_json['resource']['ref']
tree_node = client.get('{}/tree/node?node_uri={}'.format(resource_uri, obj_json['uri'])).json()
    return tree_node['child_count'] > 0
|
0933dbdda99b1577ecc78d4b9dc73f50dcc72b4e
| 57,676 |
def ensure_existence(f):
""" Ensures that method is not marked as non_existent
Parameters
----------
f Method
Raises
------
NotImplementedError if the method is marked as non existent
Returns
-------
Method f
"""
if getattr(f, 'not_existing', False):
raise NotImplementedError('The distribution has no ' + f.__name__ + ' function. '
'You may use an adapter that supports its approximation.')
return f
|
18410fe298fcb5dab3114dea1632010d4778417c
| 639,802 |
def place_word(grid, coords, word):
"""
Return the grid with the new word placed
"""
for i, l in enumerate(word):
x, y = coords[0] + i, coords[1]
grid[y][x] = l
return grid
|
6921f13c378aa69ec87bb097db6ce38df13340a3
| 315,151 |
def validToken(token):
"""
Return True if token is in hex and is 64 characters long, False
otherwise
"""
if len(token) != 64:
return False
    try:
        bytes.fromhex(token)
    except ValueError:
        return False
    return True
|
1fa264acc43dd67ba400ddb50ccf3aaa04f33300
| 660,938 |
import string
def extract_words(text):
"""Return the words in a tweet, not including punctuation.
>>> extract_words('anything else.....not my job')
['anything', 'else', 'not', 'my', 'job']
>>> extract_words('i love my job. #winning')
['i', 'love', 'my', 'job', 'winning']
>>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
>>> extract_words("paperclips! they're so awesome, cool, & useful!")
['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
"""
s = ""
c = ''
for i in text:
if i not in string.ascii_letters:
i = ' '
s += i
return s.split()
|
cc0b7dbc548696ed74b48dec57b386fe38adfa41
| 699,019 |
from typing import OrderedDict
def get_stepped_value(value, step, step_type='Down'):
"""Returns the rounded value, given an initial input value.
Args:
value(int): The input value
step(int | OrderedDict): The step value
step_type(str): Whether the value is incremented to the next step
    Raises:
        KeyError: If the resolved step is not an int.
        TypeError: If step_type is not 'Up', 'Down', or 'Round'.
Returns:
int
"""
if isinstance(step, OrderedDict):
attribute_step = step.copy()
threshold_value, threshold_step = attribute_step.popitem(last=False)
while value >= threshold_value:
try:
threshold_value, threshold_step = attribute_step.popitem(last=False)
except KeyError:
break
step = threshold_step
if not isinstance(step, int):
raise KeyError("Unexpected argument for step.")
if step_type in {'Up', 'Down'}:
added_step = (step if value % step > 0 and step_type == 'Up' else 0)
elif step_type == 'Round':
added_step = (step if float(value % step) / step >= 0.5 else 0)
else:
raise TypeError
return value // step * step + added_step
|
a622d42e1a875a311a10e36ea129fd926cdd10d8
| 311,958 |
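A few worked calls with a plain integer step (hypothetical values):

print(get_stepped_value(47, 10))                     # 40: 'Down' truncates to the step
print(get_stepped_value(47, 10, step_type='Up'))     # 50: a nonzero remainder rounds up
print(get_stepped_value(44, 10, step_type='Round'))  # 40: remainder 4/10 < 0.5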
import torch
def categorical_kl(p1, p2):
"""
calculates KL between two Categorical distributions
:param p1: (B, D)
:param p2: (B, D)
"""
p1 = torch.clamp_min(p1, 0.0001) # actually no need to clamp
p2 = torch.clamp_min(p2, 0.0001) # avoid zero division
return torch.mean((p1 * torch.log(p1 / p2)).sum(dim=-1))
|
64b10348322743228e1877ac2164d2725788972e
| 482,839 |
def get_atom_indices(labels, atom):
"""
labels - a list of coordinate labels ("Elements")
atom - the atom whose indices in labels are sought
    Returns a list of all locations of [atom] in [labels]
"""
indices = []
for i in range(len(labels)):
if labels[i] == atom:
indices.append(i)
return indices
|
a5b36423becc9935e64225a6a029999e24471a65
| 690,825 |
import re
def _is_tab_line(line: str) -> bool:
"""
Check if this line is a tab line
:param line: Line for which we want to check if it is a tab line
:return: Boolean indicating if line is a tablature line
"""
if re.match(r'\s*[eBGDAE]? {0,5}(\|\))? {0,5}[-0-9|b/hp ]{10,}.*', line) \
and line.count('-') > line.count(' '):
return True
return False
|
aaaf018451b30a921d756cf1fa5161ff3af478dd
| 494,870 |
def traverse_pip(conn, wire_in_tile_pkey):
""" Given a generic wire, find (if any) the wire on the other side of a pip.
Returns None if no wire or pip connects to this wire.
"""
cur = conn.cursor()
cur.execute(
"""
SELECT src_wire_in_tile_pkey FROM pip_in_tile WHERE
is_pseudo = 0 AND
dest_wire_in_tile_pkey = ?
;""", (wire_in_tile_pkey, )
)
result = cur.fetchone()
if result is not None:
return result[0]
cur.execute(
"""
SELECT dest_wire_in_tile_pkey FROM pip_in_tile WHERE
is_pseudo = 0 AND
src_wire_in_tile_pkey = ?
;""", (wire_in_tile_pkey, )
)
result = cur.fetchone()
if result is not None:
return result[0]
return None
|
b74f702647e45a151eb08177e2c90148b44dc161
| 524,577 |
def _get_function_name(addr, p):
"""
Return a function name
:param addr: function address
:param p: angr project
:return: function name
"""
return p.loader.find_plt_stub_name(addr)
|
0e55ebb28200ac2670d1079c5bf9ea6aec2b9ed3
| 246,396 |
from typing import Tuple
def bbox(polygon) -> Tuple[float, float, float, float]:
"""Compute the Bounding Box of a polygon.
:param polygon: List of coordinate pairs (x,y)
"""
x,y = 0,1
vtx = polygon[0]
minx, miny, maxx, maxy = vtx[x], vtx[y], vtx[x], vtx[y]
    for vtx in polygon[1:]:
        # x and y bounds must be updated independently: a single vertex can
        # extend both an x bound and a y bound.
        if vtx[x] < minx:
            minx = vtx[x]
        elif vtx[x] > maxx:
            maxx = vtx[x]
        if vtx[y] < miny:
            miny = vtx[y]
        elif vtx[y] > maxy:
            maxy = vtx[y]
return minx, miny, maxx, maxy
|
c0e07607a94ee6b7f6945622c39e06a11e2bf4d0
| 610,338 |
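A quick check on a hypothetical triangle:

print(bbox([(0, 0), (4, 1), (2, 5)]))  # (0, 0, 4, 5)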
def _findCookie(filenames, cookie_re):
""" Look through a group of files for a cookie that satisfies a
given compiled RE, returning first such cookie found, or None. """
    for filename in filenames:
        with open(filename, 'r') as f:
            data = f.read()
        m = cookie_re.search(data)
        if m:
            return m.group(1)
    return None
|
894e5d3426c15857c3ef140cb72efbfcb2891bba
| 452,469 |
def date2epoch(dt):
"""
Convert list of datetime objects to epoch time
Parameters
----------
dt : datetime.datetime
Single or list of datetime object(s)
Returns
-------
    time : list of float
        Datetime(s) converted to epoch time (seconds since 1/1/1970 00:00:00)
"""
if not isinstance(dt, list):
dt = [dt]
return [t.timestamp() for t in dt]
|
1d13af15d856ca9d8f45fcda5f37d7a7eaff284a
| 104,210 |
from unicodedata import normalize
def unicodify(s, encoding='utf-8', norm=None):
"""Ensure string is Unicode.
.. versionadded:: 1.31
Decode encoded strings using ``encoding`` and normalise Unicode
to form ``norm`` if specified.
Args:
s (str): String to decode. May also be Unicode.
encoding (str, optional): Encoding to use on bytestrings.
        norm (str, optional): Normalisation form to apply to Unicode string.
Returns:
unicode: Decoded, optionally normalised, Unicode string.
"""
if not isinstance(s, str):
s = str(s, encoding)
if norm:
s = normalize(norm, s)
return s
|
ee4882dd7450ba0b146e3fb9ddabf9deaf0e7903
| 29,888 |
def sec_to_exposure_decimation(sec):
"""
Convert seconds to exposure and decimation.
The algorithm is limited since it multiplies decimation by 10 until the
resulting exposure is less than 65_535. This is not perfect because it
    limits decimation to 10_000 (the next step would be 100_000, which is
    bigger than the max decimation of 65_535).
The max theoretical value is ~497 days. This algorithm is limited to
~75 days. If it is not enough for you feel free to improve it :-)
(max theoretical = datetime.timedelta(seconds = 2**16 * 2**16 * 10E-3))
"""
decimation = 1
deci_millis = sec * 100
while (2 ** 16 * decimation) < deci_millis:
decimation *= 10
exposure = round(deci_millis / decimation)
return exposure, decimation
|
c880a371bc3aa9420de094df4815a876bb504b33
| 20,176 |
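A worked call (hypothetical input): 120 seconds fits within a 16-bit exposure, so no decimation is applied:

print(sec_to_exposure_decimation(120))  # (12000, 1): 120 * 100 = 12_000 < 2**16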
import requests
import json
def post(url, params, proxies, headers):
"""Send a request with the POST method."""
    response = requests.post(url, data=json.dumps(params), proxies=proxies, headers=headers)
return response
|
a6d57ee82021154540ea7b8c68329d2f85eaa157
| 653,074 |
import torch
from typing import Optional
def create_src_lengths_mask(batch_size: int, src_lengths: torch.Tensor, max_src_len: Optional[int] = None):
"""
Generate boolean mask to prevent attention beyond the end of source
Inputs:
batch_size : int
src_lengths : [batch_size] of sentence lengths
max_src_len: Optionally override max_src_len for the mask
Outputs:
[batch_size, max_src_len]
"""
if max_src_len is None:
max_src_len = int(src_lengths.max())
src_indices = torch.arange(0, max_src_len).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_src_len)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_src_len)
return (src_indices < src_lengths).int().detach()
|
b26c42a5fdc0ec25c1320d4db668e457ada30af2
| 630,996 |
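A minimal usage sketch with hypothetical lengths; positions past each sentence's length are masked out:

import torch

lengths = torch.tensor([2, 4])
mask = create_src_lengths_mask(batch_size=2, src_lengths=lengths)
print(mask)
# tensor([[1, 1, 0, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)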
import json
import hashlib
def _generate_etag(etag_data, etag_schema=None, extra_data=None):
"""Generate an ETag from data
etag_data: Data to use to compute ETag
etag_schema: Schema to dump data with before hashing
extra_data: Extra data to add before hashing
Typically, extra_data is used to add pagination metadata to the hash. It is
not dumped through the Schema.
"""
if etag_schema is None:
raw_data = etag_data
else:
if isinstance(etag_schema, type):
etag_schema = etag_schema()
raw_data = etag_schema.dump(etag_data)[0]
if extra_data:
raw_data = (raw_data, extra_data)
# flask's json.dumps is needed here
# as vanilla json.dumps chokes on lazy_strings
data = json.dumps(raw_data, sort_keys=True)
return hashlib.sha1(bytes(data, 'utf-8')).hexdigest()
|
6fa4828787f4d38a0503f14867350aa35f4109ad
| 518,833 |
from typing import Tuple
def split_url(url: str) -> Tuple[str, str, str]:
"""
Splits a url into its components: filename, basename, extension
Example: ('v.png', 'v', 'png')
:param url:
:return: Tuple[str, str, str]
"""
    filename = url.split("/")[-1]
    x = filename.split("?", 1)[0]  # drop any query string, if present
    basename, extension = x.rsplit(".", 1)
return x, basename, extension
|
0672fac3b96e4c6175c81f0d4e9fe0915b1d9eaa
| 519,953 |
def __ngrams(s, n=3):
""" Raw n-grams from a sequence
If the sequence is a string, it will return char-level n-grams.
If the sequence is a list of words, it will return word-level n-grams.
Note: it treats space (' ') and punctuation like any other character.
>>> ngrams('This is not a test!')
[('T', 'h', 'i'), ('h', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'i'),
(' ', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'n'), (' ', 'n', 'o'),
('n', 'o', 't'), ('o', 't', ' '), ('t', ' ', 'a'), (' ', 'a', ' '),
('a', ' ', 't'), (' ', 't', 'e'), ('t', 'e', 's'), ('e', 's', 't'),
('s', 't', '!')]
>>> ngrams(["This", "is", "not", "a", "test!"])
[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]
Args:
s: a string or a list of strings
n: an int for the n in n-gram
Returns:
list: tuples of char-level or word-level n-grams
"""
return list(zip(*[s[i:] for i in range(n)]))
|
e0cdbf7a231adcec2ab2c48742c2c8f5b285530d
| 327,269 |
def _is_comment(cells):
"""Returns True if it is a comment row, False otherwise."""
for cell in cells:
if cell.strip() == '':
continue
return cell.lstrip().startswith('#')
return False
|
db9e6125cdd3c4ae3ea2613682e0cb942277f386
| 140,886 |
def shorten(txt: str, length: int = 10, ellipsis: str = "…") -> str:
"""Truncate ``txt``, adding ``ellipsis`` to end, with total ``length``."""
if len(txt) > length:
return txt[: length - len(ellipsis)] + ellipsis
else:
return txt
|
ed53866d76d50cae216ced01b6527d3e87180b06
| 507,795 |
def dict_diff(first, second):
""" Return a dict of keys that differ with another config object. If a value is
not found in one fo the configs, it will be represented by KEYNOTFOUND.
@param first: Fist dictionary to diff.
@param second: Second dicationary to diff.
@return diff: Dict of Key => (first.val, second.val)
"""
KEYNOTFOUND = '<KEYNOTFOUND>'
diff = {}
# Check all keys in first dict
for key in first:
if (key not in second):
diff[key] = (first[key], KEYNOTFOUND)
elif (first[key] != second[key]):
diff[key] = (first[key], second[key])
# Check all keys in second dict to find missing
for key in second:
if (key not in first):
diff[key] = (KEYNOTFOUND, second[key])
return diff
|
163e2c7de61ad6758bcb6307c427441624c28266
| 630,802 |
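A short usage sketch with hypothetical configs:

old = {"host": "a", "port": 80}
new = {"host": "b", "timeout": 5}
print(dict_diff(old, new))
# {'host': ('a', 'b'), 'port': (80, '<KEYNOTFOUND>'), 'timeout': ('<KEYNOTFOUND>', 5)}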
def map_value_or(f, value, default):
"""Return Value or Default if Value is None."""
return f(value) if value is not None else default
|
e450e021a567fd57935dcebd120cbf3a74fc4098
| 150,915 |
import torch
from typing import Optional
def dkl_diag_gaussian(
mu_1: torch.Tensor,
var_1: torch.Tensor,
mu_2: Optional[torch.Tensor] = None,
var_2: Optional[torch.Tensor] = None,
log_flag: bool = False,
avg: bool = True,
) -> torch.Tensor:
"""Computes the analytical KL divergence between two diagonal-covariance Gaussians.
Consider two Gaussian distributions D_1 = N(mu_1, var_1) and D_2 = N(mu_2, var_2).
This function will compute D_KL(D_1 || D_2). If the parameters of D_2 are none,
then D_2 is assumed to be the standard normal distribution.
Parameters
----------
mu_1 : torch.Tensor, shape=(B, X)
Mean of D_1.
var_1 : torch.Tensor, shape=(B, X)
Diagonal entries of covariance of D_1.
mu_2 : torch.Tensor, shape=(B, X), default=None
Mean of D_2. Optional.
var_2 : torch.Tensor, shape=(B, X), default=None
Diagonal entries of covariance of D_2. Optional.
log_flag : bool, default=False
Flag indicating whether the variances are passed as log-variances
avg : bool, default=True
Flag indicating whether to batch average the KL divergence.
Returns
-------
dkl : torch.Tensor, shape=(1) or shape=(B, 1)
The KL divergence.
"""
B, X = mu_1.shape
if log_flag:
if mu_2 is None or var_2 is None:
inner = -var_1 + torch.exp(var_1) + mu_1 ** 2
else:
mu_diff = mu_2 - mu_1
inner = var_2 - var_1 + (torch.exp(var_1) + mu_diff ** 2) / torch.exp(var_2)
else:
if mu_2 is None or var_2 is None:
inner = -torch.log(var_1) + var_1 + mu_1 ** 2
else:
inner = torch.log(var_2 / var_1) + (var_1 + (mu_2 - mu_1) ** 2) / var_2
dkl = 0.5 * (torch.sum(inner, dim=-1) - X)
if avg:
dkl = torch.sum(dkl) / B
return dkl
|
ee9fa71a94142ab7fbbdc3a021b5d1edaa515f27
| 243,166 |
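A minimal sanity check: the KL divergence of N(0, I) against the implicit standard normal should be zero (hypothetical shapes):

import torch

mu = torch.zeros(3, 2)
var = torch.ones(3, 2)
print(dkl_diag_gaussian(mu, var))  # tensor(0.)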
def specNameToCName(specName):
"""
    Given a string containing an ASN.1 name, returns a string that is a valid
C++ identifier that is as similar to that name as possible. Since most
ASN.1 identifiers used in PKIX specifications are legal C++ names except
for containing hyphens, this function just converts the hyphens to
underscores. This may need to be improved in the future if we encounter
names with other funny characters.
"""
return specName.replace("-", "_")
|
42c9bba5c9bf76720cd4747588ddc291ec27d316
| 224,238 |
import re
def PHONOTACTIC(pattern):
"""Takes a string representing the regex pattern of a particular
phonotactic restriction, and returns a function that will count
the number of times that pattern occurs in an unparsed output
string."""
def F(output):
return len(re.findall(pattern, output))
return F
|
44bb47602df700f4400d8abad4cc45424d42a35e
| 604,373 |
def bestScheduleChannel(sched, node_id):
"""Choose the best single channel for the given node to use from the
schedule.
If the schedule allows the given node to transmit on multiple channels, pick
the channel with the most air time.
Args:
sched: A schedule
node_id: A node
Returns:
The best channel to transmit on
"""
if not (sched == node_id).any():
raise ValueError("No slot for node {}".format(node_id))
return (sched == node_id).sum(axis=1).argmax()
|
0ede5bbf290572bd1b20f1948b1e7f29c075b417
| 470,972 |
def binary_tail(n: int) -> int:
""" The last 1 digit and the following 0s of a binary representation, as a number """
return ((n ^ (n - 1)) + 1) >> 1
|
63460cef7b39b7e7ee2ec880810ff71d82be01e9
| 5,459 |
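Worked examples of the bit trick, which is equivalent to n & -n for positive n:

print(binary_tail(12))  # 4: 0b1100 -> tail 0b100
print(binary_tail(7))   # 1: 0b111 ends in a single 1
print(binary_tail(8))   # 8: 0b1000 is all tail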
def cel2kel(t):
"""Converts from Celsius to Kelvin"""
    return t + 273.15
|
04d6d474e9de56a8200a0fc147611906d4e58a16
| 650,776 |
def solve_1d_quadratic(p, q, r=0):
"""
    Finds the minimizer of a 1-D quadratic system,
raises an error if there is no minimizer (p<0)
Inputs:
- p, q, r: the coefficients of the 1D quadratic system
Output:
- xstar: the minimizer
"""
    # Assuming the quadratic is f(x) = p*x**2 + q*x + r, which has a unique
    # minimizer only when the leading coefficient is positive.
    assert p > 0, "No minimizer: leading coefficient p must be positive"
    return -q / (2 * p)
|
1495a5933e183b3ad6ea5e6be41e6aeac372771b
| 451,136 |
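A worked call under the assumed form f(x) = p*x**2 + q*x + r noted in the body above:

print(solve_1d_quadratic(2, -4))  # 1.0: minimizer of 2x^2 - 4x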
def prop(x, s):
"""Returns the proportion of `x` in `s`.
"""
return list(s).count(x)/len(s)
|
e35acf9e1ab49507822f2d316a1c8e79eb70cc50
| 556,020 |
import itertools
def styleguide_lint_with_options(request, styleguide_lint, config_write, tmp_path):
"""Fixture which will run the styleguide with the "lint" subcommand, given the provided options.
The options will be tested on the command line as well as in the config.
`base_args` and `lint_args` are similar to the `styleguide_lint` fixture, with the exception of
being mappings from option name to value.
"""
def _to_cmd_line(item):
return (f"--{item[0].replace('_', '-')}", item[1])
def runner(base_args={}, lint_args={}):
if request.param:
base_args_strs = []
lint_args_strs = []
config_write(**base_args, **lint_args)
else:
base_args_strs = itertools.chain(*map(_to_cmd_line, base_args.items()))
lint_args_strs = itertools.chain(*map(_to_cmd_line, lint_args.items()))
return styleguide_lint(base_args=base_args_strs, lint_args=lint_args_strs)
return runner
|
ca745bc90c760d89cdf842aac903f0d9728ac2df
| 158,403 |
def count_type_changes(pattern: str):
"""Count the number of type changes in the given pattern.
The pattern is expected to describe Bloch points with single characters
where each character type represents one type of Bloch point. Example:
`iooi` -> two type changes.
"""
count = 0
for left, right in zip(pattern[:-1], pattern[1:]):
if left != right:
count += 1
return count
|
91959d61d1c76833aeeff6e91d97f83e5bf0bd46
| 472,659 |
import requests
def check_csv(url: str) -> bool:
"""Checks if the CSV from URL file is available.
Args:
url: str - an URL link or a filepath to a CSV file
Returns:
bool: True if the CSV file is available
"""
status_code = requests.get(url).ok
if status_code:
print("CSV status code: 200")
return True
print("Error: Can't connect to CSV file.")
return False
|
1e696bba28b49b527aad24db9d176562b534dee3
| 574,403 |
def class_labels(column):
"""
Takes in target column and creates list of binary values. 1 (>=0.5) being in the
positive class (toxic), 0 (<0.5) being in the negative class (Not toxic)
"""
class_label = []
for row in column:
if row < 0.5:
class_label.append(0)
else:
class_label.append(1)
return class_label
|
3b94e6fbc918abb50a932550544408acca42843a
| 668,867 |
def parse_requirements(fpath):
"""
Parse a requirements file and return a dict of deps and versions.
"""
with open(fpath, 'r') as fh:
data = fh.read()
lines = data.split('\n')
lines = [line.strip() for line in lines if line and line[0] != '#']
deps = {}
for line in lines:
parts = line.split(' ')
if len(parts) > 1:
ver = parts[-1]
if ver[0] == '=':
ver = '=' + ver
deps[parts[0].lower()] = ver
else:
deps[parts[0].lower()] = None
return deps
|
ae9f4ee13d9e3d3d193c6114145da92995364c44
| 454,782 |
def create_agg_log(db, sessionID):
"""
Instantiate the Aggregate Data Collection.
:param db: The database object.
:param sessionID: Current user's session ID.
:return: The Aggregate Data Collection object.
"""
collection_name = 'agg_collection_' + sessionID
agg_collection = db[collection_name]
return agg_collection
|
23d8ea94adfe98aad28376184f173e3c772a8546
| 558,195 |
def get_dependencies(doc, n):
"""Get dependencies in the format of a list of
(token, deprel, dependent_token) pairs-
for all 'n' sentences in doc"""
def getdeps(i):
deps = []
for head, rel, dep in doc.sentences[i].dependencies:
deps.append((head.text, rel, dep.text))
return deps
return [getdeps(i) for i in range(n)]
|
cf60c17d64e8d45ce1cc9449db7fed4763bafbae
| 85,455 |
def get_hours_minutes_by_seconds(seconds):
"""
    Converts seconds to hours and minutes
:param seconds: seconds
:type seconds: int
:return: hours and minutes
:rtype: tuple
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return h, m
|
83526c2bb40dc19cf797e3f8b6369b8da9e2aea5
| 552,551 |
def indent(str, dent):
"""Simple function to uniformly add whitespace to the front of lines."""
return '\n'.join(map(lambda x: ' ' * dent + x, str.split('\n')))
|
72369e6dbab5413eed1f845833c96f5bef199950
| 516,044 |
def _CheckForDangerousTestFunctions(input_api, output_api):
"""Tests should not be using serveAsynchronousMockedRequests, since it does
not guarantee that the threaded HTML parser will have completed."""
serve_async_requests_re = input_api.re.compile(
r'serveAsynchronousMockedRequests')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not serve_async_requests_re.search(x),
input_api, None)
errors = [' * %s' % violation for violation in errors]
if errors:
return [output_api.PresubmitError(
'You should be using FrameTestHelpers::'
'pumpPendingRequests() instead of '
'serveAsynchronousMockedRequests() in the following '
'locations:\n%s' % '\n'.join(errors))]
return []
|
3daa6c5760abcbe82f718ae9129afdb5b582c1a8
| 365,182 |
def strip_nones(row):
"""
Remove all items with None for a value, because why include it if there's
no value?
"""
    return {k: v for k, v in row.items() if v is not None}
|
31c7a9d269b84bb7a4a375b766d1dad19e2c58a4
| 143,421 |
def calculate_bps(current_sample_octets, current_sample_time, historical_sample_octets, historical_sample_time):
"""Calculate the bits-per-second based on the octets and timeticks (hundreths of a second)."""
# When the SNMP counter reaches 18446744073709551615, it will rollover and reset to ZERO.
# If this happens, we want to make sure we don't output a negative bps
    if current_sample_octets < historical_sample_octets:
        # If we reset to 0, add the full counter range (2**64) back
        current_sample_octets += 18446744073709551616
delta = current_sample_octets - historical_sample_octets
# SysUpTime is in TimeTicks (Hundreds of a second), so covert to seconds
seconds_between_samples = (current_sample_time - historical_sample_time) / 100.0
# Multiply octets by 8 to get bits
bps = (delta * 8) / seconds_between_samples
bps /= 1048576 # Convert to Mbps (use 1024 for Kbps)
bps = round(bps, 2)
return bps
|
af26b62cb0dba703c8f41514b0afae835e387158
| 210,616 |
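A worked sketch with hypothetical counter samples: 1 MiB transferred over 10 seconds of TimeTicks:

mbps = calculate_bps(current_sample_octets=2_097_152,
                     current_sample_time=2000,  # TimeTicks: 20 s
                     historical_sample_octets=1_048_576,
                     historical_sample_time=1000)  # TimeTicks: 10 s
print(mbps)  # 0.8: (1_048_576 octets * 8 bits) / 10 s / 1_048_576 bits-per-Mbit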
import torch
def get_device(logger):
"""
Get device model will be run on (GPU or CPU)
:param logger: Logger object to note the device
:return: device type, num_of_gpus
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
return device, n_gpu
|
9b6ed3ecbd54741b4feb200f1a67f0d10cb4518b
| 246,186 |
def replace_tags(string, from_tag="i", to_tag="italic"):
"""
    Replace tags such as <i> with <italic>
<sup> and <sub> are allowed and do not need to be replaced
This does not validate markup
"""
string = string.replace("<" + from_tag + ">", "<" + to_tag + ">")
string = string.replace("</" + from_tag + ">", "</" + to_tag + ">")
return string
|
dccb4f34d0c26575a9a0c74fdb02c9a3dc4b2cd2
| 672,406 |
def formatNumber(number):
"""Ensures that number is at least length 4 by
adding extra 0s to the front.
"""
temp = str(number)
while len(temp) < 4:
temp = '0' + temp
return temp
|
33caf6a3304f8c0e39f937fc11f2daee463a6286
| 620,573 |
import random
def generate_random_data(nb_objs, nb_attrs):
"""
Generates a matrix of random data.
    :param nb_objs: the number of rows (objects) to generate
    :type nb_objs: int
    :param nb_attrs: the dimension of the data
    :type nb_attrs: int
:return: a matrix with nb_objs rows and nb_attrs+1 columns.
The 1st column is filled with line numbers (integers, from 1 to nb_objs).
"""
data = []
for i in range(nb_objs):
line = [i + 1] + list(map(lambda x: random.random(), range(nb_attrs)))
data.append(tuple(line))
return data
|
4e7205746b5aa8e31fab1a91c9e8bcd67cb91776
| 157,073 |
def test_name(msg_name):
"""Generate the name of a serialization unit test given a message name"""
return "test_{}_serialization".format(msg_name)
|
261c4c4e215b84c0358ca51cbbd4723208dedb12
| 664,000 |